diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..8f50e918933fe90681060beacafcd27b230742d9
--- /dev/null
+++ b/README.md
@@ -0,0 +1,27 @@
+# Core ML Stable Diffusion Mixed-Bit Palettization Resources
+
+This repository contains resources related to the use of [mixed-bit palettization techniques for Stable Diffusion](https://github.com/apple/ml-stable-diffusion#mbp). As explained in the original repo, this is an advanced compression technique that applies palettization with a varying number of bits per layer, so that quality stays above a desired threshold (as measured by PSNR with respect to the original model). It achieves significant space savings without resorting to learned quantization tables during fine-tuning. A toy sketch of what palettization means is included at the end of this README.
+
+The repository contains the following resources:
+
+- [`recipes/`](tree/main/recipes). JSON files with results from the analysis phase [(Step 1 of the instructions)](https://github.com/apple/ml-stable-diffusion#mbp) on three popular Stable Diffusion models:
+  * Stable Diffusion 1.5
+  * Stable Diffusion 2.1 base
+  * Stable Diffusion XL 1.0 base
+
+You can apply any of the available recipes to the appropriate model to experiment and compare results.
+
+- [`unet-mbp-sdxl-1-base/`](tree/main/unet-mbp-sdxl-1-base). Three recipes pre-applied to the UNet of Stable Diffusion XL 1.0 base:
+
+| Recipe                       | Effective bits | UNet size (GB) | Size reduction |
+|------------------------------|----------------|----------------|----------------|
+| recipe_3_41_bit_mixedpalette | 3.41           | 1.1            | 77%            |
+| recipe_4_50_bit_mixedpalette | 4.50           | 1.4            | 71%            |
+| recipe_6_55_bit_mixedpalette | 6.55           | 2.0            | 58%            |
+
+- [`coreml-stable-diffusion-xl-base_mbp_4_50_palettized`]. A full Core ML pipeline for Stable Diffusion XL 1.0 base, with the `recipe_4_50_bit_mixedpalette` recipe pre-applied to the UNet. You can download it to use in your own Core ML apps (see the loading sketch at the end of this README).
+
+- [`coreml-stable-diffusion-xl-base_mbp_4_50_palettized.zip`]. An archived version of the same pipeline, for use with the [Hugging Face demo app](https://github.com/huggingface/swift-coreml-diffusers) and other third-party tools.
+
+This repository was prepared by Apple and Hugging Face in July 2023, based on experiments conducted with public beta versions of iOS 17.0, iPadOS 17.0 and macOS 14.0.
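+
+## Illustrative sketches
+
+The snippets below are our own illustrations, not part of Apple's tooling. First, a toy numpy/scikit-learn sketch of what `n`-bit palettization does to a single weight tensor: the layer is reduced to a lookup table (a "palette") of 2^n float values plus one n-bit index per weight.
+
+```python
+# Toy sketch of n-bit palettization: cluster a weight tensor into a
+# palette of 2**n_bits centroids, then represent each weight by the
+# nearest palette entry. Illustration only, not the Core ML tooling.
+import numpy as np
+from sklearn.cluster import KMeans
+
+def palettize(weights: np.ndarray, n_bits: int) -> np.ndarray:
+    flat = weights.reshape(-1, 1)
+    km = KMeans(n_clusters=2 ** n_bits, n_init=10, random_state=0).fit(flat)
+    palette = km.cluster_centers_.ravel()  # 2**n_bits float entries
+    indices = km.labels_                   # n_bits of storage per weight
+    return palette[indices].reshape(weights.shape)
+
+w = np.random.randn(256, 256).astype(np.float32)
+w_4bit = palettize(w, n_bits=4)            # 16-entry palette
+print("max abs error:", float(np.abs(w - w_4bit).max()))
+```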
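+
+Quality is kept above the threshold by measuring PSNR against the original model. For reference, one standard PSNR definition is sketched below; which signals are actually compared is described in the [original repo](https://github.com/apple/ml-stable-diffusion#mbp), not assumed here.
+
+```python
+# One standard PSNR definition, for reference. The exact signals that
+# are compared (outputs of the original vs. the palettized model) are
+# documented in apple/ml-stable-diffusion.
+import numpy as np
+
+def psnr(reference: np.ndarray, approximation: np.ndarray) -> float:
+    ref = reference.astype(np.float64)
+    mse = np.mean((ref - approximation.astype(np.float64)) ** 2)
+    peak = np.abs(ref).max()
+    return 10.0 * np.log10(peak ** 2 / mse)
+```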
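+
+The sizes in the recipe table follow almost directly from the effective bit counts. A back-of-the-envelope check, where the ~2.57B parameter count for the SDXL UNet is our assumption (not a figure from this repo) and small deviations come from per-layer palettes and model metadata:
+
+```python
+# Back-of-the-envelope check of the recipe table above. The ~2.57B
+# parameter count for the SDXL UNet is an assumption, not a figure
+# from this repo.
+PARAMS = 2.57e9
+FP16_GB = PARAMS * 16 / 8 / 1e9  # float16 baseline, about 5.1 GB
+for recipe, bits in [("3_41", 3.41), ("4_50", 4.50), ("6_55", 6.55)]:
+    size_gb = PARAMS * bits / 8 / 1e9
+    saving = 100 * (1 - size_gb / FP16_GB)
+    print(f"recipe_{recipe}_bit_mixedpalette: {size_gb:.1f} GB, {saving:.0f}% smaller")
+```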
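+
+To experiment with a recipe, you can start by inspecting its JSON. The filename below is hypothetical, and the schema is defined by the analysis scripts in the original repo, so this sketch only lists top-level keys:
+
+```python
+# Peek at a recipe file without assuming its schema. The filename is
+# hypothetical; the structure is defined by apple/ml-stable-diffusion.
+import json
+
+with open("recipes/recipe_4_50_bit_mixedpalette.json") as f:
+    recipe = json.load(f)
+print(list(recipe))  # top-level keys only
+```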
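+
+Finally, the compiled models in the full pipeline can be smoke-tested from Python with `coremltools` (macOS only). A minimal sketch, using dummy token ids and the input/output names from the `TextEncoder.mlmodelc` metadata that appears later in this diff:
+
+```python
+# Load the compiled TextEncoder and run one prediction. Input schema per
+# its metadata.json: "input_ids", Float32, shape [1, 77]. The zero token
+# ids are dummies for a smoke test only. Requires coremltools on macOS.
+import coremltools as ct
+import numpy as np
+
+model = ct.models.CompiledMLModel(
+    "coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder.mlmodelc"
+)
+out = model.predict({"input_ids": np.zeros((1, 77), dtype=np.float32)})
+print(out["hidden_embeds"].shape, out["pooled_outputs"].shape)
+```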
+ diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized.zip b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized.zip new file mode 100644 index 0000000000000000000000000000000000000000..40bc8046a9a3514a2c1ba9616df5a5ccdd6737cf --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64f0b7ee4cbb803bbaf83d4eef69b2868e0da864d8fd7baec988eb8155f92ced +size 3356775856 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder.mlmodelc/analytics/coremldata.bin b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder.mlmodelc/analytics/coremldata.bin new file mode 100644 index 0000000000000000000000000000000000000000..e93e6ceaa6e8e319e2ea8cdd6dcc1488ccc49d1f --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder.mlmodelc/analytics/coremldata.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f732919fa370a1b7b09ec2b227539269b6543149a2b0dbae95cc4cf350e4b697 +size 207 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder.mlmodelc/coremldata.bin b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder.mlmodelc/coremldata.bin new file mode 100644 index 0000000000000000000000000000000000000000..bca87797b63a2002be08fcd99a522c5011db367c --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder.mlmodelc/coremldata.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ff26866d8d8fbb4e53a0628f8aab5f7edf1b3ec763a96e6812c8f7fbf4c9827 +size 825 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder.mlmodelc/metadata.json b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder.mlmodelc/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..df8a5ad64ed7dccc831ee8dc734c7a263010eb6e --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder.mlmodelc/metadata.json @@ -0,0 +1,82 @@ +[ + { + "shortDescription" : "Stable Diffusion generates images conditioned on text and\/or other images as input through the diffusion process. 
Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.", + "metadataOutputVersion" : "3.0", + "outputSchema" : [ + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float32", + "formattedType" : "MultiArray (Float32)", + "shortDescription" : "Hidden states after the encoder layers", + "shape" : "[]", + "name" : "hidden_embeds", + "type" : "MultiArray" + }, + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float32", + "formattedType" : "MultiArray (Float32)", + "shortDescription" : "The version of the `last_hidden_state` output after pooling", + "shape" : "[]", + "name" : "pooled_outputs", + "type" : "MultiArray" + } + ], + "version" : "diffusers\/stable-diffusion-xl-base-1.0", + "modelParameters" : [ + + ], + "author" : "Please refer to the Model Card available at huggingface.co\/diffusers\/stable-diffusion-xl-base-1.0", + "specificationVersion" : 7, + "storagePrecision" : "Float16", + "license" : "OpenRAIL (https:\/\/huggingface.co\/spaces\/CompVis\/stable-diffusion-license)", + "mlProgramOperationTypeHistogram" : { + "Ios16.cast" : 3, + "Ios16.mul" : 36, + "Ios16.layerNorm" : 25, + "Stack" : 1, + "Transpose" : 60, + "Ios16.sigmoid" : 12, + "Ios16.linear" : 72, + "Ios16.add" : 37, + "Ios16.matmul" : 24, + "Ios16.softmax" : 12, + "Ios16.gatherNd" : 1, + "Ios16.gather" : 1, + "Ios16.reshape" : 120, + "Ios16.reduceArgmax" : 1 + }, + "computePrecision" : "Mixed (Float16, Float32, Int32)", + "isUpdatable" : "0", + "availability" : { + "macOS" : "13.0", + "tvOS" : "16.0", + "watchOS" : "9.0", + "iOS" : "16.0", + "macCatalyst" : "16.0" + }, + "modelType" : { + "name" : "MLModelType_mlProgram" + }, + "inputSchema" : [ + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float32", + "formattedType" : "MultiArray (Float32 1 × 77)", + "shortDescription" : "The token ids that represent the input text", + "shape" : "[1, 77]", + "name" : "input_ids", + "type" : "MultiArray" + } + ], + "userDefinedMetadata" : { + "com.github.apple.coremltools.version" : "7.0b1", + "com.github.apple.coremltools.source" : "torch==2.0.1+cu117" + }, + "generatedClassName" : "Stable_Diffusion_version_diffusers_stable_diffusion_xl_base_1_0_text_encoder", + "method" : "predict" + } +] \ No newline at end of file diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder.mlmodelc/model.mil b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder.mlmodelc/model.mil new file mode 100644 index 0000000000000000000000000000000000000000..02bd1f912cc81bf538be2548551c918d9c4482b3 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder.mlmodelc/model.mil @@ -0,0 +1,896 @@ +program(1.0) +[buildInfo = dict, tensor>({{"coremlc-component-MIL", "5.33.4"}, {"coremlc-version", "1839.0.0"}, {"coremltools-component-torch", "2.0.1+cu117"}, {"coremltools-version", "7.0b1"}})] +{ + func main(tensor input_ids) { + tensor var_5 = const()[name = tensor("op_5"), val = tensor(-1)]; + tensor var_6 = const()[name = tensor("op_6"), val = tensor(false)]; + tensor cast_1_dtype_0 = const()[name = tensor("cast_1_dtype_0"), val = tensor("int32")]; + tensor inputs_embeds_axis_0 = const()[name = tensor("inputs_embeds_axis_0"), val = tensor(0)]; + tensor inputs_embeds_batch_dims_0 = const()[name = tensor("inputs_embeds_batch_dims_0"), val = tensor(0)]; + tensor text_encoder_text_model_embeddings_token_embedding_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_embeddings_token_embedding_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(64)))]; + tensor cast_526 = cast(dtype = cast_1_dtype_0, x = input_ids)[name = tensor("cast_526")]; + tensor inputs_embeds_cast = gather(axis = inputs_embeds_axis_0, batch_dims = inputs_embeds_batch_dims_0, indices = cast_526, x = text_encoder_text_model_embeddings_token_embedding_weight_to_fp16)[name = tensor("inputs_embeds_cast")]; + tensor position_embeddings_to_fp16 = const()[name = tensor("position_embeddings_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(75890816)))]; + tensor input_3_cast = add(x = inputs_embeds_cast, y = position_embeddings_to_fp16)[name = tensor("input_3_cast")]; + tensor hidden_states_1_axes_0 = const()[name = tensor("hidden_states_1_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(76009152)))]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(76010752)))]; + tensor var_12_to_fp16 = const()[name = tensor("op_12_to_fp16"), val = tensor(0x1.5p-17)]; + tensor hidden_states_1_cast = layer_norm(axes = hidden_states_1_axes_0, beta = text_encoder_text_model_encoder_layers_0_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_0_layer_norm1_weight_to_fp16, x = input_3_cast)[name = tensor("hidden_states_1_cast")]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(76012352)))]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(77192064)))]; + tensor var_86_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight_to_fp16, x = hidden_states_1_cast)[name = tensor("op_86_cast")]; + tensor var_87_to_fp16 = const()[name = tensor("op_87_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_5_cast = mul(x = var_86_cast, y = var_87_to_fp16)[name = tensor("tensor_5_cast")]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(77193664)))]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(78373376)))]; + tensor tensor_1_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias_to_fp16, weight = 
text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight_to_fp16, x = hidden_states_1_cast)[name = tensor("tensor_1_cast")]; + tensor var_92 = const()[name = tensor("op_92"), val = tensor([1, -1, 12, 64])]; + tensor var_93_cast = reshape(shape = var_92, x = tensor_1_cast)[name = tensor("op_93_cast")]; + tensor var_94_perm_0 = const()[name = tensor("op_94_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(78374976)))]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79554688)))]; + tensor tensor_3_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight_to_fp16, x = hidden_states_1_cast)[name = tensor("tensor_3_cast")]; + tensor var_99 = const()[name = tensor("op_99"), val = tensor([1, -1, 12, 64])]; + tensor var_100_cast = reshape(shape = var_99, x = tensor_3_cast)[name = tensor("op_100_cast")]; + tensor var_101_perm_0 = const()[name = tensor("op_101_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_108 = const()[name = tensor("op_108"), val = tensor([1, 77, 12, 64])]; + tensor var_109_cast = reshape(shape = var_108, x = tensor_5_cast)[name = tensor("op_109_cast")]; + tensor var_110_perm_0 = const()[name = tensor("op_110_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_112 = const()[name = tensor("op_112"), val = tensor([12, -1, 64])]; + tensor transpose_58 = transpose(perm = var_110_perm_0, x = var_109_cast)[name = tensor("transpose_58")]; + tensor query_states_1_cast = reshape(shape = var_112, x = transpose_58)[name = tensor("query_states_1_cast")]; + tensor var_114 = const()[name = tensor("op_114"), val = tensor([12, -1, 64])]; + tensor transpose_60 = transpose(perm = var_94_perm_0, x = var_93_cast)[name = tensor("transpose_60")]; + tensor key_states_3_cast = reshape(shape = var_114, x = transpose_60)[name = tensor("key_states_3_cast")]; + tensor var_116 = const()[name = tensor("op_116"), val = tensor([12, -1, 64])]; + tensor transpose_59 = transpose(perm = var_101_perm_0, x = var_100_cast)[name = tensor("transpose_59")]; + tensor value_states_3_cast = reshape(shape = var_116, x = transpose_59)[name = tensor("value_states_3_cast")]; + tensor var_119_perm_0 = const()[name = tensor("op_119_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_1_transpose_x_0 = const()[name = tensor("attn_weights_1_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_1_transpose_y_0 = const()[name = tensor("attn_weights_1_transpose_y_0"), val = tensor(false)]; + tensor transpose_57 = transpose(perm = var_119_perm_0, x = key_states_3_cast)[name = tensor("transpose_57")]; + tensor attn_weights_1_cast = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = query_states_1_cast, y = transpose_57)[name = tensor("attn_weights_1_cast")]; + tensor var_121 = const()[name = tensor("op_121"), val = tensor([1, 12, 77, 77])]; + tensor var_122_cast = reshape(shape = var_121, x = attn_weights_1_cast)[name = tensor("op_122_cast")]; + tensor causal_attention_mask_to_fp16 = 
const()[name = tensor("causal_attention_mask_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79556288)))]; + tensor attn_weights_3_cast = add(x = var_122_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_3_cast")]; + tensor var_127 = const()[name = tensor("op_127"), val = tensor([12, 77, 77])]; + tensor input_5_cast = reshape(shape = var_127, x = attn_weights_3_cast)[name = tensor("input_5_cast")]; + tensor input_7_cast = softmax(axis = var_5, x = input_5_cast)[name = tensor("input_7_cast")]; + tensor attn_output_1_transpose_x_0 = const()[name = tensor("attn_output_1_transpose_x_0"), val = tensor(false)]; + tensor attn_output_1_transpose_y_0 = const()[name = tensor("attn_output_1_transpose_y_0"), val = tensor(false)]; + tensor attn_output_1_cast = matmul(transpose_x = attn_output_1_transpose_x_0, transpose_y = attn_output_1_transpose_y_0, x = input_7_cast, y = value_states_3_cast)[name = tensor("attn_output_1_cast")]; + tensor var_132 = const()[name = tensor("op_132"), val = tensor([1, 12, 77, 64])]; + tensor attn_output_3_cast = reshape(shape = var_132, x = attn_output_1_cast)[name = tensor("attn_output_3_cast")]; + tensor attn_output_5_perm_0 = const()[name = tensor("attn_output_5_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_135 = const()[name = tensor("op_135"), val = tensor([1, 77, 768])]; + tensor transpose_56 = transpose(perm = attn_output_5_perm_0, x = attn_output_3_cast)[name = tensor("transpose_56")]; + tensor input_9_cast = reshape(shape = var_135, x = transpose_56)[name = tensor("input_9_cast")]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79568256)))]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(80747968)))]; + tensor hidden_states_3_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight_to_fp16, x = input_9_cast)[name = tensor("hidden_states_3_cast")]; + tensor input_11_cast = add(x = input_3_cast, y = hidden_states_3_cast)[name = tensor("input_11_cast")]; + tensor input_13_axes_0 = const()[name = tensor("input_13_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(80749568)))]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(80751168)))]; + tensor input_13_cast = layer_norm(axes = input_13_axes_0, beta = text_encoder_text_model_encoder_layers_0_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_0_layer_norm2_weight_to_fp16, x = input_11_cast)[name = tensor("input_13_cast")]; + tensor text_encoder_text_model_encoder_layers_0_mlp_fc1_weight_to_fp16 = 
const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(80752768)))]; + tensor text_encoder_text_model_encoder_layers_0_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(85471424)))]; + tensor input_15_cast = linear(bias = text_encoder_text_model_encoder_layers_0_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_mlp_fc1_weight_to_fp16, x = input_13_cast)[name = tensor("input_15_cast")]; + tensor var_150_to_fp16 = const()[name = tensor("op_150_to_fp16"), val = tensor(0x1.b3cp+0)]; + tensor var_151_cast = mul(x = input_15_cast, y = var_150_to_fp16)[name = tensor("op_151_cast")]; + tensor var_152_cast = sigmoid(x = var_151_cast)[name = tensor("op_152_cast")]; + tensor input_17_cast = mul(x = input_15_cast, y = var_152_cast)[name = tensor("input_17_cast")]; + tensor text_encoder_text_model_encoder_layers_0_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(85477632)))]; + tensor text_encoder_text_model_encoder_layers_0_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(90196288)))]; + tensor hidden_states_5_cast = linear(bias = text_encoder_text_model_encoder_layers_0_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_mlp_fc2_weight_to_fp16, x = input_17_cast)[name = tensor("hidden_states_5_cast")]; + tensor input_19_cast = add(x = input_11_cast, y = hidden_states_5_cast)[name = tensor("input_19_cast")]; + tensor hidden_states_7_axes_0 = const()[name = tensor("hidden_states_7_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_1_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(90197888)))]; + tensor text_encoder_text_model_encoder_layers_1_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(90199488)))]; + tensor hidden_states_7_cast = layer_norm(axes = hidden_states_7_axes_0, beta = text_encoder_text_model_encoder_layers_1_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_1_layer_norm1_weight_to_fp16, x = input_19_cast)[name = tensor("hidden_states_7_cast")]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(90201088)))]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(91380800)))]; + tensor var_176_cast = linear(bias = 
text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight_to_fp16, x = hidden_states_7_cast)[name = tensor("op_176_cast")]; + tensor var_177_to_fp16 = const()[name = tensor("op_177_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_11_cast = mul(x = var_176_cast, y = var_177_to_fp16)[name = tensor("tensor_11_cast")]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(91382400)))]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(92562112)))]; + tensor tensor_7_cast = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight_to_fp16, x = hidden_states_7_cast)[name = tensor("tensor_7_cast")]; + tensor var_182 = const()[name = tensor("op_182"), val = tensor([1, -1, 12, 64])]; + tensor var_183_cast = reshape(shape = var_182, x = tensor_7_cast)[name = tensor("op_183_cast")]; + tensor var_184_perm_0 = const()[name = tensor("op_184_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(92563712)))]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(93743424)))]; + tensor tensor_9_cast = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight_to_fp16, x = hidden_states_7_cast)[name = tensor("tensor_9_cast")]; + tensor var_189 = const()[name = tensor("op_189"), val = tensor([1, -1, 12, 64])]; + tensor var_190_cast = reshape(shape = var_189, x = tensor_9_cast)[name = tensor("op_190_cast")]; + tensor var_191_perm_0 = const()[name = tensor("op_191_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_198 = const()[name = tensor("op_198"), val = tensor([1, 77, 12, 64])]; + tensor var_199_cast = reshape(shape = var_198, x = tensor_11_cast)[name = tensor("op_199_cast")]; + tensor var_200_perm_0 = const()[name = tensor("op_200_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_202 = const()[name = tensor("op_202"), val = tensor([12, -1, 64])]; + tensor transpose_53 = transpose(perm = var_200_perm_0, x = var_199_cast)[name = tensor("transpose_53")]; + tensor query_states_3_cast = reshape(shape = var_202, x = transpose_53)[name = tensor("query_states_3_cast")]; + tensor var_204 = const()[name = tensor("op_204"), val = tensor([12, -1, 64])]; + tensor transpose_55 = transpose(perm = var_184_perm_0, x = var_183_cast)[name = tensor("transpose_55")]; + tensor key_states_7_cast = reshape(shape = var_204, x = transpose_55)[name = tensor("key_states_7_cast")]; + tensor var_206 = const()[name = tensor("op_206"), val = tensor([12, -1, 64])]; 
+ tensor transpose_54 = transpose(perm = var_191_perm_0, x = var_190_cast)[name = tensor("transpose_54")]; + tensor value_states_7_cast = reshape(shape = var_206, x = transpose_54)[name = tensor("value_states_7_cast")]; + tensor var_209_perm_0 = const()[name = tensor("op_209_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_7_transpose_x_0 = const()[name = tensor("attn_weights_7_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_7_transpose_y_0 = const()[name = tensor("attn_weights_7_transpose_y_0"), val = tensor(false)]; + tensor transpose_52 = transpose(perm = var_209_perm_0, x = key_states_7_cast)[name = tensor("transpose_52")]; + tensor attn_weights_7_cast = matmul(transpose_x = attn_weights_7_transpose_x_0, transpose_y = attn_weights_7_transpose_y_0, x = query_states_3_cast, y = transpose_52)[name = tensor("attn_weights_7_cast")]; + tensor var_211 = const()[name = tensor("op_211"), val = tensor([1, 12, 77, 77])]; + tensor var_212_cast = reshape(shape = var_211, x = attn_weights_7_cast)[name = tensor("op_212_cast")]; + tensor attn_weights_9_cast = add(x = var_212_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_9_cast")]; + tensor var_217 = const()[name = tensor("op_217"), val = tensor([12, 77, 77])]; + tensor input_21_cast = reshape(shape = var_217, x = attn_weights_9_cast)[name = tensor("input_21_cast")]; + tensor input_23_cast = softmax(axis = var_5, x = input_21_cast)[name = tensor("input_23_cast")]; + tensor attn_output_7_transpose_x_0 = const()[name = tensor("attn_output_7_transpose_x_0"), val = tensor(false)]; + tensor attn_output_7_transpose_y_0 = const()[name = tensor("attn_output_7_transpose_y_0"), val = tensor(false)]; + tensor attn_output_7_cast = matmul(transpose_x = attn_output_7_transpose_x_0, transpose_y = attn_output_7_transpose_y_0, x = input_23_cast, y = value_states_7_cast)[name = tensor("attn_output_7_cast")]; + tensor var_222 = const()[name = tensor("op_222"), val = tensor([1, 12, 77, 64])]; + tensor attn_output_9_cast = reshape(shape = var_222, x = attn_output_7_cast)[name = tensor("attn_output_9_cast")]; + tensor attn_output_11_perm_0 = const()[name = tensor("attn_output_11_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_225 = const()[name = tensor("op_225"), val = tensor([1, 77, 768])]; + tensor transpose_51 = transpose(perm = attn_output_11_perm_0, x = attn_output_9_cast)[name = tensor("transpose_51")]; + tensor input_25_cast = reshape(shape = var_225, x = transpose_51)[name = tensor("input_25_cast")]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(93745024)))]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(94924736)))]; + tensor hidden_states_9_cast = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight_to_fp16, x = input_25_cast)[name = tensor("hidden_states_9_cast")]; + tensor input_27_cast = add(x = input_19_cast, y = hidden_states_9_cast)[name = tensor("input_27_cast")]; + tensor input_29_axes_0 = const()[name = tensor("input_29_axes_0"), val = tensor([-1])]; + 
tensor text_encoder_text_model_encoder_layers_1_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(94926336)))]; + tensor text_encoder_text_model_encoder_layers_1_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(94927936)))]; + tensor input_29_cast = layer_norm(axes = input_29_axes_0, beta = text_encoder_text_model_encoder_layers_1_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_1_layer_norm2_weight_to_fp16, x = input_27_cast)[name = tensor("input_29_cast")]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(94929536)))]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(99648192)))]; + tensor input_31_cast = linear(bias = text_encoder_text_model_encoder_layers_1_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_mlp_fc1_weight_to_fp16, x = input_29_cast)[name = tensor("input_31_cast")]; + tensor var_240_to_fp16 = const()[name = tensor("op_240_to_fp16"), val = tensor(0x1.b3cp+0)]; + tensor var_241_cast = mul(x = input_31_cast, y = var_240_to_fp16)[name = tensor("op_241_cast")]; + tensor var_242_cast = sigmoid(x = var_241_cast)[name = tensor("op_242_cast")]; + tensor input_33_cast = mul(x = input_31_cast, y = var_242_cast)[name = tensor("input_33_cast")]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(99654400)))]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(104373056)))]; + tensor hidden_states_11_cast = linear(bias = text_encoder_text_model_encoder_layers_1_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_mlp_fc2_weight_to_fp16, x = input_33_cast)[name = tensor("hidden_states_11_cast")]; + tensor input_35_cast = add(x = input_27_cast, y = hidden_states_11_cast)[name = tensor("input_35_cast")]; + tensor hidden_states_13_axes_0 = const()[name = tensor("hidden_states_13_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(104374656)))]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(104376256)))]; + tensor hidden_states_13_cast = layer_norm(axes = 
hidden_states_13_axes_0, beta = text_encoder_text_model_encoder_layers_2_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_2_layer_norm1_weight_to_fp16, x = input_35_cast)[name = tensor("hidden_states_13_cast")]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(104377856)))]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(105557568)))]; + tensor var_266_cast = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight_to_fp16, x = hidden_states_13_cast)[name = tensor("op_266_cast")]; + tensor var_267_to_fp16 = const()[name = tensor("op_267_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_17_cast = mul(x = var_266_cast, y = var_267_to_fp16)[name = tensor("tensor_17_cast")]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(105559168)))]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(106738880)))]; + tensor tensor_13_cast = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight_to_fp16, x = hidden_states_13_cast)[name = tensor("tensor_13_cast")]; + tensor var_272 = const()[name = tensor("op_272"), val = tensor([1, -1, 12, 64])]; + tensor var_273_cast = reshape(shape = var_272, x = tensor_13_cast)[name = tensor("op_273_cast")]; + tensor var_274_perm_0 = const()[name = tensor("op_274_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(106740480)))]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(107920192)))]; + tensor tensor_15_cast = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight_to_fp16, x = hidden_states_13_cast)[name = tensor("tensor_15_cast")]; + tensor var_279 = const()[name = tensor("op_279"), val = tensor([1, -1, 12, 64])]; + tensor var_280_cast = reshape(shape = var_279, x = tensor_15_cast)[name = tensor("op_280_cast")]; + tensor var_281_perm_0 = const()[name = tensor("op_281_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_288 = const()[name = tensor("op_288"), val = tensor([1, 77, 12, 
64])]; + tensor var_289_cast = reshape(shape = var_288, x = tensor_17_cast)[name = tensor("op_289_cast")]; + tensor var_290_perm_0 = const()[name = tensor("op_290_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_292 = const()[name = tensor("op_292"), val = tensor([12, -1, 64])]; + tensor transpose_48 = transpose(perm = var_290_perm_0, x = var_289_cast)[name = tensor("transpose_48")]; + tensor query_states_5_cast = reshape(shape = var_292, x = transpose_48)[name = tensor("query_states_5_cast")]; + tensor var_294 = const()[name = tensor("op_294"), val = tensor([12, -1, 64])]; + tensor transpose_50 = transpose(perm = var_274_perm_0, x = var_273_cast)[name = tensor("transpose_50")]; + tensor key_states_11_cast = reshape(shape = var_294, x = transpose_50)[name = tensor("key_states_11_cast")]; + tensor var_296 = const()[name = tensor("op_296"), val = tensor([12, -1, 64])]; + tensor transpose_49 = transpose(perm = var_281_perm_0, x = var_280_cast)[name = tensor("transpose_49")]; + tensor value_states_11_cast = reshape(shape = var_296, x = transpose_49)[name = tensor("value_states_11_cast")]; + tensor var_299_perm_0 = const()[name = tensor("op_299_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_13_transpose_x_0 = const()[name = tensor("attn_weights_13_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_13_transpose_y_0 = const()[name = tensor("attn_weights_13_transpose_y_0"), val = tensor(false)]; + tensor transpose_47 = transpose(perm = var_299_perm_0, x = key_states_11_cast)[name = tensor("transpose_47")]; + tensor attn_weights_13_cast = matmul(transpose_x = attn_weights_13_transpose_x_0, transpose_y = attn_weights_13_transpose_y_0, x = query_states_5_cast, y = transpose_47)[name = tensor("attn_weights_13_cast")]; + tensor var_301 = const()[name = tensor("op_301"), val = tensor([1, 12, 77, 77])]; + tensor var_302_cast = reshape(shape = var_301, x = attn_weights_13_cast)[name = tensor("op_302_cast")]; + tensor attn_weights_15_cast = add(x = var_302_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_15_cast")]; + tensor var_307 = const()[name = tensor("op_307"), val = tensor([12, 77, 77])]; + tensor input_37_cast = reshape(shape = var_307, x = attn_weights_15_cast)[name = tensor("input_37_cast")]; + tensor input_39_cast = softmax(axis = var_5, x = input_37_cast)[name = tensor("input_39_cast")]; + tensor attn_output_13_transpose_x_0 = const()[name = tensor("attn_output_13_transpose_x_0"), val = tensor(false)]; + tensor attn_output_13_transpose_y_0 = const()[name = tensor("attn_output_13_transpose_y_0"), val = tensor(false)]; + tensor attn_output_13_cast = matmul(transpose_x = attn_output_13_transpose_x_0, transpose_y = attn_output_13_transpose_y_0, x = input_39_cast, y = value_states_11_cast)[name = tensor("attn_output_13_cast")]; + tensor var_312 = const()[name = tensor("op_312"), val = tensor([1, 12, 77, 64])]; + tensor attn_output_15_cast = reshape(shape = var_312, x = attn_output_13_cast)[name = tensor("attn_output_15_cast")]; + tensor attn_output_17_perm_0 = const()[name = tensor("attn_output_17_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_315 = const()[name = tensor("op_315"), val = tensor([1, 77, 768])]; + tensor transpose_46 = transpose(perm = attn_output_17_perm_0, x = attn_output_15_cast)[name = tensor("transpose_46")]; + tensor input_41_cast = reshape(shape = var_315, x = transpose_46)[name = tensor("input_41_cast")]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(107921792)))]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(109101504)))]; + tensor hidden_states_15_cast = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight_to_fp16, x = input_41_cast)[name = tensor("hidden_states_15_cast")]; + tensor input_43_cast = add(x = input_35_cast, y = hidden_states_15_cast)[name = tensor("input_43_cast")]; + tensor input_45_axes_0 = const()[name = tensor("input_45_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(109103104)))]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(109104704)))]; + tensor input_45_cast = layer_norm(axes = input_45_axes_0, beta = text_encoder_text_model_encoder_layers_2_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_2_layer_norm2_weight_to_fp16, x = input_43_cast)[name = tensor("input_45_cast")]; + tensor text_encoder_text_model_encoder_layers_2_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(109106304)))]; + tensor text_encoder_text_model_encoder_layers_2_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113824960)))]; + tensor input_47_cast = linear(bias = text_encoder_text_model_encoder_layers_2_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_mlp_fc1_weight_to_fp16, x = input_45_cast)[name = tensor("input_47_cast")]; + tensor var_330_to_fp16 = const()[name = tensor("op_330_to_fp16"), val = tensor(0x1.b3cp+0)]; + tensor var_331_cast = mul(x = input_47_cast, y = var_330_to_fp16)[name = tensor("op_331_cast")]; + tensor var_332_cast = sigmoid(x = var_331_cast)[name = tensor("op_332_cast")]; + tensor input_49_cast = mul(x = input_47_cast, y = var_332_cast)[name = tensor("input_49_cast")]; + tensor text_encoder_text_model_encoder_layers_2_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113831168)))]; + tensor text_encoder_text_model_encoder_layers_2_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(118549824)))]; + tensor hidden_states_17_cast = linear(bias = text_encoder_text_model_encoder_layers_2_mlp_fc2_bias_to_fp16, weight = 
text_encoder_text_model_encoder_layers_2_mlp_fc2_weight_to_fp16, x = input_49_cast)[name = tensor("hidden_states_17_cast")]; + tensor input_51_cast = add(x = input_43_cast, y = hidden_states_17_cast)[name = tensor("input_51_cast")]; + tensor hidden_states_19_axes_0 = const()[name = tensor("hidden_states_19_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(118551424)))]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(118553024)))]; + tensor hidden_states_19_cast = layer_norm(axes = hidden_states_19_axes_0, beta = text_encoder_text_model_encoder_layers_3_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_3_layer_norm1_weight_to_fp16, x = input_51_cast)[name = tensor("hidden_states_19_cast")]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(118554624)))]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(119734336)))]; + tensor var_356_cast = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight_to_fp16, x = hidden_states_19_cast)[name = tensor("op_356_cast")]; + tensor var_357_to_fp16 = const()[name = tensor("op_357_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_23_cast = mul(x = var_356_cast, y = var_357_to_fp16)[name = tensor("tensor_23_cast")]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(119735936)))]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(120915648)))]; + tensor tensor_19_cast = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight_to_fp16, x = hidden_states_19_cast)[name = tensor("tensor_19_cast")]; + tensor var_362 = const()[name = tensor("op_362"), val = tensor([1, -1, 12, 64])]; + tensor var_363_cast = reshape(shape = var_362, x = tensor_19_cast)[name = tensor("op_363_cast")]; + tensor var_364_perm_0 = const()[name = tensor("op_364_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(120917248)))]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(122096960)))]; + tensor tensor_21_cast = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight_to_fp16, x = hidden_states_19_cast)[name = tensor("tensor_21_cast")]; + tensor var_369 = const()[name = tensor("op_369"), val = tensor([1, -1, 12, 64])]; + tensor var_370_cast = reshape(shape = var_369, x = tensor_21_cast)[name = tensor("op_370_cast")]; + tensor var_371_perm_0 = const()[name = tensor("op_371_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_378 = const()[name = tensor("op_378"), val = tensor([1, 77, 12, 64])]; + tensor var_379_cast = reshape(shape = var_378, x = tensor_23_cast)[name = tensor("op_379_cast")]; + tensor var_380_perm_0 = const()[name = tensor("op_380_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_382 = const()[name = tensor("op_382"), val = tensor([12, -1, 64])]; + tensor transpose_43 = transpose(perm = var_380_perm_0, x = var_379_cast)[name = tensor("transpose_43")]; + tensor query_states_7_cast = reshape(shape = var_382, x = transpose_43)[name = tensor("query_states_7_cast")]; + tensor var_384 = const()[name = tensor("op_384"), val = tensor([12, -1, 64])]; + tensor transpose_45 = transpose(perm = var_364_perm_0, x = var_363_cast)[name = tensor("transpose_45")]; + tensor key_states_15_cast = reshape(shape = var_384, x = transpose_45)[name = tensor("key_states_15_cast")]; + tensor var_386 = const()[name = tensor("op_386"), val = tensor([12, -1, 64])]; + tensor transpose_44 = transpose(perm = var_371_perm_0, x = var_370_cast)[name = tensor("transpose_44")]; + tensor value_states_15_cast = reshape(shape = var_386, x = transpose_44)[name = tensor("value_states_15_cast")]; + tensor var_389_perm_0 = const()[name = tensor("op_389_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_19_transpose_x_0 = const()[name = tensor("attn_weights_19_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_19_transpose_y_0 = const()[name = tensor("attn_weights_19_transpose_y_0"), val = tensor(false)]; + tensor transpose_42 = transpose(perm = var_389_perm_0, x = key_states_15_cast)[name = tensor("transpose_42")]; + tensor attn_weights_19_cast = matmul(transpose_x = attn_weights_19_transpose_x_0, transpose_y = attn_weights_19_transpose_y_0, x = query_states_7_cast, y = transpose_42)[name = tensor("attn_weights_19_cast")]; + tensor var_391 = const()[name = tensor("op_391"), val = tensor([1, 12, 77, 77])]; + tensor var_392_cast = reshape(shape = var_391, x = attn_weights_19_cast)[name = tensor("op_392_cast")]; + tensor attn_weights_21_cast = add(x = var_392_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_21_cast")]; + tensor var_397 = const()[name = tensor("op_397"), val = tensor([12, 77, 77])]; + tensor input_53_cast = reshape(shape = var_397, x = attn_weights_21_cast)[name = tensor("input_53_cast")]; + tensor input_55_cast = softmax(axis = var_5, x = input_53_cast)[name = tensor("input_55_cast")]; + tensor attn_output_19_transpose_x_0 = const()[name = tensor("attn_output_19_transpose_x_0"), val = tensor(false)]; + tensor attn_output_19_transpose_y_0 = const()[name = tensor("attn_output_19_transpose_y_0"), val = 
tensor(false)]; + tensor attn_output_19_cast = matmul(transpose_x = attn_output_19_transpose_x_0, transpose_y = attn_output_19_transpose_y_0, x = input_55_cast, y = value_states_15_cast)[name = tensor("attn_output_19_cast")]; + tensor var_402 = const()[name = tensor("op_402"), val = tensor([1, 12, 77, 64])]; + tensor attn_output_21_cast = reshape(shape = var_402, x = attn_output_19_cast)[name = tensor("attn_output_21_cast")]; + tensor attn_output_23_perm_0 = const()[name = tensor("attn_output_23_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_405 = const()[name = tensor("op_405"), val = tensor([1, 77, 768])]; + tensor transpose_41 = transpose(perm = attn_output_23_perm_0, x = attn_output_21_cast)[name = tensor("transpose_41")]; + tensor input_57_cast = reshape(shape = var_405, x = transpose_41)[name = tensor("input_57_cast")]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(122098560)))]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(123278272)))]; + tensor hidden_states_21_cast = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight_to_fp16, x = input_57_cast)[name = tensor("hidden_states_21_cast")]; + tensor input_59_cast = add(x = input_51_cast, y = hidden_states_21_cast)[name = tensor("input_59_cast")]; + tensor input_61_axes_0 = const()[name = tensor("input_61_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(123279872)))]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(123281472)))]; + tensor input_61_cast = layer_norm(axes = input_61_axes_0, beta = text_encoder_text_model_encoder_layers_3_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_3_layer_norm2_weight_to_fp16, x = input_59_cast)[name = tensor("input_61_cast")]; + tensor text_encoder_text_model_encoder_layers_3_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(123283072)))]; + tensor text_encoder_text_model_encoder_layers_3_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128001728)))]; + tensor input_63_cast = linear(bias = text_encoder_text_model_encoder_layers_3_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_mlp_fc1_weight_to_fp16, x = input_61_cast)[name = tensor("input_63_cast")]; + tensor var_420_to_fp16 = const()[name = tensor("op_420_to_fp16"), val = 
tensor(0x1.b3cp+0)]; + tensor var_421_cast = mul(x = input_63_cast, y = var_420_to_fp16)[name = tensor("op_421_cast")]; + tensor var_422_cast = sigmoid(x = var_421_cast)[name = tensor("op_422_cast")]; + tensor input_65_cast = mul(x = input_63_cast, y = var_422_cast)[name = tensor("input_65_cast")]; + tensor text_encoder_text_model_encoder_layers_3_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128007936)))]; + tensor text_encoder_text_model_encoder_layers_3_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(132726592)))]; + tensor hidden_states_23_cast = linear(bias = text_encoder_text_model_encoder_layers_3_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_mlp_fc2_weight_to_fp16, x = input_65_cast)[name = tensor("hidden_states_23_cast")]; + tensor input_67_cast = add(x = input_59_cast, y = hidden_states_23_cast)[name = tensor("input_67_cast")]; + tensor hidden_states_25_axes_0 = const()[name = tensor("hidden_states_25_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(132728192)))]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(132729792)))]; + tensor hidden_states_25_cast = layer_norm(axes = hidden_states_25_axes_0, beta = text_encoder_text_model_encoder_layers_4_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_4_layer_norm1_weight_to_fp16, x = input_67_cast)[name = tensor("hidden_states_25_cast")]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(132731392)))]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(133911104)))]; + tensor var_446_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight_to_fp16, x = hidden_states_25_cast)[name = tensor("op_446_cast")]; + tensor var_447_to_fp16 = const()[name = tensor("op_447_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_29_cast = mul(x = var_446_cast, y = var_447_to_fp16)[name = tensor("tensor_29_cast")]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(133912704)))]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(135092416)))]; + tensor tensor_25_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight_to_fp16, x = hidden_states_25_cast)[name = tensor("tensor_25_cast")]; + tensor var_452 = const()[name = tensor("op_452"), val = tensor([1, -1, 12, 64])]; + tensor var_453_cast = reshape(shape = var_452, x = tensor_25_cast)[name = tensor("op_453_cast")]; + tensor var_454_perm_0 = const()[name = tensor("op_454_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(135094016)))]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(136273728)))]; + tensor tensor_27_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight_to_fp16, x = hidden_states_25_cast)[name = tensor("tensor_27_cast")]; + tensor var_459 = const()[name = tensor("op_459"), val = tensor([1, -1, 12, 64])]; + tensor var_460_cast = reshape(shape = var_459, x = tensor_27_cast)[name = tensor("op_460_cast")]; + tensor var_461_perm_0 = const()[name = tensor("op_461_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_468 = const()[name = tensor("op_468"), val = tensor([1, 77, 12, 64])]; + tensor var_469_cast = reshape(shape = var_468, x = tensor_29_cast)[name = tensor("op_469_cast")]; + tensor var_470_perm_0 = const()[name = tensor("op_470_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_472 = const()[name = tensor("op_472"), val = tensor([12, -1, 64])]; + tensor transpose_38 = transpose(perm = var_470_perm_0, x = var_469_cast)[name = tensor("transpose_38")]; + tensor query_states_9_cast = reshape(shape = var_472, x = transpose_38)[name = tensor("query_states_9_cast")]; + tensor var_474 = const()[name = tensor("op_474"), val = tensor([12, -1, 64])]; + tensor transpose_40 = transpose(perm = var_454_perm_0, x = var_453_cast)[name = tensor("transpose_40")]; + tensor key_states_19_cast = reshape(shape = var_474, x = transpose_40)[name = tensor("key_states_19_cast")]; + tensor var_476 = const()[name = tensor("op_476"), val = tensor([12, -1, 64])]; + tensor transpose_39 = transpose(perm = var_461_perm_0, x = var_460_cast)[name = tensor("transpose_39")]; + tensor value_states_19_cast = reshape(shape = var_476, x = transpose_39)[name = tensor("value_states_19_cast")]; + tensor var_479_perm_0 = const()[name = tensor("op_479_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_25_transpose_x_0 = const()[name = tensor("attn_weights_25_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_25_transpose_y_0 = const()[name = tensor("attn_weights_25_transpose_y_0"), val = tensor(false)]; + tensor transpose_37 = transpose(perm = var_479_perm_0, x = key_states_19_cast)[name = tensor("transpose_37")]; + tensor attn_weights_25_cast = matmul(transpose_x = attn_weights_25_transpose_x_0, transpose_y = 
attn_weights_25_transpose_y_0, x = query_states_9_cast, y = transpose_37)[name = tensor("attn_weights_25_cast")]; + tensor var_481 = const()[name = tensor("op_481"), val = tensor([1, 12, 77, 77])]; + tensor var_482_cast = reshape(shape = var_481, x = attn_weights_25_cast)[name = tensor("op_482_cast")]; + tensor attn_weights_27_cast = add(x = var_482_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_27_cast")]; + tensor var_487 = const()[name = tensor("op_487"), val = tensor([12, 77, 77])]; + tensor input_69_cast = reshape(shape = var_487, x = attn_weights_27_cast)[name = tensor("input_69_cast")]; + tensor input_71_cast = softmax(axis = var_5, x = input_69_cast)[name = tensor("input_71_cast")]; + tensor attn_output_25_transpose_x_0 = const()[name = tensor("attn_output_25_transpose_x_0"), val = tensor(false)]; + tensor attn_output_25_transpose_y_0 = const()[name = tensor("attn_output_25_transpose_y_0"), val = tensor(false)]; + tensor attn_output_25_cast = matmul(transpose_x = attn_output_25_transpose_x_0, transpose_y = attn_output_25_transpose_y_0, x = input_71_cast, y = value_states_19_cast)[name = tensor("attn_output_25_cast")]; + tensor var_492 = const()[name = tensor("op_492"), val = tensor([1, 12, 77, 64])]; + tensor attn_output_27_cast = reshape(shape = var_492, x = attn_output_25_cast)[name = tensor("attn_output_27_cast")]; + tensor attn_output_29_perm_0 = const()[name = tensor("attn_output_29_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_495 = const()[name = tensor("op_495"), val = tensor([1, 77, 768])]; + tensor transpose_36 = transpose(perm = attn_output_29_perm_0, x = attn_output_27_cast)[name = tensor("transpose_36")]; + tensor input_73_cast = reshape(shape = var_495, x = transpose_36)[name = tensor("input_73_cast")]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(136275328)))]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(137455040)))]; + tensor hidden_states_27_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight_to_fp16, x = input_73_cast)[name = tensor("hidden_states_27_cast")]; + tensor input_75_cast = add(x = input_67_cast, y = hidden_states_27_cast)[name = tensor("input_75_cast")]; + tensor input_77_axes_0 = const()[name = tensor("input_77_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(137456640)))]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(137458240)))]; + tensor input_77_cast = layer_norm(axes = input_77_axes_0, beta = text_encoder_text_model_encoder_layers_4_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_4_layer_norm2_weight_to_fp16, x = input_75_cast)[name = tensor("input_77_cast")];
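
Editor's annotation (not part of the generated MIL): with layer 4's second `layer_norm` now in place, the full per-layer residual structure is visible: `input_75 = input_67 + hidden_states_27` (attention branch), then `input_83 = input_75 + hidden_states_29` in the chunk below (MLP branch), i.e. pre-LN residual blocks. Note also that every weight `const()` is a `BLOBFILE` reference into the shared `@model_path/weights/weight.bin` at a byte offset; the offset gaps are consistent with raw fp16 tensors plus alignment padding (e.g. fc1 weight at 137459840 vs. fc1 bias at 142178496: a gap of 4718656 bytes ≈ 3072 × 768 fp16 values). A pseudocode sketch of the per-layer dataflow, with callables standing in for the MIL ops:

```python
def encoder_layer(x, ln1, attn, ln2, fc1, fc2, quick_gelu):
    # Pre-LN residual blocks, mirroring the MIL tensor names for layer 4:
    x = x + attn(ln1(x))                  # input_75 = input_67 + hidden_states_27
    x = x + fc2(quick_gelu(fc1(ln2(x))))  # input_83 = input_75 + hidden_states_29
    return x
```
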
+ tensor text_encoder_text_model_encoder_layers_4_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(137459840)))]; + tensor text_encoder_text_model_encoder_layers_4_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(142178496)))]; + tensor input_79_cast = linear(bias = text_encoder_text_model_encoder_layers_4_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_mlp_fc1_weight_to_fp16, x = input_77_cast)[name = tensor("input_79_cast")]; + tensor var_510_to_fp16 = const()[name = tensor("op_510_to_fp16"), val = tensor(0x1.b3cp+0)]; + tensor var_511_cast = mul(x = input_79_cast, y = var_510_to_fp16)[name = tensor("op_511_cast")]; + tensor var_512_cast = sigmoid(x = var_511_cast)[name = tensor("op_512_cast")]; + tensor input_81_cast = mul(x = input_79_cast, y = var_512_cast)[name = tensor("input_81_cast")]; + tensor text_encoder_text_model_encoder_layers_4_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(142184704)))]; + tensor text_encoder_text_model_encoder_layers_4_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146903360)))]; + tensor hidden_states_29_cast = linear(bias = text_encoder_text_model_encoder_layers_4_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_mlp_fc2_weight_to_fp16, x = input_81_cast)[name = tensor("hidden_states_29_cast")]; + tensor input_83_cast = add(x = input_75_cast, y = hidden_states_29_cast)[name = tensor("input_83_cast")]; + tensor hidden_states_31_axes_0 = const()[name = tensor("hidden_states_31_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146904960)))]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146906560)))]; + tensor hidden_states_31_cast = layer_norm(axes = hidden_states_31_axes_0, beta = text_encoder_text_model_encoder_layers_5_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_5_layer_norm1_weight_to_fp16, x = input_83_cast)[name = tensor("hidden_states_31_cast")]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146908160)))]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(148087872)))]; + tensor var_536_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight_to_fp16, x = hidden_states_31_cast)[name = tensor("op_536_cast")]; + tensor var_537_to_fp16 = const()[name = tensor("op_537_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_35_cast = mul(x = var_536_cast, y = var_537_to_fp16)[name = tensor("tensor_35_cast")]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(148089472)))]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(149269184)))]; + tensor tensor_31_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight_to_fp16, x = hidden_states_31_cast)[name = tensor("tensor_31_cast")]; + tensor var_542 = const()[name = tensor("op_542"), val = tensor([1, -1, 12, 64])]; + tensor var_543_cast = reshape(shape = var_542, x = tensor_31_cast)[name = tensor("op_543_cast")]; + tensor var_544_perm_0 = const()[name = tensor("op_544_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(149270784)))]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(150450496)))]; + tensor tensor_33_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight_to_fp16, x = hidden_states_31_cast)[name = tensor("tensor_33_cast")]; + tensor var_549 = const()[name = tensor("op_549"), val = tensor([1, -1, 12, 64])]; + tensor var_550_cast = reshape(shape = var_549, x = tensor_33_cast)[name = tensor("op_550_cast")]; + tensor var_551_perm_0 = const()[name = tensor("op_551_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_558 = const()[name = tensor("op_558"), val = tensor([1, 77, 12, 64])]; + tensor var_559_cast = reshape(shape = var_558, x = tensor_35_cast)[name = tensor("op_559_cast")]; + tensor var_560_perm_0 = const()[name = tensor("op_560_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_562 = const()[name = tensor("op_562"), val = tensor([12, -1, 64])]; + tensor transpose_33 = transpose(perm = var_560_perm_0, x = var_559_cast)[name = tensor("transpose_33")]; + tensor query_states_11_cast = reshape(shape = var_562, x = transpose_33)[name = tensor("query_states_11_cast")]; + tensor var_564 = const()[name = tensor("op_564"), val = tensor([12, -1, 64])]; + tensor transpose_35 = transpose(perm = var_544_perm_0, x = 
var_543_cast)[name = tensor("transpose_35")]; + tensor key_states_23_cast = reshape(shape = var_564, x = transpose_35)[name = tensor("key_states_23_cast")]; + tensor var_566 = const()[name = tensor("op_566"), val = tensor([12, -1, 64])]; + tensor transpose_34 = transpose(perm = var_551_perm_0, x = var_550_cast)[name = tensor("transpose_34")]; + tensor value_states_23_cast = reshape(shape = var_566, x = transpose_34)[name = tensor("value_states_23_cast")]; + tensor var_569_perm_0 = const()[name = tensor("op_569_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_31_transpose_x_0 = const()[name = tensor("attn_weights_31_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_31_transpose_y_0 = const()[name = tensor("attn_weights_31_transpose_y_0"), val = tensor(false)]; + tensor transpose_32 = transpose(perm = var_569_perm_0, x = key_states_23_cast)[name = tensor("transpose_32")]; + tensor attn_weights_31_cast = matmul(transpose_x = attn_weights_31_transpose_x_0, transpose_y = attn_weights_31_transpose_y_0, x = query_states_11_cast, y = transpose_32)[name = tensor("attn_weights_31_cast")]; + tensor var_571 = const()[name = tensor("op_571"), val = tensor([1, 12, 77, 77])]; + tensor var_572_cast = reshape(shape = var_571, x = attn_weights_31_cast)[name = tensor("op_572_cast")]; + tensor attn_weights_33_cast = add(x = var_572_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_33_cast")]; + tensor var_577 = const()[name = tensor("op_577"), val = tensor([12, 77, 77])]; + tensor input_85_cast = reshape(shape = var_577, x = attn_weights_33_cast)[name = tensor("input_85_cast")]; + tensor input_87_cast = softmax(axis = var_5, x = input_85_cast)[name = tensor("input_87_cast")]; + tensor attn_output_31_transpose_x_0 = const()[name = tensor("attn_output_31_transpose_x_0"), val = tensor(false)]; + tensor attn_output_31_transpose_y_0 = const()[name = tensor("attn_output_31_transpose_y_0"), val = tensor(false)]; + tensor attn_output_31_cast = matmul(transpose_x = attn_output_31_transpose_x_0, transpose_y = attn_output_31_transpose_y_0, x = input_87_cast, y = value_states_23_cast)[name = tensor("attn_output_31_cast")]; + tensor var_582 = const()[name = tensor("op_582"), val = tensor([1, 12, 77, 64])]; + tensor attn_output_33_cast = reshape(shape = var_582, x = attn_output_31_cast)[name = tensor("attn_output_33_cast")]; + tensor attn_output_35_perm_0 = const()[name = tensor("attn_output_35_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_585 = const()[name = tensor("op_585"), val = tensor([1, 77, 768])]; + tensor transpose_31 = transpose(perm = attn_output_35_perm_0, x = attn_output_33_cast)[name = tensor("transpose_31")]; + tensor input_89_cast = reshape(shape = var_585, x = transpose_31)[name = tensor("input_89_cast")]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(150452096)))]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(151631808)))]; + tensor hidden_states_33_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias_to_fp16, weight = 
text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight_to_fp16, x = input_89_cast)[name = tensor("hidden_states_33_cast")]; + tensor input_91_cast = add(x = input_83_cast, y = hidden_states_33_cast)[name = tensor("input_91_cast")]; + tensor input_93_axes_0 = const()[name = tensor("input_93_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(151633408)))]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(151635008)))]; + tensor input_93_cast = layer_norm(axes = input_93_axes_0, beta = text_encoder_text_model_encoder_layers_5_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_5_layer_norm2_weight_to_fp16, x = input_91_cast)[name = tensor("input_93_cast")]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(151636608)))]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(156355264)))]; + tensor input_95_cast = linear(bias = text_encoder_text_model_encoder_layers_5_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_mlp_fc1_weight_to_fp16, x = input_93_cast)[name = tensor("input_95_cast")]; + tensor var_600_to_fp16 = const()[name = tensor("op_600_to_fp16"), val = tensor(0x1.b3cp+0)]; + tensor var_601_cast = mul(x = input_95_cast, y = var_600_to_fp16)[name = tensor("op_601_cast")]; + tensor var_602_cast = sigmoid(x = var_601_cast)[name = tensor("op_602_cast")]; + tensor input_97_cast = mul(x = input_95_cast, y = var_602_cast)[name = tensor("input_97_cast")]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(156361472)))]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(161080128)))]; + tensor hidden_states_35_cast = linear(bias = text_encoder_text_model_encoder_layers_5_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_mlp_fc2_weight_to_fp16, x = input_97_cast)[name = tensor("hidden_states_35_cast")]; + tensor input_99_cast = add(x = input_91_cast, y = hidden_states_35_cast)[name = tensor("input_99_cast")]; + tensor hidden_states_37_axes_0 = const()[name = tensor("hidden_states_37_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_6_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(161081728)))]; + tensor text_encoder_text_model_encoder_layers_6_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(161083328)))]; + tensor hidden_states_37_cast = layer_norm(axes = hidden_states_37_axes_0, beta = text_encoder_text_model_encoder_layers_6_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_6_layer_norm1_weight_to_fp16, x = input_99_cast)[name = tensor("hidden_states_37_cast")]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(161084928)))]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(162264640)))]; + tensor var_626_cast = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight_to_fp16, x = hidden_states_37_cast)[name = tensor("op_626_cast")]; + tensor var_627_to_fp16 = const()[name = tensor("op_627_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_41_cast = mul(x = var_626_cast, y = var_627_to_fp16)[name = tensor("tensor_41_cast")]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(162266240)))]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163445952)))]; + tensor tensor_37_cast = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight_to_fp16, x = hidden_states_37_cast)[name = tensor("tensor_37_cast")]; + tensor var_632 = const()[name = tensor("op_632"), val = tensor([1, -1, 12, 64])]; + tensor var_633_cast = reshape(shape = var_632, x = tensor_37_cast)[name = tensor("op_633_cast")]; + tensor var_634_perm_0 = const()[name = tensor("op_634_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163447552)))]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(164627264)))]; + tensor tensor_39_cast = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight_to_fp16, x = hidden_states_37_cast)[name = tensor("tensor_39_cast")]; + tensor 
var_639 = const()[name = tensor("op_639"), val = tensor([1, -1, 12, 64])]; + tensor var_640_cast = reshape(shape = var_639, x = tensor_39_cast)[name = tensor("op_640_cast")]; + tensor var_641_perm_0 = const()[name = tensor("op_641_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_648 = const()[name = tensor("op_648"), val = tensor([1, 77, 12, 64])]; + tensor var_649_cast = reshape(shape = var_648, x = tensor_41_cast)[name = tensor("op_649_cast")]; + tensor var_650_perm_0 = const()[name = tensor("op_650_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_652 = const()[name = tensor("op_652"), val = tensor([12, -1, 64])]; + tensor transpose_28 = transpose(perm = var_650_perm_0, x = var_649_cast)[name = tensor("transpose_28")]; + tensor query_states_13_cast = reshape(shape = var_652, x = transpose_28)[name = tensor("query_states_13_cast")]; + tensor var_654 = const()[name = tensor("op_654"), val = tensor([12, -1, 64])]; + tensor transpose_30 = transpose(perm = var_634_perm_0, x = var_633_cast)[name = tensor("transpose_30")]; + tensor key_states_27_cast = reshape(shape = var_654, x = transpose_30)[name = tensor("key_states_27_cast")]; + tensor var_656 = const()[name = tensor("op_656"), val = tensor([12, -1, 64])]; + tensor transpose_29 = transpose(perm = var_641_perm_0, x = var_640_cast)[name = tensor("transpose_29")]; + tensor value_states_27_cast = reshape(shape = var_656, x = transpose_29)[name = tensor("value_states_27_cast")]; + tensor var_659_perm_0 = const()[name = tensor("op_659_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_37_transpose_x_0 = const()[name = tensor("attn_weights_37_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_37_transpose_y_0 = const()[name = tensor("attn_weights_37_transpose_y_0"), val = tensor(false)]; + tensor transpose_27 = transpose(perm = var_659_perm_0, x = key_states_27_cast)[name = tensor("transpose_27")]; + tensor attn_weights_37_cast = matmul(transpose_x = attn_weights_37_transpose_x_0, transpose_y = attn_weights_37_transpose_y_0, x = query_states_13_cast, y = transpose_27)[name = tensor("attn_weights_37_cast")]; + tensor var_661 = const()[name = tensor("op_661"), val = tensor([1, 12, 77, 77])]; + tensor var_662_cast = reshape(shape = var_661, x = attn_weights_37_cast)[name = tensor("op_662_cast")]; + tensor attn_weights_39_cast = add(x = var_662_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_39_cast")]; + tensor var_667 = const()[name = tensor("op_667"), val = tensor([12, 77, 77])]; + tensor input_101_cast = reshape(shape = var_667, x = attn_weights_39_cast)[name = tensor("input_101_cast")]; + tensor input_103_cast = softmax(axis = var_5, x = input_101_cast)[name = tensor("input_103_cast")]; + tensor attn_output_37_transpose_x_0 = const()[name = tensor("attn_output_37_transpose_x_0"), val = tensor(false)]; + tensor attn_output_37_transpose_y_0 = const()[name = tensor("attn_output_37_transpose_y_0"), val = tensor(false)]; + tensor attn_output_37_cast = matmul(transpose_x = attn_output_37_transpose_x_0, transpose_y = attn_output_37_transpose_y_0, x = input_103_cast, y = value_states_27_cast)[name = tensor("attn_output_37_cast")]; + tensor var_672 = const()[name = tensor("op_672"), val = tensor([1, 12, 77, 64])]; + tensor attn_output_39_cast = reshape(shape = var_672, x = attn_output_37_cast)[name = tensor("attn_output_39_cast")]; + tensor attn_output_41_perm_0 = const()[name = tensor("attn_output_41_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_675 = const()[name = tensor("op_675"), val = tensor([1, 77, 
768])]; + tensor transpose_26 = transpose(perm = attn_output_41_perm_0, x = attn_output_39_cast)[name = tensor("transpose_26")]; + tensor input_105_cast = reshape(shape = var_675, x = transpose_26)[name = tensor("input_105_cast")]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(164628864)))]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165808576)))]; + tensor hidden_states_39_cast = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_out_proj_weight_to_fp16, x = input_105_cast)[name = tensor("hidden_states_39_cast")]; + tensor input_107_cast = add(x = input_99_cast, y = hidden_states_39_cast)[name = tensor("input_107_cast")]; + tensor input_109_axes_0 = const()[name = tensor("input_109_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_6_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165810176)))]; + tensor text_encoder_text_model_encoder_layers_6_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165811776)))]; + tensor input_109_cast = layer_norm(axes = input_109_axes_0, beta = text_encoder_text_model_encoder_layers_6_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_6_layer_norm2_weight_to_fp16, x = input_107_cast)[name = tensor("input_109_cast")]; + tensor text_encoder_text_model_encoder_layers_6_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165813376)))]; + tensor text_encoder_text_model_encoder_layers_6_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(170532032)))]; + tensor input_111_cast = linear(bias = text_encoder_text_model_encoder_layers_6_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_mlp_fc1_weight_to_fp16, x = input_109_cast)[name = tensor("input_111_cast")]; + tensor var_690_to_fp16 = const()[name = tensor("op_690_to_fp16"), val = tensor(0x1.b3cp+0)]; + tensor var_691_cast = mul(x = input_111_cast, y = var_690_to_fp16)[name = tensor("op_691_cast")]; + tensor var_692_cast = sigmoid(x = var_691_cast)[name = tensor("op_692_cast")]; + tensor input_113_cast = mul(x = input_111_cast, y = var_692_cast)[name = tensor("input_113_cast")]; + tensor text_encoder_text_model_encoder_layers_6_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(170538240)))]; + tensor 
text_encoder_text_model_encoder_layers_6_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(175256896)))]; + tensor hidden_states_41_cast = linear(bias = text_encoder_text_model_encoder_layers_6_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_mlp_fc2_weight_to_fp16, x = input_113_cast)[name = tensor("hidden_states_41_cast")]; + tensor input_115_cast = add(x = input_107_cast, y = hidden_states_41_cast)[name = tensor("input_115_cast")]; + tensor hidden_states_43_axes_0 = const()[name = tensor("hidden_states_43_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_7_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(175258496)))]; + tensor text_encoder_text_model_encoder_layers_7_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(175260096)))]; + tensor hidden_states_43_cast = layer_norm(axes = hidden_states_43_axes_0, beta = text_encoder_text_model_encoder_layers_7_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_7_layer_norm1_weight_to_fp16, x = input_115_cast)[name = tensor("hidden_states_43_cast")]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(175261696)))]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(176441408)))]; + tensor var_716_cast = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_q_proj_weight_to_fp16, x = hidden_states_43_cast)[name = tensor("op_716_cast")]; + tensor var_717_to_fp16 = const()[name = tensor("op_717_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_47_cast = mul(x = var_716_cast, y = var_717_to_fp16)[name = tensor("tensor_47_cast")]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(176443008)))]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(177622720)))]; + tensor tensor_43_cast = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_k_proj_weight_to_fp16, x = hidden_states_43_cast)[name = tensor("tensor_43_cast")]; + tensor var_722 = const()[name = tensor("op_722"), val = tensor([1, -1, 12, 64])]; + tensor var_723_cast = reshape(shape = var_722, x = 
tensor_43_cast)[name = tensor("op_723_cast")]; + tensor var_724_perm_0 = const()[name = tensor("op_724_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(177624320)))]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(178804032)))]; + tensor tensor_45_cast = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_v_proj_weight_to_fp16, x = hidden_states_43_cast)[name = tensor("tensor_45_cast")]; + tensor var_729 = const()[name = tensor("op_729"), val = tensor([1, -1, 12, 64])]; + tensor var_730_cast = reshape(shape = var_729, x = tensor_45_cast)[name = tensor("op_730_cast")]; + tensor var_731_perm_0 = const()[name = tensor("op_731_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_738 = const()[name = tensor("op_738"), val = tensor([1, 77, 12, 64])]; + tensor var_739_cast = reshape(shape = var_738, x = tensor_47_cast)[name = tensor("op_739_cast")]; + tensor var_740_perm_0 = const()[name = tensor("op_740_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_742 = const()[name = tensor("op_742"), val = tensor([12, -1, 64])]; + tensor transpose_23 = transpose(perm = var_740_perm_0, x = var_739_cast)[name = tensor("transpose_23")]; + tensor query_states_15_cast = reshape(shape = var_742, x = transpose_23)[name = tensor("query_states_15_cast")]; + tensor var_744 = const()[name = tensor("op_744"), val = tensor([12, -1, 64])]; + tensor transpose_25 = transpose(perm = var_724_perm_0, x = var_723_cast)[name = tensor("transpose_25")]; + tensor key_states_31_cast = reshape(shape = var_744, x = transpose_25)[name = tensor("key_states_31_cast")]; + tensor var_746 = const()[name = tensor("op_746"), val = tensor([12, -1, 64])]; + tensor transpose_24 = transpose(perm = var_731_perm_0, x = var_730_cast)[name = tensor("transpose_24")]; + tensor value_states_31_cast = reshape(shape = var_746, x = transpose_24)[name = tensor("value_states_31_cast")]; + tensor var_749_perm_0 = const()[name = tensor("op_749_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_43_transpose_x_0 = const()[name = tensor("attn_weights_43_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_43_transpose_y_0 = const()[name = tensor("attn_weights_43_transpose_y_0"), val = tensor(false)]; + tensor transpose_22 = transpose(perm = var_749_perm_0, x = key_states_31_cast)[name = tensor("transpose_22")]; + tensor attn_weights_43_cast = matmul(transpose_x = attn_weights_43_transpose_x_0, transpose_y = attn_weights_43_transpose_y_0, x = query_states_15_cast, y = transpose_22)[name = tensor("attn_weights_43_cast")]; + tensor var_751 = const()[name = tensor("op_751"), val = tensor([1, 12, 77, 77])]; + tensor var_752_cast = reshape(shape = var_751, x = attn_weights_43_cast)[name = tensor("op_752_cast")]; + tensor attn_weights_45_cast = add(x = var_752_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_45_cast")]; + tensor var_757 = const()[name = tensor("op_757"), val = tensor([12, 77, 77])]; + tensor input_117_cast = reshape(shape = var_757, x = 
attn_weights_45_cast)[name = tensor("input_117_cast")]; + tensor input_119_cast = softmax(axis = var_5, x = input_117_cast)[name = tensor("input_119_cast")]; + tensor attn_output_43_transpose_x_0 = const()[name = tensor("attn_output_43_transpose_x_0"), val = tensor(false)]; + tensor attn_output_43_transpose_y_0 = const()[name = tensor("attn_output_43_transpose_y_0"), val = tensor(false)]; + tensor attn_output_43_cast = matmul(transpose_x = attn_output_43_transpose_x_0, transpose_y = attn_output_43_transpose_y_0, x = input_119_cast, y = value_states_31_cast)[name = tensor("attn_output_43_cast")]; + tensor var_762 = const()[name = tensor("op_762"), val = tensor([1, 12, 77, 64])]; + tensor attn_output_45_cast = reshape(shape = var_762, x = attn_output_43_cast)[name = tensor("attn_output_45_cast")]; + tensor attn_output_47_perm_0 = const()[name = tensor("attn_output_47_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_765 = const()[name = tensor("op_765"), val = tensor([1, 77, 768])]; + tensor transpose_21 = transpose(perm = attn_output_47_perm_0, x = attn_output_45_cast)[name = tensor("transpose_21")]; + tensor input_121_cast = reshape(shape = var_765, x = transpose_21)[name = tensor("input_121_cast")]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(178805632)))]; + tensor text_encoder_text_model_encoder_layers_7_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179985344)))]; + tensor hidden_states_45_cast = linear(bias = text_encoder_text_model_encoder_layers_7_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_self_attn_out_proj_weight_to_fp16, x = input_121_cast)[name = tensor("hidden_states_45_cast")]; + tensor input_123_cast = add(x = input_115_cast, y = hidden_states_45_cast)[name = tensor("input_123_cast")]; + tensor input_125_axes_0 = const()[name = tensor("input_125_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_7_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179986944)))]; + tensor text_encoder_text_model_encoder_layers_7_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179988544)))]; + tensor input_125_cast = layer_norm(axes = input_125_axes_0, beta = text_encoder_text_model_encoder_layers_7_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_7_layer_norm2_weight_to_fp16, x = input_123_cast)[name = tensor("input_125_cast")]; + tensor text_encoder_text_model_encoder_layers_7_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179990144)))]; + tensor text_encoder_text_model_encoder_layers_7_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc1_bias_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184708800)))]; + tensor input_127_cast = linear(bias = text_encoder_text_model_encoder_layers_7_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_mlp_fc1_weight_to_fp16, x = input_125_cast)[name = tensor("input_127_cast")]; + tensor var_780_to_fp16 = const()[name = tensor("op_780_to_fp16"), val = tensor(0x1.b3cp+0)]; + tensor var_781_cast = mul(x = input_127_cast, y = var_780_to_fp16)[name = tensor("op_781_cast")]; + tensor var_782_cast = sigmoid(x = var_781_cast)[name = tensor("op_782_cast")]; + tensor input_129_cast = mul(x = input_127_cast, y = var_782_cast)[name = tensor("input_129_cast")]; + tensor text_encoder_text_model_encoder_layers_7_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184715008)))]; + tensor text_encoder_text_model_encoder_layers_7_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_7_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(189433664)))]; + tensor hidden_states_47_cast = linear(bias = text_encoder_text_model_encoder_layers_7_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_7_mlp_fc2_weight_to_fp16, x = input_129_cast)[name = tensor("hidden_states_47_cast")]; + tensor input_131_cast = add(x = input_123_cast, y = hidden_states_47_cast)[name = tensor("input_131_cast")]; + tensor hidden_states_49_axes_0 = const()[name = tensor("hidden_states_49_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_8_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(189435264)))]; + tensor text_encoder_text_model_encoder_layers_8_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(189436864)))]; + tensor hidden_states_49_cast = layer_norm(axes = hidden_states_49_axes_0, beta = text_encoder_text_model_encoder_layers_8_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_8_layer_norm1_weight_to_fp16, x = input_131_cast)[name = tensor("hidden_states_49_cast")]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(189438464)))]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(190618176)))]; + tensor var_806_cast = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_self_attn_q_proj_weight_to_fp16, x = hidden_states_49_cast)[name = tensor("op_806_cast")]; + tensor var_807_to_fp16 = const()[name = tensor("op_807_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_53_cast = mul(x = var_806_cast, y = var_807_to_fp16)[name = 
tensor("tensor_53_cast")]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(190619776)))]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(191799488)))]; + tensor tensor_49_cast = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_self_attn_k_proj_weight_to_fp16, x = hidden_states_49_cast)[name = tensor("tensor_49_cast")]; + tensor var_812 = const()[name = tensor("op_812"), val = tensor([1, -1, 12, 64])]; + tensor var_813_cast = reshape(shape = var_812, x = tensor_49_cast)[name = tensor("op_813_cast")]; + tensor var_814_perm_0 = const()[name = tensor("op_814_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(191801088)))]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(192980800)))]; + tensor tensor_51_cast = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_self_attn_v_proj_weight_to_fp16, x = hidden_states_49_cast)[name = tensor("tensor_51_cast")]; + tensor var_819 = const()[name = tensor("op_819"), val = tensor([1, -1, 12, 64])]; + tensor var_820_cast = reshape(shape = var_819, x = tensor_51_cast)[name = tensor("op_820_cast")]; + tensor var_821_perm_0 = const()[name = tensor("op_821_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_828 = const()[name = tensor("op_828"), val = tensor([1, 77, 12, 64])]; + tensor var_829_cast = reshape(shape = var_828, x = tensor_53_cast)[name = tensor("op_829_cast")]; + tensor var_830_perm_0 = const()[name = tensor("op_830_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_832 = const()[name = tensor("op_832"), val = tensor([12, -1, 64])]; + tensor transpose_18 = transpose(perm = var_830_perm_0, x = var_829_cast)[name = tensor("transpose_18")]; + tensor query_states_17_cast = reshape(shape = var_832, x = transpose_18)[name = tensor("query_states_17_cast")]; + tensor var_834 = const()[name = tensor("op_834"), val = tensor([12, -1, 64])]; + tensor transpose_20 = transpose(perm = var_814_perm_0, x = var_813_cast)[name = tensor("transpose_20")]; + tensor key_states_35_cast = reshape(shape = var_834, x = transpose_20)[name = tensor("key_states_35_cast")]; + tensor var_836 = const()[name = tensor("op_836"), val = tensor([12, -1, 64])]; + tensor transpose_19 = transpose(perm = var_821_perm_0, x = var_820_cast)[name = tensor("transpose_19")]; + tensor value_states_35_cast = reshape(shape = var_836, x = transpose_19)[name = tensor("value_states_35_cast")]; + tensor var_839_perm_0 = const()[name = tensor("op_839_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_49_transpose_x_0 = 
const()[name = tensor("attn_weights_49_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_49_transpose_y_0 = const()[name = tensor("attn_weights_49_transpose_y_0"), val = tensor(false)]; + tensor transpose_17 = transpose(perm = var_839_perm_0, x = key_states_35_cast)[name = tensor("transpose_17")]; + tensor attn_weights_49_cast = matmul(transpose_x = attn_weights_49_transpose_x_0, transpose_y = attn_weights_49_transpose_y_0, x = query_states_17_cast, y = transpose_17)[name = tensor("attn_weights_49_cast")]; + tensor var_841 = const()[name = tensor("op_841"), val = tensor([1, 12, 77, 77])]; + tensor var_842_cast = reshape(shape = var_841, x = attn_weights_49_cast)[name = tensor("op_842_cast")]; + tensor attn_weights_51_cast = add(x = var_842_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_51_cast")]; + tensor var_847 = const()[name = tensor("op_847"), val = tensor([12, 77, 77])]; + tensor input_133_cast = reshape(shape = var_847, x = attn_weights_51_cast)[name = tensor("input_133_cast")]; + tensor input_135_cast = softmax(axis = var_5, x = input_133_cast)[name = tensor("input_135_cast")]; + tensor attn_output_49_transpose_x_0 = const()[name = tensor("attn_output_49_transpose_x_0"), val = tensor(false)]; + tensor attn_output_49_transpose_y_0 = const()[name = tensor("attn_output_49_transpose_y_0"), val = tensor(false)]; + tensor attn_output_49_cast = matmul(transpose_x = attn_output_49_transpose_x_0, transpose_y = attn_output_49_transpose_y_0, x = input_135_cast, y = value_states_35_cast)[name = tensor("attn_output_49_cast")]; + tensor var_852 = const()[name = tensor("op_852"), val = tensor([1, 12, 77, 64])]; + tensor attn_output_51_cast = reshape(shape = var_852, x = attn_output_49_cast)[name = tensor("attn_output_51_cast")]; + tensor attn_output_53_perm_0 = const()[name = tensor("attn_output_53_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_855 = const()[name = tensor("op_855"), val = tensor([1, 77, 768])]; + tensor transpose_16 = transpose(perm = attn_output_53_perm_0, x = attn_output_51_cast)[name = tensor("transpose_16")]; + tensor input_137_cast = reshape(shape = var_855, x = transpose_16)[name = tensor("input_137_cast")]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(192982400)))]; + tensor text_encoder_text_model_encoder_layers_8_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(194162112)))]; + tensor hidden_states_51_cast = linear(bias = text_encoder_text_model_encoder_layers_8_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_self_attn_out_proj_weight_to_fp16, x = input_137_cast)[name = tensor("hidden_states_51_cast")]; + tensor input_139_cast = add(x = input_131_cast, y = hidden_states_51_cast)[name = tensor("input_139_cast")]; + tensor input_141_axes_0 = const()[name = tensor("input_141_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_8_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(194163712)))]; + tensor 
text_encoder_text_model_encoder_layers_8_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(194165312)))]; + tensor input_141_cast = layer_norm(axes = input_141_axes_0, beta = text_encoder_text_model_encoder_layers_8_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_8_layer_norm2_weight_to_fp16, x = input_139_cast)[name = tensor("input_141_cast")]; + tensor text_encoder_text_model_encoder_layers_8_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(194166912)))]; + tensor text_encoder_text_model_encoder_layers_8_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(198885568)))]; + tensor input_143_cast = linear(bias = text_encoder_text_model_encoder_layers_8_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_mlp_fc1_weight_to_fp16, x = input_141_cast)[name = tensor("input_143_cast")]; + tensor var_870_to_fp16 = const()[name = tensor("op_870_to_fp16"), val = tensor(0x1.b3cp+0)]; + tensor var_871_cast = mul(x = input_143_cast, y = var_870_to_fp16)[name = tensor("op_871_cast")]; + tensor var_872_cast = sigmoid(x = var_871_cast)[name = tensor("op_872_cast")]; + tensor input_145_cast = mul(x = input_143_cast, y = var_872_cast)[name = tensor("input_145_cast")]; + tensor text_encoder_text_model_encoder_layers_8_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(198891776)))]; + tensor text_encoder_text_model_encoder_layers_8_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_8_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(203610432)))]; + tensor hidden_states_53_cast = linear(bias = text_encoder_text_model_encoder_layers_8_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_8_mlp_fc2_weight_to_fp16, x = input_145_cast)[name = tensor("hidden_states_53_cast")]; + tensor input_147_cast = add(x = input_139_cast, y = hidden_states_53_cast)[name = tensor("input_147_cast")]; + tensor hidden_states_55_axes_0 = const()[name = tensor("hidden_states_55_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_9_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(203612032)))]; + tensor text_encoder_text_model_encoder_layers_9_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(203613632)))]; + tensor hidden_states_55_cast = layer_norm(axes = hidden_states_55_axes_0, beta = text_encoder_text_model_encoder_layers_9_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_9_layer_norm1_weight_to_fp16, x = input_147_cast)[name = tensor("hidden_states_55_cast")]; + tensor 
text_encoder_text_model_encoder_layers_9_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(203615232)))]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(204794944)))]; + tensor var_896_cast = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_q_proj_weight_to_fp16, x = hidden_states_55_cast)[name = tensor("op_896_cast")]; + tensor var_897_to_fp16 = const()[name = tensor("op_897_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_59_cast = mul(x = var_896_cast, y = var_897_to_fp16)[name = tensor("tensor_59_cast")]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(204796544)))]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(205976256)))]; + tensor tensor_55_cast = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_k_proj_weight_to_fp16, x = hidden_states_55_cast)[name = tensor("tensor_55_cast")]; + tensor var_902 = const()[name = tensor("op_902"), val = tensor([1, -1, 12, 64])]; + tensor var_903_cast = reshape(shape = var_902, x = tensor_55_cast)[name = tensor("op_903_cast")]; + tensor var_904_perm_0 = const()[name = tensor("op_904_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(205977856)))]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(207157568)))]; + tensor tensor_57_cast = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_v_proj_weight_to_fp16, x = hidden_states_55_cast)[name = tensor("tensor_57_cast")]; + tensor var_909 = const()[name = tensor("op_909"), val = tensor([1, -1, 12, 64])]; + tensor var_910_cast = reshape(shape = var_909, x = tensor_57_cast)[name = tensor("op_910_cast")]; + tensor var_911_perm_0 = const()[name = tensor("op_911_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_918 = const()[name = tensor("op_918"), val = tensor([1, 77, 12, 64])]; + tensor var_919_cast = reshape(shape = var_918, x = tensor_59_cast)[name = tensor("op_919_cast")]; + tensor var_920_perm_0 = const()[name = tensor("op_920_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_922 = const()[name = tensor("op_922"), val = tensor([12, 
-1, 64])]; + tensor transpose_13 = transpose(perm = var_920_perm_0, x = var_919_cast)[name = tensor("transpose_13")]; + tensor query_states_19_cast = reshape(shape = var_922, x = transpose_13)[name = tensor("query_states_19_cast")]; + tensor var_924 = const()[name = tensor("op_924"), val = tensor([12, -1, 64])]; + tensor transpose_15 = transpose(perm = var_904_perm_0, x = var_903_cast)[name = tensor("transpose_15")]; + tensor key_states_39_cast = reshape(shape = var_924, x = transpose_15)[name = tensor("key_states_39_cast")]; + tensor var_926 = const()[name = tensor("op_926"), val = tensor([12, -1, 64])]; + tensor transpose_14 = transpose(perm = var_911_perm_0, x = var_910_cast)[name = tensor("transpose_14")]; + tensor value_states_39_cast = reshape(shape = var_926, x = transpose_14)[name = tensor("value_states_39_cast")]; + tensor var_929_perm_0 = const()[name = tensor("op_929_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_55_transpose_x_0 = const()[name = tensor("attn_weights_55_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_55_transpose_y_0 = const()[name = tensor("attn_weights_55_transpose_y_0"), val = tensor(false)]; + tensor transpose_12 = transpose(perm = var_929_perm_0, x = key_states_39_cast)[name = tensor("transpose_12")]; + tensor attn_weights_55_cast = matmul(transpose_x = attn_weights_55_transpose_x_0, transpose_y = attn_weights_55_transpose_y_0, x = query_states_19_cast, y = transpose_12)[name = tensor("attn_weights_55_cast")]; + tensor var_931 = const()[name = tensor("op_931"), val = tensor([1, 12, 77, 77])]; + tensor var_932_cast = reshape(shape = var_931, x = attn_weights_55_cast)[name = tensor("op_932_cast")]; + tensor attn_weights_57_cast = add(x = var_932_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_57_cast")]; + tensor var_937 = const()[name = tensor("op_937"), val = tensor([12, 77, 77])]; + tensor input_149_cast = reshape(shape = var_937, x = attn_weights_57_cast)[name = tensor("input_149_cast")]; + tensor input_151_cast = softmax(axis = var_5, x = input_149_cast)[name = tensor("input_151_cast")]; + tensor attn_output_55_transpose_x_0 = const()[name = tensor("attn_output_55_transpose_x_0"), val = tensor(false)]; + tensor attn_output_55_transpose_y_0 = const()[name = tensor("attn_output_55_transpose_y_0"), val = tensor(false)]; + tensor attn_output_55_cast = matmul(transpose_x = attn_output_55_transpose_x_0, transpose_y = attn_output_55_transpose_y_0, x = input_151_cast, y = value_states_39_cast)[name = tensor("attn_output_55_cast")]; + tensor var_942 = const()[name = tensor("op_942"), val = tensor([1, 12, 77, 64])]; + tensor attn_output_57_cast = reshape(shape = var_942, x = attn_output_55_cast)[name = tensor("attn_output_57_cast")]; + tensor attn_output_59_perm_0 = const()[name = tensor("attn_output_59_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_945 = const()[name = tensor("op_945"), val = tensor([1, 77, 768])]; + tensor transpose_11 = transpose(perm = attn_output_59_perm_0, x = attn_output_57_cast)[name = tensor("transpose_11")]; + tensor input_153_cast = reshape(shape = var_945, x = transpose_11)[name = tensor("input_153_cast")]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(207159168)))]; + tensor text_encoder_text_model_encoder_layers_9_self_attn_out_proj_bias_to_fp16 = const()[name 
= tensor("text_encoder_text_model_encoder_layers_9_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(208338880)))]; + tensor hidden_states_57_cast = linear(bias = text_encoder_text_model_encoder_layers_9_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_self_attn_out_proj_weight_to_fp16, x = input_153_cast)[name = tensor("hidden_states_57_cast")]; + tensor input_155_cast = add(x = input_147_cast, y = hidden_states_57_cast)[name = tensor("input_155_cast")]; + tensor input_157_axes_0 = const()[name = tensor("input_157_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_9_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(208340480)))]; + tensor text_encoder_text_model_encoder_layers_9_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(208342080)))]; + tensor input_157_cast = layer_norm(axes = input_157_axes_0, beta = text_encoder_text_model_encoder_layers_9_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_9_layer_norm2_weight_to_fp16, x = input_155_cast)[name = tensor("input_157_cast")]; + tensor text_encoder_text_model_encoder_layers_9_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(208343680)))]; + tensor text_encoder_text_model_encoder_layers_9_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(213062336)))]; + tensor input_159_cast = linear(bias = text_encoder_text_model_encoder_layers_9_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_mlp_fc1_weight_to_fp16, x = input_157_cast)[name = tensor("input_159_cast")]; + tensor var_960_to_fp16 = const()[name = tensor("op_960_to_fp16"), val = tensor(0x1.b3cp+0)]; + tensor var_961_cast = mul(x = input_159_cast, y = var_960_to_fp16)[name = tensor("op_961_cast")]; + tensor var_962_cast = sigmoid(x = var_961_cast)[name = tensor("op_962_cast")]; + tensor input_161_cast = mul(x = input_159_cast, y = var_962_cast)[name = tensor("input_161_cast")]; + tensor text_encoder_text_model_encoder_layers_9_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(213068544)))]; + tensor text_encoder_text_model_encoder_layers_9_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_9_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(217787200)))]; + tensor hidden_states_59_cast = linear(bias = text_encoder_text_model_encoder_layers_9_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_9_mlp_fc2_weight_to_fp16, x = input_161_cast)[name = tensor("hidden_states_59_cast")]; + tensor input_163_cast = add(x = input_155_cast, y = hidden_states_59_cast)[name = tensor("input_163_cast")]; + tensor hidden_states_61_axes_0 
= const()[name = tensor("hidden_states_61_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_10_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(217788800)))]; + tensor text_encoder_text_model_encoder_layers_10_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(217790400)))]; + tensor hidden_states_61_cast = layer_norm(axes = hidden_states_61_axes_0, beta = text_encoder_text_model_encoder_layers_10_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_10_layer_norm1_weight_to_fp16, x = input_163_cast)[name = tensor("hidden_states_61_cast")]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(217792000)))]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(218971712)))]; + tensor var_986_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_q_proj_weight_to_fp16, x = hidden_states_61_cast)[name = tensor("op_986_cast")]; + tensor var_987_to_fp16 = const()[name = tensor("op_987_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_65_cast = mul(x = var_986_cast, y = var_987_to_fp16)[name = tensor("tensor_65_cast")]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(218973312)))]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(220153024)))]; + tensor tensor_61_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight_to_fp16, x = hidden_states_61_cast)[name = tensor("tensor_61_cast")]; + tensor var_992 = const()[name = tensor("op_992"), val = tensor([1, -1, 12, 64])]; + tensor var_993_cast = reshape(shape = var_992, x = tensor_61_cast)[name = tensor("op_993_cast")]; + tensor var_994_perm_0 = const()[name = tensor("op_994_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(220154624)))]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(221334336)))]; + tensor tensor_63_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight_to_fp16, x = hidden_states_61_cast)[name = tensor("tensor_63_cast")]; + tensor var_999 = const()[name = tensor("op_999"), val = tensor([1, -1, 12, 64])]; + tensor var_1000_cast = reshape(shape = var_999, x = tensor_63_cast)[name = tensor("op_1000_cast")]; + tensor var_1001_perm_0 = const()[name = tensor("op_1001_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1008 = const()[name = tensor("op_1008"), val = tensor([1, 77, 12, 64])]; + tensor var_1009_cast = reshape(shape = var_1008, x = tensor_65_cast)[name = tensor("op_1009_cast")]; + tensor var_1010_perm_0 = const()[name = tensor("op_1010_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1012 = const()[name = tensor("op_1012"), val = tensor([12, -1, 64])]; + tensor transpose_8 = transpose(perm = var_1010_perm_0, x = var_1009_cast)[name = tensor("transpose_8")]; + tensor query_states_21_cast = reshape(shape = var_1012, x = transpose_8)[name = tensor("query_states_21_cast")]; + tensor var_1014 = const()[name = tensor("op_1014"), val = tensor([12, -1, 64])]; + tensor transpose_10 = transpose(perm = var_994_perm_0, x = var_993_cast)[name = tensor("transpose_10")]; + tensor key_states_43_cast = reshape(shape = var_1014, x = transpose_10)[name = tensor("key_states_43_cast")]; + tensor var_1016 = const()[name = tensor("op_1016"), val = tensor([12, -1, 64])]; + tensor transpose_9 = transpose(perm = var_1001_perm_0, x = var_1000_cast)[name = tensor("transpose_9")]; + tensor value_states_43_cast = reshape(shape = var_1016, x = transpose_9)[name = tensor("value_states_43_cast")]; + tensor var_1019_perm_0 = const()[name = tensor("op_1019_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_61_transpose_x_0 = const()[name = tensor("attn_weights_61_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_61_transpose_y_0 = const()[name = tensor("attn_weights_61_transpose_y_0"), val = tensor(false)]; + tensor transpose_7 = transpose(perm = var_1019_perm_0, x = key_states_43_cast)[name = tensor("transpose_7")]; + tensor attn_weights_61_cast = matmul(transpose_x = attn_weights_61_transpose_x_0, transpose_y = attn_weights_61_transpose_y_0, x = query_states_21_cast, y = transpose_7)[name = tensor("attn_weights_61_cast")]; + tensor var_1021 = const()[name = tensor("op_1021"), val = tensor([1, 12, 77, 77])]; + tensor var_1022_cast = reshape(shape = var_1021, x = attn_weights_61_cast)[name = tensor("op_1022_cast")]; + tensor attn_weights_63_cast = add(x = var_1022_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_63_cast")]; + tensor var_1027 = const()[name = tensor("op_1027"), val = tensor([12, 77, 77])]; + tensor input_165_cast = reshape(shape = var_1027, x = attn_weights_63_cast)[name = tensor("input_165_cast")]; + tensor input_167_cast = softmax(axis = var_5, x = input_165_cast)[name = tensor("input_167_cast")]; + tensor attn_output_61_transpose_x_0 = const()[name = tensor("attn_output_61_transpose_x_0"), val = tensor(false)]; + tensor attn_output_61_transpose_y_0 = const()[name = tensor("attn_output_61_transpose_y_0"), val = tensor(false)]; + tensor attn_output_61_cast = matmul(transpose_x = attn_output_61_transpose_x_0, transpose_y = attn_output_61_transpose_y_0, x = input_167_cast, y = value_states_43_cast)[name = 
tensor("attn_output_61_cast")]; + tensor var_1032 = const()[name = tensor("op_1032"), val = tensor([1, 12, 77, 64])]; + tensor attn_output_63_cast = reshape(shape = var_1032, x = attn_output_61_cast)[name = tensor("attn_output_63_cast")]; + tensor attn_output_65_perm_0 = const()[name = tensor("attn_output_65_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1035 = const()[name = tensor("op_1035"), val = tensor([1, 77, 768])]; + tensor transpose_6 = transpose(perm = attn_output_65_perm_0, x = attn_output_63_cast)[name = tensor("transpose_6")]; + tensor input_169_cast = reshape(shape = var_1035, x = transpose_6)[name = tensor("input_169_cast")]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(221335936)))]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(222515648)))]; + tensor hidden_states_63_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight_to_fp16, x = input_169_cast)[name = tensor("hidden_states_63_cast")]; + tensor input_171_cast = add(x = input_163_cast, y = hidden_states_63_cast)[name = tensor("input_171_cast")]; + tensor input_173_axes_0 = const()[name = tensor("input_173_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_10_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(222517248)))]; + tensor text_encoder_text_model_encoder_layers_10_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(222518848)))]; + tensor input_173_cast = layer_norm(axes = input_173_axes_0, beta = text_encoder_text_model_encoder_layers_10_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_10_layer_norm2_weight_to_fp16, x = input_171_cast)[name = tensor("input_173_cast")]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(222520448)))]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(227239104)))]; + tensor input_175_cast = linear(bias = text_encoder_text_model_encoder_layers_10_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_mlp_fc1_weight_to_fp16, x = input_173_cast)[name = tensor("input_175_cast")]; + tensor var_1050_to_fp16 = const()[name = tensor("op_1050_to_fp16"), val = tensor(0x1.b3cp+0)]; + tensor var_1051_cast = mul(x = input_175_cast, y = var_1050_to_fp16)[name = tensor("op_1051_cast")]; + tensor var_1052_cast = sigmoid(x = 
var_1051_cast)[name = tensor("op_1052_cast")]; + tensor input_177_cast = mul(x = input_175_cast, y = var_1052_cast)[name = tensor("input_177_cast")]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(227245312)))]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231963968)))]; + tensor hidden_states_65_cast = linear(bias = text_encoder_text_model_encoder_layers_10_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_mlp_fc2_weight_to_fp16, x = input_177_cast)[name = tensor("hidden_states_65_cast")]; + tensor input_179_cast = add(x = input_171_cast, y = hidden_states_65_cast)[name = tensor("input_179_cast")]; + tensor input_179_cast_to_fp32_dtype_0 = const()[name = tensor("input_179_cast_to_fp32_dtype_0"), val = tensor("fp32")]; + tensor hidden_states_67_axes_0 = const()[name = tensor("hidden_states_67_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231965568)))]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231967168)))]; + tensor hidden_states_67_cast = layer_norm(axes = hidden_states_67_axes_0, beta = text_encoder_text_model_encoder_layers_11_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_11_layer_norm1_weight_to_fp16, x = input_179_cast)[name = tensor("hidden_states_67_cast")]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231968768)))]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(233148480)))]; + tensor var_1076_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight_to_fp16, x = hidden_states_67_cast)[name = tensor("op_1076_cast")]; + tensor var_1077_to_fp16 = const()[name = tensor("op_1077_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_cast = mul(x = var_1076_cast, y = var_1077_to_fp16)[name = tensor("tensor_cast")]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(233150080)))]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(234329792)))]; + tensor tensor_67_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight_to_fp16, x = hidden_states_67_cast)[name = tensor("tensor_67_cast")]; + tensor var_1082 = const()[name = tensor("op_1082"), val = tensor([1, -1, 12, 64])]; + tensor var_1083_cast = reshape(shape = var_1082, x = tensor_67_cast)[name = tensor("op_1083_cast")]; + tensor var_1084_perm_0 = const()[name = tensor("op_1084_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(234331392)))]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235511104)))]; + tensor tensor_69_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight_to_fp16, x = hidden_states_67_cast)[name = tensor("tensor_69_cast")]; + tensor var_1089 = const()[name = tensor("op_1089"), val = tensor([1, -1, 12, 64])]; + tensor var_1090_cast = reshape(shape = var_1089, x = tensor_69_cast)[name = tensor("op_1090_cast")]; + tensor var_1091_perm_0 = const()[name = tensor("op_1091_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1098 = const()[name = tensor("op_1098"), val = tensor([1, 77, 12, 64])]; + tensor var_1099_cast = reshape(shape = var_1098, x = tensor_cast)[name = tensor("op_1099_cast")]; + tensor var_1100_perm_0 = const()[name = tensor("op_1100_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1102 = const()[name = tensor("op_1102"), val = tensor([12, -1, 64])]; + tensor transpose_3 = transpose(perm = var_1100_perm_0, x = var_1099_cast)[name = tensor("transpose_3")]; + tensor query_states_cast = reshape(shape = var_1102, x = transpose_3)[name = tensor("query_states_cast")]; + tensor var_1104 = const()[name = tensor("op_1104"), val = tensor([12, -1, 64])]; + tensor transpose_5 = transpose(perm = var_1084_perm_0, x = var_1083_cast)[name = tensor("transpose_5")]; + tensor key_states_cast = reshape(shape = var_1104, x = transpose_5)[name = tensor("key_states_cast")]; + tensor var_1106 = const()[name = tensor("op_1106"), val = tensor([12, -1, 64])]; + tensor transpose_4 = transpose(perm = var_1091_perm_0, x = var_1090_cast)[name = tensor("transpose_4")]; + tensor value_states_cast = reshape(shape = var_1106, x = transpose_4)[name = tensor("value_states_cast")]; + tensor var_1109_perm_0 = const()[name = tensor("op_1109_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_67_transpose_x_0 = const()[name = tensor("attn_weights_67_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_67_transpose_y_0 = const()[name = tensor("attn_weights_67_transpose_y_0"), val = tensor(false)]; + tensor transpose_2 = transpose(perm = var_1109_perm_0, x = key_states_cast)[name = tensor("transpose_2")]; + tensor attn_weights_67_cast = matmul(transpose_x = attn_weights_67_transpose_x_0, 
transpose_y = attn_weights_67_transpose_y_0, x = query_states_cast, y = transpose_2)[name = tensor("attn_weights_67_cast")]; + tensor var_1111 = const()[name = tensor("op_1111"), val = tensor([1, 12, 77, 77])]; + tensor var_1112_cast = reshape(shape = var_1111, x = attn_weights_67_cast)[name = tensor("op_1112_cast")]; + tensor attn_weights_69_cast = add(x = var_1112_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_69_cast")]; + tensor var_1117 = const()[name = tensor("op_1117"), val = tensor([12, 77, 77])]; + tensor input_181_cast = reshape(shape = var_1117, x = attn_weights_69_cast)[name = tensor("input_181_cast")]; + tensor input_183_cast = softmax(axis = var_5, x = input_181_cast)[name = tensor("input_183_cast")]; + tensor attn_output_67_transpose_x_0 = const()[name = tensor("attn_output_67_transpose_x_0"), val = tensor(false)]; + tensor attn_output_67_transpose_y_0 = const()[name = tensor("attn_output_67_transpose_y_0"), val = tensor(false)]; + tensor attn_output_67_cast = matmul(transpose_x = attn_output_67_transpose_x_0, transpose_y = attn_output_67_transpose_y_0, x = input_183_cast, y = value_states_cast)[name = tensor("attn_output_67_cast")]; + tensor var_1122 = const()[name = tensor("op_1122"), val = tensor([1, 12, 77, 64])]; + tensor attn_output_69_cast = reshape(shape = var_1122, x = attn_output_67_cast)[name = tensor("attn_output_69_cast")]; + tensor attn_output_perm_0 = const()[name = tensor("attn_output_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1125 = const()[name = tensor("op_1125"), val = tensor([1, 77, 768])]; + tensor transpose_1 = transpose(perm = attn_output_perm_0, x = attn_output_69_cast)[name = tensor("transpose_1")]; + tensor input_185_cast = reshape(shape = var_1125, x = transpose_1)[name = tensor("input_185_cast")]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235512704)))]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(236692416)))]; + tensor hidden_states_69_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight_to_fp16, x = input_185_cast)[name = tensor("hidden_states_69_cast")]; + tensor input_187_cast = add(x = input_179_cast, y = hidden_states_69_cast)[name = tensor("input_187_cast")]; + tensor input_189_axes_0 = const()[name = tensor("input_189_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(236694016)))]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(236695616)))]; + tensor input_189_cast = layer_norm(axes = input_189_axes_0, beta = text_encoder_text_model_encoder_layers_11_layer_norm2_bias_to_fp16, epsilon = 
var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_11_layer_norm2_weight_to_fp16, x = input_187_cast)[name = tensor("input_189_cast")]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(236697216)))]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(241415872)))]; + tensor input_191_cast = linear(bias = text_encoder_text_model_encoder_layers_11_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_mlp_fc1_weight_to_fp16, x = input_189_cast)[name = tensor("input_191_cast")]; + tensor var_1140_to_fp16 = const()[name = tensor("op_1140_to_fp16"), val = tensor(0x1.b3cp+0)]; + tensor var_1141_cast = mul(x = input_191_cast, y = var_1140_to_fp16)[name = tensor("op_1141_cast")]; + tensor var_1142_cast = sigmoid(x = var_1141_cast)[name = tensor("op_1142_cast")]; + tensor input_193_cast = mul(x = input_191_cast, y = var_1142_cast)[name = tensor("input_193_cast")]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(241422080)))]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(246140736)))]; + tensor hidden_states_cast = linear(bias = text_encoder_text_model_encoder_layers_11_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_mlp_fc2_weight_to_fp16, x = input_193_cast)[name = tensor("hidden_states_cast")]; + tensor input_cast = add(x = input_187_cast, y = hidden_states_cast)[name = tensor("input_cast")]; + tensor last_hidden_state_axes_0 = const()[name = tensor("last_hidden_state_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_final_layer_norm_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_final_layer_norm_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(246142336)))]; + tensor text_encoder_text_model_final_layer_norm_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_final_layer_norm_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(246143936)))]; + tensor last_hidden_state_cast = layer_norm(axes = last_hidden_state_axes_0, beta = text_encoder_text_model_final_layer_norm_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_final_layer_norm_weight_to_fp16, x = input_cast)[name = tensor("last_hidden_state_cast")]; + tensor var_1156 = const()[name = tensor("op_1156"), val = tensor([0])]; + tensor var_1158 = reduce_argmax(axis = var_5, keep_dims = var_6, x = cast_526)[name = tensor("op_1158")]; + tensor stack_0_axis_0 = const()[name = tensor("stack_0_axis_0"), val = tensor(1)]; + tensor stack_0 = stack(axis = stack_0_axis_0, values = (var_1156, var_1158))[name = tensor("stack_0")]; + tensor var_1160_transpose_batch_dims_0 = const()[name = tensor("op_1160_transpose_batch_dims_0"), val = 
tensor(0)]; + tensor var_1160_transpose_cast = gather_nd(batch_dims = var_1160_transpose_batch_dims_0, indices = stack_0, x = last_hidden_state_cast)[name = tensor("op_1160_transpose_cast")]; + tensor var_1160_cast_to_fp32_dtype_0 = const()[name = tensor("op_1160_cast_to_fp32_dtype_0"), val = tensor("fp32")]; + tensor pooled_outputs = cast(dtype = var_1160_cast_to_fp32_dtype_0, x = var_1160_transpose_cast)[name = tensor("cast_125")]; + tensor hidden_embeds = cast(dtype = input_179_cast_to_fp32_dtype_0, x = input_179_cast)[name = tensor("cast_161")]; + } -> (hidden_embeds, pooled_outputs); +} \ No newline at end of file diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder.mlmodelc/weights/weight.bin b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder.mlmodelc/weights/weight.bin new file mode 100644 index 0000000000000000000000000000000000000000..3cd2ebb4765b40c98cb09d0e35c908e9a2e229ef --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder.mlmodelc/weights/weight.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a86533724aadf50c8f5539592d440887a484f60002d7967505c69c7faf4d7797 +size 246145536 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder2.mlmodelc/analytics/coremldata.bin b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder2.mlmodelc/analytics/coremldata.bin new file mode 100644 index 0000000000000000000000000000000000000000..ab0c2019dd8664c30fb272fd43f2f559a5fbd2b8 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder2.mlmodelc/analytics/coremldata.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eef66b480388714bb62f2f0f2f97a8953e44acbb00b25f9a9fd63c759f4f0e83 +size 207 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder2.mlmodelc/coremldata.bin b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder2.mlmodelc/coremldata.bin new file mode 100644 index 0000000000000000000000000000000000000000..bca87797b63a2002be08fcd99a522c5011db367c --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder2.mlmodelc/coremldata.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ff26866d8d8fbb4e53a0628f8aab5f7edf1b3ec763a96e6812c8f7fbf4c9827 +size 825 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder2.mlmodelc/metadata.json b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder2.mlmodelc/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..14166b3db2c377b93a3f3b936bc215a6bf082ec1 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder2.mlmodelc/metadata.json @@ -0,0 +1,82 @@ +[ + { + "shortDescription" : "Stable Diffusion generates images conditioned on text and\/or other images as input through the diffusion process. 
Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.", + "metadataOutputVersion" : "3.0", + "outputSchema" : [ + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float32", + "formattedType" : "MultiArray (Float32)", + "shortDescription" : "Hidden states after the encoder layers", + "shape" : "[]", + "name" : "hidden_embeds", + "type" : "MultiArray" + }, + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float32", + "formattedType" : "MultiArray (Float32)", + "shortDescription" : "The version of the `last_hidden_state` output after pooling", + "shape" : "[]", + "name" : "pooled_outputs", + "type" : "MultiArray" + } + ], + "version" : "diffusers\/stable-diffusion-xl-base-1.0", + "modelParameters" : [ + + ], + "author" : "Please refer to the Model Card available at huggingface.co\/diffusers\/stable-diffusion-xl-base-1.0", + "specificationVersion" : 7, + "storagePrecision" : "Float16", + "license" : "OpenRAIL (https:\/\/huggingface.co\/spaces\/CompVis\/stable-diffusion-license)", + "mlProgramOperationTypeHistogram" : { + "Ios16.cast" : 3, + "Ios16.mul" : 32, + "Ios16.layerNorm" : 65, + "Stack" : 1, + "Transpose" : 160, + "Ios16.linear" : 193, + "Ios16.add" : 97, + "Ios16.matmul" : 64, + "Ios16.gelu" : 32, + "Ios16.softmax" : 32, + "Ios16.gatherNd" : 1, + "Ios16.gather" : 1, + "Ios16.reshape" : 320, + "Ios16.reduceArgmax" : 1 + }, + "computePrecision" : "Mixed (Float16, Float32, Int32)", + "isUpdatable" : "0", + "availability" : { + "macOS" : "13.0", + "tvOS" : "16.0", + "watchOS" : "9.0", + "iOS" : "16.0", + "macCatalyst" : "16.0" + }, + "modelType" : { + "name" : "MLModelType_mlProgram" + }, + "inputSchema" : [ + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float32", + "formattedType" : "MultiArray (Float32 1 × 77)", + "shortDescription" : "The token ids that represent the input text", + "shape" : "[1, 77]", + "name" : "input_ids", + "type" : "MultiArray" + } + ], + "userDefinedMetadata" : { + "com.github.apple.coremltools.version" : "7.0b1", + "com.github.apple.coremltools.source" : "torch==2.0.1+cu117" + }, + "generatedClassName" : "Stable_Diffusion_version_diffusers_stable_diffusion_xl_base_1_0_text_encoder_2", + "method" : "predict" + } +] \ No newline at end of file diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder2.mlmodelc/model.mil b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder2.mlmodelc/model.mil new file mode 100644 index 0000000000000000000000000000000000000000..62c9f845ab22560659be573a0c15fcd970d3ec60 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder2.mlmodelc/model.mil @@ -0,0 +1,2275 @@ +program(1.0) +[buildInfo = dict, tensor>({{"coremlc-component-MIL", "5.33.4"}, {"coremlc-version", "1839.0.0"}, {"coremltools-component-torch", "2.0.1+cu117"}, {"coremltools-version", "7.0b1"}})] +{ + func main(tensor input_ids) { + tensor var_5 = const()[name = tensor("op_5"), val = tensor(-1)]; + tensor var_6 = const()[name = tensor("op_6"), val = tensor(false)]; + tensor cast_1_dtype_0 = const()[name = tensor("cast_1_dtype_0"), val = tensor("int32")]; + tensor inputs_embeds_axis_0 = const()[name = tensor("inputs_embeds_axis_0"), val = tensor(0)]; + tensor inputs_embeds_batch_dims_0 = const()[name = tensor("inputs_embeds_batch_dims_0"), val = tensor(0)]; + tensor text_encoder_text_model_embeddings_token_embedding_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_embeddings_token_embedding_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(64)))]; + tensor cast_1322 = cast(dtype = cast_1_dtype_0, x = input_ids)[name = tensor("cast_1322")]; + tensor inputs_embeds_cast = gather(axis = inputs_embeds_axis_0, batch_dims = inputs_embeds_batch_dims_0, indices = cast_1322, x = text_encoder_text_model_embeddings_token_embedding_weight_to_fp16)[name = tensor("inputs_embeds_cast")]; + tensor position_embeddings_to_fp16 = const()[name = tensor("position_embeddings_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126484608)))]; + tensor input_3_cast = add(x = inputs_embeds_cast, y = position_embeddings_to_fp16)[name = tensor("input_3_cast")]; + tensor hidden_states_1_axes_0 = const()[name = tensor("hidden_states_1_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126681792)))]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126684416)))]; + tensor var_12_to_fp16 = const()[name = tensor("op_12_to_fp16"), val = tensor(0x1.5p-17)]; + tensor hidden_states_1_cast = layer_norm(axes = hidden_states_1_axes_0, beta = text_encoder_text_model_encoder_layers_0_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_0_layer_norm1_weight_to_fp16, x = input_3_cast)[name = tensor("hidden_states_1_cast")]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126687040)))]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(129963904)))]; + tensor var_128_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_q_proj_weight_to_fp16, x = hidden_states_1_cast)[name = tensor("op_128_cast")]; + tensor var_129_to_fp16 = const()[name = tensor("op_129_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_5_cast = mul(x = var_128_cast, y = var_129_to_fp16)[name = tensor("tensor_5_cast")]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(129966528)))]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(133243392)))]; + tensor tensor_1_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_k_proj_bias_to_fp16, 
weight = text_encoder_text_model_encoder_layers_0_self_attn_k_proj_weight_to_fp16, x = hidden_states_1_cast)[name = tensor("tensor_1_cast")]; + tensor var_134 = const()[name = tensor("op_134"), val = tensor([1, -1, 20, 64])]; + tensor var_135_cast = reshape(shape = var_134, x = tensor_1_cast)[name = tensor("op_135_cast")]; + tensor var_136_perm_0 = const()[name = tensor("op_136_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(133246016)))]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(136522880)))]; + tensor tensor_3_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_v_proj_weight_to_fp16, x = hidden_states_1_cast)[name = tensor("tensor_3_cast")]; + tensor var_141 = const()[name = tensor("op_141"), val = tensor([1, -1, 20, 64])]; + tensor var_142_cast = reshape(shape = var_141, x = tensor_3_cast)[name = tensor("op_142_cast")]; + tensor var_143_perm_0 = const()[name = tensor("op_143_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_150 = const()[name = tensor("op_150"), val = tensor([1, 77, 20, 64])]; + tensor var_151_cast = reshape(shape = var_150, x = tensor_5_cast)[name = tensor("op_151_cast")]; + tensor var_152_perm_0 = const()[name = tensor("op_152_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_154 = const()[name = tensor("op_154"), val = tensor([20, -1, 64])]; + tensor transpose_158 = transpose(perm = var_152_perm_0, x = var_151_cast)[name = tensor("transpose_158")]; + tensor query_states_1_cast = reshape(shape = var_154, x = transpose_158)[name = tensor("query_states_1_cast")]; + tensor var_156 = const()[name = tensor("op_156"), val = tensor([20, -1, 64])]; + tensor transpose_160 = transpose(perm = var_136_perm_0, x = var_135_cast)[name = tensor("transpose_160")]; + tensor key_states_3_cast = reshape(shape = var_156, x = transpose_160)[name = tensor("key_states_3_cast")]; + tensor var_158 = const()[name = tensor("op_158"), val = tensor([20, -1, 64])]; + tensor transpose_159 = transpose(perm = var_143_perm_0, x = var_142_cast)[name = tensor("transpose_159")]; + tensor value_states_3_cast = reshape(shape = var_158, x = transpose_159)[name = tensor("value_states_3_cast")]; + tensor var_161_perm_0 = const()[name = tensor("op_161_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_1_transpose_x_0 = const()[name = tensor("attn_weights_1_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_1_transpose_y_0 = const()[name = tensor("attn_weights_1_transpose_y_0"), val = tensor(false)]; + tensor transpose_157 = transpose(perm = var_161_perm_0, x = key_states_3_cast)[name = tensor("transpose_157")]; + tensor attn_weights_1_cast = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = query_states_1_cast, y = transpose_157)[name = tensor("attn_weights_1_cast")]; + tensor var_163 = const()[name = tensor("op_163"), val = tensor([1, 20, 77, 77])]; + tensor var_164_cast = reshape(shape = var_163, x = attn_weights_1_cast)[name = tensor("op_164_cast")]; + tensor 
causal_attention_mask_to_fp16 = const()[name = tensor("causal_attention_mask_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(136525504)))]; + tensor attn_weights_3_cast = add(x = var_164_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_3_cast")]; + tensor var_169 = const()[name = tensor("op_169"), val = tensor([20, 77, 77])]; + tensor input_5_cast = reshape(shape = var_169, x = attn_weights_3_cast)[name = tensor("input_5_cast")]; + tensor input_7_cast = softmax(axis = var_5, x = input_5_cast)[name = tensor("input_7_cast")]; + tensor attn_output_1_transpose_x_0 = const()[name = tensor("attn_output_1_transpose_x_0"), val = tensor(false)]; + tensor attn_output_1_transpose_y_0 = const()[name = tensor("attn_output_1_transpose_y_0"), val = tensor(false)]; + tensor attn_output_1_cast = matmul(transpose_x = attn_output_1_transpose_x_0, transpose_y = attn_output_1_transpose_y_0, x = input_7_cast, y = value_states_3_cast)[name = tensor("attn_output_1_cast")]; + tensor var_174 = const()[name = tensor("op_174"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_3_cast = reshape(shape = var_174, x = attn_output_1_cast)[name = tensor("attn_output_3_cast")]; + tensor attn_output_5_perm_0 = const()[name = tensor("attn_output_5_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_177 = const()[name = tensor("op_177"), val = tensor([1, 77, 1280])]; + tensor transpose_156 = transpose(perm = attn_output_5_perm_0, x = attn_output_3_cast)[name = tensor("transpose_156")]; + tensor input_9_cast = reshape(shape = var_177, x = transpose_156)[name = tensor("input_9_cast")]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(136537472)))]; + tensor text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139814336)))]; + tensor hidden_states_3_cast = linear(bias = text_encoder_text_model_encoder_layers_0_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_self_attn_out_proj_weight_to_fp16, x = input_9_cast)[name = tensor("hidden_states_3_cast")]; + tensor input_11_cast = add(x = input_3_cast, y = hidden_states_3_cast)[name = tensor("input_11_cast")]; + tensor input_13_axes_0 = const()[name = tensor("input_13_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139816960)))]; + tensor text_encoder_text_model_encoder_layers_0_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139819584)))]; + tensor input_13_cast = layer_norm(axes = input_13_axes_0, beta = text_encoder_text_model_encoder_layers_0_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_0_layer_norm2_weight_to_fp16, x = input_11_cast)[name = tensor("input_13_cast")]; + tensor 
text_encoder_text_model_encoder_layers_0_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139822208)))]; + tensor text_encoder_text_model_encoder_layers_0_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152929472)))]; + tensor input_15_cast = linear(bias = text_encoder_text_model_encoder_layers_0_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_mlp_fc1_weight_to_fp16, x = input_13_cast)[name = tensor("input_15_cast")]; + tensor input_17_mode_0 = const()[name = tensor("input_17_mode_0"), val = tensor("EXACT")]; + tensor input_17_cast = gelu(mode = input_17_mode_0, x = input_15_cast)[name = tensor("input_17_cast")]; + tensor text_encoder_text_model_encoder_layers_0_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152939776)))]; + tensor text_encoder_text_model_encoder_layers_0_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_0_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(166047040)))]; + tensor hidden_states_5_cast = linear(bias = text_encoder_text_model_encoder_layers_0_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_0_mlp_fc2_weight_to_fp16, x = input_17_cast)[name = tensor("hidden_states_5_cast")]; + tensor input_19_cast = add(x = input_11_cast, y = hidden_states_5_cast)[name = tensor("input_19_cast")]; + tensor hidden_states_7_axes_0 = const()[name = tensor("hidden_states_7_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_1_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(166049664)))]; + tensor text_encoder_text_model_encoder_layers_1_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(166052288)))]; + tensor hidden_states_7_cast = layer_norm(axes = hidden_states_7_axes_0, beta = text_encoder_text_model_encoder_layers_1_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_1_layer_norm1_weight_to_fp16, x = input_19_cast)[name = tensor("hidden_states_7_cast")]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(166054912)))]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(169331776)))]; + tensor var_215_cast = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_q_proj_weight_to_fp16, x = 
hidden_states_7_cast)[name = tensor("op_215_cast")]; + tensor var_216_to_fp16 = const()[name = tensor("op_216_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_11_cast = mul(x = var_215_cast, y = var_216_to_fp16)[name = tensor("tensor_11_cast")]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(169334400)))]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(172611264)))]; + tensor tensor_7_cast = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_k_proj_weight_to_fp16, x = hidden_states_7_cast)[name = tensor("tensor_7_cast")]; + tensor var_221 = const()[name = tensor("op_221"), val = tensor([1, -1, 20, 64])]; + tensor var_222_cast = reshape(shape = var_221, x = tensor_7_cast)[name = tensor("op_222_cast")]; + tensor var_223_perm_0 = const()[name = tensor("op_223_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(172613888)))]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(175890752)))]; + tensor tensor_9_cast = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_v_proj_weight_to_fp16, x = hidden_states_7_cast)[name = tensor("tensor_9_cast")]; + tensor var_228 = const()[name = tensor("op_228"), val = tensor([1, -1, 20, 64])]; + tensor var_229_cast = reshape(shape = var_228, x = tensor_9_cast)[name = tensor("op_229_cast")]; + tensor var_230_perm_0 = const()[name = tensor("op_230_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_237 = const()[name = tensor("op_237"), val = tensor([1, 77, 20, 64])]; + tensor var_238_cast = reshape(shape = var_237, x = tensor_11_cast)[name = tensor("op_238_cast")]; + tensor var_239_perm_0 = const()[name = tensor("op_239_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_241 = const()[name = tensor("op_241"), val = tensor([20, -1, 64])]; + tensor transpose_153 = transpose(perm = var_239_perm_0, x = var_238_cast)[name = tensor("transpose_153")]; + tensor query_states_3_cast = reshape(shape = var_241, x = transpose_153)[name = tensor("query_states_3_cast")]; + tensor var_243 = const()[name = tensor("op_243"), val = tensor([20, -1, 64])]; + tensor transpose_155 = transpose(perm = var_223_perm_0, x = var_222_cast)[name = tensor("transpose_155")]; + tensor key_states_7_cast = reshape(shape = var_243, x = transpose_155)[name = tensor("key_states_7_cast")]; + tensor var_245 = const()[name = tensor("op_245"), val = tensor([20, -1, 64])]; + tensor transpose_154 = transpose(perm = var_230_perm_0, x = var_229_cast)[name = tensor("transpose_154")]; + tensor value_states_7_cast = 
reshape(shape = var_245, x = transpose_154)[name = tensor("value_states_7_cast")]; + tensor var_248_perm_0 = const()[name = tensor("op_248_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_7_transpose_x_0 = const()[name = tensor("attn_weights_7_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_7_transpose_y_0 = const()[name = tensor("attn_weights_7_transpose_y_0"), val = tensor(false)]; + tensor transpose_152 = transpose(perm = var_248_perm_0, x = key_states_7_cast)[name = tensor("transpose_152")]; + tensor attn_weights_7_cast = matmul(transpose_x = attn_weights_7_transpose_x_0, transpose_y = attn_weights_7_transpose_y_0, x = query_states_3_cast, y = transpose_152)[name = tensor("attn_weights_7_cast")]; + tensor var_250 = const()[name = tensor("op_250"), val = tensor([1, 20, 77, 77])]; + tensor var_251_cast = reshape(shape = var_250, x = attn_weights_7_cast)[name = tensor("op_251_cast")]; + tensor attn_weights_9_cast = add(x = var_251_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_9_cast")]; + tensor var_256 = const()[name = tensor("op_256"), val = tensor([20, 77, 77])]; + tensor input_21_cast = reshape(shape = var_256, x = attn_weights_9_cast)[name = tensor("input_21_cast")]; + tensor input_23_cast = softmax(axis = var_5, x = input_21_cast)[name = tensor("input_23_cast")]; + tensor attn_output_7_transpose_x_0 = const()[name = tensor("attn_output_7_transpose_x_0"), val = tensor(false)]; + tensor attn_output_7_transpose_y_0 = const()[name = tensor("attn_output_7_transpose_y_0"), val = tensor(false)]; + tensor attn_output_7_cast = matmul(transpose_x = attn_output_7_transpose_x_0, transpose_y = attn_output_7_transpose_y_0, x = input_23_cast, y = value_states_7_cast)[name = tensor("attn_output_7_cast")]; + tensor var_261 = const()[name = tensor("op_261"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_9_cast = reshape(shape = var_261, x = attn_output_7_cast)[name = tensor("attn_output_9_cast")]; + tensor attn_output_11_perm_0 = const()[name = tensor("attn_output_11_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_264 = const()[name = tensor("op_264"), val = tensor([1, 77, 1280])]; + tensor transpose_151 = transpose(perm = attn_output_11_perm_0, x = attn_output_9_cast)[name = tensor("transpose_151")]; + tensor input_25_cast = reshape(shape = var_264, x = transpose_151)[name = tensor("input_25_cast")]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(175893376)))]; + tensor text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179170240)))]; + tensor hidden_states_9_cast = linear(bias = text_encoder_text_model_encoder_layers_1_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_self_attn_out_proj_weight_to_fp16, x = input_25_cast)[name = tensor("hidden_states_9_cast")]; + tensor input_27_cast = add(x = input_19_cast, y = hidden_states_9_cast)[name = tensor("input_27_cast")]; + tensor input_29_axes_0 = const()[name = tensor("input_29_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_1_layer_norm2_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_1_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179172864)))]; + tensor text_encoder_text_model_encoder_layers_1_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179175488)))]; + tensor input_29_cast = layer_norm(axes = input_29_axes_0, beta = text_encoder_text_model_encoder_layers_1_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_1_layer_norm2_weight_to_fp16, x = input_27_cast)[name = tensor("input_29_cast")]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(179178112)))]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(192285376)))]; + tensor input_31_cast = linear(bias = text_encoder_text_model_encoder_layers_1_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_mlp_fc1_weight_to_fp16, x = input_29_cast)[name = tensor("input_31_cast")]; + tensor input_33_mode_0 = const()[name = tensor("input_33_mode_0"), val = tensor("EXACT")]; + tensor input_33_cast = gelu(mode = input_33_mode_0, x = input_31_cast)[name = tensor("input_33_cast")]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(192295680)))]; + tensor text_encoder_text_model_encoder_layers_1_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_1_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(205402944)))]; + tensor hidden_states_11_cast = linear(bias = text_encoder_text_model_encoder_layers_1_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_1_mlp_fc2_weight_to_fp16, x = input_33_cast)[name = tensor("hidden_states_11_cast")]; + tensor input_35_cast = add(x = input_27_cast, y = hidden_states_11_cast)[name = tensor("input_35_cast")]; + tensor hidden_states_13_axes_0 = const()[name = tensor("hidden_states_13_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(205405568)))]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(205408192)))]; + tensor hidden_states_13_cast = layer_norm(axes = hidden_states_13_axes_0, beta = text_encoder_text_model_encoder_layers_2_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_2_layer_norm1_weight_to_fp16, x = input_35_cast)[name = tensor("hidden_states_13_cast")]; + tensor 
text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(205410816)))]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(208687680)))]; + tensor var_302_cast = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_q_proj_weight_to_fp16, x = hidden_states_13_cast)[name = tensor("op_302_cast")]; + tensor var_303_to_fp16 = const()[name = tensor("op_303_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_17_cast = mul(x = var_302_cast, y = var_303_to_fp16)[name = tensor("tensor_17_cast")]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(208690304)))]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(211967168)))]; + tensor tensor_13_cast = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_k_proj_weight_to_fp16, x = hidden_states_13_cast)[name = tensor("tensor_13_cast")]; + tensor var_308 = const()[name = tensor("op_308"), val = tensor([1, -1, 20, 64])]; + tensor var_309_cast = reshape(shape = var_308, x = tensor_13_cast)[name = tensor("op_309_cast")]; + tensor var_310_perm_0 = const()[name = tensor("op_310_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(211969792)))]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(215246656)))]; + tensor tensor_15_cast = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_v_proj_weight_to_fp16, x = hidden_states_13_cast)[name = tensor("tensor_15_cast")]; + tensor var_315 = const()[name = tensor("op_315"), val = tensor([1, -1, 20, 64])]; + tensor var_316_cast = reshape(shape = var_315, x = tensor_15_cast)[name = tensor("op_316_cast")]; + tensor var_317_perm_0 = const()[name = tensor("op_317_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_324 = const()[name = tensor("op_324"), val = tensor([1, 77, 20, 64])]; + tensor var_325_cast = reshape(shape = var_324, x = tensor_17_cast)[name = tensor("op_325_cast")]; + tensor var_326_perm_0 = const()[name = tensor("op_326_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_328 = const()[name = tensor("op_328"), val = tensor([20, 
-1, 64])]; + tensor transpose_148 = transpose(perm = var_326_perm_0, x = var_325_cast)[name = tensor("transpose_148")]; + tensor query_states_5_cast = reshape(shape = var_328, x = transpose_148)[name = tensor("query_states_5_cast")]; + tensor var_330 = const()[name = tensor("op_330"), val = tensor([20, -1, 64])]; + tensor transpose_150 = transpose(perm = var_310_perm_0, x = var_309_cast)[name = tensor("transpose_150")]; + tensor key_states_11_cast = reshape(shape = var_330, x = transpose_150)[name = tensor("key_states_11_cast")]; + tensor var_332 = const()[name = tensor("op_332"), val = tensor([20, -1, 64])]; + tensor transpose_149 = transpose(perm = var_317_perm_0, x = var_316_cast)[name = tensor("transpose_149")]; + tensor value_states_11_cast = reshape(shape = var_332, x = transpose_149)[name = tensor("value_states_11_cast")]; + tensor var_335_perm_0 = const()[name = tensor("op_335_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_13_transpose_x_0 = const()[name = tensor("attn_weights_13_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_13_transpose_y_0 = const()[name = tensor("attn_weights_13_transpose_y_0"), val = tensor(false)]; + tensor transpose_147 = transpose(perm = var_335_perm_0, x = key_states_11_cast)[name = tensor("transpose_147")]; + tensor attn_weights_13_cast = matmul(transpose_x = attn_weights_13_transpose_x_0, transpose_y = attn_weights_13_transpose_y_0, x = query_states_5_cast, y = transpose_147)[name = tensor("attn_weights_13_cast")]; + tensor var_337 = const()[name = tensor("op_337"), val = tensor([1, 20, 77, 77])]; + tensor var_338_cast = reshape(shape = var_337, x = attn_weights_13_cast)[name = tensor("op_338_cast")]; + tensor attn_weights_15_cast = add(x = var_338_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_15_cast")]; + tensor var_343 = const()[name = tensor("op_343"), val = tensor([20, 77, 77])]; + tensor input_37_cast = reshape(shape = var_343, x = attn_weights_15_cast)[name = tensor("input_37_cast")]; + tensor input_39_cast = softmax(axis = var_5, x = input_37_cast)[name = tensor("input_39_cast")]; + tensor attn_output_13_transpose_x_0 = const()[name = tensor("attn_output_13_transpose_x_0"), val = tensor(false)]; + tensor attn_output_13_transpose_y_0 = const()[name = tensor("attn_output_13_transpose_y_0"), val = tensor(false)]; + tensor attn_output_13_cast = matmul(transpose_x = attn_output_13_transpose_x_0, transpose_y = attn_output_13_transpose_y_0, x = input_39_cast, y = value_states_11_cast)[name = tensor("attn_output_13_cast")]; + tensor var_348 = const()[name = tensor("op_348"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_15_cast = reshape(shape = var_348, x = attn_output_13_cast)[name = tensor("attn_output_15_cast")]; + tensor attn_output_17_perm_0 = const()[name = tensor("attn_output_17_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_351 = const()[name = tensor("op_351"), val = tensor([1, 77, 1280])]; + tensor transpose_146 = transpose(perm = attn_output_17_perm_0, x = attn_output_15_cast)[name = tensor("transpose_146")]; + tensor input_41_cast = reshape(shape = var_351, x = transpose_146)[name = tensor("input_41_cast")]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(215249280)))]; + tensor text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias_to_fp16 = 
const()[name = tensor("text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(218526144)))]; + tensor hidden_states_15_cast = linear(bias = text_encoder_text_model_encoder_layers_2_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_self_attn_out_proj_weight_to_fp16, x = input_41_cast)[name = tensor("hidden_states_15_cast")]; + tensor input_43_cast = add(x = input_35_cast, y = hidden_states_15_cast)[name = tensor("input_43_cast")]; + tensor input_45_axes_0 = const()[name = tensor("input_45_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(218528768)))]; + tensor text_encoder_text_model_encoder_layers_2_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(218531392)))]; + tensor input_45_cast = layer_norm(axes = input_45_axes_0, beta = text_encoder_text_model_encoder_layers_2_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_2_layer_norm2_weight_to_fp16, x = input_43_cast)[name = tensor("input_45_cast")]; + tensor text_encoder_text_model_encoder_layers_2_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(218534016)))]; + tensor text_encoder_text_model_encoder_layers_2_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231641280)))]; + tensor input_47_cast = linear(bias = text_encoder_text_model_encoder_layers_2_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_mlp_fc1_weight_to_fp16, x = input_45_cast)[name = tensor("input_47_cast")]; + tensor input_49_mode_0 = const()[name = tensor("input_49_mode_0"), val = tensor("EXACT")]; + tensor input_49_cast = gelu(mode = input_49_mode_0, x = input_47_cast)[name = tensor("input_49_cast")]; + tensor text_encoder_text_model_encoder_layers_2_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231651584)))]; + tensor text_encoder_text_model_encoder_layers_2_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_2_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244758848)))]; + tensor hidden_states_17_cast = linear(bias = text_encoder_text_model_encoder_layers_2_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_2_mlp_fc2_weight_to_fp16, x = input_49_cast)[name = tensor("hidden_states_17_cast")]; + tensor input_51_cast = add(x = input_43_cast, y = hidden_states_17_cast)[name = tensor("input_51_cast")]; + tensor hidden_states_19_axes_0 = const()[name = tensor("hidden_states_19_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm1_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_3_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244761472)))]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244764096)))]; + tensor hidden_states_19_cast = layer_norm(axes = hidden_states_19_axes_0, beta = text_encoder_text_model_encoder_layers_3_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_3_layer_norm1_weight_to_fp16, x = input_51_cast)[name = tensor("hidden_states_19_cast")]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244766720)))]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(248043584)))]; + tensor var_389_cast = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_q_proj_weight_to_fp16, x = hidden_states_19_cast)[name = tensor("op_389_cast")]; + tensor var_390_to_fp16 = const()[name = tensor("op_390_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_23_cast = mul(x = var_389_cast, y = var_390_to_fp16)[name = tensor("tensor_23_cast")]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(248046208)))]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(251323072)))]; + tensor tensor_19_cast = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_k_proj_weight_to_fp16, x = hidden_states_19_cast)[name = tensor("tensor_19_cast")]; + tensor var_395 = const()[name = tensor("op_395"), val = tensor([1, -1, 20, 64])]; + tensor var_396_cast = reshape(shape = var_395, x = tensor_19_cast)[name = tensor("op_396_cast")]; + tensor var_397_perm_0 = const()[name = tensor("op_397_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(251325696)))]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(254602560)))]; + tensor tensor_21_cast = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_v_proj_bias_to_fp16, 
weight = text_encoder_text_model_encoder_layers_3_self_attn_v_proj_weight_to_fp16, x = hidden_states_19_cast)[name = tensor("tensor_21_cast")]; + tensor var_402 = const()[name = tensor("op_402"), val = tensor([1, -1, 20, 64])]; + tensor var_403_cast = reshape(shape = var_402, x = tensor_21_cast)[name = tensor("op_403_cast")]; + tensor var_404_perm_0 = const()[name = tensor("op_404_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_411 = const()[name = tensor("op_411"), val = tensor([1, 77, 20, 64])]; + tensor var_412_cast = reshape(shape = var_411, x = tensor_23_cast)[name = tensor("op_412_cast")]; + tensor var_413_perm_0 = const()[name = tensor("op_413_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_415 = const()[name = tensor("op_415"), val = tensor([20, -1, 64])]; + tensor transpose_143 = transpose(perm = var_413_perm_0, x = var_412_cast)[name = tensor("transpose_143")]; + tensor query_states_7_cast = reshape(shape = var_415, x = transpose_143)[name = tensor("query_states_7_cast")]; + tensor var_417 = const()[name = tensor("op_417"), val = tensor([20, -1, 64])]; + tensor transpose_145 = transpose(perm = var_397_perm_0, x = var_396_cast)[name = tensor("transpose_145")]; + tensor key_states_15_cast = reshape(shape = var_417, x = transpose_145)[name = tensor("key_states_15_cast")]; + tensor var_419 = const()[name = tensor("op_419"), val = tensor([20, -1, 64])]; + tensor transpose_144 = transpose(perm = var_404_perm_0, x = var_403_cast)[name = tensor("transpose_144")]; + tensor value_states_15_cast = reshape(shape = var_419, x = transpose_144)[name = tensor("value_states_15_cast")]; + tensor var_422_perm_0 = const()[name = tensor("op_422_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_19_transpose_x_0 = const()[name = tensor("attn_weights_19_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_19_transpose_y_0 = const()[name = tensor("attn_weights_19_transpose_y_0"), val = tensor(false)]; + tensor transpose_142 = transpose(perm = var_422_perm_0, x = key_states_15_cast)[name = tensor("transpose_142")]; + tensor attn_weights_19_cast = matmul(transpose_x = attn_weights_19_transpose_x_0, transpose_y = attn_weights_19_transpose_y_0, x = query_states_7_cast, y = transpose_142)[name = tensor("attn_weights_19_cast")]; + tensor var_424 = const()[name = tensor("op_424"), val = tensor([1, 20, 77, 77])]; + tensor var_425_cast = reshape(shape = var_424, x = attn_weights_19_cast)[name = tensor("op_425_cast")]; + tensor attn_weights_21_cast = add(x = var_425_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_21_cast")]; + tensor var_430 = const()[name = tensor("op_430"), val = tensor([20, 77, 77])]; + tensor input_53_cast = reshape(shape = var_430, x = attn_weights_21_cast)[name = tensor("input_53_cast")]; + tensor input_55_cast = softmax(axis = var_5, x = input_53_cast)[name = tensor("input_55_cast")]; + tensor attn_output_19_transpose_x_0 = const()[name = tensor("attn_output_19_transpose_x_0"), val = tensor(false)]; + tensor attn_output_19_transpose_y_0 = const()[name = tensor("attn_output_19_transpose_y_0"), val = tensor(false)]; + tensor attn_output_19_cast = matmul(transpose_x = attn_output_19_transpose_x_0, transpose_y = attn_output_19_transpose_y_0, x = input_55_cast, y = value_states_15_cast)[name = tensor("attn_output_19_cast")]; + tensor var_435 = const()[name = tensor("op_435"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_21_cast = reshape(shape = var_435, x = attn_output_19_cast)[name = tensor("attn_output_21_cast")]; + tensor 
attn_output_23_perm_0 = const()[name = tensor("attn_output_23_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_438 = const()[name = tensor("op_438"), val = tensor([1, 77, 1280])]; + tensor transpose_141 = transpose(perm = attn_output_23_perm_0, x = attn_output_21_cast)[name = tensor("transpose_141")]; + tensor input_57_cast = reshape(shape = var_438, x = transpose_141)[name = tensor("input_57_cast")]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(254605184)))]; + tensor text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(257882048)))]; + tensor hidden_states_21_cast = linear(bias = text_encoder_text_model_encoder_layers_3_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_self_attn_out_proj_weight_to_fp16, x = input_57_cast)[name = tensor("hidden_states_21_cast")]; + tensor input_59_cast = add(x = input_51_cast, y = hidden_states_21_cast)[name = tensor("input_59_cast")]; + tensor input_61_axes_0 = const()[name = tensor("input_61_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(257884672)))]; + tensor text_encoder_text_model_encoder_layers_3_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(257887296)))]; + tensor input_61_cast = layer_norm(axes = input_61_axes_0, beta = text_encoder_text_model_encoder_layers_3_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_3_layer_norm2_weight_to_fp16, x = input_59_cast)[name = tensor("input_61_cast")]; + tensor text_encoder_text_model_encoder_layers_3_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(257889920)))]; + tensor text_encoder_text_model_encoder_layers_3_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(270997184)))]; + tensor input_63_cast = linear(bias = text_encoder_text_model_encoder_layers_3_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_mlp_fc1_weight_to_fp16, x = input_61_cast)[name = tensor("input_63_cast")]; + tensor input_65_mode_0 = const()[name = tensor("input_65_mode_0"), val = tensor("EXACT")]; + tensor input_65_cast = gelu(mode = input_65_mode_0, x = input_63_cast)[name = tensor("input_65_cast")]; + tensor text_encoder_text_model_encoder_layers_3_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(271007488)))]; + tensor 
text_encoder_text_model_encoder_layers_3_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_3_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(284114752)))]; + tensor hidden_states_23_cast = linear(bias = text_encoder_text_model_encoder_layers_3_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_3_mlp_fc2_weight_to_fp16, x = input_65_cast)[name = tensor("hidden_states_23_cast")]; + tensor input_67_cast = add(x = input_59_cast, y = hidden_states_23_cast)[name = tensor("input_67_cast")]; + tensor hidden_states_25_axes_0 = const()[name = tensor("hidden_states_25_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(284117376)))]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(284120000)))]; + tensor hidden_states_25_cast = layer_norm(axes = hidden_states_25_axes_0, beta = text_encoder_text_model_encoder_layers_4_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_4_layer_norm1_weight_to_fp16, x = input_67_cast)[name = tensor("hidden_states_25_cast")]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(284122624)))]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(287399488)))]; + tensor var_476_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_q_proj_weight_to_fp16, x = hidden_states_25_cast)[name = tensor("op_476_cast")]; + tensor var_477_to_fp16 = const()[name = tensor("op_477_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_29_cast = mul(x = var_476_cast, y = var_477_to_fp16)[name = tensor("tensor_29_cast")]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(287402112)))]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(290678976)))]; + tensor tensor_25_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_k_proj_weight_to_fp16, x = hidden_states_25_cast)[name = tensor("tensor_25_cast")]; + tensor var_482 = const()[name = tensor("op_482"), val = tensor([1, -1, 20, 64])]; + tensor var_483_cast = reshape(shape = var_482, x = 
tensor_25_cast)[name = tensor("op_483_cast")]; + tensor var_484_perm_0 = const()[name = tensor("op_484_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(290681600)))]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293958464)))]; + tensor tensor_27_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_v_proj_weight_to_fp16, x = hidden_states_25_cast)[name = tensor("tensor_27_cast")]; + tensor var_489 = const()[name = tensor("op_489"), val = tensor([1, -1, 20, 64])]; + tensor var_490_cast = reshape(shape = var_489, x = tensor_27_cast)[name = tensor("op_490_cast")]; + tensor var_491_perm_0 = const()[name = tensor("op_491_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_498 = const()[name = tensor("op_498"), val = tensor([1, 77, 20, 64])]; + tensor var_499_cast = reshape(shape = var_498, x = tensor_29_cast)[name = tensor("op_499_cast")]; + tensor var_500_perm_0 = const()[name = tensor("op_500_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_502 = const()[name = tensor("op_502"), val = tensor([20, -1, 64])]; + tensor transpose_138 = transpose(perm = var_500_perm_0, x = var_499_cast)[name = tensor("transpose_138")]; + tensor query_states_9_cast = reshape(shape = var_502, x = transpose_138)[name = tensor("query_states_9_cast")]; + tensor var_504 = const()[name = tensor("op_504"), val = tensor([20, -1, 64])]; + tensor transpose_140 = transpose(perm = var_484_perm_0, x = var_483_cast)[name = tensor("transpose_140")]; + tensor key_states_19_cast = reshape(shape = var_504, x = transpose_140)[name = tensor("key_states_19_cast")]; + tensor var_506 = const()[name = tensor("op_506"), val = tensor([20, -1, 64])]; + tensor transpose_139 = transpose(perm = var_491_perm_0, x = var_490_cast)[name = tensor("transpose_139")]; + tensor value_states_19_cast = reshape(shape = var_506, x = transpose_139)[name = tensor("value_states_19_cast")]; + tensor var_509_perm_0 = const()[name = tensor("op_509_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_25_transpose_x_0 = const()[name = tensor("attn_weights_25_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_25_transpose_y_0 = const()[name = tensor("attn_weights_25_transpose_y_0"), val = tensor(false)]; + tensor transpose_137 = transpose(perm = var_509_perm_0, x = key_states_19_cast)[name = tensor("transpose_137")]; + tensor attn_weights_25_cast = matmul(transpose_x = attn_weights_25_transpose_x_0, transpose_y = attn_weights_25_transpose_y_0, x = query_states_9_cast, y = transpose_137)[name = tensor("attn_weights_25_cast")]; + tensor var_511 = const()[name = tensor("op_511"), val = tensor([1, 20, 77, 77])]; + tensor var_512_cast = reshape(shape = var_511, x = attn_weights_25_cast)[name = tensor("op_512_cast")]; + tensor attn_weights_27_cast = add(x = var_512_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_27_cast")]; + tensor var_517 = const()[name = tensor("op_517"), val = tensor([20, 77, 77])]; + tensor input_69_cast = reshape(shape = var_517, x = 
attn_weights_27_cast)[name = tensor("input_69_cast")]; + tensor input_71_cast = softmax(axis = var_5, x = input_69_cast)[name = tensor("input_71_cast")]; + tensor attn_output_25_transpose_x_0 = const()[name = tensor("attn_output_25_transpose_x_0"), val = tensor(false)]; + tensor attn_output_25_transpose_y_0 = const()[name = tensor("attn_output_25_transpose_y_0"), val = tensor(false)]; + tensor attn_output_25_cast = matmul(transpose_x = attn_output_25_transpose_x_0, transpose_y = attn_output_25_transpose_y_0, x = input_71_cast, y = value_states_19_cast)[name = tensor("attn_output_25_cast")]; + tensor var_522 = const()[name = tensor("op_522"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_27_cast = reshape(shape = var_522, x = attn_output_25_cast)[name = tensor("attn_output_27_cast")]; + tensor attn_output_29_perm_0 = const()[name = tensor("attn_output_29_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_525 = const()[name = tensor("op_525"), val = tensor([1, 77, 1280])]; + tensor transpose_136 = transpose(perm = attn_output_29_perm_0, x = attn_output_27_cast)[name = tensor("transpose_136")]; + tensor input_73_cast = reshape(shape = var_525, x = transpose_136)[name = tensor("input_73_cast")]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293961088)))]; + tensor text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(297237952)))]; + tensor hidden_states_27_cast = linear(bias = text_encoder_text_model_encoder_layers_4_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_self_attn_out_proj_weight_to_fp16, x = input_73_cast)[name = tensor("hidden_states_27_cast")]; + tensor input_75_cast = add(x = input_67_cast, y = hidden_states_27_cast)[name = tensor("input_75_cast")]; + tensor input_77_axes_0 = const()[name = tensor("input_77_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(297240576)))]; + tensor text_encoder_text_model_encoder_layers_4_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(297243200)))]; + tensor input_77_cast = layer_norm(axes = input_77_axes_0, beta = text_encoder_text_model_encoder_layers_4_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_4_layer_norm2_weight_to_fp16, x = input_75_cast)[name = tensor("input_77_cast")]; + tensor text_encoder_text_model_encoder_layers_4_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(297245824)))]; + tensor text_encoder_text_model_encoder_layers_4_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(310353088)))]; + tensor input_79_cast = linear(bias = text_encoder_text_model_encoder_layers_4_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_mlp_fc1_weight_to_fp16, x = input_77_cast)[name = tensor("input_79_cast")]; + tensor input_81_mode_0 = const()[name = tensor("input_81_mode_0"), val = tensor("EXACT")]; + tensor input_81_cast = gelu(mode = input_81_mode_0, x = input_79_cast)[name = tensor("input_81_cast")]; + tensor text_encoder_text_model_encoder_layers_4_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(310363392)))]; + tensor text_encoder_text_model_encoder_layers_4_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_4_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323470656)))]; + tensor hidden_states_29_cast = linear(bias = text_encoder_text_model_encoder_layers_4_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_4_mlp_fc2_weight_to_fp16, x = input_81_cast)[name = tensor("hidden_states_29_cast")]; + tensor input_83_cast = add(x = input_75_cast, y = hidden_states_29_cast)[name = tensor("input_83_cast")]; + tensor hidden_states_31_axes_0 = const()[name = tensor("hidden_states_31_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323473280)))]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323475904)))]; + tensor hidden_states_31_cast = layer_norm(axes = hidden_states_31_axes_0, beta = text_encoder_text_model_encoder_layers_5_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_5_layer_norm1_weight_to_fp16, x = input_83_cast)[name = tensor("hidden_states_31_cast")]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323478528)))]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(326755392)))]; + tensor var_563_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_q_proj_weight_to_fp16, x = hidden_states_31_cast)[name = tensor("op_563_cast")]; + tensor var_564_to_fp16 = const()[name = tensor("op_564_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_35_cast = mul(x = var_563_cast, y = var_564_to_fp16)[name = tensor("tensor_35_cast")]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(326758016)))]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(330034880)))]; + tensor tensor_31_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_k_proj_weight_to_fp16, x = hidden_states_31_cast)[name = tensor("tensor_31_cast")]; + tensor var_569 = const()[name = tensor("op_569"), val = tensor([1, -1, 20, 64])]; + tensor var_570_cast = reshape(shape = var_569, x = tensor_31_cast)[name = tensor("op_570_cast")]; + tensor var_571_perm_0 = const()[name = tensor("op_571_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(330037504)))]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(333314368)))]; + tensor tensor_33_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_v_proj_weight_to_fp16, x = hidden_states_31_cast)[name = tensor("tensor_33_cast")]; + tensor var_576 = const()[name = tensor("op_576"), val = tensor([1, -1, 20, 64])]; + tensor var_577_cast = reshape(shape = var_576, x = tensor_33_cast)[name = tensor("op_577_cast")]; + tensor var_578_perm_0 = const()[name = tensor("op_578_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_585 = const()[name = tensor("op_585"), val = tensor([1, 77, 20, 64])]; + tensor var_586_cast = reshape(shape = var_585, x = tensor_35_cast)[name = tensor("op_586_cast")]; + tensor var_587_perm_0 = const()[name = tensor("op_587_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_589 = const()[name = tensor("op_589"), val = tensor([20, -1, 64])]; + tensor transpose_133 = transpose(perm = var_587_perm_0, x = var_586_cast)[name = tensor("transpose_133")]; + tensor query_states_11_cast = reshape(shape = var_589, x = transpose_133)[name = tensor("query_states_11_cast")]; + tensor var_591 = const()[name = tensor("op_591"), val = tensor([20, -1, 64])]; + tensor transpose_135 = transpose(perm = var_571_perm_0, x = var_570_cast)[name = tensor("transpose_135")]; + tensor key_states_23_cast = reshape(shape = var_591, x = transpose_135)[name = tensor("key_states_23_cast")]; + tensor var_593 = const()[name = tensor("op_593"), val = tensor([20, -1, 64])]; + tensor transpose_134 = transpose(perm = var_578_perm_0, x = var_577_cast)[name = tensor("transpose_134")]; + tensor value_states_23_cast = reshape(shape = var_593, x = transpose_134)[name = tensor("value_states_23_cast")]; + tensor var_596_perm_0 = const()[name = tensor("op_596_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_31_transpose_x_0 = const()[name = tensor("attn_weights_31_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_31_transpose_y_0 = const()[name = tensor("attn_weights_31_transpose_y_0"), val = tensor(false)]; + tensor 
transpose_132 = transpose(perm = var_596_perm_0, x = key_states_23_cast)[name = tensor("transpose_132")]; + tensor attn_weights_31_cast = matmul(transpose_x = attn_weights_31_transpose_x_0, transpose_y = attn_weights_31_transpose_y_0, x = query_states_11_cast, y = transpose_132)[name = tensor("attn_weights_31_cast")]; + tensor var_598 = const()[name = tensor("op_598"), val = tensor([1, 20, 77, 77])]; + tensor var_599_cast = reshape(shape = var_598, x = attn_weights_31_cast)[name = tensor("op_599_cast")]; + tensor attn_weights_33_cast = add(x = var_599_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_33_cast")]; + tensor var_604 = const()[name = tensor("op_604"), val = tensor([20, 77, 77])]; + tensor input_85_cast = reshape(shape = var_604, x = attn_weights_33_cast)[name = tensor("input_85_cast")]; + tensor input_87_cast = softmax(axis = var_5, x = input_85_cast)[name = tensor("input_87_cast")]; + tensor attn_output_31_transpose_x_0 = const()[name = tensor("attn_output_31_transpose_x_0"), val = tensor(false)]; + tensor attn_output_31_transpose_y_0 = const()[name = tensor("attn_output_31_transpose_y_0"), val = tensor(false)]; + tensor attn_output_31_cast = matmul(transpose_x = attn_output_31_transpose_x_0, transpose_y = attn_output_31_transpose_y_0, x = input_87_cast, y = value_states_23_cast)[name = tensor("attn_output_31_cast")]; + tensor var_609 = const()[name = tensor("op_609"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_33_cast = reshape(shape = var_609, x = attn_output_31_cast)[name = tensor("attn_output_33_cast")]; + tensor attn_output_35_perm_0 = const()[name = tensor("attn_output_35_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_612 = const()[name = tensor("op_612"), val = tensor([1, 77, 1280])]; + tensor transpose_131 = transpose(perm = attn_output_35_perm_0, x = attn_output_33_cast)[name = tensor("transpose_131")]; + tensor input_89_cast = reshape(shape = var_612, x = transpose_131)[name = tensor("input_89_cast")]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(333316992)))]; + tensor text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336593856)))]; + tensor hidden_states_33_cast = linear(bias = text_encoder_text_model_encoder_layers_5_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_self_attn_out_proj_weight_to_fp16, x = input_89_cast)[name = tensor("hidden_states_33_cast")]; + tensor input_91_cast = add(x = input_83_cast, y = hidden_states_33_cast)[name = tensor("input_91_cast")]; + tensor input_93_axes_0 = const()[name = tensor("input_93_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336596480)))]; + tensor text_encoder_text_model_encoder_layers_5_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(336599104)))]; + tensor input_93_cast = layer_norm(axes = input_93_axes_0, beta = text_encoder_text_model_encoder_layers_5_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_5_layer_norm2_weight_to_fp16, x = input_91_cast)[name = tensor("input_93_cast")]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336601728)))]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349708992)))]; + tensor input_95_cast = linear(bias = text_encoder_text_model_encoder_layers_5_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_mlp_fc1_weight_to_fp16, x = input_93_cast)[name = tensor("input_95_cast")]; + tensor input_97_mode_0 = const()[name = tensor("input_97_mode_0"), val = tensor("EXACT")]; + tensor input_97_cast = gelu(mode = input_97_mode_0, x = input_95_cast)[name = tensor("input_97_cast")]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349719296)))]; + tensor text_encoder_text_model_encoder_layers_5_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_5_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(362826560)))]; + tensor hidden_states_35_cast = linear(bias = text_encoder_text_model_encoder_layers_5_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_5_mlp_fc2_weight_to_fp16, x = input_97_cast)[name = tensor("hidden_states_35_cast")]; + tensor input_99_cast = add(x = input_91_cast, y = hidden_states_35_cast)[name = tensor("input_99_cast")]; + tensor hidden_states_37_axes_0 = const()[name = tensor("hidden_states_37_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_6_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(362829184)))]; + tensor text_encoder_text_model_encoder_layers_6_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(362831808)))]; + tensor hidden_states_37_cast = layer_norm(axes = hidden_states_37_axes_0, beta = text_encoder_text_model_encoder_layers_6_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_6_layer_norm1_weight_to_fp16, x = input_99_cast)[name = tensor("hidden_states_37_cast")]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(362834432)))]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(366111296)))]; + tensor var_650_cast = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_q_proj_weight_to_fp16, x = hidden_states_37_cast)[name = tensor("op_650_cast")]; + tensor var_651_to_fp16 = const()[name = tensor("op_651_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_41_cast = mul(x = var_650_cast, y = var_651_to_fp16)[name = tensor("tensor_41_cast")]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(366113920)))]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(369390784)))]; + tensor tensor_37_cast = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_k_proj_weight_to_fp16, x = hidden_states_37_cast)[name = tensor("tensor_37_cast")]; + tensor var_656 = const()[name = tensor("op_656"), val = tensor([1, -1, 20, 64])]; + tensor var_657_cast = reshape(shape = var_656, x = tensor_37_cast)[name = tensor("op_657_cast")]; + tensor var_658_perm_0 = const()[name = tensor("op_658_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(369393408)))]; + tensor text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372670272)))]; + tensor tensor_39_cast = linear(bias = text_encoder_text_model_encoder_layers_6_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_6_self_attn_v_proj_weight_to_fp16, x = hidden_states_37_cast)[name = tensor("tensor_39_cast")]; + tensor var_663 = const()[name = tensor("op_663"), val = tensor([1, -1, 20, 64])]; + tensor var_664_cast = reshape(shape = var_663, x = tensor_39_cast)[name = tensor("op_664_cast")]; + tensor var_665_perm_0 = const()[name = tensor("op_665_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_672 = const()[name = tensor("op_672"), val = tensor([1, 77, 20, 64])]; + tensor var_673_cast = reshape(shape = var_672, x = tensor_41_cast)[name = tensor("op_673_cast")]; + tensor var_674_perm_0 = const()[name = tensor("op_674_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_676 = const()[name = tensor("op_676"), val = tensor([20, -1, 64])]; + tensor transpose_128 = transpose(perm = var_674_perm_0, x = var_673_cast)[name = tensor("transpose_128")]; + tensor query_states_13_cast = reshape(shape = var_676, x = transpose_128)[name = tensor("query_states_13_cast")]; + tensor var_678 = const()[name = tensor("op_678"), val = tensor([20, -1, 64])]; + tensor transpose_130 = transpose(perm = var_658_perm_0, 
[model.mil, TextEncoder.mlmodelc — program text continues: remainder of encoder layer 6 (scaled q·kᵀ attention over the 77-token sequence, additive causal mask, softmax, attention·value matmul, head merge, out_proj at weight/bias offsets 372672896/375949760, residual add, layer_norm2 at 375952384/375955008, mlp fc1 at 375957632/389064896, gelu mode "EXACT", mlp fc2 at 389075200/402182464, residual add), followed by encoder layer 7, which repeats the identical op pattern: layer_norm1 at 402185088/402187712 → q/k/v linear projections (q_proj 402190336/405467200, k_proj 405469824/408746688, v_proj 408749312/412026176) with the query pre-scaled by 0x1p-3 = 1/8 → reshape to 20 heads × 64 dims → masked softmax attention → out_proj 412028800/415305664 → residual → layer_norm2 415308288/415310912 → fc1 415313536/428420800 (1280 → 5120) → gelu → fc2 428431104/441538368. All weights are fp16 consts loaded as BLOBFILE references into @model_path/weights/weight.bin.]
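Every encoder layer in this file is the same computation written out as MIL ops. As a reading aid, here is a minimal NumPy sketch of one such layer, reconstructed only from the shapes and op names visible in the dump: hidden width 1280, 20 heads of 64 dims over 77 tokens, query pre-scaled by 0x1p-3 = 1/8 = 1/sqrt(64), additive causal mask, exact GELU, and a 1280 → 5120 → 1280 MLP. The weight dictionary, its key names, and the layer-norm epsilon (the MIL reads it from var_12_to_fp16, defined earlier in the file) are illustrative stand-ins; this is not the repository's code.

```python
import numpy as np
from math import erf

# Shapes read off the MIL text: batch 1, 77 tokens, width 1280,
# 20 attention heads x 64 dims per head, MLP inner width 5120.
B, T, D, H, HD, FF = 1, 77, 1280, 20, 64, 5120

def layer_norm(x, gamma, beta, eps=1e-5):  # eps is an assumed value
    mu = x.mean(-1, keepdims=True)
    return (x - mu) / np.sqrt(x.var(-1, keepdims=True) + eps) * gamma + beta

# gelu(mode = "EXACT") in the MIL: 0.5 * x * (1 + erf(x / sqrt(2)))
gelu = np.vectorize(lambda v: 0.5 * v * (1.0 + erf(v / np.sqrt(2.0))))

def encoder_layer(x, w):
    """One CLIP encoder layer; w holds stand-ins for the fp16 blobs
    the MIL loads by byte offset from weights/weight.bin."""
    h = layer_norm(x, w["ln1_g"], w["ln1_b"])            # layer_norm1
    q = (h @ w["wq"].T + w["bq"]) * 0.125                # q_proj, then mul by 0x1p-3
    k = h @ w["wk"].T + w["bk"]                          # k_proj
    v = h @ w["wv"].T + w["bv"]                          # v_proj
    # reshape [1,77,1280] -> [1,77,20,64] -> [20,77,64], as the reshape/transpose ops do
    split = lambda t: t.reshape(B, T, H, HD).transpose(0, 2, 1, 3).reshape(B * H, T, HD)
    q, k, v = split(q), split(k), split(v)
    mask = np.triu(np.full((T, T), -np.inf), k=1)        # additive causal_attention_mask
    a = q @ k.transpose(0, 2, 1) + mask                  # attn_weights, [20,77,77]
    a = np.exp(a - a.max(-1, keepdims=True))
    a /= a.sum(-1, keepdims=True)                        # softmax over the last axis
    o = (a @ v).reshape(B, H, T, HD).transpose(0, 2, 1, 3).reshape(B, T, D)
    x = x + (o @ w["wo"].T + w["bo"])                    # out_proj + residual add
    h = layer_norm(x, w["ln2_g"], w["ln2_b"])            # layer_norm2
    h = gelu(h @ w["w1"].T + w["b1"])                    # mlp fc1 (1280 -> 5120) + gelu
    return x + (h @ w["w2"].T + w["b2"])                 # mlp fc2 + residual add

# Smoke test with random stand-in weights (fp32 here; the model stores fp16).
rng = np.random.default_rng(0)
w = {"ln1_g": np.ones(D), "ln1_b": np.zeros(D), "ln2_g": np.ones(D), "ln2_b": np.zeros(D)}
for name, shape in [("wq", (D, D)), ("wk", (D, D)), ("wv", (D, D)), ("wo", (D, D)),
                    ("w1", (FF, D)), ("w2", (D, FF))]:
    w[name] = rng.standard_normal(shape) * 0.02
    w["b" + name[1:]] = np.zeros(shape[0])
print(encoder_layer(rng.standard_normal((B, T, D)), w).shape)  # (1, 77, 1280)
```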
[model.mil continues: layer 7's fc2 output and residual add, then encoder layers 8 and 9 with the same per-layer op pattern — layer_norm1, q/k/v projections with the 1/8 query scale, 20-head masked softmax attention, out_proj, residual add, layer_norm2, fc1 → gelu("EXACT") → fc2, residual add. Layer 8 weights span offsets 441540992 (layer_norm1 weight) through 480894272 (fc2 bias); layer 9 weights span 480896896 through 520250176. The span ends with the opening of layer 10: layer_norm1 at 520252800/520255424 and the q_proj weight at 520258048.]
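A side effect of the blob layout is that consecutive byte offsets double as a shape check: the gap between one const's offset and the next is the fp16 payload plus a small fixed overhead. A quick check against two weights from the span above (the 64-byte overhead is an observation from these numbers, not a documented property of the format):

```python
# Offset pairs copied from consecutive const() declarations above:
# (name, start_offset, next_offset, expected element count)
checks = [
    ("layers_7_q_proj_weight",  402190336, 405467200, 1280 * 1280),
    ("layers_7_mlp_fc1_weight", 415313536, 428420800, 1280 * 5120),
]
for name, start, nxt, expected in checks:
    n_fp16 = (nxt - start) // 2  # 2 bytes per fp16 element
    print(f"{name}: {n_fp16} fp16 values, expected {expected}, overhead {n_fp16 - expected}")
# Both print an overhead of 32 fp16 values (64 bytes) of blob padding/metadata.
```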
tensor("@model_path/weights/weight.bin"), offset = tensor(523534912)))]; + tensor var_998_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_q_proj_weight_to_fp16, x = hidden_states_61_cast)[name = tensor("op_998_cast")]; + tensor var_999_to_fp16 = const()[name = tensor("op_999_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_65_cast = mul(x = var_998_cast, y = var_999_to_fp16)[name = tensor("tensor_65_cast")]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(523537536)))]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526814400)))]; + tensor tensor_61_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_k_proj_weight_to_fp16, x = hidden_states_61_cast)[name = tensor("tensor_61_cast")]; + tensor var_1004 = const()[name = tensor("op_1004"), val = tensor([1, -1, 20, 64])]; + tensor var_1005_cast = reshape(shape = var_1004, x = tensor_61_cast)[name = tensor("op_1005_cast")]; + tensor var_1006_perm_0 = const()[name = tensor("op_1006_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526817024)))]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(530093888)))]; + tensor tensor_63_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_self_attn_v_proj_weight_to_fp16, x = hidden_states_61_cast)[name = tensor("tensor_63_cast")]; + tensor var_1011 = const()[name = tensor("op_1011"), val = tensor([1, -1, 20, 64])]; + tensor var_1012_cast = reshape(shape = var_1011, x = tensor_63_cast)[name = tensor("op_1012_cast")]; + tensor var_1013_perm_0 = const()[name = tensor("op_1013_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1020 = const()[name = tensor("op_1020"), val = tensor([1, 77, 20, 64])]; + tensor var_1021_cast = reshape(shape = var_1020, x = tensor_65_cast)[name = tensor("op_1021_cast")]; + tensor var_1022_perm_0 = const()[name = tensor("op_1022_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1024 = const()[name = tensor("op_1024"), val = tensor([20, -1, 64])]; + tensor transpose_108 = transpose(perm = var_1022_perm_0, x = var_1021_cast)[name = tensor("transpose_108")]; + tensor query_states_21_cast = reshape(shape = var_1024, x = transpose_108)[name = tensor("query_states_21_cast")]; + tensor var_1026 = const()[name = tensor("op_1026"), val = tensor([20, -1, 64])]; + tensor transpose_110 = transpose(perm = var_1006_perm_0, x = var_1005_cast)[name = tensor("transpose_110")]; + tensor 
key_states_43_cast = reshape(shape = var_1026, x = transpose_110)[name = tensor("key_states_43_cast")]; + tensor var_1028 = const()[name = tensor("op_1028"), val = tensor([20, -1, 64])]; + tensor transpose_109 = transpose(perm = var_1013_perm_0, x = var_1012_cast)[name = tensor("transpose_109")]; + tensor value_states_43_cast = reshape(shape = var_1028, x = transpose_109)[name = tensor("value_states_43_cast")]; + tensor var_1031_perm_0 = const()[name = tensor("op_1031_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_61_transpose_x_0 = const()[name = tensor("attn_weights_61_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_61_transpose_y_0 = const()[name = tensor("attn_weights_61_transpose_y_0"), val = tensor(false)]; + tensor transpose_107 = transpose(perm = var_1031_perm_0, x = key_states_43_cast)[name = tensor("transpose_107")]; + tensor attn_weights_61_cast = matmul(transpose_x = attn_weights_61_transpose_x_0, transpose_y = attn_weights_61_transpose_y_0, x = query_states_21_cast, y = transpose_107)[name = tensor("attn_weights_61_cast")]; + tensor var_1033 = const()[name = tensor("op_1033"), val = tensor([1, 20, 77, 77])]; + tensor var_1034_cast = reshape(shape = var_1033, x = attn_weights_61_cast)[name = tensor("op_1034_cast")]; + tensor attn_weights_63_cast = add(x = var_1034_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_63_cast")]; + tensor var_1039 = const()[name = tensor("op_1039"), val = tensor([20, 77, 77])]; + tensor input_165_cast = reshape(shape = var_1039, x = attn_weights_63_cast)[name = tensor("input_165_cast")]; + tensor input_167_cast = softmax(axis = var_5, x = input_165_cast)[name = tensor("input_167_cast")]; + tensor attn_output_61_transpose_x_0 = const()[name = tensor("attn_output_61_transpose_x_0"), val = tensor(false)]; + tensor attn_output_61_transpose_y_0 = const()[name = tensor("attn_output_61_transpose_y_0"), val = tensor(false)]; + tensor attn_output_61_cast = matmul(transpose_x = attn_output_61_transpose_x_0, transpose_y = attn_output_61_transpose_y_0, x = input_167_cast, y = value_states_43_cast)[name = tensor("attn_output_61_cast")]; + tensor var_1044 = const()[name = tensor("op_1044"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_63_cast = reshape(shape = var_1044, x = attn_output_61_cast)[name = tensor("attn_output_63_cast")]; + tensor attn_output_65_perm_0 = const()[name = tensor("attn_output_65_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1047 = const()[name = tensor("op_1047"), val = tensor([1, 77, 1280])]; + tensor transpose_106 = transpose(perm = attn_output_65_perm_0, x = attn_output_63_cast)[name = tensor("transpose_106")]; + tensor input_169_cast = reshape(shape = var_1047, x = transpose_106)[name = tensor("input_169_cast")]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(530096512)))]; + tensor text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533373376)))]; + tensor hidden_states_63_cast = linear(bias = text_encoder_text_model_encoder_layers_10_self_attn_out_proj_bias_to_fp16, weight = 
text_encoder_text_model_encoder_layers_10_self_attn_out_proj_weight_to_fp16, x = input_169_cast)[name = tensor("hidden_states_63_cast")]; + tensor input_171_cast = add(x = input_163_cast, y = hidden_states_63_cast)[name = tensor("input_171_cast")]; + tensor input_173_axes_0 = const()[name = tensor("input_173_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_10_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533376000)))]; + tensor text_encoder_text_model_encoder_layers_10_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533378624)))]; + tensor input_173_cast = layer_norm(axes = input_173_axes_0, beta = text_encoder_text_model_encoder_layers_10_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_10_layer_norm2_weight_to_fp16, x = input_171_cast)[name = tensor("input_173_cast")]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533381248)))]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(546488512)))]; + tensor input_175_cast = linear(bias = text_encoder_text_model_encoder_layers_10_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_mlp_fc1_weight_to_fp16, x = input_173_cast)[name = tensor("input_175_cast")]; + tensor input_177_mode_0 = const()[name = tensor("input_177_mode_0"), val = tensor("EXACT")]; + tensor input_177_cast = gelu(mode = input_177_mode_0, x = input_175_cast)[name = tensor("input_177_cast")]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(546498816)))]; + tensor text_encoder_text_model_encoder_layers_10_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_10_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(559606080)))]; + tensor hidden_states_65_cast = linear(bias = text_encoder_text_model_encoder_layers_10_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_10_mlp_fc2_weight_to_fp16, x = input_177_cast)[name = tensor("hidden_states_65_cast")]; + tensor input_179_cast = add(x = input_171_cast, y = hidden_states_65_cast)[name = tensor("input_179_cast")]; + tensor hidden_states_67_axes_0 = const()[name = tensor("hidden_states_67_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(559608704)))]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm1_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_11_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(559611328)))]; + tensor hidden_states_67_cast = layer_norm(axes = hidden_states_67_axes_0, beta = text_encoder_text_model_encoder_layers_11_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_11_layer_norm1_weight_to_fp16, x = input_179_cast)[name = tensor("hidden_states_67_cast")]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(559613952)))]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(562890816)))]; + tensor var_1085_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_q_proj_weight_to_fp16, x = hidden_states_67_cast)[name = tensor("op_1085_cast")]; + tensor var_1086_to_fp16 = const()[name = tensor("op_1086_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_71_cast = mul(x = var_1085_cast, y = var_1086_to_fp16)[name = tensor("tensor_71_cast")]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(562893440)))]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(566170304)))]; + tensor tensor_67_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_k_proj_weight_to_fp16, x = hidden_states_67_cast)[name = tensor("tensor_67_cast")]; + tensor var_1091 = const()[name = tensor("op_1091"), val = tensor([1, -1, 20, 64])]; + tensor var_1092_cast = reshape(shape = var_1091, x = tensor_67_cast)[name = tensor("op_1092_cast")]; + tensor var_1093_perm_0 = const()[name = tensor("op_1093_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(566172928)))]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569449792)))]; + tensor tensor_69_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_v_proj_weight_to_fp16, x = hidden_states_67_cast)[name = tensor("tensor_69_cast")]; + tensor var_1098 = const()[name = tensor("op_1098"), val = tensor([1, -1, 20, 64])]; + 
tensor var_1099_cast = reshape(shape = var_1098, x = tensor_69_cast)[name = tensor("op_1099_cast")]; + tensor var_1100_perm_0 = const()[name = tensor("op_1100_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1107 = const()[name = tensor("op_1107"), val = tensor([1, 77, 20, 64])]; + tensor var_1108_cast = reshape(shape = var_1107, x = tensor_71_cast)[name = tensor("op_1108_cast")]; + tensor var_1109_perm_0 = const()[name = tensor("op_1109_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1111 = const()[name = tensor("op_1111"), val = tensor([20, -1, 64])]; + tensor transpose_103 = transpose(perm = var_1109_perm_0, x = var_1108_cast)[name = tensor("transpose_103")]; + tensor query_states_23_cast = reshape(shape = var_1111, x = transpose_103)[name = tensor("query_states_23_cast")]; + tensor var_1113 = const()[name = tensor("op_1113"), val = tensor([20, -1, 64])]; + tensor transpose_105 = transpose(perm = var_1093_perm_0, x = var_1092_cast)[name = tensor("transpose_105")]; + tensor key_states_47_cast = reshape(shape = var_1113, x = transpose_105)[name = tensor("key_states_47_cast")]; + tensor var_1115 = const()[name = tensor("op_1115"), val = tensor([20, -1, 64])]; + tensor transpose_104 = transpose(perm = var_1100_perm_0, x = var_1099_cast)[name = tensor("transpose_104")]; + tensor value_states_47_cast = reshape(shape = var_1115, x = transpose_104)[name = tensor("value_states_47_cast")]; + tensor var_1118_perm_0 = const()[name = tensor("op_1118_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_67_transpose_x_0 = const()[name = tensor("attn_weights_67_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_67_transpose_y_0 = const()[name = tensor("attn_weights_67_transpose_y_0"), val = tensor(false)]; + tensor transpose_102 = transpose(perm = var_1118_perm_0, x = key_states_47_cast)[name = tensor("transpose_102")]; + tensor attn_weights_67_cast = matmul(transpose_x = attn_weights_67_transpose_x_0, transpose_y = attn_weights_67_transpose_y_0, x = query_states_23_cast, y = transpose_102)[name = tensor("attn_weights_67_cast")]; + tensor var_1120 = const()[name = tensor("op_1120"), val = tensor([1, 20, 77, 77])]; + tensor var_1121_cast = reshape(shape = var_1120, x = attn_weights_67_cast)[name = tensor("op_1121_cast")]; + tensor attn_weights_69_cast = add(x = var_1121_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_69_cast")]; + tensor var_1126 = const()[name = tensor("op_1126"), val = tensor([20, 77, 77])]; + tensor input_181_cast = reshape(shape = var_1126, x = attn_weights_69_cast)[name = tensor("input_181_cast")]; + tensor input_183_cast = softmax(axis = var_5, x = input_181_cast)[name = tensor("input_183_cast")]; + tensor attn_output_67_transpose_x_0 = const()[name = tensor("attn_output_67_transpose_x_0"), val = tensor(false)]; + tensor attn_output_67_transpose_y_0 = const()[name = tensor("attn_output_67_transpose_y_0"), val = tensor(false)]; + tensor attn_output_67_cast = matmul(transpose_x = attn_output_67_transpose_x_0, transpose_y = attn_output_67_transpose_y_0, x = input_183_cast, y = value_states_47_cast)[name = tensor("attn_output_67_cast")]; + tensor var_1131 = const()[name = tensor("op_1131"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_69_cast = reshape(shape = var_1131, x = attn_output_67_cast)[name = tensor("attn_output_69_cast")]; + tensor attn_output_71_perm_0 = const()[name = tensor("attn_output_71_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1134 = const()[name = tensor("op_1134"), val = tensor([1, 77, 1280])]; + tensor 
transpose_101 = transpose(perm = attn_output_71_perm_0, x = attn_output_69_cast)[name = tensor("transpose_101")]; + tensor input_185_cast = reshape(shape = var_1134, x = transpose_101)[name = tensor("input_185_cast")]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569452416)))]; + tensor text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(572729280)))]; + tensor hidden_states_69_cast = linear(bias = text_encoder_text_model_encoder_layers_11_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_self_attn_out_proj_weight_to_fp16, x = input_185_cast)[name = tensor("hidden_states_69_cast")]; + tensor input_187_cast = add(x = input_179_cast, y = hidden_states_69_cast)[name = tensor("input_187_cast")]; + tensor input_189_axes_0 = const()[name = tensor("input_189_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(572731904)))]; + tensor text_encoder_text_model_encoder_layers_11_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(572734528)))]; + tensor input_189_cast = layer_norm(axes = input_189_axes_0, beta = text_encoder_text_model_encoder_layers_11_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_11_layer_norm2_weight_to_fp16, x = input_187_cast)[name = tensor("input_189_cast")]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(572737152)))]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(585844416)))]; + tensor input_191_cast = linear(bias = text_encoder_text_model_encoder_layers_11_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_mlp_fc1_weight_to_fp16, x = input_189_cast)[name = tensor("input_191_cast")]; + tensor input_193_mode_0 = const()[name = tensor("input_193_mode_0"), val = tensor("EXACT")]; + tensor input_193_cast = gelu(mode = input_193_mode_0, x = input_191_cast)[name = tensor("input_193_cast")]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(585854720)))]; + tensor text_encoder_text_model_encoder_layers_11_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_11_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(598961984)))]; + tensor hidden_states_71_cast = linear(bias = text_encoder_text_model_encoder_layers_11_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_11_mlp_fc2_weight_to_fp16, x = input_193_cast)[name = tensor("hidden_states_71_cast")]; + tensor input_195_cast = add(x = input_187_cast, y = hidden_states_71_cast)[name = tensor("input_195_cast")]; + tensor hidden_states_73_axes_0 = const()[name = tensor("hidden_states_73_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_12_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(598964608)))]; + tensor text_encoder_text_model_encoder_layers_12_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(598967232)))]; + tensor hidden_states_73_cast = layer_norm(axes = hidden_states_73_axes_0, beta = text_encoder_text_model_encoder_layers_12_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_12_layer_norm1_weight_to_fp16, x = input_195_cast)[name = tensor("hidden_states_73_cast")]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(598969856)))]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(602246720)))]; + tensor var_1172_cast = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_self_attn_q_proj_weight_to_fp16, x = hidden_states_73_cast)[name = tensor("op_1172_cast")]; + tensor var_1173_to_fp16 = const()[name = tensor("op_1173_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_77_cast = mul(x = var_1172_cast, y = var_1173_to_fp16)[name = tensor("tensor_77_cast")]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(602249344)))]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(605526208)))]; + tensor tensor_73_cast = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_self_attn_k_proj_weight_to_fp16, x = hidden_states_73_cast)[name = tensor("tensor_73_cast")]; + tensor var_1178 = const()[name = tensor("op_1178"), val = tensor([1, -1, 20, 64])]; + tensor var_1179_cast = reshape(shape = var_1178, x = tensor_73_cast)[name = tensor("op_1179_cast")]; + tensor var_1180_perm_0 = const()[name = tensor("op_1180_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor 
text_encoder_text_model_encoder_layers_12_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(605528832)))]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(608805696)))]; + tensor tensor_75_cast = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_self_attn_v_proj_weight_to_fp16, x = hidden_states_73_cast)[name = tensor("tensor_75_cast")]; + tensor var_1185 = const()[name = tensor("op_1185"), val = tensor([1, -1, 20, 64])]; + tensor var_1186_cast = reshape(shape = var_1185, x = tensor_75_cast)[name = tensor("op_1186_cast")]; + tensor var_1187_perm_0 = const()[name = tensor("op_1187_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1194 = const()[name = tensor("op_1194"), val = tensor([1, 77, 20, 64])]; + tensor var_1195_cast = reshape(shape = var_1194, x = tensor_77_cast)[name = tensor("op_1195_cast")]; + tensor var_1196_perm_0 = const()[name = tensor("op_1196_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1198 = const()[name = tensor("op_1198"), val = tensor([20, -1, 64])]; + tensor transpose_98 = transpose(perm = var_1196_perm_0, x = var_1195_cast)[name = tensor("transpose_98")]; + tensor query_states_25_cast = reshape(shape = var_1198, x = transpose_98)[name = tensor("query_states_25_cast")]; + tensor var_1200 = const()[name = tensor("op_1200"), val = tensor([20, -1, 64])]; + tensor transpose_100 = transpose(perm = var_1180_perm_0, x = var_1179_cast)[name = tensor("transpose_100")]; + tensor key_states_51_cast = reshape(shape = var_1200, x = transpose_100)[name = tensor("key_states_51_cast")]; + tensor var_1202 = const()[name = tensor("op_1202"), val = tensor([20, -1, 64])]; + tensor transpose_99 = transpose(perm = var_1187_perm_0, x = var_1186_cast)[name = tensor("transpose_99")]; + tensor value_states_51_cast = reshape(shape = var_1202, x = transpose_99)[name = tensor("value_states_51_cast")]; + tensor var_1205_perm_0 = const()[name = tensor("op_1205_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_73_transpose_x_0 = const()[name = tensor("attn_weights_73_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_73_transpose_y_0 = const()[name = tensor("attn_weights_73_transpose_y_0"), val = tensor(false)]; + tensor transpose_97 = transpose(perm = var_1205_perm_0, x = key_states_51_cast)[name = tensor("transpose_97")]; + tensor attn_weights_73_cast = matmul(transpose_x = attn_weights_73_transpose_x_0, transpose_y = attn_weights_73_transpose_y_0, x = query_states_25_cast, y = transpose_97)[name = tensor("attn_weights_73_cast")]; + tensor var_1207 = const()[name = tensor("op_1207"), val = tensor([1, 20, 77, 77])]; + tensor var_1208_cast = reshape(shape = var_1207, x = attn_weights_73_cast)[name = tensor("op_1208_cast")]; + tensor attn_weights_75_cast = add(x = var_1208_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_75_cast")]; + tensor var_1213 = const()[name = tensor("op_1213"), val = tensor([20, 77, 77])]; + tensor input_197_cast = reshape(shape = var_1213, x = attn_weights_75_cast)[name = tensor("input_197_cast")]; + tensor input_199_cast = softmax(axis = var_5, x = 
input_197_cast)[name = tensor("input_199_cast")]; + tensor attn_output_73_transpose_x_0 = const()[name = tensor("attn_output_73_transpose_x_0"), val = tensor(false)]; + tensor attn_output_73_transpose_y_0 = const()[name = tensor("attn_output_73_transpose_y_0"), val = tensor(false)]; + tensor attn_output_73_cast = matmul(transpose_x = attn_output_73_transpose_x_0, transpose_y = attn_output_73_transpose_y_0, x = input_199_cast, y = value_states_51_cast)[name = tensor("attn_output_73_cast")]; + tensor var_1218 = const()[name = tensor("op_1218"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_75_cast = reshape(shape = var_1218, x = attn_output_73_cast)[name = tensor("attn_output_75_cast")]; + tensor attn_output_77_perm_0 = const()[name = tensor("attn_output_77_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1221 = const()[name = tensor("op_1221"), val = tensor([1, 77, 1280])]; + tensor transpose_96 = transpose(perm = attn_output_77_perm_0, x = attn_output_75_cast)[name = tensor("transpose_96")]; + tensor input_201_cast = reshape(shape = var_1221, x = transpose_96)[name = tensor("input_201_cast")]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(608808320)))]; + tensor text_encoder_text_model_encoder_layers_12_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(612085184)))]; + tensor hidden_states_75_cast = linear(bias = text_encoder_text_model_encoder_layers_12_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_self_attn_out_proj_weight_to_fp16, x = input_201_cast)[name = tensor("hidden_states_75_cast")]; + tensor input_203_cast = add(x = input_195_cast, y = hidden_states_75_cast)[name = tensor("input_203_cast")]; + tensor input_205_axes_0 = const()[name = tensor("input_205_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_12_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(612087808)))]; + tensor text_encoder_text_model_encoder_layers_12_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(612090432)))]; + tensor input_205_cast = layer_norm(axes = input_205_axes_0, beta = text_encoder_text_model_encoder_layers_12_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_12_layer_norm2_weight_to_fp16, x = input_203_cast)[name = tensor("input_205_cast")]; + tensor text_encoder_text_model_encoder_layers_12_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(612093056)))]; + tensor text_encoder_text_model_encoder_layers_12_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(625200320)))]; + 
tensor input_207_cast = linear(bias = text_encoder_text_model_encoder_layers_12_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_mlp_fc1_weight_to_fp16, x = input_205_cast)[name = tensor("input_207_cast")]; + tensor input_209_mode_0 = const()[name = tensor("input_209_mode_0"), val = tensor("EXACT")]; + tensor input_209_cast = gelu(mode = input_209_mode_0, x = input_207_cast)[name = tensor("input_209_cast")]; + tensor text_encoder_text_model_encoder_layers_12_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(625210624)))]; + tensor text_encoder_text_model_encoder_layers_12_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_12_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(638317888)))]; + tensor hidden_states_77_cast = linear(bias = text_encoder_text_model_encoder_layers_12_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_12_mlp_fc2_weight_to_fp16, x = input_209_cast)[name = tensor("hidden_states_77_cast")]; + tensor input_211_cast = add(x = input_203_cast, y = hidden_states_77_cast)[name = tensor("input_211_cast")]; + tensor hidden_states_79_axes_0 = const()[name = tensor("hidden_states_79_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_13_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(638320512)))]; + tensor text_encoder_text_model_encoder_layers_13_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(638323136)))]; + tensor hidden_states_79_cast = layer_norm(axes = hidden_states_79_axes_0, beta = text_encoder_text_model_encoder_layers_13_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_13_layer_norm1_weight_to_fp16, x = input_211_cast)[name = tensor("hidden_states_79_cast")]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(638325760)))]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(641602624)))]; + tensor var_1259_cast = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_self_attn_q_proj_weight_to_fp16, x = hidden_states_79_cast)[name = tensor("op_1259_cast")]; + tensor var_1260_to_fp16 = const()[name = tensor("op_1260_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_83_cast = mul(x = var_1259_cast, y = var_1260_to_fp16)[name = tensor("tensor_83_cast")]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(641605248)))]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(644882112)))]; + tensor tensor_79_cast = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_self_attn_k_proj_weight_to_fp16, x = hidden_states_79_cast)[name = tensor("tensor_79_cast")]; + tensor var_1265 = const()[name = tensor("op_1265"), val = tensor([1, -1, 20, 64])]; + tensor var_1266_cast = reshape(shape = var_1265, x = tensor_79_cast)[name = tensor("op_1266_cast")]; + tensor var_1267_perm_0 = const()[name = tensor("op_1267_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(644884736)))]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(648161600)))]; + tensor tensor_81_cast = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_self_attn_v_proj_weight_to_fp16, x = hidden_states_79_cast)[name = tensor("tensor_81_cast")]; + tensor var_1272 = const()[name = tensor("op_1272"), val = tensor([1, -1, 20, 64])]; + tensor var_1273_cast = reshape(shape = var_1272, x = tensor_81_cast)[name = tensor("op_1273_cast")]; + tensor var_1274_perm_0 = const()[name = tensor("op_1274_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1281 = const()[name = tensor("op_1281"), val = tensor([1, 77, 20, 64])]; + tensor var_1282_cast = reshape(shape = var_1281, x = tensor_83_cast)[name = tensor("op_1282_cast")]; + tensor var_1283_perm_0 = const()[name = tensor("op_1283_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1285 = const()[name = tensor("op_1285"), val = tensor([20, -1, 64])]; + tensor transpose_93 = transpose(perm = var_1283_perm_0, x = var_1282_cast)[name = tensor("transpose_93")]; + tensor query_states_27_cast = reshape(shape = var_1285, x = transpose_93)[name = tensor("query_states_27_cast")]; + tensor var_1287 = const()[name = tensor("op_1287"), val = tensor([20, -1, 64])]; + tensor transpose_95 = transpose(perm = var_1267_perm_0, x = var_1266_cast)[name = tensor("transpose_95")]; + tensor key_states_55_cast = reshape(shape = var_1287, x = transpose_95)[name = tensor("key_states_55_cast")]; + tensor var_1289 = const()[name = tensor("op_1289"), val = tensor([20, -1, 64])]; + tensor transpose_94 = transpose(perm = var_1274_perm_0, x = var_1273_cast)[name = tensor("transpose_94")]; + tensor value_states_55_cast = reshape(shape = var_1289, x = transpose_94)[name = tensor("value_states_55_cast")]; + tensor var_1292_perm_0 = const()[name = tensor("op_1292_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_79_transpose_x_0 = const()[name = tensor("attn_weights_79_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_79_transpose_y_0 = const()[name = tensor("attn_weights_79_transpose_y_0"), val = tensor(false)]; + 
tensor transpose_92 = transpose(perm = var_1292_perm_0, x = key_states_55_cast)[name = tensor("transpose_92")]; + tensor attn_weights_79_cast = matmul(transpose_x = attn_weights_79_transpose_x_0, transpose_y = attn_weights_79_transpose_y_0, x = query_states_27_cast, y = transpose_92)[name = tensor("attn_weights_79_cast")]; + tensor var_1294 = const()[name = tensor("op_1294"), val = tensor([1, 20, 77, 77])]; + tensor var_1295_cast = reshape(shape = var_1294, x = attn_weights_79_cast)[name = tensor("op_1295_cast")]; + tensor attn_weights_81_cast = add(x = var_1295_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_81_cast")]; + tensor var_1300 = const()[name = tensor("op_1300"), val = tensor([20, 77, 77])]; + tensor input_213_cast = reshape(shape = var_1300, x = attn_weights_81_cast)[name = tensor("input_213_cast")]; + tensor input_215_cast = softmax(axis = var_5, x = input_213_cast)[name = tensor("input_215_cast")]; + tensor attn_output_79_transpose_x_0 = const()[name = tensor("attn_output_79_transpose_x_0"), val = tensor(false)]; + tensor attn_output_79_transpose_y_0 = const()[name = tensor("attn_output_79_transpose_y_0"), val = tensor(false)]; + tensor attn_output_79_cast = matmul(transpose_x = attn_output_79_transpose_x_0, transpose_y = attn_output_79_transpose_y_0, x = input_215_cast, y = value_states_55_cast)[name = tensor("attn_output_79_cast")]; + tensor var_1305 = const()[name = tensor("op_1305"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_81_cast = reshape(shape = var_1305, x = attn_output_79_cast)[name = tensor("attn_output_81_cast")]; + tensor attn_output_83_perm_0 = const()[name = tensor("attn_output_83_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1308 = const()[name = tensor("op_1308"), val = tensor([1, 77, 1280])]; + tensor transpose_91 = transpose(perm = attn_output_83_perm_0, x = attn_output_81_cast)[name = tensor("transpose_91")]; + tensor input_217_cast = reshape(shape = var_1308, x = transpose_91)[name = tensor("input_217_cast")]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(648164224)))]; + tensor text_encoder_text_model_encoder_layers_13_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651441088)))]; + tensor hidden_states_81_cast = linear(bias = text_encoder_text_model_encoder_layers_13_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_self_attn_out_proj_weight_to_fp16, x = input_217_cast)[name = tensor("hidden_states_81_cast")]; + tensor input_219_cast = add(x = input_211_cast, y = hidden_states_81_cast)[name = tensor("input_219_cast")]; + tensor input_221_axes_0 = const()[name = tensor("input_221_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_13_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651443712)))]; + tensor text_encoder_text_model_encoder_layers_13_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(651446336)))]; + tensor input_221_cast = layer_norm(axes = input_221_axes_0, beta = text_encoder_text_model_encoder_layers_13_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_13_layer_norm2_weight_to_fp16, x = input_219_cast)[name = tensor("input_221_cast")]; + tensor text_encoder_text_model_encoder_layers_13_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651448960)))]; + tensor text_encoder_text_model_encoder_layers_13_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(664556224)))]; + tensor input_223_cast = linear(bias = text_encoder_text_model_encoder_layers_13_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_mlp_fc1_weight_to_fp16, x = input_221_cast)[name = tensor("input_223_cast")]; + tensor input_225_mode_0 = const()[name = tensor("input_225_mode_0"), val = tensor("EXACT")]; + tensor input_225_cast = gelu(mode = input_225_mode_0, x = input_223_cast)[name = tensor("input_225_cast")]; + tensor text_encoder_text_model_encoder_layers_13_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(664566528)))]; + tensor text_encoder_text_model_encoder_layers_13_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_13_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(677673792)))]; + tensor hidden_states_83_cast = linear(bias = text_encoder_text_model_encoder_layers_13_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_13_mlp_fc2_weight_to_fp16, x = input_225_cast)[name = tensor("hidden_states_83_cast")]; + tensor input_227_cast = add(x = input_219_cast, y = hidden_states_83_cast)[name = tensor("input_227_cast")]; + tensor hidden_states_85_axes_0 = const()[name = tensor("hidden_states_85_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_14_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(677676416)))]; + tensor text_encoder_text_model_encoder_layers_14_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(677679040)))]; + tensor hidden_states_85_cast = layer_norm(axes = hidden_states_85_axes_0, beta = text_encoder_text_model_encoder_layers_14_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_14_layer_norm1_weight_to_fp16, x = input_227_cast)[name = tensor("hidden_states_85_cast")]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(677681664)))]; + tensor 
text_encoder_text_model_encoder_layers_14_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(680958528)))]; + tensor var_1346_cast = linear(bias = text_encoder_text_model_encoder_layers_14_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_self_attn_q_proj_weight_to_fp16, x = hidden_states_85_cast)[name = tensor("op_1346_cast")]; + tensor var_1347_to_fp16 = const()[name = tensor("op_1347_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_89_cast = mul(x = var_1346_cast, y = var_1347_to_fp16)[name = tensor("tensor_89_cast")]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(680961152)))]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(684238016)))]; + tensor tensor_85_cast = linear(bias = text_encoder_text_model_encoder_layers_14_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_self_attn_k_proj_weight_to_fp16, x = hidden_states_85_cast)[name = tensor("tensor_85_cast")]; + tensor var_1352 = const()[name = tensor("op_1352"), val = tensor([1, -1, 20, 64])]; + tensor var_1353_cast = reshape(shape = var_1352, x = tensor_85_cast)[name = tensor("op_1353_cast")]; + tensor var_1354_perm_0 = const()[name = tensor("op_1354_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(684240640)))]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(687517504)))]; + tensor tensor_87_cast = linear(bias = text_encoder_text_model_encoder_layers_14_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_self_attn_v_proj_weight_to_fp16, x = hidden_states_85_cast)[name = tensor("tensor_87_cast")]; + tensor var_1359 = const()[name = tensor("op_1359"), val = tensor([1, -1, 20, 64])]; + tensor var_1360_cast = reshape(shape = var_1359, x = tensor_87_cast)[name = tensor("op_1360_cast")]; + tensor var_1361_perm_0 = const()[name = tensor("op_1361_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1368 = const()[name = tensor("op_1368"), val = tensor([1, 77, 20, 64])]; + tensor var_1369_cast = reshape(shape = var_1368, x = tensor_89_cast)[name = tensor("op_1369_cast")]; + tensor var_1370_perm_0 = const()[name = tensor("op_1370_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1372 = const()[name = tensor("op_1372"), val = tensor([20, -1, 64])]; + tensor transpose_88 = transpose(perm = var_1370_perm_0, x = var_1369_cast)[name = tensor("transpose_88")]; + tensor query_states_29_cast = reshape(shape = var_1372, x = transpose_88)[name = tensor("query_states_29_cast")]; + 
tensor var_1374 = const()[name = tensor("op_1374"), val = tensor([20, -1, 64])]; + tensor transpose_90 = transpose(perm = var_1354_perm_0, x = var_1353_cast)[name = tensor("transpose_90")]; + tensor key_states_59_cast = reshape(shape = var_1374, x = transpose_90)[name = tensor("key_states_59_cast")]; + tensor var_1376 = const()[name = tensor("op_1376"), val = tensor([20, -1, 64])]; + tensor transpose_89 = transpose(perm = var_1361_perm_0, x = var_1360_cast)[name = tensor("transpose_89")]; + tensor value_states_59_cast = reshape(shape = var_1376, x = transpose_89)[name = tensor("value_states_59_cast")]; + tensor var_1379_perm_0 = const()[name = tensor("op_1379_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_85_transpose_x_0 = const()[name = tensor("attn_weights_85_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_85_transpose_y_0 = const()[name = tensor("attn_weights_85_transpose_y_0"), val = tensor(false)]; + tensor transpose_87 = transpose(perm = var_1379_perm_0, x = key_states_59_cast)[name = tensor("transpose_87")]; + tensor attn_weights_85_cast = matmul(transpose_x = attn_weights_85_transpose_x_0, transpose_y = attn_weights_85_transpose_y_0, x = query_states_29_cast, y = transpose_87)[name = tensor("attn_weights_85_cast")]; + tensor var_1381 = const()[name = tensor("op_1381"), val = tensor([1, 20, 77, 77])]; + tensor var_1382_cast = reshape(shape = var_1381, x = attn_weights_85_cast)[name = tensor("op_1382_cast")]; + tensor attn_weights_87_cast = add(x = var_1382_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_87_cast")]; + tensor var_1387 = const()[name = tensor("op_1387"), val = tensor([20, 77, 77])]; + tensor input_229_cast = reshape(shape = var_1387, x = attn_weights_87_cast)[name = tensor("input_229_cast")]; + tensor input_231_cast = softmax(axis = var_5, x = input_229_cast)[name = tensor("input_231_cast")]; + tensor attn_output_85_transpose_x_0 = const()[name = tensor("attn_output_85_transpose_x_0"), val = tensor(false)]; + tensor attn_output_85_transpose_y_0 = const()[name = tensor("attn_output_85_transpose_y_0"), val = tensor(false)]; + tensor attn_output_85_cast = matmul(transpose_x = attn_output_85_transpose_x_0, transpose_y = attn_output_85_transpose_y_0, x = input_231_cast, y = value_states_59_cast)[name = tensor("attn_output_85_cast")]; + tensor var_1392 = const()[name = tensor("op_1392"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_87_cast = reshape(shape = var_1392, x = attn_output_85_cast)[name = tensor("attn_output_87_cast")]; + tensor attn_output_89_perm_0 = const()[name = tensor("attn_output_89_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1395 = const()[name = tensor("op_1395"), val = tensor([1, 77, 1280])]; + tensor transpose_86 = transpose(perm = attn_output_89_perm_0, x = attn_output_87_cast)[name = tensor("transpose_86")]; + tensor input_233_cast = reshape(shape = var_1395, x = transpose_86)[name = tensor("input_233_cast")]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(687520128)))]; + tensor text_encoder_text_model_encoder_layers_14_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(690796992)))]; + tensor 
hidden_states_87_cast = linear(bias = text_encoder_text_model_encoder_layers_14_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_self_attn_out_proj_weight_to_fp16, x = input_233_cast)[name = tensor("hidden_states_87_cast")]; + tensor input_235_cast = add(x = input_227_cast, y = hidden_states_87_cast)[name = tensor("input_235_cast")]; + tensor input_237_axes_0 = const()[name = tensor("input_237_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_14_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(690799616)))]; + tensor text_encoder_text_model_encoder_layers_14_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(690802240)))]; + tensor input_237_cast = layer_norm(axes = input_237_axes_0, beta = text_encoder_text_model_encoder_layers_14_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_14_layer_norm2_weight_to_fp16, x = input_235_cast)[name = tensor("input_237_cast")]; + tensor text_encoder_text_model_encoder_layers_14_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(690804864)))]; + tensor text_encoder_text_model_encoder_layers_14_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703912128)))]; + tensor input_239_cast = linear(bias = text_encoder_text_model_encoder_layers_14_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_mlp_fc1_weight_to_fp16, x = input_237_cast)[name = tensor("input_239_cast")]; + tensor input_241_mode_0 = const()[name = tensor("input_241_mode_0"), val = tensor("EXACT")]; + tensor input_241_cast = gelu(mode = input_241_mode_0, x = input_239_cast)[name = tensor("input_241_cast")]; + tensor text_encoder_text_model_encoder_layers_14_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703922432)))]; + tensor text_encoder_text_model_encoder_layers_14_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_14_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717029696)))]; + tensor hidden_states_89_cast = linear(bias = text_encoder_text_model_encoder_layers_14_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_14_mlp_fc2_weight_to_fp16, x = input_241_cast)[name = tensor("hidden_states_89_cast")]; + tensor input_243_cast = add(x = input_235_cast, y = hidden_states_89_cast)[name = tensor("input_243_cast")]; + tensor hidden_states_91_axes_0 = const()[name = tensor("hidden_states_91_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_15_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717032320)))]; 
+ tensor text_encoder_text_model_encoder_layers_15_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717034944)))]; + tensor hidden_states_91_cast = layer_norm(axes = hidden_states_91_axes_0, beta = text_encoder_text_model_encoder_layers_15_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_15_layer_norm1_weight_to_fp16, x = input_243_cast)[name = tensor("hidden_states_91_cast")]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717037568)))]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(720314432)))]; + tensor var_1433_cast = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_self_attn_q_proj_weight_to_fp16, x = hidden_states_91_cast)[name = tensor("op_1433_cast")]; + tensor var_1434_to_fp16 = const()[name = tensor("op_1434_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_95_cast = mul(x = var_1433_cast, y = var_1434_to_fp16)[name = tensor("tensor_95_cast")]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(720317056)))]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(723593920)))]; + tensor tensor_91_cast = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_self_attn_k_proj_weight_to_fp16, x = hidden_states_91_cast)[name = tensor("tensor_91_cast")]; + tensor var_1439 = const()[name = tensor("op_1439"), val = tensor([1, -1, 20, 64])]; + tensor var_1440_cast = reshape(shape = var_1439, x = tensor_91_cast)[name = tensor("op_1440_cast")]; + tensor var_1441_perm_0 = const()[name = tensor("op_1441_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(723596544)))]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(726873408)))]; + tensor tensor_93_cast = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_self_attn_v_proj_weight_to_fp16, x = hidden_states_91_cast)[name = tensor("tensor_93_cast")]; 
+ tensor var_1446 = const()[name = tensor("op_1446"), val = tensor([1, -1, 20, 64])]; + tensor var_1447_cast = reshape(shape = var_1446, x = tensor_93_cast)[name = tensor("op_1447_cast")]; + tensor var_1448_perm_0 = const()[name = tensor("op_1448_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1455 = const()[name = tensor("op_1455"), val = tensor([1, 77, 20, 64])]; + tensor var_1456_cast = reshape(shape = var_1455, x = tensor_95_cast)[name = tensor("op_1456_cast")]; + tensor var_1457_perm_0 = const()[name = tensor("op_1457_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1459 = const()[name = tensor("op_1459"), val = tensor([20, -1, 64])]; + tensor transpose_83 = transpose(perm = var_1457_perm_0, x = var_1456_cast)[name = tensor("transpose_83")]; + tensor query_states_31_cast = reshape(shape = var_1459, x = transpose_83)[name = tensor("query_states_31_cast")]; + tensor var_1461 = const()[name = tensor("op_1461"), val = tensor([20, -1, 64])]; + tensor transpose_85 = transpose(perm = var_1441_perm_0, x = var_1440_cast)[name = tensor("transpose_85")]; + tensor key_states_63_cast = reshape(shape = var_1461, x = transpose_85)[name = tensor("key_states_63_cast")]; + tensor var_1463 = const()[name = tensor("op_1463"), val = tensor([20, -1, 64])]; + tensor transpose_84 = transpose(perm = var_1448_perm_0, x = var_1447_cast)[name = tensor("transpose_84")]; + tensor value_states_63_cast = reshape(shape = var_1463, x = transpose_84)[name = tensor("value_states_63_cast")]; + tensor var_1466_perm_0 = const()[name = tensor("op_1466_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_91_transpose_x_0 = const()[name = tensor("attn_weights_91_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_91_transpose_y_0 = const()[name = tensor("attn_weights_91_transpose_y_0"), val = tensor(false)]; + tensor transpose_82 = transpose(perm = var_1466_perm_0, x = key_states_63_cast)[name = tensor("transpose_82")]; + tensor attn_weights_91_cast = matmul(transpose_x = attn_weights_91_transpose_x_0, transpose_y = attn_weights_91_transpose_y_0, x = query_states_31_cast, y = transpose_82)[name = tensor("attn_weights_91_cast")]; + tensor var_1468 = const()[name = tensor("op_1468"), val = tensor([1, 20, 77, 77])]; + tensor var_1469_cast = reshape(shape = var_1468, x = attn_weights_91_cast)[name = tensor("op_1469_cast")]; + tensor attn_weights_93_cast = add(x = var_1469_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_93_cast")]; + tensor var_1474 = const()[name = tensor("op_1474"), val = tensor([20, 77, 77])]; + tensor input_245_cast = reshape(shape = var_1474, x = attn_weights_93_cast)[name = tensor("input_245_cast")]; + tensor input_247_cast = softmax(axis = var_5, x = input_245_cast)[name = tensor("input_247_cast")]; + tensor attn_output_91_transpose_x_0 = const()[name = tensor("attn_output_91_transpose_x_0"), val = tensor(false)]; + tensor attn_output_91_transpose_y_0 = const()[name = tensor("attn_output_91_transpose_y_0"), val = tensor(false)]; + tensor attn_output_91_cast = matmul(transpose_x = attn_output_91_transpose_x_0, transpose_y = attn_output_91_transpose_y_0, x = input_247_cast, y = value_states_63_cast)[name = tensor("attn_output_91_cast")]; + tensor var_1479 = const()[name = tensor("op_1479"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_93_cast = reshape(shape = var_1479, x = attn_output_91_cast)[name = tensor("attn_output_93_cast")]; + tensor attn_output_95_perm_0 = const()[name = tensor("attn_output_95_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1482 = 
const()[name = tensor("op_1482"), val = tensor([1, 77, 1280])]; + tensor transpose_81 = transpose(perm = attn_output_95_perm_0, x = attn_output_93_cast)[name = tensor("transpose_81")]; + tensor input_249_cast = reshape(shape = var_1482, x = transpose_81)[name = tensor("input_249_cast")]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(726876032)))]; + tensor text_encoder_text_model_encoder_layers_15_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(730152896)))]; + tensor hidden_states_93_cast = linear(bias = text_encoder_text_model_encoder_layers_15_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_self_attn_out_proj_weight_to_fp16, x = input_249_cast)[name = tensor("hidden_states_93_cast")]; + tensor input_251_cast = add(x = input_243_cast, y = hidden_states_93_cast)[name = tensor("input_251_cast")]; + tensor input_253_axes_0 = const()[name = tensor("input_253_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_15_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(730155520)))]; + tensor text_encoder_text_model_encoder_layers_15_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(730158144)))]; + tensor input_253_cast = layer_norm(axes = input_253_axes_0, beta = text_encoder_text_model_encoder_layers_15_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_15_layer_norm2_weight_to_fp16, x = input_251_cast)[name = tensor("input_253_cast")]; + tensor text_encoder_text_model_encoder_layers_15_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(730160768)))]; + tensor text_encoder_text_model_encoder_layers_15_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(743268032)))]; + tensor input_255_cast = linear(bias = text_encoder_text_model_encoder_layers_15_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_mlp_fc1_weight_to_fp16, x = input_253_cast)[name = tensor("input_255_cast")]; + tensor input_257_mode_0 = const()[name = tensor("input_257_mode_0"), val = tensor("EXACT")]; + tensor input_257_cast = gelu(mode = input_257_mode_0, x = input_255_cast)[name = tensor("input_257_cast")]; + tensor text_encoder_text_model_encoder_layers_15_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_15_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(743278336)))]; + tensor text_encoder_text_model_encoder_layers_15_mlp_fc2_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_15_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(756385600)))]; + tensor hidden_states_95_cast = linear(bias = text_encoder_text_model_encoder_layers_15_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_15_mlp_fc2_weight_to_fp16, x = input_257_cast)[name = tensor("hidden_states_95_cast")]; + tensor input_259_cast = add(x = input_251_cast, y = hidden_states_95_cast)[name = tensor("input_259_cast")]; + tensor hidden_states_97_axes_0 = const()[name = tensor("hidden_states_97_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_16_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(756388224)))]; + tensor text_encoder_text_model_encoder_layers_16_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(756390848)))]; + tensor hidden_states_97_cast = layer_norm(axes = hidden_states_97_axes_0, beta = text_encoder_text_model_encoder_layers_16_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_16_layer_norm1_weight_to_fp16, x = input_259_cast)[name = tensor("hidden_states_97_cast")]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(756393472)))]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(759670336)))]; + tensor var_1520_cast = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_self_attn_q_proj_weight_to_fp16, x = hidden_states_97_cast)[name = tensor("op_1520_cast")]; + tensor var_1521_to_fp16 = const()[name = tensor("op_1521_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_101_cast = mul(x = var_1520_cast, y = var_1521_to_fp16)[name = tensor("tensor_101_cast")]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(759672960)))]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762949824)))]; + tensor tensor_97_cast = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_self_attn_k_proj_weight_to_fp16, x = hidden_states_97_cast)[name = tensor("tensor_97_cast")]; + tensor var_1526 = const()[name = tensor("op_1526"), val = tensor([1, -1, 20, 64])]; + tensor var_1527_cast = reshape(shape = var_1526, x = tensor_97_cast)[name = tensor("op_1527_cast")]; + 
tensor var_1528_perm_0 = const()[name = tensor("op_1528_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762952448)))]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(766229312)))]; + tensor tensor_99_cast = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_self_attn_v_proj_weight_to_fp16, x = hidden_states_97_cast)[name = tensor("tensor_99_cast")]; + tensor var_1533 = const()[name = tensor("op_1533"), val = tensor([1, -1, 20, 64])]; + tensor var_1534_cast = reshape(shape = var_1533, x = tensor_99_cast)[name = tensor("op_1534_cast")]; + tensor var_1535_perm_0 = const()[name = tensor("op_1535_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1542 = const()[name = tensor("op_1542"), val = tensor([1, 77, 20, 64])]; + tensor var_1543_cast = reshape(shape = var_1542, x = tensor_101_cast)[name = tensor("op_1543_cast")]; + tensor var_1544_perm_0 = const()[name = tensor("op_1544_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1546 = const()[name = tensor("op_1546"), val = tensor([20, -1, 64])]; + tensor transpose_78 = transpose(perm = var_1544_perm_0, x = var_1543_cast)[name = tensor("transpose_78")]; + tensor query_states_33_cast = reshape(shape = var_1546, x = transpose_78)[name = tensor("query_states_33_cast")]; + tensor var_1548 = const()[name = tensor("op_1548"), val = tensor([20, -1, 64])]; + tensor transpose_80 = transpose(perm = var_1528_perm_0, x = var_1527_cast)[name = tensor("transpose_80")]; + tensor key_states_67_cast = reshape(shape = var_1548, x = transpose_80)[name = tensor("key_states_67_cast")]; + tensor var_1550 = const()[name = tensor("op_1550"), val = tensor([20, -1, 64])]; + tensor transpose_79 = transpose(perm = var_1535_perm_0, x = var_1534_cast)[name = tensor("transpose_79")]; + tensor value_states_67_cast = reshape(shape = var_1550, x = transpose_79)[name = tensor("value_states_67_cast")]; + tensor var_1553_perm_0 = const()[name = tensor("op_1553_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_97_transpose_x_0 = const()[name = tensor("attn_weights_97_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_97_transpose_y_0 = const()[name = tensor("attn_weights_97_transpose_y_0"), val = tensor(false)]; + tensor transpose_77 = transpose(perm = var_1553_perm_0, x = key_states_67_cast)[name = tensor("transpose_77")]; + tensor attn_weights_97_cast = matmul(transpose_x = attn_weights_97_transpose_x_0, transpose_y = attn_weights_97_transpose_y_0, x = query_states_33_cast, y = transpose_77)[name = tensor("attn_weights_97_cast")]; + tensor var_1555 = const()[name = tensor("op_1555"), val = tensor([1, 20, 77, 77])]; + tensor var_1556_cast = reshape(shape = var_1555, x = attn_weights_97_cast)[name = tensor("op_1556_cast")]; + tensor attn_weights_99_cast = add(x = var_1556_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_99_cast")]; + tensor var_1561 = const()[name = tensor("op_1561"), val = tensor([20, 77, 77])]; + tensor input_261_cast = reshape(shape = var_1561, x = 
attn_weights_99_cast)[name = tensor("input_261_cast")]; + tensor input_263_cast = softmax(axis = var_5, x = input_261_cast)[name = tensor("input_263_cast")]; + tensor attn_output_97_transpose_x_0 = const()[name = tensor("attn_output_97_transpose_x_0"), val = tensor(false)]; + tensor attn_output_97_transpose_y_0 = const()[name = tensor("attn_output_97_transpose_y_0"), val = tensor(false)]; + tensor attn_output_97_cast = matmul(transpose_x = attn_output_97_transpose_x_0, transpose_y = attn_output_97_transpose_y_0, x = input_263_cast, y = value_states_67_cast)[name = tensor("attn_output_97_cast")]; + tensor var_1566 = const()[name = tensor("op_1566"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_99_cast = reshape(shape = var_1566, x = attn_output_97_cast)[name = tensor("attn_output_99_cast")]; + tensor attn_output_101_perm_0 = const()[name = tensor("attn_output_101_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1569 = const()[name = tensor("op_1569"), val = tensor([1, 77, 1280])]; + tensor transpose_76 = transpose(perm = attn_output_101_perm_0, x = attn_output_99_cast)[name = tensor("transpose_76")]; + tensor input_265_cast = reshape(shape = var_1569, x = transpose_76)[name = tensor("input_265_cast")]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(766231936)))]; + tensor text_encoder_text_model_encoder_layers_16_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769508800)))]; + tensor hidden_states_99_cast = linear(bias = text_encoder_text_model_encoder_layers_16_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_self_attn_out_proj_weight_to_fp16, x = input_265_cast)[name = tensor("hidden_states_99_cast")]; + tensor input_267_cast = add(x = input_259_cast, y = hidden_states_99_cast)[name = tensor("input_267_cast")]; + tensor input_269_axes_0 = const()[name = tensor("input_269_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_16_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769511424)))]; + tensor text_encoder_text_model_encoder_layers_16_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769514048)))]; + tensor input_269_cast = layer_norm(axes = input_269_axes_0, beta = text_encoder_text_model_encoder_layers_16_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_16_layer_norm2_weight_to_fp16, x = input_267_cast)[name = tensor("input_269_cast")]; + tensor text_encoder_text_model_encoder_layers_16_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769516672)))]; + tensor text_encoder_text_model_encoder_layers_16_mlp_fc1_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_16_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(782623936)))]; + tensor input_271_cast = linear(bias = text_encoder_text_model_encoder_layers_16_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_mlp_fc1_weight_to_fp16, x = input_269_cast)[name = tensor("input_271_cast")]; + tensor input_273_mode_0 = const()[name = tensor("input_273_mode_0"), val = tensor("EXACT")]; + tensor input_273_cast = gelu(mode = input_273_mode_0, x = input_271_cast)[name = tensor("input_273_cast")]; + tensor text_encoder_text_model_encoder_layers_16_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(782634240)))]; + tensor text_encoder_text_model_encoder_layers_16_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_16_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(795741504)))]; + tensor hidden_states_101_cast = linear(bias = text_encoder_text_model_encoder_layers_16_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_16_mlp_fc2_weight_to_fp16, x = input_273_cast)[name = tensor("hidden_states_101_cast")]; + tensor input_275_cast = add(x = input_267_cast, y = hidden_states_101_cast)[name = tensor("input_275_cast")]; + tensor hidden_states_103_axes_0 = const()[name = tensor("hidden_states_103_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_17_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(795744128)))]; + tensor text_encoder_text_model_encoder_layers_17_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(795746752)))]; + tensor hidden_states_103_cast = layer_norm(axes = hidden_states_103_axes_0, beta = text_encoder_text_model_encoder_layers_17_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_17_layer_norm1_weight_to_fp16, x = input_275_cast)[name = tensor("hidden_states_103_cast")]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(795749376)))]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(799026240)))]; + tensor var_1607_cast = linear(bias = text_encoder_text_model_encoder_layers_17_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_self_attn_q_proj_weight_to_fp16, x = hidden_states_103_cast)[name = tensor("op_1607_cast")]; + tensor var_1608_to_fp16 = const()[name = tensor("op_1608_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_107_cast = mul(x = var_1607_cast, y = var_1608_to_fp16)[name = tensor("tensor_107_cast")]; + tensor 
text_encoder_text_model_encoder_layers_17_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(799028864)))]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(802305728)))]; + tensor tensor_103_cast = linear(bias = text_encoder_text_model_encoder_layers_17_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_self_attn_k_proj_weight_to_fp16, x = hidden_states_103_cast)[name = tensor("tensor_103_cast")]; + tensor var_1613 = const()[name = tensor("op_1613"), val = tensor([1, -1, 20, 64])]; + tensor var_1614_cast = reshape(shape = var_1613, x = tensor_103_cast)[name = tensor("op_1614_cast")]; + tensor var_1615_perm_0 = const()[name = tensor("op_1615_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(802308352)))]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(805585216)))]; + tensor tensor_105_cast = linear(bias = text_encoder_text_model_encoder_layers_17_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_self_attn_v_proj_weight_to_fp16, x = hidden_states_103_cast)[name = tensor("tensor_105_cast")]; + tensor var_1620 = const()[name = tensor("op_1620"), val = tensor([1, -1, 20, 64])]; + tensor var_1621_cast = reshape(shape = var_1620, x = tensor_105_cast)[name = tensor("op_1621_cast")]; + tensor var_1622_perm_0 = const()[name = tensor("op_1622_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1629 = const()[name = tensor("op_1629"), val = tensor([1, 77, 20, 64])]; + tensor var_1630_cast = reshape(shape = var_1629, x = tensor_107_cast)[name = tensor("op_1630_cast")]; + tensor var_1631_perm_0 = const()[name = tensor("op_1631_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1633 = const()[name = tensor("op_1633"), val = tensor([20, -1, 64])]; + tensor transpose_73 = transpose(perm = var_1631_perm_0, x = var_1630_cast)[name = tensor("transpose_73")]; + tensor query_states_35_cast = reshape(shape = var_1633, x = transpose_73)[name = tensor("query_states_35_cast")]; + tensor var_1635 = const()[name = tensor("op_1635"), val = tensor([20, -1, 64])]; + tensor transpose_75 = transpose(perm = var_1615_perm_0, x = var_1614_cast)[name = tensor("transpose_75")]; + tensor key_states_71_cast = reshape(shape = var_1635, x = transpose_75)[name = tensor("key_states_71_cast")]; + tensor var_1637 = const()[name = tensor("op_1637"), val = tensor([20, -1, 64])]; + tensor transpose_74 = transpose(perm = var_1622_perm_0, x = var_1621_cast)[name = tensor("transpose_74")]; + tensor value_states_71_cast = reshape(shape = var_1637, x = transpose_74)[name = tensor("value_states_71_cast")]; + tensor var_1640_perm_0 = const()[name = tensor("op_1640_perm_0"), val = tensor([0, 2, 1])]; + tensor 
attn_weights_103_transpose_x_0 = const()[name = tensor("attn_weights_103_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_103_transpose_y_0 = const()[name = tensor("attn_weights_103_transpose_y_0"), val = tensor(false)]; + tensor transpose_72 = transpose(perm = var_1640_perm_0, x = key_states_71_cast)[name = tensor("transpose_72")]; + tensor attn_weights_103_cast = matmul(transpose_x = attn_weights_103_transpose_x_0, transpose_y = attn_weights_103_transpose_y_0, x = query_states_35_cast, y = transpose_72)[name = tensor("attn_weights_103_cast")]; + tensor var_1642 = const()[name = tensor("op_1642"), val = tensor([1, 20, 77, 77])]; + tensor var_1643_cast = reshape(shape = var_1642, x = attn_weights_103_cast)[name = tensor("op_1643_cast")]; + tensor attn_weights_105_cast = add(x = var_1643_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_105_cast")]; + tensor var_1648 = const()[name = tensor("op_1648"), val = tensor([20, 77, 77])]; + tensor input_277_cast = reshape(shape = var_1648, x = attn_weights_105_cast)[name = tensor("input_277_cast")]; + tensor input_279_cast = softmax(axis = var_5, x = input_277_cast)[name = tensor("input_279_cast")]; + tensor attn_output_103_transpose_x_0 = const()[name = tensor("attn_output_103_transpose_x_0"), val = tensor(false)]; + tensor attn_output_103_transpose_y_0 = const()[name = tensor("attn_output_103_transpose_y_0"), val = tensor(false)]; + tensor attn_output_103_cast = matmul(transpose_x = attn_output_103_transpose_x_0, transpose_y = attn_output_103_transpose_y_0, x = input_279_cast, y = value_states_71_cast)[name = tensor("attn_output_103_cast")]; + tensor var_1653 = const()[name = tensor("op_1653"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_105_cast = reshape(shape = var_1653, x = attn_output_103_cast)[name = tensor("attn_output_105_cast")]; + tensor attn_output_107_perm_0 = const()[name = tensor("attn_output_107_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1656 = const()[name = tensor("op_1656"), val = tensor([1, 77, 1280])]; + tensor transpose_71 = transpose(perm = attn_output_107_perm_0, x = attn_output_105_cast)[name = tensor("transpose_71")]; + tensor input_281_cast = reshape(shape = var_1656, x = transpose_71)[name = tensor("input_281_cast")]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(805587840)))]; + tensor text_encoder_text_model_encoder_layers_17_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(808864704)))]; + tensor hidden_states_105_cast = linear(bias = text_encoder_text_model_encoder_layers_17_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_self_attn_out_proj_weight_to_fp16, x = input_281_cast)[name = tensor("hidden_states_105_cast")]; + tensor input_283_cast = add(x = input_275_cast, y = hidden_states_105_cast)[name = tensor("input_283_cast")]; + tensor input_285_axes_0 = const()[name = tensor("input_285_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_17_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(808867328)))]; + tensor text_encoder_text_model_encoder_layers_17_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(808869952)))]; + tensor input_285_cast = layer_norm(axes = input_285_axes_0, beta = text_encoder_text_model_encoder_layers_17_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_17_layer_norm2_weight_to_fp16, x = input_283_cast)[name = tensor("input_285_cast")]; + tensor text_encoder_text_model_encoder_layers_17_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(808872576)))]; + tensor text_encoder_text_model_encoder_layers_17_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(821979840)))]; + tensor input_287_cast = linear(bias = text_encoder_text_model_encoder_layers_17_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_mlp_fc1_weight_to_fp16, x = input_285_cast)[name = tensor("input_287_cast")]; + tensor input_289_mode_0 = const()[name = tensor("input_289_mode_0"), val = tensor("EXACT")]; + tensor input_289_cast = gelu(mode = input_289_mode_0, x = input_287_cast)[name = tensor("input_289_cast")]; + tensor text_encoder_text_model_encoder_layers_17_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(821990144)))]; + tensor text_encoder_text_model_encoder_layers_17_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_17_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(835097408)))]; + tensor hidden_states_107_cast = linear(bias = text_encoder_text_model_encoder_layers_17_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_17_mlp_fc2_weight_to_fp16, x = input_289_cast)[name = tensor("hidden_states_107_cast")]; + tensor input_291_cast = add(x = input_283_cast, y = hidden_states_107_cast)[name = tensor("input_291_cast")]; + tensor hidden_states_109_axes_0 = const()[name = tensor("hidden_states_109_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_18_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(835100032)))]; + tensor text_encoder_text_model_encoder_layers_18_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(835102656)))]; + tensor hidden_states_109_cast = layer_norm(axes = hidden_states_109_axes_0, beta = text_encoder_text_model_encoder_layers_18_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_18_layer_norm1_weight_to_fp16, x = input_291_cast)[name = tensor("hidden_states_109_cast")]; + tensor 
text_encoder_text_model_encoder_layers_18_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(835105280)))]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(838382144)))]; + tensor var_1694_cast = linear(bias = text_encoder_text_model_encoder_layers_18_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_self_attn_q_proj_weight_to_fp16, x = hidden_states_109_cast)[name = tensor("op_1694_cast")]; + tensor var_1695_to_fp16 = const()[name = tensor("op_1695_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_113_cast = mul(x = var_1694_cast, y = var_1695_to_fp16)[name = tensor("tensor_113_cast")]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(838384768)))]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(841661632)))]; + tensor tensor_109_cast = linear(bias = text_encoder_text_model_encoder_layers_18_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_self_attn_k_proj_weight_to_fp16, x = hidden_states_109_cast)[name = tensor("tensor_109_cast")]; + tensor var_1700 = const()[name = tensor("op_1700"), val = tensor([1, -1, 20, 64])]; + tensor var_1701_cast = reshape(shape = var_1700, x = tensor_109_cast)[name = tensor("op_1701_cast")]; + tensor var_1702_perm_0 = const()[name = tensor("op_1702_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(841664256)))]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844941120)))]; + tensor tensor_111_cast = linear(bias = text_encoder_text_model_encoder_layers_18_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_self_attn_v_proj_weight_to_fp16, x = hidden_states_109_cast)[name = tensor("tensor_111_cast")]; + tensor var_1707 = const()[name = tensor("op_1707"), val = tensor([1, -1, 20, 64])]; + tensor var_1708_cast = reshape(shape = var_1707, x = tensor_111_cast)[name = tensor("op_1708_cast")]; + tensor var_1709_perm_0 = const()[name = tensor("op_1709_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1716 = const()[name = tensor("op_1716"), val = tensor([1, 77, 20, 64])]; + tensor var_1717_cast = reshape(shape = var_1716, x = tensor_113_cast)[name = tensor("op_1717_cast")]; + tensor var_1718_perm_0 = const()[name = tensor("op_1718_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor 
var_1720 = const()[name = tensor("op_1720"), val = tensor([20, -1, 64])]; + tensor transpose_68 = transpose(perm = var_1718_perm_0, x = var_1717_cast)[name = tensor("transpose_68")]; + tensor query_states_37_cast = reshape(shape = var_1720, x = transpose_68)[name = tensor("query_states_37_cast")]; + tensor var_1722 = const()[name = tensor("op_1722"), val = tensor([20, -1, 64])]; + tensor transpose_70 = transpose(perm = var_1702_perm_0, x = var_1701_cast)[name = tensor("transpose_70")]; + tensor key_states_75_cast = reshape(shape = var_1722, x = transpose_70)[name = tensor("key_states_75_cast")]; + tensor var_1724 = const()[name = tensor("op_1724"), val = tensor([20, -1, 64])]; + tensor transpose_69 = transpose(perm = var_1709_perm_0, x = var_1708_cast)[name = tensor("transpose_69")]; + tensor value_states_75_cast = reshape(shape = var_1724, x = transpose_69)[name = tensor("value_states_75_cast")]; + tensor var_1727_perm_0 = const()[name = tensor("op_1727_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_109_transpose_x_0 = const()[name = tensor("attn_weights_109_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_109_transpose_y_0 = const()[name = tensor("attn_weights_109_transpose_y_0"), val = tensor(false)]; + tensor transpose_67 = transpose(perm = var_1727_perm_0, x = key_states_75_cast)[name = tensor("transpose_67")]; + tensor attn_weights_109_cast = matmul(transpose_x = attn_weights_109_transpose_x_0, transpose_y = attn_weights_109_transpose_y_0, x = query_states_37_cast, y = transpose_67)[name = tensor("attn_weights_109_cast")]; + tensor var_1729 = const()[name = tensor("op_1729"), val = tensor([1, 20, 77, 77])]; + tensor var_1730_cast = reshape(shape = var_1729, x = attn_weights_109_cast)[name = tensor("op_1730_cast")]; + tensor attn_weights_111_cast = add(x = var_1730_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_111_cast")]; + tensor var_1735 = const()[name = tensor("op_1735"), val = tensor([20, 77, 77])]; + tensor input_293_cast = reshape(shape = var_1735, x = attn_weights_111_cast)[name = tensor("input_293_cast")]; + tensor input_295_cast = softmax(axis = var_5, x = input_293_cast)[name = tensor("input_295_cast")]; + tensor attn_output_109_transpose_x_0 = const()[name = tensor("attn_output_109_transpose_x_0"), val = tensor(false)]; + tensor attn_output_109_transpose_y_0 = const()[name = tensor("attn_output_109_transpose_y_0"), val = tensor(false)]; + tensor attn_output_109_cast = matmul(transpose_x = attn_output_109_transpose_x_0, transpose_y = attn_output_109_transpose_y_0, x = input_295_cast, y = value_states_75_cast)[name = tensor("attn_output_109_cast")]; + tensor var_1740 = const()[name = tensor("op_1740"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_111_cast = reshape(shape = var_1740, x = attn_output_109_cast)[name = tensor("attn_output_111_cast")]; + tensor attn_output_113_perm_0 = const()[name = tensor("attn_output_113_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1743 = const()[name = tensor("op_1743"), val = tensor([1, 77, 1280])]; + tensor transpose_66 = transpose(perm = attn_output_113_perm_0, x = attn_output_111_cast)[name = tensor("transpose_66")]; + tensor input_297_cast = reshape(shape = var_1743, x = transpose_66)[name = tensor("input_297_cast")]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(844943744)))]; + tensor text_encoder_text_model_encoder_layers_18_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(848220608)))]; + tensor hidden_states_111_cast = linear(bias = text_encoder_text_model_encoder_layers_18_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_self_attn_out_proj_weight_to_fp16, x = input_297_cast)[name = tensor("hidden_states_111_cast")]; + tensor input_299_cast = add(x = input_291_cast, y = hidden_states_111_cast)[name = tensor("input_299_cast")]; + tensor input_301_axes_0 = const()[name = tensor("input_301_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_18_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(848223232)))]; + tensor text_encoder_text_model_encoder_layers_18_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(848225856)))]; + tensor input_301_cast = layer_norm(axes = input_301_axes_0, beta = text_encoder_text_model_encoder_layers_18_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_18_layer_norm2_weight_to_fp16, x = input_299_cast)[name = tensor("input_301_cast")]; + tensor text_encoder_text_model_encoder_layers_18_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(848228480)))]; + tensor text_encoder_text_model_encoder_layers_18_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(861335744)))]; + tensor input_303_cast = linear(bias = text_encoder_text_model_encoder_layers_18_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_mlp_fc1_weight_to_fp16, x = input_301_cast)[name = tensor("input_303_cast")]; + tensor input_305_mode_0 = const()[name = tensor("input_305_mode_0"), val = tensor("EXACT")]; + tensor input_305_cast = gelu(mode = input_305_mode_0, x = input_303_cast)[name = tensor("input_305_cast")]; + tensor text_encoder_text_model_encoder_layers_18_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(861346048)))]; + tensor text_encoder_text_model_encoder_layers_18_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_18_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(874453312)))]; + tensor hidden_states_113_cast = linear(bias = text_encoder_text_model_encoder_layers_18_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_18_mlp_fc2_weight_to_fp16, x = input_305_cast)[name = tensor("hidden_states_113_cast")]; + tensor input_307_cast = add(x = input_299_cast, y = hidden_states_113_cast)[name = tensor("input_307_cast")]; + tensor hidden_states_115_axes_0 = const()[name 
= tensor("hidden_states_115_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_19_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(874455936)))]; + tensor text_encoder_text_model_encoder_layers_19_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(874458560)))]; + tensor hidden_states_115_cast = layer_norm(axes = hidden_states_115_axes_0, beta = text_encoder_text_model_encoder_layers_19_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_19_layer_norm1_weight_to_fp16, x = input_307_cast)[name = tensor("hidden_states_115_cast")]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(874461184)))]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(877738048)))]; + tensor var_1781_cast = linear(bias = text_encoder_text_model_encoder_layers_19_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_self_attn_q_proj_weight_to_fp16, x = hidden_states_115_cast)[name = tensor("op_1781_cast")]; + tensor var_1782_to_fp16 = const()[name = tensor("op_1782_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_119_cast = mul(x = var_1781_cast, y = var_1782_to_fp16)[name = tensor("tensor_119_cast")]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(877740672)))]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(881017536)))]; + tensor tensor_115_cast = linear(bias = text_encoder_text_model_encoder_layers_19_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_self_attn_k_proj_weight_to_fp16, x = hidden_states_115_cast)[name = tensor("tensor_115_cast")]; + tensor var_1787 = const()[name = tensor("op_1787"), val = tensor([1, -1, 20, 64])]; + tensor var_1788_cast = reshape(shape = var_1787, x = tensor_115_cast)[name = tensor("op_1788_cast")]; + tensor var_1789_perm_0 = const()[name = tensor("op_1789_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(881020160)))]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_v_proj_bias_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(884297024)))]; + tensor tensor_117_cast = linear(bias = text_encoder_text_model_encoder_layers_19_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_self_attn_v_proj_weight_to_fp16, x = hidden_states_115_cast)[name = tensor("tensor_117_cast")]; + tensor var_1794 = const()[name = tensor("op_1794"), val = tensor([1, -1, 20, 64])]; + tensor var_1795_cast = reshape(shape = var_1794, x = tensor_117_cast)[name = tensor("op_1795_cast")]; + tensor var_1796_perm_0 = const()[name = tensor("op_1796_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1803 = const()[name = tensor("op_1803"), val = tensor([1, 77, 20, 64])]; + tensor var_1804_cast = reshape(shape = var_1803, x = tensor_119_cast)[name = tensor("op_1804_cast")]; + tensor var_1805_perm_0 = const()[name = tensor("op_1805_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1807 = const()[name = tensor("op_1807"), val = tensor([20, -1, 64])]; + tensor transpose_63 = transpose(perm = var_1805_perm_0, x = var_1804_cast)[name = tensor("transpose_63")]; + tensor query_states_39_cast = reshape(shape = var_1807, x = transpose_63)[name = tensor("query_states_39_cast")]; + tensor var_1809 = const()[name = tensor("op_1809"), val = tensor([20, -1, 64])]; + tensor transpose_65 = transpose(perm = var_1789_perm_0, x = var_1788_cast)[name = tensor("transpose_65")]; + tensor key_states_79_cast = reshape(shape = var_1809, x = transpose_65)[name = tensor("key_states_79_cast")]; + tensor var_1811 = const()[name = tensor("op_1811"), val = tensor([20, -1, 64])]; + tensor transpose_64 = transpose(perm = var_1796_perm_0, x = var_1795_cast)[name = tensor("transpose_64")]; + tensor value_states_79_cast = reshape(shape = var_1811, x = transpose_64)[name = tensor("value_states_79_cast")]; + tensor var_1814_perm_0 = const()[name = tensor("op_1814_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_115_transpose_x_0 = const()[name = tensor("attn_weights_115_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_115_transpose_y_0 = const()[name = tensor("attn_weights_115_transpose_y_0"), val = tensor(false)]; + tensor transpose_62 = transpose(perm = var_1814_perm_0, x = key_states_79_cast)[name = tensor("transpose_62")]; + tensor attn_weights_115_cast = matmul(transpose_x = attn_weights_115_transpose_x_0, transpose_y = attn_weights_115_transpose_y_0, x = query_states_39_cast, y = transpose_62)[name = tensor("attn_weights_115_cast")]; + tensor var_1816 = const()[name = tensor("op_1816"), val = tensor([1, 20, 77, 77])]; + tensor var_1817_cast = reshape(shape = var_1816, x = attn_weights_115_cast)[name = tensor("op_1817_cast")]; + tensor attn_weights_117_cast = add(x = var_1817_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_117_cast")]; + tensor var_1822 = const()[name = tensor("op_1822"), val = tensor([20, 77, 77])]; + tensor input_309_cast = reshape(shape = var_1822, x = attn_weights_117_cast)[name = tensor("input_309_cast")]; + tensor input_311_cast = softmax(axis = var_5, x = input_309_cast)[name = tensor("input_311_cast")]; + tensor attn_output_115_transpose_x_0 = const()[name = tensor("attn_output_115_transpose_x_0"), val = tensor(false)]; + tensor attn_output_115_transpose_y_0 = const()[name = tensor("attn_output_115_transpose_y_0"), val = tensor(false)]; + tensor attn_output_115_cast = matmul(transpose_x = attn_output_115_transpose_x_0, transpose_y = attn_output_115_transpose_y_0, x = input_311_cast, y = 
value_states_79_cast)[name = tensor("attn_output_115_cast")]; + tensor var_1827 = const()[name = tensor("op_1827"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_117_cast = reshape(shape = var_1827, x = attn_output_115_cast)[name = tensor("attn_output_117_cast")]; + tensor attn_output_119_perm_0 = const()[name = tensor("attn_output_119_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1830 = const()[name = tensor("op_1830"), val = tensor([1, 77, 1280])]; + tensor transpose_61 = transpose(perm = attn_output_119_perm_0, x = attn_output_117_cast)[name = tensor("transpose_61")]; + tensor input_313_cast = reshape(shape = var_1830, x = transpose_61)[name = tensor("input_313_cast")]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(884299648)))]; + tensor text_encoder_text_model_encoder_layers_19_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887576512)))]; + tensor hidden_states_117_cast = linear(bias = text_encoder_text_model_encoder_layers_19_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_self_attn_out_proj_weight_to_fp16, x = input_313_cast)[name = tensor("hidden_states_117_cast")]; + tensor input_315_cast = add(x = input_307_cast, y = hidden_states_117_cast)[name = tensor("input_315_cast")]; + tensor input_317_axes_0 = const()[name = tensor("input_317_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_19_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887579136)))]; + tensor text_encoder_text_model_encoder_layers_19_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887581760)))]; + tensor input_317_cast = layer_norm(axes = input_317_axes_0, beta = text_encoder_text_model_encoder_layers_19_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_19_layer_norm2_weight_to_fp16, x = input_315_cast)[name = tensor("input_317_cast")]; + tensor text_encoder_text_model_encoder_layers_19_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887584384)))]; + tensor text_encoder_text_model_encoder_layers_19_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(900691648)))]; + tensor input_319_cast = linear(bias = text_encoder_text_model_encoder_layers_19_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_mlp_fc1_weight_to_fp16, x = input_317_cast)[name = tensor("input_319_cast")]; + tensor input_321_mode_0 = const()[name = tensor("input_321_mode_0"), val = tensor("EXACT")]; + tensor input_321_cast = gelu(mode = input_321_mode_0, x = input_319_cast)[name = 
tensor("input_321_cast")]; + tensor text_encoder_text_model_encoder_layers_19_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(900701952)))]; + tensor text_encoder_text_model_encoder_layers_19_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_19_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(913809216)))]; + tensor hidden_states_119_cast = linear(bias = text_encoder_text_model_encoder_layers_19_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_19_mlp_fc2_weight_to_fp16, x = input_321_cast)[name = tensor("hidden_states_119_cast")]; + tensor input_323_cast = add(x = input_315_cast, y = hidden_states_119_cast)[name = tensor("input_323_cast")]; + tensor hidden_states_121_axes_0 = const()[name = tensor("hidden_states_121_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_20_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(913811840)))]; + tensor text_encoder_text_model_encoder_layers_20_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(913814464)))]; + tensor hidden_states_121_cast = layer_norm(axes = hidden_states_121_axes_0, beta = text_encoder_text_model_encoder_layers_20_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_20_layer_norm1_weight_to_fp16, x = input_323_cast)[name = tensor("hidden_states_121_cast")]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(913817088)))]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(917093952)))]; + tensor var_1868_cast = linear(bias = text_encoder_text_model_encoder_layers_20_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_self_attn_q_proj_weight_to_fp16, x = hidden_states_121_cast)[name = tensor("op_1868_cast")]; + tensor var_1869_to_fp16 = const()[name = tensor("op_1869_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_125_cast = mul(x = var_1868_cast, y = var_1869_to_fp16)[name = tensor("tensor_125_cast")]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(917096576)))]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(920373440)))]; + tensor tensor_121_cast = linear(bias = 
text_encoder_text_model_encoder_layers_20_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_self_attn_k_proj_weight_to_fp16, x = hidden_states_121_cast)[name = tensor("tensor_121_cast")]; + tensor var_1874 = const()[name = tensor("op_1874"), val = tensor([1, -1, 20, 64])]; + tensor var_1875_cast = reshape(shape = var_1874, x = tensor_121_cast)[name = tensor("op_1875_cast")]; + tensor var_1876_perm_0 = const()[name = tensor("op_1876_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(920376064)))]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(923652928)))]; + tensor tensor_123_cast = linear(bias = text_encoder_text_model_encoder_layers_20_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_self_attn_v_proj_weight_to_fp16, x = hidden_states_121_cast)[name = tensor("tensor_123_cast")]; + tensor var_1881 = const()[name = tensor("op_1881"), val = tensor([1, -1, 20, 64])]; + tensor var_1882_cast = reshape(shape = var_1881, x = tensor_123_cast)[name = tensor("op_1882_cast")]; + tensor var_1883_perm_0 = const()[name = tensor("op_1883_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1890 = const()[name = tensor("op_1890"), val = tensor([1, 77, 20, 64])]; + tensor var_1891_cast = reshape(shape = var_1890, x = tensor_125_cast)[name = tensor("op_1891_cast")]; + tensor var_1892_perm_0 = const()[name = tensor("op_1892_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1894 = const()[name = tensor("op_1894"), val = tensor([20, -1, 64])]; + tensor transpose_58 = transpose(perm = var_1892_perm_0, x = var_1891_cast)[name = tensor("transpose_58")]; + tensor query_states_41_cast = reshape(shape = var_1894, x = transpose_58)[name = tensor("query_states_41_cast")]; + tensor var_1896 = const()[name = tensor("op_1896"), val = tensor([20, -1, 64])]; + tensor transpose_60 = transpose(perm = var_1876_perm_0, x = var_1875_cast)[name = tensor("transpose_60")]; + tensor key_states_83_cast = reshape(shape = var_1896, x = transpose_60)[name = tensor("key_states_83_cast")]; + tensor var_1898 = const()[name = tensor("op_1898"), val = tensor([20, -1, 64])]; + tensor transpose_59 = transpose(perm = var_1883_perm_0, x = var_1882_cast)[name = tensor("transpose_59")]; + tensor value_states_83_cast = reshape(shape = var_1898, x = transpose_59)[name = tensor("value_states_83_cast")]; + tensor var_1901_perm_0 = const()[name = tensor("op_1901_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_121_transpose_x_0 = const()[name = tensor("attn_weights_121_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_121_transpose_y_0 = const()[name = tensor("attn_weights_121_transpose_y_0"), val = tensor(false)]; + tensor transpose_57 = transpose(perm = var_1901_perm_0, x = key_states_83_cast)[name = tensor("transpose_57")]; + tensor attn_weights_121_cast = matmul(transpose_x = attn_weights_121_transpose_x_0, transpose_y = attn_weights_121_transpose_y_0, x = query_states_41_cast, y = transpose_57)[name = tensor("attn_weights_121_cast")]; + tensor var_1903 = const()[name = tensor("op_1903"), 
val = tensor([1, 20, 77, 77])]; + tensor var_1904_cast = reshape(shape = var_1903, x = attn_weights_121_cast)[name = tensor("op_1904_cast")]; + tensor attn_weights_123_cast = add(x = var_1904_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_123_cast")]; + tensor var_1909 = const()[name = tensor("op_1909"), val = tensor([20, 77, 77])]; + tensor input_325_cast = reshape(shape = var_1909, x = attn_weights_123_cast)[name = tensor("input_325_cast")]; + tensor input_327_cast = softmax(axis = var_5, x = input_325_cast)[name = tensor("input_327_cast")]; + tensor attn_output_121_transpose_x_0 = const()[name = tensor("attn_output_121_transpose_x_0"), val = tensor(false)]; + tensor attn_output_121_transpose_y_0 = const()[name = tensor("attn_output_121_transpose_y_0"), val = tensor(false)]; + tensor attn_output_121_cast = matmul(transpose_x = attn_output_121_transpose_x_0, transpose_y = attn_output_121_transpose_y_0, x = input_327_cast, y = value_states_83_cast)[name = tensor("attn_output_121_cast")]; + tensor var_1914 = const()[name = tensor("op_1914"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_123_cast = reshape(shape = var_1914, x = attn_output_121_cast)[name = tensor("attn_output_123_cast")]; + tensor attn_output_125_perm_0 = const()[name = tensor("attn_output_125_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1917 = const()[name = tensor("op_1917"), val = tensor([1, 77, 1280])]; + tensor transpose_56 = transpose(perm = attn_output_125_perm_0, x = attn_output_123_cast)[name = tensor("transpose_56")]; + tensor input_329_cast = reshape(shape = var_1917, x = transpose_56)[name = tensor("input_329_cast")]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(923655552)))]; + tensor text_encoder_text_model_encoder_layers_20_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(926932416)))]; + tensor hidden_states_123_cast = linear(bias = text_encoder_text_model_encoder_layers_20_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_self_attn_out_proj_weight_to_fp16, x = input_329_cast)[name = tensor("hidden_states_123_cast")]; + tensor input_331_cast = add(x = input_323_cast, y = hidden_states_123_cast)[name = tensor("input_331_cast")]; + tensor input_333_axes_0 = const()[name = tensor("input_333_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_20_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(926935040)))]; + tensor text_encoder_text_model_encoder_layers_20_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(926937664)))]; + tensor input_333_cast = layer_norm(axes = input_333_axes_0, beta = text_encoder_text_model_encoder_layers_20_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_20_layer_norm2_weight_to_fp16, x = input_331_cast)[name = 
tensor("input_333_cast")]; + tensor text_encoder_text_model_encoder_layers_20_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(926940288)))]; + tensor text_encoder_text_model_encoder_layers_20_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(940047552)))]; + tensor input_335_cast = linear(bias = text_encoder_text_model_encoder_layers_20_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_mlp_fc1_weight_to_fp16, x = input_333_cast)[name = tensor("input_335_cast")]; + tensor input_337_mode_0 = const()[name = tensor("input_337_mode_0"), val = tensor("EXACT")]; + tensor input_337_cast = gelu(mode = input_337_mode_0, x = input_335_cast)[name = tensor("input_337_cast")]; + tensor text_encoder_text_model_encoder_layers_20_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(940057856)))]; + tensor text_encoder_text_model_encoder_layers_20_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_20_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953165120)))]; + tensor hidden_states_125_cast = linear(bias = text_encoder_text_model_encoder_layers_20_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_20_mlp_fc2_weight_to_fp16, x = input_337_cast)[name = tensor("hidden_states_125_cast")]; + tensor input_339_cast = add(x = input_331_cast, y = hidden_states_125_cast)[name = tensor("input_339_cast")]; + tensor hidden_states_127_axes_0 = const()[name = tensor("hidden_states_127_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_21_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953167744)))]; + tensor text_encoder_text_model_encoder_layers_21_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953170368)))]; + tensor hidden_states_127_cast = layer_norm(axes = hidden_states_127_axes_0, beta = text_encoder_text_model_encoder_layers_21_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_21_layer_norm1_weight_to_fp16, x = input_339_cast)[name = tensor("hidden_states_127_cast")]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953172992)))]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956449856)))]; + tensor var_1955_cast = linear(bias = text_encoder_text_model_encoder_layers_21_self_attn_q_proj_bias_to_fp16, 
weight = text_encoder_text_model_encoder_layers_21_self_attn_q_proj_weight_to_fp16, x = hidden_states_127_cast)[name = tensor("op_1955_cast")]; + tensor var_1956_to_fp16 = const()[name = tensor("op_1956_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_131_cast = mul(x = var_1955_cast, y = var_1956_to_fp16)[name = tensor("tensor_131_cast")]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956452480)))]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(959729344)))]; + tensor tensor_127_cast = linear(bias = text_encoder_text_model_encoder_layers_21_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_self_attn_k_proj_weight_to_fp16, x = hidden_states_127_cast)[name = tensor("tensor_127_cast")]; + tensor var_1961 = const()[name = tensor("op_1961"), val = tensor([1, -1, 20, 64])]; + tensor var_1962_cast = reshape(shape = var_1961, x = tensor_127_cast)[name = tensor("op_1962_cast")]; + tensor var_1963_perm_0 = const()[name = tensor("op_1963_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(959731968)))]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(963008832)))]; + tensor tensor_129_cast = linear(bias = text_encoder_text_model_encoder_layers_21_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_self_attn_v_proj_weight_to_fp16, x = hidden_states_127_cast)[name = tensor("tensor_129_cast")]; + tensor var_1968 = const()[name = tensor("op_1968"), val = tensor([1, -1, 20, 64])]; + tensor var_1969_cast = reshape(shape = var_1968, x = tensor_129_cast)[name = tensor("op_1969_cast")]; + tensor var_1970_perm_0 = const()[name = tensor("op_1970_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1977 = const()[name = tensor("op_1977"), val = tensor([1, 77, 20, 64])]; + tensor var_1978_cast = reshape(shape = var_1977, x = tensor_131_cast)[name = tensor("op_1978_cast")]; + tensor var_1979_perm_0 = const()[name = tensor("op_1979_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_1981 = const()[name = tensor("op_1981"), val = tensor([20, -1, 64])]; + tensor transpose_53 = transpose(perm = var_1979_perm_0, x = var_1978_cast)[name = tensor("transpose_53")]; + tensor query_states_43_cast = reshape(shape = var_1981, x = transpose_53)[name = tensor("query_states_43_cast")]; + tensor var_1983 = const()[name = tensor("op_1983"), val = tensor([20, -1, 64])]; + tensor transpose_55 = transpose(perm = var_1963_perm_0, x = var_1962_cast)[name = tensor("transpose_55")]; + tensor key_states_87_cast = reshape(shape = var_1983, x = transpose_55)[name = tensor("key_states_87_cast")]; + tensor var_1985 = const()[name = tensor("op_1985"), val = tensor([20, -1, 
64])]; + tensor transpose_54 = transpose(perm = var_1970_perm_0, x = var_1969_cast)[name = tensor("transpose_54")]; + tensor value_states_87_cast = reshape(shape = var_1985, x = transpose_54)[name = tensor("value_states_87_cast")]; + tensor var_1988_perm_0 = const()[name = tensor("op_1988_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_127_transpose_x_0 = const()[name = tensor("attn_weights_127_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_127_transpose_y_0 = const()[name = tensor("attn_weights_127_transpose_y_0"), val = tensor(false)]; + tensor transpose_52 = transpose(perm = var_1988_perm_0, x = key_states_87_cast)[name = tensor("transpose_52")]; + tensor attn_weights_127_cast = matmul(transpose_x = attn_weights_127_transpose_x_0, transpose_y = attn_weights_127_transpose_y_0, x = query_states_43_cast, y = transpose_52)[name = tensor("attn_weights_127_cast")]; + tensor var_1990 = const()[name = tensor("op_1990"), val = tensor([1, 20, 77, 77])]; + tensor var_1991_cast = reshape(shape = var_1990, x = attn_weights_127_cast)[name = tensor("op_1991_cast")]; + tensor attn_weights_129_cast = add(x = var_1991_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_129_cast")]; + tensor var_1996 = const()[name = tensor("op_1996"), val = tensor([20, 77, 77])]; + tensor input_341_cast = reshape(shape = var_1996, x = attn_weights_129_cast)[name = tensor("input_341_cast")]; + tensor input_343_cast = softmax(axis = var_5, x = input_341_cast)[name = tensor("input_343_cast")]; + tensor attn_output_127_transpose_x_0 = const()[name = tensor("attn_output_127_transpose_x_0"), val = tensor(false)]; + tensor attn_output_127_transpose_y_0 = const()[name = tensor("attn_output_127_transpose_y_0"), val = tensor(false)]; + tensor attn_output_127_cast = matmul(transpose_x = attn_output_127_transpose_x_0, transpose_y = attn_output_127_transpose_y_0, x = input_343_cast, y = value_states_87_cast)[name = tensor("attn_output_127_cast")]; + tensor var_2001 = const()[name = tensor("op_2001"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_129_cast = reshape(shape = var_2001, x = attn_output_127_cast)[name = tensor("attn_output_129_cast")]; + tensor attn_output_131_perm_0 = const()[name = tensor("attn_output_131_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2004 = const()[name = tensor("op_2004"), val = tensor([1, 77, 1280])]; + tensor transpose_51 = transpose(perm = attn_output_131_perm_0, x = attn_output_129_cast)[name = tensor("transpose_51")]; + tensor input_345_cast = reshape(shape = var_2004, x = transpose_51)[name = tensor("input_345_cast")]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(963011456)))]; + tensor text_encoder_text_model_encoder_layers_21_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(966288320)))]; + tensor hidden_states_129_cast = linear(bias = text_encoder_text_model_encoder_layers_21_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_self_attn_out_proj_weight_to_fp16, x = input_345_cast)[name = tensor("hidden_states_129_cast")]; + tensor input_347_cast = add(x = input_339_cast, y = hidden_states_129_cast)[name = 
tensor("input_347_cast")]; + tensor input_349_axes_0 = const()[name = tensor("input_349_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_21_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(966290944)))]; + tensor text_encoder_text_model_encoder_layers_21_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(966293568)))]; + tensor input_349_cast = layer_norm(axes = input_349_axes_0, beta = text_encoder_text_model_encoder_layers_21_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_21_layer_norm2_weight_to_fp16, x = input_347_cast)[name = tensor("input_349_cast")]; + tensor text_encoder_text_model_encoder_layers_21_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(966296192)))]; + tensor text_encoder_text_model_encoder_layers_21_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(979403456)))]; + tensor input_351_cast = linear(bias = text_encoder_text_model_encoder_layers_21_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_mlp_fc1_weight_to_fp16, x = input_349_cast)[name = tensor("input_351_cast")]; + tensor input_353_mode_0 = const()[name = tensor("input_353_mode_0"), val = tensor("EXACT")]; + tensor input_353_cast = gelu(mode = input_353_mode_0, x = input_351_cast)[name = tensor("input_353_cast")]; + tensor text_encoder_text_model_encoder_layers_21_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(979413760)))]; + tensor text_encoder_text_model_encoder_layers_21_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_21_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(992521024)))]; + tensor hidden_states_131_cast = linear(bias = text_encoder_text_model_encoder_layers_21_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_21_mlp_fc2_weight_to_fp16, x = input_353_cast)[name = tensor("hidden_states_131_cast")]; + tensor input_355_cast = add(x = input_347_cast, y = hidden_states_131_cast)[name = tensor("input_355_cast")]; + tensor hidden_states_133_axes_0 = const()[name = tensor("hidden_states_133_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_22_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(992523648)))]; + tensor text_encoder_text_model_encoder_layers_22_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(992526272)))]; + tensor hidden_states_133_cast = layer_norm(axes = 
hidden_states_133_axes_0, beta = text_encoder_text_model_encoder_layers_22_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_22_layer_norm1_weight_to_fp16, x = input_355_cast)[name = tensor("hidden_states_133_cast")]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(992528896)))]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(995805760)))]; + tensor var_2042_cast = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_self_attn_q_proj_weight_to_fp16, x = hidden_states_133_cast)[name = tensor("op_2042_cast")]; + tensor var_2043_to_fp16 = const()[name = tensor("op_2043_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_137_cast = mul(x = var_2042_cast, y = var_2043_to_fp16)[name = tensor("tensor_137_cast")]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(995808384)))]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(999085248)))]; + tensor tensor_133_cast = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_self_attn_k_proj_weight_to_fp16, x = hidden_states_133_cast)[name = tensor("tensor_133_cast")]; + tensor var_2048 = const()[name = tensor("op_2048"), val = tensor([1, -1, 20, 64])]; + tensor var_2049_cast = reshape(shape = var_2048, x = tensor_133_cast)[name = tensor("op_2049_cast")]; + tensor var_2050_perm_0 = const()[name = tensor("op_2050_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(999087872)))]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002364736)))]; + tensor tensor_135_cast = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_self_attn_v_proj_weight_to_fp16, x = hidden_states_133_cast)[name = tensor("tensor_135_cast")]; + tensor var_2055 = const()[name = tensor("op_2055"), val = tensor([1, -1, 20, 64])]; + tensor var_2056_cast = reshape(shape = var_2055, x = tensor_135_cast)[name = tensor("op_2056_cast")]; + tensor var_2057_perm_0 = const()[name = tensor("op_2057_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2064 = 
const()[name = tensor("op_2064"), val = tensor([1, 77, 20, 64])]; + tensor var_2065_cast = reshape(shape = var_2064, x = tensor_137_cast)[name = tensor("op_2065_cast")]; + tensor var_2066_perm_0 = const()[name = tensor("op_2066_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2068 = const()[name = tensor("op_2068"), val = tensor([20, -1, 64])]; + tensor transpose_48 = transpose(perm = var_2066_perm_0, x = var_2065_cast)[name = tensor("transpose_48")]; + tensor query_states_45_cast = reshape(shape = var_2068, x = transpose_48)[name = tensor("query_states_45_cast")]; + tensor var_2070 = const()[name = tensor("op_2070"), val = tensor([20, -1, 64])]; + tensor transpose_50 = transpose(perm = var_2050_perm_0, x = var_2049_cast)[name = tensor("transpose_50")]; + tensor key_states_91_cast = reshape(shape = var_2070, x = transpose_50)[name = tensor("key_states_91_cast")]; + tensor var_2072 = const()[name = tensor("op_2072"), val = tensor([20, -1, 64])]; + tensor transpose_49 = transpose(perm = var_2057_perm_0, x = var_2056_cast)[name = tensor("transpose_49")]; + tensor value_states_91_cast = reshape(shape = var_2072, x = transpose_49)[name = tensor("value_states_91_cast")]; + tensor var_2075_perm_0 = const()[name = tensor("op_2075_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_133_transpose_x_0 = const()[name = tensor("attn_weights_133_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_133_transpose_y_0 = const()[name = tensor("attn_weights_133_transpose_y_0"), val = tensor(false)]; + tensor transpose_47 = transpose(perm = var_2075_perm_0, x = key_states_91_cast)[name = tensor("transpose_47")]; + tensor attn_weights_133_cast = matmul(transpose_x = attn_weights_133_transpose_x_0, transpose_y = attn_weights_133_transpose_y_0, x = query_states_45_cast, y = transpose_47)[name = tensor("attn_weights_133_cast")]; + tensor var_2077 = const()[name = tensor("op_2077"), val = tensor([1, 20, 77, 77])]; + tensor var_2078_cast = reshape(shape = var_2077, x = attn_weights_133_cast)[name = tensor("op_2078_cast")]; + tensor attn_weights_135_cast = add(x = var_2078_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_135_cast")]; + tensor var_2083 = const()[name = tensor("op_2083"), val = tensor([20, 77, 77])]; + tensor input_357_cast = reshape(shape = var_2083, x = attn_weights_135_cast)[name = tensor("input_357_cast")]; + tensor input_359_cast = softmax(axis = var_5, x = input_357_cast)[name = tensor("input_359_cast")]; + tensor attn_output_133_transpose_x_0 = const()[name = tensor("attn_output_133_transpose_x_0"), val = tensor(false)]; + tensor attn_output_133_transpose_y_0 = const()[name = tensor("attn_output_133_transpose_y_0"), val = tensor(false)]; + tensor attn_output_133_cast = matmul(transpose_x = attn_output_133_transpose_x_0, transpose_y = attn_output_133_transpose_y_0, x = input_359_cast, y = value_states_91_cast)[name = tensor("attn_output_133_cast")]; + tensor var_2088 = const()[name = tensor("op_2088"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_135_cast = reshape(shape = var_2088, x = attn_output_133_cast)[name = tensor("attn_output_135_cast")]; + tensor attn_output_137_perm_0 = const()[name = tensor("attn_output_137_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2091 = const()[name = tensor("op_2091"), val = tensor([1, 77, 1280])]; + tensor transpose_46 = transpose(perm = attn_output_137_perm_0, x = attn_output_135_cast)[name = tensor("transpose_46")]; + tensor input_361_cast = reshape(shape = var_2091, x = transpose_46)[name = 
tensor("input_361_cast")]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002367360)))]; + tensor text_encoder_text_model_encoder_layers_22_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1005644224)))]; + tensor hidden_states_135_cast = linear(bias = text_encoder_text_model_encoder_layers_22_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_self_attn_out_proj_weight_to_fp16, x = input_361_cast)[name = tensor("hidden_states_135_cast")]; + tensor input_363_cast = add(x = input_355_cast, y = hidden_states_135_cast)[name = tensor("input_363_cast")]; + tensor input_365_axes_0 = const()[name = tensor("input_365_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_22_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1005646848)))]; + tensor text_encoder_text_model_encoder_layers_22_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1005649472)))]; + tensor input_365_cast = layer_norm(axes = input_365_axes_0, beta = text_encoder_text_model_encoder_layers_22_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_22_layer_norm2_weight_to_fp16, x = input_363_cast)[name = tensor("input_365_cast")]; + tensor text_encoder_text_model_encoder_layers_22_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1005652096)))]; + tensor text_encoder_text_model_encoder_layers_22_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1018759360)))]; + tensor input_367_cast = linear(bias = text_encoder_text_model_encoder_layers_22_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_22_mlp_fc1_weight_to_fp16, x = input_365_cast)[name = tensor("input_367_cast")]; + tensor input_369_mode_0 = const()[name = tensor("input_369_mode_0"), val = tensor("EXACT")]; + tensor input_369_cast = gelu(mode = input_369_mode_0, x = input_367_cast)[name = tensor("input_369_cast")]; + tensor text_encoder_text_model_encoder_layers_22_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1018769664)))]; + tensor text_encoder_text_model_encoder_layers_22_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_22_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1031876928)))]; + tensor hidden_states_137_cast = linear(bias = text_encoder_text_model_encoder_layers_22_mlp_fc2_bias_to_fp16, 
weight = text_encoder_text_model_encoder_layers_22_mlp_fc2_weight_to_fp16, x = input_369_cast)[name = tensor("hidden_states_137_cast")]; + tensor input_371_cast = add(x = input_363_cast, y = hidden_states_137_cast)[name = tensor("input_371_cast")]; + tensor hidden_states_139_axes_0 = const()[name = tensor("hidden_states_139_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_23_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1031879552)))]; + tensor text_encoder_text_model_encoder_layers_23_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1031882176)))]; + tensor hidden_states_139_cast = layer_norm(axes = hidden_states_139_axes_0, beta = text_encoder_text_model_encoder_layers_23_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_23_layer_norm1_weight_to_fp16, x = input_371_cast)[name = tensor("hidden_states_139_cast")]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1031884800)))]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1035161664)))]; + tensor var_2129_cast = linear(bias = text_encoder_text_model_encoder_layers_23_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_23_self_attn_q_proj_weight_to_fp16, x = hidden_states_139_cast)[name = tensor("op_2129_cast")]; + tensor var_2130_to_fp16 = const()[name = tensor("op_2130_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_143_cast = mul(x = var_2129_cast, y = var_2130_to_fp16)[name = tensor("tensor_143_cast")]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1035164288)))]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1038441152)))]; + tensor tensor_139_cast = linear(bias = text_encoder_text_model_encoder_layers_23_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_23_self_attn_k_proj_weight_to_fp16, x = hidden_states_139_cast)[name = tensor("tensor_139_cast")]; + tensor var_2135 = const()[name = tensor("op_2135"), val = tensor([1, -1, 20, 64])]; + tensor var_2136_cast = reshape(shape = var_2135, x = tensor_139_cast)[name = tensor("op_2136_cast")]; + tensor var_2137_perm_0 = const()[name = tensor("op_2137_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_v_proj_weight_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_23_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1038443776)))]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1041720640)))]; + tensor tensor_141_cast = linear(bias = text_encoder_text_model_encoder_layers_23_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_23_self_attn_v_proj_weight_to_fp16, x = hidden_states_139_cast)[name = tensor("tensor_141_cast")]; + tensor var_2142 = const()[name = tensor("op_2142"), val = tensor([1, -1, 20, 64])]; + tensor var_2143_cast = reshape(shape = var_2142, x = tensor_141_cast)[name = tensor("op_2143_cast")]; + tensor var_2144_perm_0 = const()[name = tensor("op_2144_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2151 = const()[name = tensor("op_2151"), val = tensor([1, 77, 20, 64])]; + tensor var_2152_cast = reshape(shape = var_2151, x = tensor_143_cast)[name = tensor("op_2152_cast")]; + tensor var_2153_perm_0 = const()[name = tensor("op_2153_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2155 = const()[name = tensor("op_2155"), val = tensor([20, -1, 64])]; + tensor transpose_43 = transpose(perm = var_2153_perm_0, x = var_2152_cast)[name = tensor("transpose_43")]; + tensor query_states_47_cast = reshape(shape = var_2155, x = transpose_43)[name = tensor("query_states_47_cast")]; + tensor var_2157 = const()[name = tensor("op_2157"), val = tensor([20, -1, 64])]; + tensor transpose_45 = transpose(perm = var_2137_perm_0, x = var_2136_cast)[name = tensor("transpose_45")]; + tensor key_states_95_cast = reshape(shape = var_2157, x = transpose_45)[name = tensor("key_states_95_cast")]; + tensor var_2159 = const()[name = tensor("op_2159"), val = tensor([20, -1, 64])]; + tensor transpose_44 = transpose(perm = var_2144_perm_0, x = var_2143_cast)[name = tensor("transpose_44")]; + tensor value_states_95_cast = reshape(shape = var_2159, x = transpose_44)[name = tensor("value_states_95_cast")]; + tensor var_2162_perm_0 = const()[name = tensor("op_2162_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_139_transpose_x_0 = const()[name = tensor("attn_weights_139_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_139_transpose_y_0 = const()[name = tensor("attn_weights_139_transpose_y_0"), val = tensor(false)]; + tensor transpose_42 = transpose(perm = var_2162_perm_0, x = key_states_95_cast)[name = tensor("transpose_42")]; + tensor attn_weights_139_cast = matmul(transpose_x = attn_weights_139_transpose_x_0, transpose_y = attn_weights_139_transpose_y_0, x = query_states_47_cast, y = transpose_42)[name = tensor("attn_weights_139_cast")]; + tensor var_2164 = const()[name = tensor("op_2164"), val = tensor([1, 20, 77, 77])]; + tensor var_2165_cast = reshape(shape = var_2164, x = attn_weights_139_cast)[name = tensor("op_2165_cast")]; + tensor attn_weights_141_cast = add(x = var_2165_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_141_cast")]; + tensor var_2170 = const()[name = tensor("op_2170"), val = tensor([20, 77, 77])]; + tensor input_373_cast = reshape(shape = var_2170, x = attn_weights_141_cast)[name = tensor("input_373_cast")]; + tensor input_375_cast = softmax(axis = var_5, x = input_373_cast)[name = tensor("input_375_cast")]; + tensor 
attn_output_139_transpose_x_0 = const()[name = tensor("attn_output_139_transpose_x_0"), val = tensor(false)]; + tensor attn_output_139_transpose_y_0 = const()[name = tensor("attn_output_139_transpose_y_0"), val = tensor(false)]; + tensor attn_output_139_cast = matmul(transpose_x = attn_output_139_transpose_x_0, transpose_y = attn_output_139_transpose_y_0, x = input_375_cast, y = value_states_95_cast)[name = tensor("attn_output_139_cast")]; + tensor var_2175 = const()[name = tensor("op_2175"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_141_cast = reshape(shape = var_2175, x = attn_output_139_cast)[name = tensor("attn_output_141_cast")]; + tensor attn_output_143_perm_0 = const()[name = tensor("attn_output_143_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2178 = const()[name = tensor("op_2178"), val = tensor([1, 77, 1280])]; + tensor transpose_41 = transpose(perm = attn_output_143_perm_0, x = attn_output_141_cast)[name = tensor("transpose_41")]; + tensor input_377_cast = reshape(shape = var_2178, x = transpose_41)[name = tensor("input_377_cast")]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1041723264)))]; + tensor text_encoder_text_model_encoder_layers_23_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1045000128)))]; + tensor hidden_states_141_cast = linear(bias = text_encoder_text_model_encoder_layers_23_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_23_self_attn_out_proj_weight_to_fp16, x = input_377_cast)[name = tensor("hidden_states_141_cast")]; + tensor input_379_cast = add(x = input_371_cast, y = hidden_states_141_cast)[name = tensor("input_379_cast")]; + tensor input_381_axes_0 = const()[name = tensor("input_381_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_23_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1045002752)))]; + tensor text_encoder_text_model_encoder_layers_23_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1045005376)))]; + tensor input_381_cast = layer_norm(axes = input_381_axes_0, beta = text_encoder_text_model_encoder_layers_23_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_23_layer_norm2_weight_to_fp16, x = input_379_cast)[name = tensor("input_381_cast")]; + tensor text_encoder_text_model_encoder_layers_23_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1045008000)))]; + tensor text_encoder_text_model_encoder_layers_23_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1058115264)))]; + tensor input_383_cast = linear(bias = 
text_encoder_text_model_encoder_layers_23_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_23_mlp_fc1_weight_to_fp16, x = input_381_cast)[name = tensor("input_383_cast")]; + tensor input_385_mode_0 = const()[name = tensor("input_385_mode_0"), val = tensor("EXACT")]; + tensor input_385_cast = gelu(mode = input_385_mode_0, x = input_383_cast)[name = tensor("input_385_cast")]; + tensor text_encoder_text_model_encoder_layers_23_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1058125568)))]; + tensor text_encoder_text_model_encoder_layers_23_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_23_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1071232832)))]; + tensor hidden_states_143_cast = linear(bias = text_encoder_text_model_encoder_layers_23_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_23_mlp_fc2_weight_to_fp16, x = input_385_cast)[name = tensor("hidden_states_143_cast")]; + tensor input_387_cast = add(x = input_379_cast, y = hidden_states_143_cast)[name = tensor("input_387_cast")]; + tensor hidden_states_145_axes_0 = const()[name = tensor("hidden_states_145_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_24_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1071235456)))]; + tensor text_encoder_text_model_encoder_layers_24_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1071238080)))]; + tensor hidden_states_145_cast = layer_norm(axes = hidden_states_145_axes_0, beta = text_encoder_text_model_encoder_layers_24_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_24_layer_norm1_weight_to_fp16, x = input_387_cast)[name = tensor("hidden_states_145_cast")]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1071240704)))]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1074517568)))]; + tensor var_2216_cast = linear(bias = text_encoder_text_model_encoder_layers_24_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_24_self_attn_q_proj_weight_to_fp16, x = hidden_states_145_cast)[name = tensor("op_2216_cast")]; + tensor var_2217_to_fp16 = const()[name = tensor("op_2217_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_149_cast = mul(x = var_2216_cast, y = var_2217_to_fp16)[name = tensor("tensor_149_cast")]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1074520192)))]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1077797056)))]; + tensor tensor_145_cast = linear(bias = text_encoder_text_model_encoder_layers_24_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_24_self_attn_k_proj_weight_to_fp16, x = hidden_states_145_cast)[name = tensor("tensor_145_cast")]; + tensor var_2222 = const()[name = tensor("op_2222"), val = tensor([1, -1, 20, 64])]; + tensor var_2223_cast = reshape(shape = var_2222, x = tensor_145_cast)[name = tensor("op_2223_cast")]; + tensor var_2224_perm_0 = const()[name = tensor("op_2224_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1077799680)))]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1081076544)))]; + tensor tensor_147_cast = linear(bias = text_encoder_text_model_encoder_layers_24_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_24_self_attn_v_proj_weight_to_fp16, x = hidden_states_145_cast)[name = tensor("tensor_147_cast")]; + tensor var_2229 = const()[name = tensor("op_2229"), val = tensor([1, -1, 20, 64])]; + tensor var_2230_cast = reshape(shape = var_2229, x = tensor_147_cast)[name = tensor("op_2230_cast")]; + tensor var_2231_perm_0 = const()[name = tensor("op_2231_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2238 = const()[name = tensor("op_2238"), val = tensor([1, 77, 20, 64])]; + tensor var_2239_cast = reshape(shape = var_2238, x = tensor_149_cast)[name = tensor("op_2239_cast")]; + tensor var_2240_perm_0 = const()[name = tensor("op_2240_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2242 = const()[name = tensor("op_2242"), val = tensor([20, -1, 64])]; + tensor transpose_38 = transpose(perm = var_2240_perm_0, x = var_2239_cast)[name = tensor("transpose_38")]; + tensor query_states_49_cast = reshape(shape = var_2242, x = transpose_38)[name = tensor("query_states_49_cast")]; + tensor var_2244 = const()[name = tensor("op_2244"), val = tensor([20, -1, 64])]; + tensor transpose_40 = transpose(perm = var_2224_perm_0, x = var_2223_cast)[name = tensor("transpose_40")]; + tensor key_states_99_cast = reshape(shape = var_2244, x = transpose_40)[name = tensor("key_states_99_cast")]; + tensor var_2246 = const()[name = tensor("op_2246"), val = tensor([20, -1, 64])]; + tensor transpose_39 = transpose(perm = var_2231_perm_0, x = var_2230_cast)[name = tensor("transpose_39")]; + tensor value_states_99_cast = reshape(shape = var_2246, x = transpose_39)[name = tensor("value_states_99_cast")]; + tensor var_2249_perm_0 = const()[name = tensor("op_2249_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_145_transpose_x_0 = const()[name = tensor("attn_weights_145_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_145_transpose_y_0 = const()[name = tensor("attn_weights_145_transpose_y_0"), val = 
tensor(false)]; + tensor transpose_37 = transpose(perm = var_2249_perm_0, x = key_states_99_cast)[name = tensor("transpose_37")]; + tensor attn_weights_145_cast = matmul(transpose_x = attn_weights_145_transpose_x_0, transpose_y = attn_weights_145_transpose_y_0, x = query_states_49_cast, y = transpose_37)[name = tensor("attn_weights_145_cast")]; + tensor var_2251 = const()[name = tensor("op_2251"), val = tensor([1, 20, 77, 77])]; + tensor var_2252_cast = reshape(shape = var_2251, x = attn_weights_145_cast)[name = tensor("op_2252_cast")]; + tensor attn_weights_147_cast = add(x = var_2252_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_147_cast")]; + tensor var_2257 = const()[name = tensor("op_2257"), val = tensor([20, 77, 77])]; + tensor input_389_cast = reshape(shape = var_2257, x = attn_weights_147_cast)[name = tensor("input_389_cast")]; + tensor input_391_cast = softmax(axis = var_5, x = input_389_cast)[name = tensor("input_391_cast")]; + tensor attn_output_145_transpose_x_0 = const()[name = tensor("attn_output_145_transpose_x_0"), val = tensor(false)]; + tensor attn_output_145_transpose_y_0 = const()[name = tensor("attn_output_145_transpose_y_0"), val = tensor(false)]; + tensor attn_output_145_cast = matmul(transpose_x = attn_output_145_transpose_x_0, transpose_y = attn_output_145_transpose_y_0, x = input_391_cast, y = value_states_99_cast)[name = tensor("attn_output_145_cast")]; + tensor var_2262 = const()[name = tensor("op_2262"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_147_cast = reshape(shape = var_2262, x = attn_output_145_cast)[name = tensor("attn_output_147_cast")]; + tensor attn_output_149_perm_0 = const()[name = tensor("attn_output_149_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2265 = const()[name = tensor("op_2265"), val = tensor([1, 77, 1280])]; + tensor transpose_36 = transpose(perm = attn_output_149_perm_0, x = attn_output_147_cast)[name = tensor("transpose_36")]; + tensor input_393_cast = reshape(shape = var_2265, x = transpose_36)[name = tensor("input_393_cast")]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1081079168)))]; + tensor text_encoder_text_model_encoder_layers_24_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1084356032)))]; + tensor hidden_states_147_cast = linear(bias = text_encoder_text_model_encoder_layers_24_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_24_self_attn_out_proj_weight_to_fp16, x = input_393_cast)[name = tensor("hidden_states_147_cast")]; + tensor input_395_cast = add(x = input_387_cast, y = hidden_states_147_cast)[name = tensor("input_395_cast")]; + tensor input_397_axes_0 = const()[name = tensor("input_397_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_24_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1084358656)))]; + tensor text_encoder_text_model_encoder_layers_24_layer_norm2_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_24_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1084361280)))]; + tensor input_397_cast = layer_norm(axes = input_397_axes_0, beta = text_encoder_text_model_encoder_layers_24_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_24_layer_norm2_weight_to_fp16, x = input_395_cast)[name = tensor("input_397_cast")]; + tensor text_encoder_text_model_encoder_layers_24_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1084363904)))]; + tensor text_encoder_text_model_encoder_layers_24_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1097471168)))]; + tensor input_399_cast = linear(bias = text_encoder_text_model_encoder_layers_24_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_24_mlp_fc1_weight_to_fp16, x = input_397_cast)[name = tensor("input_399_cast")]; + tensor input_401_mode_0 = const()[name = tensor("input_401_mode_0"), val = tensor("EXACT")]; + tensor input_401_cast = gelu(mode = input_401_mode_0, x = input_399_cast)[name = tensor("input_401_cast")]; + tensor text_encoder_text_model_encoder_layers_24_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1097481472)))]; + tensor text_encoder_text_model_encoder_layers_24_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_24_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1110588736)))]; + tensor hidden_states_149_cast = linear(bias = text_encoder_text_model_encoder_layers_24_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_24_mlp_fc2_weight_to_fp16, x = input_401_cast)[name = tensor("hidden_states_149_cast")]; + tensor input_403_cast = add(x = input_395_cast, y = hidden_states_149_cast)[name = tensor("input_403_cast")]; + tensor hidden_states_151_axes_0 = const()[name = tensor("hidden_states_151_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_25_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1110591360)))]; + tensor text_encoder_text_model_encoder_layers_25_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1110593984)))]; + tensor hidden_states_151_cast = layer_norm(axes = hidden_states_151_axes_0, beta = text_encoder_text_model_encoder_layers_25_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_25_layer_norm1_weight_to_fp16, x = input_403_cast)[name = tensor("hidden_states_151_cast")]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1110596608)))]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1113873472)))]; + tensor var_2303_cast = linear(bias = text_encoder_text_model_encoder_layers_25_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_25_self_attn_q_proj_weight_to_fp16, x = hidden_states_151_cast)[name = tensor("op_2303_cast")]; + tensor var_2304_to_fp16 = const()[name = tensor("op_2304_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_155_cast = mul(x = var_2303_cast, y = var_2304_to_fp16)[name = tensor("tensor_155_cast")]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1113876096)))]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1117152960)))]; + tensor tensor_151_cast = linear(bias = text_encoder_text_model_encoder_layers_25_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_25_self_attn_k_proj_weight_to_fp16, x = hidden_states_151_cast)[name = tensor("tensor_151_cast")]; + tensor var_2309 = const()[name = tensor("op_2309"), val = tensor([1, -1, 20, 64])]; + tensor var_2310_cast = reshape(shape = var_2309, x = tensor_151_cast)[name = tensor("op_2310_cast")]; + tensor var_2311_perm_0 = const()[name = tensor("op_2311_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1117155584)))]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1120432448)))]; + tensor tensor_153_cast = linear(bias = text_encoder_text_model_encoder_layers_25_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_25_self_attn_v_proj_weight_to_fp16, x = hidden_states_151_cast)[name = tensor("tensor_153_cast")]; + tensor var_2316 = const()[name = tensor("op_2316"), val = tensor([1, -1, 20, 64])]; + tensor var_2317_cast = reshape(shape = var_2316, x = tensor_153_cast)[name = tensor("op_2317_cast")]; + tensor var_2318_perm_0 = const()[name = tensor("op_2318_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2325 = const()[name = tensor("op_2325"), val = tensor([1, 77, 20, 64])]; + tensor var_2326_cast = reshape(shape = var_2325, x = tensor_155_cast)[name = tensor("op_2326_cast")]; + tensor var_2327_perm_0 = const()[name = tensor("op_2327_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2329 = const()[name = tensor("op_2329"), val = tensor([20, -1, 64])]; + tensor transpose_33 = transpose(perm = var_2327_perm_0, x = var_2326_cast)[name = tensor("transpose_33")]; + tensor 
query_states_51_cast = reshape(shape = var_2329, x = transpose_33)[name = tensor("query_states_51_cast")]; + tensor var_2331 = const()[name = tensor("op_2331"), val = tensor([20, -1, 64])]; + tensor transpose_35 = transpose(perm = var_2311_perm_0, x = var_2310_cast)[name = tensor("transpose_35")]; + tensor key_states_103_cast = reshape(shape = var_2331, x = transpose_35)[name = tensor("key_states_103_cast")]; + tensor var_2333 = const()[name = tensor("op_2333"), val = tensor([20, -1, 64])]; + tensor transpose_34 = transpose(perm = var_2318_perm_0, x = var_2317_cast)[name = tensor("transpose_34")]; + tensor value_states_103_cast = reshape(shape = var_2333, x = transpose_34)[name = tensor("value_states_103_cast")]; + tensor var_2336_perm_0 = const()[name = tensor("op_2336_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_151_transpose_x_0 = const()[name = tensor("attn_weights_151_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_151_transpose_y_0 = const()[name = tensor("attn_weights_151_transpose_y_0"), val = tensor(false)]; + tensor transpose_32 = transpose(perm = var_2336_perm_0, x = key_states_103_cast)[name = tensor("transpose_32")]; + tensor attn_weights_151_cast = matmul(transpose_x = attn_weights_151_transpose_x_0, transpose_y = attn_weights_151_transpose_y_0, x = query_states_51_cast, y = transpose_32)[name = tensor("attn_weights_151_cast")]; + tensor var_2338 = const()[name = tensor("op_2338"), val = tensor([1, 20, 77, 77])]; + tensor var_2339_cast = reshape(shape = var_2338, x = attn_weights_151_cast)[name = tensor("op_2339_cast")]; + tensor attn_weights_153_cast = add(x = var_2339_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_153_cast")]; + tensor var_2344 = const()[name = tensor("op_2344"), val = tensor([20, 77, 77])]; + tensor input_405_cast = reshape(shape = var_2344, x = attn_weights_153_cast)[name = tensor("input_405_cast")]; + tensor input_407_cast = softmax(axis = var_5, x = input_405_cast)[name = tensor("input_407_cast")]; + tensor attn_output_151_transpose_x_0 = const()[name = tensor("attn_output_151_transpose_x_0"), val = tensor(false)]; + tensor attn_output_151_transpose_y_0 = const()[name = tensor("attn_output_151_transpose_y_0"), val = tensor(false)]; + tensor attn_output_151_cast = matmul(transpose_x = attn_output_151_transpose_x_0, transpose_y = attn_output_151_transpose_y_0, x = input_407_cast, y = value_states_103_cast)[name = tensor("attn_output_151_cast")]; + tensor var_2349 = const()[name = tensor("op_2349"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_153_cast = reshape(shape = var_2349, x = attn_output_151_cast)[name = tensor("attn_output_153_cast")]; + tensor attn_output_155_perm_0 = const()[name = tensor("attn_output_155_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2352 = const()[name = tensor("op_2352"), val = tensor([1, 77, 1280])]; + tensor transpose_31 = transpose(perm = attn_output_155_perm_0, x = attn_output_153_cast)[name = tensor("transpose_31")]; + tensor input_409_cast = reshape(shape = var_2352, x = transpose_31)[name = tensor("input_409_cast")]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1120435072)))]; + tensor text_encoder_text_model_encoder_layers_25_self_attn_out_proj_bias_to_fp16 = const()[name = 
tensor("text_encoder_text_model_encoder_layers_25_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1123711936)))]; + tensor hidden_states_153_cast = linear(bias = text_encoder_text_model_encoder_layers_25_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_25_self_attn_out_proj_weight_to_fp16, x = input_409_cast)[name = tensor("hidden_states_153_cast")]; + tensor input_411_cast = add(x = input_403_cast, y = hidden_states_153_cast)[name = tensor("input_411_cast")]; + tensor input_413_axes_0 = const()[name = tensor("input_413_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_25_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1123714560)))]; + tensor text_encoder_text_model_encoder_layers_25_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1123717184)))]; + tensor input_413_cast = layer_norm(axes = input_413_axes_0, beta = text_encoder_text_model_encoder_layers_25_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_25_layer_norm2_weight_to_fp16, x = input_411_cast)[name = tensor("input_413_cast")]; + tensor text_encoder_text_model_encoder_layers_25_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1123719808)))]; + tensor text_encoder_text_model_encoder_layers_25_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1136827072)))]; + tensor input_415_cast = linear(bias = text_encoder_text_model_encoder_layers_25_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_25_mlp_fc1_weight_to_fp16, x = input_413_cast)[name = tensor("input_415_cast")]; + tensor input_417_mode_0 = const()[name = tensor("input_417_mode_0"), val = tensor("EXACT")]; + tensor input_417_cast = gelu(mode = input_417_mode_0, x = input_415_cast)[name = tensor("input_417_cast")]; + tensor text_encoder_text_model_encoder_layers_25_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1136837376)))]; + tensor text_encoder_text_model_encoder_layers_25_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_25_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1149944640)))]; + tensor hidden_states_155_cast = linear(bias = text_encoder_text_model_encoder_layers_25_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_25_mlp_fc2_weight_to_fp16, x = input_417_cast)[name = tensor("hidden_states_155_cast")]; + tensor input_419_cast = add(x = input_411_cast, y = hidden_states_155_cast)[name = tensor("input_419_cast")]; + tensor hidden_states_157_axes_0 = const()[name = tensor("hidden_states_157_axes_0"), val = tensor([-1])]; + tensor 
text_encoder_text_model_encoder_layers_26_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1149947264)))]; + tensor text_encoder_text_model_encoder_layers_26_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1149949888)))]; + tensor hidden_states_157_cast = layer_norm(axes = hidden_states_157_axes_0, beta = text_encoder_text_model_encoder_layers_26_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_26_layer_norm1_weight_to_fp16, x = input_419_cast)[name = tensor("hidden_states_157_cast")]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1149952512)))]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1153229376)))]; + tensor var_2390_cast = linear(bias = text_encoder_text_model_encoder_layers_26_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_26_self_attn_q_proj_weight_to_fp16, x = hidden_states_157_cast)[name = tensor("op_2390_cast")]; + tensor var_2391_to_fp16 = const()[name = tensor("op_2391_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_161_cast = mul(x = var_2390_cast, y = var_2391_to_fp16)[name = tensor("tensor_161_cast")]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1153232000)))]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1156508864)))]; + tensor tensor_157_cast = linear(bias = text_encoder_text_model_encoder_layers_26_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_26_self_attn_k_proj_weight_to_fp16, x = hidden_states_157_cast)[name = tensor("tensor_157_cast")]; + tensor var_2396 = const()[name = tensor("op_2396"), val = tensor([1, -1, 20, 64])]; + tensor var_2397_cast = reshape(shape = var_2396, x = tensor_157_cast)[name = tensor("op_2397_cast")]; + tensor var_2398_perm_0 = const()[name = tensor("op_2398_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1156511488)))]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(1159788352)))]; + tensor tensor_159_cast = linear(bias = text_encoder_text_model_encoder_layers_26_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_26_self_attn_v_proj_weight_to_fp16, x = hidden_states_157_cast)[name = tensor("tensor_159_cast")]; + tensor var_2403 = const()[name = tensor("op_2403"), val = tensor([1, -1, 20, 64])]; + tensor var_2404_cast = reshape(shape = var_2403, x = tensor_159_cast)[name = tensor("op_2404_cast")]; + tensor var_2405_perm_0 = const()[name = tensor("op_2405_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2412 = const()[name = tensor("op_2412"), val = tensor([1, 77, 20, 64])]; + tensor var_2413_cast = reshape(shape = var_2412, x = tensor_161_cast)[name = tensor("op_2413_cast")]; + tensor var_2414_perm_0 = const()[name = tensor("op_2414_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2416 = const()[name = tensor("op_2416"), val = tensor([20, -1, 64])]; + tensor transpose_28 = transpose(perm = var_2414_perm_0, x = var_2413_cast)[name = tensor("transpose_28")]; + tensor query_states_53_cast = reshape(shape = var_2416, x = transpose_28)[name = tensor("query_states_53_cast")]; + tensor var_2418 = const()[name = tensor("op_2418"), val = tensor([20, -1, 64])]; + tensor transpose_30 = transpose(perm = var_2398_perm_0, x = var_2397_cast)[name = tensor("transpose_30")]; + tensor key_states_107_cast = reshape(shape = var_2418, x = transpose_30)[name = tensor("key_states_107_cast")]; + tensor var_2420 = const()[name = tensor("op_2420"), val = tensor([20, -1, 64])]; + tensor transpose_29 = transpose(perm = var_2405_perm_0, x = var_2404_cast)[name = tensor("transpose_29")]; + tensor value_states_107_cast = reshape(shape = var_2420, x = transpose_29)[name = tensor("value_states_107_cast")]; + tensor var_2423_perm_0 = const()[name = tensor("op_2423_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_157_transpose_x_0 = const()[name = tensor("attn_weights_157_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_157_transpose_y_0 = const()[name = tensor("attn_weights_157_transpose_y_0"), val = tensor(false)]; + tensor transpose_27 = transpose(perm = var_2423_perm_0, x = key_states_107_cast)[name = tensor("transpose_27")]; + tensor attn_weights_157_cast = matmul(transpose_x = attn_weights_157_transpose_x_0, transpose_y = attn_weights_157_transpose_y_0, x = query_states_53_cast, y = transpose_27)[name = tensor("attn_weights_157_cast")]; + tensor var_2425 = const()[name = tensor("op_2425"), val = tensor([1, 20, 77, 77])]; + tensor var_2426_cast = reshape(shape = var_2425, x = attn_weights_157_cast)[name = tensor("op_2426_cast")]; + tensor attn_weights_159_cast = add(x = var_2426_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_159_cast")]; + tensor var_2431 = const()[name = tensor("op_2431"), val = tensor([20, 77, 77])]; + tensor input_421_cast = reshape(shape = var_2431, x = attn_weights_159_cast)[name = tensor("input_421_cast")]; + tensor input_423_cast = softmax(axis = var_5, x = input_421_cast)[name = tensor("input_423_cast")]; + tensor attn_output_157_transpose_x_0 = const()[name = tensor("attn_output_157_transpose_x_0"), val = tensor(false)]; + tensor attn_output_157_transpose_y_0 = const()[name = tensor("attn_output_157_transpose_y_0"), val = tensor(false)]; + tensor attn_output_157_cast = matmul(transpose_x = attn_output_157_transpose_x_0, transpose_y = attn_output_157_transpose_y_0, x = input_423_cast, y = value_states_107_cast)[name = tensor("attn_output_157_cast")]; + tensor 
var_2436 = const()[name = tensor("op_2436"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_159_cast = reshape(shape = var_2436, x = attn_output_157_cast)[name = tensor("attn_output_159_cast")]; + tensor attn_output_161_perm_0 = const()[name = tensor("attn_output_161_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2439 = const()[name = tensor("op_2439"), val = tensor([1, 77, 1280])]; + tensor transpose_26 = transpose(perm = attn_output_161_perm_0, x = attn_output_159_cast)[name = tensor("transpose_26")]; + tensor input_425_cast = reshape(shape = var_2439, x = transpose_26)[name = tensor("input_425_cast")]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1159790976)))]; + tensor text_encoder_text_model_encoder_layers_26_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1163067840)))]; + tensor hidden_states_159_cast = linear(bias = text_encoder_text_model_encoder_layers_26_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_26_self_attn_out_proj_weight_to_fp16, x = input_425_cast)[name = tensor("hidden_states_159_cast")]; + tensor input_427_cast = add(x = input_419_cast, y = hidden_states_159_cast)[name = tensor("input_427_cast")]; + tensor input_429_axes_0 = const()[name = tensor("input_429_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_26_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1163070464)))]; + tensor text_encoder_text_model_encoder_layers_26_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1163073088)))]; + tensor input_429_cast = layer_norm(axes = input_429_axes_0, beta = text_encoder_text_model_encoder_layers_26_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_26_layer_norm2_weight_to_fp16, x = input_427_cast)[name = tensor("input_429_cast")]; + tensor text_encoder_text_model_encoder_layers_26_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1163075712)))]; + tensor text_encoder_text_model_encoder_layers_26_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1176182976)))]; + tensor input_431_cast = linear(bias = text_encoder_text_model_encoder_layers_26_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_26_mlp_fc1_weight_to_fp16, x = input_429_cast)[name = tensor("input_431_cast")]; + tensor input_433_mode_0 = const()[name = tensor("input_433_mode_0"), val = tensor("EXACT")]; + tensor input_433_cast = gelu(mode = input_433_mode_0, x = input_431_cast)[name = tensor("input_433_cast")]; + tensor 
text_encoder_text_model_encoder_layers_26_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1176193280)))]; + tensor text_encoder_text_model_encoder_layers_26_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_26_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1189300544)))]; + tensor hidden_states_161_cast = linear(bias = text_encoder_text_model_encoder_layers_26_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_26_mlp_fc2_weight_to_fp16, x = input_433_cast)[name = tensor("hidden_states_161_cast")]; + tensor input_435_cast = add(x = input_427_cast, y = hidden_states_161_cast)[name = tensor("input_435_cast")]; + tensor hidden_states_163_axes_0 = const()[name = tensor("hidden_states_163_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_27_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1189303168)))]; + tensor text_encoder_text_model_encoder_layers_27_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1189305792)))]; + tensor hidden_states_163_cast = layer_norm(axes = hidden_states_163_axes_0, beta = text_encoder_text_model_encoder_layers_27_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_27_layer_norm1_weight_to_fp16, x = input_435_cast)[name = tensor("hidden_states_163_cast")]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1189308416)))]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1192585280)))]; + tensor var_2477_cast = linear(bias = text_encoder_text_model_encoder_layers_27_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_27_self_attn_q_proj_weight_to_fp16, x = hidden_states_163_cast)[name = tensor("op_2477_cast")]; + tensor var_2478_to_fp16 = const()[name = tensor("op_2478_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_167_cast = mul(x = var_2477_cast, y = var_2478_to_fp16)[name = tensor("tensor_167_cast")]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1192587904)))]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1195864768)))]; + tensor tensor_163_cast = linear(bias = 
text_encoder_text_model_encoder_layers_27_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_27_self_attn_k_proj_weight_to_fp16, x = hidden_states_163_cast)[name = tensor("tensor_163_cast")]; + tensor var_2483 = const()[name = tensor("op_2483"), val = tensor([1, -1, 20, 64])]; + tensor var_2484_cast = reshape(shape = var_2483, x = tensor_163_cast)[name = tensor("op_2484_cast")]; + tensor var_2485_perm_0 = const()[name = tensor("op_2485_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1195867392)))]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1199144256)))]; + tensor tensor_165_cast = linear(bias = text_encoder_text_model_encoder_layers_27_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_27_self_attn_v_proj_weight_to_fp16, x = hidden_states_163_cast)[name = tensor("tensor_165_cast")]; + tensor var_2490 = const()[name = tensor("op_2490"), val = tensor([1, -1, 20, 64])]; + tensor var_2491_cast = reshape(shape = var_2490, x = tensor_165_cast)[name = tensor("op_2491_cast")]; + tensor var_2492_perm_0 = const()[name = tensor("op_2492_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2499 = const()[name = tensor("op_2499"), val = tensor([1, 77, 20, 64])]; + tensor var_2500_cast = reshape(shape = var_2499, x = tensor_167_cast)[name = tensor("op_2500_cast")]; + tensor var_2501_perm_0 = const()[name = tensor("op_2501_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2503 = const()[name = tensor("op_2503"), val = tensor([20, -1, 64])]; + tensor transpose_23 = transpose(perm = var_2501_perm_0, x = var_2500_cast)[name = tensor("transpose_23")]; + tensor query_states_55_cast = reshape(shape = var_2503, x = transpose_23)[name = tensor("query_states_55_cast")]; + tensor var_2505 = const()[name = tensor("op_2505"), val = tensor([20, -1, 64])]; + tensor transpose_25 = transpose(perm = var_2485_perm_0, x = var_2484_cast)[name = tensor("transpose_25")]; + tensor key_states_111_cast = reshape(shape = var_2505, x = transpose_25)[name = tensor("key_states_111_cast")]; + tensor var_2507 = const()[name = tensor("op_2507"), val = tensor([20, -1, 64])]; + tensor transpose_24 = transpose(perm = var_2492_perm_0, x = var_2491_cast)[name = tensor("transpose_24")]; + tensor value_states_111_cast = reshape(shape = var_2507, x = transpose_24)[name = tensor("value_states_111_cast")]; + tensor var_2510_perm_0 = const()[name = tensor("op_2510_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_163_transpose_x_0 = const()[name = tensor("attn_weights_163_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_163_transpose_y_0 = const()[name = tensor("attn_weights_163_transpose_y_0"), val = tensor(false)]; + tensor transpose_22 = transpose(perm = var_2510_perm_0, x = key_states_111_cast)[name = tensor("transpose_22")]; + tensor attn_weights_163_cast = matmul(transpose_x = attn_weights_163_transpose_x_0, transpose_y = attn_weights_163_transpose_y_0, x = query_states_55_cast, y = transpose_22)[name = tensor("attn_weights_163_cast")]; + tensor var_2512 = const()[name = 
tensor("op_2512"), val = tensor([1, 20, 77, 77])]; + tensor var_2513_cast = reshape(shape = var_2512, x = attn_weights_163_cast)[name = tensor("op_2513_cast")]; + tensor attn_weights_165_cast = add(x = var_2513_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_165_cast")]; + tensor var_2518 = const()[name = tensor("op_2518"), val = tensor([20, 77, 77])]; + tensor input_437_cast = reshape(shape = var_2518, x = attn_weights_165_cast)[name = tensor("input_437_cast")]; + tensor input_439_cast = softmax(axis = var_5, x = input_437_cast)[name = tensor("input_439_cast")]; + tensor attn_output_163_transpose_x_0 = const()[name = tensor("attn_output_163_transpose_x_0"), val = tensor(false)]; + tensor attn_output_163_transpose_y_0 = const()[name = tensor("attn_output_163_transpose_y_0"), val = tensor(false)]; + tensor attn_output_163_cast = matmul(transpose_x = attn_output_163_transpose_x_0, transpose_y = attn_output_163_transpose_y_0, x = input_439_cast, y = value_states_111_cast)[name = tensor("attn_output_163_cast")]; + tensor var_2523 = const()[name = tensor("op_2523"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_165_cast = reshape(shape = var_2523, x = attn_output_163_cast)[name = tensor("attn_output_165_cast")]; + tensor attn_output_167_perm_0 = const()[name = tensor("attn_output_167_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2526 = const()[name = tensor("op_2526"), val = tensor([1, 77, 1280])]; + tensor transpose_21 = transpose(perm = attn_output_167_perm_0, x = attn_output_165_cast)[name = tensor("transpose_21")]; + tensor input_441_cast = reshape(shape = var_2526, x = transpose_21)[name = tensor("input_441_cast")]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1199146880)))]; + tensor text_encoder_text_model_encoder_layers_27_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1202423744)))]; + tensor hidden_states_165_cast = linear(bias = text_encoder_text_model_encoder_layers_27_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_27_self_attn_out_proj_weight_to_fp16, x = input_441_cast)[name = tensor("hidden_states_165_cast")]; + tensor input_443_cast = add(x = input_435_cast, y = hidden_states_165_cast)[name = tensor("input_443_cast")]; + tensor input_445_axes_0 = const()[name = tensor("input_445_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_27_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1202426368)))]; + tensor text_encoder_text_model_encoder_layers_27_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1202428992)))]; + tensor input_445_cast = layer_norm(axes = input_445_axes_0, beta = text_encoder_text_model_encoder_layers_27_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_27_layer_norm2_weight_to_fp16, x = input_443_cast)[name = 
tensor("input_445_cast")]; + tensor text_encoder_text_model_encoder_layers_27_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1202431616)))]; + tensor text_encoder_text_model_encoder_layers_27_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1215538880)))]; + tensor input_447_cast = linear(bias = text_encoder_text_model_encoder_layers_27_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_27_mlp_fc1_weight_to_fp16, x = input_445_cast)[name = tensor("input_447_cast")]; + tensor input_449_mode_0 = const()[name = tensor("input_449_mode_0"), val = tensor("EXACT")]; + tensor input_449_cast = gelu(mode = input_449_mode_0, x = input_447_cast)[name = tensor("input_449_cast")]; + tensor text_encoder_text_model_encoder_layers_27_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1215549184)))]; + tensor text_encoder_text_model_encoder_layers_27_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_27_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1228656448)))]; + tensor hidden_states_167_cast = linear(bias = text_encoder_text_model_encoder_layers_27_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_27_mlp_fc2_weight_to_fp16, x = input_449_cast)[name = tensor("hidden_states_167_cast")]; + tensor input_451_cast = add(x = input_443_cast, y = hidden_states_167_cast)[name = tensor("input_451_cast")]; + tensor hidden_states_169_axes_0 = const()[name = tensor("hidden_states_169_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_28_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1228659072)))]; + tensor text_encoder_text_model_encoder_layers_28_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1228661696)))]; + tensor hidden_states_169_cast = layer_norm(axes = hidden_states_169_axes_0, beta = text_encoder_text_model_encoder_layers_28_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_28_layer_norm1_weight_to_fp16, x = input_451_cast)[name = tensor("hidden_states_169_cast")]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1228664320)))]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231941184)))]; + tensor var_2564_cast = linear(bias = 
text_encoder_text_model_encoder_layers_28_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_28_self_attn_q_proj_weight_to_fp16, x = hidden_states_169_cast)[name = tensor("op_2564_cast")]; + tensor var_2565_to_fp16 = const()[name = tensor("op_2565_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_173_cast = mul(x = var_2564_cast, y = var_2565_to_fp16)[name = tensor("tensor_173_cast")]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231943808)))]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1235220672)))]; + tensor tensor_169_cast = linear(bias = text_encoder_text_model_encoder_layers_28_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_28_self_attn_k_proj_weight_to_fp16, x = hidden_states_169_cast)[name = tensor("tensor_169_cast")]; + tensor var_2570 = const()[name = tensor("op_2570"), val = tensor([1, -1, 20, 64])]; + tensor var_2571_cast = reshape(shape = var_2570, x = tensor_169_cast)[name = tensor("op_2571_cast")]; + tensor var_2572_perm_0 = const()[name = tensor("op_2572_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1235223296)))]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1238500160)))]; + tensor tensor_171_cast = linear(bias = text_encoder_text_model_encoder_layers_28_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_28_self_attn_v_proj_weight_to_fp16, x = hidden_states_169_cast)[name = tensor("tensor_171_cast")]; + tensor var_2577 = const()[name = tensor("op_2577"), val = tensor([1, -1, 20, 64])]; + tensor var_2578_cast = reshape(shape = var_2577, x = tensor_171_cast)[name = tensor("op_2578_cast")]; + tensor var_2579_perm_0 = const()[name = tensor("op_2579_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2586 = const()[name = tensor("op_2586"), val = tensor([1, 77, 20, 64])]; + tensor var_2587_cast = reshape(shape = var_2586, x = tensor_173_cast)[name = tensor("op_2587_cast")]; + tensor var_2588_perm_0 = const()[name = tensor("op_2588_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2590 = const()[name = tensor("op_2590"), val = tensor([20, -1, 64])]; + tensor transpose_18 = transpose(perm = var_2588_perm_0, x = var_2587_cast)[name = tensor("transpose_18")]; + tensor query_states_57_cast = reshape(shape = var_2590, x = transpose_18)[name = tensor("query_states_57_cast")]; + tensor var_2592 = const()[name = tensor("op_2592"), val = tensor([20, -1, 64])]; + tensor transpose_20 = transpose(perm = var_2572_perm_0, x = var_2571_cast)[name = tensor("transpose_20")]; + tensor key_states_115_cast = reshape(shape = var_2592, x = transpose_20)[name = 
tensor("key_states_115_cast")]; + tensor var_2594 = const()[name = tensor("op_2594"), val = tensor([20, -1, 64])]; + tensor transpose_19 = transpose(perm = var_2579_perm_0, x = var_2578_cast)[name = tensor("transpose_19")]; + tensor value_states_115_cast = reshape(shape = var_2594, x = transpose_19)[name = tensor("value_states_115_cast")]; + tensor var_2597_perm_0 = const()[name = tensor("op_2597_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_169_transpose_x_0 = const()[name = tensor("attn_weights_169_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_169_transpose_y_0 = const()[name = tensor("attn_weights_169_transpose_y_0"), val = tensor(false)]; + tensor transpose_17 = transpose(perm = var_2597_perm_0, x = key_states_115_cast)[name = tensor("transpose_17")]; + tensor attn_weights_169_cast = matmul(transpose_x = attn_weights_169_transpose_x_0, transpose_y = attn_weights_169_transpose_y_0, x = query_states_57_cast, y = transpose_17)[name = tensor("attn_weights_169_cast")]; + tensor var_2599 = const()[name = tensor("op_2599"), val = tensor([1, 20, 77, 77])]; + tensor var_2600_cast = reshape(shape = var_2599, x = attn_weights_169_cast)[name = tensor("op_2600_cast")]; + tensor attn_weights_171_cast = add(x = var_2600_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_171_cast")]; + tensor var_2605 = const()[name = tensor("op_2605"), val = tensor([20, 77, 77])]; + tensor input_453_cast = reshape(shape = var_2605, x = attn_weights_171_cast)[name = tensor("input_453_cast")]; + tensor input_455_cast = softmax(axis = var_5, x = input_453_cast)[name = tensor("input_455_cast")]; + tensor attn_output_169_transpose_x_0 = const()[name = tensor("attn_output_169_transpose_x_0"), val = tensor(false)]; + tensor attn_output_169_transpose_y_0 = const()[name = tensor("attn_output_169_transpose_y_0"), val = tensor(false)]; + tensor attn_output_169_cast = matmul(transpose_x = attn_output_169_transpose_x_0, transpose_y = attn_output_169_transpose_y_0, x = input_455_cast, y = value_states_115_cast)[name = tensor("attn_output_169_cast")]; + tensor var_2610 = const()[name = tensor("op_2610"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_171_cast = reshape(shape = var_2610, x = attn_output_169_cast)[name = tensor("attn_output_171_cast")]; + tensor attn_output_173_perm_0 = const()[name = tensor("attn_output_173_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2613 = const()[name = tensor("op_2613"), val = tensor([1, 77, 1280])]; + tensor transpose_16 = transpose(perm = attn_output_173_perm_0, x = attn_output_171_cast)[name = tensor("transpose_16")]; + tensor input_457_cast = reshape(shape = var_2613, x = transpose_16)[name = tensor("input_457_cast")]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1238502784)))]; + tensor text_encoder_text_model_encoder_layers_28_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1241779648)))]; + tensor hidden_states_171_cast = linear(bias = text_encoder_text_model_encoder_layers_28_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_28_self_attn_out_proj_weight_to_fp16, x = input_457_cast)[name = 
tensor("hidden_states_171_cast")]; + tensor input_459_cast = add(x = input_451_cast, y = hidden_states_171_cast)[name = tensor("input_459_cast")]; + tensor input_461_axes_0 = const()[name = tensor("input_461_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_28_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1241782272)))]; + tensor text_encoder_text_model_encoder_layers_28_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1241784896)))]; + tensor input_461_cast = layer_norm(axes = input_461_axes_0, beta = text_encoder_text_model_encoder_layers_28_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_28_layer_norm2_weight_to_fp16, x = input_459_cast)[name = tensor("input_461_cast")]; + tensor text_encoder_text_model_encoder_layers_28_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1241787520)))]; + tensor text_encoder_text_model_encoder_layers_28_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1254894784)))]; + tensor input_463_cast = linear(bias = text_encoder_text_model_encoder_layers_28_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_28_mlp_fc1_weight_to_fp16, x = input_461_cast)[name = tensor("input_463_cast")]; + tensor input_465_mode_0 = const()[name = tensor("input_465_mode_0"), val = tensor("EXACT")]; + tensor input_465_cast = gelu(mode = input_465_mode_0, x = input_463_cast)[name = tensor("input_465_cast")]; + tensor text_encoder_text_model_encoder_layers_28_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1254905088)))]; + tensor text_encoder_text_model_encoder_layers_28_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_28_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1268012352)))]; + tensor hidden_states_173_cast = linear(bias = text_encoder_text_model_encoder_layers_28_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_28_mlp_fc2_weight_to_fp16, x = input_465_cast)[name = tensor("hidden_states_173_cast")]; + tensor input_467_cast = add(x = input_459_cast, y = hidden_states_173_cast)[name = tensor("input_467_cast")]; + tensor hidden_states_175_axes_0 = const()[name = tensor("hidden_states_175_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_29_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1268014976)))]; + tensor text_encoder_text_model_encoder_layers_29_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1268017600)))]; + tensor hidden_states_175_cast = layer_norm(axes = hidden_states_175_axes_0, beta = text_encoder_text_model_encoder_layers_29_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_29_layer_norm1_weight_to_fp16, x = input_467_cast)[name = tensor("hidden_states_175_cast")]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1268020224)))]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1271297088)))]; + tensor var_2651_cast = linear(bias = text_encoder_text_model_encoder_layers_29_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_29_self_attn_q_proj_weight_to_fp16, x = hidden_states_175_cast)[name = tensor("op_2651_cast")]; + tensor var_2652_to_fp16 = const()[name = tensor("op_2652_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_179_cast = mul(x = var_2651_cast, y = var_2652_to_fp16)[name = tensor("tensor_179_cast")]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1271299712)))]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1274576576)))]; + tensor tensor_175_cast = linear(bias = text_encoder_text_model_encoder_layers_29_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_29_self_attn_k_proj_weight_to_fp16, x = hidden_states_175_cast)[name = tensor("tensor_175_cast")]; + tensor var_2657 = const()[name = tensor("op_2657"), val = tensor([1, -1, 20, 64])]; + tensor var_2658_cast = reshape(shape = var_2657, x = tensor_175_cast)[name = tensor("op_2658_cast")]; + tensor var_2659_perm_0 = const()[name = tensor("op_2659_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1274579200)))]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1277856064)))]; + tensor tensor_177_cast = linear(bias = text_encoder_text_model_encoder_layers_29_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_29_self_attn_v_proj_weight_to_fp16, x = hidden_states_175_cast)[name = tensor("tensor_177_cast")]; + tensor var_2664 = const()[name = tensor("op_2664"), val = tensor([1, -1, 20, 64])]; + tensor var_2665_cast = reshape(shape = var_2664, x = tensor_177_cast)[name = 
tensor("op_2665_cast")]; + tensor var_2666_perm_0 = const()[name = tensor("op_2666_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2673 = const()[name = tensor("op_2673"), val = tensor([1, 77, 20, 64])]; + tensor var_2674_cast = reshape(shape = var_2673, x = tensor_179_cast)[name = tensor("op_2674_cast")]; + tensor var_2675_perm_0 = const()[name = tensor("op_2675_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2677 = const()[name = tensor("op_2677"), val = tensor([20, -1, 64])]; + tensor transpose_13 = transpose(perm = var_2675_perm_0, x = var_2674_cast)[name = tensor("transpose_13")]; + tensor query_states_59_cast = reshape(shape = var_2677, x = transpose_13)[name = tensor("query_states_59_cast")]; + tensor var_2679 = const()[name = tensor("op_2679"), val = tensor([20, -1, 64])]; + tensor transpose_15 = transpose(perm = var_2659_perm_0, x = var_2658_cast)[name = tensor("transpose_15")]; + tensor key_states_119_cast = reshape(shape = var_2679, x = transpose_15)[name = tensor("key_states_119_cast")]; + tensor var_2681 = const()[name = tensor("op_2681"), val = tensor([20, -1, 64])]; + tensor transpose_14 = transpose(perm = var_2666_perm_0, x = var_2665_cast)[name = tensor("transpose_14")]; + tensor value_states_119_cast = reshape(shape = var_2681, x = transpose_14)[name = tensor("value_states_119_cast")]; + tensor var_2684_perm_0 = const()[name = tensor("op_2684_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_175_transpose_x_0 = const()[name = tensor("attn_weights_175_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_175_transpose_y_0 = const()[name = tensor("attn_weights_175_transpose_y_0"), val = tensor(false)]; + tensor transpose_12 = transpose(perm = var_2684_perm_0, x = key_states_119_cast)[name = tensor("transpose_12")]; + tensor attn_weights_175_cast = matmul(transpose_x = attn_weights_175_transpose_x_0, transpose_y = attn_weights_175_transpose_y_0, x = query_states_59_cast, y = transpose_12)[name = tensor("attn_weights_175_cast")]; + tensor var_2686 = const()[name = tensor("op_2686"), val = tensor([1, 20, 77, 77])]; + tensor var_2687_cast = reshape(shape = var_2686, x = attn_weights_175_cast)[name = tensor("op_2687_cast")]; + tensor attn_weights_177_cast = add(x = var_2687_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_177_cast")]; + tensor var_2692 = const()[name = tensor("op_2692"), val = tensor([20, 77, 77])]; + tensor input_469_cast = reshape(shape = var_2692, x = attn_weights_177_cast)[name = tensor("input_469_cast")]; + tensor input_471_cast = softmax(axis = var_5, x = input_469_cast)[name = tensor("input_471_cast")]; + tensor attn_output_175_transpose_x_0 = const()[name = tensor("attn_output_175_transpose_x_0"), val = tensor(false)]; + tensor attn_output_175_transpose_y_0 = const()[name = tensor("attn_output_175_transpose_y_0"), val = tensor(false)]; + tensor attn_output_175_cast = matmul(transpose_x = attn_output_175_transpose_x_0, transpose_y = attn_output_175_transpose_y_0, x = input_471_cast, y = value_states_119_cast)[name = tensor("attn_output_175_cast")]; + tensor var_2697 = const()[name = tensor("op_2697"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_177_cast = reshape(shape = var_2697, x = attn_output_175_cast)[name = tensor("attn_output_177_cast")]; + tensor attn_output_179_perm_0 = const()[name = tensor("attn_output_179_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2700 = const()[name = tensor("op_2700"), val = tensor([1, 77, 1280])]; + tensor transpose_11 = transpose(perm = attn_output_179_perm_0, x 
= attn_output_177_cast)[name = tensor("transpose_11")]; + tensor input_473_cast = reshape(shape = var_2700, x = transpose_11)[name = tensor("input_473_cast")]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1277858688)))]; + tensor text_encoder_text_model_encoder_layers_29_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1281135552)))]; + tensor hidden_states_177_cast = linear(bias = text_encoder_text_model_encoder_layers_29_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_29_self_attn_out_proj_weight_to_fp16, x = input_473_cast)[name = tensor("hidden_states_177_cast")]; + tensor input_475_cast = add(x = input_467_cast, y = hidden_states_177_cast)[name = tensor("input_475_cast")]; + tensor input_477_axes_0 = const()[name = tensor("input_477_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_29_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1281138176)))]; + tensor text_encoder_text_model_encoder_layers_29_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1281140800)))]; + tensor input_477_cast = layer_norm(axes = input_477_axes_0, beta = text_encoder_text_model_encoder_layers_29_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_29_layer_norm2_weight_to_fp16, x = input_475_cast)[name = tensor("input_477_cast")]; + tensor text_encoder_text_model_encoder_layers_29_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1281143424)))]; + tensor text_encoder_text_model_encoder_layers_29_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1294250688)))]; + tensor input_479_cast = linear(bias = text_encoder_text_model_encoder_layers_29_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_29_mlp_fc1_weight_to_fp16, x = input_477_cast)[name = tensor("input_479_cast")]; + tensor input_481_mode_0 = const()[name = tensor("input_481_mode_0"), val = tensor("EXACT")]; + tensor input_481_cast = gelu(mode = input_481_mode_0, x = input_479_cast)[name = tensor("input_481_cast")]; + tensor text_encoder_text_model_encoder_layers_29_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1294260992)))]; + tensor text_encoder_text_model_encoder_layers_29_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_29_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1307368256)))]; + tensor hidden_states_179_cast = linear(bias = text_encoder_text_model_encoder_layers_29_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_29_mlp_fc2_weight_to_fp16, x = input_481_cast)[name = tensor("hidden_states_179_cast")]; + tensor input_483_cast = add(x = input_475_cast, y = hidden_states_179_cast)[name = tensor("input_483_cast")]; + tensor hidden_states_181_axes_0 = const()[name = tensor("hidden_states_181_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_30_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1307370880)))]; + tensor text_encoder_text_model_encoder_layers_30_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1307373504)))]; + tensor hidden_states_181_cast = layer_norm(axes = hidden_states_181_axes_0, beta = text_encoder_text_model_encoder_layers_30_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_30_layer_norm1_weight_to_fp16, x = input_483_cast)[name = tensor("hidden_states_181_cast")]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1307376128)))]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1310652992)))]; + tensor var_2738_cast = linear(bias = text_encoder_text_model_encoder_layers_30_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_30_self_attn_q_proj_weight_to_fp16, x = hidden_states_181_cast)[name = tensor("op_2738_cast")]; + tensor var_2739_to_fp16 = const()[name = tensor("op_2739_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_185_cast = mul(x = var_2738_cast, y = var_2739_to_fp16)[name = tensor("tensor_185_cast")]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1310655616)))]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1313932480)))]; + tensor tensor_181_cast = linear(bias = text_encoder_text_model_encoder_layers_30_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_30_self_attn_k_proj_weight_to_fp16, x = hidden_states_181_cast)[name = tensor("tensor_181_cast")]; + tensor var_2744 = const()[name = tensor("op_2744"), val = tensor([1, -1, 20, 64])]; + tensor var_2745_cast = reshape(shape = var_2744, x = tensor_181_cast)[name = tensor("op_2745_cast")]; + tensor var_2746_perm_0 = const()[name = tensor("op_2746_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor 
text_encoder_text_model_encoder_layers_30_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1313935104)))]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1317211968)))]; + tensor tensor_183_cast = linear(bias = text_encoder_text_model_encoder_layers_30_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_30_self_attn_v_proj_weight_to_fp16, x = hidden_states_181_cast)[name = tensor("tensor_183_cast")]; + tensor var_2751 = const()[name = tensor("op_2751"), val = tensor([1, -1, 20, 64])]; + tensor var_2752_cast = reshape(shape = var_2751, x = tensor_183_cast)[name = tensor("op_2752_cast")]; + tensor var_2753_perm_0 = const()[name = tensor("op_2753_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2760 = const()[name = tensor("op_2760"), val = tensor([1, 77, 20, 64])]; + tensor var_2761_cast = reshape(shape = var_2760, x = tensor_185_cast)[name = tensor("op_2761_cast")]; + tensor var_2762_perm_0 = const()[name = tensor("op_2762_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2764 = const()[name = tensor("op_2764"), val = tensor([20, -1, 64])]; + tensor transpose_8 = transpose(perm = var_2762_perm_0, x = var_2761_cast)[name = tensor("transpose_8")]; + tensor query_states_61_cast = reshape(shape = var_2764, x = transpose_8)[name = tensor("query_states_61_cast")]; + tensor var_2766 = const()[name = tensor("op_2766"), val = tensor([20, -1, 64])]; + tensor transpose_10 = transpose(perm = var_2746_perm_0, x = var_2745_cast)[name = tensor("transpose_10")]; + tensor key_states_123_cast = reshape(shape = var_2766, x = transpose_10)[name = tensor("key_states_123_cast")]; + tensor var_2768 = const()[name = tensor("op_2768"), val = tensor([20, -1, 64])]; + tensor transpose_9 = transpose(perm = var_2753_perm_0, x = var_2752_cast)[name = tensor("transpose_9")]; + tensor value_states_123_cast = reshape(shape = var_2768, x = transpose_9)[name = tensor("value_states_123_cast")]; + tensor var_2771_perm_0 = const()[name = tensor("op_2771_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_181_transpose_x_0 = const()[name = tensor("attn_weights_181_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_181_transpose_y_0 = const()[name = tensor("attn_weights_181_transpose_y_0"), val = tensor(false)]; + tensor transpose_7 = transpose(perm = var_2771_perm_0, x = key_states_123_cast)[name = tensor("transpose_7")]; + tensor attn_weights_181_cast = matmul(transpose_x = attn_weights_181_transpose_x_0, transpose_y = attn_weights_181_transpose_y_0, x = query_states_61_cast, y = transpose_7)[name = tensor("attn_weights_181_cast")]; + tensor var_2773 = const()[name = tensor("op_2773"), val = tensor([1, 20, 77, 77])]; + tensor var_2774_cast = reshape(shape = var_2773, x = attn_weights_181_cast)[name = tensor("op_2774_cast")]; + tensor attn_weights_183_cast = add(x = var_2774_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_183_cast")]; + tensor var_2779 = const()[name = tensor("op_2779"), val = tensor([20, 77, 77])]; + tensor input_485_cast = reshape(shape = var_2779, x = attn_weights_183_cast)[name = tensor("input_485_cast")]; + tensor input_487_cast = softmax(axis = var_5, x 
= input_485_cast)[name = tensor("input_487_cast")]; + tensor attn_output_181_transpose_x_0 = const()[name = tensor("attn_output_181_transpose_x_0"), val = tensor(false)]; + tensor attn_output_181_transpose_y_0 = const()[name = tensor("attn_output_181_transpose_y_0"), val = tensor(false)]; + tensor attn_output_181_cast = matmul(transpose_x = attn_output_181_transpose_x_0, transpose_y = attn_output_181_transpose_y_0, x = input_487_cast, y = value_states_123_cast)[name = tensor("attn_output_181_cast")]; + tensor var_2784 = const()[name = tensor("op_2784"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_183_cast = reshape(shape = var_2784, x = attn_output_181_cast)[name = tensor("attn_output_183_cast")]; + tensor attn_output_185_perm_0 = const()[name = tensor("attn_output_185_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2787 = const()[name = tensor("op_2787"), val = tensor([1, 77, 1280])]; + tensor transpose_6 = transpose(perm = attn_output_185_perm_0, x = attn_output_183_cast)[name = tensor("transpose_6")]; + tensor input_489_cast = reshape(shape = var_2787, x = transpose_6)[name = tensor("input_489_cast")]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1317214592)))]; + tensor text_encoder_text_model_encoder_layers_30_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1320491456)))]; + tensor hidden_states_183_cast = linear(bias = text_encoder_text_model_encoder_layers_30_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_30_self_attn_out_proj_weight_to_fp16, x = input_489_cast)[name = tensor("hidden_states_183_cast")]; + tensor input_491_cast = add(x = input_483_cast, y = hidden_states_183_cast)[name = tensor("input_491_cast")]; + tensor input_493_axes_0 = const()[name = tensor("input_493_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_30_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1320494080)))]; + tensor text_encoder_text_model_encoder_layers_30_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1320496704)))]; + tensor input_493_cast = layer_norm(axes = input_493_axes_0, beta = text_encoder_text_model_encoder_layers_30_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_30_layer_norm2_weight_to_fp16, x = input_491_cast)[name = tensor("input_493_cast")]; + tensor text_encoder_text_model_encoder_layers_30_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1320499328)))]; + tensor text_encoder_text_model_encoder_layers_30_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1333606592)))]; + tensor input_495_cast = linear(bias = text_encoder_text_model_encoder_layers_30_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_30_mlp_fc1_weight_to_fp16, x = input_493_cast)[name = tensor("input_495_cast")]; + tensor input_497_mode_0 = const()[name = tensor("input_497_mode_0"), val = tensor("EXACT")]; + tensor input_497_cast = gelu(mode = input_497_mode_0, x = input_495_cast)[name = tensor("input_497_cast")]; + tensor text_encoder_text_model_encoder_layers_30_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1333616896)))]; + tensor text_encoder_text_model_encoder_layers_30_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_30_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1346724160)))]; + tensor hidden_states_185_cast = linear(bias = text_encoder_text_model_encoder_layers_30_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_30_mlp_fc2_weight_to_fp16, x = input_497_cast)[name = tensor("hidden_states_185_cast")]; + tensor input_499_cast = add(x = input_491_cast, y = hidden_states_185_cast)[name = tensor("input_499_cast")]; + tensor input_499_cast_to_fp32_dtype_0 = const()[name = tensor("input_499_cast_to_fp32_dtype_0"), val = tensor("fp32")]; + tensor hidden_states_187_axes_0 = const()[name = tensor("hidden_states_187_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_31_layer_norm1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_layer_norm1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1346726784)))]; + tensor text_encoder_text_model_encoder_layers_31_layer_norm1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_layer_norm1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1346729408)))]; + tensor hidden_states_187_cast = layer_norm(axes = hidden_states_187_axes_0, beta = text_encoder_text_model_encoder_layers_31_layer_norm1_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_31_layer_norm1_weight_to_fp16, x = input_499_cast)[name = tensor("hidden_states_187_cast")]; + tensor text_encoder_text_model_encoder_layers_31_self_attn_q_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_q_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1346732032)))]; + tensor text_encoder_text_model_encoder_layers_31_self_attn_q_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_q_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350008896)))]; + tensor var_2825_cast = linear(bias = text_encoder_text_model_encoder_layers_31_self_attn_q_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_31_self_attn_q_proj_weight_to_fp16, x = hidden_states_187_cast)[name = tensor("op_2825_cast")]; + tensor var_2826_to_fp16 = const()[name = tensor("op_2826_to_fp16"), val = tensor(0x1p-3)]; + tensor tensor_cast = mul(x = var_2825_cast, y = var_2826_to_fp16)[name = tensor("tensor_cast")]; + tensor 
text_encoder_text_model_encoder_layers_31_self_attn_k_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_k_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350011520)))]; + tensor text_encoder_text_model_encoder_layers_31_self_attn_k_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_k_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353288384)))]; + tensor tensor_187_cast = linear(bias = text_encoder_text_model_encoder_layers_31_self_attn_k_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_31_self_attn_k_proj_weight_to_fp16, x = hidden_states_187_cast)[name = tensor("tensor_187_cast")]; + tensor var_2831 = const()[name = tensor("op_2831"), val = tensor([1, -1, 20, 64])]; + tensor var_2832_cast = reshape(shape = var_2831, x = tensor_187_cast)[name = tensor("op_2832_cast")]; + tensor var_2833_perm_0 = const()[name = tensor("op_2833_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor text_encoder_text_model_encoder_layers_31_self_attn_v_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_v_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353291008)))]; + tensor text_encoder_text_model_encoder_layers_31_self_attn_v_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_v_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1356567872)))]; + tensor tensor_189_cast = linear(bias = text_encoder_text_model_encoder_layers_31_self_attn_v_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_31_self_attn_v_proj_weight_to_fp16, x = hidden_states_187_cast)[name = tensor("tensor_189_cast")]; + tensor var_2838 = const()[name = tensor("op_2838"), val = tensor([1, -1, 20, 64])]; + tensor var_2839_cast = reshape(shape = var_2838, x = tensor_189_cast)[name = tensor("op_2839_cast")]; + tensor var_2840_perm_0 = const()[name = tensor("op_2840_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2847 = const()[name = tensor("op_2847"), val = tensor([1, 77, 20, 64])]; + tensor var_2848_cast = reshape(shape = var_2847, x = tensor_cast)[name = tensor("op_2848_cast")]; + tensor var_2849_perm_0 = const()[name = tensor("op_2849_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2851 = const()[name = tensor("op_2851"), val = tensor([20, -1, 64])]; + tensor transpose_3 = transpose(perm = var_2849_perm_0, x = var_2848_cast)[name = tensor("transpose_3")]; + tensor query_states_cast = reshape(shape = var_2851, x = transpose_3)[name = tensor("query_states_cast")]; + tensor var_2853 = const()[name = tensor("op_2853"), val = tensor([20, -1, 64])]; + tensor transpose_5 = transpose(perm = var_2833_perm_0, x = var_2832_cast)[name = tensor("transpose_5")]; + tensor key_states_cast = reshape(shape = var_2853, x = transpose_5)[name = tensor("key_states_cast")]; + tensor var_2855 = const()[name = tensor("op_2855"), val = tensor([20, -1, 64])]; + tensor transpose_4 = transpose(perm = var_2840_perm_0, x = var_2839_cast)[name = tensor("transpose_4")]; + tensor value_states_cast = reshape(shape = var_2855, x = transpose_4)[name = tensor("value_states_cast")]; + tensor var_2858_perm_0 = const()[name = tensor("op_2858_perm_0"), val = tensor([0, 2, 1])]; + tensor attn_weights_187_transpose_x_0 = 
const()[name = tensor("attn_weights_187_transpose_x_0"), val = tensor(false)]; + tensor attn_weights_187_transpose_y_0 = const()[name = tensor("attn_weights_187_transpose_y_0"), val = tensor(false)]; + tensor transpose_2 = transpose(perm = var_2858_perm_0, x = key_states_cast)[name = tensor("transpose_2")]; + tensor attn_weights_187_cast = matmul(transpose_x = attn_weights_187_transpose_x_0, transpose_y = attn_weights_187_transpose_y_0, x = query_states_cast, y = transpose_2)[name = tensor("attn_weights_187_cast")]; + tensor var_2860 = const()[name = tensor("op_2860"), val = tensor([1, 20, 77, 77])]; + tensor var_2861_cast = reshape(shape = var_2860, x = attn_weights_187_cast)[name = tensor("op_2861_cast")]; + tensor attn_weights_189_cast = add(x = var_2861_cast, y = causal_attention_mask_to_fp16)[name = tensor("attn_weights_189_cast")]; + tensor var_2866 = const()[name = tensor("op_2866"), val = tensor([20, 77, 77])]; + tensor input_501_cast = reshape(shape = var_2866, x = attn_weights_189_cast)[name = tensor("input_501_cast")]; + tensor input_503_cast = softmax(axis = var_5, x = input_501_cast)[name = tensor("input_503_cast")]; + tensor attn_output_187_transpose_x_0 = const()[name = tensor("attn_output_187_transpose_x_0"), val = tensor(false)]; + tensor attn_output_187_transpose_y_0 = const()[name = tensor("attn_output_187_transpose_y_0"), val = tensor(false)]; + tensor attn_output_187_cast = matmul(transpose_x = attn_output_187_transpose_x_0, transpose_y = attn_output_187_transpose_y_0, x = input_503_cast, y = value_states_cast)[name = tensor("attn_output_187_cast")]; + tensor var_2871 = const()[name = tensor("op_2871"), val = tensor([1, 20, 77, 64])]; + tensor attn_output_189_cast = reshape(shape = var_2871, x = attn_output_187_cast)[name = tensor("attn_output_189_cast")]; + tensor attn_output_perm_0 = const()[name = tensor("attn_output_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_2874 = const()[name = tensor("op_2874"), val = tensor([1, 77, 1280])]; + tensor transpose_1 = transpose(perm = attn_output_perm_0, x = attn_output_189_cast)[name = tensor("transpose_1")]; + tensor input_505_cast = reshape(shape = var_2874, x = transpose_1)[name = tensor("input_505_cast")]; + tensor text_encoder_text_model_encoder_layers_31_self_attn_out_proj_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_out_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1356570496)))]; + tensor text_encoder_text_model_encoder_layers_31_self_attn_out_proj_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_self_attn_out_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359847360)))]; + tensor hidden_states_189_cast = linear(bias = text_encoder_text_model_encoder_layers_31_self_attn_out_proj_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_31_self_attn_out_proj_weight_to_fp16, x = input_505_cast)[name = tensor("hidden_states_189_cast")]; + tensor input_507_cast = add(x = input_499_cast, y = hidden_states_189_cast)[name = tensor("input_507_cast")]; + tensor input_509_axes_0 = const()[name = tensor("input_509_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_encoder_layers_31_layer_norm2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_layer_norm2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359849984)))]; + 
tensor text_encoder_text_model_encoder_layers_31_layer_norm2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_layer_norm2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359852608)))]; + tensor input_509_cast = layer_norm(axes = input_509_axes_0, beta = text_encoder_text_model_encoder_layers_31_layer_norm2_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_encoder_layers_31_layer_norm2_weight_to_fp16, x = input_507_cast)[name = tensor("input_509_cast")]; + tensor text_encoder_text_model_encoder_layers_31_mlp_fc1_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_mlp_fc1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359855232)))]; + tensor text_encoder_text_model_encoder_layers_31_mlp_fc1_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_mlp_fc1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1372962496)))]; + tensor input_511_cast = linear(bias = text_encoder_text_model_encoder_layers_31_mlp_fc1_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_31_mlp_fc1_weight_to_fp16, x = input_509_cast)[name = tensor("input_511_cast")]; + tensor input_513_mode_0 = const()[name = tensor("input_513_mode_0"), val = tensor("EXACT")]; + tensor input_513_cast = gelu(mode = input_513_mode_0, x = input_511_cast)[name = tensor("input_513_cast")]; + tensor text_encoder_text_model_encoder_layers_31_mlp_fc2_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_mlp_fc2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1372972800)))]; + tensor text_encoder_text_model_encoder_layers_31_mlp_fc2_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_encoder_layers_31_mlp_fc2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386080064)))]; + tensor hidden_states_cast = linear(bias = text_encoder_text_model_encoder_layers_31_mlp_fc2_bias_to_fp16, weight = text_encoder_text_model_encoder_layers_31_mlp_fc2_weight_to_fp16, x = input_513_cast)[name = tensor("hidden_states_cast")]; + tensor input_515_cast = add(x = input_507_cast, y = hidden_states_cast)[name = tensor("input_515_cast")]; + tensor last_hidden_state_axes_0 = const()[name = tensor("last_hidden_state_axes_0"), val = tensor([-1])]; + tensor text_encoder_text_model_final_layer_norm_weight_to_fp16 = const()[name = tensor("text_encoder_text_model_final_layer_norm_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386082688)))]; + tensor text_encoder_text_model_final_layer_norm_bias_to_fp16 = const()[name = tensor("text_encoder_text_model_final_layer_norm_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386085312)))]; + tensor last_hidden_state_cast = layer_norm(axes = last_hidden_state_axes_0, beta = text_encoder_text_model_final_layer_norm_bias_to_fp16, epsilon = var_12_to_fp16, gamma = text_encoder_text_model_final_layer_norm_weight_to_fp16, x = input_515_cast)[name = tensor("last_hidden_state_cast")]; + tensor var_2902 = const()[name = tensor("op_2902"), val = tensor([0])]; + tensor var_2904 = reduce_argmax(axis = var_5, keep_dims = var_6, x = cast_1322)[name = tensor("op_2904")]; + tensor stack_0_axis_0 = const()[name = 
tensor("stack_0_axis_0"), val = tensor(1)]; + tensor stack_0 = stack(axis = stack_0_axis_0, values = (var_2902, var_2904))[name = tensor("stack_0")]; + tensor input_transpose_batch_dims_0 = const()[name = tensor("input_transpose_batch_dims_0"), val = tensor(0)]; + tensor input_transpose_cast = gather_nd(batch_dims = input_transpose_batch_dims_0, indices = stack_0, x = last_hidden_state_cast)[name = tensor("input_transpose_cast")]; + tensor text_encoder_text_projection_weight_to_fp16 = const()[name = tensor("text_encoder_text_projection_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386087936)))]; + tensor var_2911_bias_0_to_fp16 = const()[name = tensor("op_2911_bias_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1389364800)))]; + tensor var_2911_cast = linear(bias = var_2911_bias_0_to_fp16, weight = text_encoder_text_projection_weight_to_fp16, x = input_transpose_cast)[name = tensor("op_2911_cast")]; + tensor var_2911_cast_to_fp32_dtype_0 = const()[name = tensor("op_2911_cast_to_fp32_dtype_0"), val = tensor("fp32")]; + tensor pooled_outputs = cast(dtype = var_2911_cast_to_fp32_dtype_0, x = var_2911_cast)[name = tensor("cast_325")]; + tensor hidden_embeds = cast(dtype = input_499_cast_to_fp32_dtype_0, x = input_499_cast)[name = tensor("cast_359")]; + } -> (hidden_embeds, pooled_outputs); +} \ No newline at end of file diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder2.mlmodelc/weights/weight.bin b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder2.mlmodelc/weights/weight.bin new file mode 100644 index 0000000000000000000000000000000000000000..4404ced8325eb5e803c3b4b2be80bf0b4517dce4 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/TextEncoder2.mlmodelc/weights/weight.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8bd1fc0bcce11cff685648387b0060e0b6ecfce6c34e580e1ae904cae5903363 +size 1389367424 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/Unet.mlmodelc/analytics/coremldata.bin b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/Unet.mlmodelc/analytics/coremldata.bin new file mode 100644 index 0000000000000000000000000000000000000000..eb60d599194a9b6cff324207a0c3eb731dd9c13d --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/Unet.mlmodelc/analytics/coremldata.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48f726f7f29091a23eb434bd4febf1f83733c95db26b912dfa671cdccad5d874 +size 243 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/Unet.mlmodelc/coremldata.bin b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/Unet.mlmodelc/coremldata.bin new file mode 100644 index 0000000000000000000000000000000000000000..6ec4882d93c41f4d859f419572182f8503ec75fc --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/Unet.mlmodelc/coremldata.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8aea3887ffdc8e059925f3981259e1cd3227b827e5f91edff613c73ac0ea16f6 +size 1338 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/Unet.mlmodelc/metadata.json b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/Unet.mlmodelc/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..cfc4e39d604df0fb34c37b3ffeafe70d32210d51 --- /dev/null +++ 
b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/Unet.mlmodelc/metadata.json @@ -0,0 +1,124 @@ +[ + { + "shortDescription" : "Stable Diffusion generates images conditioned on text or other images as input through the diffusion process. Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.", + "metadataOutputVersion" : "3.0", + "outputSchema" : [ + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float32", + "formattedType" : "MultiArray (Float32)", + "shortDescription" : "Same shape and dtype as the `sample` input. The predicted noise to facilitate the reverse diffusion (denoising) process", + "shape" : "[]", + "name" : "noise_pred", + "type" : "MultiArray" + } + ], + "version" : "diffusers\/stable-diffusion-xl-base-1.0", + "modelParameters" : [ + + ], + "author" : "Please refer to the Model Card available at huggingface.co\/diffusers\/stable-diffusion-xl-base-1.0", + "specificationVersion" : 7, + "storagePrecision" : "Mixed (Float16, Palettized (1 bits), Palettized (2 bits), Palettized (4 bits), Palettized (6 bits), Palettized (8 bits))", + "license" : "OpenRAIL (https:\/\/huggingface.co\/spaces\/CompVis\/stable-diffusion-license)", + "mlProgramOperationTypeHistogram" : { + "UpsampleNearestNeighbor" : 2, + "Ios16.reduceMean" : 512, + "Ios16.sin" : 2, + "Ios16.softmax" : 140, + "Split" : 70, + "Ios16.add" : 722, + "Concat" : 14, + "Ios16.realDiv" : 46, + "Ios16.square" : 46, + "ExpandDims" : 6, + "Ios16.sub" : 256, + "Ios16.cast" : 1, + "Ios16.conv" : 794, + "Ios16.constexprLutToDense" : 775, + "Ios16.gelu" : 70, + "Ios16.matmul" : 280, + "Ios16.batchNorm" : 46, + "Ios16.reshape" : 675, + "Ios16.rsqrt" : 210, + "Ios16.silu" : 38, + "Ios16.sqrt" : 46, + "Ios16.mul" : 842, + "Ios16.cos" : 2, + "SliceByIndex" : 4 + }, + "computePrecision" : "Mixed (Float16, Float32, Int32)", + "isUpdatable" : "0", + "availability" : { + "macOS" : "13.0", + "tvOS" : "16.0", + "visionOS" : "1.0", + "watchOS" : "9.0", + "iOS" : "16.0", + "macCatalyst" : "16.0" + }, + "modelType" : { + "name" : "MLModelType_mlProgram" + }, + "inputSchema" : [ + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 2 × 4 × 128 × 128)", + "shortDescription" : "The low resolution latent feature maps being denoised through reverse diffusion", + "shape" : "[2, 4, 128, 128]", + "name" : "sample", + "type" : "MultiArray" + }, + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 2)", + "shortDescription" : "A value emitted by the associated scheduler object to condition the model on a given noise schedule", + "shape" : "[2]", + "name" : "timestep", + "type" : "MultiArray" + }, + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 2 × 2048 × 1 × 77)", + "shortDescription" : "Output embeddings from the associated text_encoder model to condition the generated image on text. A maximum of 77 tokens (~40 words) are allowed. Longer text is truncated. 
Shorter text does not reduce computation.", + "shape" : "[2, 2048, 1, 77]", + "name" : "encoder_hidden_states", + "type" : "MultiArray" + }, + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 12)", + "shortDescription" : "", + "shape" : "[12]", + "name" : "time_ids", + "type" : "MultiArray" + }, + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 2 × 1280)", + "shortDescription" : "", + "shape" : "[2, 1280]", + "name" : "text_embeds", + "type" : "MultiArray" + } + ], + "userDefinedMetadata" : { + "com.github.apple.coremltools.version" : "7.0b1", + "com.github.apple.coremltools.source" : "torch==2.0.1+cu117", + "com.github.apple.ml-stable-diffusion.version" : "1.0.0" + }, + "generatedClassName" : "recipe_4_50_bit_mixedpalette", + "method" : "predict" + } +] \ No newline at end of file diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/Unet.mlmodelc/model.mil b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/Unet.mlmodelc/model.mil new file mode 100644 index 0000000000000000000000000000000000000000..f64fd3ffc051e97cd7f9701d941beb7ee45e6e14 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/Unet.mlmodelc/model.mil @@ -0,0 +1,12327 @@ +program(1.0) +[buildInfo = dict, tensor>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.0.48"}})] +{ + func main(tensor encoder_hidden_states, tensor sample, tensor text_embeds, tensor time_ids, tensor timestep) { + tensor var_24 = const()[name = tensor("op_24"), val = tensor(-1)]; + tensor var_41_axes_0 = const()[name = tensor("op_41_axes_0"), val = tensor([1])]; + tensor var_41_cast = expand_dims(axes = var_41_axes_0, x = timestep)[name = tensor("op_41_cast")]; + tensor var_43_to_fp16 = const()[name = tensor("op_43_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(64)))]; + tensor emb_3_cast = mul(x = var_41_cast, y = var_43_to_fp16)[name = tensor("emb_3_cast")]; + tensor var_48_cast = sin(x = emb_3_cast)[name = tensor("op_48_cast")]; + tensor var_49_cast = cos(x = emb_3_cast)[name = tensor("op_49_cast")]; + tensor emb_7_interleave_0 = const()[name = tensor("emb_7_interleave_0"), val = tensor(false)]; + tensor emb_7_cast = concat(axis = var_24, interleave = emb_7_interleave_0, values = (var_48_cast, var_49_cast))[name = tensor("emb_7_cast")]; + tensor var_53_begin_0 = const()[name = tensor("op_53_begin_0"), val = tensor([0, 160])]; + tensor var_53_end_0 = const()[name = tensor("op_53_end_0"), val = tensor([2, 320])]; + tensor var_53_end_mask_0 = const()[name = tensor("op_53_end_mask_0"), val = tensor([true, true])]; + tensor var_53_cast = slice_by_index(begin = var_53_begin_0, end = var_53_end_0, end_mask = var_53_end_mask_0, x = emb_7_cast)[name = tensor("op_53_cast")]; + tensor var_55_begin_0 = const()[name = tensor("op_55_begin_0"), val = tensor([0, 0])]; + tensor var_55_end_0 = const()[name = tensor("op_55_end_0"), val = tensor([2, 160])]; + tensor var_55_end_mask_0 = const()[name = tensor("op_55_end_mask_0"), val = tensor([true, false])]; + tensor var_55_cast = slice_by_index(begin = var_55_begin_0, end = var_55_end_0, end_mask = var_55_end_mask_0, x = emb_7_cast)[name = tensor("op_55_cast")]; + tensor sample_3_interleave_0 = const()[name = tensor("sample_3_interleave_0"), val = tensor(false)]; + tensor sample_3_cast = concat(axis = var_24, interleave = sample_3_interleave_0, values = 
(var_53_cast, var_55_cast))[name = tensor("sample_3_cast")]; + tensor var_58 = const()[name = tensor("op_58"), val = tensor(1)]; + tensor var_65_axes_0 = const()[name = tensor("op_65_axes_0"), val = tensor([-1])]; + tensor var_65_cast = expand_dims(axes = var_65_axes_0, x = sample_3_cast)[name = tensor("op_65_cast")]; + tensor input_1_axes_0 = const()[name = tensor("input_1_axes_0"), val = tensor([-1])]; + tensor input_1_cast = expand_dims(axes = input_1_axes_0, x = var_65_cast)[name = tensor("input_1_cast")]; + tensor var_69 = const()[name = tensor("op_69"), val = tensor([1, 1])]; + tensor var_71 = const()[name = tensor("op_71"), val = tensor([1, 1])]; + tensor input_3_pad_type_0 = const()[name = tensor("input_3_pad_type_0"), val = tensor("custom")]; + tensor input_3_pad_0 = const()[name = tensor("input_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor time_embedding_linear_1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410112))), name = tensor("time_embedding_linear_1_weight_to_fp16_palettized"), shape = tensor([1280, 320, 1, 1])]; + tensor time_embedding_linear_1_bias_to_fp16 = const()[name = tensor("time_embedding_linear_1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410688)))]; + tensor input_3_cast = conv(bias = time_embedding_linear_1_bias_to_fp16, dilations = var_71, groups = var_58, pad = input_3_pad_0, pad_type = input_3_pad_type_0, strides = var_69, weight = time_embedding_linear_1_weight_to_fp16_palettized, x = input_1_cast)[name = tensor("input_3_cast")]; + tensor input_5_cast = silu(x = input_3_cast)[name = tensor("input_5_cast")]; + tensor var_77 = const()[name = tensor("op_77"), val = tensor([1, 1])]; + tensor var_79 = const()[name = tensor("op_79"), val = tensor([1, 1])]; + tensor emb_pad_type_0 = const()[name = tensor("emb_pad_type_0"), val = tensor("custom")]; + tensor emb_pad_0 = const()[name = tensor("emb_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor time_embedding_linear_2_weight_to_fp16 = const()[name = tensor("time_embedding_linear_2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(413312)))]; + tensor time_embedding_linear_2_bias_to_fp16 = const()[name = tensor("time_embedding_linear_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(3690176)))]; + tensor emb_cast = conv(bias = time_embedding_linear_2_bias_to_fp16, dilations = var_79, groups = var_58, pad = emb_pad_0, pad_type = emb_pad_type_0, strides = var_77, weight = time_embedding_linear_2_weight_to_fp16, x = input_5_cast)[name = tensor("emb_cast")]; + tensor var_85 = const()[name = tensor("op_85"), val = tensor(-1)]; + tensor var_102_axes_0 = const()[name = tensor("op_102_axes_0"), val = tensor([1])]; + tensor var_102_cast = expand_dims(axes = var_102_axes_0, x = time_ids)[name = tensor("op_102_cast")]; + tensor var_104_to_fp16 = const()[name = tensor("op_104_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(3692800)))]; + tensor emb_11_cast = mul(x = var_102_cast, y = var_104_to_fp16)[name = tensor("emb_11_cast")]; + tensor var_109_cast = sin(x = emb_11_cast)[name = tensor("op_109_cast")]; + tensor var_110_cast = cos(x = emb_11_cast)[name = tensor("op_110_cast")]; + tensor emb_15_interleave_0 = 
const()[name = tensor("emb_15_interleave_0"), val = tensor(false)]; + tensor emb_15_cast = concat(axis = var_85, interleave = emb_15_interleave_0, values = (var_109_cast, var_110_cast))[name = tensor("emb_15_cast")]; + tensor var_114_begin_0 = const()[name = tensor("op_114_begin_0"), val = tensor([0, 128])]; + tensor var_114_end_0 = const()[name = tensor("op_114_end_0"), val = tensor([12, 256])]; + tensor var_114_end_mask_0 = const()[name = tensor("op_114_end_mask_0"), val = tensor([true, true])]; + tensor var_114_cast = slice_by_index(begin = var_114_begin_0, end = var_114_end_0, end_mask = var_114_end_mask_0, x = emb_15_cast)[name = tensor("op_114_cast")]; + tensor var_116_begin_0 = const()[name = tensor("op_116_begin_0"), val = tensor([0, 0])]; + tensor var_116_end_0 = const()[name = tensor("op_116_end_0"), val = tensor([12, 128])]; + tensor var_116_end_mask_0 = const()[name = tensor("op_116_end_mask_0"), val = tensor([true, false])]; + tensor var_116_cast = slice_by_index(begin = var_116_begin_0, end = var_116_end_0, end_mask = var_116_end_mask_0, x = emb_15_cast)[name = tensor("op_116_cast")]; + tensor time_embeds_1_interleave_0 = const()[name = tensor("time_embeds_1_interleave_0"), val = tensor(false)]; + tensor time_embeds_1_cast = concat(axis = var_85, interleave = time_embeds_1_interleave_0, values = (var_114_cast, var_116_cast))[name = tensor("time_embeds_1_cast")]; + tensor var_124 = const()[name = tensor("op_124"), val = tensor([2, -1])]; + tensor time_embeds_cast = reshape(shape = var_124, x = time_embeds_1_cast)[name = tensor("time_embeds_cast")]; + tensor var_127 = const()[name = tensor("op_127"), val = tensor(-1)]; + tensor sample_interleave_0 = const()[name = tensor("sample_interleave_0"), val = tensor(false)]; + tensor sample_cast = concat(axis = var_127, interleave = sample_interleave_0, values = (text_embeds, time_embeds_cast))[name = tensor("sample_cast")]; + tensor var_129 = const()[name = tensor("op_129"), val = tensor(1)]; + tensor var_136_axes_0 = const()[name = tensor("op_136_axes_0"), val = tensor([-1])]; + tensor var_136_cast = expand_dims(axes = var_136_axes_0, x = sample_cast)[name = tensor("op_136_cast")]; + tensor input_7_axes_0 = const()[name = tensor("input_7_axes_0"), val = tensor([-1])]; + tensor input_7_cast = expand_dims(axes = input_7_axes_0, x = var_136_cast)[name = tensor("input_7_cast")]; + tensor var_140 = const()[name = tensor("op_140"), val = tensor([1, 1])]; + tensor var_142 = const()[name = tensor("op_142"), val = tensor([1, 1])]; + tensor input_9_pad_type_0 = const()[name = tensor("input_9_pad_type_0"), val = tensor("custom")]; + tensor input_9_pad_0 = const()[name = tensor("input_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor add_embedding_linear_1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(3693120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7297664))), name = tensor("add_embedding_linear_1_weight_to_fp16_palettized"), shape = tensor([1280, 2816, 1, 1])]; + tensor add_embedding_linear_1_bias_to_fp16 = const()[name = tensor("add_embedding_linear_1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7298240)))]; + tensor input_9_cast = conv(bias = add_embedding_linear_1_bias_to_fp16, dilations = var_142, groups = var_129, pad = input_9_pad_0, pad_type = input_9_pad_type_0, strides = var_140, weight = 
add_embedding_linear_1_weight_to_fp16_palettized, x = input_7_cast)[name = tensor("input_9_cast")]; + tensor input_11_cast = silu(x = input_9_cast)[name = tensor("input_11_cast")]; + tensor var_148 = const()[name = tensor("op_148"), val = tensor([1, 1])]; + tensor var_150 = const()[name = tensor("op_150"), val = tensor([1, 1])]; + tensor aug_emb_pad_type_0 = const()[name = tensor("aug_emb_pad_type_0"), val = tensor("custom")]; + tensor aug_emb_pad_0 = const()[name = tensor("aug_emb_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor add_embedding_linear_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7300864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8939328))), name = tensor("add_embedding_linear_2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor add_embedding_linear_2_bias_to_fp16 = const()[name = tensor("add_embedding_linear_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8939904)))]; + tensor aug_emb_cast = conv(bias = add_embedding_linear_2_bias_to_fp16, dilations = var_150, groups = var_129, pad = aug_emb_pad_0, pad_type = aug_emb_pad_type_0, strides = var_148, weight = add_embedding_linear_2_weight_to_fp16_palettized, x = input_11_cast)[name = tensor("aug_emb_cast")]; + tensor input_19_cast = add(x = emb_cast, y = aug_emb_cast)[name = tensor("input_19_cast")]; + tensor var_158 = const()[name = tensor("op_158"), val = tensor(1)]; + tensor var_161 = const()[name = tensor("op_161"), val = tensor([1, 1])]; + tensor var_163 = const()[name = tensor("op_163"), val = tensor([1, 1])]; + tensor input_13_pad_type_0 = const()[name = tensor("input_13_pad_type_0"), val = tensor("custom")]; + tensor input_13_pad_0 = const()[name = tensor("input_13_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor conv_in_weight_to_fp16 = const()[name = tensor("conv_in_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8942528)))]; + tensor conv_in_bias_to_fp16 = const()[name = tensor("conv_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8965632)))]; + tensor input_13_cast = conv(bias = conv_in_bias_to_fp16, dilations = var_163, groups = var_158, pad = input_13_pad_0, pad_type = input_13_pad_type_0, strides = var_161, weight = conv_in_weight_to_fp16, x = sample)[name = tensor("input_13_cast")]; + tensor var_172 = const()[name = tensor("op_172"), val = tensor(1)]; + tensor reshape_0_shape_0 = const()[name = tensor("reshape_0_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_0_cast = reshape(shape = reshape_0_shape_0, x = input_13_cast)[name = tensor("reshape_0_cast")]; + tensor reduce_mean_0_axes_0 = const()[name = tensor("reduce_mean_0_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_0_keep_dims_0 = const()[name = tensor("reduce_mean_0_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_0_cast = reduce_mean(axes = reduce_mean_0_axes_0, keep_dims = reduce_mean_0_keep_dims_0, x = reshape_0_cast)[name = tensor("reduce_mean_0_cast")]; + tensor sub_0_cast = sub(x = reshape_0_cast, y = reduce_mean_0_cast)[name = tensor("sub_0_cast")]; + tensor square_0_cast = square(x = sub_0_cast)[name = tensor("square_0_cast")]; + tensor reduce_mean_2_axes_0 = const()[name = tensor("reduce_mean_2_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_2_keep_dims_0 = const()[name 
= tensor("reduce_mean_2_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_2_cast = reduce_mean(axes = reduce_mean_2_axes_0, keep_dims = reduce_mean_2_keep_dims_0, x = square_0_cast)[name = tensor("reduce_mean_2_cast")]; + tensor add_0_y_0_to_fp16 = const()[name = tensor("add_0_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_0_cast = add(x = reduce_mean_2_cast, y = add_0_y_0_to_fp16)[name = tensor("add_0_cast")]; + tensor sqrt_0_cast = sqrt(x = add_0_cast)[name = tensor("sqrt_0_cast")]; + tensor real_div_0_cast = real_div(x = sub_0_cast, y = sqrt_0_cast)[name = tensor("real_div_0_cast")]; + tensor reshape_1_shape_0 = const()[name = tensor("reshape_1_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_1_cast = reshape(shape = reshape_1_shape_0, x = real_div_0_cast)[name = tensor("reshape_1_cast")]; + tensor add_1_mean_0_to_fp16 = const()[name = tensor("add_1_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8966336)))]; + tensor add_1_variance_0_to_fp16 = const()[name = tensor("add_1_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8967040)))]; + tensor add_1_gamma_0_to_fp16 = const()[name = tensor("add_1_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8967744)))]; + tensor add_1_beta_0_to_fp16 = const()[name = tensor("add_1_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8968448)))]; + tensor add_1_epsilon_0_to_fp16 = const()[name = tensor("add_1_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_1_cast = batch_norm(beta = add_1_beta_0_to_fp16, epsilon = add_1_epsilon_0_to_fp16, gamma = add_1_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_1_cast)[name = tensor("add_1_cast")]; + tensor input_17_cast = silu(x = add_1_cast)[name = tensor("input_17_cast")]; + tensor var_190 = const()[name = tensor("op_190"), val = tensor([1, 1])]; + tensor var_192 = const()[name = tensor("op_192"), val = tensor([1, 1])]; + tensor hidden_states_1_pad_type_0 = const()[name = tensor("hidden_states_1_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_1_pad_0 = const()[name = tensor("hidden_states_1_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_0_resnets_0_conv1_weight_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8969152)))]; + tensor down_blocks_0_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(10812416)))]; + tensor hidden_states_1_cast = conv(bias = down_blocks_0_resnets_0_conv1_bias_to_fp16, dilations = var_192, groups = var_172, pad = hidden_states_1_pad_0, pad_type = hidden_states_1_pad_type_0, strides = var_190, weight = down_blocks_0_resnets_0_conv1_weight_to_fp16, x = input_17_cast)[name = tensor("hidden_states_1_cast")]; + tensor input_21_cast = silu(x = input_19_cast)[name = tensor("input_21_cast")]; + tensor var_198 = const()[name = tensor("op_198"), val = tensor([1, 1])]; + tensor var_200 = const()[name = tensor("op_200"), val = tensor([1, 1])]; + tensor temb_1_pad_type_0 = const()[name = tensor("temb_1_pad_type_0"), val = tensor("custom")]; + tensor temb_1_pad_0 = const()[name = tensor("temb_1_pad_0"), val = 
tensor([0, 0, 0, 0])]; + tensor down_blocks_0_resnets_0_time_emb_proj_weight_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_time_emb_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(10813120)))]; + tensor down_blocks_0_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11632384)))]; + tensor temb_1_cast = conv(bias = down_blocks_0_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_200, groups = var_172, pad = temb_1_pad_0, pad_type = temb_1_pad_type_0, strides = var_198, weight = down_blocks_0_resnets_0_time_emb_proj_weight_to_fp16, x = input_21_cast)[name = tensor("temb_1_cast")]; + tensor input_23_cast = add(x = hidden_states_1_cast, y = temb_1_cast)[name = tensor("input_23_cast")]; + tensor reshape_4_shape_0 = const()[name = tensor("reshape_4_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_4_cast = reshape(shape = reshape_4_shape_0, x = input_23_cast)[name = tensor("reshape_4_cast")]; + tensor reduce_mean_3_axes_0 = const()[name = tensor("reduce_mean_3_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_3_keep_dims_0 = const()[name = tensor("reduce_mean_3_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_3_cast = reduce_mean(axes = reduce_mean_3_axes_0, keep_dims = reduce_mean_3_keep_dims_0, x = reshape_4_cast)[name = tensor("reduce_mean_3_cast")]; + tensor sub_2_cast = sub(x = reshape_4_cast, y = reduce_mean_3_cast)[name = tensor("sub_2_cast")]; + tensor square_1_cast = square(x = sub_2_cast)[name = tensor("square_1_cast")]; + tensor reduce_mean_5_axes_0 = const()[name = tensor("reduce_mean_5_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_5_keep_dims_0 = const()[name = tensor("reduce_mean_5_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_5_cast = reduce_mean(axes = reduce_mean_5_axes_0, keep_dims = reduce_mean_5_keep_dims_0, x = square_1_cast)[name = tensor("reduce_mean_5_cast")]; + tensor add_2_y_0_to_fp16 = const()[name = tensor("add_2_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_2_cast = add(x = reduce_mean_5_cast, y = add_2_y_0_to_fp16)[name = tensor("add_2_cast")]; + tensor sqrt_1_cast = sqrt(x = add_2_cast)[name = tensor("sqrt_1_cast")]; + tensor real_div_1_cast = real_div(x = sub_2_cast, y = sqrt_1_cast)[name = tensor("real_div_1_cast")]; + tensor reshape_5_shape_0 = const()[name = tensor("reshape_5_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_5_cast = reshape(shape = reshape_5_shape_0, x = real_div_1_cast)[name = tensor("reshape_5_cast")]; + tensor add_3_gamma_0_to_fp16 = const()[name = tensor("add_3_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11633088)))]; + tensor add_3_beta_0_to_fp16 = const()[name = tensor("add_3_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11633792)))]; + tensor add_3_epsilon_0_to_fp16 = const()[name = tensor("add_3_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_3_cast = batch_norm(beta = add_3_beta_0_to_fp16, epsilon = add_3_epsilon_0_to_fp16, gamma = add_3_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_5_cast)[name = tensor("add_3_cast")]; + tensor input_27_cast = silu(x = add_3_cast)[name = tensor("input_27_cast")]; + tensor var_210 = const()[name = tensor("op_210"), val = 
tensor([1, 1])]; + tensor var_212 = const()[name = tensor("op_212"), val = tensor([1, 1])]; + tensor hidden_states_3_pad_type_0 = const()[name = tensor("hidden_states_3_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_3_pad_0 = const()[name = tensor("hidden_states_3_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_0_resnets_0_conv2_weight_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11634496)))]; + tensor down_blocks_0_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(13477760)))]; + tensor hidden_states_3_cast = conv(bias = down_blocks_0_resnets_0_conv2_bias_to_fp16, dilations = var_212, groups = var_172, pad = hidden_states_3_pad_0, pad_type = hidden_states_3_pad_type_0, strides = var_210, weight = down_blocks_0_resnets_0_conv2_weight_to_fp16, x = input_27_cast)[name = tensor("hidden_states_3_cast")]; + tensor input_29_cast = add(x = input_13_cast, y = hidden_states_3_cast)[name = tensor("input_29_cast")]; + tensor reshape_8_shape_0 = const()[name = tensor("reshape_8_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_8_cast = reshape(shape = reshape_8_shape_0, x = input_29_cast)[name = tensor("reshape_8_cast")]; + tensor reduce_mean_6_axes_0 = const()[name = tensor("reduce_mean_6_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_6_keep_dims_0 = const()[name = tensor("reduce_mean_6_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_6_cast = reduce_mean(axes = reduce_mean_6_axes_0, keep_dims = reduce_mean_6_keep_dims_0, x = reshape_8_cast)[name = tensor("reduce_mean_6_cast")]; + tensor sub_4_cast = sub(x = reshape_8_cast, y = reduce_mean_6_cast)[name = tensor("sub_4_cast")]; + tensor square_2_cast = square(x = sub_4_cast)[name = tensor("square_2_cast")]; + tensor reduce_mean_8_axes_0 = const()[name = tensor("reduce_mean_8_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_8_keep_dims_0 = const()[name = tensor("reduce_mean_8_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_8_cast = reduce_mean(axes = reduce_mean_8_axes_0, keep_dims = reduce_mean_8_keep_dims_0, x = square_2_cast)[name = tensor("reduce_mean_8_cast")]; + tensor add_4_y_0_to_fp16 = const()[name = tensor("add_4_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_4_cast = add(x = reduce_mean_8_cast, y = add_4_y_0_to_fp16)[name = tensor("add_4_cast")]; + tensor sqrt_2_cast = sqrt(x = add_4_cast)[name = tensor("sqrt_2_cast")]; + tensor real_div_2_cast = real_div(x = sub_4_cast, y = sqrt_2_cast)[name = tensor("real_div_2_cast")]; + tensor reshape_9_shape_0 = const()[name = tensor("reshape_9_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_9_cast = reshape(shape = reshape_9_shape_0, x = real_div_2_cast)[name = tensor("reshape_9_cast")]; + tensor add_5_gamma_0_to_fp16 = const()[name = tensor("add_5_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(13478464)))]; + tensor add_5_beta_0_to_fp16 = const()[name = tensor("add_5_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(13479168)))]; + tensor add_5_epsilon_0_to_fp16 = const()[name = tensor("add_5_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_5_cast = batch_norm(beta = add_5_beta_0_to_fp16, epsilon = add_5_epsilon_0_to_fp16, 
gamma = add_5_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_9_cast)[name = tensor("add_5_cast")]; + tensor input_33_cast = silu(x = add_5_cast)[name = tensor("input_33_cast")]; + tensor var_227 = const()[name = tensor("op_227"), val = tensor([1, 1])]; + tensor var_229 = const()[name = tensor("op_229"), val = tensor([1, 1])]; + tensor hidden_states_5_pad_type_0 = const()[name = tensor("hidden_states_5_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_5_pad_0 = const()[name = tensor("hidden_states_5_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_0_resnets_1_conv1_weight_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(13479872)))]; + tensor down_blocks_0_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15323136)))]; + tensor hidden_states_5_cast = conv(bias = down_blocks_0_resnets_1_conv1_bias_to_fp16, dilations = var_229, groups = var_172, pad = hidden_states_5_pad_0, pad_type = hidden_states_5_pad_type_0, strides = var_227, weight = down_blocks_0_resnets_1_conv1_weight_to_fp16, x = input_33_cast)[name = tensor("hidden_states_5_cast")]; + tensor var_235 = const()[name = tensor("op_235"), val = tensor([1, 1])]; + tensor var_237 = const()[name = tensor("op_237"), val = tensor([1, 1])]; + tensor temb_3_pad_type_0 = const()[name = tensor("temb_3_pad_type_0"), val = tensor("custom")]; + tensor temb_3_pad_0 = const()[name = tensor("temb_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15323840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15733504))), name = tensor("down_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([320, 1280, 1, 1])]; + tensor down_blocks_0_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15734080)))]; + tensor temb_3_cast = conv(bias = down_blocks_0_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_237, groups = var_172, pad = temb_3_pad_0, pad_type = temb_3_pad_type_0, strides = var_235, weight = down_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_3_cast")]; + tensor input_37_cast = add(x = hidden_states_5_cast, y = temb_3_cast)[name = tensor("input_37_cast")]; + tensor reshape_12_shape_0 = const()[name = tensor("reshape_12_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_12_cast = reshape(shape = reshape_12_shape_0, x = input_37_cast)[name = tensor("reshape_12_cast")]; + tensor reduce_mean_9_axes_0 = const()[name = tensor("reduce_mean_9_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_9_keep_dims_0 = const()[name = tensor("reduce_mean_9_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_9_cast = reduce_mean(axes = reduce_mean_9_axes_0, keep_dims = reduce_mean_9_keep_dims_0, x = reshape_12_cast)[name = tensor("reduce_mean_9_cast")]; + tensor sub_6_cast = sub(x = reshape_12_cast, y = reduce_mean_9_cast)[name = tensor("sub_6_cast")]; + tensor square_3_cast = 
square(x = sub_6_cast)[name = tensor("square_3_cast")]; + tensor reduce_mean_11_axes_0 = const()[name = tensor("reduce_mean_11_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_11_keep_dims_0 = const()[name = tensor("reduce_mean_11_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_11_cast = reduce_mean(axes = reduce_mean_11_axes_0, keep_dims = reduce_mean_11_keep_dims_0, x = square_3_cast)[name = tensor("reduce_mean_11_cast")]; + tensor add_6_y_0_to_fp16 = const()[name = tensor("add_6_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_6_cast = add(x = reduce_mean_11_cast, y = add_6_y_0_to_fp16)[name = tensor("add_6_cast")]; + tensor sqrt_3_cast = sqrt(x = add_6_cast)[name = tensor("sqrt_3_cast")]; + tensor real_div_3_cast = real_div(x = sub_6_cast, y = sqrt_3_cast)[name = tensor("real_div_3_cast")]; + tensor reshape_13_shape_0 = const()[name = tensor("reshape_13_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_13_cast = reshape(shape = reshape_13_shape_0, x = real_div_3_cast)[name = tensor("reshape_13_cast")]; + tensor add_7_gamma_0_to_fp16 = const()[name = tensor("add_7_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15734784)))]; + tensor add_7_beta_0_to_fp16 = const()[name = tensor("add_7_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15735488)))]; + tensor add_7_epsilon_0_to_fp16 = const()[name = tensor("add_7_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_7_cast = batch_norm(beta = add_7_beta_0_to_fp16, epsilon = add_7_epsilon_0_to_fp16, gamma = add_7_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_13_cast)[name = tensor("add_7_cast")]; + tensor input_41_cast = silu(x = add_7_cast)[name = tensor("input_41_cast")]; + tensor var_247 = const()[name = tensor("op_247"), val = tensor([1, 1])]; + tensor var_249 = const()[name = tensor("op_249"), val = tensor([1, 1])]; + tensor hidden_states_7_pad_type_0 = const()[name = tensor("hidden_states_7_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_7_pad_0 = const()[name = tensor("hidden_states_7_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_0_resnets_1_conv2_weight_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15736192)))]; + tensor down_blocks_0_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(17579456)))]; + tensor hidden_states_7_cast = conv(bias = down_blocks_0_resnets_1_conv2_bias_to_fp16, dilations = var_249, groups = var_172, pad = hidden_states_7_pad_0, pad_type = hidden_states_7_pad_type_0, strides = var_247, weight = down_blocks_0_resnets_1_conv2_weight_to_fp16, x = input_41_cast)[name = tensor("hidden_states_7_cast")]; + tensor input_43_cast = add(x = input_29_cast, y = hidden_states_7_cast)[name = tensor("input_43_cast")]; + tensor var_256 = const()[name = tensor("op_256"), val = tensor([2, 2])]; + tensor var_258 = const()[name = tensor("op_258"), val = tensor([1, 1])]; + tensor input_45_pad_type_0 = const()[name = tensor("input_45_pad_type_0"), val = tensor("custom")]; + tensor input_45_pad_0 = const()[name = tensor("input_45_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_0_downsamplers_0_conv_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(17580160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18501824))), name = tensor("down_blocks_0_downsamplers_0_conv_weight_to_fp16_palettized"), shape = tensor([320, 320, 3, 3])]; + tensor down_blocks_0_downsamplers_0_conv_bias_to_fp16 = const()[name = tensor("down_blocks_0_downsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18502400)))]; + tensor input_45_cast = conv(bias = down_blocks_0_downsamplers_0_conv_bias_to_fp16, dilations = var_258, groups = var_172, pad = input_45_pad_0, pad_type = input_45_pad_type_0, strides = var_256, weight = down_blocks_0_downsamplers_0_conv_weight_to_fp16_palettized, x = input_43_cast)[name = tensor("input_45_cast")]; + tensor var_266 = const()[name = tensor("op_266"), val = tensor(3)]; + tensor var_277 = const()[name = tensor("op_277"), val = tensor(true)]; + tensor var_282 = const()[name = tensor("op_282"), val = tensor(1)]; + tensor reshape_16_shape_0 = const()[name = tensor("reshape_16_shape_0"), val = tensor([2, 32, 10, 64, 64])]; + tensor reshape_16_cast = reshape(shape = reshape_16_shape_0, x = input_45_cast)[name = tensor("reshape_16_cast")]; + tensor reduce_mean_12_axes_0 = const()[name = tensor("reduce_mean_12_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_12_keep_dims_0 = const()[name = tensor("reduce_mean_12_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_12_cast = reduce_mean(axes = reduce_mean_12_axes_0, keep_dims = reduce_mean_12_keep_dims_0, x = reshape_16_cast)[name = tensor("reduce_mean_12_cast")]; + tensor sub_8_cast = sub(x = reshape_16_cast, y = reduce_mean_12_cast)[name = tensor("sub_8_cast")]; + tensor square_4_cast = square(x = sub_8_cast)[name = tensor("square_4_cast")]; + tensor reduce_mean_14_axes_0 = const()[name = tensor("reduce_mean_14_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_14_keep_dims_0 = const()[name = tensor("reduce_mean_14_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_14_cast = reduce_mean(axes = reduce_mean_14_axes_0, keep_dims = reduce_mean_14_keep_dims_0, x = square_4_cast)[name = tensor("reduce_mean_14_cast")]; + tensor add_8_y_0_to_fp16 = const()[name = tensor("add_8_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_8_cast = add(x = reduce_mean_14_cast, y = add_8_y_0_to_fp16)[name = tensor("add_8_cast")]; + tensor sqrt_4_cast = sqrt(x = add_8_cast)[name = tensor("sqrt_4_cast")]; + tensor real_div_4_cast = real_div(x = sub_8_cast, y = sqrt_4_cast)[name = tensor("real_div_4_cast")]; + tensor reshape_17_shape_0 = const()[name = tensor("reshape_17_shape_0"), val = tensor([2, 320, 64, 64])]; + tensor reshape_17_cast = reshape(shape = reshape_17_shape_0, x = real_div_4_cast)[name = tensor("reshape_17_cast")]; + tensor add_9_gamma_0_to_fp16 = const()[name = tensor("add_9_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18503104)))]; + tensor add_9_beta_0_to_fp16 = const()[name = tensor("add_9_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18503808)))]; + tensor add_9_epsilon_0_to_fp16 = const()[name = tensor("add_9_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_9_cast = batch_norm(beta = add_9_beta_0_to_fp16, epsilon = add_9_epsilon_0_to_fp16, gamma = add_9_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = 
add_1_variance_0_to_fp16, x = reshape_17_cast)[name = tensor("add_9_cast")]; + tensor input_49_cast = silu(x = add_9_cast)[name = tensor("input_49_cast")]; + tensor var_305 = const()[name = tensor("op_305"), val = tensor([1, 1])]; + tensor var_307 = const()[name = tensor("op_307"), val = tensor([1, 1])]; + tensor hidden_states_9_pad_type_0 = const()[name = tensor("hidden_states_9_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_9_pad_0 = const()[name = tensor("hidden_states_9_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18504512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20347776))), name = tensor("down_blocks_1_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([640, 320, 3, 3])]; + tensor down_blocks_1_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20348352)))]; + tensor hidden_states_9_cast = conv(bias = down_blocks_1_resnets_0_conv1_bias_to_fp16, dilations = var_307, groups = var_282, pad = hidden_states_9_pad_0, pad_type = hidden_states_9_pad_type_0, strides = var_305, weight = down_blocks_1_resnets_0_conv1_weight_to_fp16_palettized, x = input_49_cast)[name = tensor("hidden_states_9_cast")]; + tensor var_313 = const()[name = tensor("op_313"), val = tensor([1, 1])]; + tensor var_315 = const()[name = tensor("op_315"), val = tensor([1, 1])]; + tensor temb_5_pad_type_0 = const()[name = tensor("temb_5_pad_type_0"), val = tensor("custom")]; + tensor temb_5_pad_0 = const()[name = tensor("temb_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20349696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20964160))), name = tensor("down_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor down_blocks_1_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20964352)))]; + tensor temb_5_cast = conv(bias = down_blocks_1_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_315, groups = var_282, pad = temb_5_pad_0, pad_type = temb_5_pad_type_0, strides = var_313, weight = down_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_5_cast")]; + tensor input_53_cast = add(x = hidden_states_9_cast, y = temb_5_cast)[name = tensor("input_53_cast")]; + tensor reshape_20_shape_0 = const()[name = tensor("reshape_20_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_20_cast = reshape(shape = reshape_20_shape_0, x = input_53_cast)[name = tensor("reshape_20_cast")]; + tensor reduce_mean_15_axes_0 = const()[name = tensor("reduce_mean_15_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_15_keep_dims_0 = const()[name = tensor("reduce_mean_15_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_15_cast = reduce_mean(axes = reduce_mean_15_axes_0, keep_dims = reduce_mean_15_keep_dims_0, x = reshape_20_cast)[name = tensor("reduce_mean_15_cast")]; + tensor 
sub_10_cast = sub(x = reshape_20_cast, y = reduce_mean_15_cast)[name = tensor("sub_10_cast")]; + tensor square_5_cast = square(x = sub_10_cast)[name = tensor("square_5_cast")]; + tensor reduce_mean_17_axes_0 = const()[name = tensor("reduce_mean_17_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_17_keep_dims_0 = const()[name = tensor("reduce_mean_17_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_17_cast = reduce_mean(axes = reduce_mean_17_axes_0, keep_dims = reduce_mean_17_keep_dims_0, x = square_5_cast)[name = tensor("reduce_mean_17_cast")]; + tensor add_10_y_0_to_fp16 = const()[name = tensor("add_10_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_10_cast = add(x = reduce_mean_17_cast, y = add_10_y_0_to_fp16)[name = tensor("add_10_cast")]; + tensor sqrt_5_cast = sqrt(x = add_10_cast)[name = tensor("sqrt_5_cast")]; + tensor real_div_5_cast = real_div(x = sub_10_cast, y = sqrt_5_cast)[name = tensor("real_div_5_cast")]; + tensor reshape_21_shape_0 = const()[name = tensor("reshape_21_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_21_cast = reshape(shape = reshape_21_shape_0, x = real_div_5_cast)[name = tensor("reshape_21_cast")]; + tensor add_11_mean_0_to_fp16 = const()[name = tensor("add_11_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20965696)))]; + tensor add_11_variance_0_to_fp16 = const()[name = tensor("add_11_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20967040)))]; + tensor add_11_gamma_0_to_fp16 = const()[name = tensor("add_11_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20968384)))]; + tensor add_11_beta_0_to_fp16 = const()[name = tensor("add_11_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20969728)))]; + tensor add_11_epsilon_0_to_fp16 = const()[name = tensor("add_11_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_11_cast = batch_norm(beta = add_11_beta_0_to_fp16, epsilon = add_11_epsilon_0_to_fp16, gamma = add_11_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_21_cast)[name = tensor("add_11_cast")]; + tensor input_57_cast = silu(x = add_11_cast)[name = tensor("input_57_cast")]; + tensor var_325 = const()[name = tensor("op_325"), val = tensor([1, 1])]; + tensor var_327 = const()[name = tensor("op_327"), val = tensor([1, 1])]; + tensor hidden_states_11_pad_type_0 = const()[name = tensor("hidden_states_11_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_11_pad_0 = const()[name = tensor("hidden_states_11_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20971072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(24657536))), name = tensor("down_blocks_1_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor down_blocks_1_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(24658112)))]; + tensor hidden_states_11_cast = conv(bias = down_blocks_1_resnets_0_conv2_bias_to_fp16, dilations = var_327, groups = var_282, pad = hidden_states_11_pad_0, pad_type = 
hidden_states_11_pad_type_0, strides = var_325, weight = down_blocks_1_resnets_0_conv2_weight_to_fp16_palettized, x = input_57_cast)[name = tensor("hidden_states_11_cast")]; +      tensor var_332 = const()[name = tensor("op_332"), val = tensor([1, 1])]; +      tensor var_334 = const()[name = tensor("op_334"), val = tensor([1, 1])]; +      tensor x_1_pad_type_0 = const()[name = tensor("x_1_pad_type_0"), val = tensor("custom")]; +      tensor x_1_pad_0 = const()[name = tensor("x_1_pad_0"), val = tensor([0, 0, 0, 0])]; +      tensor down_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(24659456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(24864320))), name = tensor("down_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([640, 320, 1, 1])]; +      tensor down_blocks_1_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(24864896)))]; +      tensor x_1_cast = conv(bias = down_blocks_1_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_334, groups = var_282, pad = x_1_pad_0, pad_type = x_1_pad_type_0, strides = var_332, weight = down_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized, x = input_45_cast)[name = tensor("x_1_cast")]; +      tensor hidden_states_13_cast = add(x = x_1_cast, y = hidden_states_11_cast)[name = tensor("hidden_states_13_cast")]; +      tensor reshape_24_shape_0 = const()[name = tensor("reshape_24_shape_0"), val = tensor([2, 32, 20, 64, 64])]; +      tensor reshape_24_cast = reshape(shape = reshape_24_shape_0, x = hidden_states_13_cast)[name = tensor("reshape_24_cast")]; +      tensor reduce_mean_18_axes_0 = const()[name = tensor("reduce_mean_18_axes_0"), val = tensor([2, 3, 4])]; +      tensor reduce_mean_18_keep_dims_0 = const()[name = tensor("reduce_mean_18_keep_dims_0"), val = tensor(true)]; +      tensor reduce_mean_18_cast = reduce_mean(axes = reduce_mean_18_axes_0, keep_dims = reduce_mean_18_keep_dims_0, x = reshape_24_cast)[name = tensor("reduce_mean_18_cast")]; +      tensor sub_12_cast = sub(x = reshape_24_cast, y = reduce_mean_18_cast)[name = tensor("sub_12_cast")]; +      tensor square_6_cast = square(x = sub_12_cast)[name = tensor("square_6_cast")]; +      tensor reduce_mean_20_axes_0 = const()[name = tensor("reduce_mean_20_axes_0"), val = tensor([2, 3, 4])]; +      tensor reduce_mean_20_keep_dims_0 = const()[name = tensor("reduce_mean_20_keep_dims_0"), val = tensor(true)]; +      tensor reduce_mean_20_cast = reduce_mean(axes = reduce_mean_20_axes_0, keep_dims = reduce_mean_20_keep_dims_0, x = square_6_cast)[name = tensor("reduce_mean_20_cast")]; +      tensor add_12_y_0_to_fp16 = const()[name = tensor("add_12_y_0_to_fp16"), val = tensor(0x1.5p-17)]; +      tensor add_12_cast = add(x = reduce_mean_20_cast, y = add_12_y_0_to_fp16)[name = tensor("add_12_cast")]; +      tensor sqrt_6_cast = sqrt(x = add_12_cast)[name = tensor("sqrt_6_cast")]; +      tensor real_div_6_cast = real_div(x = sub_12_cast, y = sqrt_6_cast)[name = tensor("real_div_6_cast")]; +      tensor reshape_25_shape_0 = const()[name = tensor("reshape_25_shape_0"), val = tensor([2, 640, 64, 64])]; +      tensor reshape_25_cast = reshape(shape = reshape_25_shape_0, x = real_div_6_cast)[name = tensor("reshape_25_cast")]; +      tensor add_13_gamma_0_to_fp16 = const()[name = tensor("add_13_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(24866240)))]; + tensor add_13_beta_0_to_fp16 = const()[name = tensor("add_13_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(24867584)))]; + tensor add_13_epsilon_0_to_fp16 = const()[name = tensor("add_13_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_13_cast = batch_norm(beta = add_13_beta_0_to_fp16, epsilon = add_13_epsilon_0_to_fp16, gamma = add_13_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_25_cast)[name = tensor("add_13_cast")]; + tensor var_356 = const()[name = tensor("op_356"), val = tensor([1, 1])]; + tensor var_358 = const()[name = tensor("op_358"), val = tensor([1, 1])]; + tensor hidden_states_15_pad_type_0 = const()[name = tensor("hidden_states_15_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_15_pad_0 = const()[name = tensor("hidden_states_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(24868928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25278592))), name = tensor("down_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25279168)))]; + tensor hidden_states_15_cast = conv(bias = down_blocks_1_attentions_0_proj_in_bias_to_fp16, dilations = var_358, groups = var_282, pad = hidden_states_15_pad_0, pad_type = hidden_states_15_pad_type_0, strides = var_356, weight = down_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized, x = add_13_cast)[name = tensor("hidden_states_15_cast")]; + tensor var_363 = const()[name = tensor("op_363"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_1_cast = reshape(shape = var_363, x = hidden_states_15_cast)[name = tensor("inputs_1_cast")]; + tensor var_373 = const()[name = tensor("op_373"), val = tensor([1])]; + tensor channels_mean_1_cast = reduce_mean(axes = var_373, keep_dims = var_277, x = inputs_1_cast)[name = tensor("channels_mean_1_cast")]; + tensor zero_mean_1_cast = sub(x = inputs_1_cast, y = channels_mean_1_cast)[name = tensor("zero_mean_1_cast")]; + tensor zero_mean_sq_1_cast = mul(x = zero_mean_1_cast, y = zero_mean_1_cast)[name = tensor("zero_mean_sq_1_cast")]; + tensor var_377 = const()[name = tensor("op_377"), val = tensor([1])]; + tensor var_378_cast = reduce_mean(axes = var_377, keep_dims = var_277, x = zero_mean_sq_1_cast)[name = tensor("op_378_cast")]; + tensor var_379_to_fp16 = const()[name = tensor("op_379_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_380_cast = add(x = var_378_cast, y = var_379_to_fp16)[name = tensor("op_380_cast")]; + tensor denom_1_epsilon_0_to_fp16 = const()[name = tensor("denom_1_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_1_cast = rsqrt(epsilon = denom_1_epsilon_0_to_fp16, x = var_380_cast)[name = tensor("denom_1_cast")]; + tensor out_1_cast = mul(x = zero_mean_1_cast, y = denom_1_cast)[name = tensor("out_1_cast")]; + tensor var_384_to_fp16 = const()[name = tensor("op_384_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25280512)))]; + tensor var_385_cast = 
add(x = out_1_cast, y = var_384_to_fp16)[name = tensor("op_385_cast")]; + tensor var_387_to_fp16 = const()[name = tensor("op_387_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25281856)))]; + tensor hidden_states_17_cast = mul(x = var_385_cast, y = var_387_to_fp16)[name = tensor("hidden_states_17_cast")]; + tensor var_394 = const()[name = tensor("op_394"), val = tensor([1, 1])]; + tensor var_396 = const()[name = tensor("op_396"), val = tensor([1, 1])]; + tensor q_1_pad_type_0 = const()[name = tensor("q_1_pad_type_0"), val = tensor("custom")]; + tensor q_1_pad_0 = const()[name = tensor("q_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25283200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25590464))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_1_cast = conv(dilations = var_396, groups = var_282, pad = q_1_pad_0, pad_type = q_1_pad_type_0, strides = var_394, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_17_cast)[name = tensor("q_1_cast")]; + tensor var_400 = const()[name = tensor("op_400"), val = tensor([1, 1])]; + tensor var_402 = const()[name = tensor("op_402"), val = tensor([1, 1])]; + tensor k_1_pad_type_0 = const()[name = tensor("k_1_pad_type_0"), val = tensor("custom")]; + tensor k_1_pad_0 = const()[name = tensor("k_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25590656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25897920))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_1_cast = conv(dilations = var_402, groups = var_282, pad = k_1_pad_0, pad_type = k_1_pad_type_0, strides = var_400, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_17_cast)[name = tensor("k_1_cast")]; + tensor var_406 = const()[name = tensor("op_406"), val = tensor([1, 1])]; + tensor var_408 = const()[name = tensor("op_408"), val = tensor([1, 1])]; + tensor v_1_pad_type_0 = const()[name = tensor("v_1_pad_type_0"), val = tensor("custom")]; + tensor v_1_pad_0 = const()[name = tensor("v_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25898112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26307776))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_1_cast = conv(dilations = var_408, groups = var_282, pad = v_1_pad_0, pad_type = v_1_pad_type_0, strides = var_406, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_17_cast)[name = 
tensor("v_1_cast")]; + tensor var_412 = const()[name = tensor("op_412"), val = tensor([2, 10, 64, -1])]; + tensor var_413_cast = reshape(shape = var_412, x = q_1_cast)[name = tensor("op_413_cast")]; + tensor var_414 = const()[name = tensor("op_414"), val = tensor([2, 10, 64, -1])]; + tensor var_415_cast = reshape(shape = var_414, x = k_1_cast)[name = tensor("op_415_cast")]; + tensor var_416 = const()[name = tensor("op_416"), val = tensor([2, 10, 64, -1])]; + tensor var_417_cast = reshape(shape = var_416, x = v_1_cast)[name = tensor("op_417_cast")]; + tensor attn_weights_1_transpose_x_0 = const()[name = tensor("attn_weights_1_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_1_transpose_y_0 = const()[name = tensor("attn_weights_1_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_1_cast = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = var_413_cast, y = var_415_cast)[name = tensor("attn_weights_1_cast")]; + tensor var_273_to_fp16 = const()[name = tensor("op_273_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_3_cast = mul(x = attn_weights_1_cast, y = var_273_to_fp16)[name = tensor("attn_weights_3_cast")]; + tensor var_421_cast = softmax(axis = var_266, x = attn_weights_3_cast)[name = tensor("op_421_cast")]; + tensor attn_1_transpose_x_0 = const()[name = tensor("attn_1_transpose_x_0"), val = tensor(false)]; + tensor attn_1_transpose_y_0 = const()[name = tensor("attn_1_transpose_y_0"), val = tensor(true)]; + tensor attn_1_cast = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = var_417_cast, y = var_421_cast)[name = tensor("attn_1_cast")]; + tensor var_425 = const()[name = tensor("op_425"), val = tensor([2, 640, 1, -1])]; + tensor input_61_cast = reshape(shape = var_425, x = attn_1_cast)[name = tensor("input_61_cast")]; + tensor var_430 = const()[name = tensor("op_430"), val = tensor([1, 1])]; + tensor var_432 = const()[name = tensor("op_432"), val = tensor([1, 1])]; + tensor var_434_pad_type_0 = const()[name = tensor("op_434_pad_type_0"), val = tensor("custom")]; + tensor var_434_pad_0 = const()[name = tensor("op_434_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26308352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26718016))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26718592)))]; + tensor var_434_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_432, groups = var_282, pad = var_434_pad_0, pad_type = var_434_pad_type_0, strides = var_430, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_61_cast)[name = tensor("op_434_cast")]; + tensor inputs_3_cast = add(x = var_434_cast, y = inputs_1_cast)[name = tensor("inputs_3_cast")]; + tensor var_438 = const()[name = tensor("op_438"), val = tensor([1])]; + tensor channels_mean_3_cast = reduce_mean(axes = 
var_438, keep_dims = var_277, x = inputs_3_cast)[name = tensor("channels_mean_3_cast")]; + tensor zero_mean_3_cast = sub(x = inputs_3_cast, y = channels_mean_3_cast)[name = tensor("zero_mean_3_cast")]; + tensor zero_mean_sq_3_cast = mul(x = zero_mean_3_cast, y = zero_mean_3_cast)[name = tensor("zero_mean_sq_3_cast")]; + tensor var_442 = const()[name = tensor("op_442"), val = tensor([1])]; + tensor var_443_cast = reduce_mean(axes = var_442, keep_dims = var_277, x = zero_mean_sq_3_cast)[name = tensor("op_443_cast")]; + tensor var_444_to_fp16 = const()[name = tensor("op_444_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_445_cast = add(x = var_443_cast, y = var_444_to_fp16)[name = tensor("op_445_cast")]; + tensor denom_3_epsilon_0_to_fp16 = const()[name = tensor("denom_3_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_3_cast = rsqrt(epsilon = denom_3_epsilon_0_to_fp16, x = var_445_cast)[name = tensor("denom_3_cast")]; + tensor out_3_cast = mul(x = zero_mean_3_cast, y = denom_3_cast)[name = tensor("out_3_cast")]; + tensor var_449_to_fp16 = const()[name = tensor("op_449_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26719936)))]; + tensor var_450_cast = add(x = out_3_cast, y = var_449_to_fp16)[name = tensor("op_450_cast")]; + tensor var_452_to_fp16 = const()[name = tensor("op_452_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26721280)))]; + tensor hidden_states_19_cast = mul(x = var_450_cast, y = var_452_to_fp16)[name = tensor("hidden_states_19_cast")]; + tensor var_459 = const()[name = tensor("op_459"), val = tensor([1, 1])]; + tensor var_461 = const()[name = tensor("op_461"), val = tensor([1, 1])]; + tensor q_3_pad_type_0 = const()[name = tensor("q_3_pad_type_0"), val = tensor("custom")]; + tensor q_3_pad_0 = const()[name = tensor("q_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26722624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26927488))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_3_cast = conv(dilations = var_461, groups = var_282, pad = q_3_pad_0, pad_type = q_3_pad_type_0, strides = var_459, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_19_cast)[name = tensor("q_3_cast")]; + tensor var_465 = const()[name = tensor("op_465"), val = tensor([1, 1])]; + tensor var_467 = const()[name = tensor("op_467"), val = tensor([1, 1])]; + tensor k_3_pad_type_0 = const()[name = tensor("k_3_pad_type_0"), val = tensor("custom")]; + tensor k_3_pad_0 = const()[name = tensor("k_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26927616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(27910720))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_3_cast = conv(dilations = var_467, groups = var_282, pad = k_3_pad_0, pad_type = 
k_3_pad_type_0, strides = var_465, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_3_cast")]; + tensor var_471 = const()[name = tensor("op_471"), val = tensor([1, 1])]; + tensor var_473 = const()[name = tensor("op_473"), val = tensor([1, 1])]; + tensor v_3_pad_type_0 = const()[name = tensor("v_3_pad_type_0"), val = tensor("custom")]; + tensor v_3_pad_0 = const()[name = tensor("v_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(27910912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28894016))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_3_cast = conv(dilations = var_473, groups = var_282, pad = v_3_pad_0, pad_type = v_3_pad_type_0, strides = var_471, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_3_cast")]; + tensor var_477 = const()[name = tensor("op_477"), val = tensor([2, 10, 64, -1])]; + tensor var_478_cast = reshape(shape = var_477, x = q_3_cast)[name = tensor("op_478_cast")]; + tensor var_479 = const()[name = tensor("op_479"), val = tensor([2, 10, 64, -1])]; + tensor var_480_cast = reshape(shape = var_479, x = k_3_cast)[name = tensor("op_480_cast")]; + tensor var_481 = const()[name = tensor("op_481"), val = tensor([2, 10, 64, -1])]; + tensor var_482_cast = reshape(shape = var_481, x = v_3_cast)[name = tensor("op_482_cast")]; + tensor attn_weights_5_transpose_x_0 = const()[name = tensor("attn_weights_5_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_5_transpose_y_0 = const()[name = tensor("attn_weights_5_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_5_cast = matmul(transpose_x = attn_weights_5_transpose_x_0, transpose_y = attn_weights_5_transpose_y_0, x = var_478_cast, y = var_480_cast)[name = tensor("attn_weights_5_cast")]; + tensor attn_weights_7_cast = mul(x = attn_weights_5_cast, y = var_273_to_fp16)[name = tensor("attn_weights_7_cast")]; + tensor var_486_cast = softmax(axis = var_266, x = attn_weights_7_cast)[name = tensor("op_486_cast")]; + tensor attn_3_transpose_x_0 = const()[name = tensor("attn_3_transpose_x_0"), val = tensor(false)]; + tensor attn_3_transpose_y_0 = const()[name = tensor("attn_3_transpose_y_0"), val = tensor(true)]; + tensor attn_3_cast = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = var_482_cast, y = var_486_cast)[name = tensor("attn_3_cast")]; + tensor var_490 = const()[name = tensor("op_490"), val = tensor([2, 640, 1, -1])]; + tensor input_63_cast = reshape(shape = var_490, x = attn_3_cast)[name = tensor("input_63_cast")]; + tensor var_495 = const()[name = tensor("op_495"), val = tensor([1, 1])]; + tensor var_497 = const()[name = tensor("op_497"), val = tensor([1, 1])]; + tensor var_499_pad_type_0 = const()[name = tensor("op_499_pad_type_0"), val = tensor("custom")]; + tensor var_499_pad_0 = const()[name = tensor("op_499_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(28894208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(29201472))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(29201664)))]; + tensor var_499_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_497, groups = var_282, pad = var_499_pad_0, pad_type = var_499_pad_type_0, strides = var_495, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_63_cast)[name = tensor("op_499_cast")]; + tensor inputs_5_cast = add(x = var_499_cast, y = inputs_3_cast)[name = tensor("inputs_5_cast")]; + tensor var_503 = const()[name = tensor("op_503"), val = tensor([1])]; + tensor channels_mean_5_cast = reduce_mean(axes = var_503, keep_dims = var_277, x = inputs_5_cast)[name = tensor("channels_mean_5_cast")]; + tensor zero_mean_5_cast = sub(x = inputs_5_cast, y = channels_mean_5_cast)[name = tensor("zero_mean_5_cast")]; + tensor zero_mean_sq_5_cast = mul(x = zero_mean_5_cast, y = zero_mean_5_cast)[name = tensor("zero_mean_sq_5_cast")]; + tensor var_507 = const()[name = tensor("op_507"), val = tensor([1])]; + tensor var_508_cast = reduce_mean(axes = var_507, keep_dims = var_277, x = zero_mean_sq_5_cast)[name = tensor("op_508_cast")]; + tensor var_509_to_fp16 = const()[name = tensor("op_509_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_510_cast = add(x = var_508_cast, y = var_509_to_fp16)[name = tensor("op_510_cast")]; + tensor denom_5_epsilon_0_to_fp16 = const()[name = tensor("denom_5_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_5_cast = rsqrt(epsilon = denom_5_epsilon_0_to_fp16, x = var_510_cast)[name = tensor("denom_5_cast")]; + tensor out_5_cast = mul(x = zero_mean_5_cast, y = denom_5_cast)[name = tensor("out_5_cast")]; + tensor var_514_to_fp16 = const()[name = tensor("op_514_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(29203008)))]; + tensor var_515_cast = add(x = out_5_cast, y = var_514_to_fp16)[name = tensor("op_515_cast")]; + tensor var_517_to_fp16 = const()[name = tensor("op_517_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(29204352)))]; + tensor input_65_cast = mul(x = var_515_cast, y = var_517_to_fp16)[name = tensor("input_65_cast")]; + tensor var_525 = const()[name = tensor("op_525"), val = tensor([1, 1])]; + tensor var_527 = const()[name = tensor("op_527"), val = tensor([1, 1])]; + tensor var_529_pad_type_0 = const()[name = tensor("op_529_pad_type_0"), val = tensor("custom")]; + tensor var_529_pad_0 = const()[name = tensor("op_529_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(29205696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(32482560))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor 
down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(32483136)))]; + tensor var_529_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_527, groups = var_282, pad = var_529_pad_0, pad_type = var_529_pad_type_0, strides = var_525, weight = down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_65_cast)[name = tensor("op_529_cast")]; + tensor var_530_split_sizes_0 = const()[name = tensor("op_530_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_530_axis_0 = const()[name = tensor("op_530_axis_0"), val = tensor(1)]; + tensor var_530_cast_0, tensor var_530_cast_1 = split(axis = var_530_axis_0, split_sizes = var_530_split_sizes_0, x = var_529_cast)[name = tensor("op_530_cast")]; + tensor var_532_mode_0 = const()[name = tensor("op_532_mode_0"), val = tensor("EXACT")]; + tensor var_532_cast = gelu(mode = var_532_mode_0, x = var_530_cast_1)[name = tensor("op_532_cast")]; + tensor input_67_cast = mul(x = var_530_cast_0, y = var_532_cast)[name = tensor("input_67_cast")]; + tensor var_536 = const()[name = tensor("op_536"), val = tensor([1, 1])]; + tensor var_538 = const()[name = tensor("op_538"), val = tensor([1, 1])]; + tensor var_540_pad_type_0 = const()[name = tensor("op_540_pad_type_0"), val = tensor("custom")]; + tensor var_540_pad_0 = const()[name = tensor("op_540_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(32493440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34131904))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34132480)))]; + tensor var_540_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_538, groups = var_282, pad = var_540_pad_0, pad_type = var_540_pad_type_0, strides = var_536, weight = down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_67_cast)[name = tensor("op_540_cast")]; + tensor inputs_7_cast = add(x = var_540_cast, y = inputs_5_cast)[name = tensor("inputs_7_cast")]; + tensor var_550 = const()[name = tensor("op_550"), val = tensor([1])]; + tensor channels_mean_7_cast = reduce_mean(axes = var_550, keep_dims = var_277, x = inputs_7_cast)[name = tensor("channels_mean_7_cast")]; + tensor zero_mean_7_cast = sub(x = inputs_7_cast, y = channels_mean_7_cast)[name = tensor("zero_mean_7_cast")]; + tensor zero_mean_sq_7_cast = mul(x = zero_mean_7_cast, y = zero_mean_7_cast)[name = tensor("zero_mean_sq_7_cast")]; + tensor var_554 = const()[name = tensor("op_554"), val = tensor([1])]; + tensor var_555_cast = reduce_mean(axes = var_554, keep_dims = var_277, x = zero_mean_sq_7_cast)[name = tensor("op_555_cast")]; + tensor var_556_to_fp16 = const()[name = tensor("op_556_to_fp16"), val 
= tensor(0x1.5p-17)]; + tensor var_557_cast = add(x = var_555_cast, y = var_556_to_fp16)[name = tensor("op_557_cast")]; + tensor denom_7_epsilon_0_to_fp16 = const()[name = tensor("denom_7_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_7_cast = rsqrt(epsilon = denom_7_epsilon_0_to_fp16, x = var_557_cast)[name = tensor("denom_7_cast")]; + tensor out_7_cast = mul(x = zero_mean_7_cast, y = denom_7_cast)[name = tensor("out_7_cast")]; + tensor var_561_to_fp16 = const()[name = tensor("op_561_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34133824)))]; + tensor var_562_cast = add(x = out_7_cast, y = var_561_to_fp16)[name = tensor("op_562_cast")]; + tensor var_564_to_fp16 = const()[name = tensor("op_564_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34135168)))]; + tensor hidden_states_23_cast = mul(x = var_562_cast, y = var_564_to_fp16)[name = tensor("hidden_states_23_cast")]; + tensor var_571 = const()[name = tensor("op_571"), val = tensor([1, 1])]; + tensor var_573 = const()[name = tensor("op_573"), val = tensor([1, 1])]; + tensor q_5_pad_type_0 = const()[name = tensor("q_5_pad_type_0"), val = tensor("custom")]; + tensor q_5_pad_0 = const()[name = tensor("q_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34136512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34443776))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_5_cast = conv(dilations = var_573, groups = var_282, pad = q_5_pad_0, pad_type = q_5_pad_type_0, strides = var_571, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_23_cast)[name = tensor("q_5_cast")]; + tensor var_577 = const()[name = tensor("op_577"), val = tensor([1, 1])]; + tensor var_579 = const()[name = tensor("op_579"), val = tensor([1, 1])]; + tensor k_5_pad_type_0 = const()[name = tensor("k_5_pad_type_0"), val = tensor("custom")]; + tensor k_5_pad_0 = const()[name = tensor("k_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34443968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34751232))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_5_cast = conv(dilations = var_579, groups = var_282, pad = k_5_pad_0, pad_type = k_5_pad_type_0, strides = var_577, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_23_cast)[name = tensor("k_5_cast")]; + tensor var_583 = const()[name = tensor("op_583"), val = tensor([1, 1])]; + tensor var_585 = const()[name = tensor("op_585"), val = tensor([1, 1])]; + tensor v_5_pad_type_0 = const()[name = tensor("v_5_pad_type_0"), val = tensor("custom")]; + tensor v_5_pad_0 = const()[name = tensor("v_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized 
= constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34751424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35161088))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_5_cast = conv(dilations = var_585, groups = var_282, pad = v_5_pad_0, pad_type = v_5_pad_type_0, strides = var_583, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_23_cast)[name = tensor("v_5_cast")]; + tensor var_589 = const()[name = tensor("op_589"), val = tensor([2, 10, 64, -1])]; + tensor var_590_cast = reshape(shape = var_589, x = q_5_cast)[name = tensor("op_590_cast")]; + tensor var_591 = const()[name = tensor("op_591"), val = tensor([2, 10, 64, -1])]; + tensor var_592_cast = reshape(shape = var_591, x = k_5_cast)[name = tensor("op_592_cast")]; + tensor var_593 = const()[name = tensor("op_593"), val = tensor([2, 10, 64, -1])]; + tensor var_594_cast = reshape(shape = var_593, x = v_5_cast)[name = tensor("op_594_cast")]; + tensor attn_weights_9_transpose_x_0 = const()[name = tensor("attn_weights_9_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_9_transpose_y_0 = const()[name = tensor("attn_weights_9_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_9_cast = matmul(transpose_x = attn_weights_9_transpose_x_0, transpose_y = attn_weights_9_transpose_y_0, x = var_590_cast, y = var_592_cast)[name = tensor("attn_weights_9_cast")]; + tensor attn_weights_11_cast = mul(x = attn_weights_9_cast, y = var_273_to_fp16)[name = tensor("attn_weights_11_cast")]; + tensor var_598_cast = softmax(axis = var_266, x = attn_weights_11_cast)[name = tensor("op_598_cast")]; + tensor attn_5_transpose_x_0 = const()[name = tensor("attn_5_transpose_x_0"), val = tensor(false)]; + tensor attn_5_transpose_y_0 = const()[name = tensor("attn_5_transpose_y_0"), val = tensor(true)]; + tensor attn_5_cast = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = var_594_cast, y = var_598_cast)[name = tensor("attn_5_cast")]; + tensor var_602 = const()[name = tensor("op_602"), val = tensor([2, 640, 1, -1])]; + tensor input_69_cast = reshape(shape = var_602, x = attn_5_cast)[name = tensor("input_69_cast")]; + tensor var_607 = const()[name = tensor("op_607"), val = tensor([1, 1])]; + tensor var_609 = const()[name = tensor("op_609"), val = tensor([1, 1])]; + tensor var_611_pad_type_0 = const()[name = tensor("op_611_pad_type_0"), val = tensor("custom")]; + tensor var_611_pad_0 = const()[name = tensor("op_611_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35161664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35571328))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35571904)))]; + tensor var_611_cast = conv(bias = 
down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_609, groups = var_282, pad = var_611_pad_0, pad_type = var_611_pad_type_0, strides = var_607, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_69_cast)[name = tensor("op_611_cast")]; + tensor inputs_9_cast = add(x = var_611_cast, y = inputs_7_cast)[name = tensor("inputs_9_cast")]; + tensor var_615 = const()[name = tensor("op_615"), val = tensor([1])]; + tensor channels_mean_9_cast = reduce_mean(axes = var_615, keep_dims = var_277, x = inputs_9_cast)[name = tensor("channels_mean_9_cast")]; + tensor zero_mean_9_cast = sub(x = inputs_9_cast, y = channels_mean_9_cast)[name = tensor("zero_mean_9_cast")]; + tensor zero_mean_sq_9_cast = mul(x = zero_mean_9_cast, y = zero_mean_9_cast)[name = tensor("zero_mean_sq_9_cast")]; + tensor var_619 = const()[name = tensor("op_619"), val = tensor([1])]; + tensor var_620_cast = reduce_mean(axes = var_619, keep_dims = var_277, x = zero_mean_sq_9_cast)[name = tensor("op_620_cast")]; + tensor var_621_to_fp16 = const()[name = tensor("op_621_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_622_cast = add(x = var_620_cast, y = var_621_to_fp16)[name = tensor("op_622_cast")]; + tensor denom_9_epsilon_0_to_fp16 = const()[name = tensor("denom_9_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_9_cast = rsqrt(epsilon = denom_9_epsilon_0_to_fp16, x = var_622_cast)[name = tensor("denom_9_cast")]; + tensor out_9_cast = mul(x = zero_mean_9_cast, y = denom_9_cast)[name = tensor("out_9_cast")]; + tensor var_626_to_fp16 = const()[name = tensor("op_626_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35573248)))]; + tensor var_627_cast = add(x = out_9_cast, y = var_626_to_fp16)[name = tensor("op_627_cast")]; + tensor var_629_to_fp16 = const()[name = tensor("op_629_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35574592)))]; + tensor hidden_states_25_cast = mul(x = var_627_cast, y = var_629_to_fp16)[name = tensor("hidden_states_25_cast")]; + tensor var_636 = const()[name = tensor("op_636"), val = tensor([1, 1])]; + tensor var_638 = const()[name = tensor("op_638"), val = tensor([1, 1])]; + tensor q_7_pad_type_0 = const()[name = tensor("q_7_pad_type_0"), val = tensor("custom")]; + tensor q_7_pad_0 = const()[name = tensor("q_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35575936))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35883200))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_7_cast = conv(dilations = var_638, groups = var_282, pad = q_7_pad_0, pad_type = q_7_pad_type_0, strides = var_636, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_25_cast)[name = tensor("q_7_cast")]; + tensor var_642 = const()[name = tensor("op_642"), val = tensor([1, 1])]; + tensor var_644 = const()[name = tensor("op_644"), val = tensor([1, 1])]; + tensor k_7_pad_type_0 = const()[name = tensor("k_7_pad_type_0"), val = tensor("custom")]; + tensor k_7_pad_0 = const()[name = tensor("k_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35883392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36866496))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_7_cast = conv(dilations = var_644, groups = var_282, pad = k_7_pad_0, pad_type = k_7_pad_type_0, strides = var_642, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_7_cast")]; + tensor var_648 = const()[name = tensor("op_648"), val = tensor([1, 1])]; + tensor var_650 = const()[name = tensor("op_650"), val = tensor([1, 1])]; + tensor v_7_pad_type_0 = const()[name = tensor("v_7_pad_type_0"), val = tensor("custom")]; + tensor v_7_pad_0 = const()[name = tensor("v_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36866688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(37849792))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_7_cast = conv(dilations = var_650, groups = var_282, pad = v_7_pad_0, pad_type = v_7_pad_type_0, strides = var_648, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_7_cast")]; + tensor var_654 = const()[name = tensor("op_654"), val = tensor([2, 10, 64, -1])]; + tensor var_655_cast = reshape(shape = var_654, x = q_7_cast)[name = tensor("op_655_cast")]; + tensor var_656 = const()[name = tensor("op_656"), val = tensor([2, 10, 64, -1])]; + tensor var_657_cast = reshape(shape = var_656, x = k_7_cast)[name = tensor("op_657_cast")]; + tensor var_658 = const()[name = tensor("op_658"), val = tensor([2, 10, 64, -1])]; + tensor var_659_cast = reshape(shape = var_658, x = v_7_cast)[name = tensor("op_659_cast")]; + tensor attn_weights_13_transpose_x_0 = const()[name = tensor("attn_weights_13_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_13_transpose_y_0 = const()[name = tensor("attn_weights_13_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_13_cast = matmul(transpose_x = attn_weights_13_transpose_x_0, transpose_y = attn_weights_13_transpose_y_0, x = var_655_cast, y = var_657_cast)[name = tensor("attn_weights_13_cast")]; + tensor attn_weights_15_cast = mul(x = attn_weights_13_cast, y = var_273_to_fp16)[name = tensor("attn_weights_15_cast")]; + tensor var_663_cast = softmax(axis = var_266, x = attn_weights_15_cast)[name = tensor("op_663_cast")]; + tensor attn_7_transpose_x_0 = const()[name = tensor("attn_7_transpose_x_0"), val = tensor(false)]; + tensor attn_7_transpose_y_0 = const()[name = tensor("attn_7_transpose_y_0"), val = tensor(true)]; + tensor attn_7_cast = matmul(transpose_x = attn_7_transpose_x_0, transpose_y = attn_7_transpose_y_0, x = var_659_cast, y = var_663_cast)[name = tensor("attn_7_cast")]; + tensor var_667 = const()[name = tensor("op_667"), val = tensor([2, 640, 1, -1])]; + tensor input_71_cast = reshape(shape = var_667, x = attn_7_cast)[name = 
tensor("input_71_cast")]; + tensor var_672 = const()[name = tensor("op_672"), val = tensor([1, 1])]; + tensor var_674 = const()[name = tensor("op_674"), val = tensor([1, 1])]; + tensor var_676_pad_type_0 = const()[name = tensor("op_676_pad_type_0"), val = tensor("custom")]; + tensor var_676_pad_0 = const()[name = tensor("op_676_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(37849984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38157248))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38157440)))]; + tensor var_676_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_674, groups = var_282, pad = var_676_pad_0, pad_type = var_676_pad_type_0, strides = var_672, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_71_cast)[name = tensor("op_676_cast")]; + tensor inputs_11_cast = add(x = var_676_cast, y = inputs_9_cast)[name = tensor("inputs_11_cast")]; + tensor var_680 = const()[name = tensor("op_680"), val = tensor([1])]; + tensor channels_mean_11_cast = reduce_mean(axes = var_680, keep_dims = var_277, x = inputs_11_cast)[name = tensor("channels_mean_11_cast")]; + tensor zero_mean_11_cast = sub(x = inputs_11_cast, y = channels_mean_11_cast)[name = tensor("zero_mean_11_cast")]; + tensor zero_mean_sq_11_cast = mul(x = zero_mean_11_cast, y = zero_mean_11_cast)[name = tensor("zero_mean_sq_11_cast")]; + tensor var_684 = const()[name = tensor("op_684"), val = tensor([1])]; + tensor var_685_cast = reduce_mean(axes = var_684, keep_dims = var_277, x = zero_mean_sq_11_cast)[name = tensor("op_685_cast")]; + tensor var_686_to_fp16 = const()[name = tensor("op_686_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_687_cast = add(x = var_685_cast, y = var_686_to_fp16)[name = tensor("op_687_cast")]; + tensor denom_11_epsilon_0_to_fp16 = const()[name = tensor("denom_11_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_11_cast = rsqrt(epsilon = denom_11_epsilon_0_to_fp16, x = var_687_cast)[name = tensor("denom_11_cast")]; + tensor out_11_cast = mul(x = zero_mean_11_cast, y = denom_11_cast)[name = tensor("out_11_cast")]; + tensor var_691_to_fp16 = const()[name = tensor("op_691_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38158784)))]; + tensor var_692_cast = add(x = out_11_cast, y = var_691_to_fp16)[name = tensor("op_692_cast")]; + tensor var_694_to_fp16 = const()[name = tensor("op_694_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38160128)))]; + tensor input_73_cast = mul(x = var_692_cast, y = var_694_to_fp16)[name = tensor("input_73_cast")]; + tensor var_702 = const()[name = tensor("op_702"), val = tensor([1, 1])]; + tensor var_704 = const()[name = tensor("op_704"), val = tensor([1, 1])]; + tensor var_706_pad_type_0 = const()[name = tensor("op_706_pad_type_0"), val = 
tensor("custom")]; + tensor var_706_pad_0 = const()[name = tensor("op_706_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38161472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(41438336))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(41438912)))]; + tensor var_706_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_704, groups = var_282, pad = var_706_pad_0, pad_type = var_706_pad_type_0, strides = var_702, weight = down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_73_cast)[name = tensor("op_706_cast")]; + tensor var_707_split_sizes_0 = const()[name = tensor("op_707_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_707_axis_0 = const()[name = tensor("op_707_axis_0"), val = tensor(1)]; + tensor var_707_cast_0, tensor var_707_cast_1 = split(axis = var_707_axis_0, split_sizes = var_707_split_sizes_0, x = var_706_cast)[name = tensor("op_707_cast")]; + tensor var_709_mode_0 = const()[name = tensor("op_709_mode_0"), val = tensor("EXACT")]; + tensor var_709_cast = gelu(mode = var_709_mode_0, x = var_707_cast_1)[name = tensor("op_709_cast")]; + tensor input_75_cast = mul(x = var_707_cast_0, y = var_709_cast)[name = tensor("input_75_cast")]; + tensor var_713 = const()[name = tensor("op_713"), val = tensor([1, 1])]; + tensor var_715 = const()[name = tensor("op_715"), val = tensor([1, 1])]; + tensor var_717_pad_type_0 = const()[name = tensor("op_717_pad_type_0"), val = tensor("custom")]; + tensor var_717_pad_0 = const()[name = tensor("op_717_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(41449216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43087680))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43088256)))]; + tensor var_717_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_715, groups = var_282, pad = var_717_pad_0, pad_type = var_717_pad_type_0, strides = var_713, weight = down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_75_cast)[name = tensor("op_717_cast")]; + tensor hidden_states_29_cast = add(x = var_717_cast, y = inputs_11_cast)[name = tensor("hidden_states_29_cast")]; + tensor var_719 = const()[name = tensor("op_719"), val = tensor([2, 640, 64, 64])]; + tensor input_77_cast 
= reshape(shape = var_719, x = hidden_states_29_cast)[name = tensor("input_77_cast")]; + tensor var_723 = const()[name = tensor("op_723"), val = tensor([1, 1])]; + tensor var_725 = const()[name = tensor("op_725"), val = tensor([1, 1])]; + tensor hidden_states_31_pad_type_0 = const()[name = tensor("hidden_states_31_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_31_pad_0 = const()[name = tensor("hidden_states_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43089600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43499264))), name = tensor("down_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43499840)))]; + tensor hidden_states_31_cast = conv(bias = down_blocks_1_attentions_0_proj_out_bias_to_fp16, dilations = var_725, groups = var_282, pad = hidden_states_31_pad_0, pad_type = hidden_states_31_pad_type_0, strides = var_723, weight = down_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized, x = input_77_cast)[name = tensor("hidden_states_31_cast")]; + tensor input_79_cast = add(x = hidden_states_31_cast, y = hidden_states_13_cast)[name = tensor("input_79_cast")]; + tensor reshape_28_shape_0 = const()[name = tensor("reshape_28_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_28_cast = reshape(shape = reshape_28_shape_0, x = input_79_cast)[name = tensor("reshape_28_cast")]; + tensor reduce_mean_21_axes_0 = const()[name = tensor("reduce_mean_21_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_21_keep_dims_0 = const()[name = tensor("reduce_mean_21_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_21_cast = reduce_mean(axes = reduce_mean_21_axes_0, keep_dims = reduce_mean_21_keep_dims_0, x = reshape_28_cast)[name = tensor("reduce_mean_21_cast")]; + tensor sub_14_cast = sub(x = reshape_28_cast, y = reduce_mean_21_cast)[name = tensor("sub_14_cast")]; + tensor square_7_cast = square(x = sub_14_cast)[name = tensor("square_7_cast")]; + tensor reduce_mean_23_axes_0 = const()[name = tensor("reduce_mean_23_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_23_keep_dims_0 = const()[name = tensor("reduce_mean_23_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_23_cast = reduce_mean(axes = reduce_mean_23_axes_0, keep_dims = reduce_mean_23_keep_dims_0, x = square_7_cast)[name = tensor("reduce_mean_23_cast")]; + tensor add_14_y_0_to_fp16 = const()[name = tensor("add_14_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_14_cast = add(x = reduce_mean_23_cast, y = add_14_y_0_to_fp16)[name = tensor("add_14_cast")]; + tensor sqrt_7_cast = sqrt(x = add_14_cast)[name = tensor("sqrt_7_cast")]; + tensor real_div_7_cast = real_div(x = sub_14_cast, y = sqrt_7_cast)[name = tensor("real_div_7_cast")]; + tensor reshape_29_shape_0 = const()[name = tensor("reshape_29_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_29_cast = reshape(shape = reshape_29_shape_0, x = real_div_7_cast)[name = tensor("reshape_29_cast")]; + tensor add_15_gamma_0_to_fp16 = const()[name = tensor("add_15_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(43501184)))]; + tensor add_15_beta_0_to_fp16 = const()[name = tensor("add_15_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43502528)))]; + tensor add_15_epsilon_0_to_fp16 = const()[name = tensor("add_15_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_15_cast = batch_norm(beta = add_15_beta_0_to_fp16, epsilon = add_15_epsilon_0_to_fp16, gamma = add_15_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_29_cast)[name = tensor("add_15_cast")]; + tensor input_83_cast = silu(x = add_15_cast)[name = tensor("input_83_cast")]; + tensor var_740 = const()[name = tensor("op_740"), val = tensor([1, 1])]; + tensor var_742 = const()[name = tensor("op_742"), val = tensor([1, 1])]; + tensor hidden_states_33_pad_type_0 = const()[name = tensor("hidden_states_33_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_33_pad_0 = const()[name = tensor("hidden_states_33_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43503872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47190336))), name = tensor("down_blocks_1_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor down_blocks_1_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47190912)))]; + tensor hidden_states_33_cast = conv(bias = down_blocks_1_resnets_1_conv1_bias_to_fp16, dilations = var_742, groups = var_282, pad = hidden_states_33_pad_0, pad_type = hidden_states_33_pad_type_0, strides = var_740, weight = down_blocks_1_resnets_1_conv1_weight_to_fp16_palettized, x = input_83_cast)[name = tensor("hidden_states_33_cast")]; + tensor var_748 = const()[name = tensor("op_748"), val = tensor([1, 1])]; + tensor var_750 = const()[name = tensor("op_750"), val = tensor([1, 1])]; + tensor temb_7_pad_type_0 = const()[name = tensor("temb_7_pad_type_0"), val = tensor("custom")]; + tensor temb_7_pad_0 = const()[name = tensor("temb_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47192256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47806720))), name = tensor("down_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor down_blocks_1_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47806912)))]; + tensor temb_7_cast = conv(bias = down_blocks_1_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_750, groups = var_282, pad = temb_7_pad_0, pad_type = temb_7_pad_type_0, strides = var_748, weight = down_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_7_cast")]; + tensor input_87_cast = add(x = hidden_states_33_cast, y = temb_7_cast)[name = tensor("input_87_cast")]; + tensor reshape_32_shape_0 = const()[name = tensor("reshape_32_shape_0"), 
val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_32_cast = reshape(shape = reshape_32_shape_0, x = input_87_cast)[name = tensor("reshape_32_cast")]; + tensor reduce_mean_24_axes_0 = const()[name = tensor("reduce_mean_24_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_24_keep_dims_0 = const()[name = tensor("reduce_mean_24_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_24_cast = reduce_mean(axes = reduce_mean_24_axes_0, keep_dims = reduce_mean_24_keep_dims_0, x = reshape_32_cast)[name = tensor("reduce_mean_24_cast")]; + tensor sub_16_cast = sub(x = reshape_32_cast, y = reduce_mean_24_cast)[name = tensor("sub_16_cast")]; + tensor square_8_cast = square(x = sub_16_cast)[name = tensor("square_8_cast")]; + tensor reduce_mean_26_axes_0 = const()[name = tensor("reduce_mean_26_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_26_keep_dims_0 = const()[name = tensor("reduce_mean_26_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_26_cast = reduce_mean(axes = reduce_mean_26_axes_0, keep_dims = reduce_mean_26_keep_dims_0, x = square_8_cast)[name = tensor("reduce_mean_26_cast")]; + tensor add_16_y_0_to_fp16 = const()[name = tensor("add_16_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_16_cast = add(x = reduce_mean_26_cast, y = add_16_y_0_to_fp16)[name = tensor("add_16_cast")]; + tensor sqrt_8_cast = sqrt(x = add_16_cast)[name = tensor("sqrt_8_cast")]; + tensor real_div_8_cast = real_div(x = sub_16_cast, y = sqrt_8_cast)[name = tensor("real_div_8_cast")]; + tensor reshape_33_shape_0 = const()[name = tensor("reshape_33_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_33_cast = reshape(shape = reshape_33_shape_0, x = real_div_8_cast)[name = tensor("reshape_33_cast")]; + tensor add_17_gamma_0_to_fp16 = const()[name = tensor("add_17_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47808256)))]; + tensor add_17_beta_0_to_fp16 = const()[name = tensor("add_17_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47809600)))]; + tensor add_17_epsilon_0_to_fp16 = const()[name = tensor("add_17_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_17_cast = batch_norm(beta = add_17_beta_0_to_fp16, epsilon = add_17_epsilon_0_to_fp16, gamma = add_17_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_33_cast)[name = tensor("add_17_cast")]; + tensor input_91_cast = silu(x = add_17_cast)[name = tensor("input_91_cast")]; + tensor var_760 = const()[name = tensor("op_760"), val = tensor([1, 1])]; + tensor var_762 = const()[name = tensor("op_762"), val = tensor([1, 1])]; + tensor hidden_states_35_pad_type_0 = const()[name = tensor("hidden_states_35_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_35_pad_0 = const()[name = tensor("hidden_states_35_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47810944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51497408))), name = tensor("down_blocks_1_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor down_blocks_1_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(51497984)))]; + tensor hidden_states_35_cast = conv(bias = down_blocks_1_resnets_1_conv2_bias_to_fp16, dilations = var_762, groups = var_282, pad = hidden_states_35_pad_0, pad_type = hidden_states_35_pad_type_0, strides = var_760, weight = down_blocks_1_resnets_1_conv2_weight_to_fp16_palettized, x = input_91_cast)[name = tensor("hidden_states_35_cast")]; + tensor hidden_states_37_cast = add(x = input_79_cast, y = hidden_states_35_cast)[name = tensor("hidden_states_37_cast")]; + tensor reshape_36_shape_0 = const()[name = tensor("reshape_36_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_36_cast = reshape(shape = reshape_36_shape_0, x = hidden_states_37_cast)[name = tensor("reshape_36_cast")]; + tensor reduce_mean_27_axes_0 = const()[name = tensor("reduce_mean_27_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_27_keep_dims_0 = const()[name = tensor("reduce_mean_27_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_27_cast = reduce_mean(axes = reduce_mean_27_axes_0, keep_dims = reduce_mean_27_keep_dims_0, x = reshape_36_cast)[name = tensor("reduce_mean_27_cast")]; + tensor sub_18_cast = sub(x = reshape_36_cast, y = reduce_mean_27_cast)[name = tensor("sub_18_cast")]; + tensor square_9_cast = square(x = sub_18_cast)[name = tensor("square_9_cast")]; + tensor reduce_mean_29_axes_0 = const()[name = tensor("reduce_mean_29_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_29_keep_dims_0 = const()[name = tensor("reduce_mean_29_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_29_cast = reduce_mean(axes = reduce_mean_29_axes_0, keep_dims = reduce_mean_29_keep_dims_0, x = square_9_cast)[name = tensor("reduce_mean_29_cast")]; + tensor add_18_y_0_to_fp16 = const()[name = tensor("add_18_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_18_cast = add(x = reduce_mean_29_cast, y = add_18_y_0_to_fp16)[name = tensor("add_18_cast")]; + tensor sqrt_9_cast = sqrt(x = add_18_cast)[name = tensor("sqrt_9_cast")]; + tensor real_div_9_cast = real_div(x = sub_18_cast, y = sqrt_9_cast)[name = tensor("real_div_9_cast")]; + tensor reshape_37_shape_0 = const()[name = tensor("reshape_37_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_37_cast = reshape(shape = reshape_37_shape_0, x = real_div_9_cast)[name = tensor("reshape_37_cast")]; + tensor add_19_gamma_0_to_fp16 = const()[name = tensor("add_19_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51499328)))]; + tensor add_19_beta_0_to_fp16 = const()[name = tensor("add_19_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51500672)))]; + tensor add_19_epsilon_0_to_fp16 = const()[name = tensor("add_19_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_19_cast = batch_norm(beta = add_19_beta_0_to_fp16, epsilon = add_19_epsilon_0_to_fp16, gamma = add_19_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_37_cast)[name = tensor("add_19_cast")]; + tensor var_784 = const()[name = tensor("op_784"), val = tensor([1, 1])]; + tensor var_786 = const()[name = tensor("op_786"), val = tensor([1, 1])]; + tensor hidden_states_39_pad_type_0 = const()[name = tensor("hidden_states_39_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_39_pad_0 = const()[name = tensor("hidden_states_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(51502016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51911680))), name = tensor("down_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_proj_in_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51912256)))]; + tensor hidden_states_39_cast = conv(bias = down_blocks_1_attentions_1_proj_in_bias_to_fp16, dilations = var_786, groups = var_282, pad = hidden_states_39_pad_0, pad_type = hidden_states_39_pad_type_0, strides = var_784, weight = down_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized, x = add_19_cast)[name = tensor("hidden_states_39_cast")]; + tensor var_791 = const()[name = tensor("op_791"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_13_cast = reshape(shape = var_791, x = hidden_states_39_cast)[name = tensor("inputs_13_cast")]; + tensor var_801 = const()[name = tensor("op_801"), val = tensor([1])]; + tensor channels_mean_13_cast = reduce_mean(axes = var_801, keep_dims = var_277, x = inputs_13_cast)[name = tensor("channels_mean_13_cast")]; + tensor zero_mean_13_cast = sub(x = inputs_13_cast, y = channels_mean_13_cast)[name = tensor("zero_mean_13_cast")]; + tensor zero_mean_sq_13_cast = mul(x = zero_mean_13_cast, y = zero_mean_13_cast)[name = tensor("zero_mean_sq_13_cast")]; + tensor var_805 = const()[name = tensor("op_805"), val = tensor([1])]; + tensor var_806_cast = reduce_mean(axes = var_805, keep_dims = var_277, x = zero_mean_sq_13_cast)[name = tensor("op_806_cast")]; + tensor var_807_to_fp16 = const()[name = tensor("op_807_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_808_cast = add(x = var_806_cast, y = var_807_to_fp16)[name = tensor("op_808_cast")]; + tensor denom_13_epsilon_0_to_fp16 = const()[name = tensor("denom_13_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_13_cast = rsqrt(epsilon = denom_13_epsilon_0_to_fp16, x = var_808_cast)[name = tensor("denom_13_cast")]; + tensor out_13_cast = mul(x = zero_mean_13_cast, y = denom_13_cast)[name = tensor("out_13_cast")]; + tensor var_812_to_fp16 = const()[name = tensor("op_812_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51913600)))]; + tensor var_813_cast = add(x = out_13_cast, y = var_812_to_fp16)[name = tensor("op_813_cast")]; + tensor var_815_to_fp16 = const()[name = tensor("op_815_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51914944)))]; + tensor hidden_states_41_cast = mul(x = var_813_cast, y = var_815_to_fp16)[name = tensor("hidden_states_41_cast")]; + tensor var_822 = const()[name = tensor("op_822"), val = tensor([1, 1])]; + tensor var_824 = const()[name = tensor("op_824"), val = tensor([1, 1])]; + tensor q_9_pad_type_0 = const()[name = tensor("q_9_pad_type_0"), val = tensor("custom")]; + tensor q_9_pad_0 = const()[name = tensor("q_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51916288))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52223552))), name = 
tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_9_cast = conv(dilations = var_824, groups = var_282, pad = q_9_pad_0, pad_type = q_9_pad_type_0, strides = var_822, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_41_cast)[name = tensor("q_9_cast")]; + tensor var_828 = const()[name = tensor("op_828"), val = tensor([1, 1])]; + tensor var_830 = const()[name = tensor("op_830"), val = tensor([1, 1])]; + tensor k_9_pad_type_0 = const()[name = tensor("k_9_pad_type_0"), val = tensor("custom")]; + tensor k_9_pad_0 = const()[name = tensor("k_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52223744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52531008))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_9_cast = conv(dilations = var_830, groups = var_282, pad = k_9_pad_0, pad_type = k_9_pad_type_0, strides = var_828, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_41_cast)[name = tensor("k_9_cast")]; + tensor var_834 = const()[name = tensor("op_834"), val = tensor([1, 1])]; + tensor var_836 = const()[name = tensor("op_836"), val = tensor([1, 1])]; + tensor v_9_pad_type_0 = const()[name = tensor("v_9_pad_type_0"), val = tensor("custom")]; + tensor v_9_pad_0 = const()[name = tensor("v_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52531200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52838464))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_9_cast = conv(dilations = var_836, groups = var_282, pad = v_9_pad_0, pad_type = v_9_pad_type_0, strides = var_834, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_41_cast)[name = tensor("v_9_cast")]; + tensor var_840 = const()[name = tensor("op_840"), val = tensor([2, 10, 64, -1])]; + tensor var_841_cast = reshape(shape = var_840, x = q_9_cast)[name = tensor("op_841_cast")]; + tensor var_842 = const()[name = tensor("op_842"), val = tensor([2, 10, 64, -1])]; + tensor var_843_cast = reshape(shape = var_842, x = k_9_cast)[name = tensor("op_843_cast")]; + tensor var_844 = const()[name = tensor("op_844"), val = tensor([2, 10, 64, -1])]; + tensor var_845_cast = reshape(shape = var_844, x = v_9_cast)[name = tensor("op_845_cast")]; + tensor attn_weights_17_transpose_x_0 = const()[name = tensor("attn_weights_17_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_17_transpose_y_0 = const()[name = tensor("attn_weights_17_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_17_cast = matmul(transpose_x = attn_weights_17_transpose_x_0, transpose_y = attn_weights_17_transpose_y_0, x = var_841_cast, y = var_843_cast)[name = tensor("attn_weights_17_cast")]; + tensor attn_weights_19_cast = 
mul(x = attn_weights_17_cast, y = var_273_to_fp16)[name = tensor("attn_weights_19_cast")]; + tensor var_849_cast = softmax(axis = var_266, x = attn_weights_19_cast)[name = tensor("op_849_cast")]; + tensor attn_9_transpose_x_0 = const()[name = tensor("attn_9_transpose_x_0"), val = tensor(false)]; + tensor attn_9_transpose_y_0 = const()[name = tensor("attn_9_transpose_y_0"), val = tensor(true)]; + tensor attn_9_cast = matmul(transpose_x = attn_9_transpose_x_0, transpose_y = attn_9_transpose_y_0, x = var_845_cast, y = var_849_cast)[name = tensor("attn_9_cast")]; + tensor var_853 = const()[name = tensor("op_853"), val = tensor([2, 640, 1, -1])]; + tensor input_95_cast = reshape(shape = var_853, x = attn_9_cast)[name = tensor("input_95_cast")]; + tensor var_858 = const()[name = tensor("op_858"), val = tensor([1, 1])]; + tensor var_860 = const()[name = tensor("op_860"), val = tensor([1, 1])]; + tensor var_862_pad_type_0 = const()[name = tensor("op_862_pad_type_0"), val = tensor("custom")]; + tensor var_862_pad_0 = const()[name = tensor("op_862_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52838656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53248320))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53248896)))]; + tensor var_862_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_860, groups = var_282, pad = var_862_pad_0, pad_type = var_862_pad_type_0, strides = var_858, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_95_cast)[name = tensor("op_862_cast")]; + tensor inputs_15_cast = add(x = var_862_cast, y = inputs_13_cast)[name = tensor("inputs_15_cast")]; + tensor var_866 = const()[name = tensor("op_866"), val = tensor([1])]; + tensor channels_mean_15_cast = reduce_mean(axes = var_866, keep_dims = var_277, x = inputs_15_cast)[name = tensor("channels_mean_15_cast")]; + tensor zero_mean_15_cast = sub(x = inputs_15_cast, y = channels_mean_15_cast)[name = tensor("zero_mean_15_cast")]; + tensor zero_mean_sq_15_cast = mul(x = zero_mean_15_cast, y = zero_mean_15_cast)[name = tensor("zero_mean_sq_15_cast")]; + tensor var_870 = const()[name = tensor("op_870"), val = tensor([1])]; + tensor var_871_cast = reduce_mean(axes = var_870, keep_dims = var_277, x = zero_mean_sq_15_cast)[name = tensor("op_871_cast")]; + tensor var_872_to_fp16 = const()[name = tensor("op_872_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_873_cast = add(x = var_871_cast, y = var_872_to_fp16)[name = tensor("op_873_cast")]; + tensor denom_15_epsilon_0_to_fp16 = const()[name = tensor("denom_15_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_15_cast = rsqrt(epsilon = denom_15_epsilon_0_to_fp16, x = var_873_cast)[name = tensor("denom_15_cast")]; + tensor out_15_cast = mul(x = zero_mean_15_cast, y = denom_15_cast)[name = tensor("out_15_cast")]; + tensor var_877_to_fp16 = const()[name = 
tensor("op_877_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53250240)))]; + tensor var_878_cast = add(x = out_15_cast, y = var_877_to_fp16)[name = tensor("op_878_cast")]; + tensor var_880_to_fp16 = const()[name = tensor("op_880_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53251584)))]; + tensor hidden_states_43_cast = mul(x = var_878_cast, y = var_880_to_fp16)[name = tensor("hidden_states_43_cast")]; + tensor var_887 = const()[name = tensor("op_887"), val = tensor([1, 1])]; + tensor var_889 = const()[name = tensor("op_889"), val = tensor([1, 1])]; + tensor q_11_pad_type_0 = const()[name = tensor("q_11_pad_type_0"), val = tensor("custom")]; + tensor q_11_pad_0 = const()[name = tensor("q_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53252928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53457792))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_11_cast = conv(dilations = var_889, groups = var_282, pad = q_11_pad_0, pad_type = q_11_pad_type_0, strides = var_887, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_43_cast)[name = tensor("q_11_cast")]; + tensor var_893 = const()[name = tensor("op_893"), val = tensor([1, 1])]; + tensor var_895 = const()[name = tensor("op_895"), val = tensor([1, 1])]; + tensor k_11_pad_type_0 = const()[name = tensor("k_11_pad_type_0"), val = tensor("custom")]; + tensor k_11_pad_0 = const()[name = tensor("k_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53457920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(54113344))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_11_cast = conv(dilations = var_895, groups = var_282, pad = k_11_pad_0, pad_type = k_11_pad_type_0, strides = var_893, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_11_cast")]; + tensor var_899 = const()[name = tensor("op_899"), val = tensor([1, 1])]; + tensor var_901 = const()[name = tensor("op_901"), val = tensor([1, 1])]; + tensor v_11_pad_type_0 = const()[name = tensor("v_11_pad_type_0"), val = tensor("custom")]; + tensor v_11_pad_0 = const()[name = tensor("v_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(54113472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55096576))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_11_cast = conv(dilations = var_901, groups = var_282, pad = v_11_pad_0, pad_type = 
v_11_pad_type_0, strides = var_899, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_11_cast")]; + tensor var_905 = const()[name = tensor("op_905"), val = tensor([2, 10, 64, -1])]; + tensor var_906_cast = reshape(shape = var_905, x = q_11_cast)[name = tensor("op_906_cast")]; + tensor var_907 = const()[name = tensor("op_907"), val = tensor([2, 10, 64, -1])]; + tensor var_908_cast = reshape(shape = var_907, x = k_11_cast)[name = tensor("op_908_cast")]; + tensor var_909 = const()[name = tensor("op_909"), val = tensor([2, 10, 64, -1])]; + tensor var_910_cast = reshape(shape = var_909, x = v_11_cast)[name = tensor("op_910_cast")]; + tensor attn_weights_21_transpose_x_0 = const()[name = tensor("attn_weights_21_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_21_transpose_y_0 = const()[name = tensor("attn_weights_21_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_21_cast = matmul(transpose_x = attn_weights_21_transpose_x_0, transpose_y = attn_weights_21_transpose_y_0, x = var_906_cast, y = var_908_cast)[name = tensor("attn_weights_21_cast")]; + tensor attn_weights_23_cast = mul(x = attn_weights_21_cast, y = var_273_to_fp16)[name = tensor("attn_weights_23_cast")]; + tensor var_914_cast = softmax(axis = var_266, x = attn_weights_23_cast)[name = tensor("op_914_cast")]; + tensor attn_11_transpose_x_0 = const()[name = tensor("attn_11_transpose_x_0"), val = tensor(false)]; + tensor attn_11_transpose_y_0 = const()[name = tensor("attn_11_transpose_y_0"), val = tensor(true)]; + tensor attn_11_cast = matmul(transpose_x = attn_11_transpose_x_0, transpose_y = attn_11_transpose_y_0, x = var_910_cast, y = var_914_cast)[name = tensor("attn_11_cast")]; + tensor var_918 = const()[name = tensor("op_918"), val = tensor([2, 640, 1, -1])]; + tensor input_97_cast = reshape(shape = var_918, x = attn_11_cast)[name = tensor("input_97_cast")]; + tensor var_923 = const()[name = tensor("op_923"), val = tensor([1, 1])]; + tensor var_925 = const()[name = tensor("op_925"), val = tensor([1, 1])]; + tensor var_927_pad_type_0 = const()[name = tensor("op_927_pad_type_0"), val = tensor("custom")]; + tensor var_927_pad_0 = const()[name = tensor("op_927_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55096768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55404032))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55404224)))]; + tensor var_927_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_925, groups = var_282, pad = var_927_pad_0, pad_type = var_927_pad_type_0, strides = var_923, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_97_cast)[name = tensor("op_927_cast")]; + tensor inputs_17_cast = add(x = var_927_cast, y = inputs_15_cast)[name = tensor("inputs_17_cast")]; + tensor var_931 = 
const()[name = tensor("op_931"), val = tensor([1])]; + tensor channels_mean_17_cast = reduce_mean(axes = var_931, keep_dims = var_277, x = inputs_17_cast)[name = tensor("channels_mean_17_cast")]; + tensor zero_mean_17_cast = sub(x = inputs_17_cast, y = channels_mean_17_cast)[name = tensor("zero_mean_17_cast")]; + tensor zero_mean_sq_17_cast = mul(x = zero_mean_17_cast, y = zero_mean_17_cast)[name = tensor("zero_mean_sq_17_cast")]; + tensor var_935 = const()[name = tensor("op_935"), val = tensor([1])]; + tensor var_936_cast = reduce_mean(axes = var_935, keep_dims = var_277, x = zero_mean_sq_17_cast)[name = tensor("op_936_cast")]; + tensor var_937_to_fp16 = const()[name = tensor("op_937_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_938_cast = add(x = var_936_cast, y = var_937_to_fp16)[name = tensor("op_938_cast")]; + tensor denom_17_epsilon_0_to_fp16 = const()[name = tensor("denom_17_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_17_cast = rsqrt(epsilon = denom_17_epsilon_0_to_fp16, x = var_938_cast)[name = tensor("denom_17_cast")]; + tensor out_17_cast = mul(x = zero_mean_17_cast, y = denom_17_cast)[name = tensor("out_17_cast")]; + tensor var_942_to_fp16 = const()[name = tensor("op_942_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55405568)))]; + tensor var_943_cast = add(x = out_17_cast, y = var_942_to_fp16)[name = tensor("op_943_cast")]; + tensor var_945_to_fp16 = const()[name = tensor("op_945_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55406912)))]; + tensor input_99_cast = mul(x = var_943_cast, y = var_945_to_fp16)[name = tensor("input_99_cast")]; + tensor var_953 = const()[name = tensor("op_953"), val = tensor([1, 1])]; + tensor var_955 = const()[name = tensor("op_955"), val = tensor([1, 1])]; + tensor var_957_pad_type_0 = const()[name = tensor("op_957_pad_type_0"), val = tensor("custom")]; + tensor var_957_pad_0 = const()[name = tensor("op_957_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55408256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(58685120))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(58685696)))]; + tensor var_957_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_955, groups = var_282, pad = var_957_pad_0, pad_type = var_957_pad_type_0, strides = var_953, weight = down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_99_cast)[name = tensor("op_957_cast")]; + tensor var_958_split_sizes_0 = const()[name = tensor("op_958_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_958_axis_0 = const()[name = tensor("op_958_axis_0"), val = tensor(1)]; + tensor var_958_cast_0, tensor var_958_cast_1 = split(axis = var_958_axis_0, split_sizes = var_958_split_sizes_0, x = var_957_cast)[name = tensor("op_958_cast")]; + tensor var_960_mode_0 = 
const()[name = tensor("op_960_mode_0"), val = tensor("EXACT")]; + tensor var_960_cast = gelu(mode = var_960_mode_0, x = var_958_cast_1)[name = tensor("op_960_cast")]; + tensor input_101_cast = mul(x = var_958_cast_0, y = var_960_cast)[name = tensor("input_101_cast")]; + tensor var_964 = const()[name = tensor("op_964"), val = tensor([1, 1])]; + tensor var_966 = const()[name = tensor("op_966"), val = tensor([1, 1])]; + tensor var_968_pad_type_0 = const()[name = tensor("op_968_pad_type_0"), val = tensor("custom")]; + tensor var_968_pad_0 = const()[name = tensor("op_968_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(58696000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60334464))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60335040)))]; + tensor var_968_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_966, groups = var_282, pad = var_968_pad_0, pad_type = var_968_pad_type_0, strides = var_964, weight = down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_101_cast)[name = tensor("op_968_cast")]; + tensor inputs_19_cast = add(x = var_968_cast, y = inputs_17_cast)[name = tensor("inputs_19_cast")]; + tensor var_978 = const()[name = tensor("op_978"), val = tensor([1])]; + tensor channels_mean_19_cast = reduce_mean(axes = var_978, keep_dims = var_277, x = inputs_19_cast)[name = tensor("channels_mean_19_cast")]; + tensor zero_mean_19_cast = sub(x = inputs_19_cast, y = channels_mean_19_cast)[name = tensor("zero_mean_19_cast")]; + tensor zero_mean_sq_19_cast = mul(x = zero_mean_19_cast, y = zero_mean_19_cast)[name = tensor("zero_mean_sq_19_cast")]; + tensor var_982 = const()[name = tensor("op_982"), val = tensor([1])]; + tensor var_983_cast = reduce_mean(axes = var_982, keep_dims = var_277, x = zero_mean_sq_19_cast)[name = tensor("op_983_cast")]; + tensor var_984_to_fp16 = const()[name = tensor("op_984_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_985_cast = add(x = var_983_cast, y = var_984_to_fp16)[name = tensor("op_985_cast")]; + tensor denom_19_epsilon_0_to_fp16 = const()[name = tensor("denom_19_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_19_cast = rsqrt(epsilon = denom_19_epsilon_0_to_fp16, x = var_985_cast)[name = tensor("denom_19_cast")]; + tensor out_19_cast = mul(x = zero_mean_19_cast, y = denom_19_cast)[name = tensor("out_19_cast")]; + tensor var_989_to_fp16 = const()[name = tensor("op_989_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60336384)))]; + tensor var_990_cast = add(x = out_19_cast, y = var_989_to_fp16)[name = tensor("op_990_cast")]; + tensor var_992_to_fp16 = const()[name = tensor("op_992_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60337728)))]; + tensor hidden_states_47_cast = mul(x = var_990_cast, y = var_992_to_fp16)[name = tensor("hidden_states_47_cast")]; + 
tensor var_999 = const()[name = tensor("op_999"), val = tensor([1, 1])]; + tensor var_1001 = const()[name = tensor("op_1001"), val = tensor([1, 1])]; + tensor q_13_pad_type_0 = const()[name = tensor("q_13_pad_type_0"), val = tensor("custom")]; + tensor q_13_pad_0 = const()[name = tensor("q_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60339072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60646336))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_13_cast = conv(dilations = var_1001, groups = var_282, pad = q_13_pad_0, pad_type = q_13_pad_type_0, strides = var_999, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_47_cast)[name = tensor("q_13_cast")]; + tensor var_1005 = const()[name = tensor("op_1005"), val = tensor([1, 1])]; + tensor var_1007 = const()[name = tensor("op_1007"), val = tensor([1, 1])]; + tensor k_13_pad_type_0 = const()[name = tensor("k_13_pad_type_0"), val = tensor("custom")]; + tensor k_13_pad_0 = const()[name = tensor("k_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60646528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60953792))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_13_cast = conv(dilations = var_1007, groups = var_282, pad = k_13_pad_0, pad_type = k_13_pad_type_0, strides = var_1005, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_47_cast)[name = tensor("k_13_cast")]; + tensor var_1011 = const()[name = tensor("op_1011"), val = tensor([1, 1])]; + tensor var_1013 = const()[name = tensor("op_1013"), val = tensor([1, 1])]; + tensor v_13_pad_type_0 = const()[name = tensor("v_13_pad_type_0"), val = tensor("custom")]; + tensor v_13_pad_0 = const()[name = tensor("v_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60953984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61261248))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_13_cast = conv(dilations = var_1013, groups = var_282, pad = v_13_pad_0, pad_type = v_13_pad_type_0, strides = var_1011, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_47_cast)[name = tensor("v_13_cast")]; + tensor var_1017 = const()[name = tensor("op_1017"), val = tensor([2, 10, 64, -1])]; + tensor var_1018_cast = reshape(shape = var_1017, x = q_13_cast)[name = tensor("op_1018_cast")]; + tensor var_1019 = const()[name = tensor("op_1019"), val = tensor([2, 10, 64, -1])]; + tensor var_1020_cast = reshape(shape = 
var_1019, x = k_13_cast)[name = tensor("op_1020_cast")]; + tensor var_1021 = const()[name = tensor("op_1021"), val = tensor([2, 10, 64, -1])]; + tensor var_1022_cast = reshape(shape = var_1021, x = v_13_cast)[name = tensor("op_1022_cast")]; + tensor attn_weights_25_transpose_x_0 = const()[name = tensor("attn_weights_25_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_25_transpose_y_0 = const()[name = tensor("attn_weights_25_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_25_cast = matmul(transpose_x = attn_weights_25_transpose_x_0, transpose_y = attn_weights_25_transpose_y_0, x = var_1018_cast, y = var_1020_cast)[name = tensor("attn_weights_25_cast")]; + tensor attn_weights_27_cast = mul(x = attn_weights_25_cast, y = var_273_to_fp16)[name = tensor("attn_weights_27_cast")]; + tensor var_1026_cast = softmax(axis = var_266, x = attn_weights_27_cast)[name = tensor("op_1026_cast")]; + tensor attn_13_transpose_x_0 = const()[name = tensor("attn_13_transpose_x_0"), val = tensor(false)]; + tensor attn_13_transpose_y_0 = const()[name = tensor("attn_13_transpose_y_0"), val = tensor(true)]; + tensor attn_13_cast = matmul(transpose_x = attn_13_transpose_x_0, transpose_y = attn_13_transpose_y_0, x = var_1022_cast, y = var_1026_cast)[name = tensor("attn_13_cast")]; + tensor var_1030 = const()[name = tensor("op_1030"), val = tensor([2, 640, 1, -1])]; + tensor input_103_cast = reshape(shape = var_1030, x = attn_13_cast)[name = tensor("input_103_cast")]; + tensor var_1035 = const()[name = tensor("op_1035"), val = tensor([1, 1])]; + tensor var_1037 = const()[name = tensor("op_1037"), val = tensor([1, 1])]; + tensor var_1039_pad_type_0 = const()[name = tensor("op_1039_pad_type_0"), val = tensor("custom")]; + tensor var_1039_pad_0 = const()[name = tensor("op_1039_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61261440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61568704))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61568896)))]; + tensor var_1039_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_1037, groups = var_282, pad = var_1039_pad_0, pad_type = var_1039_pad_type_0, strides = var_1035, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_103_cast)[name = tensor("op_1039_cast")]; + tensor inputs_21_cast = add(x = var_1039_cast, y = inputs_19_cast)[name = tensor("inputs_21_cast")]; + tensor var_1043 = const()[name = tensor("op_1043"), val = tensor([1])]; + tensor channels_mean_21_cast = reduce_mean(axes = var_1043, keep_dims = var_277, x = inputs_21_cast)[name = tensor("channels_mean_21_cast")]; + tensor zero_mean_21_cast = sub(x = inputs_21_cast, y = channels_mean_21_cast)[name = tensor("zero_mean_21_cast")]; + tensor zero_mean_sq_21_cast = mul(x = zero_mean_21_cast, y = zero_mean_21_cast)[name = tensor("zero_mean_sq_21_cast")]; + tensor var_1047 = 
const()[name = tensor("op_1047"), val = tensor([1])]; + tensor var_1048_cast = reduce_mean(axes = var_1047, keep_dims = var_277, x = zero_mean_sq_21_cast)[name = tensor("op_1048_cast")]; + tensor var_1049_to_fp16 = const()[name = tensor("op_1049_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1050_cast = add(x = var_1048_cast, y = var_1049_to_fp16)[name = tensor("op_1050_cast")]; + tensor denom_21_epsilon_0_to_fp16 = const()[name = tensor("denom_21_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_21_cast = rsqrt(epsilon = denom_21_epsilon_0_to_fp16, x = var_1050_cast)[name = tensor("denom_21_cast")]; + tensor out_21_cast = mul(x = zero_mean_21_cast, y = denom_21_cast)[name = tensor("out_21_cast")]; + tensor var_1054_to_fp16 = const()[name = tensor("op_1054_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61570240)))]; + tensor var_1055_cast = add(x = out_21_cast, y = var_1054_to_fp16)[name = tensor("op_1055_cast")]; + tensor var_1057_to_fp16 = const()[name = tensor("op_1057_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61571584)))]; + tensor hidden_states_49_cast = mul(x = var_1055_cast, y = var_1057_to_fp16)[name = tensor("hidden_states_49_cast")]; + tensor var_1064 = const()[name = tensor("op_1064"), val = tensor([1, 1])]; + tensor var_1066 = const()[name = tensor("op_1066"), val = tensor([1, 1])]; + tensor q_15_pad_type_0 = const()[name = tensor("q_15_pad_type_0"), val = tensor("custom")]; + tensor q_15_pad_0 = const()[name = tensor("q_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61572928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61777792))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_15_cast = conv(dilations = var_1066, groups = var_282, pad = q_15_pad_0, pad_type = q_15_pad_type_0, strides = var_1064, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_49_cast)[name = tensor("q_15_cast")]; + tensor var_1070 = const()[name = tensor("op_1070"), val = tensor([1, 1])]; + tensor var_1072 = const()[name = tensor("op_1072"), val = tensor([1, 1])]; + tensor k_15_pad_type_0 = const()[name = tensor("k_15_pad_type_0"), val = tensor("custom")]; + tensor k_15_pad_0 = const()[name = tensor("k_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61777920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(62433344))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_15_cast = conv(dilations = var_1072, groups = var_282, pad = k_15_pad_0, pad_type = k_15_pad_type_0, strides = var_1070, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_15_cast")]; + tensor var_1076 = const()[name = tensor("op_1076"), val = tensor([1, 1])]; + tensor var_1078 = 
const()[name = tensor("op_1078"), val = tensor([1, 1])]; + tensor v_15_pad_type_0 = const()[name = tensor("v_15_pad_type_0"), val = tensor("custom")]; + tensor v_15_pad_0 = const()[name = tensor("v_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(62433472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63416576))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_15_cast = conv(dilations = var_1078, groups = var_282, pad = v_15_pad_0, pad_type = v_15_pad_type_0, strides = var_1076, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_15_cast")]; + tensor var_1082 = const()[name = tensor("op_1082"), val = tensor([2, 10, 64, -1])]; + tensor var_1083_cast = reshape(shape = var_1082, x = q_15_cast)[name = tensor("op_1083_cast")]; + tensor var_1084 = const()[name = tensor("op_1084"), val = tensor([2, 10, 64, -1])]; + tensor var_1085_cast = reshape(shape = var_1084, x = k_15_cast)[name = tensor("op_1085_cast")]; + tensor var_1086 = const()[name = tensor("op_1086"), val = tensor([2, 10, 64, -1])]; + tensor var_1087_cast = reshape(shape = var_1086, x = v_15_cast)[name = tensor("op_1087_cast")]; + tensor attn_weights_29_transpose_x_0 = const()[name = tensor("attn_weights_29_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_29_transpose_y_0 = const()[name = tensor("attn_weights_29_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_29_cast = matmul(transpose_x = attn_weights_29_transpose_x_0, transpose_y = attn_weights_29_transpose_y_0, x = var_1083_cast, y = var_1085_cast)[name = tensor("attn_weights_29_cast")]; + tensor attn_weights_31_cast = mul(x = attn_weights_29_cast, y = var_273_to_fp16)[name = tensor("attn_weights_31_cast")]; + tensor var_1091_cast = softmax(axis = var_266, x = attn_weights_31_cast)[name = tensor("op_1091_cast")]; + tensor attn_15_transpose_x_0 = const()[name = tensor("attn_15_transpose_x_0"), val = tensor(false)]; + tensor attn_15_transpose_y_0 = const()[name = tensor("attn_15_transpose_y_0"), val = tensor(true)]; + tensor attn_15_cast = matmul(transpose_x = attn_15_transpose_x_0, transpose_y = attn_15_transpose_y_0, x = var_1087_cast, y = var_1091_cast)[name = tensor("attn_15_cast")]; + tensor var_1095 = const()[name = tensor("op_1095"), val = tensor([2, 640, 1, -1])]; + tensor input_105_cast = reshape(shape = var_1095, x = attn_15_cast)[name = tensor("input_105_cast")]; + tensor var_1100 = const()[name = tensor("op_1100"), val = tensor([1, 1])]; + tensor var_1102 = const()[name = tensor("op_1102"), val = tensor([1, 1])]; + tensor var_1104_pad_type_0 = const()[name = tensor("op_1104_pad_type_0"), val = tensor("custom")]; + tensor var_1104_pad_0 = const()[name = tensor("op_1104_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63416768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63724032))), name = 
tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63724224)))]; + tensor var_1104_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_1102, groups = var_282, pad = var_1104_pad_0, pad_type = var_1104_pad_type_0, strides = var_1100, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_105_cast)[name = tensor("op_1104_cast")]; + tensor inputs_23_cast = add(x = var_1104_cast, y = inputs_21_cast)[name = tensor("inputs_23_cast")]; + tensor var_1108 = const()[name = tensor("op_1108"), val = tensor([1])]; + tensor channels_mean_23_cast = reduce_mean(axes = var_1108, keep_dims = var_277, x = inputs_23_cast)[name = tensor("channels_mean_23_cast")]; + tensor zero_mean_23_cast = sub(x = inputs_23_cast, y = channels_mean_23_cast)[name = tensor("zero_mean_23_cast")]; + tensor zero_mean_sq_23_cast = mul(x = zero_mean_23_cast, y = zero_mean_23_cast)[name = tensor("zero_mean_sq_23_cast")]; + tensor var_1112 = const()[name = tensor("op_1112"), val = tensor([1])]; + tensor var_1113_cast = reduce_mean(axes = var_1112, keep_dims = var_277, x = zero_mean_sq_23_cast)[name = tensor("op_1113_cast")]; + tensor var_1114_to_fp16 = const()[name = tensor("op_1114_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1115_cast = add(x = var_1113_cast, y = var_1114_to_fp16)[name = tensor("op_1115_cast")]; + tensor denom_23_epsilon_0_to_fp16 = const()[name = tensor("denom_23_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_23_cast = rsqrt(epsilon = denom_23_epsilon_0_to_fp16, x = var_1115_cast)[name = tensor("denom_23_cast")]; + tensor out_23_cast = mul(x = zero_mean_23_cast, y = denom_23_cast)[name = tensor("out_23_cast")]; + tensor var_1119_to_fp16 = const()[name = tensor("op_1119_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63725568)))]; + tensor var_1120_cast = add(x = out_23_cast, y = var_1119_to_fp16)[name = tensor("op_1120_cast")]; + tensor var_1122_to_fp16 = const()[name = tensor("op_1122_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63726912)))]; + tensor input_107_cast = mul(x = var_1120_cast, y = var_1122_to_fp16)[name = tensor("input_107_cast")]; + tensor var_1130 = const()[name = tensor("op_1130"), val = tensor([1, 1])]; + tensor var_1132 = const()[name = tensor("op_1132"), val = tensor([1, 1])]; + tensor var_1134_pad_type_0 = const()[name = tensor("op_1134_pad_type_0"), val = tensor("custom")]; + tensor var_1134_pad_0 = const()[name = tensor("op_1134_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63728256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(67005120))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor 
down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(67005696)))]; + tensor var_1134_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_1132, groups = var_282, pad = var_1134_pad_0, pad_type = var_1134_pad_type_0, strides = var_1130, weight = down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_107_cast)[name = tensor("op_1134_cast")]; + tensor var_1135_split_sizes_0 = const()[name = tensor("op_1135_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_1135_axis_0 = const()[name = tensor("op_1135_axis_0"), val = tensor(1)]; + tensor var_1135_cast_0, tensor var_1135_cast_1 = split(axis = var_1135_axis_0, split_sizes = var_1135_split_sizes_0, x = var_1134_cast)[name = tensor("op_1135_cast")]; + tensor var_1137_mode_0 = const()[name = tensor("op_1137_mode_0"), val = tensor("EXACT")]; + tensor var_1137_cast = gelu(mode = var_1137_mode_0, x = var_1135_cast_1)[name = tensor("op_1137_cast")]; + tensor input_109_cast = mul(x = var_1135_cast_0, y = var_1137_cast)[name = tensor("input_109_cast")]; + tensor var_1141 = const()[name = tensor("op_1141"), val = tensor([1, 1])]; + tensor var_1143 = const()[name = tensor("op_1143"), val = tensor([1, 1])]; + tensor var_1145_pad_type_0 = const()[name = tensor("op_1145_pad_type_0"), val = tensor("custom")]; + tensor var_1145_pad_0 = const()[name = tensor("op_1145_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(67016000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(68654464))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(68655040)))]; + tensor var_1145_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_1143, groups = var_282, pad = var_1145_pad_0, pad_type = var_1145_pad_type_0, strides = var_1141, weight = down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_109_cast)[name = tensor("op_1145_cast")]; + tensor hidden_states_53_cast = add(x = var_1145_cast, y = inputs_23_cast)[name = tensor("hidden_states_53_cast")]; + tensor var_1147 = const()[name = tensor("op_1147"), val = tensor([2, 640, 64, 64])]; + tensor input_111_cast = reshape(shape = var_1147, x = hidden_states_53_cast)[name = tensor("input_111_cast")]; + tensor var_1151 = const()[name = tensor("op_1151"), val = tensor([1, 1])]; + tensor var_1153 = const()[name = tensor("op_1153"), val = tensor([1, 1])]; + tensor hidden_states_55_pad_type_0 = const()[name = tensor("hidden_states_55_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_55_pad_0 = const()[name = tensor("hidden_states_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(68656384))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(69066048))), name = tensor("down_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(69066624)))]; + tensor hidden_states_55_cast = conv(bias = down_blocks_1_attentions_1_proj_out_bias_to_fp16, dilations = var_1153, groups = var_282, pad = hidden_states_55_pad_0, pad_type = hidden_states_55_pad_type_0, strides = var_1151, weight = down_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized, x = input_111_cast)[name = tensor("hidden_states_55_cast")]; + tensor input_113_cast = add(x = hidden_states_55_cast, y = hidden_states_37_cast)[name = tensor("input_113_cast")]; + tensor var_1160 = const()[name = tensor("op_1160"), val = tensor([2, 2])]; + tensor var_1162 = const()[name = tensor("op_1162"), val = tensor([1, 1])]; + tensor input_115_pad_type_0 = const()[name = tensor("input_115_pad_type_0"), val = tensor("custom")]; + tensor input_115_pad_0 = const()[name = tensor("input_115_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_downsamplers_0_conv_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(69067968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(72754432))), name = tensor("down_blocks_1_downsamplers_0_conv_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor down_blocks_1_downsamplers_0_conv_bias_to_fp16 = const()[name = tensor("down_blocks_1_downsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(72755008)))]; + tensor input_115_cast = conv(bias = down_blocks_1_downsamplers_0_conv_bias_to_fp16, dilations = var_1162, groups = var_282, pad = input_115_pad_0, pad_type = input_115_pad_type_0, strides = var_1160, weight = down_blocks_1_downsamplers_0_conv_weight_to_fp16_palettized, x = input_113_cast)[name = tensor("input_115_cast")]; + tensor var_1170 = const()[name = tensor("op_1170"), val = tensor(3)]; + tensor var_1181 = const()[name = tensor("op_1181"), val = tensor(true)]; + tensor var_1186 = const()[name = tensor("op_1186"), val = tensor(1)]; + tensor reshape_40_shape_0 = const()[name = tensor("reshape_40_shape_0"), val = tensor([2, 32, 20, 32, 32])]; + tensor reshape_40_cast = reshape(shape = reshape_40_shape_0, x = input_115_cast)[name = tensor("reshape_40_cast")]; + tensor reduce_mean_30_axes_0 = const()[name = tensor("reduce_mean_30_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_30_keep_dims_0 = const()[name = tensor("reduce_mean_30_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_30_cast = reduce_mean(axes = reduce_mean_30_axes_0, keep_dims = reduce_mean_30_keep_dims_0, x = reshape_40_cast)[name = tensor("reduce_mean_30_cast")]; + tensor sub_20_cast = sub(x = reshape_40_cast, y = reduce_mean_30_cast)[name = tensor("sub_20_cast")]; + tensor square_10_cast = square(x = sub_20_cast)[name = tensor("square_10_cast")]; + tensor reduce_mean_32_axes_0 = const()[name = tensor("reduce_mean_32_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_32_keep_dims_0 
= const()[name = tensor("reduce_mean_32_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_32_cast = reduce_mean(axes = reduce_mean_32_axes_0, keep_dims = reduce_mean_32_keep_dims_0, x = square_10_cast)[name = tensor("reduce_mean_32_cast")]; + tensor add_20_y_0_to_fp16 = const()[name = tensor("add_20_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_20_cast = add(x = reduce_mean_32_cast, y = add_20_y_0_to_fp16)[name = tensor("add_20_cast")]; + tensor sqrt_10_cast = sqrt(x = add_20_cast)[name = tensor("sqrt_10_cast")]; + tensor real_div_10_cast = real_div(x = sub_20_cast, y = sqrt_10_cast)[name = tensor("real_div_10_cast")]; + tensor reshape_41_shape_0 = const()[name = tensor("reshape_41_shape_0"), val = tensor([2, 640, 32, 32])]; + tensor reshape_41_cast = reshape(shape = reshape_41_shape_0, x = real_div_10_cast)[name = tensor("reshape_41_cast")]; + tensor add_21_gamma_0_to_fp16 = const()[name = tensor("add_21_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(72756352)))]; + tensor add_21_beta_0_to_fp16 = const()[name = tensor("add_21_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(72757696)))]; + tensor add_21_epsilon_0_to_fp16 = const()[name = tensor("add_21_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_21_cast = batch_norm(beta = add_21_beta_0_to_fp16, epsilon = add_21_epsilon_0_to_fp16, gamma = add_21_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_41_cast)[name = tensor("add_21_cast")]; + tensor input_119_cast = silu(x = add_21_cast)[name = tensor("input_119_cast")]; + tensor var_1207 = const()[name = tensor("op_1207"), val = tensor([1, 1])]; + tensor var_1209 = const()[name = tensor("op_1209"), val = tensor([1, 1])]; + tensor hidden_states_57_pad_type_0 = const()[name = tensor("hidden_states_57_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_57_pad_0 = const()[name = tensor("hidden_states_57_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_2_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(72759040))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(78288704))), name = tensor("down_blocks_2_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 640, 3, 3])]; + tensor down_blocks_2_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(78288896)))]; + tensor hidden_states_57_cast = conv(bias = down_blocks_2_resnets_0_conv1_bias_to_fp16, dilations = var_1209, groups = var_1186, pad = hidden_states_57_pad_0, pad_type = hidden_states_57_pad_type_0, strides = var_1207, weight = down_blocks_2_resnets_0_conv1_weight_to_fp16_palettized, x = input_119_cast)[name = tensor("hidden_states_57_cast")]; + tensor var_1215 = const()[name = tensor("op_1215"), val = tensor([1, 1])]; + tensor var_1217 = const()[name = tensor("op_1217"), val = tensor([1, 1])]; + tensor temb_9_pad_type_0 = const()[name = tensor("temb_9_pad_type_0"), val = tensor("custom")]; + tensor temb_9_pad_0 = const()[name = tensor("temb_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(78291520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79520384))), name = tensor("down_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79520576)))]; + tensor temb_9_cast = conv(bias = down_blocks_2_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_1217, groups = var_1186, pad = temb_9_pad_0, pad_type = temb_9_pad_type_0, strides = var_1215, weight = down_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_9_cast")]; + tensor input_123_cast = add(x = hidden_states_57_cast, y = temb_9_cast)[name = tensor("input_123_cast")]; + tensor reshape_44_shape_0 = const()[name = tensor("reshape_44_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_44_cast = reshape(shape = reshape_44_shape_0, x = input_123_cast)[name = tensor("reshape_44_cast")]; + tensor reduce_mean_33_axes_0 = const()[name = tensor("reduce_mean_33_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_33_keep_dims_0 = const()[name = tensor("reduce_mean_33_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_33_cast = reduce_mean(axes = reduce_mean_33_axes_0, keep_dims = reduce_mean_33_keep_dims_0, x = reshape_44_cast)[name = tensor("reduce_mean_33_cast")]; + tensor sub_22_cast = sub(x = reshape_44_cast, y = reduce_mean_33_cast)[name = tensor("sub_22_cast")]; + tensor square_11_cast = square(x = sub_22_cast)[name = tensor("square_11_cast")]; + tensor reduce_mean_35_axes_0 = const()[name = tensor("reduce_mean_35_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_35_keep_dims_0 = const()[name = tensor("reduce_mean_35_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_35_cast = reduce_mean(axes = reduce_mean_35_axes_0, keep_dims = reduce_mean_35_keep_dims_0, x = square_11_cast)[name = tensor("reduce_mean_35_cast")]; + tensor add_22_y_0_to_fp16 = const()[name = tensor("add_22_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_22_cast = add(x = reduce_mean_35_cast, y = add_22_y_0_to_fp16)[name = tensor("add_22_cast")]; + tensor sqrt_11_cast = sqrt(x = add_22_cast)[name = tensor("sqrt_11_cast")]; + tensor real_div_11_cast = real_div(x = sub_22_cast, y = sqrt_11_cast)[name = tensor("real_div_11_cast")]; + tensor reshape_45_shape_0 = const()[name = tensor("reshape_45_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_45_cast = reshape(shape = reshape_45_shape_0, x = real_div_11_cast)[name = tensor("reshape_45_cast")]; + tensor add_23_mean_0_to_fp16 = const()[name = tensor("add_23_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79523200)))]; + tensor add_23_variance_0_to_fp16 = const()[name = tensor("add_23_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79525824)))]; + tensor add_23_gamma_0_to_fp16 = const()[name = tensor("add_23_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79528448)))]; + tensor add_23_beta_0_to_fp16 = const()[name = tensor("add_23_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79531072)))]; + tensor add_23_epsilon_0_to_fp16 
= const()[name = tensor("add_23_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_23_cast = batch_norm(beta = add_23_beta_0_to_fp16, epsilon = add_23_epsilon_0_to_fp16, gamma = add_23_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_45_cast)[name = tensor("add_23_cast")]; + tensor input_127_cast = silu(x = add_23_cast)[name = tensor("input_127_cast")]; + tensor var_1227 = const()[name = tensor("op_1227"), val = tensor([1, 1])]; + tensor var_1229 = const()[name = tensor("op_1229"), val = tensor([1, 1])]; + tensor hidden_states_59_pad_type_0 = const()[name = tensor("hidden_states_59_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_59_pad_0 = const()[name = tensor("hidden_states_59_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_2_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79533696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(90592960))), name = tensor("down_blocks_2_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor down_blocks_2_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(90593152)))]; + tensor hidden_states_59_cast = conv(bias = down_blocks_2_resnets_0_conv2_bias_to_fp16, dilations = var_1229, groups = var_1186, pad = hidden_states_59_pad_0, pad_type = hidden_states_59_pad_type_0, strides = var_1227, weight = down_blocks_2_resnets_0_conv2_weight_to_fp16_palettized, x = input_127_cast)[name = tensor("hidden_states_59_cast")]; + tensor var_1234 = const()[name = tensor("op_1234"), val = tensor([1, 1])]; + tensor var_1236 = const()[name = tensor("op_1236"), val = tensor([1, 1])]; + tensor x_3_pad_type_0 = const()[name = tensor("x_3_pad_type_0"), val = tensor("custom")]; + tensor x_3_pad_0 = const()[name = tensor("x_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_resnets_0_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(90595776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(91415040))), name = tensor("down_blocks_2_resnets_0_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([1280, 640, 1, 1])]; + tensor down_blocks_2_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(91415616)))]; + tensor x_3_cast = conv(bias = down_blocks_2_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_1236, groups = var_1186, pad = x_3_pad_0, pad_type = x_3_pad_type_0, strides = var_1234, weight = down_blocks_2_resnets_0_conv_shortcut_weight_to_fp16_palettized, x = input_115_cast)[name = tensor("x_3_cast")]; + tensor hidden_states_61_cast = add(x = x_3_cast, y = hidden_states_59_cast)[name = tensor("hidden_states_61_cast")]; + tensor reshape_48_shape_0 = const()[name = tensor("reshape_48_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_48_cast = reshape(shape = reshape_48_shape_0, x = hidden_states_61_cast)[name = tensor("reshape_48_cast")]; + tensor reduce_mean_36_axes_0 = const()[name = tensor("reduce_mean_36_axes_0"), val = tensor([2, 3, 4])]; + tensor 
reduce_mean_36_keep_dims_0 = const()[name = tensor("reduce_mean_36_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_36_cast = reduce_mean(axes = reduce_mean_36_axes_0, keep_dims = reduce_mean_36_keep_dims_0, x = reshape_48_cast)[name = tensor("reduce_mean_36_cast")]; + tensor sub_24_cast = sub(x = reshape_48_cast, y = reduce_mean_36_cast)[name = tensor("sub_24_cast")]; + tensor square_12_cast = square(x = sub_24_cast)[name = tensor("square_12_cast")]; + tensor reduce_mean_38_axes_0 = const()[name = tensor("reduce_mean_38_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_38_keep_dims_0 = const()[name = tensor("reduce_mean_38_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_38_cast = reduce_mean(axes = reduce_mean_38_axes_0, keep_dims = reduce_mean_38_keep_dims_0, x = square_12_cast)[name = tensor("reduce_mean_38_cast")]; + tensor add_24_y_0_to_fp16 = const()[name = tensor("add_24_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_24_cast = add(x = reduce_mean_38_cast, y = add_24_y_0_to_fp16)[name = tensor("add_24_cast")]; + tensor sqrt_12_cast = sqrt(x = add_24_cast)[name = tensor("sqrt_12_cast")]; + tensor real_div_12_cast = real_div(x = sub_24_cast, y = sqrt_12_cast)[name = tensor("real_div_12_cast")]; + tensor reshape_49_shape_0 = const()[name = tensor("reshape_49_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_49_cast = reshape(shape = reshape_49_shape_0, x = real_div_12_cast)[name = tensor("reshape_49_cast")]; + tensor add_25_gamma_0_to_fp16 = const()[name = tensor("add_25_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(91418240)))]; + tensor add_25_beta_0_to_fp16 = const()[name = tensor("add_25_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(91420864)))]; + tensor add_25_epsilon_0_to_fp16 = const()[name = tensor("add_25_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_25_cast = batch_norm(beta = add_25_beta_0_to_fp16, epsilon = add_25_epsilon_0_to_fp16, gamma = add_25_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_49_cast)[name = tensor("add_25_cast")]; + tensor var_1274 = const()[name = tensor("op_1274"), val = tensor([1, 1])]; + tensor var_1276 = const()[name = tensor("op_1276"), val = tensor([1, 1])]; + tensor hidden_states_63_pad_type_0 = const()[name = tensor("hidden_states_63_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_63_pad_0 = const()[name = tensor("hidden_states_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(91423488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(92652352))), name = tensor("down_blocks_2_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(92652544)))]; + tensor hidden_states_63_cast = conv(bias = down_blocks_2_attentions_0_proj_in_bias_to_fp16, dilations = var_1276, groups = var_1186, pad = hidden_states_63_pad_0, pad_type = hidden_states_63_pad_type_0, strides = var_1274, weight = down_blocks_2_attentions_0_proj_in_weight_to_fp16_palettized, x = 
add_25_cast)[name = tensor("hidden_states_63_cast")]; + tensor var_1281 = const()[name = tensor("op_1281"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_25_cast = reshape(shape = var_1281, x = hidden_states_63_cast)[name = tensor("inputs_25_cast")]; + tensor var_1291 = const()[name = tensor("op_1291"), val = tensor([1])]; + tensor channels_mean_25_cast = reduce_mean(axes = var_1291, keep_dims = var_1181, x = inputs_25_cast)[name = tensor("channels_mean_25_cast")]; + tensor zero_mean_25_cast = sub(x = inputs_25_cast, y = channels_mean_25_cast)[name = tensor("zero_mean_25_cast")]; + tensor zero_mean_sq_25_cast = mul(x = zero_mean_25_cast, y = zero_mean_25_cast)[name = tensor("zero_mean_sq_25_cast")]; + tensor var_1295 = const()[name = tensor("op_1295"), val = tensor([1])]; + tensor var_1296_cast = reduce_mean(axes = var_1295, keep_dims = var_1181, x = zero_mean_sq_25_cast)[name = tensor("op_1296_cast")]; + tensor var_1297_to_fp16 = const()[name = tensor("op_1297_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1298_cast = add(x = var_1296_cast, y = var_1297_to_fp16)[name = tensor("op_1298_cast")]; + tensor denom_25_epsilon_0_to_fp16 = const()[name = tensor("denom_25_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_25_cast = rsqrt(epsilon = denom_25_epsilon_0_to_fp16, x = var_1298_cast)[name = tensor("denom_25_cast")]; + tensor out_25_cast = mul(x = zero_mean_25_cast, y = denom_25_cast)[name = tensor("out_25_cast")]; + tensor var_1302_to_fp16 = const()[name = tensor("op_1302_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(92655168)))]; + tensor var_1303_cast = add(x = out_25_cast, y = var_1302_to_fp16)[name = tensor("op_1303_cast")]; + tensor var_1305_to_fp16 = const()[name = tensor("op_1305_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(92657792)))]; + tensor hidden_states_65_cast = mul(x = var_1303_cast, y = var_1305_to_fp16)[name = tensor("hidden_states_65_cast")]; + tensor var_1312 = const()[name = tensor("op_1312"), val = tensor([1, 1])]; + tensor var_1314 = const()[name = tensor("op_1314"), val = tensor([1, 1])]; + tensor q_17_pad_type_0 = const()[name = tensor("q_17_pad_type_0"), val = tensor("custom")]; + tensor q_17_pad_0 = const()[name = tensor("q_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(92660416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(93479680))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_17_cast = conv(dilations = var_1314, groups = var_1186, pad = q_17_pad_0, pad_type = q_17_pad_type_0, strides = var_1312, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_65_cast)[name = tensor("q_17_cast")]; + tensor var_1318 = const()[name = tensor("op_1318"), val = tensor([1, 1])]; + tensor var_1320 = const()[name = tensor("op_1320"), val = tensor([1, 1])]; + tensor k_17_pad_type_0 = const()[name = tensor("k_17_pad_type_0"), val = tensor("custom")]; + tensor k_17_pad_0 = const()[name = tensor("k_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(93479808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(94299072))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_17_cast = conv(dilations = var_1320, groups = var_1186, pad = k_17_pad_0, pad_type = k_17_pad_type_0, strides = var_1318, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_65_cast)[name = tensor("k_17_cast")]; + tensor var_1324 = const()[name = tensor("op_1324"), val = tensor([1, 1])]; + tensor var_1326 = const()[name = tensor("op_1326"), val = tensor([1, 1])]; + tensor v_17_pad_type_0 = const()[name = tensor("v_17_pad_type_0"), val = tensor("custom")]; + tensor v_17_pad_0 = const()[name = tensor("v_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(94299200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(95118464))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_17_cast = conv(dilations = var_1326, groups = var_1186, pad = v_17_pad_0, pad_type = v_17_pad_type_0, strides = var_1324, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_65_cast)[name = tensor("v_17_cast")]; + tensor var_1330 = const()[name = tensor("op_1330"), val = tensor([2, 20, 64, -1])]; + tensor var_1331_cast = reshape(shape = var_1330, x = q_17_cast)[name = tensor("op_1331_cast")]; + tensor var_1332 = const()[name = tensor("op_1332"), val = tensor([2, 20, 64, -1])]; + tensor var_1333_cast = reshape(shape = var_1332, x = k_17_cast)[name = tensor("op_1333_cast")]; + tensor var_1334 = const()[name = tensor("op_1334"), val = tensor([2, 20, 64, -1])]; + tensor var_1335_cast = reshape(shape = var_1334, x = v_17_cast)[name = tensor("op_1335_cast")]; + tensor attn_weights_33_transpose_x_0 = const()[name = tensor("attn_weights_33_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_33_transpose_y_0 = const()[name = tensor("attn_weights_33_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_33_cast = matmul(transpose_x = attn_weights_33_transpose_x_0, transpose_y = attn_weights_33_transpose_y_0, x = var_1331_cast, y = var_1333_cast)[name = tensor("attn_weights_33_cast")]; + tensor var_1177_to_fp16 = const()[name = tensor("op_1177_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_35_cast = mul(x = attn_weights_33_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_35_cast")]; + tensor var_1339_cast = softmax(axis = var_1170, x = attn_weights_35_cast)[name = tensor("op_1339_cast")]; + tensor attn_17_transpose_x_0 = const()[name = tensor("attn_17_transpose_x_0"), val = tensor(false)]; + tensor attn_17_transpose_y_0 = const()[name = tensor("attn_17_transpose_y_0"), val = tensor(true)]; + tensor attn_17_cast = matmul(transpose_x = attn_17_transpose_x_0, transpose_y = attn_17_transpose_y_0, x = var_1335_cast, y = var_1339_cast)[name = tensor("attn_17_cast")]; + tensor var_1343 = const()[name = tensor("op_1343"), val = tensor([2, 1280, 1, -1])]; + tensor input_131_cast = 
reshape(shape = var_1343, x = attn_17_cast)[name = tensor("input_131_cast")]; + tensor var_1348 = const()[name = tensor("op_1348"), val = tensor([1, 1])]; + tensor var_1350 = const()[name = tensor("op_1350"), val = tensor([1, 1])]; + tensor var_1352_pad_type_0 = const()[name = tensor("op_1352_pad_type_0"), val = tensor("custom")]; + tensor var_1352_pad_0 = const()[name = tensor("op_1352_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(95118592))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(96347456))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(96347648)))]; + tensor var_1352_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_1350, groups = var_1186, pad = var_1352_pad_0, pad_type = var_1352_pad_type_0, strides = var_1348, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_131_cast)[name = tensor("op_1352_cast")]; + tensor inputs_27_cast = add(x = var_1352_cast, y = inputs_25_cast)[name = tensor("inputs_27_cast")]; + tensor var_1356 = const()[name = tensor("op_1356"), val = tensor([1])]; + tensor channels_mean_27_cast = reduce_mean(axes = var_1356, keep_dims = var_1181, x = inputs_27_cast)[name = tensor("channels_mean_27_cast")]; + tensor zero_mean_27_cast = sub(x = inputs_27_cast, y = channels_mean_27_cast)[name = tensor("zero_mean_27_cast")]; + tensor zero_mean_sq_27_cast = mul(x = zero_mean_27_cast, y = zero_mean_27_cast)[name = tensor("zero_mean_sq_27_cast")]; + tensor var_1360 = const()[name = tensor("op_1360"), val = tensor([1])]; + tensor var_1361_cast = reduce_mean(axes = var_1360, keep_dims = var_1181, x = zero_mean_sq_27_cast)[name = tensor("op_1361_cast")]; + tensor var_1362_to_fp16 = const()[name = tensor("op_1362_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1363_cast = add(x = var_1361_cast, y = var_1362_to_fp16)[name = tensor("op_1363_cast")]; + tensor denom_27_epsilon_0_to_fp16 = const()[name = tensor("denom_27_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_27_cast = rsqrt(epsilon = denom_27_epsilon_0_to_fp16, x = var_1363_cast)[name = tensor("denom_27_cast")]; + tensor out_27_cast = mul(x = zero_mean_27_cast, y = denom_27_cast)[name = tensor("out_27_cast")]; + tensor var_1367_to_fp16 = const()[name = tensor("op_1367_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(96350272)))]; + tensor var_1368_cast = add(x = out_27_cast, y = var_1367_to_fp16)[name = tensor("op_1368_cast")]; + tensor var_1370_to_fp16 = const()[name = tensor("op_1370_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(96352896)))]; + tensor hidden_states_67_cast = mul(x = var_1368_cast, y = var_1370_to_fp16)[name = tensor("hidden_states_67_cast")]; + tensor var_1377 = const()[name = tensor("op_1377"), val = tensor([1, 1])]; + tensor var_1379 = const()[name = 
tensor("op_1379"), val = tensor([1, 1])]; + tensor q_19_pad_type_0 = const()[name = tensor("q_19_pad_type_0"), val = tensor("custom")]; + tensor q_19_pad_0 = const()[name = tensor("q_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(96355520))), lut = tensor([-0x1.adp-7, 0x1.ad8p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_19_cast = conv(dilations = var_1379, groups = var_1186, pad = q_19_pad_0, pad_type = q_19_pad_type_0, strides = var_1377, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_67_cast)[name = tensor("q_19_cast")]; + tensor var_1383 = const()[name = tensor("op_1383"), val = tensor([1, 1])]; + tensor var_1385 = const()[name = tensor("op_1385"), val = tensor([1, 1])]; + tensor k_19_pad_type_0 = const()[name = tensor("k_19_pad_type_0"), val = tensor("custom")]; + tensor k_19_pad_0 = const()[name = tensor("k_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(96560384))), lut = tensor([-0x1.7fcp-6, -0x1.bdcp-8, 0x1.c44p-8, 0x1.818p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_19_cast = conv(dilations = var_1385, groups = var_1186, pad = k_19_pad_0, pad_type = k_19_pad_type_0, strides = var_1383, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_19_cast")]; + tensor var_1389 = const()[name = tensor("op_1389"), val = tensor([1, 1])]; + tensor var_1391 = const()[name = tensor("op_1391"), val = tensor([1, 1])]; + tensor v_19_pad_type_0 = const()[name = tensor("v_19_pad_type_0"), val = tensor("custom")]; + tensor v_19_pad_0 = const()[name = tensor("v_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(97215808))), lut = tensor([-0x1.8e8p-6, -0x1.cbp-8, 0x1.cccp-8, 0x1.9p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_19_cast = conv(dilations = var_1391, groups = var_1186, pad = v_19_pad_0, pad_type = v_19_pad_type_0, strides = var_1389, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_19_cast")]; + tensor var_1395 = const()[name = tensor("op_1395"), val = tensor([2, 20, 64, -1])]; + tensor var_1396_cast = reshape(shape = var_1395, x = q_19_cast)[name = tensor("op_1396_cast")]; + tensor var_1397 = const()[name = tensor("op_1397"), val = tensor([2, 20, 64, -1])]; + tensor var_1398_cast = reshape(shape = var_1397, x = k_19_cast)[name = tensor("op_1398_cast")]; + tensor var_1399 = const()[name = tensor("op_1399"), val = tensor([2, 20, 64, -1])]; + tensor var_1400_cast = reshape(shape = var_1399, x = v_19_cast)[name = 
tensor("op_1400_cast")]; + tensor attn_weights_37_transpose_x_0 = const()[name = tensor("attn_weights_37_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_37_transpose_y_0 = const()[name = tensor("attn_weights_37_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_37_cast = matmul(transpose_x = attn_weights_37_transpose_x_0, transpose_y = attn_weights_37_transpose_y_0, x = var_1396_cast, y = var_1398_cast)[name = tensor("attn_weights_37_cast")]; + tensor attn_weights_39_cast = mul(x = attn_weights_37_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_39_cast")]; + tensor var_1404_cast = softmax(axis = var_1170, x = attn_weights_39_cast)[name = tensor("op_1404_cast")]; + tensor attn_19_transpose_x_0 = const()[name = tensor("attn_19_transpose_x_0"), val = tensor(false)]; + tensor attn_19_transpose_y_0 = const()[name = tensor("attn_19_transpose_y_0"), val = tensor(true)]; + tensor attn_19_cast = matmul(transpose_x = attn_19_transpose_x_0, transpose_y = attn_19_transpose_y_0, x = var_1400_cast, y = var_1404_cast)[name = tensor("attn_19_cast")]; + tensor var_1408 = const()[name = tensor("op_1408"), val = tensor([2, 1280, 1, -1])]; + tensor input_133_cast = reshape(shape = var_1408, x = attn_19_cast)[name = tensor("input_133_cast")]; + tensor var_1413 = const()[name = tensor("op_1413"), val = tensor([1, 1])]; + tensor var_1415 = const()[name = tensor("op_1415"), val = tensor([1, 1])]; + tensor var_1417_pad_type_0 = const()[name = tensor("op_1417_pad_type_0"), val = tensor("custom")]; + tensor var_1417_pad_0 = const()[name = tensor("op_1417_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(97871232))), lut = tensor([-0x1.aep-7, -0x1.f5p-9, 0x1.fa8p-9, 0x1.afcp-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(98280896)))]; + tensor var_1417_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_1415, groups = var_1186, pad = var_1417_pad_0, pad_type = var_1417_pad_type_0, strides = var_1413, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_133_cast)[name = tensor("op_1417_cast")]; + tensor inputs_29_cast = add(x = var_1417_cast, y = inputs_27_cast)[name = tensor("inputs_29_cast")]; + tensor var_1421 = const()[name = tensor("op_1421"), val = tensor([1])]; + tensor channels_mean_29_cast = reduce_mean(axes = var_1421, keep_dims = var_1181, x = inputs_29_cast)[name = tensor("channels_mean_29_cast")]; + tensor zero_mean_29_cast = sub(x = inputs_29_cast, y = channels_mean_29_cast)[name = tensor("zero_mean_29_cast")]; + tensor zero_mean_sq_29_cast = mul(x = zero_mean_29_cast, y = zero_mean_29_cast)[name = tensor("zero_mean_sq_29_cast")]; + tensor var_1425 = const()[name = tensor("op_1425"), val = tensor([1])]; + tensor var_1426_cast = reduce_mean(axes = var_1425, keep_dims = var_1181, x = zero_mean_sq_29_cast)[name = tensor("op_1426_cast")]; + tensor var_1427_to_fp16 = const()[name = 
tensor("op_1427_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1428_cast = add(x = var_1426_cast, y = var_1427_to_fp16)[name = tensor("op_1428_cast")]; + tensor denom_29_epsilon_0_to_fp16 = const()[name = tensor("denom_29_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_29_cast = rsqrt(epsilon = denom_29_epsilon_0_to_fp16, x = var_1428_cast)[name = tensor("denom_29_cast")]; + tensor out_29_cast = mul(x = zero_mean_29_cast, y = denom_29_cast)[name = tensor("out_29_cast")]; + tensor var_1432_to_fp16 = const()[name = tensor("op_1432_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(98283520)))]; + tensor var_1433_cast = add(x = out_29_cast, y = var_1432_to_fp16)[name = tensor("op_1433_cast")]; + tensor var_1435_to_fp16 = const()[name = tensor("op_1435_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(98286144)))]; + tensor input_135_cast = mul(x = var_1433_cast, y = var_1435_to_fp16)[name = tensor("input_135_cast")]; + tensor var_1443 = const()[name = tensor("op_1443"), val = tensor([1, 1])]; + tensor var_1445 = const()[name = tensor("op_1445"), val = tensor([1, 1])]; + tensor var_1447_pad_type_0 = const()[name = tensor("op_1447_pad_type_0"), val = tensor("custom")]; + tensor var_1447_pad_0 = const()[name = tensor("op_1447_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(98288768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(108119232))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(108119424)))]; + tensor var_1447_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_1445, groups = var_1186, pad = var_1447_pad_0, pad_type = var_1447_pad_type_0, strides = var_1443, weight = down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_135_cast)[name = tensor("op_1447_cast")]; + tensor var_1448_split_sizes_0 = const()[name = tensor("op_1448_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1448_axis_0 = const()[name = tensor("op_1448_axis_0"), val = tensor(1)]; + tensor var_1448_cast_0, tensor var_1448_cast_1 = split(axis = var_1448_axis_0, split_sizes = var_1448_split_sizes_0, x = var_1447_cast)[name = tensor("op_1448_cast")]; + tensor var_1450_mode_0 = const()[name = tensor("op_1450_mode_0"), val = tensor("EXACT")]; + tensor var_1450_cast = gelu(mode = var_1450_mode_0, x = var_1448_cast_1)[name = tensor("op_1450_cast")]; + tensor input_137_cast = mul(x = var_1448_cast_0, y = var_1450_cast)[name = tensor("input_137_cast")]; + tensor var_1454 = const()[name = tensor("op_1454"), val = tensor([1, 1])]; + tensor var_1456 = const()[name = tensor("op_1456"), val = tensor([1, 1])]; + tensor var_1458_pad_type_0 = const()[name = tensor("op_1458_pad_type_0"), val = tensor("custom")]; + tensor var_1458_pad_0 = const()[name = tensor("op_1458_pad_0"), val = tensor([0, 0, 0, 0])]; + 
tensor down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(108139968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111416832))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111416960)))]; + tensor var_1458_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_1456, groups = var_1186, pad = var_1458_pad_0, pad_type = var_1458_pad_type_0, strides = var_1454, weight = down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_137_cast)[name = tensor("op_1458_cast")]; + tensor inputs_31_cast = add(x = var_1458_cast, y = inputs_29_cast)[name = tensor("inputs_31_cast")]; + tensor var_1468 = const()[name = tensor("op_1468"), val = tensor([1])]; + tensor channels_mean_31_cast = reduce_mean(axes = var_1468, keep_dims = var_1181, x = inputs_31_cast)[name = tensor("channels_mean_31_cast")]; + tensor zero_mean_31_cast = sub(x = inputs_31_cast, y = channels_mean_31_cast)[name = tensor("zero_mean_31_cast")]; + tensor zero_mean_sq_31_cast = mul(x = zero_mean_31_cast, y = zero_mean_31_cast)[name = tensor("zero_mean_sq_31_cast")]; + tensor var_1472 = const()[name = tensor("op_1472"), val = tensor([1])]; + tensor var_1473_cast = reduce_mean(axes = var_1472, keep_dims = var_1181, x = zero_mean_sq_31_cast)[name = tensor("op_1473_cast")]; + tensor var_1474_to_fp16 = const()[name = tensor("op_1474_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1475_cast = add(x = var_1473_cast, y = var_1474_to_fp16)[name = tensor("op_1475_cast")]; + tensor denom_31_epsilon_0_to_fp16 = const()[name = tensor("denom_31_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_31_cast = rsqrt(epsilon = denom_31_epsilon_0_to_fp16, x = var_1475_cast)[name = tensor("denom_31_cast")]; + tensor out_31_cast = mul(x = zero_mean_31_cast, y = denom_31_cast)[name = tensor("out_31_cast")]; + tensor var_1479_to_fp16 = const()[name = tensor("op_1479_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111419584)))]; + tensor var_1480_cast = add(x = out_31_cast, y = var_1479_to_fp16)[name = tensor("op_1480_cast")]; + tensor var_1482_to_fp16 = const()[name = tensor("op_1482_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111422208)))]; + tensor hidden_states_71_cast = mul(x = var_1480_cast, y = var_1482_to_fp16)[name = tensor("hidden_states_71_cast")]; + tensor var_1489 = const()[name = tensor("op_1489"), val = tensor([1, 1])]; + tensor var_1491 = const()[name = tensor("op_1491"), val = tensor([1, 1])]; + tensor q_21_pad_type_0 = const()[name = tensor("q_21_pad_type_0"), val = tensor("custom")]; + tensor q_21_pad_0 = const()[name = tensor("q_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111424832))), lut = 
tensor([-0x1.17p-5, -0x1.518p-7, 0x1.4d4p-7, 0x1.16p-5]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_21_cast = conv(dilations = var_1491, groups = var_1186, pad = q_21_pad_0, pad_type = q_21_pad_type_0, strides = var_1489, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_71_cast)[name = tensor("q_21_cast")]; + tensor var_1495 = const()[name = tensor("op_1495"), val = tensor([1, 1])]; + tensor var_1497 = const()[name = tensor("op_1497"), val = tensor([1, 1])]; + tensor k_21_pad_type_0 = const()[name = tensor("k_21_pad_type_0"), val = tensor("custom")]; + tensor k_21_pad_0 = const()[name = tensor("k_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111834496))), lut = tensor([-0x1.16p-5, -0x1.4fp-7, 0x1.4e8p-7, 0x1.16p-5]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_21_cast = conv(dilations = var_1497, groups = var_1186, pad = k_21_pad_0, pad_type = k_21_pad_type_0, strides = var_1495, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_71_cast)[name = tensor("k_21_cast")]; + tensor var_1501 = const()[name = tensor("op_1501"), val = tensor([1, 1])]; + tensor var_1503 = const()[name = tensor("op_1503"), val = tensor([1, 1])]; + tensor v_21_pad_type_0 = const()[name = tensor("v_21_pad_type_0"), val = tensor("custom")]; + tensor v_21_pad_0 = const()[name = tensor("v_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(112244160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113063424))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_21_cast = conv(dilations = var_1503, groups = var_1186, pad = v_21_pad_0, pad_type = v_21_pad_type_0, strides = var_1501, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_71_cast)[name = tensor("v_21_cast")]; + tensor var_1507 = const()[name = tensor("op_1507"), val = tensor([2, 20, 64, -1])]; + tensor var_1508_cast = reshape(shape = var_1507, x = q_21_cast)[name = tensor("op_1508_cast")]; + tensor var_1509 = const()[name = tensor("op_1509"), val = tensor([2, 20, 64, -1])]; + tensor var_1510_cast = reshape(shape = var_1509, x = k_21_cast)[name = tensor("op_1510_cast")]; + tensor var_1511 = const()[name = tensor("op_1511"), val = tensor([2, 20, 64, -1])]; + tensor var_1512_cast = reshape(shape = var_1511, x = v_21_cast)[name = tensor("op_1512_cast")]; + tensor attn_weights_41_transpose_x_0 = const()[name = tensor("attn_weights_41_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_41_transpose_y_0 = const()[name = tensor("attn_weights_41_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_41_cast = matmul(transpose_x = attn_weights_41_transpose_x_0, transpose_y = attn_weights_41_transpose_y_0, x = var_1508_cast, 
y = var_1510_cast)[name = tensor("attn_weights_41_cast")]; + tensor attn_weights_43_cast = mul(x = attn_weights_41_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_43_cast")]; + tensor var_1516_cast = softmax(axis = var_1170, x = attn_weights_43_cast)[name = tensor("op_1516_cast")]; + tensor attn_21_transpose_x_0 = const()[name = tensor("attn_21_transpose_x_0"), val = tensor(false)]; + tensor attn_21_transpose_y_0 = const()[name = tensor("attn_21_transpose_y_0"), val = tensor(true)]; + tensor attn_21_cast = matmul(transpose_x = attn_21_transpose_x_0, transpose_y = attn_21_transpose_y_0, x = var_1512_cast, y = var_1516_cast)[name = tensor("attn_21_cast")]; + tensor var_1520 = const()[name = tensor("op_1520"), val = tensor([2, 1280, 1, -1])]; + tensor input_139_cast = reshape(shape = var_1520, x = attn_21_cast)[name = tensor("input_139_cast")]; + tensor var_1525 = const()[name = tensor("op_1525"), val = tensor([1, 1])]; + tensor var_1527 = const()[name = tensor("op_1527"), val = tensor([1, 1])]; + tensor var_1529_pad_type_0 = const()[name = tensor("op_1529_pad_type_0"), val = tensor("custom")]; + tensor var_1529_pad_0 = const()[name = tensor("op_1529_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113063552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113882816))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113882944)))]; + tensor var_1529_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_1527, groups = var_1186, pad = var_1529_pad_0, pad_type = var_1529_pad_type_0, strides = var_1525, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_139_cast)[name = tensor("op_1529_cast")]; + tensor inputs_33_cast = add(x = var_1529_cast, y = inputs_31_cast)[name = tensor("inputs_33_cast")]; + tensor var_1533 = const()[name = tensor("op_1533"), val = tensor([1])]; + tensor channels_mean_33_cast = reduce_mean(axes = var_1533, keep_dims = var_1181, x = inputs_33_cast)[name = tensor("channels_mean_33_cast")]; + tensor zero_mean_33_cast = sub(x = inputs_33_cast, y = channels_mean_33_cast)[name = tensor("zero_mean_33_cast")]; + tensor zero_mean_sq_33_cast = mul(x = zero_mean_33_cast, y = zero_mean_33_cast)[name = tensor("zero_mean_sq_33_cast")]; + tensor var_1537 = const()[name = tensor("op_1537"), val = tensor([1])]; + tensor var_1538_cast = reduce_mean(axes = var_1537, keep_dims = var_1181, x = zero_mean_sq_33_cast)[name = tensor("op_1538_cast")]; + tensor var_1539_to_fp16 = const()[name = tensor("op_1539_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1540_cast = add(x = var_1538_cast, y = var_1539_to_fp16)[name = tensor("op_1540_cast")]; + tensor denom_33_epsilon_0_to_fp16 = const()[name = tensor("denom_33_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_33_cast = rsqrt(epsilon = denom_33_epsilon_0_to_fp16, x = var_1540_cast)[name = 
tensor("denom_33_cast")]; + tensor out_33_cast = mul(x = zero_mean_33_cast, y = denom_33_cast)[name = tensor("out_33_cast")]; + tensor var_1544_to_fp16 = const()[name = tensor("op_1544_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113885568)))]; + tensor var_1545_cast = add(x = out_33_cast, y = var_1544_to_fp16)[name = tensor("op_1545_cast")]; + tensor var_1547_to_fp16 = const()[name = tensor("op_1547_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113888192)))]; + tensor hidden_states_73_cast = mul(x = var_1545_cast, y = var_1547_to_fp16)[name = tensor("hidden_states_73_cast")]; + tensor var_1554 = const()[name = tensor("op_1554"), val = tensor([1, 1])]; + tensor var_1556 = const()[name = tensor("op_1556"), val = tensor([1, 1])]; + tensor q_23_pad_type_0 = const()[name = tensor("q_23_pad_type_0"), val = tensor("custom")]; + tensor q_23_pad_0 = const()[name = tensor("q_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113890816))), lut = tensor([-0x1.aap-6, -0x1.058p-7, 0x1.068p-7, 0x1.aa8p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_23_cast = conv(dilations = var_1556, groups = var_1186, pad = q_23_pad_0, pad_type = q_23_pad_type_0, strides = var_1554, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_73_cast)[name = tensor("q_23_cast")]; + tensor var_1560 = const()[name = tensor("op_1560"), val = tensor([1, 1])]; + tensor var_1562 = const()[name = tensor("op_1562"), val = tensor([1, 1])]; + tensor k_23_pad_type_0 = const()[name = tensor("k_23_pad_type_0"), val = tensor("custom")]; + tensor k_23_pad_0 = const()[name = tensor("k_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(114300480))), lut = tensor([-0x1.85cp-6, -0x1.cbp-8, 0x1.c18p-8, 0x1.82cp-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_23_cast = conv(dilations = var_1562, groups = var_1186, pad = k_23_pad_0, pad_type = k_23_pad_type_0, strides = var_1560, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_23_cast")]; + tensor var_1566 = const()[name = tensor("op_1566"), val = tensor([1, 1])]; + tensor var_1568 = const()[name = tensor("op_1568"), val = tensor([1, 1])]; + tensor v_23_pad_type_0 = const()[name = tensor("v_23_pad_type_0"), val = tensor("custom")]; + tensor v_23_pad_0 = const()[name = tensor("v_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(114955904))), lut = tensor([-0x1.a5p-6, -0x1.e3p-8, 0x1.e54p-8, 0x1.a5p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 
1])]; + tensor v_23_cast = conv(dilations = var_1568, groups = var_1186, pad = v_23_pad_0, pad_type = v_23_pad_type_0, strides = var_1566, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_23_cast")]; + tensor var_1572 = const()[name = tensor("op_1572"), val = tensor([2, 20, 64, -1])]; + tensor var_1573_cast = reshape(shape = var_1572, x = q_23_cast)[name = tensor("op_1573_cast")]; + tensor var_1574 = const()[name = tensor("op_1574"), val = tensor([2, 20, 64, -1])]; + tensor var_1575_cast = reshape(shape = var_1574, x = k_23_cast)[name = tensor("op_1575_cast")]; + tensor var_1576 = const()[name = tensor("op_1576"), val = tensor([2, 20, 64, -1])]; + tensor var_1577_cast = reshape(shape = var_1576, x = v_23_cast)[name = tensor("op_1577_cast")]; + tensor attn_weights_45_transpose_x_0 = const()[name = tensor("attn_weights_45_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_45_transpose_y_0 = const()[name = tensor("attn_weights_45_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_45_cast = matmul(transpose_x = attn_weights_45_transpose_x_0, transpose_y = attn_weights_45_transpose_y_0, x = var_1573_cast, y = var_1575_cast)[name = tensor("attn_weights_45_cast")]; + tensor attn_weights_47_cast = mul(x = attn_weights_45_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_47_cast")]; + tensor var_1581_cast = softmax(axis = var_1170, x = attn_weights_47_cast)[name = tensor("op_1581_cast")]; + tensor attn_23_transpose_x_0 = const()[name = tensor("attn_23_transpose_x_0"), val = tensor(false)]; + tensor attn_23_transpose_y_0 = const()[name = tensor("attn_23_transpose_y_0"), val = tensor(true)]; + tensor attn_23_cast = matmul(transpose_x = attn_23_transpose_x_0, transpose_y = attn_23_transpose_y_0, x = var_1577_cast, y = var_1581_cast)[name = tensor("attn_23_cast")]; + tensor var_1585 = const()[name = tensor("op_1585"), val = tensor([2, 1280, 1, -1])]; + tensor input_141_cast = reshape(shape = var_1585, x = attn_23_cast)[name = tensor("input_141_cast")]; + tensor var_1590 = const()[name = tensor("op_1590"), val = tensor([1, 1])]; + tensor var_1592 = const()[name = tensor("op_1592"), val = tensor([1, 1])]; + tensor var_1594_pad_type_0 = const()[name = tensor("op_1594_pad_type_0"), val = tensor("custom")]; + tensor var_1594_pad_0 = const()[name = tensor("op_1594_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115611328))), lut = tensor([-0x1.ca8p-8, 0x1.ccp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115816192)))]; + tensor var_1594_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_1592, groups = var_1186, pad = var_1594_pad_0, pad_type = var_1594_pad_type_0, strides = var_1590, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_141_cast)[name = tensor("op_1594_cast")]; + tensor inputs_35_cast = 
add(x = var_1594_cast, y = inputs_33_cast)[name = tensor("inputs_35_cast")]; + tensor var_1598 = const()[name = tensor("op_1598"), val = tensor([1])]; + tensor channels_mean_35_cast = reduce_mean(axes = var_1598, keep_dims = var_1181, x = inputs_35_cast)[name = tensor("channels_mean_35_cast")]; + tensor zero_mean_35_cast = sub(x = inputs_35_cast, y = channels_mean_35_cast)[name = tensor("zero_mean_35_cast")]; + tensor zero_mean_sq_35_cast = mul(x = zero_mean_35_cast, y = zero_mean_35_cast)[name = tensor("zero_mean_sq_35_cast")]; + tensor var_1602 = const()[name = tensor("op_1602"), val = tensor([1])]; + tensor var_1603_cast = reduce_mean(axes = var_1602, keep_dims = var_1181, x = zero_mean_sq_35_cast)[name = tensor("op_1603_cast")]; + tensor var_1604_to_fp16 = const()[name = tensor("op_1604_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1605_cast = add(x = var_1603_cast, y = var_1604_to_fp16)[name = tensor("op_1605_cast")]; + tensor denom_35_epsilon_0_to_fp16 = const()[name = tensor("denom_35_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_35_cast = rsqrt(epsilon = denom_35_epsilon_0_to_fp16, x = var_1605_cast)[name = tensor("denom_35_cast")]; + tensor out_35_cast = mul(x = zero_mean_35_cast, y = denom_35_cast)[name = tensor("out_35_cast")]; + tensor var_1609_to_fp16 = const()[name = tensor("op_1609_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115818816)))]; + tensor var_1610_cast = add(x = out_35_cast, y = var_1609_to_fp16)[name = tensor("op_1610_cast")]; + tensor var_1612_to_fp16 = const()[name = tensor("op_1612_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115821440)))]; + tensor input_143_cast = mul(x = var_1610_cast, y = var_1612_to_fp16)[name = tensor("input_143_cast")]; + tensor var_1620 = const()[name = tensor("op_1620"), val = tensor([1, 1])]; + tensor var_1622 = const()[name = tensor("op_1622"), val = tensor([1, 1])]; + tensor var_1624_pad_type_0 = const()[name = tensor("op_1624_pad_type_0"), val = tensor("custom")]; + tensor var_1624_pad_0 = const()[name = tensor("op_1624_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115824064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(122377728))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(122377856)))]; + tensor var_1624_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_1622, groups = var_1186, pad = var_1624_pad_0, pad_type = var_1624_pad_type_0, strides = var_1620, weight = down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_143_cast)[name = tensor("op_1624_cast")]; + tensor var_1625_split_sizes_0 = const()[name = tensor("op_1625_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1625_axis_0 = const()[name = tensor("op_1625_axis_0"), val = tensor(1)]; + tensor var_1625_cast_0, tensor 
var_1625_cast_1 = split(axis = var_1625_axis_0, split_sizes = var_1625_split_sizes_0, x = var_1624_cast)[name = tensor("op_1625_cast")]; + tensor var_1627_mode_0 = const()[name = tensor("op_1627_mode_0"), val = tensor("EXACT")]; + tensor var_1627_cast = gelu(mode = var_1627_mode_0, x = var_1625_cast_1)[name = tensor("op_1627_cast")]; + tensor input_145_cast = mul(x = var_1625_cast_0, y = var_1627_cast)[name = tensor("input_145_cast")]; + tensor var_1631 = const()[name = tensor("op_1631"), val = tensor([1, 1])]; + tensor var_1633 = const()[name = tensor("op_1633"), val = tensor([1, 1])]; + tensor var_1635_pad_type_0 = const()[name = tensor("op_1635_pad_type_0"), val = tensor("custom")]; + tensor var_1635_pad_0 = const()[name = tensor("op_1635_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(122398400))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(125675264))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(125675392)))]; + tensor var_1635_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_1633, groups = var_1186, pad = var_1635_pad_0, pad_type = var_1635_pad_type_0, strides = var_1631, weight = down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_145_cast)[name = tensor("op_1635_cast")]; + tensor inputs_37_cast = add(x = var_1635_cast, y = inputs_35_cast)[name = tensor("inputs_37_cast")]; + tensor var_1645 = const()[name = tensor("op_1645"), val = tensor([1])]; + tensor channels_mean_37_cast = reduce_mean(axes = var_1645, keep_dims = var_1181, x = inputs_37_cast)[name = tensor("channels_mean_37_cast")]; + tensor zero_mean_37_cast = sub(x = inputs_37_cast, y = channels_mean_37_cast)[name = tensor("zero_mean_37_cast")]; + tensor zero_mean_sq_37_cast = mul(x = zero_mean_37_cast, y = zero_mean_37_cast)[name = tensor("zero_mean_sq_37_cast")]; + tensor var_1649 = const()[name = tensor("op_1649"), val = tensor([1])]; + tensor var_1650_cast = reduce_mean(axes = var_1649, keep_dims = var_1181, x = zero_mean_sq_37_cast)[name = tensor("op_1650_cast")]; + tensor var_1651_to_fp16 = const()[name = tensor("op_1651_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1652_cast = add(x = var_1650_cast, y = var_1651_to_fp16)[name = tensor("op_1652_cast")]; + tensor denom_37_epsilon_0_to_fp16 = const()[name = tensor("denom_37_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_37_cast = rsqrt(epsilon = denom_37_epsilon_0_to_fp16, x = var_1652_cast)[name = tensor("denom_37_cast")]; + tensor out_37_cast = mul(x = zero_mean_37_cast, y = denom_37_cast)[name = tensor("out_37_cast")]; + tensor var_1656_to_fp16 = const()[name = tensor("op_1656_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(125678016)))]; + tensor var_1657_cast = add(x = out_37_cast, y = var_1656_to_fp16)[name = tensor("op_1657_cast")]; + tensor var_1659_to_fp16 = const()[name = tensor("op_1659_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(125680640)))]; + tensor hidden_states_77_cast = mul(x = var_1657_cast, y = var_1659_to_fp16)[name = tensor("hidden_states_77_cast")]; + tensor var_1666 = const()[name = tensor("op_1666"), val = tensor([1, 1])]; + tensor var_1668 = const()[name = tensor("op_1668"), val = tensor([1, 1])]; + tensor q_25_pad_type_0 = const()[name = tensor("q_25_pad_type_0"), val = tensor("custom")]; + tensor q_25_pad_0 = const()[name = tensor("q_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(125683264))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126502528))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_25_cast = conv(dilations = var_1668, groups = var_1186, pad = q_25_pad_0, pad_type = q_25_pad_type_0, strides = var_1666, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_77_cast)[name = tensor("q_25_cast")]; + tensor var_1672 = const()[name = tensor("op_1672"), val = tensor([1, 1])]; + tensor var_1674 = const()[name = tensor("op_1674"), val = tensor([1, 1])]; + tensor k_25_pad_type_0 = const()[name = tensor("k_25_pad_type_0"), val = tensor("custom")]; + tensor k_25_pad_0 = const()[name = tensor("k_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126502656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(127321920))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_25_cast = conv(dilations = var_1674, groups = var_1186, pad = k_25_pad_0, pad_type = k_25_pad_type_0, strides = var_1672, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_77_cast)[name = tensor("k_25_cast")]; + tensor var_1678 = const()[name = tensor("op_1678"), val = tensor([1, 1])]; + tensor var_1680 = const()[name = tensor("op_1680"), val = tensor([1, 1])]; + tensor v_25_pad_type_0 = const()[name = tensor("v_25_pad_type_0"), val = tensor("custom")]; + tensor v_25_pad_0 = const()[name = tensor("v_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(127322048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128141312))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_25_cast = conv(dilations = var_1680, groups = var_1186, pad = v_25_pad_0, pad_type = v_25_pad_type_0, strides = var_1678, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_77_cast)[name = tensor("v_25_cast")]; + tensor var_1684 = const()[name = tensor("op_1684"), val = tensor([2, 20, 64, 
-1])]; + tensor var_1685_cast = reshape(shape = var_1684, x = q_25_cast)[name = tensor("op_1685_cast")]; + tensor var_1686 = const()[name = tensor("op_1686"), val = tensor([2, 20, 64, -1])]; + tensor var_1687_cast = reshape(shape = var_1686, x = k_25_cast)[name = tensor("op_1687_cast")]; + tensor var_1688 = const()[name = tensor("op_1688"), val = tensor([2, 20, 64, -1])]; + tensor var_1689_cast = reshape(shape = var_1688, x = v_25_cast)[name = tensor("op_1689_cast")]; + tensor attn_weights_49_transpose_x_0 = const()[name = tensor("attn_weights_49_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_49_transpose_y_0 = const()[name = tensor("attn_weights_49_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_49_cast = matmul(transpose_x = attn_weights_49_transpose_x_0, transpose_y = attn_weights_49_transpose_y_0, x = var_1685_cast, y = var_1687_cast)[name = tensor("attn_weights_49_cast")]; + tensor attn_weights_51_cast = mul(x = attn_weights_49_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_51_cast")]; + tensor var_1693_cast = softmax(axis = var_1170, x = attn_weights_51_cast)[name = tensor("op_1693_cast")]; + tensor attn_25_transpose_x_0 = const()[name = tensor("attn_25_transpose_x_0"), val = tensor(false)]; + tensor attn_25_transpose_y_0 = const()[name = tensor("attn_25_transpose_y_0"), val = tensor(true)]; + tensor attn_25_cast = matmul(transpose_x = attn_25_transpose_x_0, transpose_y = attn_25_transpose_y_0, x = var_1689_cast, y = var_1693_cast)[name = tensor("attn_25_cast")]; + tensor var_1697 = const()[name = tensor("op_1697"), val = tensor([2, 1280, 1, -1])]; + tensor input_147_cast = reshape(shape = var_1697, x = attn_25_cast)[name = tensor("input_147_cast")]; + tensor var_1702 = const()[name = tensor("op_1702"), val = tensor([1, 1])]; + tensor var_1704 = const()[name = tensor("op_1704"), val = tensor([1, 1])]; + tensor var_1706_pad_type_0 = const()[name = tensor("op_1706_pad_type_0"), val = tensor("custom")]; + tensor var_1706_pad_0 = const()[name = tensor("op_1706_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128141440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128960704))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128960832)))]; + tensor var_1706_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_1704, groups = var_1186, pad = var_1706_pad_0, pad_type = var_1706_pad_type_0, strides = var_1702, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_147_cast)[name = tensor("op_1706_cast")]; + tensor inputs_39_cast = add(x = var_1706_cast, y = inputs_37_cast)[name = tensor("inputs_39_cast")]; + tensor var_1710 = const()[name = tensor("op_1710"), val = tensor([1])]; + tensor channels_mean_39_cast = reduce_mean(axes = var_1710, keep_dims = var_1181, x = inputs_39_cast)[name = tensor("channels_mean_39_cast")]; + tensor 
zero_mean_39_cast = sub(x = inputs_39_cast, y = channels_mean_39_cast)[name = tensor("zero_mean_39_cast")]; + tensor zero_mean_sq_39_cast = mul(x = zero_mean_39_cast, y = zero_mean_39_cast)[name = tensor("zero_mean_sq_39_cast")]; + tensor var_1714 = const()[name = tensor("op_1714"), val = tensor([1])]; + tensor var_1715_cast = reduce_mean(axes = var_1714, keep_dims = var_1181, x = zero_mean_sq_39_cast)[name = tensor("op_1715_cast")]; + tensor var_1716_to_fp16 = const()[name = tensor("op_1716_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1717_cast = add(x = var_1715_cast, y = var_1716_to_fp16)[name = tensor("op_1717_cast")]; + tensor denom_39_epsilon_0_to_fp16 = const()[name = tensor("denom_39_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_39_cast = rsqrt(epsilon = denom_39_epsilon_0_to_fp16, x = var_1717_cast)[name = tensor("denom_39_cast")]; + tensor out_39_cast = mul(x = zero_mean_39_cast, y = denom_39_cast)[name = tensor("out_39_cast")]; + tensor var_1721_to_fp16 = const()[name = tensor("op_1721_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128963456)))]; + tensor var_1722_cast = add(x = out_39_cast, y = var_1721_to_fp16)[name = tensor("op_1722_cast")]; + tensor var_1724_to_fp16 = const()[name = tensor("op_1724_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128966080)))]; + tensor hidden_states_79_cast = mul(x = var_1722_cast, y = var_1724_to_fp16)[name = tensor("hidden_states_79_cast")]; + tensor var_1731 = const()[name = tensor("op_1731"), val = tensor([1, 1])]; + tensor var_1733 = const()[name = tensor("op_1733"), val = tensor([1, 1])]; + tensor q_27_pad_type_0 = const()[name = tensor("q_27_pad_type_0"), val = tensor("custom")]; + tensor q_27_pad_0 = const()[name = tensor("q_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128968704))), lut = tensor([-0x1.72cp-6, -0x1.d2cp-8, 0x1.d5p-8, 0x1.73cp-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_27_cast = conv(dilations = var_1733, groups = var_1186, pad = q_27_pad_0, pad_type = q_27_pad_type_0, strides = var_1731, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_79_cast)[name = tensor("q_27_cast")]; + tensor var_1737 = const()[name = tensor("op_1737"), val = tensor([1, 1])]; + tensor var_1739 = const()[name = tensor("op_1739"), val = tensor([1, 1])]; + tensor k_27_pad_type_0 = const()[name = tensor("k_27_pad_type_0"), val = tensor("custom")]; + tensor k_27_pad_0 = const()[name = tensor("k_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(129378368))), lut = tensor([-0x1.394p-6, -0x1.768p-8, 0x1.704p-8, 0x1.378p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_27_cast = conv(dilations = var_1739, groups = var_1186, pad = k_27_pad_0, pad_type = k_27_pad_type_0, strides = var_1737, weight = 
down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_27_cast")]; + tensor var_1743 = const()[name = tensor("op_1743"), val = tensor([1, 1])]; + tensor var_1745 = const()[name = tensor("op_1745"), val = tensor([1, 1])]; + tensor v_27_pad_type_0 = const()[name = tensor("v_27_pad_type_0"), val = tensor("custom")]; + tensor v_27_pad_0 = const()[name = tensor("v_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130033792))), lut = tensor([-0x1.74cp-6, -0x1.ab8p-8, 0x1.a44p-8, 0x1.72p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_27_cast = conv(dilations = var_1745, groups = var_1186, pad = v_27_pad_0, pad_type = v_27_pad_type_0, strides = var_1743, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_27_cast")]; + tensor var_1749 = const()[name = tensor("op_1749"), val = tensor([2, 20, 64, -1])]; + tensor var_1750_cast = reshape(shape = var_1749, x = q_27_cast)[name = tensor("op_1750_cast")]; + tensor var_1751 = const()[name = tensor("op_1751"), val = tensor([2, 20, 64, -1])]; + tensor var_1752_cast = reshape(shape = var_1751, x = k_27_cast)[name = tensor("op_1752_cast")]; + tensor var_1753 = const()[name = tensor("op_1753"), val = tensor([2, 20, 64, -1])]; + tensor var_1754_cast = reshape(shape = var_1753, x = v_27_cast)[name = tensor("op_1754_cast")]; + tensor attn_weights_53_transpose_x_0 = const()[name = tensor("attn_weights_53_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_53_transpose_y_0 = const()[name = tensor("attn_weights_53_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_53_cast = matmul(transpose_x = attn_weights_53_transpose_x_0, transpose_y = attn_weights_53_transpose_y_0, x = var_1750_cast, y = var_1752_cast)[name = tensor("attn_weights_53_cast")]; + tensor attn_weights_55_cast = mul(x = attn_weights_53_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_55_cast")]; + tensor var_1758_cast = softmax(axis = var_1170, x = attn_weights_55_cast)[name = tensor("op_1758_cast")]; + tensor attn_27_transpose_x_0 = const()[name = tensor("attn_27_transpose_x_0"), val = tensor(false)]; + tensor attn_27_transpose_y_0 = const()[name = tensor("attn_27_transpose_y_0"), val = tensor(true)]; + tensor attn_27_cast = matmul(transpose_x = attn_27_transpose_x_0, transpose_y = attn_27_transpose_y_0, x = var_1754_cast, y = var_1758_cast)[name = tensor("attn_27_cast")]; + tensor var_1762 = const()[name = tensor("op_1762"), val = tensor([2, 1280, 1, -1])]; + tensor input_149_cast = reshape(shape = var_1762, x = attn_27_cast)[name = tensor("input_149_cast")]; + tensor var_1767 = const()[name = tensor("op_1767"), val = tensor([1, 1])]; + tensor var_1769 = const()[name = tensor("op_1769"), val = tensor([1, 1])]; + tensor var_1771_pad_type_0 = const()[name = tensor("op_1771_pad_type_0"), val = tensor("custom")]; + tensor var_1771_pad_0 = const()[name = tensor("op_1771_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(130689216))), lut = tensor([-0x1.8bp-8, 0x1.8bp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130894080)))]; + tensor var_1771_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_1769, groups = var_1186, pad = var_1771_pad_0, pad_type = var_1771_pad_type_0, strides = var_1767, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_149_cast)[name = tensor("op_1771_cast")]; + tensor inputs_41_cast = add(x = var_1771_cast, y = inputs_39_cast)[name = tensor("inputs_41_cast")]; + tensor var_1775 = const()[name = tensor("op_1775"), val = tensor([1])]; + tensor channels_mean_41_cast = reduce_mean(axes = var_1775, keep_dims = var_1181, x = inputs_41_cast)[name = tensor("channels_mean_41_cast")]; + tensor zero_mean_41_cast = sub(x = inputs_41_cast, y = channels_mean_41_cast)[name = tensor("zero_mean_41_cast")]; + tensor zero_mean_sq_41_cast = mul(x = zero_mean_41_cast, y = zero_mean_41_cast)[name = tensor("zero_mean_sq_41_cast")]; + tensor var_1779 = const()[name = tensor("op_1779"), val = tensor([1])]; + tensor var_1780_cast = reduce_mean(axes = var_1779, keep_dims = var_1181, x = zero_mean_sq_41_cast)[name = tensor("op_1780_cast")]; + tensor var_1781_to_fp16 = const()[name = tensor("op_1781_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1782_cast = add(x = var_1780_cast, y = var_1781_to_fp16)[name = tensor("op_1782_cast")]; + tensor denom_41_epsilon_0_to_fp16 = const()[name = tensor("denom_41_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_41_cast = rsqrt(epsilon = denom_41_epsilon_0_to_fp16, x = var_1782_cast)[name = tensor("denom_41_cast")]; + tensor out_41_cast = mul(x = zero_mean_41_cast, y = denom_41_cast)[name = tensor("out_41_cast")]; + tensor var_1786_to_fp16 = const()[name = tensor("op_1786_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130896704)))]; + tensor var_1787_cast = add(x = out_41_cast, y = var_1786_to_fp16)[name = tensor("op_1787_cast")]; + tensor var_1789_to_fp16 = const()[name = tensor("op_1789_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130899328)))]; + tensor input_151_cast = mul(x = var_1787_cast, y = var_1789_to_fp16)[name = tensor("input_151_cast")]; + tensor var_1797 = const()[name = tensor("op_1797"), val = tensor([1, 1])]; + tensor var_1799 = const()[name = tensor("op_1799"), val = tensor([1, 1])]; + tensor var_1801_pad_type_0 = const()[name = tensor("op_1801_pad_type_0"), val = tensor("custom")]; + tensor var_1801_pad_0 = const()[name = tensor("op_1801_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130901952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(140732416))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + 
tensor down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(140732608)))]; + tensor var_1801_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_1799, groups = var_1186, pad = var_1801_pad_0, pad_type = var_1801_pad_type_0, strides = var_1797, weight = down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_151_cast)[name = tensor("op_1801_cast")]; + tensor var_1802_split_sizes_0 = const()[name = tensor("op_1802_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1802_axis_0 = const()[name = tensor("op_1802_axis_0"), val = tensor(1)]; + tensor var_1802_cast_0, tensor var_1802_cast_1 = split(axis = var_1802_axis_0, split_sizes = var_1802_split_sizes_0, x = var_1801_cast)[name = tensor("op_1802_cast")]; + tensor var_1804_mode_0 = const()[name = tensor("op_1804_mode_0"), val = tensor("EXACT")]; + tensor var_1804_cast = gelu(mode = var_1804_mode_0, x = var_1802_cast_1)[name = tensor("op_1804_cast")]; + tensor input_153_cast = mul(x = var_1802_cast_0, y = var_1804_cast)[name = tensor("input_153_cast")]; + tensor var_1808 = const()[name = tensor("op_1808"), val = tensor([1, 1])]; + tensor var_1810 = const()[name = tensor("op_1810"), val = tensor([1, 1])]; + tensor var_1812_pad_type_0 = const()[name = tensor("op_1812_pad_type_0"), val = tensor("custom")]; + tensor var_1812_pad_0 = const()[name = tensor("op_1812_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(140753152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(144030016))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(144030144)))]; + tensor var_1812_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_1810, groups = var_1186, pad = var_1812_pad_0, pad_type = var_1812_pad_type_0, strides = var_1808, weight = down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_153_cast)[name = tensor("op_1812_cast")]; + tensor inputs_43_cast = add(x = var_1812_cast, y = inputs_41_cast)[name = tensor("inputs_43_cast")]; + tensor var_1822 = const()[name = tensor("op_1822"), val = tensor([1])]; + tensor channels_mean_43_cast = reduce_mean(axes = var_1822, keep_dims = var_1181, x = inputs_43_cast)[name = tensor("channels_mean_43_cast")]; + tensor zero_mean_43_cast = sub(x = inputs_43_cast, y = channels_mean_43_cast)[name = tensor("zero_mean_43_cast")]; + tensor zero_mean_sq_43_cast = mul(x = zero_mean_43_cast, y = zero_mean_43_cast)[name = tensor("zero_mean_sq_43_cast")]; + tensor var_1826 = const()[name = tensor("op_1826"), val = tensor([1])]; + tensor var_1827_cast = reduce_mean(axes = var_1826, keep_dims = var_1181, x = zero_mean_sq_43_cast)[name = 
tensor("op_1827_cast")]; + tensor var_1828_to_fp16 = const()[name = tensor("op_1828_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1829_cast = add(x = var_1827_cast, y = var_1828_to_fp16)[name = tensor("op_1829_cast")]; + tensor denom_43_epsilon_0_to_fp16 = const()[name = tensor("denom_43_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_43_cast = rsqrt(epsilon = denom_43_epsilon_0_to_fp16, x = var_1829_cast)[name = tensor("denom_43_cast")]; + tensor out_43_cast = mul(x = zero_mean_43_cast, y = denom_43_cast)[name = tensor("out_43_cast")]; + tensor var_1833_to_fp16 = const()[name = tensor("op_1833_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(144032768)))]; + tensor var_1834_cast = add(x = out_43_cast, y = var_1833_to_fp16)[name = tensor("op_1834_cast")]; + tensor var_1836_to_fp16 = const()[name = tensor("op_1836_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(144035392)))]; + tensor hidden_states_83_cast = mul(x = var_1834_cast, y = var_1836_to_fp16)[name = tensor("hidden_states_83_cast")]; + tensor var_1843 = const()[name = tensor("op_1843"), val = tensor([1, 1])]; + tensor var_1845 = const()[name = tensor("op_1845"), val = tensor([1, 1])]; + tensor q_29_pad_type_0 = const()[name = tensor("q_29_pad_type_0"), val = tensor("custom")]; + tensor q_29_pad_0 = const()[name = tensor("q_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(144038016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(144857280))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_29_cast = conv(dilations = var_1845, groups = var_1186, pad = q_29_pad_0, pad_type = q_29_pad_type_0, strides = var_1843, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_83_cast)[name = tensor("q_29_cast")]; + tensor var_1849 = const()[name = tensor("op_1849"), val = tensor([1, 1])]; + tensor var_1851 = const()[name = tensor("op_1851"), val = tensor([1, 1])]; + tensor k_29_pad_type_0 = const()[name = tensor("k_29_pad_type_0"), val = tensor("custom")]; + tensor k_29_pad_0 = const()[name = tensor("k_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(144857408))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(145676672))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_29_cast = conv(dilations = var_1851, groups = var_1186, pad = k_29_pad_0, pad_type = k_29_pad_type_0, strides = var_1849, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_83_cast)[name = tensor("k_29_cast")]; + tensor var_1855 = const()[name = tensor("op_1855"), val = tensor([1, 1])]; + tensor var_1857 = const()[name = tensor("op_1857"), val = tensor([1, 1])]; + tensor v_29_pad_type_0 = const()[name = tensor("v_29_pad_type_0"), val = tensor("custom")]; + tensor 
v_29_pad_0 = const()[name = tensor("v_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(145676800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146496064))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_29_cast = conv(dilations = var_1857, groups = var_1186, pad = v_29_pad_0, pad_type = v_29_pad_type_0, strides = var_1855, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_83_cast)[name = tensor("v_29_cast")]; + tensor var_1861 = const()[name = tensor("op_1861"), val = tensor([2, 20, 64, -1])]; + tensor var_1862_cast = reshape(shape = var_1861, x = q_29_cast)[name = tensor("op_1862_cast")]; + tensor var_1863 = const()[name = tensor("op_1863"), val = tensor([2, 20, 64, -1])]; + tensor var_1864_cast = reshape(shape = var_1863, x = k_29_cast)[name = tensor("op_1864_cast")]; + tensor var_1865 = const()[name = tensor("op_1865"), val = tensor([2, 20, 64, -1])]; + tensor var_1866_cast = reshape(shape = var_1865, x = v_29_cast)[name = tensor("op_1866_cast")]; + tensor attn_weights_57_transpose_x_0 = const()[name = tensor("attn_weights_57_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_57_transpose_y_0 = const()[name = tensor("attn_weights_57_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_57_cast = matmul(transpose_x = attn_weights_57_transpose_x_0, transpose_y = attn_weights_57_transpose_y_0, x = var_1862_cast, y = var_1864_cast)[name = tensor("attn_weights_57_cast")]; + tensor attn_weights_59_cast = mul(x = attn_weights_57_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_59_cast")]; + tensor var_1870_cast = softmax(axis = var_1170, x = attn_weights_59_cast)[name = tensor("op_1870_cast")]; + tensor attn_29_transpose_x_0 = const()[name = tensor("attn_29_transpose_x_0"), val = tensor(false)]; + tensor attn_29_transpose_y_0 = const()[name = tensor("attn_29_transpose_y_0"), val = tensor(true)]; + tensor attn_29_cast = matmul(transpose_x = attn_29_transpose_x_0, transpose_y = attn_29_transpose_y_0, x = var_1866_cast, y = var_1870_cast)[name = tensor("attn_29_cast")]; + tensor var_1874 = const()[name = tensor("op_1874"), val = tensor([2, 1280, 1, -1])]; + tensor input_155_cast = reshape(shape = var_1874, x = attn_29_cast)[name = tensor("input_155_cast")]; + tensor var_1879 = const()[name = tensor("op_1879"), val = tensor([1, 1])]; + tensor var_1881 = const()[name = tensor("op_1881"), val = tensor([1, 1])]; + tensor var_1883_pad_type_0 = const()[name = tensor("op_1883_pad_type_0"), val = tensor("custom")]; + tensor var_1883_pad_0 = const()[name = tensor("op_1883_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146496192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147315456))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name 
= tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147315584)))]; + tensor var_1883_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_1881, groups = var_1186, pad = var_1883_pad_0, pad_type = var_1883_pad_type_0, strides = var_1879, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_155_cast)[name = tensor("op_1883_cast")]; + tensor inputs_45_cast = add(x = var_1883_cast, y = inputs_43_cast)[name = tensor("inputs_45_cast")]; + tensor var_1887 = const()[name = tensor("op_1887"), val = tensor([1])]; + tensor channels_mean_45_cast = reduce_mean(axes = var_1887, keep_dims = var_1181, x = inputs_45_cast)[name = tensor("channels_mean_45_cast")]; + tensor zero_mean_45_cast = sub(x = inputs_45_cast, y = channels_mean_45_cast)[name = tensor("zero_mean_45_cast")]; + tensor zero_mean_sq_45_cast = mul(x = zero_mean_45_cast, y = zero_mean_45_cast)[name = tensor("zero_mean_sq_45_cast")]; + tensor var_1891 = const()[name = tensor("op_1891"), val = tensor([1])]; + tensor var_1892_cast = reduce_mean(axes = var_1891, keep_dims = var_1181, x = zero_mean_sq_45_cast)[name = tensor("op_1892_cast")]; + tensor var_1893_to_fp16 = const()[name = tensor("op_1893_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1894_cast = add(x = var_1892_cast, y = var_1893_to_fp16)[name = tensor("op_1894_cast")]; + tensor denom_45_epsilon_0_to_fp16 = const()[name = tensor("denom_45_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_45_cast = rsqrt(epsilon = denom_45_epsilon_0_to_fp16, x = var_1894_cast)[name = tensor("denom_45_cast")]; + tensor out_45_cast = mul(x = zero_mean_45_cast, y = denom_45_cast)[name = tensor("out_45_cast")]; + tensor var_1898_to_fp16 = const()[name = tensor("op_1898_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147318208)))]; + tensor var_1899_cast = add(x = out_45_cast, y = var_1898_to_fp16)[name = tensor("op_1899_cast")]; + tensor var_1901_to_fp16 = const()[name = tensor("op_1901_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147320832)))]; + tensor hidden_states_85_cast = mul(x = var_1899_cast, y = var_1901_to_fp16)[name = tensor("hidden_states_85_cast")]; + tensor var_1908 = const()[name = tensor("op_1908"), val = tensor([1, 1])]; + tensor var_1910 = const()[name = tensor("op_1910"), val = tensor([1, 1])]; + tensor q_31_pad_type_0 = const()[name = tensor("q_31_pad_type_0"), val = tensor("custom")]; + tensor q_31_pad_0 = const()[name = tensor("q_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147323456))), lut = tensor([-0x1.918p-7, 0x1.924p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_31_cast = conv(dilations = var_1910, groups = var_1186, pad = q_31_pad_0, pad_type = q_31_pad_type_0, strides = var_1908, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_85_cast)[name = tensor("q_31_cast")]; + tensor var_1914 = const()[name = tensor("op_1914"), val = tensor([1, 1])]; + tensor 
var_1916 = const()[name = tensor("op_1916"), val = tensor([1, 1])]; + tensor k_31_pad_type_0 = const()[name = tensor("k_31_pad_type_0"), val = tensor("custom")]; + tensor k_31_pad_0 = const()[name = tensor("k_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147528320))), lut = tensor([-0x1.214p-6, -0x1.5c8p-8, 0x1.5bcp-8, 0x1.218p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_31_cast = conv(dilations = var_1916, groups = var_1186, pad = k_31_pad_0, pad_type = k_31_pad_type_0, strides = var_1914, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_31_cast")]; + tensor var_1920 = const()[name = tensor("op_1920"), val = tensor([1, 1])]; + tensor var_1922 = const()[name = tensor("op_1922"), val = tensor([1, 1])]; + tensor v_31_pad_type_0 = const()[name = tensor("v_31_pad_type_0"), val = tensor("custom")]; + tensor v_31_pad_0 = const()[name = tensor("v_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(148183744))), lut = tensor([-0x1.6d4p-6, -0x1.9d8p-8, 0x1.a04p-8, 0x1.6e4p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_31_cast = conv(dilations = var_1922, groups = var_1186, pad = v_31_pad_0, pad_type = v_31_pad_type_0, strides = var_1920, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_31_cast")]; + tensor var_1926 = const()[name = tensor("op_1926"), val = tensor([2, 20, 64, -1])]; + tensor var_1927_cast = reshape(shape = var_1926, x = q_31_cast)[name = tensor("op_1927_cast")]; + tensor var_1928 = const()[name = tensor("op_1928"), val = tensor([2, 20, 64, -1])]; + tensor var_1929_cast = reshape(shape = var_1928, x = k_31_cast)[name = tensor("op_1929_cast")]; + tensor var_1930 = const()[name = tensor("op_1930"), val = tensor([2, 20, 64, -1])]; + tensor var_1931_cast = reshape(shape = var_1930, x = v_31_cast)[name = tensor("op_1931_cast")]; + tensor attn_weights_61_transpose_x_0 = const()[name = tensor("attn_weights_61_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_61_transpose_y_0 = const()[name = tensor("attn_weights_61_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_61_cast = matmul(transpose_x = attn_weights_61_transpose_x_0, transpose_y = attn_weights_61_transpose_y_0, x = var_1927_cast, y = var_1929_cast)[name = tensor("attn_weights_61_cast")]; + tensor attn_weights_63_cast = mul(x = attn_weights_61_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_63_cast")]; + tensor var_1935_cast = softmax(axis = var_1170, x = attn_weights_63_cast)[name = tensor("op_1935_cast")]; + tensor attn_31_transpose_x_0 = const()[name = tensor("attn_31_transpose_x_0"), val = tensor(false)]; + tensor attn_31_transpose_y_0 = const()[name = tensor("attn_31_transpose_y_0"), val = tensor(true)]; + tensor attn_31_cast = matmul(transpose_x = attn_31_transpose_x_0, transpose_y = 
attn_31_transpose_y_0, x = var_1931_cast, y = var_1935_cast)[name = tensor("attn_31_cast")]; + tensor var_1939 = const()[name = tensor("op_1939"), val = tensor([2, 1280, 1, -1])]; + tensor input_157_cast = reshape(shape = var_1939, x = attn_31_cast)[name = tensor("input_157_cast")]; + tensor var_1944 = const()[name = tensor("op_1944"), val = tensor([1, 1])]; + tensor var_1946 = const()[name = tensor("op_1946"), val = tensor([1, 1])]; + tensor var_1948_pad_type_0 = const()[name = tensor("op_1948_pad_type_0"), val = tensor("custom")]; + tensor var_1948_pad_0 = const()[name = tensor("op_1948_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(148839168))), lut = tensor([-0x1.8a8p-8, 0x1.89cp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(149044032)))]; + tensor var_1948_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_1946, groups = var_1186, pad = var_1948_pad_0, pad_type = var_1948_pad_type_0, strides = var_1944, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_157_cast)[name = tensor("op_1948_cast")]; + tensor inputs_47_cast = add(x = var_1948_cast, y = inputs_45_cast)[name = tensor("inputs_47_cast")]; + tensor var_1952 = const()[name = tensor("op_1952"), val = tensor([1])]; + tensor channels_mean_47_cast = reduce_mean(axes = var_1952, keep_dims = var_1181, x = inputs_47_cast)[name = tensor("channels_mean_47_cast")]; + tensor zero_mean_47_cast = sub(x = inputs_47_cast, y = channels_mean_47_cast)[name = tensor("zero_mean_47_cast")]; + tensor zero_mean_sq_47_cast = mul(x = zero_mean_47_cast, y = zero_mean_47_cast)[name = tensor("zero_mean_sq_47_cast")]; + tensor var_1956 = const()[name = tensor("op_1956"), val = tensor([1])]; + tensor var_1957_cast = reduce_mean(axes = var_1956, keep_dims = var_1181, x = zero_mean_sq_47_cast)[name = tensor("op_1957_cast")]; + tensor var_1958_to_fp16 = const()[name = tensor("op_1958_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1959_cast = add(x = var_1957_cast, y = var_1958_to_fp16)[name = tensor("op_1959_cast")]; + tensor denom_47_epsilon_0_to_fp16 = const()[name = tensor("denom_47_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_47_cast = rsqrt(epsilon = denom_47_epsilon_0_to_fp16, x = var_1959_cast)[name = tensor("denom_47_cast")]; + tensor out_47_cast = mul(x = zero_mean_47_cast, y = denom_47_cast)[name = tensor("out_47_cast")]; + tensor var_1963_to_fp16 = const()[name = tensor("op_1963_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(149046656)))]; + tensor var_1964_cast = add(x = out_47_cast, y = var_1963_to_fp16)[name = tensor("op_1964_cast")]; + tensor var_1966_to_fp16 = const()[name = tensor("op_1966_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(149049280)))]; + tensor input_159_cast = mul(x = var_1964_cast, y = var_1966_to_fp16)[name = 
tensor("input_159_cast")]; + tensor var_1974 = const()[name = tensor("op_1974"), val = tensor([1, 1])]; + tensor var_1976 = const()[name = tensor("op_1976"), val = tensor([1, 1])]; + tensor var_1978_pad_type_0 = const()[name = tensor("op_1978_pad_type_0"), val = tensor("custom")]; + tensor var_1978_pad_0 = const()[name = tensor("op_1978_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(149051904))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(155605568))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(155605696)))]; + tensor var_1978_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_1976, groups = var_1186, pad = var_1978_pad_0, pad_type = var_1978_pad_type_0, strides = var_1974, weight = down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_159_cast)[name = tensor("op_1978_cast")]; + tensor var_1979_split_sizes_0 = const()[name = tensor("op_1979_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1979_axis_0 = const()[name = tensor("op_1979_axis_0"), val = tensor(1)]; + tensor var_1979_cast_0, tensor var_1979_cast_1 = split(axis = var_1979_axis_0, split_sizes = var_1979_split_sizes_0, x = var_1978_cast)[name = tensor("op_1979_cast")]; + tensor var_1981_mode_0 = const()[name = tensor("op_1981_mode_0"), val = tensor("EXACT")]; + tensor var_1981_cast = gelu(mode = var_1981_mode_0, x = var_1979_cast_1)[name = tensor("op_1981_cast")]; + tensor input_161_cast = mul(x = var_1979_cast_0, y = var_1981_cast)[name = tensor("input_161_cast")]; + tensor var_1985 = const()[name = tensor("op_1985"), val = tensor([1, 1])]; + tensor var_1987 = const()[name = tensor("op_1987"), val = tensor([1, 1])]; + tensor var_1989_pad_type_0 = const()[name = tensor("op_1989_pad_type_0"), val = tensor("custom")]; + tensor var_1989_pad_0 = const()[name = tensor("op_1989_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(155626240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160541504))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160541696)))]; + tensor var_1989_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_1987, groups = var_1186, pad = var_1989_pad_0, pad_type = var_1989_pad_type_0, strides = var_1985, weight = 
down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_161_cast)[name = tensor("op_1989_cast")]; + tensor inputs_49_cast = add(x = var_1989_cast, y = inputs_47_cast)[name = tensor("inputs_49_cast")]; + tensor var_1999 = const()[name = tensor("op_1999"), val = tensor([1])]; + tensor channels_mean_49_cast = reduce_mean(axes = var_1999, keep_dims = var_1181, x = inputs_49_cast)[name = tensor("channels_mean_49_cast")]; + tensor zero_mean_49_cast = sub(x = inputs_49_cast, y = channels_mean_49_cast)[name = tensor("zero_mean_49_cast")]; + tensor zero_mean_sq_49_cast = mul(x = zero_mean_49_cast, y = zero_mean_49_cast)[name = tensor("zero_mean_sq_49_cast")]; + tensor var_2003 = const()[name = tensor("op_2003"), val = tensor([1])]; + tensor var_2004_cast = reduce_mean(axes = var_2003, keep_dims = var_1181, x = zero_mean_sq_49_cast)[name = tensor("op_2004_cast")]; + tensor var_2005_to_fp16 = const()[name = tensor("op_2005_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2006_cast = add(x = var_2004_cast, y = var_2005_to_fp16)[name = tensor("op_2006_cast")]; + tensor denom_49_epsilon_0_to_fp16 = const()[name = tensor("denom_49_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_49_cast = rsqrt(epsilon = denom_49_epsilon_0_to_fp16, x = var_2006_cast)[name = tensor("denom_49_cast")]; + tensor out_49_cast = mul(x = zero_mean_49_cast, y = denom_49_cast)[name = tensor("out_49_cast")]; + tensor var_2010_to_fp16 = const()[name = tensor("op_2010_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160544320)))]; + tensor var_2011_cast = add(x = out_49_cast, y = var_2010_to_fp16)[name = tensor("op_2011_cast")]; + tensor var_2013_to_fp16 = const()[name = tensor("op_2013_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160546944)))]; + tensor hidden_states_89_cast = mul(x = var_2011_cast, y = var_2013_to_fp16)[name = tensor("hidden_states_89_cast")]; + tensor var_2020 = const()[name = tensor("op_2020"), val = tensor([1, 1])]; + tensor var_2022 = const()[name = tensor("op_2022"), val = tensor([1, 1])]; + tensor q_33_pad_type_0 = const()[name = tensor("q_33_pad_type_0"), val = tensor("custom")]; + tensor q_33_pad_0 = const()[name = tensor("q_33_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160549568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(161368832))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_33_cast = conv(dilations = var_2022, groups = var_1186, pad = q_33_pad_0, pad_type = q_33_pad_type_0, strides = var_2020, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_89_cast)[name = tensor("q_33_cast")]; + tensor var_2026 = const()[name = tensor("op_2026"), val = tensor([1, 1])]; + tensor var_2028 = const()[name = tensor("op_2028"), val = tensor([1, 1])]; + tensor k_33_pad_type_0 = const()[name = tensor("k_33_pad_type_0"), val = tensor("custom")]; + tensor k_33_pad_0 = const()[name = tensor("k_33_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices 
= tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(161368960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(162188224))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_33_cast = conv(dilations = var_2028, groups = var_1186, pad = k_33_pad_0, pad_type = k_33_pad_type_0, strides = var_2026, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_89_cast)[name = tensor("k_33_cast")]; + tensor var_2032 = const()[name = tensor("op_2032"), val = tensor([1, 1])]; + tensor var_2034 = const()[name = tensor("op_2034"), val = tensor([1, 1])]; + tensor v_33_pad_type_0 = const()[name = tensor("v_33_pad_type_0"), val = tensor("custom")]; + tensor v_33_pad_0 = const()[name = tensor("v_33_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(162188352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163007616))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_33_cast = conv(dilations = var_2034, groups = var_1186, pad = v_33_pad_0, pad_type = v_33_pad_type_0, strides = var_2032, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_89_cast)[name = tensor("v_33_cast")]; + tensor var_2038 = const()[name = tensor("op_2038"), val = tensor([2, 20, 64, -1])]; + tensor var_2039_cast = reshape(shape = var_2038, x = q_33_cast)[name = tensor("op_2039_cast")]; + tensor var_2040 = const()[name = tensor("op_2040"), val = tensor([2, 20, 64, -1])]; + tensor var_2041_cast = reshape(shape = var_2040, x = k_33_cast)[name = tensor("op_2041_cast")]; + tensor var_2042 = const()[name = tensor("op_2042"), val = tensor([2, 20, 64, -1])]; + tensor var_2043_cast = reshape(shape = var_2042, x = v_33_cast)[name = tensor("op_2043_cast")]; + tensor attn_weights_65_transpose_x_0 = const()[name = tensor("attn_weights_65_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_65_transpose_y_0 = const()[name = tensor("attn_weights_65_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_65_cast = matmul(transpose_x = attn_weights_65_transpose_x_0, transpose_y = attn_weights_65_transpose_y_0, x = var_2039_cast, y = var_2041_cast)[name = tensor("attn_weights_65_cast")]; + tensor attn_weights_67_cast = mul(x = attn_weights_65_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_67_cast")]; + tensor var_2047_cast = softmax(axis = var_1170, x = attn_weights_67_cast)[name = tensor("op_2047_cast")]; + tensor attn_33_transpose_x_0 = const()[name = tensor("attn_33_transpose_x_0"), val = tensor(false)]; + tensor attn_33_transpose_y_0 = const()[name = tensor("attn_33_transpose_y_0"), val = tensor(true)]; + tensor attn_33_cast = matmul(transpose_x = attn_33_transpose_x_0, transpose_y = attn_33_transpose_y_0, x = var_2043_cast, y = var_2047_cast)[name = tensor("attn_33_cast")]; + tensor var_2051 = const()[name = tensor("op_2051"), val = tensor([2, 1280, 1, -1])]; + tensor input_163_cast = reshape(shape = var_2051, x = attn_33_cast)[name = tensor("input_163_cast")]; + tensor var_2056 = const()[name = 
tensor("op_2056"), val = tensor([1, 1])]; + tensor var_2058 = const()[name = tensor("op_2058"), val = tensor([1, 1])]; + tensor var_2060_pad_type_0 = const()[name = tensor("op_2060_pad_type_0"), val = tensor("custom")]; + tensor var_2060_pad_0 = const()[name = tensor("op_2060_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163007744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163827008))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163827136)))]; + tensor var_2060_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_2058, groups = var_1186, pad = var_2060_pad_0, pad_type = var_2060_pad_type_0, strides = var_2056, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_163_cast)[name = tensor("op_2060_cast")]; + tensor inputs_51_cast = add(x = var_2060_cast, y = inputs_49_cast)[name = tensor("inputs_51_cast")]; + tensor var_2064 = const()[name = tensor("op_2064"), val = tensor([1])]; + tensor channels_mean_51_cast = reduce_mean(axes = var_2064, keep_dims = var_1181, x = inputs_51_cast)[name = tensor("channels_mean_51_cast")]; + tensor zero_mean_51_cast = sub(x = inputs_51_cast, y = channels_mean_51_cast)[name = tensor("zero_mean_51_cast")]; + tensor zero_mean_sq_51_cast = mul(x = zero_mean_51_cast, y = zero_mean_51_cast)[name = tensor("zero_mean_sq_51_cast")]; + tensor var_2068 = const()[name = tensor("op_2068"), val = tensor([1])]; + tensor var_2069_cast = reduce_mean(axes = var_2068, keep_dims = var_1181, x = zero_mean_sq_51_cast)[name = tensor("op_2069_cast")]; + tensor var_2070_to_fp16 = const()[name = tensor("op_2070_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2071_cast = add(x = var_2069_cast, y = var_2070_to_fp16)[name = tensor("op_2071_cast")]; + tensor denom_51_epsilon_0_to_fp16 = const()[name = tensor("denom_51_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_51_cast = rsqrt(epsilon = denom_51_epsilon_0_to_fp16, x = var_2071_cast)[name = tensor("denom_51_cast")]; + tensor out_51_cast = mul(x = zero_mean_51_cast, y = denom_51_cast)[name = tensor("out_51_cast")]; + tensor var_2075_to_fp16 = const()[name = tensor("op_2075_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163829760)))]; + tensor var_2076_cast = add(x = out_51_cast, y = var_2075_to_fp16)[name = tensor("op_2076_cast")]; + tensor var_2078_to_fp16 = const()[name = tensor("op_2078_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163832384)))]; + tensor hidden_states_91_cast = mul(x = var_2076_cast, y = var_2078_to_fp16)[name = tensor("hidden_states_91_cast")]; + tensor var_2085 = const()[name = tensor("op_2085"), val = tensor([1, 1])]; + tensor var_2087 = const()[name = tensor("op_2087"), val = tensor([1, 1])]; + tensor q_35_pad_type_0 = const()[name = tensor("q_35_pad_type_0"), val 
= tensor("custom")]; + tensor q_35_pad_0 = const()[name = tensor("q_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163835008))), lut = tensor([-0x1.83p-7, 0x1.82cp-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_35_cast = conv(dilations = var_2087, groups = var_1186, pad = q_35_pad_0, pad_type = q_35_pad_type_0, strides = var_2085, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_91_cast)[name = tensor("q_35_cast")]; + tensor var_2091 = const()[name = tensor("op_2091"), val = tensor([1, 1])]; + tensor var_2093 = const()[name = tensor("op_2093"), val = tensor([1, 1])]; + tensor k_35_pad_type_0 = const()[name = tensor("k_35_pad_type_0"), val = tensor("custom")]; + tensor k_35_pad_0 = const()[name = tensor("k_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(164039872))), lut = tensor([-0x1.064p-6, -0x1.42p-8, 0x1.42cp-8, 0x1.064p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_35_cast = conv(dilations = var_2093, groups = var_1186, pad = k_35_pad_0, pad_type = k_35_pad_type_0, strides = var_2091, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_35_cast")]; + tensor var_2097 = const()[name = tensor("op_2097"), val = tensor([1, 1])]; + tensor var_2099 = const()[name = tensor("op_2099"), val = tensor([1, 1])]; + tensor v_35_pad_type_0 = const()[name = tensor("v_35_pad_type_0"), val = tensor("custom")]; + tensor v_35_pad_0 = const()[name = tensor("v_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(164695296))), lut = tensor([-0x1.3e4p-7, 0x1.3e8p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_35_cast = conv(dilations = var_2099, groups = var_1186, pad = v_35_pad_0, pad_type = v_35_pad_type_0, strides = var_2097, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_35_cast")]; + tensor var_2103 = const()[name = tensor("op_2103"), val = tensor([2, 20, 64, -1])]; + tensor var_2104_cast = reshape(shape = var_2103, x = q_35_cast)[name = tensor("op_2104_cast")]; + tensor var_2105 = const()[name = tensor("op_2105"), val = tensor([2, 20, 64, -1])]; + tensor var_2106_cast = reshape(shape = var_2105, x = k_35_cast)[name = tensor("op_2106_cast")]; + tensor var_2107 = const()[name = tensor("op_2107"), val = tensor([2, 20, 64, -1])]; + tensor var_2108_cast = reshape(shape = var_2107, x = v_35_cast)[name = tensor("op_2108_cast")]; + tensor attn_weights_69_transpose_x_0 = const()[name = tensor("attn_weights_69_transpose_x_0"), val = tensor(true)]; + 
tensor attn_weights_69_transpose_y_0 = const()[name = tensor("attn_weights_69_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_69_cast = matmul(transpose_x = attn_weights_69_transpose_x_0, transpose_y = attn_weights_69_transpose_y_0, x = var_2104_cast, y = var_2106_cast)[name = tensor("attn_weights_69_cast")]; + tensor attn_weights_71_cast = mul(x = attn_weights_69_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_71_cast")]; + tensor var_2112_cast = softmax(axis = var_1170, x = attn_weights_71_cast)[name = tensor("op_2112_cast")]; + tensor attn_35_transpose_x_0 = const()[name = tensor("attn_35_transpose_x_0"), val = tensor(false)]; + tensor attn_35_transpose_y_0 = const()[name = tensor("attn_35_transpose_y_0"), val = tensor(true)]; + tensor attn_35_cast = matmul(transpose_x = attn_35_transpose_x_0, transpose_y = attn_35_transpose_y_0, x = var_2108_cast, y = var_2112_cast)[name = tensor("attn_35_cast")]; + tensor var_2116 = const()[name = tensor("op_2116"), val = tensor([2, 1280, 1, -1])]; + tensor input_165_cast = reshape(shape = var_2116, x = attn_35_cast)[name = tensor("input_165_cast")]; + tensor var_2121 = const()[name = tensor("op_2121"), val = tensor([1, 1])]; + tensor var_2123 = const()[name = tensor("op_2123"), val = tensor([1, 1])]; + tensor var_2125_pad_type_0 = const()[name = tensor("op_2125_pad_type_0"), val = tensor("custom")]; + tensor var_2125_pad_0 = const()[name = tensor("op_2125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165023040))), lut = tensor([-0x1.684p-8, 0x1.68cp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165227904)))]; + tensor var_2125_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_2123, groups = var_1186, pad = var_2125_pad_0, pad_type = var_2125_pad_type_0, strides = var_2121, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_165_cast)[name = tensor("op_2125_cast")]; + tensor inputs_53_cast = add(x = var_2125_cast, y = inputs_51_cast)[name = tensor("inputs_53_cast")]; + tensor var_2129 = const()[name = tensor("op_2129"), val = tensor([1])]; + tensor channels_mean_53_cast = reduce_mean(axes = var_2129, keep_dims = var_1181, x = inputs_53_cast)[name = tensor("channels_mean_53_cast")]; + tensor zero_mean_53_cast = sub(x = inputs_53_cast, y = channels_mean_53_cast)[name = tensor("zero_mean_53_cast")]; + tensor zero_mean_sq_53_cast = mul(x = zero_mean_53_cast, y = zero_mean_53_cast)[name = tensor("zero_mean_sq_53_cast")]; + tensor var_2133 = const()[name = tensor("op_2133"), val = tensor([1])]; + tensor var_2134_cast = reduce_mean(axes = var_2133, keep_dims = var_1181, x = zero_mean_sq_53_cast)[name = tensor("op_2134_cast")]; + tensor var_2135_to_fp16 = const()[name = tensor("op_2135_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2136_cast = add(x = var_2134_cast, y = var_2135_to_fp16)[name = tensor("op_2136_cast")]; + tensor 
denom_53_epsilon_0_to_fp16 = const()[name = tensor("denom_53_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_53_cast = rsqrt(epsilon = denom_53_epsilon_0_to_fp16, x = var_2136_cast)[name = tensor("denom_53_cast")]; + tensor out_53_cast = mul(x = zero_mean_53_cast, y = denom_53_cast)[name = tensor("out_53_cast")]; + tensor var_2140_to_fp16 = const()[name = tensor("op_2140_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165230528)))]; + tensor var_2141_cast = add(x = out_53_cast, y = var_2140_to_fp16)[name = tensor("op_2141_cast")]; + tensor var_2143_to_fp16 = const()[name = tensor("op_2143_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165233152)))]; + tensor input_167_cast = mul(x = var_2141_cast, y = var_2143_to_fp16)[name = tensor("input_167_cast")]; + tensor var_2151 = const()[name = tensor("op_2151"), val = tensor([1, 1])]; + tensor var_2153 = const()[name = tensor("op_2153"), val = tensor([1, 1])]; + tensor var_2155_pad_type_0 = const()[name = tensor("op_2155_pad_type_0"), val = tensor("custom")]; + tensor var_2155_pad_0 = const()[name = tensor("op_2155_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165235776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(175066240))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(175066432)))]; + tensor var_2155_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_2153, groups = var_1186, pad = var_2155_pad_0, pad_type = var_2155_pad_type_0, strides = var_2151, weight = down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_167_cast)[name = tensor("op_2155_cast")]; + tensor var_2156_split_sizes_0 = const()[name = tensor("op_2156_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2156_axis_0 = const()[name = tensor("op_2156_axis_0"), val = tensor(1)]; + tensor var_2156_cast_0, tensor var_2156_cast_1 = split(axis = var_2156_axis_0, split_sizes = var_2156_split_sizes_0, x = var_2155_cast)[name = tensor("op_2156_cast")]; + tensor var_2158_mode_0 = const()[name = tensor("op_2158_mode_0"), val = tensor("EXACT")]; + tensor var_2158_cast = gelu(mode = var_2158_mode_0, x = var_2156_cast_1)[name = tensor("op_2158_cast")]; + tensor input_169_cast = mul(x = var_2156_cast_0, y = var_2158_cast)[name = tensor("input_169_cast")]; + tensor var_2162 = const()[name = tensor("op_2162"), val = tensor([1, 1])]; + tensor var_2164 = const()[name = tensor("op_2164"), val = tensor([1, 1])]; + tensor var_2166_pad_type_0 = const()[name = tensor("op_2166_pad_type_0"), val = tensor("custom")]; + tensor var_2166_pad_0 = const()[name = tensor("op_2166_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(175086976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(180002240))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(180002432)))]; + tensor var_2166_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_2164, groups = var_1186, pad = var_2166_pad_0, pad_type = var_2166_pad_type_0, strides = var_2162, weight = down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_169_cast)[name = tensor("op_2166_cast")]; + tensor inputs_55_cast = add(x = var_2166_cast, y = inputs_53_cast)[name = tensor("inputs_55_cast")]; + tensor var_2176 = const()[name = tensor("op_2176"), val = tensor([1])]; + tensor channels_mean_55_cast = reduce_mean(axes = var_2176, keep_dims = var_1181, x = inputs_55_cast)[name = tensor("channels_mean_55_cast")]; + tensor zero_mean_55_cast = sub(x = inputs_55_cast, y = channels_mean_55_cast)[name = tensor("zero_mean_55_cast")]; + tensor zero_mean_sq_55_cast = mul(x = zero_mean_55_cast, y = zero_mean_55_cast)[name = tensor("zero_mean_sq_55_cast")]; + tensor var_2180 = const()[name = tensor("op_2180"), val = tensor([1])]; + tensor var_2181_cast = reduce_mean(axes = var_2180, keep_dims = var_1181, x = zero_mean_sq_55_cast)[name = tensor("op_2181_cast")]; + tensor var_2182_to_fp16 = const()[name = tensor("op_2182_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2183_cast = add(x = var_2181_cast, y = var_2182_to_fp16)[name = tensor("op_2183_cast")]; + tensor denom_55_epsilon_0_to_fp16 = const()[name = tensor("denom_55_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_55_cast = rsqrt(epsilon = denom_55_epsilon_0_to_fp16, x = var_2183_cast)[name = tensor("denom_55_cast")]; + tensor out_55_cast = mul(x = zero_mean_55_cast, y = denom_55_cast)[name = tensor("out_55_cast")]; + tensor var_2187_to_fp16 = const()[name = tensor("op_2187_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(180005056)))]; + tensor var_2188_cast = add(x = out_55_cast, y = var_2187_to_fp16)[name = tensor("op_2188_cast")]; + tensor var_2190_to_fp16 = const()[name = tensor("op_2190_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(180007680)))]; + tensor hidden_states_95_cast = mul(x = var_2188_cast, y = var_2190_to_fp16)[name = tensor("hidden_states_95_cast")]; + tensor var_2197 = const()[name = tensor("op_2197"), val = tensor([1, 1])]; + tensor var_2199 = const()[name = tensor("op_2199"), val = tensor([1, 1])]; + tensor q_37_pad_type_0 = const()[name = tensor("q_37_pad_type_0"), val = tensor("custom")]; + tensor q_37_pad_0 = const()[name = tensor("q_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(180010304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(180829568))), name = 
tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_37_cast = conv(dilations = var_2199, groups = var_1186, pad = q_37_pad_0, pad_type = q_37_pad_type_0, strides = var_2197, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_95_cast)[name = tensor("q_37_cast")]; + tensor var_2203 = const()[name = tensor("op_2203"), val = tensor([1, 1])]; + tensor var_2205 = const()[name = tensor("op_2205"), val = tensor([1, 1])]; + tensor k_37_pad_type_0 = const()[name = tensor("k_37_pad_type_0"), val = tensor("custom")]; + tensor k_37_pad_0 = const()[name = tensor("k_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(180829696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181648960))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_37_cast = conv(dilations = var_2205, groups = var_1186, pad = k_37_pad_0, pad_type = k_37_pad_type_0, strides = var_2203, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_95_cast)[name = tensor("k_37_cast")]; + tensor var_2209 = const()[name = tensor("op_2209"), val = tensor([1, 1])]; + tensor var_2211 = const()[name = tensor("op_2211"), val = tensor([1, 1])]; + tensor v_37_pad_type_0 = const()[name = tensor("v_37_pad_type_0"), val = tensor("custom")]; + tensor v_37_pad_0 = const()[name = tensor("v_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181649088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(182468352))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_37_cast = conv(dilations = var_2211, groups = var_1186, pad = v_37_pad_0, pad_type = v_37_pad_type_0, strides = var_2209, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_95_cast)[name = tensor("v_37_cast")]; + tensor var_2215 = const()[name = tensor("op_2215"), val = tensor([2, 20, 64, -1])]; + tensor var_2216_cast = reshape(shape = var_2215, x = q_37_cast)[name = tensor("op_2216_cast")]; + tensor var_2217 = const()[name = tensor("op_2217"), val = tensor([2, 20, 64, -1])]; + tensor var_2218_cast = reshape(shape = var_2217, x = k_37_cast)[name = tensor("op_2218_cast")]; + tensor var_2219 = const()[name = tensor("op_2219"), val = tensor([2, 20, 64, -1])]; + tensor var_2220_cast = reshape(shape = var_2219, x = v_37_cast)[name = tensor("op_2220_cast")]; + tensor attn_weights_73_transpose_x_0 = const()[name = tensor("attn_weights_73_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_73_transpose_y_0 = const()[name = tensor("attn_weights_73_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_73_cast = matmul(transpose_x = attn_weights_73_transpose_x_0, transpose_y = attn_weights_73_transpose_y_0, x = var_2216_cast, y = var_2218_cast)[name = 
tensor("attn_weights_73_cast")]; + tensor attn_weights_75_cast = mul(x = attn_weights_73_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_75_cast")]; + tensor var_2224_cast = softmax(axis = var_1170, x = attn_weights_75_cast)[name = tensor("op_2224_cast")]; + tensor attn_37_transpose_x_0 = const()[name = tensor("attn_37_transpose_x_0"), val = tensor(false)]; + tensor attn_37_transpose_y_0 = const()[name = tensor("attn_37_transpose_y_0"), val = tensor(true)]; + tensor attn_37_cast = matmul(transpose_x = attn_37_transpose_x_0, transpose_y = attn_37_transpose_y_0, x = var_2220_cast, y = var_2224_cast)[name = tensor("attn_37_cast")]; + tensor var_2228 = const()[name = tensor("op_2228"), val = tensor([2, 1280, 1, -1])]; + tensor input_171_cast = reshape(shape = var_2228, x = attn_37_cast)[name = tensor("input_171_cast")]; + tensor var_2233 = const()[name = tensor("op_2233"), val = tensor([1, 1])]; + tensor var_2235 = const()[name = tensor("op_2235"), val = tensor([1, 1])]; + tensor var_2237_pad_type_0 = const()[name = tensor("op_2237_pad_type_0"), val = tensor("custom")]; + tensor var_2237_pad_0 = const()[name = tensor("op_2237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(182468480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183287744))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183287872)))]; + tensor var_2237_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_2235, groups = var_1186, pad = var_2237_pad_0, pad_type = var_2237_pad_type_0, strides = var_2233, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_171_cast)[name = tensor("op_2237_cast")]; + tensor inputs_57_cast = add(x = var_2237_cast, y = inputs_55_cast)[name = tensor("inputs_57_cast")]; + tensor var_2241 = const()[name = tensor("op_2241"), val = tensor([1])]; + tensor channels_mean_57_cast = reduce_mean(axes = var_2241, keep_dims = var_1181, x = inputs_57_cast)[name = tensor("channels_mean_57_cast")]; + tensor zero_mean_57_cast = sub(x = inputs_57_cast, y = channels_mean_57_cast)[name = tensor("zero_mean_57_cast")]; + tensor zero_mean_sq_57_cast = mul(x = zero_mean_57_cast, y = zero_mean_57_cast)[name = tensor("zero_mean_sq_57_cast")]; + tensor var_2245 = const()[name = tensor("op_2245"), val = tensor([1])]; + tensor var_2246_cast = reduce_mean(axes = var_2245, keep_dims = var_1181, x = zero_mean_sq_57_cast)[name = tensor("op_2246_cast")]; + tensor var_2247_to_fp16 = const()[name = tensor("op_2247_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2248_cast = add(x = var_2246_cast, y = var_2247_to_fp16)[name = tensor("op_2248_cast")]; + tensor denom_57_epsilon_0_to_fp16 = const()[name = tensor("denom_57_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_57_cast = rsqrt(epsilon = denom_57_epsilon_0_to_fp16, x = var_2248_cast)[name = tensor("denom_57_cast")]; + tensor 
out_57_cast = mul(x = zero_mean_57_cast, y = denom_57_cast)[name = tensor("out_57_cast")]; + tensor var_2252_to_fp16 = const()[name = tensor("op_2252_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183290496)))]; + tensor var_2253_cast = add(x = out_57_cast, y = var_2252_to_fp16)[name = tensor("op_2253_cast")]; + tensor var_2255_to_fp16 = const()[name = tensor("op_2255_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183293120)))]; + tensor hidden_states_97_cast = mul(x = var_2253_cast, y = var_2255_to_fp16)[name = tensor("hidden_states_97_cast")]; + tensor var_2262 = const()[name = tensor("op_2262"), val = tensor([1, 1])]; + tensor var_2264 = const()[name = tensor("op_2264"), val = tensor([1, 1])]; + tensor q_39_pad_type_0 = const()[name = tensor("q_39_pad_type_0"), val = tensor("custom")]; + tensor q_39_pad_0 = const()[name = tensor("q_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183295744))), lut = tensor([-0x1.7p-7, 0x1.6fcp-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_39_cast = conv(dilations = var_2264, groups = var_1186, pad = q_39_pad_0, pad_type = q_39_pad_type_0, strides = var_2262, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_97_cast)[name = tensor("q_39_cast")]; + tensor var_2268 = const()[name = tensor("op_2268"), val = tensor([1, 1])]; + tensor var_2270 = const()[name = tensor("op_2270"), val = tensor([1, 1])]; + tensor k_39_pad_type_0 = const()[name = tensor("k_39_pad_type_0"), val = tensor("custom")]; + tensor k_39_pad_0 = const()[name = tensor("k_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183500608))), lut = tensor([-0x1.cbcp-7, -0x1.22p-8, 0x1.214p-8, 0x1.cbcp-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_39_cast = conv(dilations = var_2270, groups = var_1186, pad = k_39_pad_0, pad_type = k_39_pad_type_0, strides = var_2268, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_39_cast")]; + tensor var_2274 = const()[name = tensor("op_2274"), val = tensor([1, 1])]; + tensor var_2276 = const()[name = tensor("op_2276"), val = tensor([1, 1])]; + tensor v_39_pad_type_0 = const()[name = tensor("v_39_pad_type_0"), val = tensor("custom")]; + tensor v_39_pad_0 = const()[name = tensor("v_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184156032))), lut = tensor([-0x1.1e8p-7, 0x1.1e8p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_39_cast = conv(dilations = var_2276, groups = var_1186, pad = 
v_39_pad_0, pad_type = v_39_pad_type_0, strides = var_2274, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_39_cast")]; + tensor var_2280 = const()[name = tensor("op_2280"), val = tensor([2, 20, 64, -1])]; + tensor var_2281_cast = reshape(shape = var_2280, x = q_39_cast)[name = tensor("op_2281_cast")]; + tensor var_2282 = const()[name = tensor("op_2282"), val = tensor([2, 20, 64, -1])]; + tensor var_2283_cast = reshape(shape = var_2282, x = k_39_cast)[name = tensor("op_2283_cast")]; + tensor var_2284 = const()[name = tensor("op_2284"), val = tensor([2, 20, 64, -1])]; + tensor var_2285_cast = reshape(shape = var_2284, x = v_39_cast)[name = tensor("op_2285_cast")]; + tensor attn_weights_77_transpose_x_0 = const()[name = tensor("attn_weights_77_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_77_transpose_y_0 = const()[name = tensor("attn_weights_77_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_77_cast = matmul(transpose_x = attn_weights_77_transpose_x_0, transpose_y = attn_weights_77_transpose_y_0, x = var_2281_cast, y = var_2283_cast)[name = tensor("attn_weights_77_cast")]; + tensor attn_weights_79_cast = mul(x = attn_weights_77_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_79_cast")]; + tensor var_2289_cast = softmax(axis = var_1170, x = attn_weights_79_cast)[name = tensor("op_2289_cast")]; + tensor attn_39_transpose_x_0 = const()[name = tensor("attn_39_transpose_x_0"), val = tensor(false)]; + tensor attn_39_transpose_y_0 = const()[name = tensor("attn_39_transpose_y_0"), val = tensor(true)]; + tensor attn_39_cast = matmul(transpose_x = attn_39_transpose_x_0, transpose_y = attn_39_transpose_y_0, x = var_2285_cast, y = var_2289_cast)[name = tensor("attn_39_cast")]; + tensor var_2293 = const()[name = tensor("op_2293"), val = tensor([2, 1280, 1, -1])]; + tensor input_173_cast = reshape(shape = var_2293, x = attn_39_cast)[name = tensor("input_173_cast")]; + tensor var_2298 = const()[name = tensor("op_2298"), val = tensor([1, 1])]; + tensor var_2300 = const()[name = tensor("op_2300"), val = tensor([1, 1])]; + tensor var_2302_pad_type_0 = const()[name = tensor("op_2302_pad_type_0"), val = tensor("custom")]; + tensor var_2302_pad_0 = const()[name = tensor("op_2302_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184483776))), lut = tensor([-0x1.46cp-8, 0x1.48p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184688640)))]; + tensor var_2302_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_2300, groups = var_1186, pad = var_2302_pad_0, pad_type = var_2302_pad_type_0, strides = var_2298, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_173_cast)[name = tensor("op_2302_cast")]; + tensor inputs_59_cast = add(x = var_2302_cast, y = inputs_57_cast)[name = tensor("inputs_59_cast")]; + 
tensor var_2306 = const()[name = tensor("op_2306"), val = tensor([1])]; + tensor channels_mean_59_cast = reduce_mean(axes = var_2306, keep_dims = var_1181, x = inputs_59_cast)[name = tensor("channels_mean_59_cast")]; + tensor zero_mean_59_cast = sub(x = inputs_59_cast, y = channels_mean_59_cast)[name = tensor("zero_mean_59_cast")]; + tensor zero_mean_sq_59_cast = mul(x = zero_mean_59_cast, y = zero_mean_59_cast)[name = tensor("zero_mean_sq_59_cast")]; + tensor var_2310 = const()[name = tensor("op_2310"), val = tensor([1])]; + tensor var_2311_cast = reduce_mean(axes = var_2310, keep_dims = var_1181, x = zero_mean_sq_59_cast)[name = tensor("op_2311_cast")]; + tensor var_2312_to_fp16 = const()[name = tensor("op_2312_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2313_cast = add(x = var_2311_cast, y = var_2312_to_fp16)[name = tensor("op_2313_cast")]; + tensor denom_59_epsilon_0_to_fp16 = const()[name = tensor("denom_59_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_59_cast = rsqrt(epsilon = denom_59_epsilon_0_to_fp16, x = var_2313_cast)[name = tensor("denom_59_cast")]; + tensor out_59_cast = mul(x = zero_mean_59_cast, y = denom_59_cast)[name = tensor("out_59_cast")]; + tensor var_2317_to_fp16 = const()[name = tensor("op_2317_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184691264)))]; + tensor var_2318_cast = add(x = out_59_cast, y = var_2317_to_fp16)[name = tensor("op_2318_cast")]; + tensor var_2320_to_fp16 = const()[name = tensor("op_2320_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184693888)))]; + tensor input_175_cast = mul(x = var_2318_cast, y = var_2320_to_fp16)[name = tensor("input_175_cast")]; + tensor var_2328 = const()[name = tensor("op_2328"), val = tensor([1, 1])]; + tensor var_2330 = const()[name = tensor("op_2330"), val = tensor([1, 1])]; + tensor var_2332_pad_type_0 = const()[name = tensor("op_2332_pad_type_0"), val = tensor("custom")]; + tensor var_2332_pad_0 = const()[name = tensor("op_2332_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184696512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(194526976))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(194527168)))]; + tensor var_2332_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_2330, groups = var_1186, pad = var_2332_pad_0, pad_type = var_2332_pad_type_0, strides = var_2328, weight = down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_175_cast)[name = tensor("op_2332_cast")]; + tensor var_2333_split_sizes_0 = const()[name = tensor("op_2333_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2333_axis_0 = const()[name = tensor("op_2333_axis_0"), val = tensor(1)]; + tensor var_2333_cast_0, tensor var_2333_cast_1 = split(axis = var_2333_axis_0, split_sizes = var_2333_split_sizes_0, 
x = var_2332_cast)[name = tensor("op_2333_cast")]; + tensor var_2335_mode_0 = const()[name = tensor("op_2335_mode_0"), val = tensor("EXACT")]; + tensor var_2335_cast = gelu(mode = var_2335_mode_0, x = var_2333_cast_1)[name = tensor("op_2335_cast")]; + tensor input_177_cast = mul(x = var_2333_cast_0, y = var_2335_cast)[name = tensor("input_177_cast")]; + tensor var_2339 = const()[name = tensor("op_2339"), val = tensor([1, 1])]; + tensor var_2341 = const()[name = tensor("op_2341"), val = tensor([1, 1])]; + tensor var_2343_pad_type_0 = const()[name = tensor("op_2343_pad_type_0"), val = tensor("custom")]; + tensor var_2343_pad_0 = const()[name = tensor("op_2343_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(194547712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197824576))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197824704)))]; + tensor var_2343_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_2341, groups = var_1186, pad = var_2343_pad_0, pad_type = var_2343_pad_type_0, strides = var_2339, weight = down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_177_cast)[name = tensor("op_2343_cast")]; + tensor inputs_61_cast = add(x = var_2343_cast, y = inputs_59_cast)[name = tensor("inputs_61_cast")]; + tensor var_2353 = const()[name = tensor("op_2353"), val = tensor([1])]; + tensor channels_mean_61_cast = reduce_mean(axes = var_2353, keep_dims = var_1181, x = inputs_61_cast)[name = tensor("channels_mean_61_cast")]; + tensor zero_mean_61_cast = sub(x = inputs_61_cast, y = channels_mean_61_cast)[name = tensor("zero_mean_61_cast")]; + tensor zero_mean_sq_61_cast = mul(x = zero_mean_61_cast, y = zero_mean_61_cast)[name = tensor("zero_mean_sq_61_cast")]; + tensor var_2357 = const()[name = tensor("op_2357"), val = tensor([1])]; + tensor var_2358_cast = reduce_mean(axes = var_2357, keep_dims = var_1181, x = zero_mean_sq_61_cast)[name = tensor("op_2358_cast")]; + tensor var_2359_to_fp16 = const()[name = tensor("op_2359_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2360_cast = add(x = var_2358_cast, y = var_2359_to_fp16)[name = tensor("op_2360_cast")]; + tensor denom_61_epsilon_0_to_fp16 = const()[name = tensor("denom_61_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_61_cast = rsqrt(epsilon = denom_61_epsilon_0_to_fp16, x = var_2360_cast)[name = tensor("denom_61_cast")]; + tensor out_61_cast = mul(x = zero_mean_61_cast, y = denom_61_cast)[name = tensor("out_61_cast")]; + tensor var_2364_to_fp16 = const()[name = tensor("op_2364_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197827328)))]; + tensor var_2365_cast = add(x = out_61_cast, y = var_2364_to_fp16)[name = tensor("op_2365_cast")]; + tensor var_2367_to_fp16 = const()[name = tensor("op_2367_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(197829952)))]; + tensor hidden_states_101_cast = mul(x = var_2365_cast, y = var_2367_to_fp16)[name = tensor("hidden_states_101_cast")]; + tensor var_2374 = const()[name = tensor("op_2374"), val = tensor([1, 1])]; + tensor var_2376 = const()[name = tensor("op_2376"), val = tensor([1, 1])]; + tensor q_41_pad_type_0 = const()[name = tensor("q_41_pad_type_0"), val = tensor("custom")]; + tensor q_41_pad_0 = const()[name = tensor("q_41_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197832576))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(198651840))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_41_cast = conv(dilations = var_2376, groups = var_1186, pad = q_41_pad_0, pad_type = q_41_pad_type_0, strides = var_2374, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_101_cast)[name = tensor("q_41_cast")]; + tensor var_2380 = const()[name = tensor("op_2380"), val = tensor([1, 1])]; + tensor var_2382 = const()[name = tensor("op_2382"), val = tensor([1, 1])]; + tensor k_41_pad_type_0 = const()[name = tensor("k_41_pad_type_0"), val = tensor("custom")]; + tensor k_41_pad_0 = const()[name = tensor("k_41_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(198651968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(199471232))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_41_cast = conv(dilations = var_2382, groups = var_1186, pad = k_41_pad_0, pad_type = k_41_pad_type_0, strides = var_2380, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_101_cast)[name = tensor("k_41_cast")]; + tensor var_2386 = const()[name = tensor("op_2386"), val = tensor([1, 1])]; + tensor var_2388 = const()[name = tensor("op_2388"), val = tensor([1, 1])]; + tensor v_41_pad_type_0 = const()[name = tensor("v_41_pad_type_0"), val = tensor("custom")]; + tensor v_41_pad_0 = const()[name = tensor("v_41_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(199471360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(200290624))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_41_cast = conv(dilations = var_2388, groups = var_1186, pad = v_41_pad_0, pad_type = v_41_pad_type_0, strides = var_2386, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_101_cast)[name = tensor("v_41_cast")]; + tensor var_2392 = const()[name = tensor("op_2392"), val = tensor([2, 20, 64, -1])]; + tensor var_2393_cast = reshape(shape = var_2392, x = 
q_41_cast)[name = tensor("op_2393_cast")]; + tensor var_2394 = const()[name = tensor("op_2394"), val = tensor([2, 20, 64, -1])]; + tensor var_2395_cast = reshape(shape = var_2394, x = k_41_cast)[name = tensor("op_2395_cast")]; + tensor var_2396 = const()[name = tensor("op_2396"), val = tensor([2, 20, 64, -1])]; + tensor var_2397_cast = reshape(shape = var_2396, x = v_41_cast)[name = tensor("op_2397_cast")]; + tensor attn_weights_81_transpose_x_0 = const()[name = tensor("attn_weights_81_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_81_transpose_y_0 = const()[name = tensor("attn_weights_81_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_81_cast = matmul(transpose_x = attn_weights_81_transpose_x_0, transpose_y = attn_weights_81_transpose_y_0, x = var_2393_cast, y = var_2395_cast)[name = tensor("attn_weights_81_cast")]; + tensor attn_weights_83_cast = mul(x = attn_weights_81_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_83_cast")]; + tensor var_2401_cast = softmax(axis = var_1170, x = attn_weights_83_cast)[name = tensor("op_2401_cast")]; + tensor attn_41_transpose_x_0 = const()[name = tensor("attn_41_transpose_x_0"), val = tensor(false)]; + tensor attn_41_transpose_y_0 = const()[name = tensor("attn_41_transpose_y_0"), val = tensor(true)]; + tensor attn_41_cast = matmul(transpose_x = attn_41_transpose_x_0, transpose_y = attn_41_transpose_y_0, x = var_2397_cast, y = var_2401_cast)[name = tensor("attn_41_cast")]; + tensor var_2405 = const()[name = tensor("op_2405"), val = tensor([2, 1280, 1, -1])]; + tensor input_179_cast = reshape(shape = var_2405, x = attn_41_cast)[name = tensor("input_179_cast")]; + tensor var_2410 = const()[name = tensor("op_2410"), val = tensor([1, 1])]; + tensor var_2412 = const()[name = tensor("op_2412"), val = tensor([1, 1])]; + tensor var_2414_pad_type_0 = const()[name = tensor("op_2414_pad_type_0"), val = tensor("custom")]; + tensor var_2414_pad_0 = const()[name = tensor("op_2414_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(200290752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201110016))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201110144)))]; + tensor var_2414_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_2412, groups = var_1186, pad = var_2414_pad_0, pad_type = var_2414_pad_type_0, strides = var_2410, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_179_cast)[name = tensor("op_2414_cast")]; + tensor inputs_63_cast = add(x = var_2414_cast, y = inputs_61_cast)[name = tensor("inputs_63_cast")]; + tensor var_2418 = const()[name = tensor("op_2418"), val = tensor([1])]; + tensor channels_mean_63_cast = reduce_mean(axes = var_2418, keep_dims = var_1181, x = inputs_63_cast)[name = tensor("channels_mean_63_cast")]; + tensor zero_mean_63_cast = sub(x = inputs_63_cast, y = 
channels_mean_63_cast)[name = tensor("zero_mean_63_cast")]; + tensor zero_mean_sq_63_cast = mul(x = zero_mean_63_cast, y = zero_mean_63_cast)[name = tensor("zero_mean_sq_63_cast")]; + tensor var_2422 = const()[name = tensor("op_2422"), val = tensor([1])]; + tensor var_2423_cast = reduce_mean(axes = var_2422, keep_dims = var_1181, x = zero_mean_sq_63_cast)[name = tensor("op_2423_cast")]; + tensor var_2424_to_fp16 = const()[name = tensor("op_2424_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2425_cast = add(x = var_2423_cast, y = var_2424_to_fp16)[name = tensor("op_2425_cast")]; + tensor denom_63_epsilon_0_to_fp16 = const()[name = tensor("denom_63_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_63_cast = rsqrt(epsilon = denom_63_epsilon_0_to_fp16, x = var_2425_cast)[name = tensor("denom_63_cast")]; + tensor out_63_cast = mul(x = zero_mean_63_cast, y = denom_63_cast)[name = tensor("out_63_cast")]; + tensor var_2429_to_fp16 = const()[name = tensor("op_2429_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201112768)))]; + tensor var_2430_cast = add(x = out_63_cast, y = var_2429_to_fp16)[name = tensor("op_2430_cast")]; + tensor var_2432_to_fp16 = const()[name = tensor("op_2432_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201115392)))]; + tensor hidden_states_103_cast = mul(x = var_2430_cast, y = var_2432_to_fp16)[name = tensor("hidden_states_103_cast")]; + tensor var_2439 = const()[name = tensor("op_2439"), val = tensor([1, 1])]; + tensor var_2441 = const()[name = tensor("op_2441"), val = tensor([1, 1])]; + tensor q_43_pad_type_0 = const()[name = tensor("q_43_pad_type_0"), val = tensor("custom")]; + tensor q_43_pad_0 = const()[name = tensor("q_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201118016))), lut = tensor([-0x1.61p-7, 0x1.608p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_43_cast = conv(dilations = var_2441, groups = var_1186, pad = q_43_pad_0, pad_type = q_43_pad_type_0, strides = var_2439, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_103_cast)[name = tensor("q_43_cast")]; + tensor var_2445 = const()[name = tensor("op_2445"), val = tensor([1, 1])]; + tensor var_2447 = const()[name = tensor("op_2447"), val = tensor([1, 1])]; + tensor k_43_pad_type_0 = const()[name = tensor("k_43_pad_type_0"), val = tensor("custom")]; + tensor k_43_pad_0 = const()[name = tensor("k_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201322880))), lut = tensor([-0x1.c8p-8, 0x1.c8cp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_43_cast = conv(dilations = var_2447, groups = var_1186, pad = k_43_pad_0, pad_type = k_43_pad_type_0, strides = var_2445, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_43_cast")]; + tensor 
var_2451 = const()[name = tensor("op_2451"), val = tensor([1, 1])]; + tensor var_2453 = const()[name = tensor("op_2453"), val = tensor([1, 1])]; + tensor v_43_pad_type_0 = const()[name = tensor("v_43_pad_type_0"), val = tensor("custom")]; + tensor v_43_pad_0 = const()[name = tensor("v_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201650624))), lut = tensor([-0x1.e5cp-8, 0x1.e78p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_43_cast = conv(dilations = var_2453, groups = var_1186, pad = v_43_pad_0, pad_type = v_43_pad_type_0, strides = var_2451, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_43_cast")]; + tensor var_2457 = const()[name = tensor("op_2457"), val = tensor([2, 20, 64, -1])]; + tensor var_2458_cast = reshape(shape = var_2457, x = q_43_cast)[name = tensor("op_2458_cast")]; + tensor var_2459 = const()[name = tensor("op_2459"), val = tensor([2, 20, 64, -1])]; + tensor var_2460_cast = reshape(shape = var_2459, x = k_43_cast)[name = tensor("op_2460_cast")]; + tensor var_2461 = const()[name = tensor("op_2461"), val = tensor([2, 20, 64, -1])]; + tensor var_2462_cast = reshape(shape = var_2461, x = v_43_cast)[name = tensor("op_2462_cast")]; + tensor attn_weights_85_transpose_x_0 = const()[name = tensor("attn_weights_85_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_85_transpose_y_0 = const()[name = tensor("attn_weights_85_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_85_cast = matmul(transpose_x = attn_weights_85_transpose_x_0, transpose_y = attn_weights_85_transpose_y_0, x = var_2458_cast, y = var_2460_cast)[name = tensor("attn_weights_85_cast")]; + tensor attn_weights_87_cast = mul(x = attn_weights_85_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_87_cast")]; + tensor var_2466_cast = softmax(axis = var_1170, x = attn_weights_87_cast)[name = tensor("op_2466_cast")]; + tensor attn_43_transpose_x_0 = const()[name = tensor("attn_43_transpose_x_0"), val = tensor(false)]; + tensor attn_43_transpose_y_0 = const()[name = tensor("attn_43_transpose_y_0"), val = tensor(true)]; + tensor attn_43_cast = matmul(transpose_x = attn_43_transpose_x_0, transpose_y = attn_43_transpose_y_0, x = var_2462_cast, y = var_2466_cast)[name = tensor("attn_43_cast")]; + tensor var_2470 = const()[name = tensor("op_2470"), val = tensor([2, 1280, 1, -1])]; + tensor input_181_cast = reshape(shape = var_2470, x = attn_43_cast)[name = tensor("input_181_cast")]; + tensor var_2475 = const()[name = tensor("op_2475"), val = tensor([1, 1])]; + tensor var_2477 = const()[name = tensor("op_2477"), val = tensor([1, 1])]; + tensor var_2479_pad_type_0 = const()[name = tensor("op_2479_pad_type_0"), val = tensor("custom")]; + tensor var_2479_pad_0 = const()[name = tensor("op_2479_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201978368))), lut = tensor([-0x1.1a8p-8, 0x1.1a8p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = 
tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(202183232)))]; + tensor var_2479_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_2477, groups = var_1186, pad = var_2479_pad_0, pad_type = var_2479_pad_type_0, strides = var_2475, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_181_cast)[name = tensor("op_2479_cast")]; + tensor inputs_65_cast = add(x = var_2479_cast, y = inputs_63_cast)[name = tensor("inputs_65_cast")]; + tensor var_2483 = const()[name = tensor("op_2483"), val = tensor([1])]; + tensor channels_mean_65_cast = reduce_mean(axes = var_2483, keep_dims = var_1181, x = inputs_65_cast)[name = tensor("channels_mean_65_cast")]; + tensor zero_mean_65_cast = sub(x = inputs_65_cast, y = channels_mean_65_cast)[name = tensor("zero_mean_65_cast")]; + tensor zero_mean_sq_65_cast = mul(x = zero_mean_65_cast, y = zero_mean_65_cast)[name = tensor("zero_mean_sq_65_cast")]; + tensor var_2487 = const()[name = tensor("op_2487"), val = tensor([1])]; + tensor var_2488_cast = reduce_mean(axes = var_2487, keep_dims = var_1181, x = zero_mean_sq_65_cast)[name = tensor("op_2488_cast")]; + tensor var_2489_to_fp16 = const()[name = tensor("op_2489_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2490_cast = add(x = var_2488_cast, y = var_2489_to_fp16)[name = tensor("op_2490_cast")]; + tensor denom_65_epsilon_0_to_fp16 = const()[name = tensor("denom_65_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_65_cast = rsqrt(epsilon = denom_65_epsilon_0_to_fp16, x = var_2490_cast)[name = tensor("denom_65_cast")]; + tensor out_65_cast = mul(x = zero_mean_65_cast, y = denom_65_cast)[name = tensor("out_65_cast")]; + tensor var_2494_to_fp16 = const()[name = tensor("op_2494_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(202185856)))]; + tensor var_2495_cast = add(x = out_65_cast, y = var_2494_to_fp16)[name = tensor("op_2495_cast")]; + tensor var_2497_to_fp16 = const()[name = tensor("op_2497_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(202188480)))]; + tensor input_183_cast = mul(x = var_2495_cast, y = var_2497_to_fp16)[name = tensor("input_183_cast")]; + tensor var_2505 = const()[name = tensor("op_2505"), val = tensor([1, 1])]; + tensor var_2507 = const()[name = tensor("op_2507"), val = tensor([1, 1])]; + tensor var_2509_pad_type_0 = const()[name = tensor("op_2509_pad_type_0"), val = tensor("custom")]; + tensor var_2509_pad_0 = const()[name = tensor("op_2509_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(202191104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(212021568))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = 
tensor("down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(212021760)))]; + tensor var_2509_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_2507, groups = var_1186, pad = var_2509_pad_0, pad_type = var_2509_pad_type_0, strides = var_2505, weight = down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_183_cast)[name = tensor("op_2509_cast")]; + tensor var_2510_split_sizes_0 = const()[name = tensor("op_2510_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2510_axis_0 = const()[name = tensor("op_2510_axis_0"), val = tensor(1)]; + tensor var_2510_cast_0, tensor var_2510_cast_1 = split(axis = var_2510_axis_0, split_sizes = var_2510_split_sizes_0, x = var_2509_cast)[name = tensor("op_2510_cast")]; + tensor var_2512_mode_0 = const()[name = tensor("op_2512_mode_0"), val = tensor("EXACT")]; + tensor var_2512_cast = gelu(mode = var_2512_mode_0, x = var_2510_cast_1)[name = tensor("op_2512_cast")]; + tensor input_185_cast = mul(x = var_2510_cast_0, y = var_2512_cast)[name = tensor("input_185_cast")]; + tensor var_2516 = const()[name = tensor("op_2516"), val = tensor([1, 1])]; + tensor var_2518 = const()[name = tensor("op_2518"), val = tensor([1, 1])]; + tensor var_2520_pad_type_0 = const()[name = tensor("op_2520_pad_type_0"), val = tensor("custom")]; + tensor var_2520_pad_0 = const()[name = tensor("op_2520_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(212042304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216957568))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216957760)))]; + tensor var_2520_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_2518, groups = var_1186, pad = var_2520_pad_0, pad_type = var_2520_pad_type_0, strides = var_2516, weight = down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_185_cast)[name = tensor("op_2520_cast")]; + tensor inputs_67_cast = add(x = var_2520_cast, y = inputs_65_cast)[name = tensor("inputs_67_cast")]; + tensor var_2530 = const()[name = tensor("op_2530"), val = tensor([1])]; + tensor channels_mean_67_cast = reduce_mean(axes = var_2530, keep_dims = var_1181, x = inputs_67_cast)[name = tensor("channels_mean_67_cast")]; + tensor zero_mean_67_cast = sub(x = inputs_67_cast, y = channels_mean_67_cast)[name = tensor("zero_mean_67_cast")]; + tensor zero_mean_sq_67_cast = mul(x = zero_mean_67_cast, y = zero_mean_67_cast)[name = tensor("zero_mean_sq_67_cast")]; + tensor var_2534 = const()[name = tensor("op_2534"), val = tensor([1])]; + tensor var_2535_cast = reduce_mean(axes = var_2534, keep_dims = var_1181, x = zero_mean_sq_67_cast)[name = tensor("op_2535_cast")]; + tensor var_2536_to_fp16 = const()[name = tensor("op_2536_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor var_2537_cast = add(x = var_2535_cast, y = var_2536_to_fp16)[name = tensor("op_2537_cast")]; + tensor denom_67_epsilon_0_to_fp16 = const()[name = tensor("denom_67_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_67_cast = rsqrt(epsilon = denom_67_epsilon_0_to_fp16, x = var_2537_cast)[name = tensor("denom_67_cast")]; + tensor out_67_cast = mul(x = zero_mean_67_cast, y = denom_67_cast)[name = tensor("out_67_cast")]; + tensor var_2541_to_fp16 = const()[name = tensor("op_2541_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216960384)))]; + tensor var_2542_cast = add(x = out_67_cast, y = var_2541_to_fp16)[name = tensor("op_2542_cast")]; + tensor var_2544_to_fp16 = const()[name = tensor("op_2544_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216963008)))]; + tensor hidden_states_107_cast = mul(x = var_2542_cast, y = var_2544_to_fp16)[name = tensor("hidden_states_107_cast")]; + tensor var_2551 = const()[name = tensor("op_2551"), val = tensor([1, 1])]; + tensor var_2553 = const()[name = tensor("op_2553"), val = tensor([1, 1])]; + tensor q_45_pad_type_0 = const()[name = tensor("q_45_pad_type_0"), val = tensor("custom")]; + tensor q_45_pad_0 = const()[name = tensor("q_45_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216965632))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(217784896))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_45_cast = conv(dilations = var_2553, groups = var_1186, pad = q_45_pad_0, pad_type = q_45_pad_type_0, strides = var_2551, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_107_cast)[name = tensor("q_45_cast")]; + tensor var_2557 = const()[name = tensor("op_2557"), val = tensor([1, 1])]; + tensor var_2559 = const()[name = tensor("op_2559"), val = tensor([1, 1])]; + tensor k_45_pad_type_0 = const()[name = tensor("k_45_pad_type_0"), val = tensor("custom")]; + tensor k_45_pad_0 = const()[name = tensor("k_45_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(217785024))), lut = tensor([-0x1.09p-5, -0x1.404p-7, 0x1.404p-7, 0x1.08cp-5]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_45_cast = conv(dilations = var_2559, groups = var_1186, pad = k_45_pad_0, pad_type = k_45_pad_type_0, strides = var_2557, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_107_cast)[name = tensor("k_45_cast")]; + tensor var_2563 = const()[name = tensor("op_2563"), val = tensor([1, 1])]; + tensor var_2565 = const()[name = tensor("op_2565"), val = tensor([1, 1])]; + tensor v_45_pad_type_0 = const()[name = tensor("v_45_pad_type_0"), val = tensor("custom")]; + tensor v_45_pad_0 = const()[name = tensor("v_45_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(218194688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219013952))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_45_cast = conv(dilations = var_2565, groups = var_1186, pad = v_45_pad_0, pad_type = v_45_pad_type_0, strides = var_2563, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_107_cast)[name = tensor("v_45_cast")]; + tensor var_2569 = const()[name = tensor("op_2569"), val = tensor([2, 20, 64, -1])]; + tensor var_2570_cast = reshape(shape = var_2569, x = q_45_cast)[name = tensor("op_2570_cast")]; + tensor var_2571 = const()[name = tensor("op_2571"), val = tensor([2, 20, 64, -1])]; + tensor var_2572_cast = reshape(shape = var_2571, x = k_45_cast)[name = tensor("op_2572_cast")]; + tensor var_2573 = const()[name = tensor("op_2573"), val = tensor([2, 20, 64, -1])]; + tensor var_2574_cast = reshape(shape = var_2573, x = v_45_cast)[name = tensor("op_2574_cast")]; + tensor attn_weights_89_transpose_x_0 = const()[name = tensor("attn_weights_89_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_89_transpose_y_0 = const()[name = tensor("attn_weights_89_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_89_cast = matmul(transpose_x = attn_weights_89_transpose_x_0, transpose_y = attn_weights_89_transpose_y_0, x = var_2570_cast, y = var_2572_cast)[name = tensor("attn_weights_89_cast")]; + tensor attn_weights_91_cast = mul(x = attn_weights_89_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_91_cast")]; + tensor var_2578_cast = softmax(axis = var_1170, x = attn_weights_91_cast)[name = tensor("op_2578_cast")]; + tensor attn_45_transpose_x_0 = const()[name = tensor("attn_45_transpose_x_0"), val = tensor(false)]; + tensor attn_45_transpose_y_0 = const()[name = tensor("attn_45_transpose_y_0"), val = tensor(true)]; + tensor attn_45_cast = matmul(transpose_x = attn_45_transpose_x_0, transpose_y = attn_45_transpose_y_0, x = var_2574_cast, y = var_2578_cast)[name = tensor("attn_45_cast")]; + tensor var_2582 = const()[name = tensor("op_2582"), val = tensor([2, 1280, 1, -1])]; + tensor input_187_cast = reshape(shape = var_2582, x = attn_45_cast)[name = tensor("input_187_cast")]; + tensor var_2587 = const()[name = tensor("op_2587"), val = tensor([1, 1])]; + tensor var_2589 = const()[name = tensor("op_2589"), val = tensor([1, 1])]; + tensor var_2591_pad_type_0 = const()[name = tensor("op_2591_pad_type_0"), val = tensor("custom")]; + tensor var_2591_pad_0 = const()[name = tensor("op_2591_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219014080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219833344))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = 
tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219833472)))]; + tensor var_2591_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_2589, groups = var_1186, pad = var_2591_pad_0, pad_type = var_2591_pad_type_0, strides = var_2587, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_187_cast)[name = tensor("op_2591_cast")]; + tensor inputs_69_cast = add(x = var_2591_cast, y = inputs_67_cast)[name = tensor("inputs_69_cast")]; + tensor var_2595 = const()[name = tensor("op_2595"), val = tensor([1])]; + tensor channels_mean_69_cast = reduce_mean(axes = var_2595, keep_dims = var_1181, x = inputs_69_cast)[name = tensor("channels_mean_69_cast")]; + tensor zero_mean_69_cast = sub(x = inputs_69_cast, y = channels_mean_69_cast)[name = tensor("zero_mean_69_cast")]; + tensor zero_mean_sq_69_cast = mul(x = zero_mean_69_cast, y = zero_mean_69_cast)[name = tensor("zero_mean_sq_69_cast")]; + tensor var_2599 = const()[name = tensor("op_2599"), val = tensor([1])]; + tensor var_2600_cast = reduce_mean(axes = var_2599, keep_dims = var_1181, x = zero_mean_sq_69_cast)[name = tensor("op_2600_cast")]; + tensor var_2601_to_fp16 = const()[name = tensor("op_2601_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2602_cast = add(x = var_2600_cast, y = var_2601_to_fp16)[name = tensor("op_2602_cast")]; + tensor denom_69_epsilon_0_to_fp16 = const()[name = tensor("denom_69_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_69_cast = rsqrt(epsilon = denom_69_epsilon_0_to_fp16, x = var_2602_cast)[name = tensor("denom_69_cast")]; + tensor out_69_cast = mul(x = zero_mean_69_cast, y = denom_69_cast)[name = tensor("out_69_cast")]; + tensor var_2606_to_fp16 = const()[name = tensor("op_2606_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219836096)))]; + tensor var_2607_cast = add(x = out_69_cast, y = var_2606_to_fp16)[name = tensor("op_2607_cast")]; + tensor var_2609_to_fp16 = const()[name = tensor("op_2609_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219838720)))]; + tensor hidden_states_109_cast = mul(x = var_2607_cast, y = var_2609_to_fp16)[name = tensor("hidden_states_109_cast")]; + tensor var_2616 = const()[name = tensor("op_2616"), val = tensor([1, 1])]; + tensor var_2618 = const()[name = tensor("op_2618"), val = tensor([1, 1])]; + tensor q_47_pad_type_0 = const()[name = tensor("q_47_pad_type_0"), val = tensor("custom")]; + tensor q_47_pad_0 = const()[name = tensor("q_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219841344))), lut = tensor([-0x1.61p-7, 0x1.614p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_47_cast = conv(dilations = var_2618, groups = var_1186, pad = q_47_pad_0, pad_type = q_47_pad_type_0, strides = var_2616, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_109_cast)[name = tensor("q_47_cast")]; + tensor var_2622 = const()[name = tensor("op_2622"), val = tensor([1, 1])]; + tensor 
var_2624 = const()[name = tensor("op_2624"), val = tensor([1, 1])]; + tensor k_47_pad_type_0 = const()[name = tensor("k_47_pad_type_0"), val = tensor("custom")]; + tensor k_47_pad_0 = const()[name = tensor("k_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(220046208))), lut = tensor([-0x1.c84p-8, 0x1.c98p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_47_cast = conv(dilations = var_2624, groups = var_1186, pad = k_47_pad_0, pad_type = k_47_pad_type_0, strides = var_2622, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_47_cast")]; + tensor var_2628 = const()[name = tensor("op_2628"), val = tensor([1, 1])]; + tensor var_2630 = const()[name = tensor("op_2630"), val = tensor([1, 1])]; + tensor v_47_pad_type_0 = const()[name = tensor("v_47_pad_type_0"), val = tensor("custom")]; + tensor v_47_pad_0 = const()[name = tensor("v_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(220373952))), lut = tensor([-0x1.e6cp-8, 0x1.e64p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_47_cast = conv(dilations = var_2630, groups = var_1186, pad = v_47_pad_0, pad_type = v_47_pad_type_0, strides = var_2628, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_47_cast")]; + tensor var_2634 = const()[name = tensor("op_2634"), val = tensor([2, 20, 64, -1])]; + tensor var_2635_cast = reshape(shape = var_2634, x = q_47_cast)[name = tensor("op_2635_cast")]; + tensor var_2636 = const()[name = tensor("op_2636"), val = tensor([2, 20, 64, -1])]; + tensor var_2637_cast = reshape(shape = var_2636, x = k_47_cast)[name = tensor("op_2637_cast")]; + tensor var_2638 = const()[name = tensor("op_2638"), val = tensor([2, 20, 64, -1])]; + tensor var_2639_cast = reshape(shape = var_2638, x = v_47_cast)[name = tensor("op_2639_cast")]; + tensor attn_weights_93_transpose_x_0 = const()[name = tensor("attn_weights_93_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_93_transpose_y_0 = const()[name = tensor("attn_weights_93_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_93_cast = matmul(transpose_x = attn_weights_93_transpose_x_0, transpose_y = attn_weights_93_transpose_y_0, x = var_2635_cast, y = var_2637_cast)[name = tensor("attn_weights_93_cast")]; + tensor attn_weights_95_cast = mul(x = attn_weights_93_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_95_cast")]; + tensor var_2643_cast = softmax(axis = var_1170, x = attn_weights_95_cast)[name = tensor("op_2643_cast")]; + tensor attn_47_transpose_x_0 = const()[name = tensor("attn_47_transpose_x_0"), val = tensor(false)]; + tensor attn_47_transpose_y_0 = const()[name = tensor("attn_47_transpose_y_0"), val = tensor(true)]; + tensor attn_47_cast = matmul(transpose_x = attn_47_transpose_x_0, transpose_y = attn_47_transpose_y_0, x = var_2639_cast, y = var_2643_cast)[name = 
tensor("attn_47_cast")]; + tensor var_2647 = const()[name = tensor("op_2647"), val = tensor([2, 1280, 1, -1])]; + tensor input_189_cast = reshape(shape = var_2647, x = attn_47_cast)[name = tensor("input_189_cast")]; + tensor var_2652 = const()[name = tensor("op_2652"), val = tensor([1, 1])]; + tensor var_2654 = const()[name = tensor("op_2654"), val = tensor([1, 1])]; + tensor var_2656_pad_type_0 = const()[name = tensor("op_2656_pad_type_0"), val = tensor("custom")]; + tensor var_2656_pad_0 = const()[name = tensor("op_2656_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(220701696))), lut = tensor([-0x1.214p-8, 0x1.21p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(220906560)))]; + tensor var_2656_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_2654, groups = var_1186, pad = var_2656_pad_0, pad_type = var_2656_pad_type_0, strides = var_2652, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_189_cast)[name = tensor("op_2656_cast")]; + tensor inputs_71_cast = add(x = var_2656_cast, y = inputs_69_cast)[name = tensor("inputs_71_cast")]; + tensor var_2660 = const()[name = tensor("op_2660"), val = tensor([1])]; + tensor channels_mean_71_cast = reduce_mean(axes = var_2660, keep_dims = var_1181, x = inputs_71_cast)[name = tensor("channels_mean_71_cast")]; + tensor zero_mean_71_cast = sub(x = inputs_71_cast, y = channels_mean_71_cast)[name = tensor("zero_mean_71_cast")]; + tensor zero_mean_sq_71_cast = mul(x = zero_mean_71_cast, y = zero_mean_71_cast)[name = tensor("zero_mean_sq_71_cast")]; + tensor var_2664 = const()[name = tensor("op_2664"), val = tensor([1])]; + tensor var_2665_cast = reduce_mean(axes = var_2664, keep_dims = var_1181, x = zero_mean_sq_71_cast)[name = tensor("op_2665_cast")]; + tensor var_2666_to_fp16 = const()[name = tensor("op_2666_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2667_cast = add(x = var_2665_cast, y = var_2666_to_fp16)[name = tensor("op_2667_cast")]; + tensor denom_71_epsilon_0_to_fp16 = const()[name = tensor("denom_71_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_71_cast = rsqrt(epsilon = denom_71_epsilon_0_to_fp16, x = var_2667_cast)[name = tensor("denom_71_cast")]; + tensor out_71_cast = mul(x = zero_mean_71_cast, y = denom_71_cast)[name = tensor("out_71_cast")]; + tensor var_2671_to_fp16 = const()[name = tensor("op_2671_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(220909184)))]; + tensor var_2672_cast = add(x = out_71_cast, y = var_2671_to_fp16)[name = tensor("op_2672_cast")]; + tensor var_2674_to_fp16 = const()[name = tensor("op_2674_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(220911808)))]; + tensor input_191_cast = mul(x = var_2672_cast, y = var_2674_to_fp16)[name = tensor("input_191_cast")]; + tensor var_2682 = const()[name = 
tensor("op_2682"), val = tensor([1, 1])]; + tensor var_2684 = const()[name = tensor("op_2684"), val = tensor([1, 1])]; + tensor var_2686_pad_type_0 = const()[name = tensor("op_2686_pad_type_0"), val = tensor("custom")]; + tensor var_2686_pad_0 = const()[name = tensor("op_2686_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(220914432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(227468096))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(227468224)))]; + tensor var_2686_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_2684, groups = var_1186, pad = var_2686_pad_0, pad_type = var_2686_pad_type_0, strides = var_2682, weight = down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_191_cast)[name = tensor("op_2686_cast")]; + tensor var_2687_split_sizes_0 = const()[name = tensor("op_2687_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2687_axis_0 = const()[name = tensor("op_2687_axis_0"), val = tensor(1)]; + tensor var_2687_cast_0, tensor var_2687_cast_1 = split(axis = var_2687_axis_0, split_sizes = var_2687_split_sizes_0, x = var_2686_cast)[name = tensor("op_2687_cast")]; + tensor var_2689_mode_0 = const()[name = tensor("op_2689_mode_0"), val = tensor("EXACT")]; + tensor var_2689_cast = gelu(mode = var_2689_mode_0, x = var_2687_cast_1)[name = tensor("op_2689_cast")]; + tensor input_193_cast = mul(x = var_2687_cast_0, y = var_2689_cast)[name = tensor("input_193_cast")]; + tensor var_2693 = const()[name = tensor("op_2693"), val = tensor([1, 1])]; + tensor var_2695 = const()[name = tensor("op_2695"), val = tensor([1, 1])]; + tensor var_2697_pad_type_0 = const()[name = tensor("op_2697_pad_type_0"), val = tensor("custom")]; + tensor var_2697_pad_0 = const()[name = tensor("op_2697_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(227488768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(232404032))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(232404224)))]; + tensor var_2697_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_2695, groups = var_1186, pad = var_2697_pad_0, pad_type = var_2697_pad_type_0, strides = var_2693, weight = down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = 
input_193_cast)[name = tensor("op_2697_cast")]; + tensor inputs_73_cast = add(x = var_2697_cast, y = inputs_71_cast)[name = tensor("inputs_73_cast")]; + tensor var_2707 = const()[name = tensor("op_2707"), val = tensor([1])]; + tensor channels_mean_73_cast = reduce_mean(axes = var_2707, keep_dims = var_1181, x = inputs_73_cast)[name = tensor("channels_mean_73_cast")]; + tensor zero_mean_73_cast = sub(x = inputs_73_cast, y = channels_mean_73_cast)[name = tensor("zero_mean_73_cast")]; + tensor zero_mean_sq_73_cast = mul(x = zero_mean_73_cast, y = zero_mean_73_cast)[name = tensor("zero_mean_sq_73_cast")]; + tensor var_2711 = const()[name = tensor("op_2711"), val = tensor([1])]; + tensor var_2712_cast = reduce_mean(axes = var_2711, keep_dims = var_1181, x = zero_mean_sq_73_cast)[name = tensor("op_2712_cast")]; + tensor var_2713_to_fp16 = const()[name = tensor("op_2713_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2714_cast = add(x = var_2712_cast, y = var_2713_to_fp16)[name = tensor("op_2714_cast")]; + tensor denom_73_epsilon_0_to_fp16 = const()[name = tensor("denom_73_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_73_cast = rsqrt(epsilon = denom_73_epsilon_0_to_fp16, x = var_2714_cast)[name = tensor("denom_73_cast")]; + tensor out_73_cast = mul(x = zero_mean_73_cast, y = denom_73_cast)[name = tensor("out_73_cast")]; + tensor var_2718_to_fp16 = const()[name = tensor("op_2718_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(232406848)))]; + tensor var_2719_cast = add(x = out_73_cast, y = var_2718_to_fp16)[name = tensor("op_2719_cast")]; + tensor var_2721_to_fp16 = const()[name = tensor("op_2721_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(232409472)))]; + tensor hidden_states_113_cast = mul(x = var_2719_cast, y = var_2721_to_fp16)[name = tensor("hidden_states_113_cast")]; + tensor var_2728 = const()[name = tensor("op_2728"), val = tensor([1, 1])]; + tensor var_2730 = const()[name = tensor("op_2730"), val = tensor([1, 1])]; + tensor q_49_pad_type_0 = const()[name = tensor("q_49_pad_type_0"), val = tensor("custom")]; + tensor q_49_pad_0 = const()[name = tensor("q_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(232412096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(233231360))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_49_cast = conv(dilations = var_2730, groups = var_1186, pad = q_49_pad_0, pad_type = q_49_pad_type_0, strides = var_2728, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_113_cast)[name = tensor("q_49_cast")]; + tensor var_2734 = const()[name = tensor("op_2734"), val = tensor([1, 1])]; + tensor var_2736 = const()[name = tensor("op_2736"), val = tensor([1, 1])]; + tensor k_49_pad_type_0 = const()[name = tensor("k_49_pad_type_0"), val = tensor("custom")]; + tensor k_49_pad_0 = const()[name = tensor("k_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(233231488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(234050752))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_49_cast = conv(dilations = var_2736, groups = var_1186, pad = k_49_pad_0, pad_type = k_49_pad_type_0, strides = var_2734, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_113_cast)[name = tensor("k_49_cast")]; + tensor var_2740 = const()[name = tensor("op_2740"), val = tensor([1, 1])]; + tensor var_2742 = const()[name = tensor("op_2742"), val = tensor([1, 1])]; + tensor v_49_pad_type_0 = const()[name = tensor("v_49_pad_type_0"), val = tensor("custom")]; + tensor v_49_pad_0 = const()[name = tensor("v_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(234050880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(234870144))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_49_cast = conv(dilations = var_2742, groups = var_1186, pad = v_49_pad_0, pad_type = v_49_pad_type_0, strides = var_2740, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_113_cast)[name = tensor("v_49_cast")]; + tensor var_2746 = const()[name = tensor("op_2746"), val = tensor([2, 20, 64, -1])]; + tensor var_2747_cast = reshape(shape = var_2746, x = q_49_cast)[name = tensor("op_2747_cast")]; + tensor var_2748 = const()[name = tensor("op_2748"), val = tensor([2, 20, 64, -1])]; + tensor var_2749_cast = reshape(shape = var_2748, x = k_49_cast)[name = tensor("op_2749_cast")]; + tensor var_2750 = const()[name = tensor("op_2750"), val = tensor([2, 20, 64, -1])]; + tensor var_2751_cast = reshape(shape = var_2750, x = v_49_cast)[name = tensor("op_2751_cast")]; + tensor attn_weights_97_transpose_x_0 = const()[name = tensor("attn_weights_97_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_97_transpose_y_0 = const()[name = tensor("attn_weights_97_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_97_cast = matmul(transpose_x = attn_weights_97_transpose_x_0, transpose_y = attn_weights_97_transpose_y_0, x = var_2747_cast, y = var_2749_cast)[name = tensor("attn_weights_97_cast")]; + tensor attn_weights_99_cast = mul(x = attn_weights_97_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_99_cast")]; + tensor var_2755_cast = softmax(axis = var_1170, x = attn_weights_99_cast)[name = tensor("op_2755_cast")]; + tensor attn_49_transpose_x_0 = const()[name = tensor("attn_49_transpose_x_0"), val = tensor(false)]; + tensor attn_49_transpose_y_0 = const()[name = tensor("attn_49_transpose_y_0"), val = tensor(true)]; + tensor attn_49_cast = matmul(transpose_x = attn_49_transpose_x_0, transpose_y = attn_49_transpose_y_0, x = var_2751_cast, y = var_2755_cast)[name = tensor("attn_49_cast")]; + tensor var_2759 = const()[name = tensor("op_2759"), val = tensor([2, 1280, 1, -1])]; + tensor input_195_cast = reshape(shape = var_2759, x = attn_49_cast)[name = tensor("input_195_cast")]; + tensor var_2764 = const()[name = tensor("op_2764"), val = tensor([1, 1])]; + tensor var_2766 = const()[name = 
tensor("op_2766"), val = tensor([1, 1])]; + tensor var_2768_pad_type_0 = const()[name = tensor("op_2768_pad_type_0"), val = tensor("custom")]; + tensor var_2768_pad_0 = const()[name = tensor("op_2768_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(234870272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235689536))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235689664)))]; + tensor var_2768_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_2766, groups = var_1186, pad = var_2768_pad_0, pad_type = var_2768_pad_type_0, strides = var_2764, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_195_cast)[name = tensor("op_2768_cast")]; + tensor inputs_75_cast = add(x = var_2768_cast, y = inputs_73_cast)[name = tensor("inputs_75_cast")]; + tensor var_2772 = const()[name = tensor("op_2772"), val = tensor([1])]; + tensor channels_mean_75_cast = reduce_mean(axes = var_2772, keep_dims = var_1181, x = inputs_75_cast)[name = tensor("channels_mean_75_cast")]; + tensor zero_mean_75_cast = sub(x = inputs_75_cast, y = channels_mean_75_cast)[name = tensor("zero_mean_75_cast")]; + tensor zero_mean_sq_75_cast = mul(x = zero_mean_75_cast, y = zero_mean_75_cast)[name = tensor("zero_mean_sq_75_cast")]; + tensor var_2776 = const()[name = tensor("op_2776"), val = tensor([1])]; + tensor var_2777_cast = reduce_mean(axes = var_2776, keep_dims = var_1181, x = zero_mean_sq_75_cast)[name = tensor("op_2777_cast")]; + tensor var_2778_to_fp16 = const()[name = tensor("op_2778_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2779_cast = add(x = var_2777_cast, y = var_2778_to_fp16)[name = tensor("op_2779_cast")]; + tensor denom_75_epsilon_0_to_fp16 = const()[name = tensor("denom_75_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_75_cast = rsqrt(epsilon = denom_75_epsilon_0_to_fp16, x = var_2779_cast)[name = tensor("denom_75_cast")]; + tensor out_75_cast = mul(x = zero_mean_75_cast, y = denom_75_cast)[name = tensor("out_75_cast")]; + tensor var_2783_to_fp16 = const()[name = tensor("op_2783_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235692288)))]; + tensor var_2784_cast = add(x = out_75_cast, y = var_2783_to_fp16)[name = tensor("op_2784_cast")]; + tensor var_2786_to_fp16 = const()[name = tensor("op_2786_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235694912)))]; + tensor hidden_states_115_cast = mul(x = var_2784_cast, y = var_2786_to_fp16)[name = tensor("hidden_states_115_cast")]; + tensor var_2793 = const()[name = tensor("op_2793"), val = tensor([1, 1])]; + tensor var_2795 = const()[name = tensor("op_2795"), val = tensor([1, 1])]; + tensor q_51_pad_type_0 = const()[name = tensor("q_51_pad_type_0"), val = tensor("custom")]; + tensor q_51_pad_0 = const()[name = 
tensor("q_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235697536))), lut = tensor([-0x1.60cp-7, 0x1.608p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_51_cast = conv(dilations = var_2795, groups = var_1186, pad = q_51_pad_0, pad_type = q_51_pad_type_0, strides = var_2793, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_115_cast)[name = tensor("q_51_cast")]; + tensor var_2799 = const()[name = tensor("op_2799"), val = tensor([1, 1])]; + tensor var_2801 = const()[name = tensor("op_2801"), val = tensor([1, 1])]; + tensor k_51_pad_type_0 = const()[name = tensor("k_51_pad_type_0"), val = tensor("custom")]; + tensor k_51_pad_0 = const()[name = tensor("k_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235902400))), lut = tensor([-0x1.838p-7, -0x1.f7p-9, 0x1.f84p-9, 0x1.83cp-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_51_cast = conv(dilations = var_2801, groups = var_1186, pad = k_51_pad_0, pad_type = k_51_pad_type_0, strides = var_2799, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_51_cast")]; + tensor var_2805 = const()[name = tensor("op_2805"), val = tensor([1, 1])]; + tensor var_2807 = const()[name = tensor("op_2807"), val = tensor([1, 1])]; + tensor v_51_pad_type_0 = const()[name = tensor("v_51_pad_type_0"), val = tensor("custom")]; + tensor v_51_pad_0 = const()[name = tensor("v_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(236557824))), lut = tensor([-0x1.eap-8, 0x1.ecp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_51_cast = conv(dilations = var_2807, groups = var_1186, pad = v_51_pad_0, pad_type = v_51_pad_type_0, strides = var_2805, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_51_cast")]; + tensor var_2811 = const()[name = tensor("op_2811"), val = tensor([2, 20, 64, -1])]; + tensor var_2812_cast = reshape(shape = var_2811, x = q_51_cast)[name = tensor("op_2812_cast")]; + tensor var_2813 = const()[name = tensor("op_2813"), val = tensor([2, 20, 64, -1])]; + tensor var_2814_cast = reshape(shape = var_2813, x = k_51_cast)[name = tensor("op_2814_cast")]; + tensor var_2815 = const()[name = tensor("op_2815"), val = tensor([2, 20, 64, -1])]; + tensor var_2816_cast = reshape(shape = var_2815, x = v_51_cast)[name = tensor("op_2816_cast")]; + tensor attn_weights_101_transpose_x_0 = const()[name = tensor("attn_weights_101_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_101_transpose_y_0 = const()[name = 
tensor("attn_weights_101_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_101_cast = matmul(transpose_x = attn_weights_101_transpose_x_0, transpose_y = attn_weights_101_transpose_y_0, x = var_2812_cast, y = var_2814_cast)[name = tensor("attn_weights_101_cast")]; + tensor attn_weights_103_cast = mul(x = attn_weights_101_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_103_cast")]; + tensor var_2820_cast = softmax(axis = var_1170, x = attn_weights_103_cast)[name = tensor("op_2820_cast")]; + tensor attn_51_transpose_x_0 = const()[name = tensor("attn_51_transpose_x_0"), val = tensor(false)]; + tensor attn_51_transpose_y_0 = const()[name = tensor("attn_51_transpose_y_0"), val = tensor(true)]; + tensor attn_51_cast = matmul(transpose_x = attn_51_transpose_x_0, transpose_y = attn_51_transpose_y_0, x = var_2816_cast, y = var_2820_cast)[name = tensor("attn_51_cast")]; + tensor var_2824 = const()[name = tensor("op_2824"), val = tensor([2, 1280, 1, -1])]; + tensor input_197_cast = reshape(shape = var_2824, x = attn_51_cast)[name = tensor("input_197_cast")]; + tensor var_2829 = const()[name = tensor("op_2829"), val = tensor([1, 1])]; + tensor var_2831 = const()[name = tensor("op_2831"), val = tensor([1, 1])]; + tensor var_2833_pad_type_0 = const()[name = tensor("op_2833_pad_type_0"), val = tensor("custom")]; + tensor var_2833_pad_0 = const()[name = tensor("op_2833_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(236885568))), lut = tensor([-0x1.2b8p-8, 0x1.2cp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(237090432)))]; + tensor var_2833_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_2831, groups = var_1186, pad = var_2833_pad_0, pad_type = var_2833_pad_type_0, strides = var_2829, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_197_cast)[name = tensor("op_2833_cast")]; + tensor inputs_77_cast = add(x = var_2833_cast, y = inputs_75_cast)[name = tensor("inputs_77_cast")]; + tensor var_2837 = const()[name = tensor("op_2837"), val = tensor([1])]; + tensor channels_mean_77_cast = reduce_mean(axes = var_2837, keep_dims = var_1181, x = inputs_77_cast)[name = tensor("channels_mean_77_cast")]; + tensor zero_mean_77_cast = sub(x = inputs_77_cast, y = channels_mean_77_cast)[name = tensor("zero_mean_77_cast")]; + tensor zero_mean_sq_77_cast = mul(x = zero_mean_77_cast, y = zero_mean_77_cast)[name = tensor("zero_mean_sq_77_cast")]; + tensor var_2841 = const()[name = tensor("op_2841"), val = tensor([1])]; + tensor var_2842_cast = reduce_mean(axes = var_2841, keep_dims = var_1181, x = zero_mean_sq_77_cast)[name = tensor("op_2842_cast")]; + tensor var_2843_to_fp16 = const()[name = tensor("op_2843_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2844_cast = add(x = var_2842_cast, y = var_2843_to_fp16)[name = tensor("op_2844_cast")]; + tensor denom_77_epsilon_0_to_fp16 = const()[name = 
tensor("denom_77_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_77_cast = rsqrt(epsilon = denom_77_epsilon_0_to_fp16, x = var_2844_cast)[name = tensor("denom_77_cast")]; + tensor out_77_cast = mul(x = zero_mean_77_cast, y = denom_77_cast)[name = tensor("out_77_cast")]; + tensor var_2848_to_fp16 = const()[name = tensor("op_2848_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(237093056)))]; + tensor var_2849_cast = add(x = out_77_cast, y = var_2848_to_fp16)[name = tensor("op_2849_cast")]; + tensor var_2851_to_fp16 = const()[name = tensor("op_2851_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(237095680)))]; + tensor input_199_cast = mul(x = var_2849_cast, y = var_2851_to_fp16)[name = tensor("input_199_cast")]; + tensor var_2859 = const()[name = tensor("op_2859"), val = tensor([1, 1])]; + tensor var_2861 = const()[name = tensor("op_2861"), val = tensor([1, 1])]; + tensor var_2863_pad_type_0 = const()[name = tensor("op_2863_pad_type_0"), val = tensor("custom")]; + tensor var_2863_pad_0 = const()[name = tensor("op_2863_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(237098304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(246928768))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(246928960)))]; + tensor var_2863_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_2861, groups = var_1186, pad = var_2863_pad_0, pad_type = var_2863_pad_type_0, strides = var_2859, weight = down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_199_cast)[name = tensor("op_2863_cast")]; + tensor var_2864_split_sizes_0 = const()[name = tensor("op_2864_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2864_axis_0 = const()[name = tensor("op_2864_axis_0"), val = tensor(1)]; + tensor var_2864_cast_0, tensor var_2864_cast_1 = split(axis = var_2864_axis_0, split_sizes = var_2864_split_sizes_0, x = var_2863_cast)[name = tensor("op_2864_cast")]; + tensor var_2866_mode_0 = const()[name = tensor("op_2866_mode_0"), val = tensor("EXACT")]; + tensor var_2866_cast = gelu(mode = var_2866_mode_0, x = var_2864_cast_1)[name = tensor("op_2866_cast")]; + tensor input_201_cast = mul(x = var_2864_cast_0, y = var_2866_cast)[name = tensor("input_201_cast")]; + tensor var_2870 = const()[name = tensor("op_2870"), val = tensor([1, 1])]; + tensor var_2872 = const()[name = tensor("op_2872"), val = tensor([1, 1])]; + tensor var_2874_pad_type_0 = const()[name = tensor("op_2874_pad_type_0"), val = tensor("custom")]; + tensor var_2874_pad_0 = const()[name = tensor("op_2874_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(246949504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250226368))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250226496)))]; + tensor var_2874_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_2872, groups = var_1186, pad = var_2874_pad_0, pad_type = var_2874_pad_type_0, strides = var_2870, weight = down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_201_cast)[name = tensor("op_2874_cast")]; + tensor inputs_79_cast = add(x = var_2874_cast, y = inputs_77_cast)[name = tensor("inputs_79_cast")]; + tensor var_2884 = const()[name = tensor("op_2884"), val = tensor([1])]; + tensor channels_mean_79_cast = reduce_mean(axes = var_2884, keep_dims = var_1181, x = inputs_79_cast)[name = tensor("channels_mean_79_cast")]; + tensor zero_mean_79_cast = sub(x = inputs_79_cast, y = channels_mean_79_cast)[name = tensor("zero_mean_79_cast")]; + tensor zero_mean_sq_79_cast = mul(x = zero_mean_79_cast, y = zero_mean_79_cast)[name = tensor("zero_mean_sq_79_cast")]; + tensor var_2888 = const()[name = tensor("op_2888"), val = tensor([1])]; + tensor var_2889_cast = reduce_mean(axes = var_2888, keep_dims = var_1181, x = zero_mean_sq_79_cast)[name = tensor("op_2889_cast")]; + tensor var_2890_to_fp16 = const()[name = tensor("op_2890_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2891_cast = add(x = var_2889_cast, y = var_2890_to_fp16)[name = tensor("op_2891_cast")]; + tensor denom_79_epsilon_0_to_fp16 = const()[name = tensor("denom_79_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_79_cast = rsqrt(epsilon = denom_79_epsilon_0_to_fp16, x = var_2891_cast)[name = tensor("denom_79_cast")]; + tensor out_79_cast = mul(x = zero_mean_79_cast, y = denom_79_cast)[name = tensor("out_79_cast")]; + tensor var_2895_to_fp16 = const()[name = tensor("op_2895_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250229120)))]; + tensor var_2896_cast = add(x = out_79_cast, y = var_2895_to_fp16)[name = tensor("op_2896_cast")]; + tensor var_2898_to_fp16 = const()[name = tensor("op_2898_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250231744)))]; + tensor hidden_states_119_cast = mul(x = var_2896_cast, y = var_2898_to_fp16)[name = tensor("hidden_states_119_cast")]; + tensor var_2905 = const()[name = tensor("op_2905"), val = tensor([1, 1])]; + tensor var_2907 = const()[name = tensor("op_2907"), val = tensor([1, 1])]; + tensor q_53_pad_type_0 = const()[name = tensor("q_53_pad_type_0"), val = tensor("custom")]; + tensor q_53_pad_0 = const()[name = tensor("q_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250234368))), lut = tensor([-0x1.1d4p-6, 0x1.1dcp-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_53_cast = conv(dilations 
= var_2907, groups = var_1186, pad = q_53_pad_0, pad_type = q_53_pad_type_0, strides = var_2905, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_119_cast)[name = tensor("q_53_cast")]; + tensor var_2911 = const()[name = tensor("op_2911"), val = tensor([1, 1])]; + tensor var_2913 = const()[name = tensor("op_2913"), val = tensor([1, 1])]; + tensor k_53_pad_type_0 = const()[name = tensor("k_53_pad_type_0"), val = tensor("custom")]; + tensor k_53_pad_0 = const()[name = tensor("k_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250439232))), lut = tensor([-0x1.194p-6, 0x1.1ap-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_53_cast = conv(dilations = var_2913, groups = var_1186, pad = k_53_pad_0, pad_type = k_53_pad_type_0, strides = var_2911, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_119_cast)[name = tensor("k_53_cast")]; + tensor var_2917 = const()[name = tensor("op_2917"), val = tensor([1, 1])]; + tensor var_2919 = const()[name = tensor("op_2919"), val = tensor([1, 1])]; + tensor v_53_pad_type_0 = const()[name = tensor("v_53_pad_type_0"), val = tensor("custom")]; + tensor v_53_pad_0 = const()[name = tensor("v_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250644096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(251463360))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_53_cast = conv(dilations = var_2919, groups = var_1186, pad = v_53_pad_0, pad_type = v_53_pad_type_0, strides = var_2917, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_119_cast)[name = tensor("v_53_cast")]; + tensor var_2923 = const()[name = tensor("op_2923"), val = tensor([2, 20, 64, -1])]; + tensor var_2924_cast = reshape(shape = var_2923, x = q_53_cast)[name = tensor("op_2924_cast")]; + tensor var_2925 = const()[name = tensor("op_2925"), val = tensor([2, 20, 64, -1])]; + tensor var_2926_cast = reshape(shape = var_2925, x = k_53_cast)[name = tensor("op_2926_cast")]; + tensor var_2927 = const()[name = tensor("op_2927"), val = tensor([2, 20, 64, -1])]; + tensor var_2928_cast = reshape(shape = var_2927, x = v_53_cast)[name = tensor("op_2928_cast")]; + tensor attn_weights_105_transpose_x_0 = const()[name = tensor("attn_weights_105_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_105_transpose_y_0 = const()[name = tensor("attn_weights_105_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_105_cast = matmul(transpose_x = attn_weights_105_transpose_x_0, transpose_y = attn_weights_105_transpose_y_0, x = var_2924_cast, y = var_2926_cast)[name = tensor("attn_weights_105_cast")]; + tensor attn_weights_107_cast = mul(x = attn_weights_105_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_107_cast")]; + tensor var_2932_cast = softmax(axis = var_1170, x = 
attn_weights_107_cast)[name = tensor("op_2932_cast")]; + tensor attn_53_transpose_x_0 = const()[name = tensor("attn_53_transpose_x_0"), val = tensor(false)]; + tensor attn_53_transpose_y_0 = const()[name = tensor("attn_53_transpose_y_0"), val = tensor(true)]; + tensor attn_53_cast = matmul(transpose_x = attn_53_transpose_x_0, transpose_y = attn_53_transpose_y_0, x = var_2928_cast, y = var_2932_cast)[name = tensor("attn_53_cast")]; + tensor var_2936 = const()[name = tensor("op_2936"), val = tensor([2, 1280, 1, -1])]; + tensor input_203_cast = reshape(shape = var_2936, x = attn_53_cast)[name = tensor("input_203_cast")]; + tensor var_2941 = const()[name = tensor("op_2941"), val = tensor([1, 1])]; + tensor var_2943 = const()[name = tensor("op_2943"), val = tensor([1, 1])]; + tensor var_2945_pad_type_0 = const()[name = tensor("op_2945_pad_type_0"), val = tensor("custom")]; + tensor var_2945_pad_0 = const()[name = tensor("op_2945_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(251463488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252282752))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252282880)))]; + tensor var_2945_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_2943, groups = var_1186, pad = var_2945_pad_0, pad_type = var_2945_pad_type_0, strides = var_2941, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_203_cast)[name = tensor("op_2945_cast")]; + tensor inputs_81_cast = add(x = var_2945_cast, y = inputs_79_cast)[name = tensor("inputs_81_cast")]; + tensor var_2949 = const()[name = tensor("op_2949"), val = tensor([1])]; + tensor channels_mean_81_cast = reduce_mean(axes = var_2949, keep_dims = var_1181, x = inputs_81_cast)[name = tensor("channels_mean_81_cast")]; + tensor zero_mean_81_cast = sub(x = inputs_81_cast, y = channels_mean_81_cast)[name = tensor("zero_mean_81_cast")]; + tensor zero_mean_sq_81_cast = mul(x = zero_mean_81_cast, y = zero_mean_81_cast)[name = tensor("zero_mean_sq_81_cast")]; + tensor var_2953 = const()[name = tensor("op_2953"), val = tensor([1])]; + tensor var_2954_cast = reduce_mean(axes = var_2953, keep_dims = var_1181, x = zero_mean_sq_81_cast)[name = tensor("op_2954_cast")]; + tensor var_2955_to_fp16 = const()[name = tensor("op_2955_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2956_cast = add(x = var_2954_cast, y = var_2955_to_fp16)[name = tensor("op_2956_cast")]; + tensor denom_81_epsilon_0_to_fp16 = const()[name = tensor("denom_81_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_81_cast = rsqrt(epsilon = denom_81_epsilon_0_to_fp16, x = var_2956_cast)[name = tensor("denom_81_cast")]; + tensor out_81_cast = mul(x = zero_mean_81_cast, y = denom_81_cast)[name = tensor("out_81_cast")]; + tensor var_2960_to_fp16 = const()[name = tensor("op_2960_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(252285504)))]; + tensor var_2961_cast = add(x = out_81_cast, y = var_2960_to_fp16)[name = tensor("op_2961_cast")]; + tensor var_2963_to_fp16 = const()[name = tensor("op_2963_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252288128)))]; + tensor hidden_states_121_cast = mul(x = var_2961_cast, y = var_2963_to_fp16)[name = tensor("hidden_states_121_cast")]; + tensor var_2970 = const()[name = tensor("op_2970"), val = tensor([1, 1])]; + tensor var_2972 = const()[name = tensor("op_2972"), val = tensor([1, 1])]; + tensor q_55_pad_type_0 = const()[name = tensor("q_55_pad_type_0"), val = tensor("custom")]; + tensor q_55_pad_0 = const()[name = tensor("q_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252290752))), lut = tensor([-0x1.688p-7, 0x1.688p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_55_cast = conv(dilations = var_2972, groups = var_1186, pad = q_55_pad_0, pad_type = q_55_pad_type_0, strides = var_2970, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_121_cast)[name = tensor("q_55_cast")]; + tensor var_2976 = const()[name = tensor("op_2976"), val = tensor([1, 1])]; + tensor var_2978 = const()[name = tensor("op_2978"), val = tensor([1, 1])]; + tensor k_55_pad_type_0 = const()[name = tensor("k_55_pad_type_0"), val = tensor("custom")]; + tensor k_55_pad_0 = const()[name = tensor("k_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252495616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(253806400))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_55_cast = conv(dilations = var_2978, groups = var_1186, pad = k_55_pad_0, pad_type = k_55_pad_type_0, strides = var_2976, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_55_cast")]; + tensor var_2982 = const()[name = tensor("op_2982"), val = tensor([1, 1])]; + tensor var_2984 = const()[name = tensor("op_2984"), val = tensor([1, 1])]; + tensor v_55_pad_type_0 = const()[name = tensor("v_55_pad_type_0"), val = tensor("custom")]; + tensor v_55_pad_0 = const()[name = tensor("v_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(253806528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255117312))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_55_cast = conv(dilations = var_2984, groups = var_1186, pad = v_55_pad_0, pad_type = v_55_pad_type_0, strides = var_2982, weight = 
down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_55_cast")]; + tensor var_2988 = const()[name = tensor("op_2988"), val = tensor([2, 20, 64, -1])]; + tensor var_2989_cast = reshape(shape = var_2988, x = q_55_cast)[name = tensor("op_2989_cast")]; + tensor var_2990 = const()[name = tensor("op_2990"), val = tensor([2, 20, 64, -1])]; + tensor var_2991_cast = reshape(shape = var_2990, x = k_55_cast)[name = tensor("op_2991_cast")]; + tensor var_2992 = const()[name = tensor("op_2992"), val = tensor([2, 20, 64, -1])]; + tensor var_2993_cast = reshape(shape = var_2992, x = v_55_cast)[name = tensor("op_2993_cast")]; + tensor attn_weights_109_transpose_x_0 = const()[name = tensor("attn_weights_109_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_109_transpose_y_0 = const()[name = tensor("attn_weights_109_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_109_cast = matmul(transpose_x = attn_weights_109_transpose_x_0, transpose_y = attn_weights_109_transpose_y_0, x = var_2989_cast, y = var_2991_cast)[name = tensor("attn_weights_109_cast")]; + tensor attn_weights_111_cast = mul(x = attn_weights_109_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_111_cast")]; + tensor var_2997_cast = softmax(axis = var_1170, x = attn_weights_111_cast)[name = tensor("op_2997_cast")]; + tensor attn_55_transpose_x_0 = const()[name = tensor("attn_55_transpose_x_0"), val = tensor(false)]; + tensor attn_55_transpose_y_0 = const()[name = tensor("attn_55_transpose_y_0"), val = tensor(true)]; + tensor attn_55_cast = matmul(transpose_x = attn_55_transpose_x_0, transpose_y = attn_55_transpose_y_0, x = var_2993_cast, y = var_2997_cast)[name = tensor("attn_55_cast")]; + tensor var_3001 = const()[name = tensor("op_3001"), val = tensor([2, 1280, 1, -1])]; + tensor input_205_cast = reshape(shape = var_3001, x = attn_55_cast)[name = tensor("input_205_cast")]; + tensor var_3006 = const()[name = tensor("op_3006"), val = tensor([1, 1])]; + tensor var_3008 = const()[name = tensor("op_3008"), val = tensor([1, 1])]; + tensor var_3010_pad_type_0 = const()[name = tensor("op_3010_pad_type_0"), val = tensor("custom")]; + tensor var_3010_pad_0 = const()[name = tensor("op_3010_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255117440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255936704))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255936832)))]; + tensor var_3010_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_3008, groups = var_1186, pad = var_3010_pad_0, pad_type = var_3010_pad_type_0, strides = var_3006, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_205_cast)[name = tensor("op_3010_cast")]; + tensor inputs_83_cast = add(x = var_3010_cast, y = inputs_81_cast)[name = tensor("inputs_83_cast")]; + 
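
The stretch above repeats a small vocabulary of patterns: `constexpr_lut_to_dense` expands each palettized weight from packed indices plus a per-layer lookup table (a 2-entry `lut` means a 1-bit palette, a 4-entry `lut` a 2-bit palette, while larger palettes are stored as blobs next to the indices — the per-layer variation that gives mixed-bit palettization its name); the `reduce_mean`/`sub`/`mul`/`rsqrt` chains are a hand-rolled LayerNorm; and the `split`/`gelu`/`mul` triple is the GEGLU feed-forward gate. Below is a minimal NumPy sketch of what these ops compute, assuming already-unpacked indices; all names are illustrative, not the coremltools API.

```python
# Illustrative NumPy sketch of recurring MIL patterns in the dump above.
# Nothing here is the coremltools API; names and signatures are hypothetical.
import math
import numpy as np

def lut_to_dense(indices, lut, shape):
    """constexpr_lut_to_dense: rebuild fp16 weights from palette indices.

    len(lut) == 2 -> 1-bit palette, len(lut) == 4 -> 2-bit, and so on;
    `indices` is assumed already unpacked to one integer per weight.
    """
    return np.asarray(lut, dtype=np.float16)[indices].reshape(shape)

def layer_norm(x, beta, gamma, eps=1e-5):
    """The channels_mean / zero_mean / rsqrt chain, with the affine applied
    in the dump's order: (normalized + beta') * gamma' (folded beta/gamma)."""
    mean = x.mean(axis=1, keepdims=True)             # channels_mean_*
    zm = x - mean                                    # zero_mean_*
    var = (zm * zm).mean(axis=1, keepdims=True)      # mean of zero_mean_sq_*
    out = zm / np.sqrt(var + eps)                    # add eps, then rsqrt
    return (out + beta) * gamma                      # add, then mul

def geglu(x):
    """ff_net_0_proj output: split 10240 channels into two 5120 halves,
    gate one half with exact (erf-based) GELU, multiply ("EXACT" mode)."""
    a, b = np.split(x, 2, axis=1)                    # split_sizes [5120, 5120]
    gelu_b = 0.5 * b * (1.0 + np.vectorize(math.erf)(b / math.sqrt(2.0)))
    return a * gelu_b
```

Note how, within this stretch, some projections carry tiny inline LUTs (1-2 bits) while others point at blob-stored tables, i.e. larger per-layer palettes — the varying per-layer bit allocation at work.
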
tensor var_3014 = const()[name = tensor("op_3014"), val = tensor([1])]; + tensor channels_mean_83_cast = reduce_mean(axes = var_3014, keep_dims = var_1181, x = inputs_83_cast)[name = tensor("channels_mean_83_cast")]; + tensor zero_mean_83_cast = sub(x = inputs_83_cast, y = channels_mean_83_cast)[name = tensor("zero_mean_83_cast")]; + tensor zero_mean_sq_83_cast = mul(x = zero_mean_83_cast, y = zero_mean_83_cast)[name = tensor("zero_mean_sq_83_cast")]; + tensor var_3018 = const()[name = tensor("op_3018"), val = tensor([1])]; + tensor var_3019_cast = reduce_mean(axes = var_3018, keep_dims = var_1181, x = zero_mean_sq_83_cast)[name = tensor("op_3019_cast")]; + tensor var_3020_to_fp16 = const()[name = tensor("op_3020_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3021_cast = add(x = var_3019_cast, y = var_3020_to_fp16)[name = tensor("op_3021_cast")]; + tensor denom_83_epsilon_0_to_fp16 = const()[name = tensor("denom_83_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_83_cast = rsqrt(epsilon = denom_83_epsilon_0_to_fp16, x = var_3021_cast)[name = tensor("denom_83_cast")]; + tensor out_83_cast = mul(x = zero_mean_83_cast, y = denom_83_cast)[name = tensor("out_83_cast")]; + tensor var_3025_to_fp16 = const()[name = tensor("op_3025_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255939456)))]; + tensor var_3026_cast = add(x = out_83_cast, y = var_3025_to_fp16)[name = tensor("op_3026_cast")]; + tensor var_3028_to_fp16 = const()[name = tensor("op_3028_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255942080)))]; + tensor input_207_cast = mul(x = var_3026_cast, y = var_3028_to_fp16)[name = tensor("input_207_cast")]; + tensor var_3036 = const()[name = tensor("op_3036"), val = tensor([1, 1])]; + tensor var_3038 = const()[name = tensor("op_3038"), val = tensor([1, 1])]; + tensor var_3040_pad_type_0 = const()[name = tensor("op_3040_pad_type_0"), val = tensor("custom")]; + tensor var_3040_pad_0 = const()[name = tensor("op_3040_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255944704))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(265775168))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(265775360)))]; + tensor var_3040_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_3038, groups = var_1186, pad = var_3040_pad_0, pad_type = var_3040_pad_type_0, strides = var_3036, weight = down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_207_cast)[name = tensor("op_3040_cast")]; + tensor var_3041_split_sizes_0 = const()[name = tensor("op_3041_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3041_axis_0 = const()[name = tensor("op_3041_axis_0"), val = tensor(1)]; + tensor var_3041_cast_0, tensor var_3041_cast_1 = split(axis = var_3041_axis_0, split_sizes = var_3041_split_sizes_0, 
x = var_3040_cast)[name = tensor("op_3041_cast")]; + tensor var_3043_mode_0 = const()[name = tensor("op_3043_mode_0"), val = tensor("EXACT")]; + tensor var_3043_cast = gelu(mode = var_3043_mode_0, x = var_3041_cast_1)[name = tensor("op_3043_cast")]; + tensor input_209_cast = mul(x = var_3041_cast_0, y = var_3043_cast)[name = tensor("input_209_cast")]; + tensor var_3047 = const()[name = tensor("op_3047"), val = tensor([1, 1])]; + tensor var_3049 = const()[name = tensor("op_3049"), val = tensor([1, 1])]; + tensor var_3051_pad_type_0 = const()[name = tensor("op_3051_pad_type_0"), val = tensor("custom")]; + tensor var_3051_pad_0 = const()[name = tensor("op_3051_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(265795904))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(269072768))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(269072896)))]; + tensor var_3051_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_3049, groups = var_1186, pad = var_3051_pad_0, pad_type = var_3051_pad_type_0, strides = var_3047, weight = down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_209_cast)[name = tensor("op_3051_cast")]; + tensor hidden_states_125_cast = add(x = var_3051_cast, y = inputs_83_cast)[name = tensor("hidden_states_125_cast")]; + tensor var_3053 = const()[name = tensor("op_3053"), val = tensor([2, 1280, 32, 32])]; + tensor input_211_cast = reshape(shape = var_3053, x = hidden_states_125_cast)[name = tensor("input_211_cast")]; + tensor var_3057 = const()[name = tensor("op_3057"), val = tensor([1, 1])]; + tensor var_3059 = const()[name = tensor("op_3059"), val = tensor([1, 1])]; + tensor hidden_states_127_pad_type_0 = const()[name = tensor("hidden_states_127_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_127_pad_0 = const()[name = tensor("hidden_states_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(269075520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(270304384))), name = tensor("down_blocks_2_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(270304576)))]; + tensor hidden_states_127_cast = conv(bias = down_blocks_2_attentions_0_proj_out_bias_to_fp16, dilations = var_3059, groups = var_1186, pad = hidden_states_127_pad_0, pad_type = hidden_states_127_pad_type_0, strides = var_3057, weight = down_blocks_2_attentions_0_proj_out_weight_to_fp16_palettized, x = input_211_cast)[name = tensor("hidden_states_127_cast")]; + tensor 
input_213_cast = add(x = hidden_states_127_cast, y = hidden_states_61_cast)[name = tensor("input_213_cast")]; + tensor reshape_52_shape_0 = const()[name = tensor("reshape_52_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_52_cast = reshape(shape = reshape_52_shape_0, x = input_213_cast)[name = tensor("reshape_52_cast")]; + tensor reduce_mean_39_axes_0 = const()[name = tensor("reduce_mean_39_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_39_keep_dims_0 = const()[name = tensor("reduce_mean_39_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_39_cast = reduce_mean(axes = reduce_mean_39_axes_0, keep_dims = reduce_mean_39_keep_dims_0, x = reshape_52_cast)[name = tensor("reduce_mean_39_cast")]; + tensor sub_26_cast = sub(x = reshape_52_cast, y = reduce_mean_39_cast)[name = tensor("sub_26_cast")]; + tensor square_13_cast = square(x = sub_26_cast)[name = tensor("square_13_cast")]; + tensor reduce_mean_41_axes_0 = const()[name = tensor("reduce_mean_41_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_41_keep_dims_0 = const()[name = tensor("reduce_mean_41_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_41_cast = reduce_mean(axes = reduce_mean_41_axes_0, keep_dims = reduce_mean_41_keep_dims_0, x = square_13_cast)[name = tensor("reduce_mean_41_cast")]; + tensor add_26_y_0_to_fp16 = const()[name = tensor("add_26_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_26_cast = add(x = reduce_mean_41_cast, y = add_26_y_0_to_fp16)[name = tensor("add_26_cast")]; + tensor sqrt_13_cast = sqrt(x = add_26_cast)[name = tensor("sqrt_13_cast")]; + tensor real_div_13_cast = real_div(x = sub_26_cast, y = sqrt_13_cast)[name = tensor("real_div_13_cast")]; + tensor reshape_53_shape_0 = const()[name = tensor("reshape_53_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_53_cast = reshape(shape = reshape_53_shape_0, x = real_div_13_cast)[name = tensor("reshape_53_cast")]; + tensor add_27_gamma_0_to_fp16 = const()[name = tensor("add_27_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(270307200)))]; + tensor add_27_beta_0_to_fp16 = const()[name = tensor("add_27_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(270309824)))]; + tensor add_27_epsilon_0_to_fp16 = const()[name = tensor("add_27_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_27_cast = batch_norm(beta = add_27_beta_0_to_fp16, epsilon = add_27_epsilon_0_to_fp16, gamma = add_27_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_53_cast)[name = tensor("add_27_cast")]; + tensor input_217_cast = silu(x = add_27_cast)[name = tensor("input_217_cast")]; + tensor var_3074 = const()[name = tensor("op_3074"), val = tensor([1, 1])]; + tensor var_3076 = const()[name = tensor("op_3076"), val = tensor([1, 1])]; + tensor hidden_states_129_pad_type_0 = const()[name = tensor("hidden_states_129_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_129_pad_0 = const()[name = tensor("hidden_states_129_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_2_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(270312448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(281371712))), name = tensor("down_blocks_2_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor 
down_blocks_2_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(281371904)))]; + tensor hidden_states_129_cast = conv(bias = down_blocks_2_resnets_1_conv1_bias_to_fp16, dilations = var_3076, groups = var_1186, pad = hidden_states_129_pad_0, pad_type = hidden_states_129_pad_type_0, strides = var_3074, weight = down_blocks_2_resnets_1_conv1_weight_to_fp16_palettized, x = input_217_cast)[name = tensor("hidden_states_129_cast")]; + tensor var_3082 = const()[name = tensor("op_3082"), val = tensor([1, 1])]; + tensor var_3084 = const()[name = tensor("op_3084"), val = tensor([1, 1])]; + tensor temb_11_pad_type_0 = const()[name = tensor("temb_11_pad_type_0"), val = tensor("custom")]; + tensor temb_11_pad_0 = const()[name = tensor("temb_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(281374528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(282193792))), name = tensor("down_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(282193920)))]; + tensor temb_11_cast = conv(bias = down_blocks_2_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_3084, groups = var_1186, pad = temb_11_pad_0, pad_type = temb_11_pad_type_0, strides = var_3082, weight = down_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_11_cast")]; + tensor input_221_cast = add(x = hidden_states_129_cast, y = temb_11_cast)[name = tensor("input_221_cast")]; + tensor reshape_56_shape_0 = const()[name = tensor("reshape_56_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_56_cast = reshape(shape = reshape_56_shape_0, x = input_221_cast)[name = tensor("reshape_56_cast")]; + tensor reduce_mean_42_axes_0 = const()[name = tensor("reduce_mean_42_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_42_keep_dims_0 = const()[name = tensor("reduce_mean_42_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_42_cast = reduce_mean(axes = reduce_mean_42_axes_0, keep_dims = reduce_mean_42_keep_dims_0, x = reshape_56_cast)[name = tensor("reduce_mean_42_cast")]; + tensor sub_28_cast = sub(x = reshape_56_cast, y = reduce_mean_42_cast)[name = tensor("sub_28_cast")]; + tensor square_14_cast = square(x = sub_28_cast)[name = tensor("square_14_cast")]; + tensor reduce_mean_44_axes_0 = const()[name = tensor("reduce_mean_44_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_44_keep_dims_0 = const()[name = tensor("reduce_mean_44_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_44_cast = reduce_mean(axes = reduce_mean_44_axes_0, keep_dims = reduce_mean_44_keep_dims_0, x = square_14_cast)[name = tensor("reduce_mean_44_cast")]; + tensor add_28_y_0_to_fp16 = const()[name = tensor("add_28_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_28_cast = add(x = reduce_mean_44_cast, y = add_28_y_0_to_fp16)[name = tensor("add_28_cast")]; + tensor sqrt_14_cast = sqrt(x = add_28_cast)[name = tensor("sqrt_14_cast")]; + tensor real_div_14_cast = real_div(x = 
sub_28_cast, y = sqrt_14_cast)[name = tensor("real_div_14_cast")]; + tensor reshape_57_shape_0 = const()[name = tensor("reshape_57_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_57_cast = reshape(shape = reshape_57_shape_0, x = real_div_14_cast)[name = tensor("reshape_57_cast")]; + tensor add_29_gamma_0_to_fp16 = const()[name = tensor("add_29_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(282196544)))]; + tensor add_29_beta_0_to_fp16 = const()[name = tensor("add_29_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(282199168)))]; + tensor add_29_epsilon_0_to_fp16 = const()[name = tensor("add_29_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_29_cast = batch_norm(beta = add_29_beta_0_to_fp16, epsilon = add_29_epsilon_0_to_fp16, gamma = add_29_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_57_cast)[name = tensor("add_29_cast")]; + tensor input_225_cast = silu(x = add_29_cast)[name = tensor("input_225_cast")]; + tensor var_3094 = const()[name = tensor("op_3094"), val = tensor([1, 1])]; + tensor var_3096 = const()[name = tensor("op_3096"), val = tensor([1, 1])]; + tensor hidden_states_131_pad_type_0 = const()[name = tensor("hidden_states_131_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_131_pad_0 = const()[name = tensor("hidden_states_131_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_2_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(282201792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(289574656))), name = tensor("down_blocks_2_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor down_blocks_2_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(289574784)))]; + tensor hidden_states_131_cast = conv(bias = down_blocks_2_resnets_1_conv2_bias_to_fp16, dilations = var_3096, groups = var_1186, pad = hidden_states_131_pad_0, pad_type = hidden_states_131_pad_type_0, strides = var_3094, weight = down_blocks_2_resnets_1_conv2_weight_to_fp16_palettized, x = input_225_cast)[name = tensor("hidden_states_131_cast")]; + tensor hidden_states_133_cast = add(x = input_213_cast, y = hidden_states_131_cast)[name = tensor("hidden_states_133_cast")]; + tensor reshape_60_shape_0 = const()[name = tensor("reshape_60_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_60_cast = reshape(shape = reshape_60_shape_0, x = hidden_states_133_cast)[name = tensor("reshape_60_cast")]; + tensor reduce_mean_45_axes_0 = const()[name = tensor("reduce_mean_45_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_45_keep_dims_0 = const()[name = tensor("reduce_mean_45_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_45_cast = reduce_mean(axes = reduce_mean_45_axes_0, keep_dims = reduce_mean_45_keep_dims_0, x = reshape_60_cast)[name = tensor("reduce_mean_45_cast")]; + tensor sub_30_cast = sub(x = reshape_60_cast, y = reduce_mean_45_cast)[name = tensor("sub_30_cast")]; + tensor square_15_cast = square(x = sub_30_cast)[name = tensor("square_15_cast")]; + tensor reduce_mean_47_axes_0 = const()[name = tensor("reduce_mean_47_axes_0"), val = tensor([2, 3, 4])]; + tensor 
reduce_mean_47_keep_dims_0 = const()[name = tensor("reduce_mean_47_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_47_cast = reduce_mean(axes = reduce_mean_47_axes_0, keep_dims = reduce_mean_47_keep_dims_0, x = square_15_cast)[name = tensor("reduce_mean_47_cast")]; + tensor add_30_y_0_to_fp16 = const()[name = tensor("add_30_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_30_cast = add(x = reduce_mean_47_cast, y = add_30_y_0_to_fp16)[name = tensor("add_30_cast")]; + tensor sqrt_15_cast = sqrt(x = add_30_cast)[name = tensor("sqrt_15_cast")]; + tensor real_div_15_cast = real_div(x = sub_30_cast, y = sqrt_15_cast)[name = tensor("real_div_15_cast")]; + tensor reshape_61_shape_0 = const()[name = tensor("reshape_61_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_61_cast = reshape(shape = reshape_61_shape_0, x = real_div_15_cast)[name = tensor("reshape_61_cast")]; + tensor add_31_gamma_0_to_fp16 = const()[name = tensor("add_31_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(289577408)))]; + tensor add_31_beta_0_to_fp16 = const()[name = tensor("add_31_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(289580032)))]; + tensor add_31_epsilon_0_to_fp16 = const()[name = tensor("add_31_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_31_cast = batch_norm(beta = add_31_beta_0_to_fp16, epsilon = add_31_epsilon_0_to_fp16, gamma = add_31_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_61_cast)[name = tensor("add_31_cast")]; + tensor var_3134 = const()[name = tensor("op_3134"), val = tensor([1, 1])]; + tensor var_3136 = const()[name = tensor("op_3136"), val = tensor([1, 1])]; + tensor hidden_states_135_pad_type_0 = const()[name = tensor("hidden_states_135_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_135_pad_0 = const()[name = tensor("hidden_states_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(289582656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(290401920))), name = tensor("down_blocks_2_attentions_1_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_proj_in_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(290402048)))]; + tensor hidden_states_135_cast = conv(bias = down_blocks_2_attentions_1_proj_in_bias_to_fp16, dilations = var_3136, groups = var_1186, pad = hidden_states_135_pad_0, pad_type = hidden_states_135_pad_type_0, strides = var_3134, weight = down_blocks_2_attentions_1_proj_in_weight_to_fp16_palettized, x = add_31_cast)[name = tensor("hidden_states_135_cast")]; + tensor var_3141 = const()[name = tensor("op_3141"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_85_cast = reshape(shape = var_3141, x = hidden_states_135_cast)[name = tensor("inputs_85_cast")]; + tensor var_3151 = const()[name = tensor("op_3151"), val = tensor([1])]; + tensor channels_mean_85_cast = reduce_mean(axes = var_3151, keep_dims = var_1181, x = inputs_85_cast)[name = tensor("channels_mean_85_cast")]; + tensor zero_mean_85_cast = sub(x = inputs_85_cast, y = channels_mean_85_cast)[name = 
tensor("zero_mean_85_cast")]; + tensor zero_mean_sq_85_cast = mul(x = zero_mean_85_cast, y = zero_mean_85_cast)[name = tensor("zero_mean_sq_85_cast")]; + tensor var_3155 = const()[name = tensor("op_3155"), val = tensor([1])]; + tensor var_3156_cast = reduce_mean(axes = var_3155, keep_dims = var_1181, x = zero_mean_sq_85_cast)[name = tensor("op_3156_cast")]; + tensor var_3157_to_fp16 = const()[name = tensor("op_3157_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3158_cast = add(x = var_3156_cast, y = var_3157_to_fp16)[name = tensor("op_3158_cast")]; + tensor denom_85_epsilon_0_to_fp16 = const()[name = tensor("denom_85_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_85_cast = rsqrt(epsilon = denom_85_epsilon_0_to_fp16, x = var_3158_cast)[name = tensor("denom_85_cast")]; + tensor out_85_cast = mul(x = zero_mean_85_cast, y = denom_85_cast)[name = tensor("out_85_cast")]; + tensor var_3162_to_fp16 = const()[name = tensor("op_3162_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(290404672)))]; + tensor var_3163_cast = add(x = out_85_cast, y = var_3162_to_fp16)[name = tensor("op_3163_cast")]; + tensor var_3165_to_fp16 = const()[name = tensor("op_3165_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(290407296)))]; + tensor hidden_states_137_cast = mul(x = var_3163_cast, y = var_3165_to_fp16)[name = tensor("hidden_states_137_cast")]; + tensor var_3172 = const()[name = tensor("op_3172"), val = tensor([1, 1])]; + tensor var_3174 = const()[name = tensor("op_3174"), val = tensor([1, 1])]; + tensor q_57_pad_type_0 = const()[name = tensor("q_57_pad_type_0"), val = tensor("custom")]; + tensor q_57_pad_0 = const()[name = tensor("q_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(290409920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(291229184))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_57_cast = conv(dilations = var_3174, groups = var_1186, pad = q_57_pad_0, pad_type = q_57_pad_type_0, strides = var_3172, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_137_cast)[name = tensor("q_57_cast")]; + tensor var_3178 = const()[name = tensor("op_3178"), val = tensor([1, 1])]; + tensor var_3180 = const()[name = tensor("op_3180"), val = tensor([1, 1])]; + tensor k_57_pad_type_0 = const()[name = tensor("k_57_pad_type_0"), val = tensor("custom")]; + tensor k_57_pad_0 = const()[name = tensor("k_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(291229312))), lut = tensor([-0x1.394p-5, -0x1.784p-7, 0x1.7c4p-7, 0x1.3a4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_57_cast = conv(dilations = var_3180, groups = var_1186, pad = k_57_pad_0, pad_type = k_57_pad_type_0, strides = var_3178, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = 
hidden_states_137_cast)[name = tensor("k_57_cast")]; + tensor var_3184 = const()[name = tensor("op_3184"), val = tensor([1, 1])]; + tensor var_3186 = const()[name = tensor("op_3186"), val = tensor([1, 1])]; + tensor v_57_pad_type_0 = const()[name = tensor("v_57_pad_type_0"), val = tensor("custom")]; + tensor v_57_pad_0 = const()[name = tensor("v_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(291638976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(292458240))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_57_cast = conv(dilations = var_3186, groups = var_1186, pad = v_57_pad_0, pad_type = v_57_pad_type_0, strides = var_3184, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_137_cast)[name = tensor("v_57_cast")]; + tensor var_3190 = const()[name = tensor("op_3190"), val = tensor([2, 20, 64, -1])]; + tensor var_3191_cast = reshape(shape = var_3190, x = q_57_cast)[name = tensor("op_3191_cast")]; + tensor var_3192 = const()[name = tensor("op_3192"), val = tensor([2, 20, 64, -1])]; + tensor var_3193_cast = reshape(shape = var_3192, x = k_57_cast)[name = tensor("op_3193_cast")]; + tensor var_3194 = const()[name = tensor("op_3194"), val = tensor([2, 20, 64, -1])]; + tensor var_3195_cast = reshape(shape = var_3194, x = v_57_cast)[name = tensor("op_3195_cast")]; + tensor attn_weights_113_transpose_x_0 = const()[name = tensor("attn_weights_113_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_113_transpose_y_0 = const()[name = tensor("attn_weights_113_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_113_cast = matmul(transpose_x = attn_weights_113_transpose_x_0, transpose_y = attn_weights_113_transpose_y_0, x = var_3191_cast, y = var_3193_cast)[name = tensor("attn_weights_113_cast")]; + tensor attn_weights_115_cast = mul(x = attn_weights_113_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_115_cast")]; + tensor var_3199_cast = softmax(axis = var_1170, x = attn_weights_115_cast)[name = tensor("op_3199_cast")]; + tensor attn_57_transpose_x_0 = const()[name = tensor("attn_57_transpose_x_0"), val = tensor(false)]; + tensor attn_57_transpose_y_0 = const()[name = tensor("attn_57_transpose_y_0"), val = tensor(true)]; + tensor attn_57_cast = matmul(transpose_x = attn_57_transpose_x_0, transpose_y = attn_57_transpose_y_0, x = var_3195_cast, y = var_3199_cast)[name = tensor("attn_57_cast")]; + tensor var_3203 = const()[name = tensor("op_3203"), val = tensor([2, 1280, 1, -1])]; + tensor input_229_cast = reshape(shape = var_3203, x = attn_57_cast)[name = tensor("input_229_cast")]; + tensor var_3208 = const()[name = tensor("op_3208"), val = tensor([1, 1])]; + tensor var_3210 = const()[name = tensor("op_3210"), val = tensor([1, 1])]; + tensor var_3212_pad_type_0 = const()[name = tensor("op_3212_pad_type_0"), val = tensor("custom")]; + tensor var_3212_pad_0 = const()[name = tensor("op_3212_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(292458368))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293277632))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293277760)))]; + tensor var_3212_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_3210, groups = var_1186, pad = var_3212_pad_0, pad_type = var_3212_pad_type_0, strides = var_3208, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_229_cast)[name = tensor("op_3212_cast")]; + tensor inputs_87_cast = add(x = var_3212_cast, y = inputs_85_cast)[name = tensor("inputs_87_cast")]; + tensor var_3216 = const()[name = tensor("op_3216"), val = tensor([1])]; + tensor channels_mean_87_cast = reduce_mean(axes = var_3216, keep_dims = var_1181, x = inputs_87_cast)[name = tensor("channels_mean_87_cast")]; + tensor zero_mean_87_cast = sub(x = inputs_87_cast, y = channels_mean_87_cast)[name = tensor("zero_mean_87_cast")]; + tensor zero_mean_sq_87_cast = mul(x = zero_mean_87_cast, y = zero_mean_87_cast)[name = tensor("zero_mean_sq_87_cast")]; + tensor var_3220 = const()[name = tensor("op_3220"), val = tensor([1])]; + tensor var_3221_cast = reduce_mean(axes = var_3220, keep_dims = var_1181, x = zero_mean_sq_87_cast)[name = tensor("op_3221_cast")]; + tensor var_3222_to_fp16 = const()[name = tensor("op_3222_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3223_cast = add(x = var_3221_cast, y = var_3222_to_fp16)[name = tensor("op_3223_cast")]; + tensor denom_87_epsilon_0_to_fp16 = const()[name = tensor("denom_87_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_87_cast = rsqrt(epsilon = denom_87_epsilon_0_to_fp16, x = var_3223_cast)[name = tensor("denom_87_cast")]; + tensor out_87_cast = mul(x = zero_mean_87_cast, y = denom_87_cast)[name = tensor("out_87_cast")]; + tensor var_3227_to_fp16 = const()[name = tensor("op_3227_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293280384)))]; + tensor var_3228_cast = add(x = out_87_cast, y = var_3227_to_fp16)[name = tensor("op_3228_cast")]; + tensor var_3230_to_fp16 = const()[name = tensor("op_3230_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293283008)))]; + tensor hidden_states_139_cast = mul(x = var_3228_cast, y = var_3230_to_fp16)[name = tensor("hidden_states_139_cast")]; + tensor var_3237 = const()[name = tensor("op_3237"), val = tensor([1, 1])]; + tensor var_3239 = const()[name = tensor("op_3239"), val = tensor([1, 1])]; + tensor q_59_pad_type_0 = const()[name = tensor("q_59_pad_type_0"), val = tensor("custom")]; + tensor q_59_pad_0 = const()[name = tensor("q_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293285632))), lut = tensor([-0x1.f38p-7, 0x1.f48p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_59_cast = 
conv(dilations = var_3239, groups = var_1186, pad = q_59_pad_0, pad_type = q_59_pad_type_0, strides = var_3237, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_139_cast)[name = tensor("q_59_cast")]; + tensor var_3243 = const()[name = tensor("op_3243"), val = tensor([1, 1])]; + tensor var_3245 = const()[name = tensor("op_3245"), val = tensor([1, 1])]; + tensor k_59_pad_type_0 = const()[name = tensor("k_59_pad_type_0"), val = tensor("custom")]; + tensor k_59_pad_0 = const()[name = tensor("k_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293490496))), lut = tensor([-0x1.dc8p-6, -0x1.1b8p-7, 0x1.198p-7, 0x1.dbp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_59_cast = conv(dilations = var_3245, groups = var_1186, pad = k_59_pad_0, pad_type = k_59_pad_type_0, strides = var_3243, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_59_cast")]; + tensor var_3249 = const()[name = tensor("op_3249"), val = tensor([1, 1])]; + tensor var_3251 = const()[name = tensor("op_3251"), val = tensor([1, 1])]; + tensor v_59_pad_type_0 = const()[name = tensor("v_59_pad_type_0"), val = tensor("custom")]; + tensor v_59_pad_0 = const()[name = tensor("v_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(294145920))), lut = tensor([-0x1.0d4p-5, -0x1.34cp-7, 0x1.358p-7, 0x1.0d4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_59_cast = conv(dilations = var_3251, groups = var_1186, pad = v_59_pad_0, pad_type = v_59_pad_type_0, strides = var_3249, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_59_cast")]; + tensor var_3255 = const()[name = tensor("op_3255"), val = tensor([2, 20, 64, -1])]; + tensor var_3256_cast = reshape(shape = var_3255, x = q_59_cast)[name = tensor("op_3256_cast")]; + tensor var_3257 = const()[name = tensor("op_3257"), val = tensor([2, 20, 64, -1])]; + tensor var_3258_cast = reshape(shape = var_3257, x = k_59_cast)[name = tensor("op_3258_cast")]; + tensor var_3259 = const()[name = tensor("op_3259"), val = tensor([2, 20, 64, -1])]; + tensor var_3260_cast = reshape(shape = var_3259, x = v_59_cast)[name = tensor("op_3260_cast")]; + tensor attn_weights_117_transpose_x_0 = const()[name = tensor("attn_weights_117_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_117_transpose_y_0 = const()[name = tensor("attn_weights_117_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_117_cast = matmul(transpose_x = attn_weights_117_transpose_x_0, transpose_y = attn_weights_117_transpose_y_0, x = var_3256_cast, y = var_3258_cast)[name = tensor("attn_weights_117_cast")]; + tensor attn_weights_119_cast = mul(x = attn_weights_117_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_119_cast")]; + tensor var_3264_cast = softmax(axis = var_1170, x = 
attn_weights_119_cast)[name = tensor("op_3264_cast")]; + tensor attn_59_transpose_x_0 = const()[name = tensor("attn_59_transpose_x_0"), val = tensor(false)]; + tensor attn_59_transpose_y_0 = const()[name = tensor("attn_59_transpose_y_0"), val = tensor(true)]; + tensor attn_59_cast = matmul(transpose_x = attn_59_transpose_x_0, transpose_y = attn_59_transpose_y_0, x = var_3260_cast, y = var_3264_cast)[name = tensor("attn_59_cast")]; + tensor var_3268 = const()[name = tensor("op_3268"), val = tensor([2, 1280, 1, -1])]; + tensor input_231_cast = reshape(shape = var_3268, x = attn_59_cast)[name = tensor("input_231_cast")]; + tensor var_3273 = const()[name = tensor("op_3273"), val = tensor([1, 1])]; + tensor var_3275 = const()[name = tensor("op_3275"), val = tensor([1, 1])]; + tensor var_3277_pad_type_0 = const()[name = tensor("op_3277_pad_type_0"), val = tensor("custom")]; + tensor var_3277_pad_0 = const()[name = tensor("op_3277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(294801344))), lut = tensor([-0x1.2d8p-6, -0x1.698p-8, 0x1.68cp-8, 0x1.2dp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(295211008)))]; + tensor var_3277_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_3275, groups = var_1186, pad = var_3277_pad_0, pad_type = var_3277_pad_type_0, strides = var_3273, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_231_cast)[name = tensor("op_3277_cast")]; + tensor inputs_89_cast = add(x = var_3277_cast, y = inputs_87_cast)[name = tensor("inputs_89_cast")]; + tensor var_3281 = const()[name = tensor("op_3281"), val = tensor([1])]; + tensor channels_mean_89_cast = reduce_mean(axes = var_3281, keep_dims = var_1181, x = inputs_89_cast)[name = tensor("channels_mean_89_cast")]; + tensor zero_mean_89_cast = sub(x = inputs_89_cast, y = channels_mean_89_cast)[name = tensor("zero_mean_89_cast")]; + tensor zero_mean_sq_89_cast = mul(x = zero_mean_89_cast, y = zero_mean_89_cast)[name = tensor("zero_mean_sq_89_cast")]; + tensor var_3285 = const()[name = tensor("op_3285"), val = tensor([1])]; + tensor var_3286_cast = reduce_mean(axes = var_3285, keep_dims = var_1181, x = zero_mean_sq_89_cast)[name = tensor("op_3286_cast")]; + tensor var_3287_to_fp16 = const()[name = tensor("op_3287_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3288_cast = add(x = var_3286_cast, y = var_3287_to_fp16)[name = tensor("op_3288_cast")]; + tensor denom_89_epsilon_0_to_fp16 = const()[name = tensor("denom_89_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_89_cast = rsqrt(epsilon = denom_89_epsilon_0_to_fp16, x = var_3288_cast)[name = tensor("denom_89_cast")]; + tensor out_89_cast = mul(x = zero_mean_89_cast, y = denom_89_cast)[name = tensor("out_89_cast")]; + tensor var_3292_to_fp16 = const()[name = tensor("op_3292_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(295213632)))]; + tensor var_3293_cast = add(x = out_89_cast, y = var_3292_to_fp16)[name = tensor("op_3293_cast")]; + tensor var_3295_to_fp16 = const()[name = tensor("op_3295_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(295216256)))]; + tensor input_233_cast = mul(x = var_3293_cast, y = var_3295_to_fp16)[name = tensor("input_233_cast")]; + tensor var_3303 = const()[name = tensor("op_3303"), val = tensor([1, 1])]; + tensor var_3305 = const()[name = tensor("op_3305"), val = tensor([1, 1])]; + tensor var_3307_pad_type_0 = const()[name = tensor("op_3307_pad_type_0"), val = tensor("custom")]; + tensor var_3307_pad_0 = const()[name = tensor("op_3307_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(295218880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(305049344))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(305049536)))]; + tensor var_3307_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_3305, groups = var_1186, pad = var_3307_pad_0, pad_type = var_3307_pad_type_0, strides = var_3303, weight = down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_233_cast)[name = tensor("op_3307_cast")]; + tensor var_3308_split_sizes_0 = const()[name = tensor("op_3308_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3308_axis_0 = const()[name = tensor("op_3308_axis_0"), val = tensor(1)]; + tensor var_3308_cast_0, tensor var_3308_cast_1 = split(axis = var_3308_axis_0, split_sizes = var_3308_split_sizes_0, x = var_3307_cast)[name = tensor("op_3308_cast")]; + tensor var_3310_mode_0 = const()[name = tensor("op_3310_mode_0"), val = tensor("EXACT")]; + tensor var_3310_cast = gelu(mode = var_3310_mode_0, x = var_3308_cast_1)[name = tensor("op_3310_cast")]; + tensor input_235_cast = mul(x = var_3308_cast_0, y = var_3310_cast)[name = tensor("input_235_cast")]; + tensor var_3314 = const()[name = tensor("op_3314"), val = tensor([1, 1])]; + tensor var_3316 = const()[name = tensor("op_3316"), val = tensor([1, 1])]; + tensor var_3318_pad_type_0 = const()[name = tensor("op_3318_pad_type_0"), val = tensor("custom")]; + tensor var_3318_pad_0 = const()[name = tensor("op_3318_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(305070080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(308346944))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val 
= tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(308347072)))]; + tensor var_3318_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_3316, groups = var_1186, pad = var_3318_pad_0, pad_type = var_3318_pad_type_0, strides = var_3314, weight = down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_235_cast)[name = tensor("op_3318_cast")]; + tensor inputs_91_cast = add(x = var_3318_cast, y = inputs_89_cast)[name = tensor("inputs_91_cast")]; + tensor var_3328 = const()[name = tensor("op_3328"), val = tensor([1])]; + tensor channels_mean_91_cast = reduce_mean(axes = var_3328, keep_dims = var_1181, x = inputs_91_cast)[name = tensor("channels_mean_91_cast")]; + tensor zero_mean_91_cast = sub(x = inputs_91_cast, y = channels_mean_91_cast)[name = tensor("zero_mean_91_cast")]; + tensor zero_mean_sq_91_cast = mul(x = zero_mean_91_cast, y = zero_mean_91_cast)[name = tensor("zero_mean_sq_91_cast")]; + tensor var_3332 = const()[name = tensor("op_3332"), val = tensor([1])]; + tensor var_3333_cast = reduce_mean(axes = var_3332, keep_dims = var_1181, x = zero_mean_sq_91_cast)[name = tensor("op_3333_cast")]; + tensor var_3334_to_fp16 = const()[name = tensor("op_3334_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3335_cast = add(x = var_3333_cast, y = var_3334_to_fp16)[name = tensor("op_3335_cast")]; + tensor denom_91_epsilon_0_to_fp16 = const()[name = tensor("denom_91_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_91_cast = rsqrt(epsilon = denom_91_epsilon_0_to_fp16, x = var_3335_cast)[name = tensor("denom_91_cast")]; + tensor out_91_cast = mul(x = zero_mean_91_cast, y = denom_91_cast)[name = tensor("out_91_cast")]; + tensor var_3339_to_fp16 = const()[name = tensor("op_3339_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(308349696)))]; + tensor var_3340_cast = add(x = out_91_cast, y = var_3339_to_fp16)[name = tensor("op_3340_cast")]; + tensor var_3342_to_fp16 = const()[name = tensor("op_3342_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(308352320)))]; + tensor hidden_states_143_cast = mul(x = var_3340_cast, y = var_3342_to_fp16)[name = tensor("hidden_states_143_cast")]; + tensor var_3349 = const()[name = tensor("op_3349"), val = tensor([1, 1])]; + tensor var_3351 = const()[name = tensor("op_3351"), val = tensor([1, 1])]; + tensor q_61_pad_type_0 = const()[name = tensor("q_61_pad_type_0"), val = tensor("custom")]; + tensor q_61_pad_0 = const()[name = tensor("q_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(308354944))), lut = tensor([-0x1.414p-5, -0x1.83cp-7, 0x1.814p-7, 0x1.414p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_61_cast = conv(dilations = var_3351, groups = var_1186, pad = q_61_pad_0, pad_type = q_61_pad_type_0, strides = var_3349, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_143_cast)[name = tensor("q_61_cast")]; + tensor var_3355 = const()[name = tensor("op_3355"), val = tensor([1, 1])]; + tensor var_3357 = const()[name = tensor("op_3357"), val = tensor([1, 1])]; + tensor 
k_61_pad_type_0 = const()[name = tensor("k_61_pad_type_0"), val = tensor("custom")]; + tensor k_61_pad_0 = const()[name = tensor("k_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(308764608))), lut = tensor([-0x1.444p-5, -0x1.84cp-7, 0x1.884p-7, 0x1.444p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_61_cast = conv(dilations = var_3357, groups = var_1186, pad = k_61_pad_0, pad_type = k_61_pad_type_0, strides = var_3355, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_143_cast)[name = tensor("k_61_cast")]; + tensor var_3361 = const()[name = tensor("op_3361"), val = tensor([1, 1])]; + tensor var_3363 = const()[name = tensor("op_3363"), val = tensor([1, 1])]; + tensor v_61_pad_type_0 = const()[name = tensor("v_61_pad_type_0"), val = tensor("custom")]; + tensor v_61_pad_0 = const()[name = tensor("v_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(309174272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(309993536))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_61_cast = conv(dilations = var_3363, groups = var_1186, pad = v_61_pad_0, pad_type = v_61_pad_type_0, strides = var_3361, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_143_cast)[name = tensor("v_61_cast")]; + tensor var_3367 = const()[name = tensor("op_3367"), val = tensor([2, 20, 64, -1])]; + tensor var_3368_cast = reshape(shape = var_3367, x = q_61_cast)[name = tensor("op_3368_cast")]; + tensor var_3369 = const()[name = tensor("op_3369"), val = tensor([2, 20, 64, -1])]; + tensor var_3370_cast = reshape(shape = var_3369, x = k_61_cast)[name = tensor("op_3370_cast")]; + tensor var_3371 = const()[name = tensor("op_3371"), val = tensor([2, 20, 64, -1])]; + tensor var_3372_cast = reshape(shape = var_3371, x = v_61_cast)[name = tensor("op_3372_cast")]; + tensor attn_weights_121_transpose_x_0 = const()[name = tensor("attn_weights_121_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_121_transpose_y_0 = const()[name = tensor("attn_weights_121_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_121_cast = matmul(transpose_x = attn_weights_121_transpose_x_0, transpose_y = attn_weights_121_transpose_y_0, x = var_3368_cast, y = var_3370_cast)[name = tensor("attn_weights_121_cast")]; + tensor attn_weights_123_cast = mul(x = attn_weights_121_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_123_cast")]; + tensor var_3376_cast = softmax(axis = var_1170, x = attn_weights_123_cast)[name = tensor("op_3376_cast")]; + tensor attn_61_transpose_x_0 = const()[name = tensor("attn_61_transpose_x_0"), val = tensor(false)]; + tensor attn_61_transpose_y_0 = const()[name = tensor("attn_61_transpose_y_0"), val = tensor(true)]; + tensor attn_61_cast = matmul(transpose_x = attn_61_transpose_x_0, transpose_y = attn_61_transpose_y_0, x = var_3372_cast, y = 
var_3376_cast)[name = tensor("attn_61_cast")]; + tensor var_3380 = const()[name = tensor("op_3380"), val = tensor([2, 1280, 1, -1])]; + tensor input_237_cast = reshape(shape = var_3380, x = attn_61_cast)[name = tensor("input_237_cast")]; + tensor var_3385 = const()[name = tensor("op_3385"), val = tensor([1, 1])]; + tensor var_3387 = const()[name = tensor("op_3387"), val = tensor([1, 1])]; + tensor var_3389_pad_type_0 = const()[name = tensor("op_3389_pad_type_0"), val = tensor("custom")]; + tensor var_3389_pad_0 = const()[name = tensor("op_3389_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(309993664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(310812928))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(310813056)))]; + tensor var_3389_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_3387, groups = var_1186, pad = var_3389_pad_0, pad_type = var_3389_pad_type_0, strides = var_3385, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_237_cast)[name = tensor("op_3389_cast")]; + tensor inputs_93_cast = add(x = var_3389_cast, y = inputs_91_cast)[name = tensor("inputs_93_cast")]; + tensor var_3393 = const()[name = tensor("op_3393"), val = tensor([1])]; + tensor channels_mean_93_cast = reduce_mean(axes = var_3393, keep_dims = var_1181, x = inputs_93_cast)[name = tensor("channels_mean_93_cast")]; + tensor zero_mean_93_cast = sub(x = inputs_93_cast, y = channels_mean_93_cast)[name = tensor("zero_mean_93_cast")]; + tensor zero_mean_sq_93_cast = mul(x = zero_mean_93_cast, y = zero_mean_93_cast)[name = tensor("zero_mean_sq_93_cast")]; + tensor var_3397 = const()[name = tensor("op_3397"), val = tensor([1])]; + tensor var_3398_cast = reduce_mean(axes = var_3397, keep_dims = var_1181, x = zero_mean_sq_93_cast)[name = tensor("op_3398_cast")]; + tensor var_3399_to_fp16 = const()[name = tensor("op_3399_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3400_cast = add(x = var_3398_cast, y = var_3399_to_fp16)[name = tensor("op_3400_cast")]; + tensor denom_93_epsilon_0_to_fp16 = const()[name = tensor("denom_93_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_93_cast = rsqrt(epsilon = denom_93_epsilon_0_to_fp16, x = var_3400_cast)[name = tensor("denom_93_cast")]; + tensor out_93_cast = mul(x = zero_mean_93_cast, y = denom_93_cast)[name = tensor("out_93_cast")]; + tensor var_3404_to_fp16 = const()[name = tensor("op_3404_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(310815680)))]; + tensor var_3405_cast = add(x = out_93_cast, y = var_3404_to_fp16)[name = tensor("op_3405_cast")]; + tensor var_3407_to_fp16 = const()[name = tensor("op_3407_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(310818304)))]; + tensor hidden_states_145_cast = mul(x = var_3405_cast, y = 
var_3407_to_fp16)[name = tensor("hidden_states_145_cast")]; + tensor var_3414 = const()[name = tensor("op_3414"), val = tensor([1, 1])]; + tensor var_3416 = const()[name = tensor("op_3416"), val = tensor([1, 1])]; + tensor q_63_pad_type_0 = const()[name = tensor("q_63_pad_type_0"), val = tensor("custom")]; + tensor q_63_pad_0 = const()[name = tensor("q_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(310820928))), lut = tensor([-0x1.284p-6, 0x1.274p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_63_cast = conv(dilations = var_3416, groups = var_1186, pad = q_63_pad_0, pad_type = q_63_pad_type_0, strides = var_3414, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_145_cast)[name = tensor("q_63_cast")]; + tensor var_3420 = const()[name = tensor("op_3420"), val = tensor([1, 1])]; + tensor var_3422 = const()[name = tensor("op_3422"), val = tensor([1, 1])]; + tensor k_63_pad_type_0 = const()[name = tensor("k_63_pad_type_0"), val = tensor("custom")]; + tensor k_63_pad_0 = const()[name = tensor("k_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(311025792))), lut = tensor([-0x1.f2p-6, -0x1.278p-7, 0x1.258p-7, 0x1.f1p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_63_cast = conv(dilations = var_3422, groups = var_1186, pad = k_63_pad_0, pad_type = k_63_pad_type_0, strides = var_3420, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_63_cast")]; + tensor var_3426 = const()[name = tensor("op_3426"), val = tensor([1, 1])]; + tensor var_3428 = const()[name = tensor("op_3428"), val = tensor([1, 1])]; + tensor v_63_pad_type_0 = const()[name = tensor("v_63_pad_type_0"), val = tensor("custom")]; + tensor v_63_pad_0 = const()[name = tensor("v_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(311681216))), lut = tensor([-0x1.19p-6, 0x1.19p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_63_cast = conv(dilations = var_3428, groups = var_1186, pad = v_63_pad_0, pad_type = v_63_pad_type_0, strides = var_3426, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_63_cast")]; + tensor var_3432 = const()[name = tensor("op_3432"), val = tensor([2, 20, 64, -1])]; + tensor var_3433_cast = reshape(shape = var_3432, x = q_63_cast)[name = tensor("op_3433_cast")]; + tensor var_3434 = const()[name = tensor("op_3434"), val = tensor([2, 20, 64, -1])]; + tensor var_3435_cast = reshape(shape = var_3434, x = k_63_cast)[name = tensor("op_3435_cast")]; + tensor var_3436 = 
const()[name = tensor("op_3436"), val = tensor([2, 20, 64, -1])]; + tensor var_3437_cast = reshape(shape = var_3436, x = v_63_cast)[name = tensor("op_3437_cast")]; + tensor attn_weights_125_transpose_x_0 = const()[name = tensor("attn_weights_125_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_125_transpose_y_0 = const()[name = tensor("attn_weights_125_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_125_cast = matmul(transpose_x = attn_weights_125_transpose_x_0, transpose_y = attn_weights_125_transpose_y_0, x = var_3433_cast, y = var_3435_cast)[name = tensor("attn_weights_125_cast")]; + tensor attn_weights_127_cast = mul(x = attn_weights_125_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_127_cast")]; + tensor var_3441_cast = softmax(axis = var_1170, x = attn_weights_127_cast)[name = tensor("op_3441_cast")]; + tensor attn_63_transpose_x_0 = const()[name = tensor("attn_63_transpose_x_0"), val = tensor(false)]; + tensor attn_63_transpose_y_0 = const()[name = tensor("attn_63_transpose_y_0"), val = tensor(true)]; + tensor attn_63_cast = matmul(transpose_x = attn_63_transpose_x_0, transpose_y = attn_63_transpose_y_0, x = var_3437_cast, y = var_3441_cast)[name = tensor("attn_63_cast")]; + tensor var_3445 = const()[name = tensor("op_3445"), val = tensor([2, 1280, 1, -1])]; + tensor input_239_cast = reshape(shape = var_3445, x = attn_63_cast)[name = tensor("input_239_cast")]; + tensor var_3450 = const()[name = tensor("op_3450"), val = tensor([1, 1])]; + tensor var_3452 = const()[name = tensor("op_3452"), val = tensor([1, 1])]; + tensor var_3454_pad_type_0 = const()[name = tensor("op_3454_pad_type_0"), val = tensor("custom")]; + tensor var_3454_pad_0 = const()[name = tensor("op_3454_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(312008960))), lut = tensor([-0x1.6ecp-7, 0x1.6ep-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(312213824)))]; + tensor var_3454_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_3452, groups = var_1186, pad = var_3454_pad_0, pad_type = var_3454_pad_type_0, strides = var_3450, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_239_cast)[name = tensor("op_3454_cast")]; + tensor inputs_95_cast = add(x = var_3454_cast, y = inputs_93_cast)[name = tensor("inputs_95_cast")]; + tensor var_3458 = const()[name = tensor("op_3458"), val = tensor([1])]; + tensor channels_mean_95_cast = reduce_mean(axes = var_3458, keep_dims = var_1181, x = inputs_95_cast)[name = tensor("channels_mean_95_cast")]; + tensor zero_mean_95_cast = sub(x = inputs_95_cast, y = channels_mean_95_cast)[name = tensor("zero_mean_95_cast")]; + tensor zero_mean_sq_95_cast = mul(x = zero_mean_95_cast, y = zero_mean_95_cast)[name = tensor("zero_mean_sq_95_cast")]; + tensor var_3462 = const()[name = tensor("op_3462"), val = tensor([1])]; + tensor var_3463_cast = reduce_mean(axes = var_3462, 
keep_dims = var_1181, x = zero_mean_sq_95_cast)[name = tensor("op_3463_cast")]; + tensor var_3464_to_fp16 = const()[name = tensor("op_3464_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3465_cast = add(x = var_3463_cast, y = var_3464_to_fp16)[name = tensor("op_3465_cast")]; + tensor denom_95_epsilon_0_to_fp16 = const()[name = tensor("denom_95_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_95_cast = rsqrt(epsilon = denom_95_epsilon_0_to_fp16, x = var_3465_cast)[name = tensor("denom_95_cast")]; + tensor out_95_cast = mul(x = zero_mean_95_cast, y = denom_95_cast)[name = tensor("out_95_cast")]; + tensor var_3469_to_fp16 = const()[name = tensor("op_3469_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(312216448)))]; + tensor var_3470_cast = add(x = out_95_cast, y = var_3469_to_fp16)[name = tensor("op_3470_cast")]; + tensor var_3472_to_fp16 = const()[name = tensor("op_3472_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(312219072)))]; + tensor input_241_cast = mul(x = var_3470_cast, y = var_3472_to_fp16)[name = tensor("input_241_cast")]; + tensor var_3480 = const()[name = tensor("op_3480"), val = tensor([1, 1])]; + tensor var_3482 = const()[name = tensor("op_3482"), val = tensor([1, 1])]; + tensor var_3484_pad_type_0 = const()[name = tensor("op_3484_pad_type_0"), val = tensor("custom")]; + tensor var_3484_pad_0 = const()[name = tensor("op_3484_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(312221696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(318775360))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(318775488)))]; + tensor var_3484_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_3482, groups = var_1186, pad = var_3484_pad_0, pad_type = var_3484_pad_type_0, strides = var_3480, weight = down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_241_cast)[name = tensor("op_3484_cast")]; + tensor var_3485_split_sizes_0 = const()[name = tensor("op_3485_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3485_axis_0 = const()[name = tensor("op_3485_axis_0"), val = tensor(1)]; + tensor var_3485_cast_0, tensor var_3485_cast_1 = split(axis = var_3485_axis_0, split_sizes = var_3485_split_sizes_0, x = var_3484_cast)[name = tensor("op_3485_cast")]; + tensor var_3487_mode_0 = const()[name = tensor("op_3487_mode_0"), val = tensor("EXACT")]; + tensor var_3487_cast = gelu(mode = var_3487_mode_0, x = var_3485_cast_1)[name = tensor("op_3487_cast")]; + tensor input_243_cast = mul(x = var_3485_cast_0, y = var_3487_cast)[name = tensor("input_243_cast")]; + tensor var_3491 = const()[name = tensor("op_3491"), val = tensor([1, 1])]; + tensor var_3493 = const()[name = tensor("op_3493"), val = tensor([1, 1])]; + tensor var_3495_pad_type_0 = const()[name = tensor("op_3495_pad_type_0"), 
val = tensor("custom")]; + tensor var_3495_pad_0 = const()[name = tensor("op_3495_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(318796032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(322072896))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(322073024)))]; + tensor var_3495_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_3493, groups = var_1186, pad = var_3495_pad_0, pad_type = var_3495_pad_type_0, strides = var_3491, weight = down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_243_cast)[name = tensor("op_3495_cast")]; + tensor inputs_97_cast = add(x = var_3495_cast, y = inputs_95_cast)[name = tensor("inputs_97_cast")]; + tensor var_3505 = const()[name = tensor("op_3505"), val = tensor([1])]; + tensor channels_mean_97_cast = reduce_mean(axes = var_3505, keep_dims = var_1181, x = inputs_97_cast)[name = tensor("channels_mean_97_cast")]; + tensor zero_mean_97_cast = sub(x = inputs_97_cast, y = channels_mean_97_cast)[name = tensor("zero_mean_97_cast")]; + tensor zero_mean_sq_97_cast = mul(x = zero_mean_97_cast, y = zero_mean_97_cast)[name = tensor("zero_mean_sq_97_cast")]; + tensor var_3509 = const()[name = tensor("op_3509"), val = tensor([1])]; + tensor var_3510_cast = reduce_mean(axes = var_3509, keep_dims = var_1181, x = zero_mean_sq_97_cast)[name = tensor("op_3510_cast")]; + tensor var_3511_to_fp16 = const()[name = tensor("op_3511_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3512_cast = add(x = var_3510_cast, y = var_3511_to_fp16)[name = tensor("op_3512_cast")]; + tensor denom_97_epsilon_0_to_fp16 = const()[name = tensor("denom_97_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_97_cast = rsqrt(epsilon = denom_97_epsilon_0_to_fp16, x = var_3512_cast)[name = tensor("denom_97_cast")]; + tensor out_97_cast = mul(x = zero_mean_97_cast, y = denom_97_cast)[name = tensor("out_97_cast")]; + tensor var_3516_to_fp16 = const()[name = tensor("op_3516_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(322075648)))]; + tensor var_3517_cast = add(x = out_97_cast, y = var_3516_to_fp16)[name = tensor("op_3517_cast")]; + tensor var_3519_to_fp16 = const()[name = tensor("op_3519_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(322078272)))]; + tensor hidden_states_149_cast = mul(x = var_3517_cast, y = var_3519_to_fp16)[name = tensor("hidden_states_149_cast")]; + tensor var_3526 = const()[name = tensor("op_3526"), val = tensor([1, 1])]; + tensor var_3528 = const()[name = tensor("op_3528"), val = tensor([1, 1])]; + tensor q_65_pad_type_0 = const()[name = tensor("q_65_pad_type_0"), val = tensor("custom")]; + tensor q_65_pad_0 = const()[name = tensor("q_65_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(322080896))), lut = tensor([-0x1.438p-5, -0x1.854p-7, 0x1.848p-7, 0x1.438p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_65_cast = conv(dilations = var_3528, groups = var_1186, pad = q_65_pad_0, pad_type = q_65_pad_type_0, strides = var_3526, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_149_cast)[name = tensor("q_65_cast")]; + tensor var_3532 = const()[name = tensor("op_3532"), val = tensor([1, 1])]; + tensor var_3534 = const()[name = tensor("op_3534"), val = tensor([1, 1])]; + tensor k_65_pad_type_0 = const()[name = tensor("k_65_pad_type_0"), val = tensor("custom")]; + tensor k_65_pad_0 = const()[name = tensor("k_65_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(322490560))), lut = tensor([-0x1.44cp-5, -0x1.86cp-7, 0x1.854p-7, 0x1.444p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_65_cast = conv(dilations = var_3534, groups = var_1186, pad = k_65_pad_0, pad_type = k_65_pad_type_0, strides = var_3532, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_149_cast)[name = tensor("k_65_cast")]; + tensor var_3538 = const()[name = tensor("op_3538"), val = tensor([1, 1])]; + tensor var_3540 = const()[name = tensor("op_3540"), val = tensor([1, 1])]; + tensor v_65_pad_type_0 = const()[name = tensor("v_65_pad_type_0"), val = tensor("custom")]; + tensor v_65_pad_0 = const()[name = tensor("v_65_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(322900224))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323719488))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_65_cast = conv(dilations = var_3540, groups = var_1186, pad = v_65_pad_0, pad_type = v_65_pad_type_0, strides = var_3538, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_149_cast)[name = tensor("v_65_cast")]; + tensor var_3544 = const()[name = tensor("op_3544"), val = tensor([2, 20, 64, -1])]; + tensor var_3545_cast = reshape(shape = var_3544, x = q_65_cast)[name = tensor("op_3545_cast")]; + tensor var_3546 = const()[name = tensor("op_3546"), val = tensor([2, 20, 64, -1])]; + tensor var_3547_cast = reshape(shape = var_3546, x = k_65_cast)[name = tensor("op_3547_cast")]; + tensor var_3548 = const()[name = tensor("op_3548"), val = tensor([2, 20, 64, -1])]; + tensor var_3549_cast = reshape(shape = var_3548, x = v_65_cast)[name = tensor("op_3549_cast")]; + tensor attn_weights_129_transpose_x_0 = const()[name = tensor("attn_weights_129_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_129_transpose_y_0 = const()[name = tensor("attn_weights_129_transpose_y_0"), val = tensor(false)]; 
+ tensor attn_weights_129_cast = matmul(transpose_x = attn_weights_129_transpose_x_0, transpose_y = attn_weights_129_transpose_y_0, x = var_3545_cast, y = var_3547_cast)[name = tensor("attn_weights_129_cast")]; + tensor attn_weights_131_cast = mul(x = attn_weights_129_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_131_cast")]; + tensor var_3553_cast = softmax(axis = var_1170, x = attn_weights_131_cast)[name = tensor("op_3553_cast")]; + tensor attn_65_transpose_x_0 = const()[name = tensor("attn_65_transpose_x_0"), val = tensor(false)]; + tensor attn_65_transpose_y_0 = const()[name = tensor("attn_65_transpose_y_0"), val = tensor(true)]; + tensor attn_65_cast = matmul(transpose_x = attn_65_transpose_x_0, transpose_y = attn_65_transpose_y_0, x = var_3549_cast, y = var_3553_cast)[name = tensor("attn_65_cast")]; + tensor var_3557 = const()[name = tensor("op_3557"), val = tensor([2, 1280, 1, -1])]; + tensor input_245_cast = reshape(shape = var_3557, x = attn_65_cast)[name = tensor("input_245_cast")]; + tensor var_3562 = const()[name = tensor("op_3562"), val = tensor([1, 1])]; + tensor var_3564 = const()[name = tensor("op_3564"), val = tensor([1, 1])]; + tensor var_3566_pad_type_0 = const()[name = tensor("op_3566_pad_type_0"), val = tensor("custom")]; + tensor var_3566_pad_0 = const()[name = tensor("op_3566_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323719616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(324538880))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(324539008)))]; + tensor var_3566_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_3564, groups = var_1186, pad = var_3566_pad_0, pad_type = var_3566_pad_type_0, strides = var_3562, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_245_cast)[name = tensor("op_3566_cast")]; + tensor inputs_99_cast = add(x = var_3566_cast, y = inputs_97_cast)[name = tensor("inputs_99_cast")]; + tensor var_3570 = const()[name = tensor("op_3570"), val = tensor([1])]; + tensor channels_mean_99_cast = reduce_mean(axes = var_3570, keep_dims = var_1181, x = inputs_99_cast)[name = tensor("channels_mean_99_cast")]; + tensor zero_mean_99_cast = sub(x = inputs_99_cast, y = channels_mean_99_cast)[name = tensor("zero_mean_99_cast")]; + tensor zero_mean_sq_99_cast = mul(x = zero_mean_99_cast, y = zero_mean_99_cast)[name = tensor("zero_mean_sq_99_cast")]; + tensor var_3574 = const()[name = tensor("op_3574"), val = tensor([1])]; + tensor var_3575_cast = reduce_mean(axes = var_3574, keep_dims = var_1181, x = zero_mean_sq_99_cast)[name = tensor("op_3575_cast")]; + tensor var_3576_to_fp16 = const()[name = tensor("op_3576_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3577_cast = add(x = var_3575_cast, y = var_3576_to_fp16)[name = tensor("op_3577_cast")]; + tensor denom_99_epsilon_0_to_fp16 = const()[name = 
tensor("denom_99_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_99_cast = rsqrt(epsilon = denom_99_epsilon_0_to_fp16, x = var_3577_cast)[name = tensor("denom_99_cast")]; + tensor out_99_cast = mul(x = zero_mean_99_cast, y = denom_99_cast)[name = tensor("out_99_cast")]; + tensor var_3581_to_fp16 = const()[name = tensor("op_3581_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(324541632)))]; + tensor var_3582_cast = add(x = out_99_cast, y = var_3581_to_fp16)[name = tensor("op_3582_cast")]; + tensor var_3584_to_fp16 = const()[name = tensor("op_3584_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(324544256)))]; + tensor hidden_states_151_cast = mul(x = var_3582_cast, y = var_3584_to_fp16)[name = tensor("hidden_states_151_cast")]; + tensor var_3591 = const()[name = tensor("op_3591"), val = tensor([1, 1])]; + tensor var_3593 = const()[name = tensor("op_3593"), val = tensor([1, 1])]; + tensor q_67_pad_type_0 = const()[name = tensor("q_67_pad_type_0"), val = tensor("custom")]; + tensor q_67_pad_0 = const()[name = tensor("q_67_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(324546880))), lut = tensor([-0x1.354p-6, 0x1.364p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_67_cast = conv(dilations = var_3593, groups = var_1186, pad = q_67_pad_0, pad_type = q_67_pad_type_0, strides = var_3591, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_151_cast)[name = tensor("q_67_cast")]; + tensor var_3597 = const()[name = tensor("op_3597"), val = tensor([1, 1])]; + tensor var_3599 = const()[name = tensor("op_3599"), val = tensor([1, 1])]; + tensor k_67_pad_type_0 = const()[name = tensor("k_67_pad_type_0"), val = tensor("custom")]; + tensor k_67_pad_0 = const()[name = tensor("k_67_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(324751744))), lut = tensor([-0x1.08p-6, 0x1.084p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_67_cast = conv(dilations = var_3599, groups = var_1186, pad = k_67_pad_0, pad_type = k_67_pad_type_0, strides = var_3597, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_67_cast")]; + tensor var_3603 = const()[name = tensor("op_3603"), val = tensor([1, 1])]; + tensor var_3605 = const()[name = tensor("op_3605"), val = tensor([1, 1])]; + tensor v_67_pad_type_0 = const()[name = tensor("v_67_pad_type_0"), val = tensor("custom")]; + tensor v_67_pad_0 = const()[name = tensor("v_67_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(325079488))), lut = tensor([-0x1.1f4p-6, 0x1.2p-6]), name = 
tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_67_cast = conv(dilations = var_3605, groups = var_1186, pad = v_67_pad_0, pad_type = v_67_pad_type_0, strides = var_3603, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_67_cast")]; + tensor var_3609 = const()[name = tensor("op_3609"), val = tensor([2, 20, 64, -1])]; + tensor var_3610_cast = reshape(shape = var_3609, x = q_67_cast)[name = tensor("op_3610_cast")]; + tensor var_3611 = const()[name = tensor("op_3611"), val = tensor([2, 20, 64, -1])]; + tensor var_3612_cast = reshape(shape = var_3611, x = k_67_cast)[name = tensor("op_3612_cast")]; + tensor var_3613 = const()[name = tensor("op_3613"), val = tensor([2, 20, 64, -1])]; + tensor var_3614_cast = reshape(shape = var_3613, x = v_67_cast)[name = tensor("op_3614_cast")]; + tensor attn_weights_133_transpose_x_0 = const()[name = tensor("attn_weights_133_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_133_transpose_y_0 = const()[name = tensor("attn_weights_133_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_133_cast = matmul(transpose_x = attn_weights_133_transpose_x_0, transpose_y = attn_weights_133_transpose_y_0, x = var_3610_cast, y = var_3612_cast)[name = tensor("attn_weights_133_cast")]; + tensor attn_weights_135_cast = mul(x = attn_weights_133_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_135_cast")]; + tensor var_3618_cast = softmax(axis = var_1170, x = attn_weights_135_cast)[name = tensor("op_3618_cast")]; + tensor attn_67_transpose_x_0 = const()[name = tensor("attn_67_transpose_x_0"), val = tensor(false)]; + tensor attn_67_transpose_y_0 = const()[name = tensor("attn_67_transpose_y_0"), val = tensor(true)]; + tensor attn_67_cast = matmul(transpose_x = attn_67_transpose_x_0, transpose_y = attn_67_transpose_y_0, x = var_3614_cast, y = var_3618_cast)[name = tensor("attn_67_cast")]; + tensor var_3622 = const()[name = tensor("op_3622"), val = tensor([2, 1280, 1, -1])]; + tensor input_247_cast = reshape(shape = var_3622, x = attn_67_cast)[name = tensor("input_247_cast")]; + tensor var_3627 = const()[name = tensor("op_3627"), val = tensor([1, 1])]; + tensor var_3629 = const()[name = tensor("op_3629"), val = tensor([1, 1])]; + tensor var_3631_pad_type_0 = const()[name = tensor("op_3631_pad_type_0"), val = tensor("custom")]; + tensor var_3631_pad_0 = const()[name = tensor("op_3631_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(325407232))), lut = tensor([-0x1.6ecp-7, 0x1.6fp-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(325612096)))]; + tensor var_3631_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_3629, groups = var_1186, pad = var_3631_pad_0, pad_type = var_3631_pad_type_0, strides = var_3627, weight = 
down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_247_cast)[name = tensor("op_3631_cast")]; + tensor inputs_101_cast = add(x = var_3631_cast, y = inputs_99_cast)[name = tensor("inputs_101_cast")]; + tensor var_3635 = const()[name = tensor("op_3635"), val = tensor([1])]; + tensor channels_mean_101_cast = reduce_mean(axes = var_3635, keep_dims = var_1181, x = inputs_101_cast)[name = tensor("channels_mean_101_cast")]; + tensor zero_mean_101_cast = sub(x = inputs_101_cast, y = channels_mean_101_cast)[name = tensor("zero_mean_101_cast")]; + tensor zero_mean_sq_101_cast = mul(x = zero_mean_101_cast, y = zero_mean_101_cast)[name = tensor("zero_mean_sq_101_cast")]; + tensor var_3639 = const()[name = tensor("op_3639"), val = tensor([1])]; + tensor var_3640_cast = reduce_mean(axes = var_3639, keep_dims = var_1181, x = zero_mean_sq_101_cast)[name = tensor("op_3640_cast")]; + tensor var_3641_to_fp16 = const()[name = tensor("op_3641_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3642_cast = add(x = var_3640_cast, y = var_3641_to_fp16)[name = tensor("op_3642_cast")]; + tensor denom_101_epsilon_0_to_fp16 = const()[name = tensor("denom_101_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_101_cast = rsqrt(epsilon = denom_101_epsilon_0_to_fp16, x = var_3642_cast)[name = tensor("denom_101_cast")]; + tensor out_101_cast = mul(x = zero_mean_101_cast, y = denom_101_cast)[name = tensor("out_101_cast")]; + tensor var_3646_to_fp16 = const()[name = tensor("op_3646_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(325614720)))]; + tensor var_3647_cast = add(x = out_101_cast, y = var_3646_to_fp16)[name = tensor("op_3647_cast")]; + tensor var_3649_to_fp16 = const()[name = tensor("op_3649_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(325617344)))]; + tensor input_249_cast = mul(x = var_3647_cast, y = var_3649_to_fp16)[name = tensor("input_249_cast")]; + tensor var_3657 = const()[name = tensor("op_3657"), val = tensor([1, 1])]; + tensor var_3659 = const()[name = tensor("op_3659"), val = tensor([1, 1])]; + tensor var_3661_pad_type_0 = const()[name = tensor("op_3661_pad_type_0"), val = tensor("custom")]; + tensor var_3661_pad_0 = const()[name = tensor("op_3661_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(325619968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(332173632))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(332173760)))]; + tensor var_3661_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_3659, groups = var_1186, pad = var_3661_pad_0, pad_type = var_3661_pad_type_0, strides = var_3657, weight = down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_249_cast)[name = tensor("op_3661_cast")]; + tensor var_3662_split_sizes_0 = 
const()[name = tensor("op_3662_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3662_axis_0 = const()[name = tensor("op_3662_axis_0"), val = tensor(1)]; + tensor var_3662_cast_0, tensor var_3662_cast_1 = split(axis = var_3662_axis_0, split_sizes = var_3662_split_sizes_0, x = var_3661_cast)[name = tensor("op_3662_cast")]; + tensor var_3664_mode_0 = const()[name = tensor("op_3664_mode_0"), val = tensor("EXACT")]; + tensor var_3664_cast = gelu(mode = var_3664_mode_0, x = var_3662_cast_1)[name = tensor("op_3664_cast")]; + tensor input_251_cast = mul(x = var_3662_cast_0, y = var_3664_cast)[name = tensor("input_251_cast")]; + tensor var_3668 = const()[name = tensor("op_3668"), val = tensor([1, 1])]; + tensor var_3670 = const()[name = tensor("op_3670"), val = tensor([1, 1])]; + tensor var_3672_pad_type_0 = const()[name = tensor("op_3672_pad_type_0"), val = tensor("custom")]; + tensor var_3672_pad_0 = const()[name = tensor("op_3672_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(332194304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335471168))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335471296)))]; + tensor var_3672_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_3670, groups = var_1186, pad = var_3672_pad_0, pad_type = var_3672_pad_type_0, strides = var_3668, weight = down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_251_cast)[name = tensor("op_3672_cast")]; + tensor inputs_103_cast = add(x = var_3672_cast, y = inputs_101_cast)[name = tensor("inputs_103_cast")]; + tensor var_3682 = const()[name = tensor("op_3682"), val = tensor([1])]; + tensor channels_mean_103_cast = reduce_mean(axes = var_3682, keep_dims = var_1181, x = inputs_103_cast)[name = tensor("channels_mean_103_cast")]; + tensor zero_mean_103_cast = sub(x = inputs_103_cast, y = channels_mean_103_cast)[name = tensor("zero_mean_103_cast")]; + tensor zero_mean_sq_103_cast = mul(x = zero_mean_103_cast, y = zero_mean_103_cast)[name = tensor("zero_mean_sq_103_cast")]; + tensor var_3686 = const()[name = tensor("op_3686"), val = tensor([1])]; + tensor var_3687_cast = reduce_mean(axes = var_3686, keep_dims = var_1181, x = zero_mean_sq_103_cast)[name = tensor("op_3687_cast")]; + tensor var_3688_to_fp16 = const()[name = tensor("op_3688_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3689_cast = add(x = var_3687_cast, y = var_3688_to_fp16)[name = tensor("op_3689_cast")]; + tensor denom_103_epsilon_0_to_fp16 = const()[name = tensor("denom_103_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_103_cast = rsqrt(epsilon = denom_103_epsilon_0_to_fp16, x = var_3689_cast)[name = tensor("denom_103_cast")]; + tensor out_103_cast = mul(x = zero_mean_103_cast, y = denom_103_cast)[name = tensor("out_103_cast")]; + tensor var_3693_to_fp16 = const()[name = tensor("op_3693_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(335473920)))]; + tensor var_3694_cast = add(x = out_103_cast, y = var_3693_to_fp16)[name = tensor("op_3694_cast")]; + tensor var_3696_to_fp16 = const()[name = tensor("op_3696_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335476544)))]; + tensor hidden_states_155_cast = mul(x = var_3694_cast, y = var_3696_to_fp16)[name = tensor("hidden_states_155_cast")]; + tensor var_3703 = const()[name = tensor("op_3703"), val = tensor([1, 1])]; + tensor var_3705 = const()[name = tensor("op_3705"), val = tensor([1, 1])]; + tensor q_69_pad_type_0 = const()[name = tensor("q_69_pad_type_0"), val = tensor("custom")]; + tensor q_69_pad_0 = const()[name = tensor("q_69_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335479168))), lut = tensor([-0x1.568p-6, 0x1.58p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_69_cast = conv(dilations = var_3705, groups = var_1186, pad = q_69_pad_0, pad_type = q_69_pad_type_0, strides = var_3703, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_155_cast)[name = tensor("q_69_cast")]; + tensor var_3709 = const()[name = tensor("op_3709"), val = tensor([1, 1])]; + tensor var_3711 = const()[name = tensor("op_3711"), val = tensor([1, 1])]; + tensor k_69_pad_type_0 = const()[name = tensor("k_69_pad_type_0"), val = tensor("custom")]; + tensor k_69_pad_0 = const()[name = tensor("k_69_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335684032))), lut = tensor([-0x1.574p-6, 0x1.56cp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_69_cast = conv(dilations = var_3711, groups = var_1186, pad = k_69_pad_0, pad_type = k_69_pad_type_0, strides = var_3709, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_155_cast)[name = tensor("k_69_cast")]; + tensor var_3715 = const()[name = tensor("op_3715"), val = tensor([1, 1])]; + tensor var_3717 = const()[name = tensor("op_3717"), val = tensor([1, 1])]; + tensor v_69_pad_type_0 = const()[name = tensor("v_69_pad_type_0"), val = tensor("custom")]; + tensor v_69_pad_0 = const()[name = tensor("v_69_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335888896))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336708160))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_69_cast = conv(dilations = var_3717, groups = var_1186, pad = v_69_pad_0, pad_type = v_69_pad_type_0, strides = var_3715, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x 
= hidden_states_155_cast)[name = tensor("v_69_cast")]; + tensor var_3721 = const()[name = tensor("op_3721"), val = tensor([2, 20, 64, -1])]; + tensor var_3722_cast = reshape(shape = var_3721, x = q_69_cast)[name = tensor("op_3722_cast")]; + tensor var_3723 = const()[name = tensor("op_3723"), val = tensor([2, 20, 64, -1])]; + tensor var_3724_cast = reshape(shape = var_3723, x = k_69_cast)[name = tensor("op_3724_cast")]; + tensor var_3725 = const()[name = tensor("op_3725"), val = tensor([2, 20, 64, -1])]; + tensor var_3726_cast = reshape(shape = var_3725, x = v_69_cast)[name = tensor("op_3726_cast")]; + tensor attn_weights_137_transpose_x_0 = const()[name = tensor("attn_weights_137_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_137_transpose_y_0 = const()[name = tensor("attn_weights_137_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_137_cast = matmul(transpose_x = attn_weights_137_transpose_x_0, transpose_y = attn_weights_137_transpose_y_0, x = var_3722_cast, y = var_3724_cast)[name = tensor("attn_weights_137_cast")]; + tensor attn_weights_139_cast = mul(x = attn_weights_137_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_139_cast")]; + tensor var_3730_cast = softmax(axis = var_1170, x = attn_weights_139_cast)[name = tensor("op_3730_cast")]; + tensor attn_69_transpose_x_0 = const()[name = tensor("attn_69_transpose_x_0"), val = tensor(false)]; + tensor attn_69_transpose_y_0 = const()[name = tensor("attn_69_transpose_y_0"), val = tensor(true)]; + tensor attn_69_cast = matmul(transpose_x = attn_69_transpose_x_0, transpose_y = attn_69_transpose_y_0, x = var_3726_cast, y = var_3730_cast)[name = tensor("attn_69_cast")]; + tensor var_3734 = const()[name = tensor("op_3734"), val = tensor([2, 1280, 1, -1])]; + tensor input_253_cast = reshape(shape = var_3734, x = attn_69_cast)[name = tensor("input_253_cast")]; + tensor var_3739 = const()[name = tensor("op_3739"), val = tensor([1, 1])]; + tensor var_3741 = const()[name = tensor("op_3741"), val = tensor([1, 1])]; + tensor var_3743_pad_type_0 = const()[name = tensor("op_3743_pad_type_0"), val = tensor("custom")]; + tensor var_3743_pad_0 = const()[name = tensor("op_3743_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336708288))), lut = tensor([-0x1.35cp-5, -0x1.74p-7, 0x1.74p-7, 0x1.358p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(337117952)))]; + tensor var_3743_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_3741, groups = var_1186, pad = var_3743_pad_0, pad_type = var_3743_pad_type_0, strides = var_3739, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_253_cast)[name = tensor("op_3743_cast")]; + tensor inputs_105_cast = add(x = var_3743_cast, y = inputs_103_cast)[name = tensor("inputs_105_cast")]; + tensor var_3747 = const()[name = tensor("op_3747"), val = tensor([1])]; + tensor channels_mean_105_cast = 
reduce_mean(axes = var_3747, keep_dims = var_1181, x = inputs_105_cast)[name = tensor("channels_mean_105_cast")]; + tensor zero_mean_105_cast = sub(x = inputs_105_cast, y = channels_mean_105_cast)[name = tensor("zero_mean_105_cast")]; + tensor zero_mean_sq_105_cast = mul(x = zero_mean_105_cast, y = zero_mean_105_cast)[name = tensor("zero_mean_sq_105_cast")]; + tensor var_3751 = const()[name = tensor("op_3751"), val = tensor([1])]; + tensor var_3752_cast = reduce_mean(axes = var_3751, keep_dims = var_1181, x = zero_mean_sq_105_cast)[name = tensor("op_3752_cast")]; + tensor var_3753_to_fp16 = const()[name = tensor("op_3753_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3754_cast = add(x = var_3752_cast, y = var_3753_to_fp16)[name = tensor("op_3754_cast")]; + tensor denom_105_epsilon_0_to_fp16 = const()[name = tensor("denom_105_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_105_cast = rsqrt(epsilon = denom_105_epsilon_0_to_fp16, x = var_3754_cast)[name = tensor("denom_105_cast")]; + tensor out_105_cast = mul(x = zero_mean_105_cast, y = denom_105_cast)[name = tensor("out_105_cast")]; + tensor var_3758_to_fp16 = const()[name = tensor("op_3758_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(337120576)))]; + tensor var_3759_cast = add(x = out_105_cast, y = var_3758_to_fp16)[name = tensor("op_3759_cast")]; + tensor var_3761_to_fp16 = const()[name = tensor("op_3761_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(337123200)))]; + tensor hidden_states_157_cast = mul(x = var_3759_cast, y = var_3761_to_fp16)[name = tensor("hidden_states_157_cast")]; + tensor var_3768 = const()[name = tensor("op_3768"), val = tensor([1, 1])]; + tensor var_3770 = const()[name = tensor("op_3770"), val = tensor([1, 1])]; + tensor q_71_pad_type_0 = const()[name = tensor("q_71_pad_type_0"), val = tensor("custom")]; + tensor q_71_pad_0 = const()[name = tensor("q_71_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(337125824))), lut = tensor([-0x1.26cp-6, 0x1.268p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_71_cast = conv(dilations = var_3770, groups = var_1186, pad = q_71_pad_0, pad_type = q_71_pad_type_0, strides = var_3768, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_157_cast)[name = tensor("q_71_cast")]; + tensor var_3774 = const()[name = tensor("op_3774"), val = tensor([1, 1])]; + tensor var_3776 = const()[name = tensor("op_3776"), val = tensor([1, 1])]; + tensor k_71_pad_type_0 = const()[name = tensor("k_71_pad_type_0"), val = tensor("custom")]; + tensor k_71_pad_0 = const()[name = tensor("k_71_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(337330688))), lut = tensor([-0x1.f2cp-7, 0x1.f4cp-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_71_cast = conv(dilations = var_3776, groups = var_1186, pad = k_71_pad_0, pad_type = 
k_71_pad_type_0, strides = var_3774, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_71_cast")]; + tensor var_3780 = const()[name = tensor("op_3780"), val = tensor([1, 1])]; + tensor var_3782 = const()[name = tensor("op_3782"), val = tensor([1, 1])]; + tensor v_71_pad_type_0 = const()[name = tensor("v_71_pad_type_0"), val = tensor("custom")]; + tensor v_71_pad_0 = const()[name = tensor("v_71_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(337658432))), lut = tensor([-0x1.1ecp-6, 0x1.1f4p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_71_cast = conv(dilations = var_3782, groups = var_1186, pad = v_71_pad_0, pad_type = v_71_pad_type_0, strides = var_3780, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_71_cast")]; + tensor var_3786 = const()[name = tensor("op_3786"), val = tensor([2, 20, 64, -1])]; + tensor var_3787_cast = reshape(shape = var_3786, x = q_71_cast)[name = tensor("op_3787_cast")]; + tensor var_3788 = const()[name = tensor("op_3788"), val = tensor([2, 20, 64, -1])]; + tensor var_3789_cast = reshape(shape = var_3788, x = k_71_cast)[name = tensor("op_3789_cast")]; + tensor var_3790 = const()[name = tensor("op_3790"), val = tensor([2, 20, 64, -1])]; + tensor var_3791_cast = reshape(shape = var_3790, x = v_71_cast)[name = tensor("op_3791_cast")]; + tensor attn_weights_141_transpose_x_0 = const()[name = tensor("attn_weights_141_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_141_transpose_y_0 = const()[name = tensor("attn_weights_141_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_141_cast = matmul(transpose_x = attn_weights_141_transpose_x_0, transpose_y = attn_weights_141_transpose_y_0, x = var_3787_cast, y = var_3789_cast)[name = tensor("attn_weights_141_cast")]; + tensor attn_weights_143_cast = mul(x = attn_weights_141_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_143_cast")]; + tensor var_3795_cast = softmax(axis = var_1170, x = attn_weights_143_cast)[name = tensor("op_3795_cast")]; + tensor attn_71_transpose_x_0 = const()[name = tensor("attn_71_transpose_x_0"), val = tensor(false)]; + tensor attn_71_transpose_y_0 = const()[name = tensor("attn_71_transpose_y_0"), val = tensor(true)]; + tensor attn_71_cast = matmul(transpose_x = attn_71_transpose_x_0, transpose_y = attn_71_transpose_y_0, x = var_3791_cast, y = var_3795_cast)[name = tensor("attn_71_cast")]; + tensor var_3799 = const()[name = tensor("op_3799"), val = tensor([2, 1280, 1, -1])]; + tensor input_255_cast = reshape(shape = var_3799, x = attn_71_cast)[name = tensor("input_255_cast")]; + tensor var_3804 = const()[name = tensor("op_3804"), val = tensor([1, 1])]; + tensor var_3806 = const()[name = tensor("op_3806"), val = tensor([1, 1])]; + tensor var_3808_pad_type_0 = const()[name = tensor("op_3808_pad_type_0"), val = tensor("custom")]; + tensor var_3808_pad_0 = const()[name = tensor("op_3808_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(337986176))), lut = tensor([-0x1.618p-7, 0x1.628p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338191040)))]; + tensor var_3808_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_3806, groups = var_1186, pad = var_3808_pad_0, pad_type = var_3808_pad_type_0, strides = var_3804, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_255_cast)[name = tensor("op_3808_cast")]; + tensor inputs_107_cast = add(x = var_3808_cast, y = inputs_105_cast)[name = tensor("inputs_107_cast")]; + tensor var_3812 = const()[name = tensor("op_3812"), val = tensor([1])]; + tensor channels_mean_107_cast = reduce_mean(axes = var_3812, keep_dims = var_1181, x = inputs_107_cast)[name = tensor("channels_mean_107_cast")]; + tensor zero_mean_107_cast = sub(x = inputs_107_cast, y = channels_mean_107_cast)[name = tensor("zero_mean_107_cast")]; + tensor zero_mean_sq_107_cast = mul(x = zero_mean_107_cast, y = zero_mean_107_cast)[name = tensor("zero_mean_sq_107_cast")]; + tensor var_3816 = const()[name = tensor("op_3816"), val = tensor([1])]; + tensor var_3817_cast = reduce_mean(axes = var_3816, keep_dims = var_1181, x = zero_mean_sq_107_cast)[name = tensor("op_3817_cast")]; + tensor var_3818_to_fp16 = const()[name = tensor("op_3818_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3819_cast = add(x = var_3817_cast, y = var_3818_to_fp16)[name = tensor("op_3819_cast")]; + tensor denom_107_epsilon_0_to_fp16 = const()[name = tensor("denom_107_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_107_cast = rsqrt(epsilon = denom_107_epsilon_0_to_fp16, x = var_3819_cast)[name = tensor("denom_107_cast")]; + tensor out_107_cast = mul(x = zero_mean_107_cast, y = denom_107_cast)[name = tensor("out_107_cast")]; + tensor var_3823_to_fp16 = const()[name = tensor("op_3823_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338193664)))]; + tensor var_3824_cast = add(x = out_107_cast, y = var_3823_to_fp16)[name = tensor("op_3824_cast")]; + tensor var_3826_to_fp16 = const()[name = tensor("op_3826_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338196288)))]; + tensor input_257_cast = mul(x = var_3824_cast, y = var_3826_to_fp16)[name = tensor("input_257_cast")]; + tensor var_3834 = const()[name = tensor("op_3834"), val = tensor([1, 1])]; + tensor var_3836 = const()[name = tensor("op_3836"), val = tensor([1, 1])]; + tensor var_3838_pad_type_0 = const()[name = tensor("op_3838_pad_type_0"), val = tensor("custom")]; + tensor var_3838_pad_0 = const()[name = tensor("op_3838_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338198912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(344752576))), name = 
tensor("down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(344752704)))]; + tensor var_3838_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_3836, groups = var_1186, pad = var_3838_pad_0, pad_type = var_3838_pad_type_0, strides = var_3834, weight = down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_257_cast)[name = tensor("op_3838_cast")]; + tensor var_3839_split_sizes_0 = const()[name = tensor("op_3839_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3839_axis_0 = const()[name = tensor("op_3839_axis_0"), val = tensor(1)]; + tensor var_3839_cast_0, tensor var_3839_cast_1 = split(axis = var_3839_axis_0, split_sizes = var_3839_split_sizes_0, x = var_3838_cast)[name = tensor("op_3839_cast")]; + tensor var_3841_mode_0 = const()[name = tensor("op_3841_mode_0"), val = tensor("EXACT")]; + tensor var_3841_cast = gelu(mode = var_3841_mode_0, x = var_3839_cast_1)[name = tensor("op_3841_cast")]; + tensor input_259_cast = mul(x = var_3839_cast_0, y = var_3841_cast)[name = tensor("input_259_cast")]; + tensor var_3845 = const()[name = tensor("op_3845"), val = tensor([1, 1])]; + tensor var_3847 = const()[name = tensor("op_3847"), val = tensor([1, 1])]; + tensor var_3849_pad_type_0 = const()[name = tensor("op_3849_pad_type_0"), val = tensor("custom")]; + tensor var_3849_pad_0 = const()[name = tensor("op_3849_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(344773248))), lut = tensor([-0x1.4c4p-5, -0x1.8dp-7, 0x1.8dcp-7, 0x1.4c4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(346411712)))]; + tensor var_3849_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_3847, groups = var_1186, pad = var_3849_pad_0, pad_type = var_3849_pad_type_0, strides = var_3845, weight = down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_259_cast)[name = tensor("op_3849_cast")]; + tensor inputs_109_cast = add(x = var_3849_cast, y = inputs_107_cast)[name = tensor("inputs_109_cast")]; + tensor var_3859 = const()[name = tensor("op_3859"), val = tensor([1])]; + tensor channels_mean_109_cast = reduce_mean(axes = var_3859, keep_dims = var_1181, x = inputs_109_cast)[name = tensor("channels_mean_109_cast")]; + tensor zero_mean_109_cast = sub(x = inputs_109_cast, y = channels_mean_109_cast)[name = tensor("zero_mean_109_cast")]; + tensor zero_mean_sq_109_cast = mul(x = zero_mean_109_cast, y = zero_mean_109_cast)[name = tensor("zero_mean_sq_109_cast")]; + tensor var_3863 = const()[name = tensor("op_3863"), val = tensor([1])]; + 
tensor var_3864_cast = reduce_mean(axes = var_3863, keep_dims = var_1181, x = zero_mean_sq_109_cast)[name = tensor("op_3864_cast")]; + tensor var_3865_to_fp16 = const()[name = tensor("op_3865_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3866_cast = add(x = var_3864_cast, y = var_3865_to_fp16)[name = tensor("op_3866_cast")]; + tensor denom_109_epsilon_0_to_fp16 = const()[name = tensor("denom_109_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_109_cast = rsqrt(epsilon = denom_109_epsilon_0_to_fp16, x = var_3866_cast)[name = tensor("denom_109_cast")]; + tensor out_109_cast = mul(x = zero_mean_109_cast, y = denom_109_cast)[name = tensor("out_109_cast")]; + tensor var_3870_to_fp16 = const()[name = tensor("op_3870_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(346414336)))]; + tensor var_3871_cast = add(x = out_109_cast, y = var_3870_to_fp16)[name = tensor("op_3871_cast")]; + tensor var_3873_to_fp16 = const()[name = tensor("op_3873_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(346416960)))]; + tensor hidden_states_161_cast = mul(x = var_3871_cast, y = var_3873_to_fp16)[name = tensor("hidden_states_161_cast")]; + tensor var_3880 = const()[name = tensor("op_3880"), val = tensor([1, 1])]; + tensor var_3882 = const()[name = tensor("op_3882"), val = tensor([1, 1])]; + tensor q_73_pad_type_0 = const()[name = tensor("q_73_pad_type_0"), val = tensor("custom")]; + tensor q_73_pad_0 = const()[name = tensor("q_73_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(346419584))), lut = tensor([-0x1.3dcp-5, -0x1.7dp-7, 0x1.7f8p-7, 0x1.3e8p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_73_cast = conv(dilations = var_3882, groups = var_1186, pad = q_73_pad_0, pad_type = q_73_pad_type_0, strides = var_3880, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_161_cast)[name = tensor("q_73_cast")]; + tensor var_3886 = const()[name = tensor("op_3886"), val = tensor([1, 1])]; + tensor var_3888 = const()[name = tensor("op_3888"), val = tensor([1, 1])]; + tensor k_73_pad_type_0 = const()[name = tensor("k_73_pad_type_0"), val = tensor("custom")]; + tensor k_73_pad_0 = const()[name = tensor("k_73_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(346829248))), lut = tensor([-0x1.514p-6, 0x1.52p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_73_cast = conv(dilations = var_3888, groups = var_1186, pad = k_73_pad_0, pad_type = k_73_pad_type_0, strides = var_3886, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_161_cast)[name = tensor("k_73_cast")]; + tensor var_3892 = const()[name = tensor("op_3892"), val = tensor([1, 1])]; + tensor var_3894 = const()[name = tensor("op_3894"), val = tensor([1, 1])]; + tensor v_73_pad_type_0 = const()[name = tensor("v_73_pad_type_0"), val = 
tensor("custom")]; + tensor v_73_pad_0 = const()[name = tensor("v_73_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347034112))), lut = tensor([-0x1.2ep-5, -0x1.6d4p-7, 0x1.6acp-7, 0x1.2d8p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_73_cast = conv(dilations = var_3894, groups = var_1186, pad = v_73_pad_0, pad_type = v_73_pad_type_0, strides = var_3892, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_161_cast)[name = tensor("v_73_cast")]; + tensor var_3898 = const()[name = tensor("op_3898"), val = tensor([2, 20, 64, -1])]; + tensor var_3899_cast = reshape(shape = var_3898, x = q_73_cast)[name = tensor("op_3899_cast")]; + tensor var_3900 = const()[name = tensor("op_3900"), val = tensor([2, 20, 64, -1])]; + tensor var_3901_cast = reshape(shape = var_3900, x = k_73_cast)[name = tensor("op_3901_cast")]; + tensor var_3902 = const()[name = tensor("op_3902"), val = tensor([2, 20, 64, -1])]; + tensor var_3903_cast = reshape(shape = var_3902, x = v_73_cast)[name = tensor("op_3903_cast")]; + tensor attn_weights_145_transpose_x_0 = const()[name = tensor("attn_weights_145_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_145_transpose_y_0 = const()[name = tensor("attn_weights_145_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_145_cast = matmul(transpose_x = attn_weights_145_transpose_x_0, transpose_y = attn_weights_145_transpose_y_0, x = var_3899_cast, y = var_3901_cast)[name = tensor("attn_weights_145_cast")]; + tensor attn_weights_147_cast = mul(x = attn_weights_145_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_147_cast")]; + tensor var_3907_cast = softmax(axis = var_1170, x = attn_weights_147_cast)[name = tensor("op_3907_cast")]; + tensor attn_73_transpose_x_0 = const()[name = tensor("attn_73_transpose_x_0"), val = tensor(false)]; + tensor attn_73_transpose_y_0 = const()[name = tensor("attn_73_transpose_y_0"), val = tensor(true)]; + tensor attn_73_cast = matmul(transpose_x = attn_73_transpose_x_0, transpose_y = attn_73_transpose_y_0, x = var_3903_cast, y = var_3907_cast)[name = tensor("attn_73_cast")]; + tensor var_3911 = const()[name = tensor("op_3911"), val = tensor([2, 1280, 1, -1])]; + tensor input_261_cast = reshape(shape = var_3911, x = attn_73_cast)[name = tensor("input_261_cast")]; + tensor var_3916 = const()[name = tensor("op_3916"), val = tensor([1, 1])]; + tensor var_3918 = const()[name = tensor("op_3918"), val = tensor([1, 1])]; + tensor var_3920_pad_type_0 = const()[name = tensor("op_3920_pad_type_0"), val = tensor("custom")]; + tensor var_3920_pad_0 = const()[name = tensor("op_3920_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347443776))), lut = tensor([-0x1.2c4p-5, -0x1.6c8p-7, 0x1.64p-7, 0x1.2a8p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = 
tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347853440)))]; + tensor var_3920_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_3918, groups = var_1186, pad = var_3920_pad_0, pad_type = var_3920_pad_type_0, strides = var_3916, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_261_cast)[name = tensor("op_3920_cast")]; + tensor inputs_111_cast = add(x = var_3920_cast, y = inputs_109_cast)[name = tensor("inputs_111_cast")]; + tensor var_3924 = const()[name = tensor("op_3924"), val = tensor([1])]; + tensor channels_mean_111_cast = reduce_mean(axes = var_3924, keep_dims = var_1181, x = inputs_111_cast)[name = tensor("channels_mean_111_cast")]; + tensor zero_mean_111_cast = sub(x = inputs_111_cast, y = channels_mean_111_cast)[name = tensor("zero_mean_111_cast")]; + tensor zero_mean_sq_111_cast = mul(x = zero_mean_111_cast, y = zero_mean_111_cast)[name = tensor("zero_mean_sq_111_cast")]; + tensor var_3928 = const()[name = tensor("op_3928"), val = tensor([1])]; + tensor var_3929_cast = reduce_mean(axes = var_3928, keep_dims = var_1181, x = zero_mean_sq_111_cast)[name = tensor("op_3929_cast")]; + tensor var_3930_to_fp16 = const()[name = tensor("op_3930_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3931_cast = add(x = var_3929_cast, y = var_3930_to_fp16)[name = tensor("op_3931_cast")]; + tensor denom_111_epsilon_0_to_fp16 = const()[name = tensor("denom_111_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_111_cast = rsqrt(epsilon = denom_111_epsilon_0_to_fp16, x = var_3931_cast)[name = tensor("denom_111_cast")]; + tensor out_111_cast = mul(x = zero_mean_111_cast, y = denom_111_cast)[name = tensor("out_111_cast")]; + tensor var_3935_to_fp16 = const()[name = tensor("op_3935_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347856064)))]; + tensor var_3936_cast = add(x = out_111_cast, y = var_3935_to_fp16)[name = tensor("op_3936_cast")]; + tensor var_3938_to_fp16 = const()[name = tensor("op_3938_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347858688)))]; + tensor hidden_states_163_cast = mul(x = var_3936_cast, y = var_3938_to_fp16)[name = tensor("hidden_states_163_cast")]; + tensor var_3945 = const()[name = tensor("op_3945"), val = tensor([1, 1])]; + tensor var_3947 = const()[name = tensor("op_3947"), val = tensor([1, 1])]; + tensor q_75_pad_type_0 = const()[name = tensor("q_75_pad_type_0"), val = tensor("custom")]; + tensor q_75_pad_0 = const()[name = tensor("q_75_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347861312))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(348680576))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_75_cast = conv(dilations = var_3947, groups = var_1186, pad = q_75_pad_0, pad_type = q_75_pad_type_0, strides = var_3945, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_163_cast)[name = tensor("q_75_cast")]; + 
tensor var_3951 = const()[name = tensor("op_3951"), val = tensor([1, 1])]; + tensor var_3953 = const()[name = tensor("op_3953"), val = tensor([1, 1])]; + tensor k_75_pad_type_0 = const()[name = tensor("k_75_pad_type_0"), val = tensor("custom")]; + tensor k_75_pad_0 = const()[name = tensor("k_75_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(348680704))), lut = tensor([-0x1.d3p-7, 0x1.d54p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_75_cast = conv(dilations = var_3953, groups = var_1186, pad = k_75_pad_0, pad_type = k_75_pad_type_0, strides = var_3951, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_75_cast")]; + tensor var_3957 = const()[name = tensor("op_3957"), val = tensor([1, 1])]; + tensor var_3959 = const()[name = tensor("op_3959"), val = tensor([1, 1])]; + tensor v_75_pad_type_0 = const()[name = tensor("v_75_pad_type_0"), val = tensor("custom")]; + tensor v_75_pad_0 = const()[name = tensor("v_75_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349008448))), lut = tensor([-0x1.17cp-6, 0x1.188p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_75_cast = conv(dilations = var_3959, groups = var_1186, pad = v_75_pad_0, pad_type = v_75_pad_type_0, strides = var_3957, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_75_cast")]; + tensor var_3963 = const()[name = tensor("op_3963"), val = tensor([2, 20, 64, -1])]; + tensor var_3964_cast = reshape(shape = var_3963, x = q_75_cast)[name = tensor("op_3964_cast")]; + tensor var_3965 = const()[name = tensor("op_3965"), val = tensor([2, 20, 64, -1])]; + tensor var_3966_cast = reshape(shape = var_3965, x = k_75_cast)[name = tensor("op_3966_cast")]; + tensor var_3967 = const()[name = tensor("op_3967"), val = tensor([2, 20, 64, -1])]; + tensor var_3968_cast = reshape(shape = var_3967, x = v_75_cast)[name = tensor("op_3968_cast")]; + tensor attn_weights_149_transpose_x_0 = const()[name = tensor("attn_weights_149_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_149_transpose_y_0 = const()[name = tensor("attn_weights_149_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_149_cast = matmul(transpose_x = attn_weights_149_transpose_x_0, transpose_y = attn_weights_149_transpose_y_0, x = var_3964_cast, y = var_3966_cast)[name = tensor("attn_weights_149_cast")]; + tensor attn_weights_151_cast = mul(x = attn_weights_149_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_151_cast")]; + tensor var_3972_cast = softmax(axis = var_1170, x = attn_weights_151_cast)[name = tensor("op_3972_cast")]; + tensor attn_75_transpose_x_0 = const()[name = tensor("attn_75_transpose_x_0"), val = tensor(false)]; + tensor attn_75_transpose_y_0 = const()[name = tensor("attn_75_transpose_y_0"), val = tensor(true)]; + tensor attn_75_cast = matmul(transpose_x = 
attn_75_transpose_x_0, transpose_y = attn_75_transpose_y_0, x = var_3968_cast, y = var_3972_cast)[name = tensor("attn_75_cast")]; + tensor var_3976 = const()[name = tensor("op_3976"), val = tensor([2, 1280, 1, -1])]; + tensor input_263_cast = reshape(shape = var_3976, x = attn_75_cast)[name = tensor("input_263_cast")]; + tensor var_3981 = const()[name = tensor("op_3981"), val = tensor([1, 1])]; + tensor var_3983 = const()[name = tensor("op_3983"), val = tensor([1, 1])]; + tensor var_3985_pad_type_0 = const()[name = tensor("op_3985_pad_type_0"), val = tensor("custom")]; + tensor var_3985_pad_0 = const()[name = tensor("op_3985_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349336192))), lut = tensor([-0x1.5bp-7, 0x1.5a8p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349541056)))]; + tensor var_3985_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_3983, groups = var_1186, pad = var_3985_pad_0, pad_type = var_3985_pad_type_0, strides = var_3981, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_263_cast)[name = tensor("op_3985_cast")]; + tensor inputs_113_cast = add(x = var_3985_cast, y = inputs_111_cast)[name = tensor("inputs_113_cast")]; + tensor var_3989 = const()[name = tensor("op_3989"), val = tensor([1])]; + tensor channels_mean_113_cast = reduce_mean(axes = var_3989, keep_dims = var_1181, x = inputs_113_cast)[name = tensor("channels_mean_113_cast")]; + tensor zero_mean_113_cast = sub(x = inputs_113_cast, y = channels_mean_113_cast)[name = tensor("zero_mean_113_cast")]; + tensor zero_mean_sq_113_cast = mul(x = zero_mean_113_cast, y = zero_mean_113_cast)[name = tensor("zero_mean_sq_113_cast")]; + tensor var_3993 = const()[name = tensor("op_3993"), val = tensor([1])]; + tensor var_3994_cast = reduce_mean(axes = var_3993, keep_dims = var_1181, x = zero_mean_sq_113_cast)[name = tensor("op_3994_cast")]; + tensor var_3995_to_fp16 = const()[name = tensor("op_3995_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3996_cast = add(x = var_3994_cast, y = var_3995_to_fp16)[name = tensor("op_3996_cast")]; + tensor denom_113_epsilon_0_to_fp16 = const()[name = tensor("denom_113_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_113_cast = rsqrt(epsilon = denom_113_epsilon_0_to_fp16, x = var_3996_cast)[name = tensor("denom_113_cast")]; + tensor out_113_cast = mul(x = zero_mean_113_cast, y = denom_113_cast)[name = tensor("out_113_cast")]; + tensor var_4000_to_fp16 = const()[name = tensor("op_4000_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349543680)))]; + tensor var_4001_cast = add(x = out_113_cast, y = var_4000_to_fp16)[name = tensor("op_4001_cast")]; + tensor var_4003_to_fp16 = const()[name = tensor("op_4003_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349546304)))]; + tensor input_265_cast = 
mul(x = var_4001_cast, y = var_4003_to_fp16)[name = tensor("input_265_cast")]; + tensor var_4011 = const()[name = tensor("op_4011"), val = tensor([1, 1])]; + tensor var_4013 = const()[name = tensor("op_4013"), val = tensor([1, 1])]; + tensor var_4015_pad_type_0 = const()[name = tensor("op_4015_pad_type_0"), val = tensor("custom")]; + tensor var_4015_pad_0 = const()[name = tensor("op_4015_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349548928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(356102592))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(356102720)))]; + tensor var_4015_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_4013, groups = var_1186, pad = var_4015_pad_0, pad_type = var_4015_pad_type_0, strides = var_4011, weight = down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_265_cast)[name = tensor("op_4015_cast")]; + tensor var_4016_split_sizes_0 = const()[name = tensor("op_4016_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4016_axis_0 = const()[name = tensor("op_4016_axis_0"), val = tensor(1)]; + tensor var_4016_cast_0, tensor var_4016_cast_1 = split(axis = var_4016_axis_0, split_sizes = var_4016_split_sizes_0, x = var_4015_cast)[name = tensor("op_4016_cast")]; + tensor var_4018_mode_0 = const()[name = tensor("op_4018_mode_0"), val = tensor("EXACT")]; + tensor var_4018_cast = gelu(mode = var_4018_mode_0, x = var_4016_cast_1)[name = tensor("op_4018_cast")]; + tensor input_267_cast = mul(x = var_4016_cast_0, y = var_4018_cast)[name = tensor("input_267_cast")]; + tensor var_4022 = const()[name = tensor("op_4022"), val = tensor([1, 1])]; + tensor var_4024 = const()[name = tensor("op_4024"), val = tensor([1, 1])]; + tensor var_4026_pad_type_0 = const()[name = tensor("op_4026_pad_type_0"), val = tensor("custom")]; + tensor var_4026_pad_0 = const()[name = tensor("op_4026_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(356123264))), lut = tensor([-0x1.508p-5, -0x1.92cp-7, 0x1.92p-7, 0x1.504p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(357761728)))]; + tensor var_4026_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_4024, groups = var_1186, pad = var_4026_pad_0, pad_type = var_4026_pad_type_0, strides = var_4022, weight = 
down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_267_cast)[name = tensor("op_4026_cast")]; + tensor inputs_115_cast = add(x = var_4026_cast, y = inputs_113_cast)[name = tensor("inputs_115_cast")]; + tensor var_4036 = const()[name = tensor("op_4036"), val = tensor([1])]; + tensor channels_mean_115_cast = reduce_mean(axes = var_4036, keep_dims = var_1181, x = inputs_115_cast)[name = tensor("channels_mean_115_cast")]; + tensor zero_mean_115_cast = sub(x = inputs_115_cast, y = channels_mean_115_cast)[name = tensor("zero_mean_115_cast")]; + tensor zero_mean_sq_115_cast = mul(x = zero_mean_115_cast, y = zero_mean_115_cast)[name = tensor("zero_mean_sq_115_cast")]; + tensor var_4040 = const()[name = tensor("op_4040"), val = tensor([1])]; + tensor var_4041_cast = reduce_mean(axes = var_4040, keep_dims = var_1181, x = zero_mean_sq_115_cast)[name = tensor("op_4041_cast")]; + tensor var_4042_to_fp16 = const()[name = tensor("op_4042_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4043_cast = add(x = var_4041_cast, y = var_4042_to_fp16)[name = tensor("op_4043_cast")]; + tensor denom_115_epsilon_0_to_fp16 = const()[name = tensor("denom_115_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_115_cast = rsqrt(epsilon = denom_115_epsilon_0_to_fp16, x = var_4043_cast)[name = tensor("denom_115_cast")]; + tensor out_115_cast = mul(x = zero_mean_115_cast, y = denom_115_cast)[name = tensor("out_115_cast")]; + tensor var_4047_to_fp16 = const()[name = tensor("op_4047_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(357764352)))]; + tensor var_4048_cast = add(x = out_115_cast, y = var_4047_to_fp16)[name = tensor("op_4048_cast")]; + tensor var_4050_to_fp16 = const()[name = tensor("op_4050_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(357766976)))]; + tensor hidden_states_167_cast = mul(x = var_4048_cast, y = var_4050_to_fp16)[name = tensor("hidden_states_167_cast")]; + tensor var_4057 = const()[name = tensor("op_4057"), val = tensor([1, 1])]; + tensor var_4059 = const()[name = tensor("op_4059"), val = tensor([1, 1])]; + tensor q_77_pad_type_0 = const()[name = tensor("q_77_pad_type_0"), val = tensor("custom")]; + tensor q_77_pad_0 = const()[name = tensor("q_77_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(357769600))), lut = tensor([-0x1.538p-6, 0x1.548p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_77_cast = conv(dilations = var_4059, groups = var_1186, pad = q_77_pad_0, pad_type = q_77_pad_type_0, strides = var_4057, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_167_cast)[name = tensor("q_77_cast")]; + tensor var_4063 = const()[name = tensor("op_4063"), val = tensor([1, 1])]; + tensor var_4065 = const()[name = tensor("op_4065"), val = tensor([1, 1])]; + tensor k_77_pad_type_0 = const()[name = tensor("k_77_pad_type_0"), val = tensor("custom")]; + tensor k_77_pad_0 = const()[name = tensor("k_77_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(357974464))), lut = tensor([-0x1.528p-6, 0x1.528p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_77_cast = conv(dilations = var_4065, groups = var_1186, pad = k_77_pad_0, pad_type = k_77_pad_type_0, strides = var_4063, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_167_cast)[name = tensor("k_77_cast")]; + tensor var_4069 = const()[name = tensor("op_4069"), val = tensor([1, 1])]; + tensor var_4071 = const()[name = tensor("op_4071"), val = tensor([1, 1])]; + tensor v_77_pad_type_0 = const()[name = tensor("v_77_pad_type_0"), val = tensor("custom")]; + tensor v_77_pad_0 = const()[name = tensor("v_77_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(358179328))), lut = tensor([-0x1.2d8p-5, -0x1.6b4p-7, 0x1.6bp-7, 0x1.2d4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_77_cast = conv(dilations = var_4071, groups = var_1186, pad = v_77_pad_0, pad_type = v_77_pad_type_0, strides = var_4069, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_167_cast)[name = tensor("v_77_cast")]; + tensor var_4075 = const()[name = tensor("op_4075"), val = tensor([2, 20, 64, -1])]; + tensor var_4076_cast = reshape(shape = var_4075, x = q_77_cast)[name = tensor("op_4076_cast")]; + tensor var_4077 = const()[name = tensor("op_4077"), val = tensor([2, 20, 64, -1])]; + tensor var_4078_cast = reshape(shape = var_4077, x = k_77_cast)[name = tensor("op_4078_cast")]; + tensor var_4079 = const()[name = tensor("op_4079"), val = tensor([2, 20, 64, -1])]; + tensor var_4080_cast = reshape(shape = var_4079, x = v_77_cast)[name = tensor("op_4080_cast")]; + tensor attn_weights_153_transpose_x_0 = const()[name = tensor("attn_weights_153_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_153_transpose_y_0 = const()[name = tensor("attn_weights_153_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_153_cast = matmul(transpose_x = attn_weights_153_transpose_x_0, transpose_y = attn_weights_153_transpose_y_0, x = var_4076_cast, y = var_4078_cast)[name = tensor("attn_weights_153_cast")]; + tensor attn_weights_155_cast = mul(x = attn_weights_153_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_155_cast")]; + tensor var_4084_cast = softmax(axis = var_1170, x = attn_weights_155_cast)[name = tensor("op_4084_cast")]; + tensor attn_77_transpose_x_0 = const()[name = tensor("attn_77_transpose_x_0"), val = tensor(false)]; + tensor attn_77_transpose_y_0 = const()[name = tensor("attn_77_transpose_y_0"), val = tensor(true)]; + tensor attn_77_cast = matmul(transpose_x = attn_77_transpose_x_0, transpose_y = attn_77_transpose_y_0, x = var_4080_cast, y = var_4084_cast)[name = tensor("attn_77_cast")]; + tensor var_4088 = const()[name = tensor("op_4088"), val = tensor([2, 1280, 1, -1])]; + tensor input_269_cast = reshape(shape = var_4088, x = attn_77_cast)[name = tensor("input_269_cast")]; + tensor var_4093 = const()[name = tensor("op_4093"), val = tensor([1, 1])]; + tensor var_4095 = const()[name = tensor("op_4095"), val = tensor([1, 1])]; 
+ tensor var_4097_pad_type_0 = const()[name = tensor("op_4097_pad_type_0"), val = tensor("custom")]; + tensor var_4097_pad_0 = const()[name = tensor("op_4097_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(358588992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(359408256))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(359408384)))]; + tensor var_4097_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_4095, groups = var_1186, pad = var_4097_pad_0, pad_type = var_4097_pad_type_0, strides = var_4093, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_269_cast)[name = tensor("op_4097_cast")]; + tensor inputs_117_cast = add(x = var_4097_cast, y = inputs_115_cast)[name = tensor("inputs_117_cast")]; + tensor var_4101 = const()[name = tensor("op_4101"), val = tensor([1])]; + tensor channels_mean_117_cast = reduce_mean(axes = var_4101, keep_dims = var_1181, x = inputs_117_cast)[name = tensor("channels_mean_117_cast")]; + tensor zero_mean_117_cast = sub(x = inputs_117_cast, y = channels_mean_117_cast)[name = tensor("zero_mean_117_cast")]; + tensor zero_mean_sq_117_cast = mul(x = zero_mean_117_cast, y = zero_mean_117_cast)[name = tensor("zero_mean_sq_117_cast")]; + tensor var_4105 = const()[name = tensor("op_4105"), val = tensor([1])]; + tensor var_4106_cast = reduce_mean(axes = var_4105, keep_dims = var_1181, x = zero_mean_sq_117_cast)[name = tensor("op_4106_cast")]; + tensor var_4107_to_fp16 = const()[name = tensor("op_4107_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4108_cast = add(x = var_4106_cast, y = var_4107_to_fp16)[name = tensor("op_4108_cast")]; + tensor denom_117_epsilon_0_to_fp16 = const()[name = tensor("denom_117_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_117_cast = rsqrt(epsilon = denom_117_epsilon_0_to_fp16, x = var_4108_cast)[name = tensor("denom_117_cast")]; + tensor out_117_cast = mul(x = zero_mean_117_cast, y = denom_117_cast)[name = tensor("out_117_cast")]; + tensor var_4112_to_fp16 = const()[name = tensor("op_4112_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(359411008)))]; + tensor var_4113_cast = add(x = out_117_cast, y = var_4112_to_fp16)[name = tensor("op_4113_cast")]; + tensor var_4115_to_fp16 = const()[name = tensor("op_4115_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(359413632)))]; + tensor hidden_states_169_cast = mul(x = var_4113_cast, y = var_4115_to_fp16)[name = tensor("hidden_states_169_cast")]; + tensor var_4122 = const()[name = tensor("op_4122"), val = tensor([1, 1])]; + tensor var_4124 = const()[name = tensor("op_4124"), val = tensor([1, 1])]; + tensor q_79_pad_type_0 = const()[name = tensor("q_79_pad_type_0"), val = tensor("custom")]; + tensor q_79_pad_0 = const()[name = tensor("q_79_pad_0"), val = 
tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(359416256))), lut = tensor([-0x1.0fcp-6, 0x1.0fp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_79_cast = conv(dilations = var_4124, groups = var_1186, pad = q_79_pad_0, pad_type = q_79_pad_type_0, strides = var_4122, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_169_cast)[name = tensor("q_79_cast")]; + tensor var_4128 = const()[name = tensor("op_4128"), val = tensor([1, 1])]; + tensor var_4130 = const()[name = tensor("op_4130"), val = tensor([1, 1])]; + tensor k_79_pad_type_0 = const()[name = tensor("k_79_pad_type_0"), val = tensor("custom")]; + tensor k_79_pad_0 = const()[name = tensor("k_79_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(359621120))), lut = tensor([-0x1.b3p-7, 0x1.b38p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_79_cast = conv(dilations = var_4130, groups = var_1186, pad = k_79_pad_0, pad_type = k_79_pad_type_0, strides = var_4128, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_79_cast")]; + tensor var_4134 = const()[name = tensor("op_4134"), val = tensor([1, 1])]; + tensor var_4136 = const()[name = tensor("op_4136"), val = tensor([1, 1])]; + tensor v_79_pad_type_0 = const()[name = tensor("v_79_pad_type_0"), val = tensor("custom")]; + tensor v_79_pad_0 = const()[name = tensor("v_79_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(359948864))), lut = tensor([-0x1.12cp-6, 0x1.12p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_79_cast = conv(dilations = var_4136, groups = var_1186, pad = v_79_pad_0, pad_type = v_79_pad_type_0, strides = var_4134, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_79_cast")]; + tensor var_4140 = const()[name = tensor("op_4140"), val = tensor([2, 20, 64, -1])]; + tensor var_4141_cast = reshape(shape = var_4140, x = q_79_cast)[name = tensor("op_4141_cast")]; + tensor var_4142 = const()[name = tensor("op_4142"), val = tensor([2, 20, 64, -1])]; + tensor var_4143_cast = reshape(shape = var_4142, x = k_79_cast)[name = tensor("op_4143_cast")]; + tensor var_4144 = const()[name = tensor("op_4144"), val = tensor([2, 20, 64, -1])]; + tensor var_4145_cast = reshape(shape = var_4144, x = v_79_cast)[name = tensor("op_4145_cast")]; + tensor attn_weights_157_transpose_x_0 = const()[name = tensor("attn_weights_157_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_157_transpose_y_0 = const()[name = tensor("attn_weights_157_transpose_y_0"), val = 
tensor(false)]; + tensor attn_weights_157_cast = matmul(transpose_x = attn_weights_157_transpose_x_0, transpose_y = attn_weights_157_transpose_y_0, x = var_4141_cast, y = var_4143_cast)[name = tensor("attn_weights_157_cast")]; + tensor attn_weights_159_cast = mul(x = attn_weights_157_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_159_cast")]; + tensor var_4149_cast = softmax(axis = var_1170, x = attn_weights_159_cast)[name = tensor("op_4149_cast")]; + tensor attn_79_transpose_x_0 = const()[name = tensor("attn_79_transpose_x_0"), val = tensor(false)]; + tensor attn_79_transpose_y_0 = const()[name = tensor("attn_79_transpose_y_0"), val = tensor(true)]; + tensor attn_79_cast = matmul(transpose_x = attn_79_transpose_x_0, transpose_y = attn_79_transpose_y_0, x = var_4145_cast, y = var_4149_cast)[name = tensor("attn_79_cast")]; + tensor var_4153 = const()[name = tensor("op_4153"), val = tensor([2, 1280, 1, -1])]; + tensor input_271_cast = reshape(shape = var_4153, x = attn_79_cast)[name = tensor("input_271_cast")]; + tensor var_4158 = const()[name = tensor("op_4158"), val = tensor([1, 1])]; + tensor var_4160 = const()[name = tensor("op_4160"), val = tensor([1, 1])]; + tensor var_4162_pad_type_0 = const()[name = tensor("op_4162_pad_type_0"), val = tensor("custom")]; + tensor var_4162_pad_0 = const()[name = tensor("op_4162_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(360276608))), lut = tensor([-0x1.578p-7, 0x1.57cp-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(360481472)))]; + tensor var_4162_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_4160, groups = var_1186, pad = var_4162_pad_0, pad_type = var_4162_pad_type_0, strides = var_4158, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_271_cast)[name = tensor("op_4162_cast")]; + tensor inputs_119_cast = add(x = var_4162_cast, y = inputs_117_cast)[name = tensor("inputs_119_cast")]; + tensor var_4166 = const()[name = tensor("op_4166"), val = tensor([1])]; + tensor channels_mean_119_cast = reduce_mean(axes = var_4166, keep_dims = var_1181, x = inputs_119_cast)[name = tensor("channels_mean_119_cast")]; + tensor zero_mean_119_cast = sub(x = inputs_119_cast, y = channels_mean_119_cast)[name = tensor("zero_mean_119_cast")]; + tensor zero_mean_sq_119_cast = mul(x = zero_mean_119_cast, y = zero_mean_119_cast)[name = tensor("zero_mean_sq_119_cast")]; + tensor var_4170 = const()[name = tensor("op_4170"), val = tensor([1])]; + tensor var_4171_cast = reduce_mean(axes = var_4170, keep_dims = var_1181, x = zero_mean_sq_119_cast)[name = tensor("op_4171_cast")]; + tensor var_4172_to_fp16 = const()[name = tensor("op_4172_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4173_cast = add(x = var_4171_cast, y = var_4172_to_fp16)[name = tensor("op_4173_cast")]; + tensor denom_119_epsilon_0_to_fp16 = const()[name = tensor("denom_119_epsilon_0_to_fp16"), val = 
tensor(0x1p-24)]; + tensor denom_119_cast = rsqrt(epsilon = denom_119_epsilon_0_to_fp16, x = var_4173_cast)[name = tensor("denom_119_cast")]; + tensor out_119_cast = mul(x = zero_mean_119_cast, y = denom_119_cast)[name = tensor("out_119_cast")]; + tensor var_4177_to_fp16 = const()[name = tensor("op_4177_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(360484096)))]; + tensor var_4178_cast = add(x = out_119_cast, y = var_4177_to_fp16)[name = tensor("op_4178_cast")]; + tensor var_4180_to_fp16 = const()[name = tensor("op_4180_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(360486720)))]; + tensor input_273_cast = mul(x = var_4178_cast, y = var_4180_to_fp16)[name = tensor("input_273_cast")]; + tensor var_4188 = const()[name = tensor("op_4188"), val = tensor([1, 1])]; + tensor var_4190 = const()[name = tensor("op_4190"), val = tensor([1, 1])]; + tensor var_4192_pad_type_0 = const()[name = tensor("op_4192_pad_type_0"), val = tensor("custom")]; + tensor var_4192_pad_0 = const()[name = tensor("op_4192_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(360489344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(367043008))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(367043136)))]; + tensor var_4192_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_4190, groups = var_1186, pad = var_4192_pad_0, pad_type = var_4192_pad_type_0, strides = var_4188, weight = down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_273_cast)[name = tensor("op_4192_cast")]; + tensor var_4193_split_sizes_0 = const()[name = tensor("op_4193_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4193_axis_0 = const()[name = tensor("op_4193_axis_0"), val = tensor(1)]; + tensor var_4193_cast_0, tensor var_4193_cast_1 = split(axis = var_4193_axis_0, split_sizes = var_4193_split_sizes_0, x = var_4192_cast)[name = tensor("op_4193_cast")]; + tensor var_4195_mode_0 = const()[name = tensor("op_4195_mode_0"), val = tensor("EXACT")]; + tensor var_4195_cast = gelu(mode = var_4195_mode_0, x = var_4193_cast_1)[name = tensor("op_4195_cast")]; + tensor input_275_cast = mul(x = var_4193_cast_0, y = var_4195_cast)[name = tensor("input_275_cast")]; + tensor var_4199 = const()[name = tensor("op_4199"), val = tensor([1, 1])]; + tensor var_4201 = const()[name = tensor("op_4201"), val = tensor([1, 1])]; + tensor var_4203_pad_type_0 = const()[name = tensor("op_4203_pad_type_0"), val = tensor("custom")]; + tensor var_4203_pad_0 = const()[name = tensor("op_4203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(367063680))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(370340544))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(370340672)))]; + tensor var_4203_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_4201, groups = var_1186, pad = var_4203_pad_0, pad_type = var_4203_pad_type_0, strides = var_4199, weight = down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_275_cast)[name = tensor("op_4203_cast")]; + tensor inputs_121_cast = add(x = var_4203_cast, y = inputs_119_cast)[name = tensor("inputs_121_cast")]; + tensor var_4213 = const()[name = tensor("op_4213"), val = tensor([1])]; + tensor channels_mean_121_cast = reduce_mean(axes = var_4213, keep_dims = var_1181, x = inputs_121_cast)[name = tensor("channels_mean_121_cast")]; + tensor zero_mean_121_cast = sub(x = inputs_121_cast, y = channels_mean_121_cast)[name = tensor("zero_mean_121_cast")]; + tensor zero_mean_sq_121_cast = mul(x = zero_mean_121_cast, y = zero_mean_121_cast)[name = tensor("zero_mean_sq_121_cast")]; + tensor var_4217 = const()[name = tensor("op_4217"), val = tensor([1])]; + tensor var_4218_cast = reduce_mean(axes = var_4217, keep_dims = var_1181, x = zero_mean_sq_121_cast)[name = tensor("op_4218_cast")]; + tensor var_4219_to_fp16 = const()[name = tensor("op_4219_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4220_cast = add(x = var_4218_cast, y = var_4219_to_fp16)[name = tensor("op_4220_cast")]; + tensor denom_121_epsilon_0_to_fp16 = const()[name = tensor("denom_121_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_121_cast = rsqrt(epsilon = denom_121_epsilon_0_to_fp16, x = var_4220_cast)[name = tensor("denom_121_cast")]; + tensor out_121_cast = mul(x = zero_mean_121_cast, y = denom_121_cast)[name = tensor("out_121_cast")]; + tensor var_4224_to_fp16 = const()[name = tensor("op_4224_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(370343296)))]; + tensor var_4225_cast = add(x = out_121_cast, y = var_4224_to_fp16)[name = tensor("op_4225_cast")]; + tensor var_4227_to_fp16 = const()[name = tensor("op_4227_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(370345920)))]; + tensor hidden_states_173_cast = mul(x = var_4225_cast, y = var_4227_to_fp16)[name = tensor("hidden_states_173_cast")]; + tensor var_4234 = const()[name = tensor("op_4234"), val = tensor([1, 1])]; + tensor var_4236 = const()[name = tensor("op_4236"), val = tensor([1, 1])]; + tensor q_81_pad_type_0 = const()[name = tensor("q_81_pad_type_0"), val = tensor("custom")]; + tensor q_81_pad_0 = const()[name = tensor("q_81_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(370348544))), lut = tensor([-0x1.50cp-6, 0x1.51p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_81_cast = conv(dilations = 
var_4236, groups = var_1186, pad = q_81_pad_0, pad_type = q_81_pad_type_0, strides = var_4234, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_173_cast)[name = tensor("q_81_cast")]; + tensor var_4240 = const()[name = tensor("op_4240"), val = tensor([1, 1])]; + tensor var_4242 = const()[name = tensor("op_4242"), val = tensor([1, 1])]; + tensor k_81_pad_type_0 = const()[name = tensor("k_81_pad_type_0"), val = tensor("custom")]; + tensor k_81_pad_0 = const()[name = tensor("k_81_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(370553408))), lut = tensor([-0x1.4fp-6, 0x1.4f8p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_81_cast = conv(dilations = var_4242, groups = var_1186, pad = k_81_pad_0, pad_type = k_81_pad_type_0, strides = var_4240, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_173_cast)[name = tensor("k_81_cast")]; + tensor var_4246 = const()[name = tensor("op_4246"), val = tensor([1, 1])]; + tensor var_4248 = const()[name = tensor("op_4248"), val = tensor([1, 1])]; + tensor v_81_pad_type_0 = const()[name = tensor("v_81_pad_type_0"), val = tensor("custom")]; + tensor v_81_pad_0 = const()[name = tensor("v_81_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(370758272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(371577536))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_81_cast = conv(dilations = var_4248, groups = var_1186, pad = v_81_pad_0, pad_type = v_81_pad_type_0, strides = var_4246, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_173_cast)[name = tensor("v_81_cast")]; + tensor var_4252 = const()[name = tensor("op_4252"), val = tensor([2, 20, 64, -1])]; + tensor var_4253_cast = reshape(shape = var_4252, x = q_81_cast)[name = tensor("op_4253_cast")]; + tensor var_4254 = const()[name = tensor("op_4254"), val = tensor([2, 20, 64, -1])]; + tensor var_4255_cast = reshape(shape = var_4254, x = k_81_cast)[name = tensor("op_4255_cast")]; + tensor var_4256 = const()[name = tensor("op_4256"), val = tensor([2, 20, 64, -1])]; + tensor var_4257_cast = reshape(shape = var_4256, x = v_81_cast)[name = tensor("op_4257_cast")]; + tensor attn_weights_161_transpose_x_0 = const()[name = tensor("attn_weights_161_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_161_transpose_y_0 = const()[name = tensor("attn_weights_161_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_161_cast = matmul(transpose_x = attn_weights_161_transpose_x_0, transpose_y = attn_weights_161_transpose_y_0, x = var_4253_cast, y = var_4255_cast)[name = tensor("attn_weights_161_cast")]; + tensor attn_weights_163_cast = mul(x = attn_weights_161_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_163_cast")]; + tensor var_4261_cast = softmax(axis = var_1170, x = 
attn_weights_163_cast)[name = tensor("op_4261_cast")]; + tensor attn_81_transpose_x_0 = const()[name = tensor("attn_81_transpose_x_0"), val = tensor(false)]; + tensor attn_81_transpose_y_0 = const()[name = tensor("attn_81_transpose_y_0"), val = tensor(true)]; + tensor attn_81_cast = matmul(transpose_x = attn_81_transpose_x_0, transpose_y = attn_81_transpose_y_0, x = var_4257_cast, y = var_4261_cast)[name = tensor("attn_81_cast")]; + tensor var_4265 = const()[name = tensor("op_4265"), val = tensor([2, 1280, 1, -1])]; + tensor input_277_cast = reshape(shape = var_4265, x = attn_81_cast)[name = tensor("input_277_cast")]; + tensor var_4270 = const()[name = tensor("op_4270"), val = tensor([1, 1])]; + tensor var_4272 = const()[name = tensor("op_4272"), val = tensor([1, 1])]; + tensor var_4274_pad_type_0 = const()[name = tensor("op_4274_pad_type_0"), val = tensor("custom")]; + tensor var_4274_pad_0 = const()[name = tensor("op_4274_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(371577664))), lut = tensor([-0x1.2e4p-5, -0x1.6bp-7, 0x1.6acp-7, 0x1.2ep-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(371987328)))]; + tensor var_4274_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_4272, groups = var_1186, pad = var_4274_pad_0, pad_type = var_4274_pad_type_0, strides = var_4270, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_277_cast)[name = tensor("op_4274_cast")]; + tensor inputs_123_cast = add(x = var_4274_cast, y = inputs_121_cast)[name = tensor("inputs_123_cast")]; + tensor var_4278 = const()[name = tensor("op_4278"), val = tensor([1])]; + tensor channels_mean_123_cast = reduce_mean(axes = var_4278, keep_dims = var_1181, x = inputs_123_cast)[name = tensor("channels_mean_123_cast")]; + tensor zero_mean_123_cast = sub(x = inputs_123_cast, y = channels_mean_123_cast)[name = tensor("zero_mean_123_cast")]; + tensor zero_mean_sq_123_cast = mul(x = zero_mean_123_cast, y = zero_mean_123_cast)[name = tensor("zero_mean_sq_123_cast")]; + tensor var_4282 = const()[name = tensor("op_4282"), val = tensor([1])]; + tensor var_4283_cast = reduce_mean(axes = var_4282, keep_dims = var_1181, x = zero_mean_sq_123_cast)[name = tensor("op_4283_cast")]; + tensor var_4284_to_fp16 = const()[name = tensor("op_4284_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4285_cast = add(x = var_4283_cast, y = var_4284_to_fp16)[name = tensor("op_4285_cast")]; + tensor denom_123_epsilon_0_to_fp16 = const()[name = tensor("denom_123_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_123_cast = rsqrt(epsilon = denom_123_epsilon_0_to_fp16, x = var_4285_cast)[name = tensor("denom_123_cast")]; + tensor out_123_cast = mul(x = zero_mean_123_cast, y = denom_123_cast)[name = tensor("out_123_cast")]; + tensor var_4289_to_fp16 = const()[name = tensor("op_4289_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(371989952)))]; + tensor var_4290_cast = add(x = out_123_cast, y = var_4289_to_fp16)[name = tensor("op_4290_cast")]; + tensor var_4292_to_fp16 = const()[name = tensor("op_4292_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(371992576)))]; + tensor hidden_states_175_cast = mul(x = var_4290_cast, y = var_4292_to_fp16)[name = tensor("hidden_states_175_cast")]; + tensor var_4299 = const()[name = tensor("op_4299"), val = tensor([1, 1])]; + tensor var_4301 = const()[name = tensor("op_4301"), val = tensor([1, 1])]; + tensor q_83_pad_type_0 = const()[name = tensor("q_83_pad_type_0"), val = tensor("custom")]; + tensor q_83_pad_0 = const()[name = tensor("q_83_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(371995200))), lut = tensor([-0x1.e5p-7, 0x1.e64p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_83_cast = conv(dilations = var_4301, groups = var_1186, pad = q_83_pad_0, pad_type = q_83_pad_type_0, strides = var_4299, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_175_cast)[name = tensor("q_83_cast")]; + tensor var_4305 = const()[name = tensor("op_4305"), val = tensor([1, 1])]; + tensor var_4307 = const()[name = tensor("op_4307"), val = tensor([1, 1])]; + tensor k_83_pad_type_0 = const()[name = tensor("k_83_pad_type_0"), val = tensor("custom")]; + tensor k_83_pad_0 = const()[name = tensor("k_83_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372200064))), lut = tensor([-0x1.798p-7, 0x1.78p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_83_cast = conv(dilations = var_4307, groups = var_1186, pad = k_83_pad_0, pad_type = k_83_pad_type_0, strides = var_4305, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_83_cast")]; + tensor var_4311 = const()[name = tensor("op_4311"), val = tensor([1, 1])]; + tensor var_4313 = const()[name = tensor("op_4313"), val = tensor([1, 1])]; + tensor v_83_pad_type_0 = const()[name = tensor("v_83_pad_type_0"), val = tensor("custom")]; + tensor v_83_pad_0 = const()[name = tensor("v_83_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372527808))), lut = tensor([-0x1.fb4p-7, 0x1.fap-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_83_cast = conv(dilations = var_4313, groups = var_1186, pad = v_83_pad_0, pad_type = v_83_pad_type_0, strides = var_4311, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_83_cast")]; + tensor var_4317 = const()[name = 
tensor("op_4317"), val = tensor([2, 20, 64, -1])]; + tensor var_4318_cast = reshape(shape = var_4317, x = q_83_cast)[name = tensor("op_4318_cast")]; + tensor var_4319 = const()[name = tensor("op_4319"), val = tensor([2, 20, 64, -1])]; + tensor var_4320_cast = reshape(shape = var_4319, x = k_83_cast)[name = tensor("op_4320_cast")]; + tensor var_4321 = const()[name = tensor("op_4321"), val = tensor([2, 20, 64, -1])]; + tensor var_4322_cast = reshape(shape = var_4321, x = v_83_cast)[name = tensor("op_4322_cast")]; + tensor attn_weights_165_transpose_x_0 = const()[name = tensor("attn_weights_165_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_165_transpose_y_0 = const()[name = tensor("attn_weights_165_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_165_cast = matmul(transpose_x = attn_weights_165_transpose_x_0, transpose_y = attn_weights_165_transpose_y_0, x = var_4318_cast, y = var_4320_cast)[name = tensor("attn_weights_165_cast")]; + tensor attn_weights_167_cast = mul(x = attn_weights_165_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_167_cast")]; + tensor var_4326_cast = softmax(axis = var_1170, x = attn_weights_167_cast)[name = tensor("op_4326_cast")]; + tensor attn_83_transpose_x_0 = const()[name = tensor("attn_83_transpose_x_0"), val = tensor(false)]; + tensor attn_83_transpose_y_0 = const()[name = tensor("attn_83_transpose_y_0"), val = tensor(true)]; + tensor attn_83_cast = matmul(transpose_x = attn_83_transpose_x_0, transpose_y = attn_83_transpose_y_0, x = var_4322_cast, y = var_4326_cast)[name = tensor("attn_83_cast")]; + tensor var_4330 = const()[name = tensor("op_4330"), val = tensor([2, 1280, 1, -1])]; + tensor input_279_cast = reshape(shape = var_4330, x = attn_83_cast)[name = tensor("input_279_cast")]; + tensor var_4335 = const()[name = tensor("op_4335"), val = tensor([1, 1])]; + tensor var_4337 = const()[name = tensor("op_4337"), val = tensor([1, 1])]; + tensor var_4339_pad_type_0 = const()[name = tensor("op_4339_pad_type_0"), val = tensor("custom")]; + tensor var_4339_pad_0 = const()[name = tensor("op_4339_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372855552))), lut = tensor([-0x1.3e8p-7, 0x1.3f4p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(373060416)))]; + tensor var_4339_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_4337, groups = var_1186, pad = var_4339_pad_0, pad_type = var_4339_pad_type_0, strides = var_4335, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_279_cast)[name = tensor("op_4339_cast")]; + tensor inputs_125_cast = add(x = var_4339_cast, y = inputs_123_cast)[name = tensor("inputs_125_cast")]; + tensor var_4343 = const()[name = tensor("op_4343"), val = tensor([1])]; + tensor channels_mean_125_cast = reduce_mean(axes = var_4343, keep_dims = var_1181, x = inputs_125_cast)[name = tensor("channels_mean_125_cast")]; + tensor 
zero_mean_125_cast = sub(x = inputs_125_cast, y = channels_mean_125_cast)[name = tensor("zero_mean_125_cast")]; + tensor zero_mean_sq_125_cast = mul(x = zero_mean_125_cast, y = zero_mean_125_cast)[name = tensor("zero_mean_sq_125_cast")]; + tensor var_4347 = const()[name = tensor("op_4347"), val = tensor([1])]; + tensor var_4348_cast = reduce_mean(axes = var_4347, keep_dims = var_1181, x = zero_mean_sq_125_cast)[name = tensor("op_4348_cast")]; + tensor var_4349_to_fp16 = const()[name = tensor("op_4349_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4350_cast = add(x = var_4348_cast, y = var_4349_to_fp16)[name = tensor("op_4350_cast")]; + tensor denom_125_epsilon_0_to_fp16 = const()[name = tensor("denom_125_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_125_cast = rsqrt(epsilon = denom_125_epsilon_0_to_fp16, x = var_4350_cast)[name = tensor("denom_125_cast")]; + tensor out_125_cast = mul(x = zero_mean_125_cast, y = denom_125_cast)[name = tensor("out_125_cast")]; + tensor var_4354_to_fp16 = const()[name = tensor("op_4354_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(373063040)))]; + tensor var_4355_cast = add(x = out_125_cast, y = var_4354_to_fp16)[name = tensor("op_4355_cast")]; + tensor var_4357_to_fp16 = const()[name = tensor("op_4357_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(373065664)))]; + tensor input_281_cast = mul(x = var_4355_cast, y = var_4357_to_fp16)[name = tensor("input_281_cast")]; + tensor var_4365 = const()[name = tensor("op_4365"), val = tensor([1, 1])]; + tensor var_4367 = const()[name = tensor("op_4367"), val = tensor([1, 1])]; + tensor var_4369_pad_type_0 = const()[name = tensor("op_4369_pad_type_0"), val = tensor("custom")]; + tensor var_4369_pad_0 = const()[name = tensor("op_4369_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(373068288))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(379621952))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(379622080)))]; + tensor var_4369_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_4367, groups = var_1186, pad = var_4369_pad_0, pad_type = var_4369_pad_type_0, strides = var_4365, weight = down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_281_cast)[name = tensor("op_4369_cast")]; + tensor var_4370_split_sizes_0 = const()[name = tensor("op_4370_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4370_axis_0 = const()[name = tensor("op_4370_axis_0"), val = tensor(1)]; + tensor var_4370_cast_0, tensor var_4370_cast_1 = split(axis = var_4370_axis_0, split_sizes = var_4370_split_sizes_0, x = var_4369_cast)[name = tensor("op_4370_cast")]; + tensor var_4372_mode_0 = const()[name = tensor("op_4372_mode_0"), val = tensor("EXACT")]; + tensor var_4372_cast = gelu(mode = var_4372_mode_0, x = 
var_4370_cast_1)[name = tensor("op_4372_cast")]; + tensor input_283_cast = mul(x = var_4370_cast_0, y = var_4372_cast)[name = tensor("input_283_cast")]; + tensor var_4376 = const()[name = tensor("op_4376"), val = tensor([1, 1])]; + tensor var_4378 = const()[name = tensor("op_4378"), val = tensor([1, 1])]; + tensor var_4380_pad_type_0 = const()[name = tensor("op_4380_pad_type_0"), val = tensor("custom")]; + tensor var_4380_pad_0 = const()[name = tensor("op_4380_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(379642624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(382919488))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(382919616)))]; + tensor var_4380_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_4378, groups = var_1186, pad = var_4380_pad_0, pad_type = var_4380_pad_type_0, strides = var_4376, weight = down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_283_cast)[name = tensor("op_4380_cast")]; + tensor inputs_127_cast = add(x = var_4380_cast, y = inputs_125_cast)[name = tensor("inputs_127_cast")]; + tensor var_4390 = const()[name = tensor("op_4390"), val = tensor([1])]; + tensor channels_mean_127_cast = reduce_mean(axes = var_4390, keep_dims = var_1181, x = inputs_127_cast)[name = tensor("channels_mean_127_cast")]; + tensor zero_mean_127_cast = sub(x = inputs_127_cast, y = channels_mean_127_cast)[name = tensor("zero_mean_127_cast")]; + tensor zero_mean_sq_127_cast = mul(x = zero_mean_127_cast, y = zero_mean_127_cast)[name = tensor("zero_mean_sq_127_cast")]; + tensor var_4394 = const()[name = tensor("op_4394"), val = tensor([1])]; + tensor var_4395_cast = reduce_mean(axes = var_4394, keep_dims = var_1181, x = zero_mean_sq_127_cast)[name = tensor("op_4395_cast")]; + tensor var_4396_to_fp16 = const()[name = tensor("op_4396_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4397_cast = add(x = var_4395_cast, y = var_4396_to_fp16)[name = tensor("op_4397_cast")]; + tensor denom_127_epsilon_0_to_fp16 = const()[name = tensor("denom_127_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_127_cast = rsqrt(epsilon = denom_127_epsilon_0_to_fp16, x = var_4397_cast)[name = tensor("denom_127_cast")]; + tensor out_127_cast = mul(x = zero_mean_127_cast, y = denom_127_cast)[name = tensor("out_127_cast")]; + tensor var_4401_to_fp16 = const()[name = tensor("op_4401_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(382922240)))]; + tensor var_4402_cast = add(x = out_127_cast, y = var_4401_to_fp16)[name = tensor("op_4402_cast")]; + tensor var_4404_to_fp16 = const()[name = tensor("op_4404_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(382924864)))]; + tensor hidden_states_179_cast = mul(x = var_4402_cast, y = var_4404_to_fp16)[name = tensor("hidden_states_179_cast")]; + tensor var_4411 = const()[name = 
tensor("op_4411"), val = tensor([1, 1])]; + tensor var_4413 = const()[name = tensor("op_4413"), val = tensor([1, 1])]; + tensor q_85_pad_type_0 = const()[name = tensor("q_85_pad_type_0"), val = tensor("custom")]; + tensor q_85_pad_0 = const()[name = tensor("q_85_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(382927488))), lut = tensor([-0x1.50cp-6, 0x1.52p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_85_cast = conv(dilations = var_4413, groups = var_1186, pad = q_85_pad_0, pad_type = q_85_pad_type_0, strides = var_4411, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_179_cast)[name = tensor("q_85_cast")]; + tensor var_4417 = const()[name = tensor("op_4417"), val = tensor([1, 1])]; + tensor var_4419 = const()[name = tensor("op_4419"), val = tensor([1, 1])]; + tensor k_85_pad_type_0 = const()[name = tensor("k_85_pad_type_0"), val = tensor("custom")]; + tensor k_85_pad_0 = const()[name = tensor("k_85_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(383132352))), lut = tensor([-0x1.4f4p-6, 0x1.4ecp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_85_cast = conv(dilations = var_4419, groups = var_1186, pad = k_85_pad_0, pad_type = k_85_pad_type_0, strides = var_4417, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_179_cast)[name = tensor("k_85_cast")]; + tensor var_4423 = const()[name = tensor("op_4423"), val = tensor([1, 1])]; + tensor var_4425 = const()[name = tensor("op_4425"), val = tensor([1, 1])]; + tensor v_85_pad_type_0 = const()[name = tensor("v_85_pad_type_0"), val = tensor("custom")]; + tensor v_85_pad_0 = const()[name = tensor("v_85_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(383337216))), lut = tensor([-0x1.37p-5, -0x1.75cp-7, 0x1.794p-7, 0x1.374p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_85_cast = conv(dilations = var_4425, groups = var_1186, pad = v_85_pad_0, pad_type = v_85_pad_type_0, strides = var_4423, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_179_cast)[name = tensor("v_85_cast")]; + tensor var_4429 = const()[name = tensor("op_4429"), val = tensor([2, 20, 64, -1])]; + tensor var_4430_cast = reshape(shape = var_4429, x = q_85_cast)[name = tensor("op_4430_cast")]; + tensor var_4431 = const()[name = tensor("op_4431"), val = tensor([2, 20, 64, -1])]; + tensor var_4432_cast = reshape(shape = var_4431, x = k_85_cast)[name = tensor("op_4432_cast")]; + tensor var_4433 = const()[name = tensor("op_4433"), val = tensor([2, 20, 64, -1])]; + tensor var_4434_cast = 
reshape(shape = var_4433, x = v_85_cast)[name = tensor("op_4434_cast")]; + tensor attn_weights_169_transpose_x_0 = const()[name = tensor("attn_weights_169_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_169_transpose_y_0 = const()[name = tensor("attn_weights_169_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_169_cast = matmul(transpose_x = attn_weights_169_transpose_x_0, transpose_y = attn_weights_169_transpose_y_0, x = var_4430_cast, y = var_4432_cast)[name = tensor("attn_weights_169_cast")]; + tensor attn_weights_171_cast = mul(x = attn_weights_169_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_171_cast")]; + tensor var_4438_cast = softmax(axis = var_1170, x = attn_weights_171_cast)[name = tensor("op_4438_cast")]; + tensor attn_85_transpose_x_0 = const()[name = tensor("attn_85_transpose_x_0"), val = tensor(false)]; + tensor attn_85_transpose_y_0 = const()[name = tensor("attn_85_transpose_y_0"), val = tensor(true)]; + tensor attn_85_cast = matmul(transpose_x = attn_85_transpose_x_0, transpose_y = attn_85_transpose_y_0, x = var_4434_cast, y = var_4438_cast)[name = tensor("attn_85_cast")]; + tensor var_4442 = const()[name = tensor("op_4442"), val = tensor([2, 1280, 1, -1])]; + tensor input_285_cast = reshape(shape = var_4442, x = attn_85_cast)[name = tensor("input_285_cast")]; + tensor var_4447 = const()[name = tensor("op_4447"), val = tensor([1, 1])]; + tensor var_4449 = const()[name = tensor("op_4449"), val = tensor([1, 1])]; + tensor var_4451_pad_type_0 = const()[name = tensor("op_4451_pad_type_0"), val = tensor("custom")]; + tensor var_4451_pad_0 = const()[name = tensor("op_4451_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(383746880))), lut = tensor([-0x1.31cp-5, -0x1.6f4p-7, 0x1.704p-7, 0x1.31cp-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(384156544)))]; + tensor var_4451_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_4449, groups = var_1186, pad = var_4451_pad_0, pad_type = var_4451_pad_type_0, strides = var_4447, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_285_cast)[name = tensor("op_4451_cast")]; + tensor inputs_129_cast = add(x = var_4451_cast, y = inputs_127_cast)[name = tensor("inputs_129_cast")]; + tensor var_4455 = const()[name = tensor("op_4455"), val = tensor([1])]; + tensor channels_mean_129_cast = reduce_mean(axes = var_4455, keep_dims = var_1181, x = inputs_129_cast)[name = tensor("channels_mean_129_cast")]; + tensor zero_mean_129_cast = sub(x = inputs_129_cast, y = channels_mean_129_cast)[name = tensor("zero_mean_129_cast")]; + tensor zero_mean_sq_129_cast = mul(x = zero_mean_129_cast, y = zero_mean_129_cast)[name = tensor("zero_mean_sq_129_cast")]; + tensor var_4459 = const()[name = tensor("op_4459"), val = tensor([1])]; + tensor var_4460_cast = reduce_mean(axes = var_4459, keep_dims = var_1181, x = zero_mean_sq_129_cast)[name = 
tensor("op_4460_cast")]; + tensor var_4461_to_fp16 = const()[name = tensor("op_4461_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4462_cast = add(x = var_4460_cast, y = var_4461_to_fp16)[name = tensor("op_4462_cast")]; + tensor denom_129_epsilon_0_to_fp16 = const()[name = tensor("denom_129_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_129_cast = rsqrt(epsilon = denom_129_epsilon_0_to_fp16, x = var_4462_cast)[name = tensor("denom_129_cast")]; + tensor out_129_cast = mul(x = zero_mean_129_cast, y = denom_129_cast)[name = tensor("out_129_cast")]; + tensor var_4466_to_fp16 = const()[name = tensor("op_4466_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(384159168)))]; + tensor var_4467_cast = add(x = out_129_cast, y = var_4466_to_fp16)[name = tensor("op_4467_cast")]; + tensor var_4469_to_fp16 = const()[name = tensor("op_4469_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(384161792)))]; + tensor hidden_states_181_cast = mul(x = var_4467_cast, y = var_4469_to_fp16)[name = tensor("hidden_states_181_cast")]; + tensor var_4476 = const()[name = tensor("op_4476"), val = tensor([1, 1])]; + tensor var_4478 = const()[name = tensor("op_4478"), val = tensor([1, 1])]; + tensor q_87_pad_type_0 = const()[name = tensor("q_87_pad_type_0"), val = tensor("custom")]; + tensor q_87_pad_0 = const()[name = tensor("q_87_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(384164416))), lut = tensor([-0x1.d38p-7, 0x1.d44p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_87_cast = conv(dilations = var_4478, groups = var_1186, pad = q_87_pad_0, pad_type = q_87_pad_type_0, strides = var_4476, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_181_cast)[name = tensor("q_87_cast")]; + tensor var_4482 = const()[name = tensor("op_4482"), val = tensor([1, 1])]; + tensor var_4484 = const()[name = tensor("op_4484"), val = tensor([1, 1])]; + tensor k_87_pad_type_0 = const()[name = tensor("k_87_pad_type_0"), val = tensor("custom")]; + tensor k_87_pad_0 = const()[name = tensor("k_87_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(384369280))), lut = tensor([-0x1.648p-7, 0x1.62cp-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_87_cast = conv(dilations = var_4484, groups = var_1186, pad = k_87_pad_0, pad_type = k_87_pad_type_0, strides = var_4482, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_87_cast")]; + tensor var_4488 = const()[name = tensor("op_4488"), val = tensor([1, 1])]; + tensor var_4490 = const()[name = tensor("op_4490"), val = tensor([1, 1])]; + tensor v_87_pad_type_0 = const()[name = tensor("v_87_pad_type_0"), val = tensor("custom")]; + tensor v_87_pad_0 = const()[name = tensor("v_87_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(384697024))), lut = tensor([-0x1.e88p-7, 0x1.e78p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_87_cast = conv(dilations = var_4490, groups = var_1186, pad = v_87_pad_0, pad_type = v_87_pad_type_0, strides = var_4488, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_87_cast")]; + tensor var_4494 = const()[name = tensor("op_4494"), val = tensor([2, 20, 64, -1])]; + tensor var_4495_cast = reshape(shape = var_4494, x = q_87_cast)[name = tensor("op_4495_cast")]; + tensor var_4496 = const()[name = tensor("op_4496"), val = tensor([2, 20, 64, -1])]; + tensor var_4497_cast = reshape(shape = var_4496, x = k_87_cast)[name = tensor("op_4497_cast")]; + tensor var_4498 = const()[name = tensor("op_4498"), val = tensor([2, 20, 64, -1])]; + tensor var_4499_cast = reshape(shape = var_4498, x = v_87_cast)[name = tensor("op_4499_cast")]; + tensor attn_weights_173_transpose_x_0 = const()[name = tensor("attn_weights_173_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_173_transpose_y_0 = const()[name = tensor("attn_weights_173_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_173_cast = matmul(transpose_x = attn_weights_173_transpose_x_0, transpose_y = attn_weights_173_transpose_y_0, x = var_4495_cast, y = var_4497_cast)[name = tensor("attn_weights_173_cast")]; + tensor attn_weights_175_cast = mul(x = attn_weights_173_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_175_cast")]; + tensor var_4503_cast = softmax(axis = var_1170, x = attn_weights_175_cast)[name = tensor("op_4503_cast")]; + tensor attn_87_transpose_x_0 = const()[name = tensor("attn_87_transpose_x_0"), val = tensor(false)]; + tensor attn_87_transpose_y_0 = const()[name = tensor("attn_87_transpose_y_0"), val = tensor(true)]; + tensor attn_87_cast = matmul(transpose_x = attn_87_transpose_x_0, transpose_y = attn_87_transpose_y_0, x = var_4499_cast, y = var_4503_cast)[name = tensor("attn_87_cast")]; + tensor var_4507 = const()[name = tensor("op_4507"), val = tensor([2, 1280, 1, -1])]; + tensor input_287_cast = reshape(shape = var_4507, x = attn_87_cast)[name = tensor("input_287_cast")]; + tensor var_4512 = const()[name = tensor("op_4512"), val = tensor([1, 1])]; + tensor var_4514 = const()[name = tensor("op_4514"), val = tensor([1, 1])]; + tensor var_4516_pad_type_0 = const()[name = tensor("op_4516_pad_type_0"), val = tensor("custom")]; + tensor var_4516_pad_0 = const()[name = tensor("op_4516_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(385024768))), lut = tensor([-0x1.354p-7, 0x1.37p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(385229632)))]; + 
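Every weight named `*_palettized` above is materialized through `constexpr_lut_to_dense`: the blob holds n-bit indices into a small lookup table (a 2-entry inline LUT is 1 bit per weight, a 4-entry LUT is 2 bits, and larger tables live in `weight.bin` alongside the indices). This per-layer variation in table size is the mixed-bit palettization the repository is about. A plain-numpy sketch of the decompression follows; the LSB-first packing order is an assumption for illustration, not a statement about the Core ML blob format:

```python
import numpy as np

def lut_to_dense(packed_indices, lut, shape):
    # packed_indices: uint8 blob of n-bit palette indices, where
    # n = log2(len(lut)); lut: the palette values; shape: dense shape.
    nbits = int(np.log2(len(lut)))
    bits = np.unpackbits(packed_indices, bitorder="little")  # assumed order
    n = int(np.prod(shape))
    groups = bits[: n * nbits].reshape(n, nbits)
    idx = (groups * (1 << np.arange(nbits))).sum(axis=1)
    return lut[idx].astype(np.float16).reshape(shape)

# e.g. a 1-bit layer with an inline 2-entry LUT like the one above:
lut = np.array([float.fromhex("-0x1.354p-7"), float.fromhex("0x1.37p-7")])
# 1280*1280 1-bit indices = 204800 bytes (dummy data for the sketch)
dense = lut_to_dense(np.random.randint(0, 256, 204800, dtype=np.uint8),
                     lut, (1280, 1280, 1, 1))
```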
tensor var_4516_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_4514, groups = var_1186, pad = var_4516_pad_0, pad_type = var_4516_pad_type_0, strides = var_4512, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_287_cast)[name = tensor("op_4516_cast")]; + tensor inputs_131_cast = add(x = var_4516_cast, y = inputs_129_cast)[name = tensor("inputs_131_cast")]; + tensor var_4520 = const()[name = tensor("op_4520"), val = tensor([1])]; + tensor channels_mean_131_cast = reduce_mean(axes = var_4520, keep_dims = var_1181, x = inputs_131_cast)[name = tensor("channels_mean_131_cast")]; + tensor zero_mean_131_cast = sub(x = inputs_131_cast, y = channels_mean_131_cast)[name = tensor("zero_mean_131_cast")]; + tensor zero_mean_sq_131_cast = mul(x = zero_mean_131_cast, y = zero_mean_131_cast)[name = tensor("zero_mean_sq_131_cast")]; + tensor var_4524 = const()[name = tensor("op_4524"), val = tensor([1])]; + tensor var_4525_cast = reduce_mean(axes = var_4524, keep_dims = var_1181, x = zero_mean_sq_131_cast)[name = tensor("op_4525_cast")]; + tensor var_4526_to_fp16 = const()[name = tensor("op_4526_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4527_cast = add(x = var_4525_cast, y = var_4526_to_fp16)[name = tensor("op_4527_cast")]; + tensor denom_131_epsilon_0_to_fp16 = const()[name = tensor("denom_131_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_131_cast = rsqrt(epsilon = denom_131_epsilon_0_to_fp16, x = var_4527_cast)[name = tensor("denom_131_cast")]; + tensor out_131_cast = mul(x = zero_mean_131_cast, y = denom_131_cast)[name = tensor("out_131_cast")]; + tensor var_4531_to_fp16 = const()[name = tensor("op_4531_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(385232256)))]; + tensor var_4532_cast = add(x = out_131_cast, y = var_4531_to_fp16)[name = tensor("op_4532_cast")]; + tensor var_4534_to_fp16 = const()[name = tensor("op_4534_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(385234880)))]; + tensor input_289_cast = mul(x = var_4532_cast, y = var_4534_to_fp16)[name = tensor("input_289_cast")]; + tensor var_4542 = const()[name = tensor("op_4542"), val = tensor([1, 1])]; + tensor var_4544 = const()[name = tensor("op_4544"), val = tensor([1, 1])]; + tensor var_4546_pad_type_0 = const()[name = tensor("op_4546_pad_type_0"), val = tensor("custom")]; + tensor var_4546_pad_0 = const()[name = tensor("op_4546_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(385237504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(391791168))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(391791296)))]; + tensor var_4546_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_4544, groups = var_1186, pad = var_4546_pad_0, pad_type = 
var_4546_pad_type_0, strides = var_4542, weight = down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_289_cast)[name = tensor("op_4546_cast")]; + tensor var_4547_split_sizes_0 = const()[name = tensor("op_4547_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4547_axis_0 = const()[name = tensor("op_4547_axis_0"), val = tensor(1)]; + tensor var_4547_cast_0, tensor var_4547_cast_1 = split(axis = var_4547_axis_0, split_sizes = var_4547_split_sizes_0, x = var_4546_cast)[name = tensor("op_4547_cast")]; + tensor var_4549_mode_0 = const()[name = tensor("op_4549_mode_0"), val = tensor("EXACT")]; + tensor var_4549_cast = gelu(mode = var_4549_mode_0, x = var_4547_cast_1)[name = tensor("op_4549_cast")]; + tensor input_291_cast = mul(x = var_4547_cast_0, y = var_4549_cast)[name = tensor("input_291_cast")]; + tensor var_4553 = const()[name = tensor("op_4553"), val = tensor([1, 1])]; + tensor var_4555 = const()[name = tensor("op_4555"), val = tensor([1, 1])]; + tensor var_4557_pad_type_0 = const()[name = tensor("op_4557_pad_type_0"), val = tensor("custom")]; + tensor var_4557_pad_0 = const()[name = tensor("op_4557_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(391811840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(395088704))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(395088832)))]; + tensor var_4557_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_4555, groups = var_1186, pad = var_4557_pad_0, pad_type = var_4557_pad_type_0, strides = var_4553, weight = down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_291_cast)[name = tensor("op_4557_cast")]; + tensor inputs_133_cast = add(x = var_4557_cast, y = inputs_131_cast)[name = tensor("inputs_133_cast")]; + tensor var_4567 = const()[name = tensor("op_4567"), val = tensor([1])]; + tensor channels_mean_133_cast = reduce_mean(axes = var_4567, keep_dims = var_1181, x = inputs_133_cast)[name = tensor("channels_mean_133_cast")]; + tensor zero_mean_133_cast = sub(x = inputs_133_cast, y = channels_mean_133_cast)[name = tensor("zero_mean_133_cast")]; + tensor zero_mean_sq_133_cast = mul(x = zero_mean_133_cast, y = zero_mean_133_cast)[name = tensor("zero_mean_sq_133_cast")]; + tensor var_4571 = const()[name = tensor("op_4571"), val = tensor([1])]; + tensor var_4572_cast = reduce_mean(axes = var_4571, keep_dims = var_1181, x = zero_mean_sq_133_cast)[name = tensor("op_4572_cast")]; + tensor var_4573_to_fp16 = const()[name = tensor("op_4573_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4574_cast = add(x = var_4572_cast, y = var_4573_to_fp16)[name = tensor("op_4574_cast")]; + tensor denom_133_epsilon_0_to_fp16 = const()[name = tensor("denom_133_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_133_cast = rsqrt(epsilon = denom_133_epsilon_0_to_fp16, x = var_4574_cast)[name = tensor("denom_133_cast")]; + tensor 
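The feed-forward blocks above all follow the GEGLU pattern: a 1x1 conv expands 1280 to 10240 channels, `split` halves that along axis 1 (`split_sizes = [5120, 5120]`), exact `gelu` is applied to the second half, and the result gates the first half before `ff_net_2` projects back down to 1280. A short numpy sketch of the gating step:

```python
import numpy as np
from math import erf, sqrt

def geglu(h):
    # h: [batch, 10240, 1, seq], the ff_net_0_proj output; split into
    # two 5120-channel halves along axis 1, as in the ops above.
    a, b = np.split(h, 2, axis=1)
    gelu_b = 0.5 * b * (1.0 + np.vectorize(erf)(b / sqrt(2.0)))  # mode "EXACT"
    return a * gelu_b   # the gated product fed to ff_net_2
```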
out_133_cast = mul(x = zero_mean_133_cast, y = denom_133_cast)[name = tensor("out_133_cast")]; + tensor var_4578_to_fp16 = const()[name = tensor("op_4578_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(395091456)))]; + tensor var_4579_cast = add(x = out_133_cast, y = var_4578_to_fp16)[name = tensor("op_4579_cast")]; + tensor var_4581_to_fp16 = const()[name = tensor("op_4581_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(395094080)))]; + tensor hidden_states_185_cast = mul(x = var_4579_cast, y = var_4581_to_fp16)[name = tensor("hidden_states_185_cast")]; + tensor var_4588 = const()[name = tensor("op_4588"), val = tensor([1, 1])]; + tensor var_4590 = const()[name = tensor("op_4590"), val = tensor([1, 1])]; + tensor q_89_pad_type_0 = const()[name = tensor("q_89_pad_type_0"), val = tensor("custom")]; + tensor q_89_pad_0 = const()[name = tensor("q_89_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(395096704))), lut = tensor([-0x1.544p-6, 0x1.54p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_89_cast = conv(dilations = var_4590, groups = var_1186, pad = q_89_pad_0, pad_type = q_89_pad_type_0, strides = var_4588, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_185_cast)[name = tensor("q_89_cast")]; + tensor var_4594 = const()[name = tensor("op_4594"), val = tensor([1, 1])]; + tensor var_4596 = const()[name = tensor("op_4596"), val = tensor([1, 1])]; + tensor k_89_pad_type_0 = const()[name = tensor("k_89_pad_type_0"), val = tensor("custom")]; + tensor k_89_pad_0 = const()[name = tensor("k_89_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(395301568))), lut = tensor([-0x1.53p-6, 0x1.51cp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_89_cast = conv(dilations = var_4596, groups = var_1186, pad = k_89_pad_0, pad_type = k_89_pad_type_0, strides = var_4594, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_185_cast)[name = tensor("k_89_cast")]; + tensor var_4600 = const()[name = tensor("op_4600"), val = tensor([1, 1])]; + tensor var_4602 = const()[name = tensor("op_4602"), val = tensor([1, 1])]; + tensor v_89_pad_type_0 = const()[name = tensor("v_89_pad_type_0"), val = tensor("custom")]; + tensor v_89_pad_0 = const()[name = tensor("v_89_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(395506432))), lut = tensor([-0x1.3e4p-5, -0x1.7ecp-7, 0x1.82p-7, 0x1.3f4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_89_cast = conv(dilations = var_4602, groups = var_1186, pad = 
v_89_pad_0, pad_type = v_89_pad_type_0, strides = var_4600, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_185_cast)[name = tensor("v_89_cast")]; + tensor var_4606 = const()[name = tensor("op_4606"), val = tensor([2, 20, 64, -1])]; + tensor var_4607_cast = reshape(shape = var_4606, x = q_89_cast)[name = tensor("op_4607_cast")]; + tensor var_4608 = const()[name = tensor("op_4608"), val = tensor([2, 20, 64, -1])]; + tensor var_4609_cast = reshape(shape = var_4608, x = k_89_cast)[name = tensor("op_4609_cast")]; + tensor var_4610 = const()[name = tensor("op_4610"), val = tensor([2, 20, 64, -1])]; + tensor var_4611_cast = reshape(shape = var_4610, x = v_89_cast)[name = tensor("op_4611_cast")]; + tensor attn_weights_177_transpose_x_0 = const()[name = tensor("attn_weights_177_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_177_transpose_y_0 = const()[name = tensor("attn_weights_177_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_177_cast = matmul(transpose_x = attn_weights_177_transpose_x_0, transpose_y = attn_weights_177_transpose_y_0, x = var_4607_cast, y = var_4609_cast)[name = tensor("attn_weights_177_cast")]; + tensor attn_weights_179_cast = mul(x = attn_weights_177_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_179_cast")]; + tensor var_4615_cast = softmax(axis = var_1170, x = attn_weights_179_cast)[name = tensor("op_4615_cast")]; + tensor attn_89_transpose_x_0 = const()[name = tensor("attn_89_transpose_x_0"), val = tensor(false)]; + tensor attn_89_transpose_y_0 = const()[name = tensor("attn_89_transpose_y_0"), val = tensor(true)]; + tensor attn_89_cast = matmul(transpose_x = attn_89_transpose_x_0, transpose_y = attn_89_transpose_y_0, x = var_4611_cast, y = var_4615_cast)[name = tensor("attn_89_cast")]; + tensor var_4619 = const()[name = tensor("op_4619"), val = tensor([2, 1280, 1, -1])]; + tensor input_293_cast = reshape(shape = var_4619, x = attn_89_cast)[name = tensor("input_293_cast")]; + tensor var_4624 = const()[name = tensor("op_4624"), val = tensor([1, 1])]; + tensor var_4626 = const()[name = tensor("op_4626"), val = tensor([1, 1])]; + tensor var_4628_pad_type_0 = const()[name = tensor("op_4628_pad_type_0"), val = tensor("custom")]; + tensor var_4628_pad_0 = const()[name = tensor("op_4628_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(395916096))), lut = tensor([-0x1.378p-5, -0x1.74p-7, 0x1.7bcp-7, 0x1.394p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(396325760)))]; + tensor var_4628_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_4626, groups = var_1186, pad = var_4628_pad_0, pad_type = var_4628_pad_type_0, strides = var_4624, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_293_cast)[name = tensor("op_4628_cast")]; + tensor inputs_135_cast = add(x = var_4628_cast, y = 
inputs_133_cast)[name = tensor("inputs_135_cast")]; + tensor var_4632 = const()[name = tensor("op_4632"), val = tensor([1])]; + tensor channels_mean_135_cast = reduce_mean(axes = var_4632, keep_dims = var_1181, x = inputs_135_cast)[name = tensor("channels_mean_135_cast")]; + tensor zero_mean_135_cast = sub(x = inputs_135_cast, y = channels_mean_135_cast)[name = tensor("zero_mean_135_cast")]; + tensor zero_mean_sq_135_cast = mul(x = zero_mean_135_cast, y = zero_mean_135_cast)[name = tensor("zero_mean_sq_135_cast")]; + tensor var_4636 = const()[name = tensor("op_4636"), val = tensor([1])]; + tensor var_4637_cast = reduce_mean(axes = var_4636, keep_dims = var_1181, x = zero_mean_sq_135_cast)[name = tensor("op_4637_cast")]; + tensor var_4638_to_fp16 = const()[name = tensor("op_4638_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4639_cast = add(x = var_4637_cast, y = var_4638_to_fp16)[name = tensor("op_4639_cast")]; + tensor denom_135_epsilon_0_to_fp16 = const()[name = tensor("denom_135_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_135_cast = rsqrt(epsilon = denom_135_epsilon_0_to_fp16, x = var_4639_cast)[name = tensor("denom_135_cast")]; + tensor out_135_cast = mul(x = zero_mean_135_cast, y = denom_135_cast)[name = tensor("out_135_cast")]; + tensor var_4643_to_fp16 = const()[name = tensor("op_4643_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(396328384)))]; + tensor var_4644_cast = add(x = out_135_cast, y = var_4643_to_fp16)[name = tensor("op_4644_cast")]; + tensor var_4646_to_fp16 = const()[name = tensor("op_4646_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(396331008)))]; + tensor hidden_states_187_cast = mul(x = var_4644_cast, y = var_4646_to_fp16)[name = tensor("hidden_states_187_cast")]; + tensor var_4653 = const()[name = tensor("op_4653"), val = tensor([1, 1])]; + tensor var_4655 = const()[name = tensor("op_4655"), val = tensor([1, 1])]; + tensor q_91_pad_type_0 = const()[name = tensor("q_91_pad_type_0"), val = tensor("custom")]; + tensor q_91_pad_0 = const()[name = tensor("q_91_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(396333632))), lut = tensor([-0x1.cd4p-7, 0x1.cf4p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_91_cast = conv(dilations = var_4655, groups = var_1186, pad = q_91_pad_0, pad_type = q_91_pad_type_0, strides = var_4653, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_187_cast)[name = tensor("q_91_cast")]; + tensor var_4659 = const()[name = tensor("op_4659"), val = tensor([1, 1])]; + tensor var_4661 = const()[name = tensor("op_4661"), val = tensor([1, 1])]; + tensor k_91_pad_type_0 = const()[name = tensor("k_91_pad_type_0"), val = tensor("custom")]; + tensor k_91_pad_0 = const()[name = tensor("k_91_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(396538496))), lut = tensor([-0x1.584p-7, 0x1.568p-7]), name = 
tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_91_cast = conv(dilations = var_4661, groups = var_1186, pad = k_91_pad_0, pad_type = k_91_pad_type_0, strides = var_4659, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_91_cast")]; + tensor var_4665 = const()[name = tensor("op_4665"), val = tensor([1, 1])]; + tensor var_4667 = const()[name = tensor("op_4667"), val = tensor([1, 1])]; + tensor v_91_pad_type_0 = const()[name = tensor("v_91_pad_type_0"), val = tensor("custom")]; + tensor v_91_pad_0 = const()[name = tensor("v_91_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(396866240))), lut = tensor([-0x1.ce8p-7, 0x1.cd8p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_91_cast = conv(dilations = var_4667, groups = var_1186, pad = v_91_pad_0, pad_type = v_91_pad_type_0, strides = var_4665, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_91_cast")]; + tensor var_4671 = const()[name = tensor("op_4671"), val = tensor([2, 20, 64, -1])]; + tensor var_4672_cast = reshape(shape = var_4671, x = q_91_cast)[name = tensor("op_4672_cast")]; + tensor var_4673 = const()[name = tensor("op_4673"), val = tensor([2, 20, 64, -1])]; + tensor var_4674_cast = reshape(shape = var_4673, x = k_91_cast)[name = tensor("op_4674_cast")]; + tensor var_4675 = const()[name = tensor("op_4675"), val = tensor([2, 20, 64, -1])]; + tensor var_4676_cast = reshape(shape = var_4675, x = v_91_cast)[name = tensor("op_4676_cast")]; + tensor attn_weights_181_transpose_x_0 = const()[name = tensor("attn_weights_181_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_181_transpose_y_0 = const()[name = tensor("attn_weights_181_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_181_cast = matmul(transpose_x = attn_weights_181_transpose_x_0, transpose_y = attn_weights_181_transpose_y_0, x = var_4672_cast, y = var_4674_cast)[name = tensor("attn_weights_181_cast")]; + tensor attn_weights_183_cast = mul(x = attn_weights_181_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_183_cast")]; + tensor var_4680_cast = softmax(axis = var_1170, x = attn_weights_183_cast)[name = tensor("op_4680_cast")]; + tensor attn_91_transpose_x_0 = const()[name = tensor("attn_91_transpose_x_0"), val = tensor(false)]; + tensor attn_91_transpose_y_0 = const()[name = tensor("attn_91_transpose_y_0"), val = tensor(true)]; + tensor attn_91_cast = matmul(transpose_x = attn_91_transpose_x_0, transpose_y = attn_91_transpose_y_0, x = var_4676_cast, y = var_4680_cast)[name = tensor("attn_91_cast")]; + tensor var_4684 = const()[name = tensor("op_4684"), val = tensor([2, 1280, 1, -1])]; + tensor input_295_cast = reshape(shape = var_4684, x = attn_91_cast)[name = tensor("input_295_cast")]; + tensor var_4689 = const()[name = tensor("op_4689"), val = tensor([1, 1])]; + tensor var_4691 = const()[name = tensor("op_4691"), val = tensor([1, 1])]; + tensor var_4693_pad_type_0 = const()[name = tensor("op_4693_pad_type_0"), val = tensor("custom")]; + tensor var_4693_pad_0 = const()[name = 
tensor("op_4693_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(397193984))), lut = tensor([-0x1.23p-7, 0x1.25p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(397398848)))]; + tensor var_4693_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_4691, groups = var_1186, pad = var_4693_pad_0, pad_type = var_4693_pad_type_0, strides = var_4689, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_295_cast)[name = tensor("op_4693_cast")]; + tensor inputs_137_cast = add(x = var_4693_cast, y = inputs_135_cast)[name = tensor("inputs_137_cast")]; + tensor var_4697 = const()[name = tensor("op_4697"), val = tensor([1])]; + tensor channels_mean_137_cast = reduce_mean(axes = var_4697, keep_dims = var_1181, x = inputs_137_cast)[name = tensor("channels_mean_137_cast")]; + tensor zero_mean_137_cast = sub(x = inputs_137_cast, y = channels_mean_137_cast)[name = tensor("zero_mean_137_cast")]; + tensor zero_mean_sq_137_cast = mul(x = zero_mean_137_cast, y = zero_mean_137_cast)[name = tensor("zero_mean_sq_137_cast")]; + tensor var_4701 = const()[name = tensor("op_4701"), val = tensor([1])]; + tensor var_4702_cast = reduce_mean(axes = var_4701, keep_dims = var_1181, x = zero_mean_sq_137_cast)[name = tensor("op_4702_cast")]; + tensor var_4703_to_fp16 = const()[name = tensor("op_4703_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4704_cast = add(x = var_4702_cast, y = var_4703_to_fp16)[name = tensor("op_4704_cast")]; + tensor denom_137_epsilon_0_to_fp16 = const()[name = tensor("denom_137_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_137_cast = rsqrt(epsilon = denom_137_epsilon_0_to_fp16, x = var_4704_cast)[name = tensor("denom_137_cast")]; + tensor out_137_cast = mul(x = zero_mean_137_cast, y = denom_137_cast)[name = tensor("out_137_cast")]; + tensor var_4708_to_fp16 = const()[name = tensor("op_4708_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(397401472)))]; + tensor var_4709_cast = add(x = out_137_cast, y = var_4708_to_fp16)[name = tensor("op_4709_cast")]; + tensor var_4711_to_fp16 = const()[name = tensor("op_4711_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(397404096)))]; + tensor input_297_cast = mul(x = var_4709_cast, y = var_4711_to_fp16)[name = tensor("input_297_cast")]; + tensor var_4719 = const()[name = tensor("op_4719"), val = tensor([1, 1])]; + tensor var_4721 = const()[name = tensor("op_4721"), val = tensor([1, 1])]; + tensor var_4723_pad_type_0 = const()[name = tensor("op_4723_pad_type_0"), val = tensor("custom")]; + tensor var_4723_pad_0 = const()[name = tensor("op_4723_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(397406720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(403960384))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(403960512)))]; + tensor var_4723_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_4721, groups = var_1186, pad = var_4723_pad_0, pad_type = var_4723_pad_type_0, strides = var_4719, weight = down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_297_cast)[name = tensor("op_4723_cast")]; + tensor var_4724_split_sizes_0 = const()[name = tensor("op_4724_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4724_axis_0 = const()[name = tensor("op_4724_axis_0"), val = tensor(1)]; + tensor var_4724_cast_0, tensor var_4724_cast_1 = split(axis = var_4724_axis_0, split_sizes = var_4724_split_sizes_0, x = var_4723_cast)[name = tensor("op_4724_cast")]; + tensor var_4726_mode_0 = const()[name = tensor("op_4726_mode_0"), val = tensor("EXACT")]; + tensor var_4726_cast = gelu(mode = var_4726_mode_0, x = var_4724_cast_1)[name = tensor("op_4726_cast")]; + tensor input_299_cast = mul(x = var_4724_cast_0, y = var_4726_cast)[name = tensor("input_299_cast")]; + tensor var_4730 = const()[name = tensor("op_4730"), val = tensor([1, 1])]; + tensor var_4732 = const()[name = tensor("op_4732"), val = tensor([1, 1])]; + tensor var_4734_pad_type_0 = const()[name = tensor("op_4734_pad_type_0"), val = tensor("custom")]; + tensor var_4734_pad_0 = const()[name = tensor("op_4734_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(403981056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(407257920))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(407258048)))]; + tensor var_4734_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_4732, groups = var_1186, pad = var_4734_pad_0, pad_type = var_4734_pad_type_0, strides = var_4730, weight = down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_299_cast)[name = tensor("op_4734_cast")]; + tensor inputs_139_cast = add(x = var_4734_cast, y = inputs_137_cast)[name = tensor("inputs_139_cast")]; + tensor var_4744 = const()[name = tensor("op_4744"), val = tensor([1])]; + tensor channels_mean_139_cast = reduce_mean(axes = var_4744, keep_dims = var_1181, x = inputs_139_cast)[name = tensor("channels_mean_139_cast")]; + tensor zero_mean_139_cast = sub(x = inputs_139_cast, y = channels_mean_139_cast)[name = 
tensor("zero_mean_139_cast")]; + tensor zero_mean_sq_139_cast = mul(x = zero_mean_139_cast, y = zero_mean_139_cast)[name = tensor("zero_mean_sq_139_cast")]; + tensor var_4748 = const()[name = tensor("op_4748"), val = tensor([1])]; + tensor var_4749_cast = reduce_mean(axes = var_4748, keep_dims = var_1181, x = zero_mean_sq_139_cast)[name = tensor("op_4749_cast")]; + tensor var_4750_to_fp16 = const()[name = tensor("op_4750_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4751_cast = add(x = var_4749_cast, y = var_4750_to_fp16)[name = tensor("op_4751_cast")]; + tensor denom_139_epsilon_0_to_fp16 = const()[name = tensor("denom_139_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_139_cast = rsqrt(epsilon = denom_139_epsilon_0_to_fp16, x = var_4751_cast)[name = tensor("denom_139_cast")]; + tensor out_139_cast = mul(x = zero_mean_139_cast, y = denom_139_cast)[name = tensor("out_139_cast")]; + tensor var_4755_to_fp16 = const()[name = tensor("op_4755_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(407260672)))]; + tensor var_4756_cast = add(x = out_139_cast, y = var_4755_to_fp16)[name = tensor("op_4756_cast")]; + tensor var_4758_to_fp16 = const()[name = tensor("op_4758_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(407263296)))]; + tensor hidden_states_191_cast = mul(x = var_4756_cast, y = var_4758_to_fp16)[name = tensor("hidden_states_191_cast")]; + tensor var_4765 = const()[name = tensor("op_4765"), val = tensor([1, 1])]; + tensor var_4767 = const()[name = tensor("op_4767"), val = tensor([1, 1])]; + tensor q_93_pad_type_0 = const()[name = tensor("q_93_pad_type_0"), val = tensor("custom")]; + tensor q_93_pad_0 = const()[name = tensor("q_93_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(407265920))), lut = tensor([-0x1.4a4p-5, -0x1.8b8p-7, 0x1.90cp-7, 0x1.4b4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_93_cast = conv(dilations = var_4767, groups = var_1186, pad = q_93_pad_0, pad_type = q_93_pad_type_0, strides = var_4765, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_191_cast)[name = tensor("q_93_cast")]; + tensor var_4771 = const()[name = tensor("op_4771"), val = tensor([1, 1])]; + tensor var_4773 = const()[name = tensor("op_4773"), val = tensor([1, 1])]; + tensor k_93_pad_type_0 = const()[name = tensor("k_93_pad_type_0"), val = tensor("custom")]; + tensor k_93_pad_0 = const()[name = tensor("k_93_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(407675584))), lut = tensor([-0x1.474p-5, -0x1.8a4p-7, 0x1.898p-7, 0x1.474p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_93_cast = conv(dilations = var_4773, groups = var_1186, pad = k_93_pad_0, pad_type = k_93_pad_type_0, strides = var_4771, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = 
hidden_states_191_cast)[name = tensor("k_93_cast")]; + tensor var_4777 = const()[name = tensor("op_4777"), val = tensor([1, 1])]; + tensor var_4779 = const()[name = tensor("op_4779"), val = tensor([1, 1])]; + tensor v_93_pad_type_0 = const()[name = tensor("v_93_pad_type_0"), val = tensor("custom")]; + tensor v_93_pad_0 = const()[name = tensor("v_93_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(408085248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(408904512))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_93_cast = conv(dilations = var_4779, groups = var_1186, pad = v_93_pad_0, pad_type = v_93_pad_type_0, strides = var_4777, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_191_cast)[name = tensor("v_93_cast")]; + tensor var_4783 = const()[name = tensor("op_4783"), val = tensor([2, 20, 64, -1])]; + tensor var_4784_cast = reshape(shape = var_4783, x = q_93_cast)[name = tensor("op_4784_cast")]; + tensor var_4785 = const()[name = tensor("op_4785"), val = tensor([2, 20, 64, -1])]; + tensor var_4786_cast = reshape(shape = var_4785, x = k_93_cast)[name = tensor("op_4786_cast")]; + tensor var_4787 = const()[name = tensor("op_4787"), val = tensor([2, 20, 64, -1])]; + tensor var_4788_cast = reshape(shape = var_4787, x = v_93_cast)[name = tensor("op_4788_cast")]; + tensor attn_weights_185_transpose_x_0 = const()[name = tensor("attn_weights_185_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_185_transpose_y_0 = const()[name = tensor("attn_weights_185_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_185_cast = matmul(transpose_x = attn_weights_185_transpose_x_0, transpose_y = attn_weights_185_transpose_y_0, x = var_4784_cast, y = var_4786_cast)[name = tensor("attn_weights_185_cast")]; + tensor attn_weights_187_cast = mul(x = attn_weights_185_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_187_cast")]; + tensor var_4792_cast = softmax(axis = var_1170, x = attn_weights_187_cast)[name = tensor("op_4792_cast")]; + tensor attn_93_transpose_x_0 = const()[name = tensor("attn_93_transpose_x_0"), val = tensor(false)]; + tensor attn_93_transpose_y_0 = const()[name = tensor("attn_93_transpose_y_0"), val = tensor(true)]; + tensor attn_93_cast = matmul(transpose_x = attn_93_transpose_x_0, transpose_y = attn_93_transpose_y_0, x = var_4788_cast, y = var_4792_cast)[name = tensor("attn_93_cast")]; + tensor var_4796 = const()[name = tensor("op_4796"), val = tensor([2, 1280, 1, -1])]; + tensor input_301_cast = reshape(shape = var_4796, x = attn_93_cast)[name = tensor("input_301_cast")]; + tensor var_4801 = const()[name = tensor("op_4801"), val = tensor([1, 1])]; + tensor var_4803 = const()[name = tensor("op_4803"), val = tensor([1, 1])]; + tensor var_4805_pad_type_0 = const()[name = tensor("op_4805_pad_type_0"), val = tensor("custom")]; + tensor var_4805_pad_0 = const()[name = tensor("op_4805_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(408904640))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(409723904))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(409724032)))]; + tensor var_4805_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_4803, groups = var_1186, pad = var_4805_pad_0, pad_type = var_4805_pad_type_0, strides = var_4801, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_301_cast)[name = tensor("op_4805_cast")]; + tensor inputs_141_cast = add(x = var_4805_cast, y = inputs_139_cast)[name = tensor("inputs_141_cast")]; + tensor var_4809 = const()[name = tensor("op_4809"), val = tensor([1])]; + tensor channels_mean_141_cast = reduce_mean(axes = var_4809, keep_dims = var_1181, x = inputs_141_cast)[name = tensor("channels_mean_141_cast")]; + tensor zero_mean_141_cast = sub(x = inputs_141_cast, y = channels_mean_141_cast)[name = tensor("zero_mean_141_cast")]; + tensor zero_mean_sq_141_cast = mul(x = zero_mean_141_cast, y = zero_mean_141_cast)[name = tensor("zero_mean_sq_141_cast")]; + tensor var_4813 = const()[name = tensor("op_4813"), val = tensor([1])]; + tensor var_4814_cast = reduce_mean(axes = var_4813, keep_dims = var_1181, x = zero_mean_sq_141_cast)[name = tensor("op_4814_cast")]; + tensor var_4815_to_fp16 = const()[name = tensor("op_4815_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4816_cast = add(x = var_4814_cast, y = var_4815_to_fp16)[name = tensor("op_4816_cast")]; + tensor denom_141_epsilon_0_to_fp16 = const()[name = tensor("denom_141_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_141_cast = rsqrt(epsilon = denom_141_epsilon_0_to_fp16, x = var_4816_cast)[name = tensor("denom_141_cast")]; + tensor out_141_cast = mul(x = zero_mean_141_cast, y = denom_141_cast)[name = tensor("out_141_cast")]; + tensor var_4820_to_fp16 = const()[name = tensor("op_4820_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(409726656)))]; + tensor var_4821_cast = add(x = out_141_cast, y = var_4820_to_fp16)[name = tensor("op_4821_cast")]; + tensor var_4823_to_fp16 = const()[name = tensor("op_4823_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(409729280)))]; + tensor hidden_states_193_cast = mul(x = var_4821_cast, y = var_4823_to_fp16)[name = tensor("hidden_states_193_cast")]; + tensor var_4830 = const()[name = tensor("op_4830"), val = tensor([1, 1])]; + tensor var_4832 = const()[name = tensor("op_4832"), val = tensor([1, 1])]; + tensor q_95_pad_type_0 = const()[name = tensor("q_95_pad_type_0"), val = tensor("custom")]; + tensor q_95_pad_0 = const()[name = tensor("q_95_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(409731904))), lut = tensor([-0x1.a64p-7, 0x1.a5cp-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor 
q_95_cast = conv(dilations = var_4832, groups = var_1186, pad = q_95_pad_0, pad_type = q_95_pad_type_0, strides = var_4830, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_193_cast)[name = tensor("q_95_cast")]; + tensor var_4836 = const()[name = tensor("op_4836"), val = tensor([1, 1])]; + tensor var_4838 = const()[name = tensor("op_4838"), val = tensor([1, 1])]; + tensor k_95_pad_type_0 = const()[name = tensor("k_95_pad_type_0"), val = tensor("custom")]; + tensor k_95_pad_0 = const()[name = tensor("k_95_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(409936768))), lut = tensor([-0x1.27cp-7, 0x1.288p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_95_cast = conv(dilations = var_4838, groups = var_1186, pad = k_95_pad_0, pad_type = k_95_pad_type_0, strides = var_4836, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_95_cast")]; + tensor var_4842 = const()[name = tensor("op_4842"), val = tensor([1, 1])]; + tensor var_4844 = const()[name = tensor("op_4844"), val = tensor([1, 1])]; + tensor v_95_pad_type_0 = const()[name = tensor("v_95_pad_type_0"), val = tensor("custom")]; + tensor v_95_pad_0 = const()[name = tensor("v_95_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410264512))), lut = tensor([-0x1.77cp-7, 0x1.79p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_95_cast = conv(dilations = var_4844, groups = var_1186, pad = v_95_pad_0, pad_type = v_95_pad_type_0, strides = var_4842, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_95_cast")]; + tensor var_4848 = const()[name = tensor("op_4848"), val = tensor([2, 20, 64, -1])]; + tensor var_4849_cast = reshape(shape = var_4848, x = q_95_cast)[name = tensor("op_4849_cast")]; + tensor var_4850 = const()[name = tensor("op_4850"), val = tensor([2, 20, 64, -1])]; + tensor var_4851_cast = reshape(shape = var_4850, x = k_95_cast)[name = tensor("op_4851_cast")]; + tensor var_4852 = const()[name = tensor("op_4852"), val = tensor([2, 20, 64, -1])]; + tensor var_4853_cast = reshape(shape = var_4852, x = v_95_cast)[name = tensor("op_4853_cast")]; + tensor attn_weights_189_transpose_x_0 = const()[name = tensor("attn_weights_189_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_189_transpose_y_0 = const()[name = tensor("attn_weights_189_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_189_cast = matmul(transpose_x = attn_weights_189_transpose_x_0, transpose_y = attn_weights_189_transpose_y_0, x = var_4849_cast, y = var_4851_cast)[name = tensor("attn_weights_189_cast")]; + tensor attn_weights_191_cast = mul(x = attn_weights_189_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_191_cast")]; + tensor var_4857_cast = softmax(axis = var_1170, x = attn_weights_191_cast)[name = 
tensor("op_4857_cast")]; + tensor attn_95_transpose_x_0 = const()[name = tensor("attn_95_transpose_x_0"), val = tensor(false)]; + tensor attn_95_transpose_y_0 = const()[name = tensor("attn_95_transpose_y_0"), val = tensor(true)]; + tensor attn_95_cast = matmul(transpose_x = attn_95_transpose_x_0, transpose_y = attn_95_transpose_y_0, x = var_4853_cast, y = var_4857_cast)[name = tensor("attn_95_cast")]; + tensor var_4861 = const()[name = tensor("op_4861"), val = tensor([2, 1280, 1, -1])]; + tensor input_303_cast = reshape(shape = var_4861, x = attn_95_cast)[name = tensor("input_303_cast")]; + tensor var_4866 = const()[name = tensor("op_4866"), val = tensor([1, 1])]; + tensor var_4868 = const()[name = tensor("op_4868"), val = tensor([1, 1])]; + tensor var_4870_pad_type_0 = const()[name = tensor("op_4870_pad_type_0"), val = tensor("custom")]; + tensor var_4870_pad_0 = const()[name = tensor("op_4870_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410592256))), lut = tensor([-0x1.e74p-8, 0x1.e64p-8]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410797120)))]; + tensor var_4870_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_4868, groups = var_1186, pad = var_4870_pad_0, pad_type = var_4870_pad_type_0, strides = var_4866, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_303_cast)[name = tensor("op_4870_cast")]; + tensor inputs_143_cast = add(x = var_4870_cast, y = inputs_141_cast)[name = tensor("inputs_143_cast")]; + tensor var_4874 = const()[name = tensor("op_4874"), val = tensor([1])]; + tensor channels_mean_143_cast = reduce_mean(axes = var_4874, keep_dims = var_1181, x = inputs_143_cast)[name = tensor("channels_mean_143_cast")]; + tensor zero_mean_143_cast = sub(x = inputs_143_cast, y = channels_mean_143_cast)[name = tensor("zero_mean_143_cast")]; + tensor zero_mean_sq_143_cast = mul(x = zero_mean_143_cast, y = zero_mean_143_cast)[name = tensor("zero_mean_sq_143_cast")]; + tensor var_4878 = const()[name = tensor("op_4878"), val = tensor([1])]; + tensor var_4879_cast = reduce_mean(axes = var_4878, keep_dims = var_1181, x = zero_mean_sq_143_cast)[name = tensor("op_4879_cast")]; + tensor var_4880_to_fp16 = const()[name = tensor("op_4880_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4881_cast = add(x = var_4879_cast, y = var_4880_to_fp16)[name = tensor("op_4881_cast")]; + tensor denom_143_epsilon_0_to_fp16 = const()[name = tensor("denom_143_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_143_cast = rsqrt(epsilon = denom_143_epsilon_0_to_fp16, x = var_4881_cast)[name = tensor("denom_143_cast")]; + tensor out_143_cast = mul(x = zero_mean_143_cast, y = denom_143_cast)[name = tensor("out_143_cast")]; + tensor var_4885_to_fp16 = const()[name = tensor("op_4885_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410799744)))]; + tensor var_4886_cast 
= add(x = out_143_cast, y = var_4885_to_fp16)[name = tensor("op_4886_cast")]; + tensor var_4888_to_fp16 = const()[name = tensor("op_4888_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410802368)))]; + tensor input_305_cast = mul(x = var_4886_cast, y = var_4888_to_fp16)[name = tensor("input_305_cast")]; + tensor var_4896 = const()[name = tensor("op_4896"), val = tensor([1, 1])]; + tensor var_4898 = const()[name = tensor("op_4898"), val = tensor([1, 1])]; + tensor var_4900_pad_type_0 = const()[name = tensor("op_4900_pad_type_0"), val = tensor("custom")]; + tensor var_4900_pad_0 = const()[name = tensor("op_4900_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410804992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(417358656))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(417358784)))]; + tensor var_4900_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_4898, groups = var_1186, pad = var_4900_pad_0, pad_type = var_4900_pad_type_0, strides = var_4896, weight = down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_305_cast)[name = tensor("op_4900_cast")]; + tensor var_4901_split_sizes_0 = const()[name = tensor("op_4901_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4901_axis_0 = const()[name = tensor("op_4901_axis_0"), val = tensor(1)]; + tensor var_4901_cast_0, tensor var_4901_cast_1 = split(axis = var_4901_axis_0, split_sizes = var_4901_split_sizes_0, x = var_4900_cast)[name = tensor("op_4901_cast")]; + tensor var_4903_mode_0 = const()[name = tensor("op_4903_mode_0"), val = tensor("EXACT")]; + tensor var_4903_cast = gelu(mode = var_4903_mode_0, x = var_4901_cast_1)[name = tensor("op_4903_cast")]; + tensor input_307_cast = mul(x = var_4901_cast_0, y = var_4903_cast)[name = tensor("input_307_cast")]; + tensor var_4907 = const()[name = tensor("op_4907"), val = tensor([1, 1])]; + tensor var_4909 = const()[name = tensor("op_4909"), val = tensor([1, 1])]; + tensor var_4911_pad_type_0 = const()[name = tensor("op_4911_pad_type_0"), val = tensor("custom")]; + tensor var_4911_pad_0 = const()[name = tensor("op_4911_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(417379328))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(420656192))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(420656320)))]; + tensor var_4911_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_4909, groups = var_1186, pad = var_4911_pad_0, pad_type = var_4911_pad_type_0, strides = var_4907, weight = down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_307_cast)[name = tensor("op_4911_cast")]; + tensor hidden_states_197_cast = add(x = var_4911_cast, y = inputs_143_cast)[name = tensor("hidden_states_197_cast")]; + tensor var_4913 = const()[name = tensor("op_4913"), val = tensor([2, 1280, 32, 32])]; + tensor input_309_cast = reshape(shape = var_4913, x = hidden_states_197_cast)[name = tensor("input_309_cast")]; + tensor var_4917 = const()[name = tensor("op_4917"), val = tensor([1, 1])]; + tensor var_4919 = const()[name = tensor("op_4919"), val = tensor([1, 1])]; + tensor hidden_states_199_pad_type_0 = const()[name = tensor("hidden_states_199_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_199_pad_0 = const()[name = tensor("hidden_states_199_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(420658944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(421887808))), name = tensor("down_blocks_2_attentions_1_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(421888000)))]; + tensor hidden_states_199_cast = conv(bias = down_blocks_2_attentions_1_proj_out_bias_to_fp16, dilations = var_4919, groups = var_1186, pad = hidden_states_199_pad_0, pad_type = hidden_states_199_pad_type_0, strides = var_4917, weight = down_blocks_2_attentions_1_proj_out_weight_to_fp16_palettized, x = input_309_cast)[name = tensor("hidden_states_199_cast")]; + tensor input_311_cast = add(x = hidden_states_199_cast, y = hidden_states_133_cast)[name = tensor("input_311_cast")]; + tensor var_4927 = const()[name = tensor("op_4927"), val = tensor(3)]; + tensor var_4938 = const()[name = tensor("op_4938"), val = tensor(true)]; + tensor var_4943 = const()[name = tensor("op_4943"), val = tensor(1)]; + tensor reshape_64_shape_0 = const()[name = tensor("reshape_64_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_64_cast = reshape(shape = reshape_64_shape_0, x = input_311_cast)[name = tensor("reshape_64_cast")]; + tensor reduce_mean_48_axes_0 = const()[name = tensor("reduce_mean_48_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_48_keep_dims_0 = const()[name = tensor("reduce_mean_48_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_48_cast = reduce_mean(axes = reduce_mean_48_axes_0, keep_dims = reduce_mean_48_keep_dims_0, x = reshape_64_cast)[name = tensor("reduce_mean_48_cast")]; + tensor sub_32_cast = sub(x = reshape_64_cast, y = reduce_mean_48_cast)[name = tensor("sub_32_cast")]; + tensor square_16_cast = square(x = sub_32_cast)[name = tensor("square_16_cast")]; + tensor reduce_mean_50_axes_0 = const()[name = tensor("reduce_mean_50_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_50_keep_dims_0 = const()[name = tensor("reduce_mean_50_keep_dims_0"), val = tensor(true)]; + tensor 
reduce_mean_50_cast = reduce_mean(axes = reduce_mean_50_axes_0, keep_dims = reduce_mean_50_keep_dims_0, x = square_16_cast)[name = tensor("reduce_mean_50_cast")]; + tensor add_32_y_0_to_fp16 = const()[name = tensor("add_32_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_32_cast = add(x = reduce_mean_50_cast, y = add_32_y_0_to_fp16)[name = tensor("add_32_cast")]; + tensor sqrt_16_cast = sqrt(x = add_32_cast)[name = tensor("sqrt_16_cast")]; + tensor real_div_16_cast = real_div(x = sub_32_cast, y = sqrt_16_cast)[name = tensor("real_div_16_cast")]; + tensor reshape_65_shape_0 = const()[name = tensor("reshape_65_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_65_cast = reshape(shape = reshape_65_shape_0, x = real_div_16_cast)[name = tensor("reshape_65_cast")]; + tensor add_33_gamma_0_to_fp16 = const()[name = tensor("add_33_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(421890624)))]; + tensor add_33_beta_0_to_fp16 = const()[name = tensor("add_33_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(421893248)))]; + tensor add_33_epsilon_0_to_fp16 = const()[name = tensor("add_33_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_33_cast = batch_norm(beta = add_33_beta_0_to_fp16, epsilon = add_33_epsilon_0_to_fp16, gamma = add_33_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_65_cast)[name = tensor("add_33_cast")]; + tensor input_315_cast = silu(x = add_33_cast)[name = tensor("input_315_cast")]; + tensor var_4961 = const()[name = tensor("op_4961"), val = tensor([1, 1])]; + tensor var_4963 = const()[name = tensor("op_4963"), val = tensor([1, 1])]; + tensor hidden_states_201_pad_type_0 = const()[name = tensor("hidden_states_201_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_201_pad_0 = const()[name = tensor("hidden_states_201_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor mid_block_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(421895872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(432955136))), name = tensor("mid_block_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor mid_block_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("mid_block_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(432955328)))]; + tensor hidden_states_201_cast = conv(bias = mid_block_resnets_0_conv1_bias_to_fp16, dilations = var_4963, groups = var_4943, pad = hidden_states_201_pad_0, pad_type = hidden_states_201_pad_type_0, strides = var_4961, weight = mid_block_resnets_0_conv1_weight_to_fp16_palettized, x = input_315_cast)[name = tensor("hidden_states_201_cast")]; + tensor var_4969 = const()[name = tensor("op_4969"), val = tensor([1, 1])]; + tensor var_4971 = const()[name = tensor("op_4971"), val = tensor([1, 1])]; + tensor temb_13_pad_type_0 = const()[name = tensor("temb_13_pad_type_0"), val = tensor("custom")]; + tensor temb_13_pad_0 = const()[name = tensor("temb_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(432957952))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(433777216))), name = tensor("mid_block_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("mid_block_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(433777344)))]; + tensor temb_13_cast = conv(bias = mid_block_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_4971, groups = var_4943, pad = temb_13_pad_0, pad_type = temb_13_pad_type_0, strides = var_4969, weight = mid_block_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_13_cast")]; + tensor input_319_cast = add(x = hidden_states_201_cast, y = temb_13_cast)[name = tensor("input_319_cast")]; + tensor reshape_68_shape_0 = const()[name = tensor("reshape_68_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_68_cast = reshape(shape = reshape_68_shape_0, x = input_319_cast)[name = tensor("reshape_68_cast")]; + tensor reduce_mean_51_axes_0 = const()[name = tensor("reduce_mean_51_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_51_keep_dims_0 = const()[name = tensor("reduce_mean_51_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_51_cast = reduce_mean(axes = reduce_mean_51_axes_0, keep_dims = reduce_mean_51_keep_dims_0, x = reshape_68_cast)[name = tensor("reduce_mean_51_cast")]; + tensor sub_34_cast = sub(x = reshape_68_cast, y = reduce_mean_51_cast)[name = tensor("sub_34_cast")]; + tensor square_17_cast = square(x = sub_34_cast)[name = tensor("square_17_cast")]; + tensor reduce_mean_53_axes_0 = const()[name = tensor("reduce_mean_53_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_53_keep_dims_0 = const()[name = tensor("reduce_mean_53_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_53_cast = reduce_mean(axes = reduce_mean_53_axes_0, keep_dims = reduce_mean_53_keep_dims_0, x = square_17_cast)[name = tensor("reduce_mean_53_cast")]; + tensor add_34_y_0_to_fp16 = const()[name = tensor("add_34_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_34_cast = add(x = reduce_mean_53_cast, y = add_34_y_0_to_fp16)[name = tensor("add_34_cast")]; + tensor sqrt_17_cast = sqrt(x = add_34_cast)[name = tensor("sqrt_17_cast")]; + tensor real_div_17_cast = real_div(x = sub_34_cast, y = sqrt_17_cast)[name = tensor("real_div_17_cast")]; + tensor reshape_69_shape_0 = const()[name = tensor("reshape_69_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_69_cast = reshape(shape = reshape_69_shape_0, x = real_div_17_cast)[name = tensor("reshape_69_cast")]; + tensor add_35_gamma_0_to_fp16 = const()[name = tensor("add_35_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(433779968)))]; + tensor add_35_beta_0_to_fp16 = const()[name = tensor("add_35_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(433782592)))]; + tensor add_35_epsilon_0_to_fp16 = const()[name = tensor("add_35_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_35_cast = batch_norm(beta = add_35_beta_0_to_fp16, epsilon = add_35_epsilon_0_to_fp16, gamma = add_35_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_69_cast)[name = tensor("add_35_cast")]; + tensor input_323_cast = silu(x = add_35_cast)[name = tensor("input_323_cast")]; + tensor var_4981 = const()[name = tensor("op_4981"), val = 
tensor([1, 1])]; + tensor var_4983 = const()[name = tensor("op_4983"), val = tensor([1, 1])]; + tensor hidden_states_203_pad_type_0 = const()[name = tensor("hidden_states_203_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_203_pad_0 = const()[name = tensor("hidden_states_203_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor mid_block_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(433785216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(444844480))), name = tensor("mid_block_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor mid_block_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("mid_block_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(444844672)))]; + tensor hidden_states_203_cast = conv(bias = mid_block_resnets_0_conv2_bias_to_fp16, dilations = var_4983, groups = var_4943, pad = hidden_states_203_pad_0, pad_type = hidden_states_203_pad_type_0, strides = var_4981, weight = mid_block_resnets_0_conv2_weight_to_fp16_palettized, x = input_323_cast)[name = tensor("hidden_states_203_cast")]; + tensor hidden_states_205_cast = add(x = input_311_cast, y = hidden_states_203_cast)[name = tensor("hidden_states_205_cast")]; + tensor reshape_72_shape_0 = const()[name = tensor("reshape_72_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_72_cast = reshape(shape = reshape_72_shape_0, x = hidden_states_205_cast)[name = tensor("reshape_72_cast")]; + tensor reduce_mean_54_axes_0 = const()[name = tensor("reduce_mean_54_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_54_keep_dims_0 = const()[name = tensor("reduce_mean_54_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_54_cast = reduce_mean(axes = reduce_mean_54_axes_0, keep_dims = reduce_mean_54_keep_dims_0, x = reshape_72_cast)[name = tensor("reduce_mean_54_cast")]; + tensor sub_36_cast = sub(x = reshape_72_cast, y = reduce_mean_54_cast)[name = tensor("sub_36_cast")]; + tensor square_18_cast = square(x = sub_36_cast)[name = tensor("square_18_cast")]; + tensor reduce_mean_56_axes_0 = const()[name = tensor("reduce_mean_56_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_56_keep_dims_0 = const()[name = tensor("reduce_mean_56_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_56_cast = reduce_mean(axes = reduce_mean_56_axes_0, keep_dims = reduce_mean_56_keep_dims_0, x = square_18_cast)[name = tensor("reduce_mean_56_cast")]; + tensor add_36_y_0_to_fp16 = const()[name = tensor("add_36_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_36_cast = add(x = reduce_mean_56_cast, y = add_36_y_0_to_fp16)[name = tensor("add_36_cast")]; + tensor sqrt_18_cast = sqrt(x = add_36_cast)[name = tensor("sqrt_18_cast")]; + tensor real_div_18_cast = real_div(x = sub_36_cast, y = sqrt_18_cast)[name = tensor("real_div_18_cast")]; + tensor reshape_73_shape_0 = const()[name = tensor("reshape_73_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_73_cast = reshape(shape = reshape_73_shape_0, x = real_div_18_cast)[name = tensor("reshape_73_cast")]; + tensor add_37_gamma_0_to_fp16 = const()[name = tensor("add_37_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(444847296)))]; + tensor add_37_beta_0_to_fp16 = const()[name = tensor("add_37_beta_0_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(444849920)))]; + tensor add_37_epsilon_0_to_fp16 = const()[name = tensor("add_37_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_37_cast = batch_norm(beta = add_37_beta_0_to_fp16, epsilon = add_37_epsilon_0_to_fp16, gamma = add_37_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_73_cast)[name = tensor("add_37_cast")]; + tensor var_5021 = const()[name = tensor("op_5021"), val = tensor([1, 1])]; + tensor var_5023 = const()[name = tensor("op_5023"), val = tensor([1, 1])]; + tensor hidden_states_207_pad_type_0 = const()[name = tensor("hidden_states_207_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_207_pad_0 = const()[name = tensor("hidden_states_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(444852544))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(446081408))), name = tensor("mid_block_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(446081600)))]; + tensor hidden_states_207_cast = conv(bias = mid_block_attentions_0_proj_in_bias_to_fp16, dilations = var_5023, groups = var_4943, pad = hidden_states_207_pad_0, pad_type = hidden_states_207_pad_type_0, strides = var_5021, weight = mid_block_attentions_0_proj_in_weight_to_fp16_palettized, x = add_37_cast)[name = tensor("hidden_states_207_cast")]; + tensor var_5028 = const()[name = tensor("op_5028"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_145_cast = reshape(shape = var_5028, x = hidden_states_207_cast)[name = tensor("inputs_145_cast")]; + tensor var_5038 = const()[name = tensor("op_5038"), val = tensor([1])]; + tensor channels_mean_145_cast = reduce_mean(axes = var_5038, keep_dims = var_4938, x = inputs_145_cast)[name = tensor("channels_mean_145_cast")]; + tensor zero_mean_145_cast = sub(x = inputs_145_cast, y = channels_mean_145_cast)[name = tensor("zero_mean_145_cast")]; + tensor zero_mean_sq_145_cast = mul(x = zero_mean_145_cast, y = zero_mean_145_cast)[name = tensor("zero_mean_sq_145_cast")]; + tensor var_5042 = const()[name = tensor("op_5042"), val = tensor([1])]; + tensor var_5043_cast = reduce_mean(axes = var_5042, keep_dims = var_4938, x = zero_mean_sq_145_cast)[name = tensor("op_5043_cast")]; + tensor var_5044_to_fp16 = const()[name = tensor("op_5044_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5045_cast = add(x = var_5043_cast, y = var_5044_to_fp16)[name = tensor("op_5045_cast")]; + tensor denom_145_epsilon_0_to_fp16 = const()[name = tensor("denom_145_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_145_cast = rsqrt(epsilon = denom_145_epsilon_0_to_fp16, x = var_5045_cast)[name = tensor("denom_145_cast")]; + tensor out_145_cast = mul(x = zero_mean_145_cast, y = denom_145_cast)[name = tensor("out_145_cast")]; + tensor var_5049_to_fp16 = const()[name = tensor("op_5049_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(446084224)))]; + tensor var_5050_cast = add(x = out_145_cast, y = var_5049_to_fp16)[name = tensor("op_5050_cast")]; + tensor var_5052_to_fp16 = const()[name 
= tensor("op_5052_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(446086848)))]; + tensor hidden_states_209_cast = mul(x = var_5050_cast, y = var_5052_to_fp16)[name = tensor("hidden_states_209_cast")]; + tensor var_5059 = const()[name = tensor("op_5059"), val = tensor([1, 1])]; + tensor var_5061 = const()[name = tensor("op_5061"), val = tensor([1, 1])]; + tensor q_97_pad_type_0 = const()[name = tensor("q_97_pad_type_0"), val = tensor("custom")]; + tensor q_97_pad_0 = const()[name = tensor("q_97_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(446089472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(447318336))), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_97_cast = conv(dilations = var_5061, groups = var_4943, pad = q_97_pad_0, pad_type = q_97_pad_type_0, strides = var_5059, weight = mid_block_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_209_cast)[name = tensor("q_97_cast")]; + tensor var_5065 = const()[name = tensor("op_5065"), val = tensor([1, 1])]; + tensor var_5067 = const()[name = tensor("op_5067"), val = tensor([1, 1])]; + tensor k_97_pad_type_0 = const()[name = tensor("k_97_pad_type_0"), val = tensor("custom")]; + tensor k_97_pad_0 = const()[name = tensor("k_97_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(447318528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(448547392))), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_97_cast = conv(dilations = var_5067, groups = var_4943, pad = k_97_pad_0, pad_type = k_97_pad_type_0, strides = var_5065, weight = mid_block_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_209_cast)[name = tensor("k_97_cast")]; + tensor var_5071 = const()[name = tensor("op_5071"), val = tensor([1, 1])]; + tensor var_5073 = const()[name = tensor("op_5073"), val = tensor([1, 1])]; + tensor v_97_pad_type_0 = const()[name = tensor("v_97_pad_type_0"), val = tensor("custom")]; + tensor v_97_pad_0 = const()[name = tensor("v_97_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(448547584))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(449776448))), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_97_cast = conv(dilations = var_5073, groups = var_4943, pad = v_97_pad_0, pad_type = v_97_pad_type_0, strides = var_5071, weight = mid_block_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_209_cast)[name = tensor("v_97_cast")]; + tensor var_5077 = const()[name = tensor("op_5077"), val = tensor([2, 20, 
64, -1])]; + tensor var_5078_cast = reshape(shape = var_5077, x = q_97_cast)[name = tensor("op_5078_cast")]; + tensor var_5079 = const()[name = tensor("op_5079"), val = tensor([2, 20, 64, -1])]; + tensor var_5080_cast = reshape(shape = var_5079, x = k_97_cast)[name = tensor("op_5080_cast")]; + tensor var_5081 = const()[name = tensor("op_5081"), val = tensor([2, 20, 64, -1])]; + tensor var_5082_cast = reshape(shape = var_5081, x = v_97_cast)[name = tensor("op_5082_cast")]; + tensor attn_weights_193_transpose_x_0 = const()[name = tensor("attn_weights_193_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_193_transpose_y_0 = const()[name = tensor("attn_weights_193_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_193_cast = matmul(transpose_x = attn_weights_193_transpose_x_0, transpose_y = attn_weights_193_transpose_y_0, x = var_5078_cast, y = var_5080_cast)[name = tensor("attn_weights_193_cast")]; + tensor var_4934_to_fp16 = const()[name = tensor("op_4934_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_195_cast = mul(x = attn_weights_193_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_195_cast")]; + tensor var_5086_cast = softmax(axis = var_4927, x = attn_weights_195_cast)[name = tensor("op_5086_cast")]; + tensor attn_97_transpose_x_0 = const()[name = tensor("attn_97_transpose_x_0"), val = tensor(false)]; + tensor attn_97_transpose_y_0 = const()[name = tensor("attn_97_transpose_y_0"), val = tensor(true)]; + tensor attn_97_cast = matmul(transpose_x = attn_97_transpose_x_0, transpose_y = attn_97_transpose_y_0, x = var_5082_cast, y = var_5086_cast)[name = tensor("attn_97_cast")]; + tensor var_5090 = const()[name = tensor("op_5090"), val = tensor([2, 1280, 1, -1])]; + tensor input_327_cast = reshape(shape = var_5090, x = attn_97_cast)[name = tensor("input_327_cast")]; + tensor var_5095 = const()[name = tensor("op_5095"), val = tensor([1, 1])]; + tensor var_5097 = const()[name = tensor("op_5097"), val = tensor([1, 1])]; + tensor var_5099_pad_type_0 = const()[name = tensor("op_5099_pad_type_0"), val = tensor("custom")]; + tensor var_5099_pad_0 = const()[name = tensor("op_5099_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(449776640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450595904))), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450596032)))]; + tensor var_5099_cast = conv(bias = mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_5097, groups = var_4943, pad = var_5099_pad_0, pad_type = var_5099_pad_type_0, strides = var_5095, weight = mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_327_cast)[name = tensor("op_5099_cast")]; + tensor inputs_147_cast = add(x = var_5099_cast, y = inputs_145_cast)[name = tensor("inputs_147_cast")]; + tensor var_5103 = const()[name = tensor("op_5103"), val = tensor([1])]; + tensor channels_mean_147_cast = reduce_mean(axes = var_5103, keep_dims 
= var_4938, x = inputs_147_cast)[name = tensor("channels_mean_147_cast")]; + tensor zero_mean_147_cast = sub(x = inputs_147_cast, y = channels_mean_147_cast)[name = tensor("zero_mean_147_cast")]; + tensor zero_mean_sq_147_cast = mul(x = zero_mean_147_cast, y = zero_mean_147_cast)[name = tensor("zero_mean_sq_147_cast")]; + tensor var_5107 = const()[name = tensor("op_5107"), val = tensor([1])]; + tensor var_5108_cast = reduce_mean(axes = var_5107, keep_dims = var_4938, x = zero_mean_sq_147_cast)[name = tensor("op_5108_cast")]; + tensor var_5109_to_fp16 = const()[name = tensor("op_5109_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5110_cast = add(x = var_5108_cast, y = var_5109_to_fp16)[name = tensor("op_5110_cast")]; + tensor denom_147_epsilon_0_to_fp16 = const()[name = tensor("denom_147_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_147_cast = rsqrt(epsilon = denom_147_epsilon_0_to_fp16, x = var_5110_cast)[name = tensor("denom_147_cast")]; + tensor out_147_cast = mul(x = zero_mean_147_cast, y = denom_147_cast)[name = tensor("out_147_cast")]; + tensor var_5114_to_fp16 = const()[name = tensor("op_5114_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450598656)))]; + tensor var_5115_cast = add(x = out_147_cast, y = var_5114_to_fp16)[name = tensor("op_5115_cast")]; + tensor var_5117_to_fp16 = const()[name = tensor("op_5117_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450601280)))]; + tensor hidden_states_211_cast = mul(x = var_5115_cast, y = var_5117_to_fp16)[name = tensor("hidden_states_211_cast")]; + tensor var_5124 = const()[name = tensor("op_5124"), val = tensor([1, 1])]; + tensor var_5126 = const()[name = tensor("op_5126"), val = tensor([1, 1])]; + tensor q_99_pad_type_0 = const()[name = tensor("q_99_pad_type_0"), val = tensor("custom")]; + tensor q_99_pad_0 = const()[name = tensor("q_99_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450603904))), lut = tensor([-0x1.964p-7, 0x1.96p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_99_cast = conv(dilations = var_5126, groups = var_4943, pad = q_99_pad_0, pad_type = q_99_pad_type_0, strides = var_5124, weight = mid_block_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_211_cast)[name = tensor("q_99_cast")]; + tensor var_5130 = const()[name = tensor("op_5130"), val = tensor([1, 1])]; + tensor var_5132 = const()[name = tensor("op_5132"), val = tensor([1, 1])]; + tensor k_99_pad_type_0 = const()[name = tensor("k_99_pad_type_0"), val = tensor("custom")]; + tensor k_99_pad_0 = const()[name = tensor("k_99_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450808768))), lut = tensor([-0x1.444p-7, 0x1.44cp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_99_cast = conv(dilations = var_5132, groups = var_4943, pad = k_99_pad_0, pad_type = k_99_pad_type_0, strides = var_5130, weight = 
mid_block_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_99_cast")]; + tensor var_5136 = const()[name = tensor("op_5136"), val = tensor([1, 1])]; + tensor var_5138 = const()[name = tensor("op_5138"), val = tensor([1, 1])]; + tensor v_99_pad_type_0 = const()[name = tensor("v_99_pad_type_0"), val = tensor("custom")]; + tensor v_99_pad_0 = const()[name = tensor("v_99_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(451136512))), lut = tensor([-0x1.658p-7, 0x1.66p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_99_cast = conv(dilations = var_5138, groups = var_4943, pad = v_99_pad_0, pad_type = v_99_pad_type_0, strides = var_5136, weight = mid_block_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_99_cast")]; + tensor var_5142 = const()[name = tensor("op_5142"), val = tensor([2, 20, 64, -1])]; + tensor var_5143_cast = reshape(shape = var_5142, x = q_99_cast)[name = tensor("op_5143_cast")]; + tensor var_5144 = const()[name = tensor("op_5144"), val = tensor([2, 20, 64, -1])]; + tensor var_5145_cast = reshape(shape = var_5144, x = k_99_cast)[name = tensor("op_5145_cast")]; + tensor var_5146 = const()[name = tensor("op_5146"), val = tensor([2, 20, 64, -1])]; + tensor var_5147_cast = reshape(shape = var_5146, x = v_99_cast)[name = tensor("op_5147_cast")]; + tensor attn_weights_197_transpose_x_0 = const()[name = tensor("attn_weights_197_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_197_transpose_y_0 = const()[name = tensor("attn_weights_197_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_197_cast = matmul(transpose_x = attn_weights_197_transpose_x_0, transpose_y = attn_weights_197_transpose_y_0, x = var_5143_cast, y = var_5145_cast)[name = tensor("attn_weights_197_cast")]; + tensor attn_weights_199_cast = mul(x = attn_weights_197_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_199_cast")]; + tensor var_5151_cast = softmax(axis = var_4927, x = attn_weights_199_cast)[name = tensor("op_5151_cast")]; + tensor attn_99_transpose_x_0 = const()[name = tensor("attn_99_transpose_x_0"), val = tensor(false)]; + tensor attn_99_transpose_y_0 = const()[name = tensor("attn_99_transpose_y_0"), val = tensor(true)]; + tensor attn_99_cast = matmul(transpose_x = attn_99_transpose_x_0, transpose_y = attn_99_transpose_y_0, x = var_5147_cast, y = var_5151_cast)[name = tensor("attn_99_cast")]; + tensor var_5155 = const()[name = tensor("op_5155"), val = tensor([2, 1280, 1, -1])]; + tensor input_329_cast = reshape(shape = var_5155, x = attn_99_cast)[name = tensor("input_329_cast")]; + tensor var_5160 = const()[name = tensor("op_5160"), val = tensor([1, 1])]; + tensor var_5162 = const()[name = tensor("op_5162"), val = tensor([1, 1])]; + tensor var_5164_pad_type_0 = const()[name = tensor("op_5164_pad_type_0"), val = tensor("custom")]; + tensor var_5164_pad_0 = const()[name = tensor("op_5164_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(451464256))), lut = 
tensor([-0x1.78cp-8, 0x1.764p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(451669120)))]; + tensor var_5164_cast = conv(bias = mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_5162, groups = var_4943, pad = var_5164_pad_0, pad_type = var_5164_pad_type_0, strides = var_5160, weight = mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_329_cast)[name = tensor("op_5164_cast")]; + tensor inputs_149_cast = add(x = var_5164_cast, y = inputs_147_cast)[name = tensor("inputs_149_cast")]; + tensor var_5168 = const()[name = tensor("op_5168"), val = tensor([1])]; + tensor channels_mean_149_cast = reduce_mean(axes = var_5168, keep_dims = var_4938, x = inputs_149_cast)[name = tensor("channels_mean_149_cast")]; + tensor zero_mean_149_cast = sub(x = inputs_149_cast, y = channels_mean_149_cast)[name = tensor("zero_mean_149_cast")]; + tensor zero_mean_sq_149_cast = mul(x = zero_mean_149_cast, y = zero_mean_149_cast)[name = tensor("zero_mean_sq_149_cast")]; + tensor var_5172 = const()[name = tensor("op_5172"), val = tensor([1])]; + tensor var_5173_cast = reduce_mean(axes = var_5172, keep_dims = var_4938, x = zero_mean_sq_149_cast)[name = tensor("op_5173_cast")]; + tensor var_5174_to_fp16 = const()[name = tensor("op_5174_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5175_cast = add(x = var_5173_cast, y = var_5174_to_fp16)[name = tensor("op_5175_cast")]; + tensor denom_149_epsilon_0_to_fp16 = const()[name = tensor("denom_149_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_149_cast = rsqrt(epsilon = denom_149_epsilon_0_to_fp16, x = var_5175_cast)[name = tensor("denom_149_cast")]; + tensor out_149_cast = mul(x = zero_mean_149_cast, y = denom_149_cast)[name = tensor("out_149_cast")]; + tensor var_5179_to_fp16 = const()[name = tensor("op_5179_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(451671744)))]; + tensor var_5180_cast = add(x = out_149_cast, y = var_5179_to_fp16)[name = tensor("op_5180_cast")]; + tensor var_5182_to_fp16 = const()[name = tensor("op_5182_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(451674368)))]; + tensor input_331_cast = mul(x = var_5180_cast, y = var_5182_to_fp16)[name = tensor("input_331_cast")]; + tensor var_5190 = const()[name = tensor("op_5190"), val = tensor([1, 1])]; + tensor var_5192 = const()[name = tensor("op_5192"), val = tensor([1, 1])]; + tensor var_5194_pad_type_0 = const()[name = tensor("op_5194_pad_type_0"), val = tensor("custom")]; + tensor var_5194_pad_0 = const()[name = tensor("op_5194_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(451676992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(458230656))), name = tensor("mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor 
mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(458230784)))]; + tensor var_5194_cast = conv(bias = mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_5192, groups = var_4943, pad = var_5194_pad_0, pad_type = var_5194_pad_type_0, strides = var_5190, weight = mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_331_cast)[name = tensor("op_5194_cast")]; + tensor var_5195_split_sizes_0 = const()[name = tensor("op_5195_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5195_axis_0 = const()[name = tensor("op_5195_axis_0"), val = tensor(1)]; + tensor var_5195_cast_0, tensor var_5195_cast_1 = split(axis = var_5195_axis_0, split_sizes = var_5195_split_sizes_0, x = var_5194_cast)[name = tensor("op_5195_cast")]; + tensor var_5197_mode_0 = const()[name = tensor("op_5197_mode_0"), val = tensor("EXACT")]; + tensor var_5197_cast = gelu(mode = var_5197_mode_0, x = var_5195_cast_1)[name = tensor("op_5197_cast")]; + tensor input_333_cast = mul(x = var_5195_cast_0, y = var_5197_cast)[name = tensor("input_333_cast")]; + tensor var_5201 = const()[name = tensor("op_5201"), val = tensor([1, 1])]; + tensor var_5203 = const()[name = tensor("op_5203"), val = tensor([1, 1])]; + tensor var_5205_pad_type_0 = const()[name = tensor("op_5205_pad_type_0"), val = tensor("custom")]; + tensor var_5205_pad_0 = const()[name = tensor("op_5205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(458251328))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(461528192))), name = tensor("mid_block_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(461528320)))]; + tensor var_5205_cast = conv(bias = mid_block_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_5203, groups = var_4943, pad = var_5205_pad_0, pad_type = var_5205_pad_type_0, strides = var_5201, weight = mid_block_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_333_cast)[name = tensor("op_5205_cast")]; + tensor inputs_151_cast = add(x = var_5205_cast, y = inputs_149_cast)[name = tensor("inputs_151_cast")]; + tensor var_5215 = const()[name = tensor("op_5215"), val = tensor([1])]; + tensor channels_mean_151_cast = reduce_mean(axes = var_5215, keep_dims = var_4938, x = inputs_151_cast)[name = tensor("channels_mean_151_cast")]; + tensor zero_mean_151_cast = sub(x = inputs_151_cast, y = channels_mean_151_cast)[name = tensor("zero_mean_151_cast")]; + tensor zero_mean_sq_151_cast = mul(x = zero_mean_151_cast, y = zero_mean_151_cast)[name = tensor("zero_mean_sq_151_cast")]; + tensor var_5219 = const()[name = tensor("op_5219"), val = tensor([1])]; + tensor var_5220_cast = reduce_mean(axes = var_5219, keep_dims = var_4938, x = zero_mean_sq_151_cast)[name = tensor("op_5220_cast")]; + tensor 
var_5221_to_fp16 = const()[name = tensor("op_5221_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5222_cast = add(x = var_5220_cast, y = var_5221_to_fp16)[name = tensor("op_5222_cast")]; + tensor denom_151_epsilon_0_to_fp16 = const()[name = tensor("denom_151_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_151_cast = rsqrt(epsilon = denom_151_epsilon_0_to_fp16, x = var_5222_cast)[name = tensor("denom_151_cast")]; + tensor out_151_cast = mul(x = zero_mean_151_cast, y = denom_151_cast)[name = tensor("out_151_cast")]; + tensor var_5226_to_fp16 = const()[name = tensor("op_5226_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(461530944)))]; + tensor var_5227_cast = add(x = out_151_cast, y = var_5226_to_fp16)[name = tensor("op_5227_cast")]; + tensor var_5229_to_fp16 = const()[name = tensor("op_5229_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(461533568)))]; + tensor hidden_states_215_cast = mul(x = var_5227_cast, y = var_5229_to_fp16)[name = tensor("hidden_states_215_cast")]; + tensor var_5236 = const()[name = tensor("op_5236"), val = tensor([1, 1])]; + tensor var_5238 = const()[name = tensor("op_5238"), val = tensor([1, 1])]; + tensor q_101_pad_type_0 = const()[name = tensor("q_101_pad_type_0"), val = tensor("custom")]; + tensor q_101_pad_0 = const()[name = tensor("q_101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(461536192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(462355456))), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_101_cast = conv(dilations = var_5238, groups = var_4943, pad = q_101_pad_0, pad_type = q_101_pad_type_0, strides = var_5236, weight = mid_block_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_215_cast)[name = tensor("q_101_cast")]; + tensor var_5242 = const()[name = tensor("op_5242"), val = tensor([1, 1])]; + tensor var_5244 = const()[name = tensor("op_5244"), val = tensor([1, 1])]; + tensor k_101_pad_type_0 = const()[name = tensor("k_101_pad_type_0"), val = tensor("custom")]; + tensor k_101_pad_0 = const()[name = tensor("k_101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(462355584))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(463584448))), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_101_cast = conv(dilations = var_5244, groups = var_4943, pad = k_101_pad_0, pad_type = k_101_pad_type_0, strides = var_5242, weight = mid_block_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_215_cast)[name = tensor("k_101_cast")]; + tensor var_5248 = const()[name = tensor("op_5248"), val = tensor([1, 1])]; + tensor var_5250 = const()[name = tensor("op_5250"), val = tensor([1, 1])]; + tensor v_101_pad_type_0 = const()[name = tensor("v_101_pad_type_0"), val = tensor("custom")]; + tensor v_101_pad_0 = const()[name 
= tensor("v_101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(463584640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(464403904))), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_101_cast = conv(dilations = var_5250, groups = var_4943, pad = v_101_pad_0, pad_type = v_101_pad_type_0, strides = var_5248, weight = mid_block_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_215_cast)[name = tensor("v_101_cast")]; + tensor var_5254 = const()[name = tensor("op_5254"), val = tensor([2, 20, 64, -1])]; + tensor var_5255_cast = reshape(shape = var_5254, x = q_101_cast)[name = tensor("op_5255_cast")]; + tensor var_5256 = const()[name = tensor("op_5256"), val = tensor([2, 20, 64, -1])]; + tensor var_5257_cast = reshape(shape = var_5256, x = k_101_cast)[name = tensor("op_5257_cast")]; + tensor var_5258 = const()[name = tensor("op_5258"), val = tensor([2, 20, 64, -1])]; + tensor var_5259_cast = reshape(shape = var_5258, x = v_101_cast)[name = tensor("op_5259_cast")]; + tensor attn_weights_201_transpose_x_0 = const()[name = tensor("attn_weights_201_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_201_transpose_y_0 = const()[name = tensor("attn_weights_201_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_201_cast = matmul(transpose_x = attn_weights_201_transpose_x_0, transpose_y = attn_weights_201_transpose_y_0, x = var_5255_cast, y = var_5257_cast)[name = tensor("attn_weights_201_cast")]; + tensor attn_weights_203_cast = mul(x = attn_weights_201_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_203_cast")]; + tensor var_5263_cast = softmax(axis = var_4927, x = attn_weights_203_cast)[name = tensor("op_5263_cast")]; + tensor attn_101_transpose_x_0 = const()[name = tensor("attn_101_transpose_x_0"), val = tensor(false)]; + tensor attn_101_transpose_y_0 = const()[name = tensor("attn_101_transpose_y_0"), val = tensor(true)]; + tensor attn_101_cast = matmul(transpose_x = attn_101_transpose_x_0, transpose_y = attn_101_transpose_y_0, x = var_5259_cast, y = var_5263_cast)[name = tensor("attn_101_cast")]; + tensor var_5267 = const()[name = tensor("op_5267"), val = tensor([2, 1280, 1, -1])]; + tensor input_335_cast = reshape(shape = var_5267, x = attn_101_cast)[name = tensor("input_335_cast")]; + tensor var_5272 = const()[name = tensor("op_5272"), val = tensor([1, 1])]; + tensor var_5274 = const()[name = tensor("op_5274"), val = tensor([1, 1])]; + tensor var_5276_pad_type_0 = const()[name = tensor("op_5276_pad_type_0"), val = tensor("custom")]; + tensor var_5276_pad_0 = const()[name = tensor("op_5276_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(464404032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(465632896))), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = 
tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(465633088)))]; + tensor var_5276_cast = conv(bias = mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_5274, groups = var_4943, pad = var_5276_pad_0, pad_type = var_5276_pad_type_0, strides = var_5272, weight = mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_335_cast)[name = tensor("op_5276_cast")]; + tensor inputs_153_cast = add(x = var_5276_cast, y = inputs_151_cast)[name = tensor("inputs_153_cast")]; + tensor var_5280 = const()[name = tensor("op_5280"), val = tensor([1])]; + tensor channels_mean_153_cast = reduce_mean(axes = var_5280, keep_dims = var_4938, x = inputs_153_cast)[name = tensor("channels_mean_153_cast")]; + tensor zero_mean_153_cast = sub(x = inputs_153_cast, y = channels_mean_153_cast)[name = tensor("zero_mean_153_cast")]; + tensor zero_mean_sq_153_cast = mul(x = zero_mean_153_cast, y = zero_mean_153_cast)[name = tensor("zero_mean_sq_153_cast")]; + tensor var_5284 = const()[name = tensor("op_5284"), val = tensor([1])]; + tensor var_5285_cast = reduce_mean(axes = var_5284, keep_dims = var_4938, x = zero_mean_sq_153_cast)[name = tensor("op_5285_cast")]; + tensor var_5286_to_fp16 = const()[name = tensor("op_5286_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5287_cast = add(x = var_5285_cast, y = var_5286_to_fp16)[name = tensor("op_5287_cast")]; + tensor denom_153_epsilon_0_to_fp16 = const()[name = tensor("denom_153_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_153_cast = rsqrt(epsilon = denom_153_epsilon_0_to_fp16, x = var_5287_cast)[name = tensor("denom_153_cast")]; + tensor out_153_cast = mul(x = zero_mean_153_cast, y = denom_153_cast)[name = tensor("out_153_cast")]; + tensor var_5291_to_fp16 = const()[name = tensor("op_5291_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(465635712)))]; + tensor var_5292_cast = add(x = out_153_cast, y = var_5291_to_fp16)[name = tensor("op_5292_cast")]; + tensor var_5294_to_fp16 = const()[name = tensor("op_5294_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(465638336)))]; + tensor hidden_states_217_cast = mul(x = var_5292_cast, y = var_5294_to_fp16)[name = tensor("hidden_states_217_cast")]; + tensor var_5301 = const()[name = tensor("op_5301"), val = tensor([1, 1])]; + tensor var_5303 = const()[name = tensor("op_5303"), val = tensor([1, 1])]; + tensor q_103_pad_type_0 = const()[name = tensor("q_103_pad_type_0"), val = tensor("custom")]; + tensor q_103_pad_0 = const()[name = tensor("q_103_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(465640960))), lut = tensor([-0x1.acp-7, 0x1.ab4p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_103_cast = conv(dilations = var_5303, groups = var_4943, pad = q_103_pad_0, pad_type = q_103_pad_type_0, strides = var_5301, weight = mid_block_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_217_cast)[name = tensor("q_103_cast")]; + tensor var_5307 = const()[name = tensor("op_5307"), val = tensor([1, 1])]; + 
tensor var_5309 = const()[name = tensor("op_5309"), val = tensor([1, 1])]; + tensor k_103_pad_type_0 = const()[name = tensor("k_103_pad_type_0"), val = tensor("custom")]; + tensor k_103_pad_0 = const()[name = tensor("k_103_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(465845824))), lut = tensor([-0x1.514p-7, 0x1.528p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_103_cast = conv(dilations = var_5309, groups = var_4943, pad = k_103_pad_0, pad_type = k_103_pad_type_0, strides = var_5307, weight = mid_block_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_103_cast")]; + tensor var_5313 = const()[name = tensor("op_5313"), val = tensor([1, 1])]; + tensor var_5315 = const()[name = tensor("op_5315"), val = tensor([1, 1])]; + tensor v_103_pad_type_0 = const()[name = tensor("v_103_pad_type_0"), val = tensor("custom")]; + tensor v_103_pad_0 = const()[name = tensor("v_103_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(466173568))), lut = tensor([-0x1.88cp-7, 0x1.884p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_103_cast = conv(dilations = var_5315, groups = var_4943, pad = v_103_pad_0, pad_type = v_103_pad_type_0, strides = var_5313, weight = mid_block_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_103_cast")]; + tensor var_5319 = const()[name = tensor("op_5319"), val = tensor([2, 20, 64, -1])]; + tensor var_5320_cast = reshape(shape = var_5319, x = q_103_cast)[name = tensor("op_5320_cast")]; + tensor var_5321 = const()[name = tensor("op_5321"), val = tensor([2, 20, 64, -1])]; + tensor var_5322_cast = reshape(shape = var_5321, x = k_103_cast)[name = tensor("op_5322_cast")]; + tensor var_5323 = const()[name = tensor("op_5323"), val = tensor([2, 20, 64, -1])]; + tensor var_5324_cast = reshape(shape = var_5323, x = v_103_cast)[name = tensor("op_5324_cast")]; + tensor attn_weights_205_transpose_x_0 = const()[name = tensor("attn_weights_205_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_205_transpose_y_0 = const()[name = tensor("attn_weights_205_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_205_cast = matmul(transpose_x = attn_weights_205_transpose_x_0, transpose_y = attn_weights_205_transpose_y_0, x = var_5320_cast, y = var_5322_cast)[name = tensor("attn_weights_205_cast")]; + tensor attn_weights_207_cast = mul(x = attn_weights_205_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_207_cast")]; + tensor var_5328_cast = softmax(axis = var_4927, x = attn_weights_207_cast)[name = tensor("op_5328_cast")]; + tensor attn_103_transpose_x_0 = const()[name = tensor("attn_103_transpose_x_0"), val = tensor(false)]; + tensor attn_103_transpose_y_0 = const()[name = tensor("attn_103_transpose_y_0"), val = tensor(true)]; + tensor attn_103_cast = matmul(transpose_x = attn_103_transpose_x_0, transpose_y = attn_103_transpose_y_0, x = var_5324_cast, y = 
var_5328_cast)[name = tensor("attn_103_cast")]; + tensor var_5332 = const()[name = tensor("op_5332"), val = tensor([2, 1280, 1, -1])]; + tensor input_337_cast = reshape(shape = var_5332, x = attn_103_cast)[name = tensor("input_337_cast")]; + tensor var_5337 = const()[name = tensor("op_5337"), val = tensor([1, 1])]; + tensor var_5339 = const()[name = tensor("op_5339"), val = tensor([1, 1])]; + tensor var_5341_pad_type_0 = const()[name = tensor("op_5341_pad_type_0"), val = tensor("custom")]; + tensor var_5341_pad_0 = const()[name = tensor("op_5341_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(466501312))), lut = tensor([-0x1.aacp-8, 0x1.ab4p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(466706176)))]; + tensor var_5341_cast = conv(bias = mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_5339, groups = var_4943, pad = var_5341_pad_0, pad_type = var_5341_pad_type_0, strides = var_5337, weight = mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_337_cast)[name = tensor("op_5341_cast")]; + tensor inputs_155_cast = add(x = var_5341_cast, y = inputs_153_cast)[name = tensor("inputs_155_cast")]; + tensor var_5345 = const()[name = tensor("op_5345"), val = tensor([1])]; + tensor channels_mean_155_cast = reduce_mean(axes = var_5345, keep_dims = var_4938, x = inputs_155_cast)[name = tensor("channels_mean_155_cast")]; + tensor zero_mean_155_cast = sub(x = inputs_155_cast, y = channels_mean_155_cast)[name = tensor("zero_mean_155_cast")]; + tensor zero_mean_sq_155_cast = mul(x = zero_mean_155_cast, y = zero_mean_155_cast)[name = tensor("zero_mean_sq_155_cast")]; + tensor var_5349 = const()[name = tensor("op_5349"), val = tensor([1])]; + tensor var_5350_cast = reduce_mean(axes = var_5349, keep_dims = var_4938, x = zero_mean_sq_155_cast)[name = tensor("op_5350_cast")]; + tensor var_5351_to_fp16 = const()[name = tensor("op_5351_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5352_cast = add(x = var_5350_cast, y = var_5351_to_fp16)[name = tensor("op_5352_cast")]; + tensor denom_155_epsilon_0_to_fp16 = const()[name = tensor("denom_155_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_155_cast = rsqrt(epsilon = denom_155_epsilon_0_to_fp16, x = var_5352_cast)[name = tensor("denom_155_cast")]; + tensor out_155_cast = mul(x = zero_mean_155_cast, y = denom_155_cast)[name = tensor("out_155_cast")]; + tensor var_5356_to_fp16 = const()[name = tensor("op_5356_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(466708800)))]; + tensor var_5357_cast = add(x = out_155_cast, y = var_5356_to_fp16)[name = tensor("op_5357_cast")]; + tensor var_5359_to_fp16 = const()[name = tensor("op_5359_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(466711424)))]; + tensor input_339_cast = mul(x = var_5357_cast, y = var_5359_to_fp16)[name = tensor("input_339_cast")]; + tensor var_5367 = 
const()[name = tensor("op_5367"), val = tensor([1, 1])]; + tensor var_5369 = const()[name = tensor("op_5369"), val = tensor([1, 1])]; + tensor var_5371_pad_type_0 = const()[name = tensor("op_5371_pad_type_0"), val = tensor("custom")]; + tensor var_5371_pad_0 = const()[name = tensor("op_5371_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(466714048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(476544512))), name = tensor("mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(476544704)))]; + tensor var_5371_cast = conv(bias = mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_5369, groups = var_4943, pad = var_5371_pad_0, pad_type = var_5371_pad_type_0, strides = var_5367, weight = mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_339_cast)[name = tensor("op_5371_cast")]; + tensor var_5372_split_sizes_0 = const()[name = tensor("op_5372_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5372_axis_0 = const()[name = tensor("op_5372_axis_0"), val = tensor(1)]; + tensor var_5372_cast_0, tensor var_5372_cast_1 = split(axis = var_5372_axis_0, split_sizes = var_5372_split_sizes_0, x = var_5371_cast)[name = tensor("op_5372_cast")]; + tensor var_5374_mode_0 = const()[name = tensor("op_5374_mode_0"), val = tensor("EXACT")]; + tensor var_5374_cast = gelu(mode = var_5374_mode_0, x = var_5372_cast_1)[name = tensor("op_5374_cast")]; + tensor input_341_cast = mul(x = var_5372_cast_0, y = var_5374_cast)[name = tensor("input_341_cast")]; + tensor var_5378 = const()[name = tensor("op_5378"), val = tensor([1, 1])]; + tensor var_5380 = const()[name = tensor("op_5380"), val = tensor([1, 1])]; + tensor var_5382_pad_type_0 = const()[name = tensor("op_5382_pad_type_0"), val = tensor("custom")]; + tensor var_5382_pad_0 = const()[name = tensor("op_5382_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(476565248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(479842112))), name = tensor("mid_block_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(479842240)))]; + tensor var_5382_cast = conv(bias = mid_block_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_5380, groups = var_4943, pad = var_5382_pad_0, pad_type = var_5382_pad_type_0, strides = var_5378, weight = mid_block_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_341_cast)[name = tensor("op_5382_cast")]; + 
tensor inputs_157_cast = add(x = var_5382_cast, y = inputs_155_cast)[name = tensor("inputs_157_cast")]; + tensor var_5392 = const()[name = tensor("op_5392"), val = tensor([1])]; + tensor channels_mean_157_cast = reduce_mean(axes = var_5392, keep_dims = var_4938, x = inputs_157_cast)[name = tensor("channels_mean_157_cast")]; + tensor zero_mean_157_cast = sub(x = inputs_157_cast, y = channels_mean_157_cast)[name = tensor("zero_mean_157_cast")]; + tensor zero_mean_sq_157_cast = mul(x = zero_mean_157_cast, y = zero_mean_157_cast)[name = tensor("zero_mean_sq_157_cast")]; + tensor var_5396 = const()[name = tensor("op_5396"), val = tensor([1])]; + tensor var_5397_cast = reduce_mean(axes = var_5396, keep_dims = var_4938, x = zero_mean_sq_157_cast)[name = tensor("op_5397_cast")]; + tensor var_5398_to_fp16 = const()[name = tensor("op_5398_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5399_cast = add(x = var_5397_cast, y = var_5398_to_fp16)[name = tensor("op_5399_cast")]; + tensor denom_157_epsilon_0_to_fp16 = const()[name = tensor("denom_157_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_157_cast = rsqrt(epsilon = denom_157_epsilon_0_to_fp16, x = var_5399_cast)[name = tensor("denom_157_cast")]; + tensor out_157_cast = mul(x = zero_mean_157_cast, y = denom_157_cast)[name = tensor("out_157_cast")]; + tensor var_5403_to_fp16 = const()[name = tensor("op_5403_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(479844864)))]; + tensor var_5404_cast = add(x = out_157_cast, y = var_5403_to_fp16)[name = tensor("op_5404_cast")]; + tensor var_5406_to_fp16 = const()[name = tensor("op_5406_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(479847488)))]; + tensor hidden_states_221_cast = mul(x = var_5404_cast, y = var_5406_to_fp16)[name = tensor("hidden_states_221_cast")]; + tensor var_5413 = const()[name = tensor("op_5413"), val = tensor([1, 1])]; + tensor var_5415 = const()[name = tensor("op_5415"), val = tensor([1, 1])]; + tensor q_105_pad_type_0 = const()[name = tensor("q_105_pad_type_0"), val = tensor("custom")]; + tensor q_105_pad_0 = const()[name = tensor("q_105_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(479850112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(480669376))), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_105_cast = conv(dilations = var_5415, groups = var_4943, pad = q_105_pad_0, pad_type = q_105_pad_type_0, strides = var_5413, weight = mid_block_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_221_cast)[name = tensor("q_105_cast")]; + tensor var_5419 = const()[name = tensor("op_5419"), val = tensor([1, 1])]; + tensor var_5421 = const()[name = tensor("op_5421"), val = tensor([1, 1])]; + tensor k_105_pad_type_0 = const()[name = tensor("k_105_pad_type_0"), val = tensor("custom")]; + tensor k_105_pad_0 = const()[name = tensor("k_105_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(480669504))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(481488768))), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_105_cast = conv(dilations = var_5421, groups = var_4943, pad = k_105_pad_0, pad_type = k_105_pad_type_0, strides = var_5419, weight = mid_block_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_221_cast)[name = tensor("k_105_cast")]; + tensor var_5425 = const()[name = tensor("op_5425"), val = tensor([1, 1])]; + tensor var_5427 = const()[name = tensor("op_5427"), val = tensor([1, 1])]; + tensor v_105_pad_type_0 = const()[name = tensor("v_105_pad_type_0"), val = tensor("custom")]; + tensor v_105_pad_0 = const()[name = tensor("v_105_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(481488896))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(482308160))), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_105_cast = conv(dilations = var_5427, groups = var_4943, pad = v_105_pad_0, pad_type = v_105_pad_type_0, strides = var_5425, weight = mid_block_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_221_cast)[name = tensor("v_105_cast")]; + tensor var_5431 = const()[name = tensor("op_5431"), val = tensor([2, 20, 64, -1])]; + tensor var_5432_cast = reshape(shape = var_5431, x = q_105_cast)[name = tensor("op_5432_cast")]; + tensor var_5433 = const()[name = tensor("op_5433"), val = tensor([2, 20, 64, -1])]; + tensor var_5434_cast = reshape(shape = var_5433, x = k_105_cast)[name = tensor("op_5434_cast")]; + tensor var_5435 = const()[name = tensor("op_5435"), val = tensor([2, 20, 64, -1])]; + tensor var_5436_cast = reshape(shape = var_5435, x = v_105_cast)[name = tensor("op_5436_cast")]; + tensor attn_weights_209_transpose_x_0 = const()[name = tensor("attn_weights_209_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_209_transpose_y_0 = const()[name = tensor("attn_weights_209_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_209_cast = matmul(transpose_x = attn_weights_209_transpose_x_0, transpose_y = attn_weights_209_transpose_y_0, x = var_5432_cast, y = var_5434_cast)[name = tensor("attn_weights_209_cast")]; + tensor attn_weights_211_cast = mul(x = attn_weights_209_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_211_cast")]; + tensor var_5440_cast = softmax(axis = var_4927, x = attn_weights_211_cast)[name = tensor("op_5440_cast")]; + tensor attn_105_transpose_x_0 = const()[name = tensor("attn_105_transpose_x_0"), val = tensor(false)]; + tensor attn_105_transpose_y_0 = const()[name = tensor("attn_105_transpose_y_0"), val = tensor(true)]; + tensor attn_105_cast = matmul(transpose_x = attn_105_transpose_x_0, transpose_y = attn_105_transpose_y_0, x = var_5436_cast, y = var_5440_cast)[name = tensor("attn_105_cast")]; + tensor var_5444 = const()[name = tensor("op_5444"), val = tensor([2, 1280, 1, -1])]; + tensor input_343_cast = reshape(shape = var_5444, x = attn_105_cast)[name = tensor("input_343_cast")]; + tensor var_5449 = const()[name = tensor("op_5449"), val = tensor([1, 1])]; + tensor var_5451 = const()[name = tensor("op_5451"), 
val = tensor([1, 1])]; + tensor var_5453_pad_type_0 = const()[name = tensor("op_5453_pad_type_0"), val = tensor("custom")]; + tensor var_5453_pad_0 = const()[name = tensor("op_5453_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(482308288))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(483127552))), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(483127680)))]; + tensor var_5453_cast = conv(bias = mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_5451, groups = var_4943, pad = var_5453_pad_0, pad_type = var_5453_pad_type_0, strides = var_5449, weight = mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_343_cast)[name = tensor("op_5453_cast")]; + tensor inputs_159_cast = add(x = var_5453_cast, y = inputs_157_cast)[name = tensor("inputs_159_cast")]; + tensor var_5457 = const()[name = tensor("op_5457"), val = tensor([1])]; + tensor channels_mean_159_cast = reduce_mean(axes = var_5457, keep_dims = var_4938, x = inputs_159_cast)[name = tensor("channels_mean_159_cast")]; + tensor zero_mean_159_cast = sub(x = inputs_159_cast, y = channels_mean_159_cast)[name = tensor("zero_mean_159_cast")]; + tensor zero_mean_sq_159_cast = mul(x = zero_mean_159_cast, y = zero_mean_159_cast)[name = tensor("zero_mean_sq_159_cast")]; + tensor var_5461 = const()[name = tensor("op_5461"), val = tensor([1])]; + tensor var_5462_cast = reduce_mean(axes = var_5461, keep_dims = var_4938, x = zero_mean_sq_159_cast)[name = tensor("op_5462_cast")]; + tensor var_5463_to_fp16 = const()[name = tensor("op_5463_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5464_cast = add(x = var_5462_cast, y = var_5463_to_fp16)[name = tensor("op_5464_cast")]; + tensor denom_159_epsilon_0_to_fp16 = const()[name = tensor("denom_159_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_159_cast = rsqrt(epsilon = denom_159_epsilon_0_to_fp16, x = var_5464_cast)[name = tensor("denom_159_cast")]; + tensor out_159_cast = mul(x = zero_mean_159_cast, y = denom_159_cast)[name = tensor("out_159_cast")]; + tensor var_5468_to_fp16 = const()[name = tensor("op_5468_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(483130304)))]; + tensor var_5469_cast = add(x = out_159_cast, y = var_5468_to_fp16)[name = tensor("op_5469_cast")]; + tensor var_5471_to_fp16 = const()[name = tensor("op_5471_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(483132928)))]; + tensor hidden_states_223_cast = mul(x = var_5469_cast, y = var_5471_to_fp16)[name = tensor("hidden_states_223_cast")]; + tensor var_5478 = const()[name = tensor("op_5478"), val = tensor([1, 1])]; + tensor var_5480 = const()[name = tensor("op_5480"), val = tensor([1, 1])]; + tensor q_107_pad_type_0 = const()[name = tensor("q_107_pad_type_0"), val = tensor("custom")]; + tensor q_107_pad_0 = const()[name = tensor("q_107_pad_0"), val = 
tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(483135552))), lut = tensor([-0x1.93p-7, 0x1.938p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_107_cast = conv(dilations = var_5480, groups = var_4943, pad = q_107_pad_0, pad_type = q_107_pad_type_0, strides = var_5478, weight = mid_block_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_223_cast)[name = tensor("q_107_cast")]; + tensor var_5484 = const()[name = tensor("op_5484"), val = tensor([1, 1])]; + tensor var_5486 = const()[name = tensor("op_5486"), val = tensor([1, 1])]; + tensor k_107_pad_type_0 = const()[name = tensor("k_107_pad_type_0"), val = tensor("custom")]; + tensor k_107_pad_0 = const()[name = tensor("k_107_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(483340416))), lut = tensor([-0x1.294p-7, 0x1.28cp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_107_cast = conv(dilations = var_5486, groups = var_4943, pad = k_107_pad_0, pad_type = k_107_pad_type_0, strides = var_5484, weight = mid_block_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_107_cast")]; + tensor var_5490 = const()[name = tensor("op_5490"), val = tensor([1, 1])]; + tensor var_5492 = const()[name = tensor("op_5492"), val = tensor([1, 1])]; + tensor v_107_pad_type_0 = const()[name = tensor("v_107_pad_type_0"), val = tensor("custom")]; + tensor v_107_pad_0 = const()[name = tensor("v_107_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(483668160))), lut = tensor([-0x1.63cp-7, 0x1.62cp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_107_cast = conv(dilations = var_5492, groups = var_4943, pad = v_107_pad_0, pad_type = v_107_pad_type_0, strides = var_5490, weight = mid_block_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_107_cast")]; + tensor var_5496 = const()[name = tensor("op_5496"), val = tensor([2, 20, 64, -1])]; + tensor var_5497_cast = reshape(shape = var_5496, x = q_107_cast)[name = tensor("op_5497_cast")]; + tensor var_5498 = const()[name = tensor("op_5498"), val = tensor([2, 20, 64, -1])]; + tensor var_5499_cast = reshape(shape = var_5498, x = k_107_cast)[name = tensor("op_5499_cast")]; + tensor var_5500 = const()[name = tensor("op_5500"), val = tensor([2, 20, 64, -1])]; + tensor var_5501_cast = reshape(shape = var_5500, x = v_107_cast)[name = tensor("op_5501_cast")]; + tensor attn_weights_213_transpose_x_0 = const()[name = tensor("attn_weights_213_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_213_transpose_y_0 = const()[name = tensor("attn_weights_213_transpose_y_0"), val = tensor(false)]; + 
tensor attn_weights_213_cast = matmul(transpose_x = attn_weights_213_transpose_x_0, transpose_y = attn_weights_213_transpose_y_0, x = var_5497_cast, y = var_5499_cast)[name = tensor("attn_weights_213_cast")]; + tensor attn_weights_215_cast = mul(x = attn_weights_213_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_215_cast")]; + tensor var_5505_cast = softmax(axis = var_4927, x = attn_weights_215_cast)[name = tensor("op_5505_cast")]; + tensor attn_107_transpose_x_0 = const()[name = tensor("attn_107_transpose_x_0"), val = tensor(false)]; + tensor attn_107_transpose_y_0 = const()[name = tensor("attn_107_transpose_y_0"), val = tensor(true)]; + tensor attn_107_cast = matmul(transpose_x = attn_107_transpose_x_0, transpose_y = attn_107_transpose_y_0, x = var_5501_cast, y = var_5505_cast)[name = tensor("attn_107_cast")]; + tensor var_5509 = const()[name = tensor("op_5509"), val = tensor([2, 1280, 1, -1])]; + tensor input_345_cast = reshape(shape = var_5509, x = attn_107_cast)[name = tensor("input_345_cast")]; + tensor var_5514 = const()[name = tensor("op_5514"), val = tensor([1, 1])]; + tensor var_5516 = const()[name = tensor("op_5516"), val = tensor([1, 1])]; + tensor var_5518_pad_type_0 = const()[name = tensor("op_5518_pad_type_0"), val = tensor("custom")]; + tensor var_5518_pad_0 = const()[name = tensor("op_5518_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(483995904))), lut = tensor([-0x1.878p-8, 0x1.86cp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(484200768)))]; + tensor var_5518_cast = conv(bias = mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_5516, groups = var_4943, pad = var_5518_pad_0, pad_type = var_5518_pad_type_0, strides = var_5514, weight = mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_345_cast)[name = tensor("op_5518_cast")]; + tensor inputs_161_cast = add(x = var_5518_cast, y = inputs_159_cast)[name = tensor("inputs_161_cast")]; + tensor var_5522 = const()[name = tensor("op_5522"), val = tensor([1])]; + tensor channels_mean_161_cast = reduce_mean(axes = var_5522, keep_dims = var_4938, x = inputs_161_cast)[name = tensor("channels_mean_161_cast")]; + tensor zero_mean_161_cast = sub(x = inputs_161_cast, y = channels_mean_161_cast)[name = tensor("zero_mean_161_cast")]; + tensor zero_mean_sq_161_cast = mul(x = zero_mean_161_cast, y = zero_mean_161_cast)[name = tensor("zero_mean_sq_161_cast")]; + tensor var_5526 = const()[name = tensor("op_5526"), val = tensor([1])]; + tensor var_5527_cast = reduce_mean(axes = var_5526, keep_dims = var_4938, x = zero_mean_sq_161_cast)[name = tensor("op_5527_cast")]; + tensor var_5528_to_fp16 = const()[name = tensor("op_5528_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5529_cast = add(x = var_5527_cast, y = var_5528_to_fp16)[name = tensor("op_5529_cast")]; + tensor denom_161_epsilon_0_to_fp16 = const()[name = tensor("denom_161_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor 
denom_161_cast = rsqrt(epsilon = denom_161_epsilon_0_to_fp16, x = var_5529_cast)[name = tensor("denom_161_cast")]; + tensor out_161_cast = mul(x = zero_mean_161_cast, y = denom_161_cast)[name = tensor("out_161_cast")]; + tensor var_5533_to_fp16 = const()[name = tensor("op_5533_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(484203392)))]; + tensor var_5534_cast = add(x = out_161_cast, y = var_5533_to_fp16)[name = tensor("op_5534_cast")]; + tensor var_5536_to_fp16 = const()[name = tensor("op_5536_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(484206016)))]; + tensor input_347_cast = mul(x = var_5534_cast, y = var_5536_to_fp16)[name = tensor("input_347_cast")]; + tensor var_5544 = const()[name = tensor("op_5544"), val = tensor([1, 1])]; + tensor var_5546 = const()[name = tensor("op_5546"), val = tensor([1, 1])]; + tensor var_5548_pad_type_0 = const()[name = tensor("op_5548_pad_type_0"), val = tensor("custom")]; + tensor var_5548_pad_0 = const()[name = tensor("op_5548_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(484208640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(490762304))), name = tensor("mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(490762432)))]; + tensor var_5548_cast = conv(bias = mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_5546, groups = var_4943, pad = var_5548_pad_0, pad_type = var_5548_pad_type_0, strides = var_5544, weight = mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_347_cast)[name = tensor("op_5548_cast")]; + tensor var_5549_split_sizes_0 = const()[name = tensor("op_5549_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5549_axis_0 = const()[name = tensor("op_5549_axis_0"), val = tensor(1)]; + tensor var_5549_cast_0, tensor var_5549_cast_1 = split(axis = var_5549_axis_0, split_sizes = var_5549_split_sizes_0, x = var_5548_cast)[name = tensor("op_5549_cast")]; + tensor var_5551_mode_0 = const()[name = tensor("op_5551_mode_0"), val = tensor("EXACT")]; + tensor var_5551_cast = gelu(mode = var_5551_mode_0, x = var_5549_cast_1)[name = tensor("op_5551_cast")]; + tensor input_349_cast = mul(x = var_5549_cast_0, y = var_5551_cast)[name = tensor("input_349_cast")]; + tensor var_5555 = const()[name = tensor("op_5555"), val = tensor([1, 1])]; + tensor var_5557 = const()[name = tensor("op_5557"), val = tensor([1, 1])]; + tensor var_5559_pad_type_0 = const()[name = tensor("op_5559_pad_type_0"), val = tensor("custom")]; + tensor var_5559_pad_0 = const()[name = tensor("op_5559_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(490782976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset 
= tensor(494059840))), name = tensor("mid_block_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494059968)))]; + tensor var_5559_cast = conv(bias = mid_block_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_5557, groups = var_4943, pad = var_5559_pad_0, pad_type = var_5559_pad_type_0, strides = var_5555, weight = mid_block_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_349_cast)[name = tensor("op_5559_cast")]; + tensor inputs_163_cast = add(x = var_5559_cast, y = inputs_161_cast)[name = tensor("inputs_163_cast")]; + tensor var_5569 = const()[name = tensor("op_5569"), val = tensor([1])]; + tensor channels_mean_163_cast = reduce_mean(axes = var_5569, keep_dims = var_4938, x = inputs_163_cast)[name = tensor("channels_mean_163_cast")]; + tensor zero_mean_163_cast = sub(x = inputs_163_cast, y = channels_mean_163_cast)[name = tensor("zero_mean_163_cast")]; + tensor zero_mean_sq_163_cast = mul(x = zero_mean_163_cast, y = zero_mean_163_cast)[name = tensor("zero_mean_sq_163_cast")]; + tensor var_5573 = const()[name = tensor("op_5573"), val = tensor([1])]; + tensor var_5574_cast = reduce_mean(axes = var_5573, keep_dims = var_4938, x = zero_mean_sq_163_cast)[name = tensor("op_5574_cast")]; + tensor var_5575_to_fp16 = const()[name = tensor("op_5575_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5576_cast = add(x = var_5574_cast, y = var_5575_to_fp16)[name = tensor("op_5576_cast")]; + tensor denom_163_epsilon_0_to_fp16 = const()[name = tensor("denom_163_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_163_cast = rsqrt(epsilon = denom_163_epsilon_0_to_fp16, x = var_5576_cast)[name = tensor("denom_163_cast")]; + tensor out_163_cast = mul(x = zero_mean_163_cast, y = denom_163_cast)[name = tensor("out_163_cast")]; + tensor var_5580_to_fp16 = const()[name = tensor("op_5580_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494062592)))]; + tensor var_5581_cast = add(x = out_163_cast, y = var_5580_to_fp16)[name = tensor("op_5581_cast")]; + tensor var_5583_to_fp16 = const()[name = tensor("op_5583_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494065216)))]; + tensor hidden_states_227_cast = mul(x = var_5581_cast, y = var_5583_to_fp16)[name = tensor("hidden_states_227_cast")]; + tensor var_5590 = const()[name = tensor("op_5590"), val = tensor([1, 1])]; + tensor var_5592 = const()[name = tensor("op_5592"), val = tensor([1, 1])]; + tensor q_109_pad_type_0 = const()[name = tensor("q_109_pad_type_0"), val = tensor("custom")]; + tensor q_109_pad_0 = const()[name = tensor("q_109_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494067840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494887104))), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_109_cast = conv(dilations = var_5592, groups = var_4943, pad = 
q_109_pad_0, pad_type = q_109_pad_type_0, strides = var_5590, weight = mid_block_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_227_cast)[name = tensor("q_109_cast")]; + tensor var_5596 = const()[name = tensor("op_5596"), val = tensor([1, 1])]; + tensor var_5598 = const()[name = tensor("op_5598"), val = tensor([1, 1])]; + tensor k_109_pad_type_0 = const()[name = tensor("k_109_pad_type_0"), val = tensor("custom")]; + tensor k_109_pad_0 = const()[name = tensor("k_109_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494887232))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(495706496))), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_109_cast = conv(dilations = var_5598, groups = var_4943, pad = k_109_pad_0, pad_type = k_109_pad_type_0, strides = var_5596, weight = mid_block_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_227_cast)[name = tensor("k_109_cast")]; + tensor var_5602 = const()[name = tensor("op_5602"), val = tensor([1, 1])]; + tensor var_5604 = const()[name = tensor("op_5604"), val = tensor([1, 1])]; + tensor v_109_pad_type_0 = const()[name = tensor("v_109_pad_type_0"), val = tensor("custom")]; + tensor v_109_pad_0 = const()[name = tensor("v_109_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(495706624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(496525888))), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_109_cast = conv(dilations = var_5604, groups = var_4943, pad = v_109_pad_0, pad_type = v_109_pad_type_0, strides = var_5602, weight = mid_block_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_227_cast)[name = tensor("v_109_cast")]; + tensor var_5608 = const()[name = tensor("op_5608"), val = tensor([2, 20, 64, -1])]; + tensor var_5609_cast = reshape(shape = var_5608, x = q_109_cast)[name = tensor("op_5609_cast")]; + tensor var_5610 = const()[name = tensor("op_5610"), val = tensor([2, 20, 64, -1])]; + tensor var_5611_cast = reshape(shape = var_5610, x = k_109_cast)[name = tensor("op_5611_cast")]; + tensor var_5612 = const()[name = tensor("op_5612"), val = tensor([2, 20, 64, -1])]; + tensor var_5613_cast = reshape(shape = var_5612, x = v_109_cast)[name = tensor("op_5613_cast")]; + tensor attn_weights_217_transpose_x_0 = const()[name = tensor("attn_weights_217_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_217_transpose_y_0 = const()[name = tensor("attn_weights_217_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_217_cast = matmul(transpose_x = attn_weights_217_transpose_x_0, transpose_y = attn_weights_217_transpose_y_0, x = var_5609_cast, y = var_5611_cast)[name = tensor("attn_weights_217_cast")]; + tensor attn_weights_219_cast = mul(x = attn_weights_217_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_219_cast")]; + tensor var_5617_cast = softmax(axis = 
var_4927, x = attn_weights_219_cast)[name = tensor("op_5617_cast")]; + tensor attn_109_transpose_x_0 = const()[name = tensor("attn_109_transpose_x_0"), val = tensor(false)]; + tensor attn_109_transpose_y_0 = const()[name = tensor("attn_109_transpose_y_0"), val = tensor(true)]; + tensor attn_109_cast = matmul(transpose_x = attn_109_transpose_x_0, transpose_y = attn_109_transpose_y_0, x = var_5613_cast, y = var_5617_cast)[name = tensor("attn_109_cast")]; + tensor var_5621 = const()[name = tensor("op_5621"), val = tensor([2, 1280, 1, -1])]; + tensor input_351_cast = reshape(shape = var_5621, x = attn_109_cast)[name = tensor("input_351_cast")]; + tensor var_5626 = const()[name = tensor("op_5626"), val = tensor([1, 1])]; + tensor var_5628 = const()[name = tensor("op_5628"), val = tensor([1, 1])]; + tensor var_5630_pad_type_0 = const()[name = tensor("op_5630_pad_type_0"), val = tensor("custom")]; + tensor var_5630_pad_0 = const()[name = tensor("op_5630_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(496526016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(497345280))), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(497345408)))]; + tensor var_5630_cast = conv(bias = mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_5628, groups = var_4943, pad = var_5630_pad_0, pad_type = var_5630_pad_type_0, strides = var_5626, weight = mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_351_cast)[name = tensor("op_5630_cast")]; + tensor inputs_165_cast = add(x = var_5630_cast, y = inputs_163_cast)[name = tensor("inputs_165_cast")]; + tensor var_5634 = const()[name = tensor("op_5634"), val = tensor([1])]; + tensor channels_mean_165_cast = reduce_mean(axes = var_5634, keep_dims = var_4938, x = inputs_165_cast)[name = tensor("channels_mean_165_cast")]; + tensor zero_mean_165_cast = sub(x = inputs_165_cast, y = channels_mean_165_cast)[name = tensor("zero_mean_165_cast")]; + tensor zero_mean_sq_165_cast = mul(x = zero_mean_165_cast, y = zero_mean_165_cast)[name = tensor("zero_mean_sq_165_cast")]; + tensor var_5638 = const()[name = tensor("op_5638"), val = tensor([1])]; + tensor var_5639_cast = reduce_mean(axes = var_5638, keep_dims = var_4938, x = zero_mean_sq_165_cast)[name = tensor("op_5639_cast")]; + tensor var_5640_to_fp16 = const()[name = tensor("op_5640_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5641_cast = add(x = var_5639_cast, y = var_5640_to_fp16)[name = tensor("op_5641_cast")]; + tensor denom_165_epsilon_0_to_fp16 = const()[name = tensor("denom_165_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_165_cast = rsqrt(epsilon = denom_165_epsilon_0_to_fp16, x = var_5641_cast)[name = tensor("denom_165_cast")]; + tensor out_165_cast = mul(x = zero_mean_165_cast, y = denom_165_cast)[name = tensor("out_165_cast")]; + tensor var_5645_to_fp16 = const()[name = tensor("op_5645_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(497348032)))]; + tensor var_5646_cast = add(x = out_165_cast, y = var_5645_to_fp16)[name = tensor("op_5646_cast")]; + tensor var_5648_to_fp16 = const()[name = tensor("op_5648_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(497350656)))]; + tensor hidden_states_229_cast = mul(x = var_5646_cast, y = var_5648_to_fp16)[name = tensor("hidden_states_229_cast")]; + tensor var_5655 = const()[name = tensor("op_5655"), val = tensor([1, 1])]; + tensor var_5657 = const()[name = tensor("op_5657"), val = tensor([1, 1])]; + tensor q_111_pad_type_0 = const()[name = tensor("q_111_pad_type_0"), val = tensor("custom")]; + tensor q_111_pad_0 = const()[name = tensor("q_111_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(497353280))), lut = tensor([-0x1.7b4p-7, 0x1.7a4p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_111_cast = conv(dilations = var_5657, groups = var_4943, pad = q_111_pad_0, pad_type = q_111_pad_type_0, strides = var_5655, weight = mid_block_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_229_cast)[name = tensor("q_111_cast")]; + tensor var_5661 = const()[name = tensor("op_5661"), val = tensor([1, 1])]; + tensor var_5663 = const()[name = tensor("op_5663"), val = tensor([1, 1])]; + tensor k_111_pad_type_0 = const()[name = tensor("k_111_pad_type_0"), val = tensor("custom")]; + tensor k_111_pad_0 = const()[name = tensor("k_111_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(497558144))), lut = tensor([-0x1.038p-7, 0x1.044p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_111_cast = conv(dilations = var_5663, groups = var_4943, pad = k_111_pad_0, pad_type = k_111_pad_type_0, strides = var_5661, weight = mid_block_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_111_cast")]; + tensor var_5667 = const()[name = tensor("op_5667"), val = tensor([1, 1])]; + tensor var_5669 = const()[name = tensor("op_5669"), val = tensor([1, 1])]; + tensor v_111_pad_type_0 = const()[name = tensor("v_111_pad_type_0"), val = tensor("custom")]; + tensor v_111_pad_0 = const()[name = tensor("v_111_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(497885888))), lut = tensor([-0x1.354p-7, 0x1.35cp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_111_cast = conv(dilations = var_5669, groups = var_4943, pad = v_111_pad_0, pad_type = v_111_pad_type_0, strides = var_5667, weight = mid_block_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_111_cast")]; + tensor var_5673 
= const()[name = tensor("op_5673"), val = tensor([2, 20, 64, -1])]; + tensor var_5674_cast = reshape(shape = var_5673, x = q_111_cast)[name = tensor("op_5674_cast")]; + tensor var_5675 = const()[name = tensor("op_5675"), val = tensor([2, 20, 64, -1])]; + tensor var_5676_cast = reshape(shape = var_5675, x = k_111_cast)[name = tensor("op_5676_cast")]; + tensor var_5677 = const()[name = tensor("op_5677"), val = tensor([2, 20, 64, -1])]; + tensor var_5678_cast = reshape(shape = var_5677, x = v_111_cast)[name = tensor("op_5678_cast")]; + tensor attn_weights_221_transpose_x_0 = const()[name = tensor("attn_weights_221_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_221_transpose_y_0 = const()[name = tensor("attn_weights_221_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_221_cast = matmul(transpose_x = attn_weights_221_transpose_x_0, transpose_y = attn_weights_221_transpose_y_0, x = var_5674_cast, y = var_5676_cast)[name = tensor("attn_weights_221_cast")]; + tensor attn_weights_223_cast = mul(x = attn_weights_221_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_223_cast")]; + tensor var_5682_cast = softmax(axis = var_4927, x = attn_weights_223_cast)[name = tensor("op_5682_cast")]; + tensor attn_111_transpose_x_0 = const()[name = tensor("attn_111_transpose_x_0"), val = tensor(false)]; + tensor attn_111_transpose_y_0 = const()[name = tensor("attn_111_transpose_y_0"), val = tensor(true)]; + tensor attn_111_cast = matmul(transpose_x = attn_111_transpose_x_0, transpose_y = attn_111_transpose_y_0, x = var_5678_cast, y = var_5682_cast)[name = tensor("attn_111_cast")]; + tensor var_5686 = const()[name = tensor("op_5686"), val = tensor([2, 1280, 1, -1])]; + tensor input_353_cast = reshape(shape = var_5686, x = attn_111_cast)[name = tensor("input_353_cast")]; + tensor var_5691 = const()[name = tensor("op_5691"), val = tensor([1, 1])]; + tensor var_5693 = const()[name = tensor("op_5693"), val = tensor([1, 1])]; + tensor var_5695_pad_type_0 = const()[name = tensor("op_5695_pad_type_0"), val = tensor("custom")]; + tensor var_5695_pad_0 = const()[name = tensor("op_5695_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(498213632))), lut = tensor([-0x1.5a4p-8, 0x1.5bcp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(498418496)))]; + tensor var_5695_cast = conv(bias = mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_5693, groups = var_4943, pad = var_5695_pad_0, pad_type = var_5695_pad_type_0, strides = var_5691, weight = mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_353_cast)[name = tensor("op_5695_cast")]; + tensor inputs_167_cast = add(x = var_5695_cast, y = inputs_165_cast)[name = tensor("inputs_167_cast")]; + tensor var_5699 = const()[name = tensor("op_5699"), val = tensor([1])]; + tensor channels_mean_167_cast = reduce_mean(axes = var_5699, keep_dims = var_4938, x = inputs_167_cast)[name = tensor("channels_mean_167_cast")]; + tensor 
zero_mean_167_cast = sub(x = inputs_167_cast, y = channels_mean_167_cast)[name = tensor("zero_mean_167_cast")]; + tensor zero_mean_sq_167_cast = mul(x = zero_mean_167_cast, y = zero_mean_167_cast)[name = tensor("zero_mean_sq_167_cast")]; + tensor var_5703 = const()[name = tensor("op_5703"), val = tensor([1])]; + tensor var_5704_cast = reduce_mean(axes = var_5703, keep_dims = var_4938, x = zero_mean_sq_167_cast)[name = tensor("op_5704_cast")]; + tensor var_5705_to_fp16 = const()[name = tensor("op_5705_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5706_cast = add(x = var_5704_cast, y = var_5705_to_fp16)[name = tensor("op_5706_cast")]; + tensor denom_167_epsilon_0_to_fp16 = const()[name = tensor("denom_167_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_167_cast = rsqrt(epsilon = denom_167_epsilon_0_to_fp16, x = var_5706_cast)[name = tensor("denom_167_cast")]; + tensor out_167_cast = mul(x = zero_mean_167_cast, y = denom_167_cast)[name = tensor("out_167_cast")]; + tensor var_5710_to_fp16 = const()[name = tensor("op_5710_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(498421120)))]; + tensor var_5711_cast = add(x = out_167_cast, y = var_5710_to_fp16)[name = tensor("op_5711_cast")]; + tensor var_5713_to_fp16 = const()[name = tensor("op_5713_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(498423744)))]; + tensor input_355_cast = mul(x = var_5711_cast, y = var_5713_to_fp16)[name = tensor("input_355_cast")]; + tensor var_5721 = const()[name = tensor("op_5721"), val = tensor([1, 1])]; + tensor var_5723 = const()[name = tensor("op_5723"), val = tensor([1, 1])]; + tensor var_5725_pad_type_0 = const()[name = tensor("op_5725_pad_type_0"), val = tensor("custom")]; + tensor var_5725_pad_0 = const()[name = tensor("op_5725_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(498426368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(504980032))), name = tensor("mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(504980160)))]; + tensor var_5725_cast = conv(bias = mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_5723, groups = var_4943, pad = var_5725_pad_0, pad_type = var_5725_pad_type_0, strides = var_5721, weight = mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_355_cast)[name = tensor("op_5725_cast")]; + tensor var_5726_split_sizes_0 = const()[name = tensor("op_5726_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5726_axis_0 = const()[name = tensor("op_5726_axis_0"), val = tensor(1)]; + tensor var_5726_cast_0, tensor var_5726_cast_1 = split(axis = var_5726_axis_0, split_sizes = var_5726_split_sizes_0, x = var_5725_cast)[name = tensor("op_5726_cast")]; + tensor var_5728_mode_0 = const()[name = tensor("op_5728_mode_0"), val = tensor("EXACT")]; + tensor var_5728_cast = gelu(mode = var_5728_mode_0, x = var_5726_cast_1)[name = 
tensor("op_5728_cast")]; + tensor input_357_cast = mul(x = var_5726_cast_0, y = var_5728_cast)[name = tensor("input_357_cast")]; + tensor var_5732 = const()[name = tensor("op_5732"), val = tensor([1, 1])]; + tensor var_5734 = const()[name = tensor("op_5734"), val = tensor([1, 1])]; + tensor var_5736_pad_type_0 = const()[name = tensor("op_5736_pad_type_0"), val = tensor("custom")]; + tensor var_5736_pad_0 = const()[name = tensor("op_5736_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(505000704))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(508277568))), name = tensor("mid_block_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(508277696)))]; + tensor var_5736_cast = conv(bias = mid_block_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_5734, groups = var_4943, pad = var_5736_pad_0, pad_type = var_5736_pad_type_0, strides = var_5732, weight = mid_block_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_357_cast)[name = tensor("op_5736_cast")]; + tensor inputs_169_cast = add(x = var_5736_cast, y = inputs_167_cast)[name = tensor("inputs_169_cast")]; + tensor var_5746 = const()[name = tensor("op_5746"), val = tensor([1])]; + tensor channels_mean_169_cast = reduce_mean(axes = var_5746, keep_dims = var_4938, x = inputs_169_cast)[name = tensor("channels_mean_169_cast")]; + tensor zero_mean_169_cast = sub(x = inputs_169_cast, y = channels_mean_169_cast)[name = tensor("zero_mean_169_cast")]; + tensor zero_mean_sq_169_cast = mul(x = zero_mean_169_cast, y = zero_mean_169_cast)[name = tensor("zero_mean_sq_169_cast")]; + tensor var_5750 = const()[name = tensor("op_5750"), val = tensor([1])]; + tensor var_5751_cast = reduce_mean(axes = var_5750, keep_dims = var_4938, x = zero_mean_sq_169_cast)[name = tensor("op_5751_cast")]; + tensor var_5752_to_fp16 = const()[name = tensor("op_5752_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5753_cast = add(x = var_5751_cast, y = var_5752_to_fp16)[name = tensor("op_5753_cast")]; + tensor denom_169_epsilon_0_to_fp16 = const()[name = tensor("denom_169_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_169_cast = rsqrt(epsilon = denom_169_epsilon_0_to_fp16, x = var_5753_cast)[name = tensor("denom_169_cast")]; + tensor out_169_cast = mul(x = zero_mean_169_cast, y = denom_169_cast)[name = tensor("out_169_cast")]; + tensor var_5757_to_fp16 = const()[name = tensor("op_5757_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(508280320)))]; + tensor var_5758_cast = add(x = out_169_cast, y = var_5757_to_fp16)[name = tensor("op_5758_cast")]; + tensor var_5760_to_fp16 = const()[name = tensor("op_5760_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(508282944)))]; + tensor hidden_states_233_cast = mul(x = var_5758_cast, y = var_5760_to_fp16)[name = tensor("hidden_states_233_cast")]; + tensor var_5767 = const()[name = tensor("op_5767"), val = tensor([1, 1])]; + tensor var_5769 = 
const()[name = tensor("op_5769"), val = tensor([1, 1])]; + tensor q_113_pad_type_0 = const()[name = tensor("q_113_pad_type_0"), val = tensor("custom")]; + tensor q_113_pad_0 = const()[name = tensor("q_113_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(508285568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(509104832))), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_113_cast = conv(dilations = var_5769, groups = var_4943, pad = q_113_pad_0, pad_type = q_113_pad_type_0, strides = var_5767, weight = mid_block_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_233_cast)[name = tensor("q_113_cast")]; + tensor var_5773 = const()[name = tensor("op_5773"), val = tensor([1, 1])]; + tensor var_5775 = const()[name = tensor("op_5775"), val = tensor([1, 1])]; + tensor k_113_pad_type_0 = const()[name = tensor("k_113_pad_type_0"), val = tensor("custom")]; + tensor k_113_pad_0 = const()[name = tensor("k_113_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(509104960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(509924224))), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_113_cast = conv(dilations = var_5775, groups = var_4943, pad = k_113_pad_0, pad_type = k_113_pad_type_0, strides = var_5773, weight = mid_block_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_233_cast)[name = tensor("k_113_cast")]; + tensor var_5779 = const()[name = tensor("op_5779"), val = tensor([1, 1])]; + tensor var_5781 = const()[name = tensor("op_5781"), val = tensor([1, 1])]; + tensor v_113_pad_type_0 = const()[name = tensor("v_113_pad_type_0"), val = tensor("custom")]; + tensor v_113_pad_0 = const()[name = tensor("v_113_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(509924352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(510743616))), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_113_cast = conv(dilations = var_5781, groups = var_4943, pad = v_113_pad_0, pad_type = v_113_pad_type_0, strides = var_5779, weight = mid_block_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_233_cast)[name = tensor("v_113_cast")]; + tensor var_5785 = const()[name = tensor("op_5785"), val = tensor([2, 20, 64, -1])]; + tensor var_5786_cast = reshape(shape = var_5785, x = q_113_cast)[name = tensor("op_5786_cast")]; + tensor var_5787 = const()[name = tensor("op_5787"), val = tensor([2, 20, 64, -1])]; + tensor var_5788_cast = reshape(shape = var_5787, x = k_113_cast)[name = tensor("op_5788_cast")]; + tensor var_5789 = 
const()[name = tensor("op_5789"), val = tensor([2, 20, 64, -1])]; + tensor var_5790_cast = reshape(shape = var_5789, x = v_113_cast)[name = tensor("op_5790_cast")]; + tensor attn_weights_225_transpose_x_0 = const()[name = tensor("attn_weights_225_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_225_transpose_y_0 = const()[name = tensor("attn_weights_225_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_225_cast = matmul(transpose_x = attn_weights_225_transpose_x_0, transpose_y = attn_weights_225_transpose_y_0, x = var_5786_cast, y = var_5788_cast)[name = tensor("attn_weights_225_cast")]; + tensor attn_weights_227_cast = mul(x = attn_weights_225_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_227_cast")]; + tensor var_5794_cast = softmax(axis = var_4927, x = attn_weights_227_cast)[name = tensor("op_5794_cast")]; + tensor attn_113_transpose_x_0 = const()[name = tensor("attn_113_transpose_x_0"), val = tensor(false)]; + tensor attn_113_transpose_y_0 = const()[name = tensor("attn_113_transpose_y_0"), val = tensor(true)]; + tensor attn_113_cast = matmul(transpose_x = attn_113_transpose_x_0, transpose_y = attn_113_transpose_y_0, x = var_5790_cast, y = var_5794_cast)[name = tensor("attn_113_cast")]; + tensor var_5798 = const()[name = tensor("op_5798"), val = tensor([2, 1280, 1, -1])]; + tensor input_359_cast = reshape(shape = var_5798, x = attn_113_cast)[name = tensor("input_359_cast")]; + tensor var_5803 = const()[name = tensor("op_5803"), val = tensor([1, 1])]; + tensor var_5805 = const()[name = tensor("op_5805"), val = tensor([1, 1])]; + tensor var_5807_pad_type_0 = const()[name = tensor("op_5807_pad_type_0"), val = tensor("custom")]; + tensor var_5807_pad_0 = const()[name = tensor("op_5807_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(510743744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(511972608))), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(511972800)))]; + tensor var_5807_cast = conv(bias = mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_5805, groups = var_4943, pad = var_5807_pad_0, pad_type = var_5807_pad_type_0, strides = var_5803, weight = mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_359_cast)[name = tensor("op_5807_cast")]; + tensor inputs_171_cast = add(x = var_5807_cast, y = inputs_169_cast)[name = tensor("inputs_171_cast")]; + tensor var_5811 = const()[name = tensor("op_5811"), val = tensor([1])]; + tensor channels_mean_171_cast = reduce_mean(axes = var_5811, keep_dims = var_4938, x = inputs_171_cast)[name = tensor("channels_mean_171_cast")]; + tensor zero_mean_171_cast = sub(x = inputs_171_cast, y = channels_mean_171_cast)[name = tensor("zero_mean_171_cast")]; + tensor zero_mean_sq_171_cast = mul(x = zero_mean_171_cast, y = zero_mean_171_cast)[name = tensor("zero_mean_sq_171_cast")]; + tensor var_5815 = const()[name = tensor("op_5815"), val = tensor([1])]; 
+ tensor var_5816_cast = reduce_mean(axes = var_5815, keep_dims = var_4938, x = zero_mean_sq_171_cast)[name = tensor("op_5816_cast")]; + tensor var_5817_to_fp16 = const()[name = tensor("op_5817_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5818_cast = add(x = var_5816_cast, y = var_5817_to_fp16)[name = tensor("op_5818_cast")]; + tensor denom_171_epsilon_0_to_fp16 = const()[name = tensor("denom_171_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_171_cast = rsqrt(epsilon = denom_171_epsilon_0_to_fp16, x = var_5818_cast)[name = tensor("denom_171_cast")]; + tensor out_171_cast = mul(x = zero_mean_171_cast, y = denom_171_cast)[name = tensor("out_171_cast")]; + tensor var_5822_to_fp16 = const()[name = tensor("op_5822_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(511975424)))]; + tensor var_5823_cast = add(x = out_171_cast, y = var_5822_to_fp16)[name = tensor("op_5823_cast")]; + tensor var_5825_to_fp16 = const()[name = tensor("op_5825_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(511978048)))]; + tensor hidden_states_235_cast = mul(x = var_5823_cast, y = var_5825_to_fp16)[name = tensor("hidden_states_235_cast")]; + tensor var_5832 = const()[name = tensor("op_5832"), val = tensor([1, 1])]; + tensor var_5834 = const()[name = tensor("op_5834"), val = tensor([1, 1])]; + tensor q_115_pad_type_0 = const()[name = tensor("q_115_pad_type_0"), val = tensor("custom")]; + tensor q_115_pad_0 = const()[name = tensor("q_115_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(511980672))), lut = tensor([-0x1.6f8p-7, 0x1.6f8p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_115_cast = conv(dilations = var_5834, groups = var_4943, pad = q_115_pad_0, pad_type = q_115_pad_type_0, strides = var_5832, weight = mid_block_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_235_cast)[name = tensor("q_115_cast")]; + tensor var_5838 = const()[name = tensor("op_5838"), val = tensor([1, 1])]; + tensor var_5840 = const()[name = tensor("op_5840"), val = tensor([1, 1])]; + tensor k_115_pad_type_0 = const()[name = tensor("k_115_pad_type_0"), val = tensor("custom")]; + tensor k_115_pad_0 = const()[name = tensor("k_115_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(512185536))), lut = tensor([-0x1.e54p-8, 0x1.e5p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_115_cast = conv(dilations = var_5840, groups = var_4943, pad = k_115_pad_0, pad_type = k_115_pad_type_0, strides = var_5838, weight = mid_block_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_115_cast")]; + tensor var_5844 = const()[name = tensor("op_5844"), val = tensor([1, 1])]; + tensor var_5846 = const()[name = tensor("op_5846"), val = tensor([1, 1])]; + tensor v_115_pad_type_0 = const()[name = tensor("v_115_pad_type_0"), val = tensor("custom")]; + tensor 
v_115_pad_0 = const()[name = tensor("v_115_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(512513280))), lut = tensor([-0x1.1b4p-7, 0x1.1bp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_115_cast = conv(dilations = var_5846, groups = var_4943, pad = v_115_pad_0, pad_type = v_115_pad_type_0, strides = var_5844, weight = mid_block_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_115_cast")]; + tensor var_5850 = const()[name = tensor("op_5850"), val = tensor([2, 20, 64, -1])]; + tensor var_5851_cast = reshape(shape = var_5850, x = q_115_cast)[name = tensor("op_5851_cast")]; + tensor var_5852 = const()[name = tensor("op_5852"), val = tensor([2, 20, 64, -1])]; + tensor var_5853_cast = reshape(shape = var_5852, x = k_115_cast)[name = tensor("op_5853_cast")]; + tensor var_5854 = const()[name = tensor("op_5854"), val = tensor([2, 20, 64, -1])]; + tensor var_5855_cast = reshape(shape = var_5854, x = v_115_cast)[name = tensor("op_5855_cast")]; + tensor attn_weights_229_transpose_x_0 = const()[name = tensor("attn_weights_229_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_229_transpose_y_0 = const()[name = tensor("attn_weights_229_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_229_cast = matmul(transpose_x = attn_weights_229_transpose_x_0, transpose_y = attn_weights_229_transpose_y_0, x = var_5851_cast, y = var_5853_cast)[name = tensor("attn_weights_229_cast")]; + tensor attn_weights_231_cast = mul(x = attn_weights_229_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_231_cast")]; + tensor var_5859_cast = softmax(axis = var_4927, x = attn_weights_231_cast)[name = tensor("op_5859_cast")]; + tensor attn_115_transpose_x_0 = const()[name = tensor("attn_115_transpose_x_0"), val = tensor(false)]; + tensor attn_115_transpose_y_0 = const()[name = tensor("attn_115_transpose_y_0"), val = tensor(true)]; + tensor attn_115_cast = matmul(transpose_x = attn_115_transpose_x_0, transpose_y = attn_115_transpose_y_0, x = var_5855_cast, y = var_5859_cast)[name = tensor("attn_115_cast")]; + tensor var_5863 = const()[name = tensor("op_5863"), val = tensor([2, 1280, 1, -1])]; + tensor input_361_cast = reshape(shape = var_5863, x = attn_115_cast)[name = tensor("input_361_cast")]; + tensor var_5868 = const()[name = tensor("op_5868"), val = tensor([1, 1])]; + tensor var_5870 = const()[name = tensor("op_5870"), val = tensor([1, 1])]; + tensor var_5872_pad_type_0 = const()[name = tensor("op_5872_pad_type_0"), val = tensor("custom")]; + tensor var_5872_pad_0 = const()[name = tensor("op_5872_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(512841024))), lut = tensor([-0x1.478p-8, 0x1.47p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(513045888)))]; + tensor var_5872_cast = conv(bias = mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_5870, groups = var_4943, pad = var_5872_pad_0, pad_type = var_5872_pad_type_0, strides = var_5868, weight = mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_361_cast)[name = tensor("op_5872_cast")]; + tensor inputs_173_cast = add(x = var_5872_cast, y = inputs_171_cast)[name = tensor("inputs_173_cast")]; + tensor var_5876 = const()[name = tensor("op_5876"), val = tensor([1])]; + tensor channels_mean_173_cast = reduce_mean(axes = var_5876, keep_dims = var_4938, x = inputs_173_cast)[name = tensor("channels_mean_173_cast")]; + tensor zero_mean_173_cast = sub(x = inputs_173_cast, y = channels_mean_173_cast)[name = tensor("zero_mean_173_cast")]; + tensor zero_mean_sq_173_cast = mul(x = zero_mean_173_cast, y = zero_mean_173_cast)[name = tensor("zero_mean_sq_173_cast")]; + tensor var_5880 = const()[name = tensor("op_5880"), val = tensor([1])]; + tensor var_5881_cast = reduce_mean(axes = var_5880, keep_dims = var_4938, x = zero_mean_sq_173_cast)[name = tensor("op_5881_cast")]; + tensor var_5882_to_fp16 = const()[name = tensor("op_5882_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5883_cast = add(x = var_5881_cast, y = var_5882_to_fp16)[name = tensor("op_5883_cast")]; + tensor denom_173_epsilon_0_to_fp16 = const()[name = tensor("denom_173_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_173_cast = rsqrt(epsilon = denom_173_epsilon_0_to_fp16, x = var_5883_cast)[name = tensor("denom_173_cast")]; + tensor out_173_cast = mul(x = zero_mean_173_cast, y = denom_173_cast)[name = tensor("out_173_cast")]; + tensor var_5887_to_fp16 = const()[name = tensor("op_5887_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(513048512)))]; + tensor var_5888_cast = add(x = out_173_cast, y = var_5887_to_fp16)[name = tensor("op_5888_cast")]; + tensor var_5890_to_fp16 = const()[name = tensor("op_5890_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(513051136)))]; + tensor input_363_cast = mul(x = var_5888_cast, y = var_5890_to_fp16)[name = tensor("input_363_cast")]; + tensor var_5898 = const()[name = tensor("op_5898"), val = tensor([1, 1])]; + tensor var_5900 = const()[name = tensor("op_5900"), val = tensor([1, 1])]; + tensor var_5902_pad_type_0 = const()[name = tensor("op_5902_pad_type_0"), val = tensor("custom")]; + tensor var_5902_pad_0 = const()[name = tensor("op_5902_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(513053760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(519607424))), name = tensor("mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(519607552)))]; + tensor var_5902_cast = conv(bias = mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_5900, 
groups = var_4943, pad = var_5902_pad_0, pad_type = var_5902_pad_type_0, strides = var_5898, weight = mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_363_cast)[name = tensor("op_5902_cast")]; + tensor var_5903_split_sizes_0 = const()[name = tensor("op_5903_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5903_axis_0 = const()[name = tensor("op_5903_axis_0"), val = tensor(1)]; + tensor var_5903_cast_0, tensor var_5903_cast_1 = split(axis = var_5903_axis_0, split_sizes = var_5903_split_sizes_0, x = var_5902_cast)[name = tensor("op_5903_cast")]; + tensor var_5905_mode_0 = const()[name = tensor("op_5905_mode_0"), val = tensor("EXACT")]; + tensor var_5905_cast = gelu(mode = var_5905_mode_0, x = var_5903_cast_1)[name = tensor("op_5905_cast")]; + tensor input_365_cast = mul(x = var_5903_cast_0, y = var_5905_cast)[name = tensor("input_365_cast")]; + tensor var_5909 = const()[name = tensor("op_5909"), val = tensor([1, 1])]; + tensor var_5911 = const()[name = tensor("op_5911"), val = tensor([1, 1])]; + tensor var_5913_pad_type_0 = const()[name = tensor("op_5913_pad_type_0"), val = tensor("custom")]; + tensor var_5913_pad_0 = const()[name = tensor("op_5913_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(519628096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(522904960))), name = tensor("mid_block_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(522905088)))]; + tensor var_5913_cast = conv(bias = mid_block_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_5911, groups = var_4943, pad = var_5913_pad_0, pad_type = var_5913_pad_type_0, strides = var_5909, weight = mid_block_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_365_cast)[name = tensor("op_5913_cast")]; + tensor inputs_175_cast = add(x = var_5913_cast, y = inputs_173_cast)[name = tensor("inputs_175_cast")]; + tensor var_5923 = const()[name = tensor("op_5923"), val = tensor([1])]; + tensor channels_mean_175_cast = reduce_mean(axes = var_5923, keep_dims = var_4938, x = inputs_175_cast)[name = tensor("channels_mean_175_cast")]; + tensor zero_mean_175_cast = sub(x = inputs_175_cast, y = channels_mean_175_cast)[name = tensor("zero_mean_175_cast")]; + tensor zero_mean_sq_175_cast = mul(x = zero_mean_175_cast, y = zero_mean_175_cast)[name = tensor("zero_mean_sq_175_cast")]; + tensor var_5927 = const()[name = tensor("op_5927"), val = tensor([1])]; + tensor var_5928_cast = reduce_mean(axes = var_5927, keep_dims = var_4938, x = zero_mean_sq_175_cast)[name = tensor("op_5928_cast")]; + tensor var_5929_to_fp16 = const()[name = tensor("op_5929_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5930_cast = add(x = var_5928_cast, y = var_5929_to_fp16)[name = tensor("op_5930_cast")]; + tensor denom_175_epsilon_0_to_fp16 = const()[name = tensor("denom_175_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_175_cast = rsqrt(epsilon = denom_175_epsilon_0_to_fp16, x = var_5930_cast)[name = 
tensor("denom_175_cast")]; + tensor out_175_cast = mul(x = zero_mean_175_cast, y = denom_175_cast)[name = tensor("out_175_cast")]; + tensor var_5934_to_fp16 = const()[name = tensor("op_5934_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(522907712)))]; + tensor var_5935_cast = add(x = out_175_cast, y = var_5934_to_fp16)[name = tensor("op_5935_cast")]; + tensor var_5937_to_fp16 = const()[name = tensor("op_5937_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(522910336)))]; + tensor hidden_states_239_cast = mul(x = var_5935_cast, y = var_5937_to_fp16)[name = tensor("hidden_states_239_cast")]; + tensor var_5944 = const()[name = tensor("op_5944"), val = tensor([1, 1])]; + tensor var_5946 = const()[name = tensor("op_5946"), val = tensor([1, 1])]; + tensor q_117_pad_type_0 = const()[name = tensor("q_117_pad_type_0"), val = tensor("custom")]; + tensor q_117_pad_0 = const()[name = tensor("q_117_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(522912960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(523732224))), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_117_cast = conv(dilations = var_5946, groups = var_4943, pad = q_117_pad_0, pad_type = q_117_pad_type_0, strides = var_5944, weight = mid_block_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_239_cast)[name = tensor("q_117_cast")]; + tensor var_5950 = const()[name = tensor("op_5950"), val = tensor([1, 1])]; + tensor var_5952 = const()[name = tensor("op_5952"), val = tensor([1, 1])]; + tensor k_117_pad_type_0 = const()[name = tensor("k_117_pad_type_0"), val = tensor("custom")]; + tensor k_117_pad_0 = const()[name = tensor("k_117_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(523732352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(524551616))), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_117_cast = conv(dilations = var_5952, groups = var_4943, pad = k_117_pad_0, pad_type = k_117_pad_type_0, strides = var_5950, weight = mid_block_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_239_cast)[name = tensor("k_117_cast")]; + tensor var_5956 = const()[name = tensor("op_5956"), val = tensor([1, 1])]; + tensor var_5958 = const()[name = tensor("op_5958"), val = tensor([1, 1])]; + tensor v_117_pad_type_0 = const()[name = tensor("v_117_pad_type_0"), val = tensor("custom")]; + tensor v_117_pad_0 = const()[name = tensor("v_117_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(524551744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(525371008))), name = 
tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_117_cast = conv(dilations = var_5958, groups = var_4943, pad = v_117_pad_0, pad_type = v_117_pad_type_0, strides = var_5956, weight = mid_block_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_239_cast)[name = tensor("v_117_cast")]; + tensor var_5962 = const()[name = tensor("op_5962"), val = tensor([2, 20, 64, -1])]; + tensor var_5963_cast = reshape(shape = var_5962, x = q_117_cast)[name = tensor("op_5963_cast")]; + tensor var_5964 = const()[name = tensor("op_5964"), val = tensor([2, 20, 64, -1])]; + tensor var_5965_cast = reshape(shape = var_5964, x = k_117_cast)[name = tensor("op_5965_cast")]; + tensor var_5966 = const()[name = tensor("op_5966"), val = tensor([2, 20, 64, -1])]; + tensor var_5967_cast = reshape(shape = var_5966, x = v_117_cast)[name = tensor("op_5967_cast")]; + tensor attn_weights_233_transpose_x_0 = const()[name = tensor("attn_weights_233_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_233_transpose_y_0 = const()[name = tensor("attn_weights_233_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_233_cast = matmul(transpose_x = attn_weights_233_transpose_x_0, transpose_y = attn_weights_233_transpose_y_0, x = var_5963_cast, y = var_5965_cast)[name = tensor("attn_weights_233_cast")]; + tensor attn_weights_235_cast = mul(x = attn_weights_233_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_235_cast")]; + tensor var_5971_cast = softmax(axis = var_4927, x = attn_weights_235_cast)[name = tensor("op_5971_cast")]; + tensor attn_117_transpose_x_0 = const()[name = tensor("attn_117_transpose_x_0"), val = tensor(false)]; + tensor attn_117_transpose_y_0 = const()[name = tensor("attn_117_transpose_y_0"), val = tensor(true)]; + tensor attn_117_cast = matmul(transpose_x = attn_117_transpose_x_0, transpose_y = attn_117_transpose_y_0, x = var_5967_cast, y = var_5971_cast)[name = tensor("attn_117_cast")]; + tensor var_5975 = const()[name = tensor("op_5975"), val = tensor([2, 1280, 1, -1])]; + tensor input_367_cast = reshape(shape = var_5975, x = attn_117_cast)[name = tensor("input_367_cast")]; + tensor var_5980 = const()[name = tensor("op_5980"), val = tensor([1, 1])]; + tensor var_5982 = const()[name = tensor("op_5982"), val = tensor([1, 1])]; + tensor var_5984_pad_type_0 = const()[name = tensor("op_5984_pad_type_0"), val = tensor("custom")]; + tensor var_5984_pad_0 = const()[name = tensor("op_5984_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(525371136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526190400))), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526190528)))]; + tensor var_5984_cast = conv(bias = mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_5982, groups = var_4943, pad = var_5984_pad_0, pad_type = var_5984_pad_type_0, strides = 
var_5980, weight = mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_367_cast)[name = tensor("op_5984_cast")]; + tensor inputs_177_cast = add(x = var_5984_cast, y = inputs_175_cast)[name = tensor("inputs_177_cast")]; + tensor var_5988 = const()[name = tensor("op_5988"), val = tensor([1])]; + tensor channels_mean_177_cast = reduce_mean(axes = var_5988, keep_dims = var_4938, x = inputs_177_cast)[name = tensor("channels_mean_177_cast")]; + tensor zero_mean_177_cast = sub(x = inputs_177_cast, y = channels_mean_177_cast)[name = tensor("zero_mean_177_cast")]; + tensor zero_mean_sq_177_cast = mul(x = zero_mean_177_cast, y = zero_mean_177_cast)[name = tensor("zero_mean_sq_177_cast")]; + tensor var_5992 = const()[name = tensor("op_5992"), val = tensor([1])]; + tensor var_5993_cast = reduce_mean(axes = var_5992, keep_dims = var_4938, x = zero_mean_sq_177_cast)[name = tensor("op_5993_cast")]; + tensor var_5994_to_fp16 = const()[name = tensor("op_5994_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5995_cast = add(x = var_5993_cast, y = var_5994_to_fp16)[name = tensor("op_5995_cast")]; + tensor denom_177_epsilon_0_to_fp16 = const()[name = tensor("denom_177_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_177_cast = rsqrt(epsilon = denom_177_epsilon_0_to_fp16, x = var_5995_cast)[name = tensor("denom_177_cast")]; + tensor out_177_cast = mul(x = zero_mean_177_cast, y = denom_177_cast)[name = tensor("out_177_cast")]; + tensor var_5999_to_fp16 = const()[name = tensor("op_5999_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526193152)))]; + tensor var_6000_cast = add(x = out_177_cast, y = var_5999_to_fp16)[name = tensor("op_6000_cast")]; + tensor var_6002_to_fp16 = const()[name = tensor("op_6002_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526195776)))]; + tensor hidden_states_241_cast = mul(x = var_6000_cast, y = var_6002_to_fp16)[name = tensor("hidden_states_241_cast")]; + tensor var_6009 = const()[name = tensor("op_6009"), val = tensor([1, 1])]; + tensor var_6011 = const()[name = tensor("op_6011"), val = tensor([1, 1])]; + tensor q_119_pad_type_0 = const()[name = tensor("q_119_pad_type_0"), val = tensor("custom")]; + tensor q_119_pad_0 = const()[name = tensor("q_119_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526198400))), lut = tensor([-0x1.6a4p-7, 0x1.6ap-7]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_119_cast = conv(dilations = var_6011, groups = var_4943, pad = q_119_pad_0, pad_type = q_119_pad_type_0, strides = var_6009, weight = mid_block_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_241_cast)[name = tensor("q_119_cast")]; + tensor var_6015 = const()[name = tensor("op_6015"), val = tensor([1, 1])]; + tensor var_6017 = const()[name = tensor("op_6017"), val = tensor([1, 1])]; + tensor k_119_pad_type_0 = const()[name = tensor("k_119_pad_type_0"), val = tensor("custom")]; + tensor k_119_pad_0 = const()[name = tensor("k_119_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526403264))), lut = tensor([-0x1.d9cp-8, 0x1.d8p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_119_cast = conv(dilations = var_6017, groups = var_4943, pad = k_119_pad_0, pad_type = k_119_pad_type_0, strides = var_6015, weight = mid_block_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_119_cast")]; + tensor var_6021 = const()[name = tensor("op_6021"), val = tensor([1, 1])]; + tensor var_6023 = const()[name = tensor("op_6023"), val = tensor([1, 1])]; + tensor v_119_pad_type_0 = const()[name = tensor("v_119_pad_type_0"), val = tensor("custom")]; + tensor v_119_pad_0 = const()[name = tensor("v_119_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526731008))), lut = tensor([-0x1.128p-7, 0x1.13p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_119_cast = conv(dilations = var_6023, groups = var_4943, pad = v_119_pad_0, pad_type = v_119_pad_type_0, strides = var_6021, weight = mid_block_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_119_cast")]; + tensor var_6027 = const()[name = tensor("op_6027"), val = tensor([2, 20, 64, -1])]; + tensor var_6028_cast = reshape(shape = var_6027, x = q_119_cast)[name = tensor("op_6028_cast")]; + tensor var_6029 = const()[name = tensor("op_6029"), val = tensor([2, 20, 64, -1])]; + tensor var_6030_cast = reshape(shape = var_6029, x = k_119_cast)[name = tensor("op_6030_cast")]; + tensor var_6031 = const()[name = tensor("op_6031"), val = tensor([2, 20, 64, -1])]; + tensor var_6032_cast = reshape(shape = var_6031, x = v_119_cast)[name = tensor("op_6032_cast")]; + tensor attn_weights_237_transpose_x_0 = const()[name = tensor("attn_weights_237_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_237_transpose_y_0 = const()[name = tensor("attn_weights_237_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_237_cast = matmul(transpose_x = attn_weights_237_transpose_x_0, transpose_y = attn_weights_237_transpose_y_0, x = var_6028_cast, y = var_6030_cast)[name = tensor("attn_weights_237_cast")]; + tensor attn_weights_239_cast = mul(x = attn_weights_237_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_239_cast")]; + tensor var_6036_cast = softmax(axis = var_4927, x = attn_weights_239_cast)[name = tensor("op_6036_cast")]; + tensor attn_119_transpose_x_0 = const()[name = tensor("attn_119_transpose_x_0"), val = tensor(false)]; + tensor attn_119_transpose_y_0 = const()[name = tensor("attn_119_transpose_y_0"), val = tensor(true)]; + tensor attn_119_cast = matmul(transpose_x = attn_119_transpose_x_0, transpose_y = attn_119_transpose_y_0, x = var_6032_cast, y = var_6036_cast)[name = tensor("attn_119_cast")]; + tensor var_6040 = const()[name = tensor("op_6040"), val = tensor([2, 1280, 1, -1])]; + tensor input_369_cast = reshape(shape = var_6040, x = attn_119_cast)[name = tensor("input_369_cast")]; + tensor var_6045 = const()[name = tensor("op_6045"), val = tensor([1, 1])]; + tensor var_6047 = const()[name = tensor("op_6047"), val = tensor([1, 1])]; 
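
Each `constexpr_lut_to_dense` op above reconstructs a dense fp16 weight tensor at load time from per-weight indices stored in `weight.bin` plus a small look-up table: an inline 2-entry LUT such as `[-0x1.d9cp-8, 0x1.d8p-8]` means 1 bit per weight, the inline 4-entry LUTs seen elsewhere in this file mean 2 bits per weight, and other layers reference a larger palette stored as a separate blob. The following NumPy sketch illustrates the decode; the little-endian bit packing is an assumption for illustration only, as the actual blob layout inside `weight.bin` is a Core ML implementation detail.

```python
import numpy as np

def lut_to_dense(packed, lut, shape):
    """Sketch of a LUT-palettized weight decode: expand n-bit indices
    (packed into bytes, little-endian -- an assumed layout) into dense
    fp16 values via the palette."""
    bits = int(np.log2(len(lut)))
    n = int(np.prod(shape))
    raw = np.frombuffer(packed, dtype=np.uint8)
    bit_stream = np.unpackbits(raw, bitorder='little')[: n * bits]
    idx = (bit_stream.reshape(n, bits) * (1 << np.arange(bits))).sum(axis=1)
    return np.asarray(lut, dtype=np.float16)[idx].reshape(shape)

# 1-bit palette like attn2_to_k above: two fp16 centroids (hex float literals)
lut = [float.fromhex('-0x1.d9cp-8'), float.fromhex('0x1.d8p-8')]
packed = np.random.randint(0, 256, (1280 * 2048) // 8, dtype=np.uint8).tobytes()
w = lut_to_dense(packed, lut, (1280, 2048, 1, 1))
print(w.dtype, w.shape, np.unique(w))   # float16 (1280, 2048, 1, 1), 2 values
```

This is why the cross-attention projections cost so little here: a `[1280, 2048]` weight at 1 bit needs roughly 328 KB of indices plus a 2-value palette, versus about 5.2 MB in fp16, which is consistent with the offset deltas between consecutive blobs above. The feed-forward blocks that follow reuse the same mechanism, splitting the 10240-channel projection into two 5120-channel halves with a GELU gate.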
+ tensor var_6049_pad_type_0 = const()[name = tensor("op_6049_pad_type_0"), val = tensor("custom")]; + tensor var_6049_pad_0 = const()[name = tensor("op_6049_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(527058752))), lut = tensor([-0x1.45p-8, 0x1.46p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(527263616)))]; + tensor var_6049_cast = conv(bias = mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_6047, groups = var_4943, pad = var_6049_pad_0, pad_type = var_6049_pad_type_0, strides = var_6045, weight = mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_369_cast)[name = tensor("op_6049_cast")]; + tensor inputs_179_cast = add(x = var_6049_cast, y = inputs_177_cast)[name = tensor("inputs_179_cast")]; + tensor var_6053 = const()[name = tensor("op_6053"), val = tensor([1])]; + tensor channels_mean_179_cast = reduce_mean(axes = var_6053, keep_dims = var_4938, x = inputs_179_cast)[name = tensor("channels_mean_179_cast")]; + tensor zero_mean_179_cast = sub(x = inputs_179_cast, y = channels_mean_179_cast)[name = tensor("zero_mean_179_cast")]; + tensor zero_mean_sq_179_cast = mul(x = zero_mean_179_cast, y = zero_mean_179_cast)[name = tensor("zero_mean_sq_179_cast")]; + tensor var_6057 = const()[name = tensor("op_6057"), val = tensor([1])]; + tensor var_6058_cast = reduce_mean(axes = var_6057, keep_dims = var_4938, x = zero_mean_sq_179_cast)[name = tensor("op_6058_cast")]; + tensor var_6059_to_fp16 = const()[name = tensor("op_6059_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6060_cast = add(x = var_6058_cast, y = var_6059_to_fp16)[name = tensor("op_6060_cast")]; + tensor denom_179_epsilon_0_to_fp16 = const()[name = tensor("denom_179_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_179_cast = rsqrt(epsilon = denom_179_epsilon_0_to_fp16, x = var_6060_cast)[name = tensor("denom_179_cast")]; + tensor out_179_cast = mul(x = zero_mean_179_cast, y = denom_179_cast)[name = tensor("out_179_cast")]; + tensor var_6064_to_fp16 = const()[name = tensor("op_6064_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(527266240)))]; + tensor var_6065_cast = add(x = out_179_cast, y = var_6064_to_fp16)[name = tensor("op_6065_cast")]; + tensor var_6067_to_fp16 = const()[name = tensor("op_6067_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(527268864)))]; + tensor input_371_cast = mul(x = var_6065_cast, y = var_6067_to_fp16)[name = tensor("input_371_cast")]; + tensor var_6075 = const()[name = tensor("op_6075"), val = tensor([1, 1])]; + tensor var_6077 = const()[name = tensor("op_6077"), val = tensor([1, 1])]; + tensor var_6079_pad_type_0 = const()[name = tensor("op_6079_pad_type_0"), val = tensor("custom")]; + tensor var_6079_pad_0 = const()[name = tensor("op_6079_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(527271488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533825152))), name = tensor("mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533825280)))]; + tensor var_6079_cast = conv(bias = mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_6077, groups = var_4943, pad = var_6079_pad_0, pad_type = var_6079_pad_type_0, strides = var_6075, weight = mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_371_cast)[name = tensor("op_6079_cast")]; + tensor var_6080_split_sizes_0 = const()[name = tensor("op_6080_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6080_axis_0 = const()[name = tensor("op_6080_axis_0"), val = tensor(1)]; + tensor var_6080_cast_0, tensor var_6080_cast_1 = split(axis = var_6080_axis_0, split_sizes = var_6080_split_sizes_0, x = var_6079_cast)[name = tensor("op_6080_cast")]; + tensor var_6082_mode_0 = const()[name = tensor("op_6082_mode_0"), val = tensor("EXACT")]; + tensor var_6082_cast = gelu(mode = var_6082_mode_0, x = var_6080_cast_1)[name = tensor("op_6082_cast")]; + tensor input_373_cast = mul(x = var_6080_cast_0, y = var_6082_cast)[name = tensor("input_373_cast")]; + tensor var_6086 = const()[name = tensor("op_6086"), val = tensor([1, 1])]; + tensor var_6088 = const()[name = tensor("op_6088"), val = tensor([1, 1])]; + tensor var_6090_pad_type_0 = const()[name = tensor("op_6090_pad_type_0"), val = tensor("custom")]; + tensor var_6090_pad_0 = const()[name = tensor("op_6090_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533845824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537122688))), name = tensor("mid_block_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537122816)))]; + tensor var_6090_cast = conv(bias = mid_block_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_6088, groups = var_4943, pad = var_6090_pad_0, pad_type = var_6090_pad_type_0, strides = var_6086, weight = mid_block_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_373_cast)[name = tensor("op_6090_cast")]; + tensor inputs_181_cast = add(x = var_6090_cast, y = inputs_179_cast)[name = tensor("inputs_181_cast")]; + tensor var_6100 = const()[name = tensor("op_6100"), val = tensor([1])]; + tensor channels_mean_181_cast = reduce_mean(axes = var_6100, keep_dims = var_4938, x = inputs_181_cast)[name = tensor("channels_mean_181_cast")]; + tensor 
zero_mean_181_cast = sub(x = inputs_181_cast, y = channels_mean_181_cast)[name = tensor("zero_mean_181_cast")]; + tensor zero_mean_sq_181_cast = mul(x = zero_mean_181_cast, y = zero_mean_181_cast)[name = tensor("zero_mean_sq_181_cast")]; + tensor var_6104 = const()[name = tensor("op_6104"), val = tensor([1])]; + tensor var_6105_cast = reduce_mean(axes = var_6104, keep_dims = var_4938, x = zero_mean_sq_181_cast)[name = tensor("op_6105_cast")]; + tensor var_6106_to_fp16 = const()[name = tensor("op_6106_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6107_cast = add(x = var_6105_cast, y = var_6106_to_fp16)[name = tensor("op_6107_cast")]; + tensor denom_181_epsilon_0_to_fp16 = const()[name = tensor("denom_181_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_181_cast = rsqrt(epsilon = denom_181_epsilon_0_to_fp16, x = var_6107_cast)[name = tensor("denom_181_cast")]; + tensor out_181_cast = mul(x = zero_mean_181_cast, y = denom_181_cast)[name = tensor("out_181_cast")]; + tensor var_6111_to_fp16 = const()[name = tensor("op_6111_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537125440)))]; + tensor var_6112_cast = add(x = out_181_cast, y = var_6111_to_fp16)[name = tensor("op_6112_cast")]; + tensor var_6114_to_fp16 = const()[name = tensor("op_6114_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537128064)))]; + tensor hidden_states_245_cast = mul(x = var_6112_cast, y = var_6114_to_fp16)[name = tensor("hidden_states_245_cast")]; + tensor var_6121 = const()[name = tensor("op_6121"), val = tensor([1, 1])]; + tensor var_6123 = const()[name = tensor("op_6123"), val = tensor([1, 1])]; + tensor q_121_pad_type_0 = const()[name = tensor("q_121_pad_type_0"), val = tensor("custom")]; + tensor q_121_pad_0 = const()[name = tensor("q_121_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537130688))), lut = tensor([-0x1.29cp-5, -0x1.694p-7, 0x1.64p-7, 0x1.28cp-5]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_121_cast = conv(dilations = var_6123, groups = var_4943, pad = q_121_pad_0, pad_type = q_121_pad_type_0, strides = var_6121, weight = mid_block_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_245_cast)[name = tensor("q_121_cast")]; + tensor var_6127 = const()[name = tensor("op_6127"), val = tensor([1, 1])]; + tensor var_6129 = const()[name = tensor("op_6129"), val = tensor([1, 1])]; + tensor k_121_pad_type_0 = const()[name = tensor("k_121_pad_type_0"), val = tensor("custom")]; + tensor k_121_pad_0 = const()[name = tensor("k_121_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537540352))), lut = tensor([-0x1.28p-5, -0x1.644p-7, 0x1.688p-7, 0x1.294p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_121_cast = conv(dilations = var_6129, groups = var_4943, pad = k_121_pad_0, pad_type = k_121_pad_type_0, strides = var_6127, weight = 
mid_block_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_245_cast)[name = tensor("k_121_cast")]; + tensor var_6133 = const()[name = tensor("op_6133"), val = tensor([1, 1])]; + tensor var_6135 = const()[name = tensor("op_6135"), val = tensor([1, 1])]; + tensor v_121_pad_type_0 = const()[name = tensor("v_121_pad_type_0"), val = tensor("custom")]; + tensor v_121_pad_0 = const()[name = tensor("v_121_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537950016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(538769280))), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_121_cast = conv(dilations = var_6135, groups = var_4943, pad = v_121_pad_0, pad_type = v_121_pad_type_0, strides = var_6133, weight = mid_block_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_245_cast)[name = tensor("v_121_cast")]; + tensor var_6139 = const()[name = tensor("op_6139"), val = tensor([2, 20, 64, -1])]; + tensor var_6140_cast = reshape(shape = var_6139, x = q_121_cast)[name = tensor("op_6140_cast")]; + tensor var_6141 = const()[name = tensor("op_6141"), val = tensor([2, 20, 64, -1])]; + tensor var_6142_cast = reshape(shape = var_6141, x = k_121_cast)[name = tensor("op_6142_cast")]; + tensor var_6143 = const()[name = tensor("op_6143"), val = tensor([2, 20, 64, -1])]; + tensor var_6144_cast = reshape(shape = var_6143, x = v_121_cast)[name = tensor("op_6144_cast")]; + tensor attn_weights_241_transpose_x_0 = const()[name = tensor("attn_weights_241_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_241_transpose_y_0 = const()[name = tensor("attn_weights_241_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_241_cast = matmul(transpose_x = attn_weights_241_transpose_x_0, transpose_y = attn_weights_241_transpose_y_0, x = var_6140_cast, y = var_6142_cast)[name = tensor("attn_weights_241_cast")]; + tensor attn_weights_243_cast = mul(x = attn_weights_241_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_243_cast")]; + tensor var_6148_cast = softmax(axis = var_4927, x = attn_weights_243_cast)[name = tensor("op_6148_cast")]; + tensor attn_121_transpose_x_0 = const()[name = tensor("attn_121_transpose_x_0"), val = tensor(false)]; + tensor attn_121_transpose_y_0 = const()[name = tensor("attn_121_transpose_y_0"), val = tensor(true)]; + tensor attn_121_cast = matmul(transpose_x = attn_121_transpose_x_0, transpose_y = attn_121_transpose_y_0, x = var_6144_cast, y = var_6148_cast)[name = tensor("attn_121_cast")]; + tensor var_6152 = const()[name = tensor("op_6152"), val = tensor([2, 1280, 1, -1])]; + tensor input_375_cast = reshape(shape = var_6152, x = attn_121_cast)[name = tensor("input_375_cast")]; + tensor var_6157 = const()[name = tensor("op_6157"), val = tensor([1, 1])]; + tensor var_6159 = const()[name = tensor("op_6159"), val = tensor([1, 1])]; + tensor var_6161_pad_type_0 = const()[name = tensor("op_6161_pad_type_0"), val = tensor("custom")]; + tensor var_6161_pad_0 = const()[name = tensor("op_6161_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(538769408))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(539588672))), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(539588800)))]; + tensor var_6161_cast = conv(bias = mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_6159, groups = var_4943, pad = var_6161_pad_0, pad_type = var_6161_pad_type_0, strides = var_6157, weight = mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_375_cast)[name = tensor("op_6161_cast")]; + tensor inputs_183_cast = add(x = var_6161_cast, y = inputs_181_cast)[name = tensor("inputs_183_cast")]; + tensor var_6165 = const()[name = tensor("op_6165"), val = tensor([1])]; + tensor channels_mean_183_cast = reduce_mean(axes = var_6165, keep_dims = var_4938, x = inputs_183_cast)[name = tensor("channels_mean_183_cast")]; + tensor zero_mean_183_cast = sub(x = inputs_183_cast, y = channels_mean_183_cast)[name = tensor("zero_mean_183_cast")]; + tensor zero_mean_sq_183_cast = mul(x = zero_mean_183_cast, y = zero_mean_183_cast)[name = tensor("zero_mean_sq_183_cast")]; + tensor var_6169 = const()[name = tensor("op_6169"), val = tensor([1])]; + tensor var_6170_cast = reduce_mean(axes = var_6169, keep_dims = var_4938, x = zero_mean_sq_183_cast)[name = tensor("op_6170_cast")]; + tensor var_6171_to_fp16 = const()[name = tensor("op_6171_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6172_cast = add(x = var_6170_cast, y = var_6171_to_fp16)[name = tensor("op_6172_cast")]; + tensor denom_183_epsilon_0_to_fp16 = const()[name = tensor("denom_183_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_183_cast = rsqrt(epsilon = denom_183_epsilon_0_to_fp16, x = var_6172_cast)[name = tensor("denom_183_cast")]; + tensor out_183_cast = mul(x = zero_mean_183_cast, y = denom_183_cast)[name = tensor("out_183_cast")]; + tensor var_6176_to_fp16 = const()[name = tensor("op_6176_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(539591424)))]; + tensor var_6177_cast = add(x = out_183_cast, y = var_6176_to_fp16)[name = tensor("op_6177_cast")]; + tensor var_6179_to_fp16 = const()[name = tensor("op_6179_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(539594048)))]; + tensor hidden_states_247_cast = mul(x = var_6177_cast, y = var_6179_to_fp16)[name = tensor("hidden_states_247_cast")]; + tensor var_6186 = const()[name = tensor("op_6186"), val = tensor([1, 1])]; + tensor var_6188 = const()[name = tensor("op_6188"), val = tensor([1, 1])]; + tensor q_123_pad_type_0 = const()[name = tensor("q_123_pad_type_0"), val = tensor("custom")]; + tensor q_123_pad_0 = const()[name = tensor("q_123_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(539596672))), lut = tensor([-0x1.664p-7, 0x1.668p-7]), name = 
tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_123_cast = conv(dilations = var_6188, groups = var_4943, pad = q_123_pad_0, pad_type = q_123_pad_type_0, strides = var_6186, weight = mid_block_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_247_cast)[name = tensor("q_123_cast")]; + tensor var_6192 = const()[name = tensor("op_6192"), val = tensor([1, 1])]; + tensor var_6194 = const()[name = tensor("op_6194"), val = tensor([1, 1])]; + tensor k_123_pad_type_0 = const()[name = tensor("k_123_pad_type_0"), val = tensor("custom")]; + tensor k_123_pad_0 = const()[name = tensor("k_123_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(539801536))), lut = tensor([-0x1.c18p-8, 0x1.c1p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_123_cast = conv(dilations = var_6194, groups = var_4943, pad = k_123_pad_0, pad_type = k_123_pad_type_0, strides = var_6192, weight = mid_block_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_123_cast")]; + tensor var_6198 = const()[name = tensor("op_6198"), val = tensor([1, 1])]; + tensor var_6200 = const()[name = tensor("op_6200"), val = tensor([1, 1])]; + tensor v_123_pad_type_0 = const()[name = tensor("v_123_pad_type_0"), val = tensor("custom")]; + tensor v_123_pad_0 = const()[name = tensor("v_123_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(540129280))), lut = tensor([-0x1.f8cp-8, 0x1.f88p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_123_cast = conv(dilations = var_6200, groups = var_4943, pad = v_123_pad_0, pad_type = v_123_pad_type_0, strides = var_6198, weight = mid_block_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_123_cast")]; + tensor var_6204 = const()[name = tensor("op_6204"), val = tensor([2, 20, 64, -1])]; + tensor var_6205_cast = reshape(shape = var_6204, x = q_123_cast)[name = tensor("op_6205_cast")]; + tensor var_6206 = const()[name = tensor("op_6206"), val = tensor([2, 20, 64, -1])]; + tensor var_6207_cast = reshape(shape = var_6206, x = k_123_cast)[name = tensor("op_6207_cast")]; + tensor var_6208 = const()[name = tensor("op_6208"), val = tensor([2, 20, 64, -1])]; + tensor var_6209_cast = reshape(shape = var_6208, x = v_123_cast)[name = tensor("op_6209_cast")]; + tensor attn_weights_245_transpose_x_0 = const()[name = tensor("attn_weights_245_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_245_transpose_y_0 = const()[name = tensor("attn_weights_245_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_245_cast = matmul(transpose_x = attn_weights_245_transpose_x_0, transpose_y = attn_weights_245_transpose_y_0, x = var_6205_cast, y = var_6207_cast)[name = tensor("attn_weights_245_cast")]; + tensor attn_weights_247_cast = mul(x = attn_weights_245_cast, y = 
var_4934_to_fp16)[name = tensor("attn_weights_247_cast")]; + tensor var_6213_cast = softmax(axis = var_4927, x = attn_weights_247_cast)[name = tensor("op_6213_cast")]; + tensor attn_123_transpose_x_0 = const()[name = tensor("attn_123_transpose_x_0"), val = tensor(false)]; + tensor attn_123_transpose_y_0 = const()[name = tensor("attn_123_transpose_y_0"), val = tensor(true)]; + tensor attn_123_cast = matmul(transpose_x = attn_123_transpose_x_0, transpose_y = attn_123_transpose_y_0, x = var_6209_cast, y = var_6213_cast)[name = tensor("attn_123_cast")]; + tensor var_6217 = const()[name = tensor("op_6217"), val = tensor([2, 1280, 1, -1])]; + tensor input_377_cast = reshape(shape = var_6217, x = attn_123_cast)[name = tensor("input_377_cast")]; + tensor var_6222 = const()[name = tensor("op_6222"), val = tensor([1, 1])]; + tensor var_6224 = const()[name = tensor("op_6224"), val = tensor([1, 1])]; + tensor var_6226_pad_type_0 = const()[name = tensor("op_6226_pad_type_0"), val = tensor("custom")]; + tensor var_6226_pad_0 = const()[name = tensor("op_6226_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(540457024))), lut = tensor([-0x1.35p-8, 0x1.37p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(540661888)))]; + tensor var_6226_cast = conv(bias = mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_6224, groups = var_4943, pad = var_6226_pad_0, pad_type = var_6226_pad_type_0, strides = var_6222, weight = mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_377_cast)[name = tensor("op_6226_cast")]; + tensor inputs_185_cast = add(x = var_6226_cast, y = inputs_183_cast)[name = tensor("inputs_185_cast")]; + tensor var_6230 = const()[name = tensor("op_6230"), val = tensor([1])]; + tensor channels_mean_185_cast = reduce_mean(axes = var_6230, keep_dims = var_4938, x = inputs_185_cast)[name = tensor("channels_mean_185_cast")]; + tensor zero_mean_185_cast = sub(x = inputs_185_cast, y = channels_mean_185_cast)[name = tensor("zero_mean_185_cast")]; + tensor zero_mean_sq_185_cast = mul(x = zero_mean_185_cast, y = zero_mean_185_cast)[name = tensor("zero_mean_sq_185_cast")]; + tensor var_6234 = const()[name = tensor("op_6234"), val = tensor([1])]; + tensor var_6235_cast = reduce_mean(axes = var_6234, keep_dims = var_4938, x = zero_mean_sq_185_cast)[name = tensor("op_6235_cast")]; + tensor var_6236_to_fp16 = const()[name = tensor("op_6236_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6237_cast = add(x = var_6235_cast, y = var_6236_to_fp16)[name = tensor("op_6237_cast")]; + tensor denom_185_epsilon_0_to_fp16 = const()[name = tensor("denom_185_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_185_cast = rsqrt(epsilon = denom_185_epsilon_0_to_fp16, x = var_6237_cast)[name = tensor("denom_185_cast")]; + tensor out_185_cast = mul(x = zero_mean_185_cast, y = denom_185_cast)[name = tensor("out_185_cast")]; + tensor var_6241_to_fp16 = const()[name = tensor("op_6241_to_fp16"), 
val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(540664512)))]; + tensor var_6242_cast = add(x = out_185_cast, y = var_6241_to_fp16)[name = tensor("op_6242_cast")]; + tensor var_6244_to_fp16 = const()[name = tensor("op_6244_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(540667136)))]; + tensor input_379_cast = mul(x = var_6242_cast, y = var_6244_to_fp16)[name = tensor("input_379_cast")]; + tensor var_6252 = const()[name = tensor("op_6252"), val = tensor([1, 1])]; + tensor var_6254 = const()[name = tensor("op_6254"), val = tensor([1, 1])]; + tensor var_6256_pad_type_0 = const()[name = tensor("op_6256_pad_type_0"), val = tensor("custom")]; + tensor var_6256_pad_0 = const()[name = tensor("op_6256_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(540669760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(547223424))), name = tensor("mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(547223552)))]; + tensor var_6256_cast = conv(bias = mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_6254, groups = var_4943, pad = var_6256_pad_0, pad_type = var_6256_pad_type_0, strides = var_6252, weight = mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_379_cast)[name = tensor("op_6256_cast")]; + tensor var_6257_split_sizes_0 = const()[name = tensor("op_6257_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6257_axis_0 = const()[name = tensor("op_6257_axis_0"), val = tensor(1)]; + tensor var_6257_cast_0, tensor var_6257_cast_1 = split(axis = var_6257_axis_0, split_sizes = var_6257_split_sizes_0, x = var_6256_cast)[name = tensor("op_6257_cast")]; + tensor var_6259_mode_0 = const()[name = tensor("op_6259_mode_0"), val = tensor("EXACT")]; + tensor var_6259_cast = gelu(mode = var_6259_mode_0, x = var_6257_cast_1)[name = tensor("op_6259_cast")]; + tensor input_381_cast = mul(x = var_6257_cast_0, y = var_6259_cast)[name = tensor("input_381_cast")]; + tensor var_6263 = const()[name = tensor("op_6263"), val = tensor([1, 1])]; + tensor var_6265 = const()[name = tensor("op_6265"), val = tensor([1, 1])]; + tensor var_6267_pad_type_0 = const()[name = tensor("op_6267_pad_type_0"), val = tensor("custom")]; + tensor var_6267_pad_0 = const()[name = tensor("op_6267_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(547244096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(550520960))), name = tensor("mid_block_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = 
tensor("mid_block_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(550521088)))]; + tensor var_6267_cast = conv(bias = mid_block_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_6265, groups = var_4943, pad = var_6267_pad_0, pad_type = var_6267_pad_type_0, strides = var_6263, weight = mid_block_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_381_cast)[name = tensor("op_6267_cast")]; + tensor inputs_187_cast = add(x = var_6267_cast, y = inputs_185_cast)[name = tensor("inputs_187_cast")]; + tensor var_6277 = const()[name = tensor("op_6277"), val = tensor([1])]; + tensor channels_mean_187_cast = reduce_mean(axes = var_6277, keep_dims = var_4938, x = inputs_187_cast)[name = tensor("channels_mean_187_cast")]; + tensor zero_mean_187_cast = sub(x = inputs_187_cast, y = channels_mean_187_cast)[name = tensor("zero_mean_187_cast")]; + tensor zero_mean_sq_187_cast = mul(x = zero_mean_187_cast, y = zero_mean_187_cast)[name = tensor("zero_mean_sq_187_cast")]; + tensor var_6281 = const()[name = tensor("op_6281"), val = tensor([1])]; + tensor var_6282_cast = reduce_mean(axes = var_6281, keep_dims = var_4938, x = zero_mean_sq_187_cast)[name = tensor("op_6282_cast")]; + tensor var_6283_to_fp16 = const()[name = tensor("op_6283_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6284_cast = add(x = var_6282_cast, y = var_6283_to_fp16)[name = tensor("op_6284_cast")]; + tensor denom_187_epsilon_0_to_fp16 = const()[name = tensor("denom_187_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_187_cast = rsqrt(epsilon = denom_187_epsilon_0_to_fp16, x = var_6284_cast)[name = tensor("denom_187_cast")]; + tensor out_187_cast = mul(x = zero_mean_187_cast, y = denom_187_cast)[name = tensor("out_187_cast")]; + tensor var_6288_to_fp16 = const()[name = tensor("op_6288_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(550523712)))]; + tensor var_6289_cast = add(x = out_187_cast, y = var_6288_to_fp16)[name = tensor("op_6289_cast")]; + tensor var_6291_to_fp16 = const()[name = tensor("op_6291_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(550526336)))]; + tensor hidden_states_251_cast = mul(x = var_6289_cast, y = var_6291_to_fp16)[name = tensor("hidden_states_251_cast")]; + tensor var_6298 = const()[name = tensor("op_6298"), val = tensor([1, 1])]; + tensor var_6300 = const()[name = tensor("op_6300"), val = tensor([1, 1])]; + tensor q_125_pad_type_0 = const()[name = tensor("q_125_pad_type_0"), val = tensor("custom")]; + tensor q_125_pad_0 = const()[name = tensor("q_125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(550528960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(551348224))), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_125_cast = conv(dilations = var_6300, groups = var_4943, pad = q_125_pad_0, pad_type = q_125_pad_type_0, strides = var_6298, weight = mid_block_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_251_cast)[name = tensor("q_125_cast")]; + tensor var_6304 = const()[name = 
tensor("op_6304"), val = tensor([1, 1])]; + tensor var_6306 = const()[name = tensor("op_6306"), val = tensor([1, 1])]; + tensor k_125_pad_type_0 = const()[name = tensor("k_125_pad_type_0"), val = tensor("custom")]; + tensor k_125_pad_0 = const()[name = tensor("k_125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(551348352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(552167616))), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_125_cast = conv(dilations = var_6306, groups = var_4943, pad = k_125_pad_0, pad_type = k_125_pad_type_0, strides = var_6304, weight = mid_block_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_251_cast)[name = tensor("k_125_cast")]; + tensor var_6310 = const()[name = tensor("op_6310"), val = tensor([1, 1])]; + tensor var_6312 = const()[name = tensor("op_6312"), val = tensor([1, 1])]; + tensor v_125_pad_type_0 = const()[name = tensor("v_125_pad_type_0"), val = tensor("custom")]; + tensor v_125_pad_0 = const()[name = tensor("v_125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(552167744))), lut = tensor([-0x1.454p-5, -0x1.88p-7, 0x1.864p-7, 0x1.44cp-5]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_125_cast = conv(dilations = var_6312, groups = var_4943, pad = v_125_pad_0, pad_type = v_125_pad_type_0, strides = var_6310, weight = mid_block_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_251_cast)[name = tensor("v_125_cast")]; + tensor var_6316 = const()[name = tensor("op_6316"), val = tensor([2, 20, 64, -1])]; + tensor var_6317_cast = reshape(shape = var_6316, x = q_125_cast)[name = tensor("op_6317_cast")]; + tensor var_6318 = const()[name = tensor("op_6318"), val = tensor([2, 20, 64, -1])]; + tensor var_6319_cast = reshape(shape = var_6318, x = k_125_cast)[name = tensor("op_6319_cast")]; + tensor var_6320 = const()[name = tensor("op_6320"), val = tensor([2, 20, 64, -1])]; + tensor var_6321_cast = reshape(shape = var_6320, x = v_125_cast)[name = tensor("op_6321_cast")]; + tensor attn_weights_249_transpose_x_0 = const()[name = tensor("attn_weights_249_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_249_transpose_y_0 = const()[name = tensor("attn_weights_249_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_249_cast = matmul(transpose_x = attn_weights_249_transpose_x_0, transpose_y = attn_weights_249_transpose_y_0, x = var_6317_cast, y = var_6319_cast)[name = tensor("attn_weights_249_cast")]; + tensor attn_weights_251_cast = mul(x = attn_weights_249_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_251_cast")]; + tensor var_6325_cast = softmax(axis = var_4927, x = attn_weights_251_cast)[name = tensor("op_6325_cast")]; + tensor attn_125_transpose_x_0 = const()[name = tensor("attn_125_transpose_x_0"), val = tensor(false)]; + tensor attn_125_transpose_y_0 = const()[name = tensor("attn_125_transpose_y_0"), val = tensor(true)]; + 
tensor attn_125_cast = matmul(transpose_x = attn_125_transpose_x_0, transpose_y = attn_125_transpose_y_0, x = var_6321_cast, y = var_6325_cast)[name = tensor("attn_125_cast")]; + tensor var_6329 = const()[name = tensor("op_6329"), val = tensor([2, 1280, 1, -1])]; + tensor input_383_cast = reshape(shape = var_6329, x = attn_125_cast)[name = tensor("input_383_cast")]; + tensor var_6334 = const()[name = tensor("op_6334"), val = tensor([1, 1])]; + tensor var_6336 = const()[name = tensor("op_6336"), val = tensor([1, 1])]; + tensor var_6338_pad_type_0 = const()[name = tensor("op_6338_pad_type_0"), val = tensor("custom")]; + tensor var_6338_pad_0 = const()[name = tensor("op_6338_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(552577408))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553396672))), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553396800)))]; + tensor var_6338_cast = conv(bias = mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_6336, groups = var_4943, pad = var_6338_pad_0, pad_type = var_6338_pad_type_0, strides = var_6334, weight = mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_383_cast)[name = tensor("op_6338_cast")]; + tensor inputs_189_cast = add(x = var_6338_cast, y = inputs_187_cast)[name = tensor("inputs_189_cast")]; + tensor var_6342 = const()[name = tensor("op_6342"), val = tensor([1])]; + tensor channels_mean_189_cast = reduce_mean(axes = var_6342, keep_dims = var_4938, x = inputs_189_cast)[name = tensor("channels_mean_189_cast")]; + tensor zero_mean_189_cast = sub(x = inputs_189_cast, y = channels_mean_189_cast)[name = tensor("zero_mean_189_cast")]; + tensor zero_mean_sq_189_cast = mul(x = zero_mean_189_cast, y = zero_mean_189_cast)[name = tensor("zero_mean_sq_189_cast")]; + tensor var_6346 = const()[name = tensor("op_6346"), val = tensor([1])]; + tensor var_6347_cast = reduce_mean(axes = var_6346, keep_dims = var_4938, x = zero_mean_sq_189_cast)[name = tensor("op_6347_cast")]; + tensor var_6348_to_fp16 = const()[name = tensor("op_6348_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6349_cast = add(x = var_6347_cast, y = var_6348_to_fp16)[name = tensor("op_6349_cast")]; + tensor denom_189_epsilon_0_to_fp16 = const()[name = tensor("denom_189_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_189_cast = rsqrt(epsilon = denom_189_epsilon_0_to_fp16, x = var_6349_cast)[name = tensor("denom_189_cast")]; + tensor out_189_cast = mul(x = zero_mean_189_cast, y = denom_189_cast)[name = tensor("out_189_cast")]; + tensor var_6353_to_fp16 = const()[name = tensor("op_6353_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553399424)))]; + tensor var_6354_cast = add(x = out_189_cast, y = var_6353_to_fp16)[name = tensor("op_6354_cast")]; + tensor var_6356_to_fp16 = const()[name = tensor("op_6356_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(553402048)))]; + tensor hidden_states_253_cast = mul(x = var_6354_cast, y = var_6356_to_fp16)[name = tensor("hidden_states_253_cast")]; + tensor var_6363 = const()[name = tensor("op_6363"), val = tensor([1, 1])]; + tensor var_6365 = const()[name = tensor("op_6365"), val = tensor([1, 1])]; + tensor q_127_pad_type_0 = const()[name = tensor("q_127_pad_type_0"), val = tensor("custom")]; + tensor q_127_pad_0 = const()[name = tensor("q_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553404672))), lut = tensor([-0x1.61p-7, 0x1.61p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_127_cast = conv(dilations = var_6365, groups = var_4943, pad = q_127_pad_0, pad_type = q_127_pad_type_0, strides = var_6363, weight = mid_block_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_253_cast)[name = tensor("q_127_cast")]; + tensor var_6369 = const()[name = tensor("op_6369"), val = tensor([1, 1])]; + tensor var_6371 = const()[name = tensor("op_6371"), val = tensor([1, 1])]; + tensor k_127_pad_type_0 = const()[name = tensor("k_127_pad_type_0"), val = tensor("custom")]; + tensor k_127_pad_0 = const()[name = tensor("k_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553609536))), lut = tensor([-0x1.b7p-8, 0x1.b6cp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_127_cast = conv(dilations = var_6371, groups = var_4943, pad = k_127_pad_0, pad_type = k_127_pad_type_0, strides = var_6369, weight = mid_block_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_127_cast")]; + tensor var_6375 = const()[name = tensor("op_6375"), val = tensor([1, 1])]; + tensor var_6377 = const()[name = tensor("op_6377"), val = tensor([1, 1])]; + tensor v_127_pad_type_0 = const()[name = tensor("v_127_pad_type_0"), val = tensor("custom")]; + tensor v_127_pad_0 = const()[name = tensor("v_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553937280))), lut = tensor([-0x1.e78p-8, 0x1.e74p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_127_cast = conv(dilations = var_6377, groups = var_4943, pad = v_127_pad_0, pad_type = v_127_pad_type_0, strides = var_6375, weight = mid_block_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_127_cast")]; + tensor var_6381 = const()[name = tensor("op_6381"), val = tensor([2, 20, 64, -1])]; + tensor var_6382_cast = reshape(shape = var_6381, x = q_127_cast)[name = tensor("op_6382_cast")]; + tensor var_6383 = const()[name = tensor("op_6383"), val = tensor([2, 20, 64, -1])]; + tensor var_6384_cast 
= reshape(shape = var_6383, x = k_127_cast)[name = tensor("op_6384_cast")]; + tensor var_6385 = const()[name = tensor("op_6385"), val = tensor([2, 20, 64, -1])]; + tensor var_6386_cast = reshape(shape = var_6385, x = v_127_cast)[name = tensor("op_6386_cast")]; + tensor attn_weights_253_transpose_x_0 = const()[name = tensor("attn_weights_253_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_253_transpose_y_0 = const()[name = tensor("attn_weights_253_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_253_cast = matmul(transpose_x = attn_weights_253_transpose_x_0, transpose_y = attn_weights_253_transpose_y_0, x = var_6382_cast, y = var_6384_cast)[name = tensor("attn_weights_253_cast")]; + tensor attn_weights_255_cast = mul(x = attn_weights_253_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_255_cast")]; + tensor var_6390_cast = softmax(axis = var_4927, x = attn_weights_255_cast)[name = tensor("op_6390_cast")]; + tensor attn_127_transpose_x_0 = const()[name = tensor("attn_127_transpose_x_0"), val = tensor(false)]; + tensor attn_127_transpose_y_0 = const()[name = tensor("attn_127_transpose_y_0"), val = tensor(true)]; + tensor attn_127_cast = matmul(transpose_x = attn_127_transpose_x_0, transpose_y = attn_127_transpose_y_0, x = var_6386_cast, y = var_6390_cast)[name = tensor("attn_127_cast")]; + tensor var_6394 = const()[name = tensor("op_6394"), val = tensor([2, 1280, 1, -1])]; + tensor input_385_cast = reshape(shape = var_6394, x = attn_127_cast)[name = tensor("input_385_cast")]; + tensor var_6399 = const()[name = tensor("op_6399"), val = tensor([1, 1])]; + tensor var_6401 = const()[name = tensor("op_6401"), val = tensor([1, 1])]; + tensor var_6403_pad_type_0 = const()[name = tensor("op_6403_pad_type_0"), val = tensor("custom")]; + tensor var_6403_pad_0 = const()[name = tensor("op_6403_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554265024))), lut = tensor([-0x1.37cp-8, 0x1.364p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554469888)))]; + tensor var_6403_cast = conv(bias = mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_6401, groups = var_4943, pad = var_6403_pad_0, pad_type = var_6403_pad_type_0, strides = var_6399, weight = mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_385_cast)[name = tensor("op_6403_cast")]; + tensor inputs_191_cast = add(x = var_6403_cast, y = inputs_189_cast)[name = tensor("inputs_191_cast")]; + tensor var_6407 = const()[name = tensor("op_6407"), val = tensor([1])]; + tensor channels_mean_191_cast = reduce_mean(axes = var_6407, keep_dims = var_4938, x = inputs_191_cast)[name = tensor("channels_mean_191_cast")]; + tensor zero_mean_191_cast = sub(x = inputs_191_cast, y = channels_mean_191_cast)[name = tensor("zero_mean_191_cast")]; + tensor zero_mean_sq_191_cast = mul(x = zero_mean_191_cast, y = zero_mean_191_cast)[name = tensor("zero_mean_sq_191_cast")]; + tensor var_6411 = const()[name = 
tensor("op_6411"), val = tensor([1])]; + tensor var_6412_cast = reduce_mean(axes = var_6411, keep_dims = var_4938, x = zero_mean_sq_191_cast)[name = tensor("op_6412_cast")]; + tensor var_6413_to_fp16 = const()[name = tensor("op_6413_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6414_cast = add(x = var_6412_cast, y = var_6413_to_fp16)[name = tensor("op_6414_cast")]; + tensor denom_191_epsilon_0_to_fp16 = const()[name = tensor("denom_191_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_191_cast = rsqrt(epsilon = denom_191_epsilon_0_to_fp16, x = var_6414_cast)[name = tensor("denom_191_cast")]; + tensor out_191_cast = mul(x = zero_mean_191_cast, y = denom_191_cast)[name = tensor("out_191_cast")]; + tensor var_6418_to_fp16 = const()[name = tensor("op_6418_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554472512)))]; + tensor var_6419_cast = add(x = out_191_cast, y = var_6418_to_fp16)[name = tensor("op_6419_cast")]; + tensor var_6421_to_fp16 = const()[name = tensor("op_6421_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554475136)))]; + tensor input_387_cast = mul(x = var_6419_cast, y = var_6421_to_fp16)[name = tensor("input_387_cast")]; + tensor var_6429 = const()[name = tensor("op_6429"), val = tensor([1, 1])]; + tensor var_6431 = const()[name = tensor("op_6431"), val = tensor([1, 1])]; + tensor var_6433_pad_type_0 = const()[name = tensor("op_6433_pad_type_0"), val = tensor("custom")]; + tensor var_6433_pad_0 = const()[name = tensor("op_6433_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554477760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(561031424))), name = tensor("mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(561031552)))]; + tensor var_6433_cast = conv(bias = mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_6431, groups = var_4943, pad = var_6433_pad_0, pad_type = var_6433_pad_type_0, strides = var_6429, weight = mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_387_cast)[name = tensor("op_6433_cast")]; + tensor var_6434_split_sizes_0 = const()[name = tensor("op_6434_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6434_axis_0 = const()[name = tensor("op_6434_axis_0"), val = tensor(1)]; + tensor var_6434_cast_0, tensor var_6434_cast_1 = split(axis = var_6434_axis_0, split_sizes = var_6434_split_sizes_0, x = var_6433_cast)[name = tensor("op_6434_cast")]; + tensor var_6436_mode_0 = const()[name = tensor("op_6436_mode_0"), val = tensor("EXACT")]; + tensor var_6436_cast = gelu(mode = var_6436_mode_0, x = var_6434_cast_1)[name = tensor("op_6436_cast")]; + tensor input_389_cast = mul(x = var_6434_cast_0, y = var_6436_cast)[name = tensor("input_389_cast")]; + tensor var_6440 = const()[name = tensor("op_6440"), val = tensor([1, 1])]; + tensor var_6442 = const()[name = tensor("op_6442"), val = tensor([1, 
1])]; + tensor var_6444_pad_type_0 = const()[name = tensor("op_6444_pad_type_0"), val = tensor("custom")]; + tensor var_6444_pad_0 = const()[name = tensor("op_6444_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(561052096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(564328960))), name = tensor("mid_block_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(564329088)))]; + tensor var_6444_cast = conv(bias = mid_block_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_6442, groups = var_4943, pad = var_6444_pad_0, pad_type = var_6444_pad_type_0, strides = var_6440, weight = mid_block_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_389_cast)[name = tensor("op_6444_cast")]; + tensor inputs_193_cast = add(x = var_6444_cast, y = inputs_191_cast)[name = tensor("inputs_193_cast")]; + tensor var_6454 = const()[name = tensor("op_6454"), val = tensor([1])]; + tensor channels_mean_193_cast = reduce_mean(axes = var_6454, keep_dims = var_4938, x = inputs_193_cast)[name = tensor("channels_mean_193_cast")]; + tensor zero_mean_193_cast = sub(x = inputs_193_cast, y = channels_mean_193_cast)[name = tensor("zero_mean_193_cast")]; + tensor zero_mean_sq_193_cast = mul(x = zero_mean_193_cast, y = zero_mean_193_cast)[name = tensor("zero_mean_sq_193_cast")]; + tensor var_6458 = const()[name = tensor("op_6458"), val = tensor([1])]; + tensor var_6459_cast = reduce_mean(axes = var_6458, keep_dims = var_4938, x = zero_mean_sq_193_cast)[name = tensor("op_6459_cast")]; + tensor var_6460_to_fp16 = const()[name = tensor("op_6460_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6461_cast = add(x = var_6459_cast, y = var_6460_to_fp16)[name = tensor("op_6461_cast")]; + tensor denom_193_epsilon_0_to_fp16 = const()[name = tensor("denom_193_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_193_cast = rsqrt(epsilon = denom_193_epsilon_0_to_fp16, x = var_6461_cast)[name = tensor("denom_193_cast")]; + tensor out_193_cast = mul(x = zero_mean_193_cast, y = denom_193_cast)[name = tensor("out_193_cast")]; + tensor var_6465_to_fp16 = const()[name = tensor("op_6465_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(564331712)))]; + tensor var_6466_cast = add(x = out_193_cast, y = var_6465_to_fp16)[name = tensor("op_6466_cast")]; + tensor var_6468_to_fp16 = const()[name = tensor("op_6468_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(564334336)))]; + tensor hidden_states_257_cast = mul(x = var_6466_cast, y = var_6468_to_fp16)[name = tensor("hidden_states_257_cast")]; + tensor var_6475 = const()[name = tensor("op_6475"), val = tensor([1, 1])]; + tensor var_6477 = const()[name = tensor("op_6477"), val = tensor([1, 1])]; + tensor q_129_pad_type_0 = const()[name = tensor("q_129_pad_type_0"), val = tensor("custom")]; + tensor q_129_pad_0 = const()[name = tensor("q_129_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
mid_block_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(564336960))), lut = tensor([-0x1.32cp-5, -0x1.6ecp-7, 0x1.738p-7, 0x1.338p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_129_cast = conv(dilations = var_6477, groups = var_4943, pad = q_129_pad_0, pad_type = q_129_pad_type_0, strides = var_6475, weight = mid_block_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_257_cast)[name = tensor("q_129_cast")]; + tensor var_6481 = const()[name = tensor("op_6481"), val = tensor([1, 1])]; + tensor var_6483 = const()[name = tensor("op_6483"), val = tensor([1, 1])]; + tensor k_129_pad_type_0 = const()[name = tensor("k_129_pad_type_0"), val = tensor("custom")]; + tensor k_129_pad_0 = const()[name = tensor("k_129_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(564746624))), lut = tensor([-0x1.328p-5, -0x1.71p-7, 0x1.724p-7, 0x1.32cp-5]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_129_cast = conv(dilations = var_6483, groups = var_4943, pad = k_129_pad_0, pad_type = k_129_pad_type_0, strides = var_6481, weight = mid_block_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_257_cast)[name = tensor("k_129_cast")]; + tensor var_6487 = const()[name = tensor("op_6487"), val = tensor([1, 1])]; + tensor var_6489 = const()[name = tensor("op_6489"), val = tensor([1, 1])]; + tensor v_129_pad_type_0 = const()[name = tensor("v_129_pad_type_0"), val = tensor("custom")]; + tensor v_129_pad_0 = const()[name = tensor("v_129_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(565156288))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(565975552))), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_129_cast = conv(dilations = var_6489, groups = var_4943, pad = v_129_pad_0, pad_type = v_129_pad_type_0, strides = var_6487, weight = mid_block_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_257_cast)[name = tensor("v_129_cast")]; + tensor var_6493 = const()[name = tensor("op_6493"), val = tensor([2, 20, 64, -1])]; + tensor var_6494_cast = reshape(shape = var_6493, x = q_129_cast)[name = tensor("op_6494_cast")]; + tensor var_6495 = const()[name = tensor("op_6495"), val = tensor([2, 20, 64, -1])]; + tensor var_6496_cast = reshape(shape = var_6495, x = k_129_cast)[name = tensor("op_6496_cast")]; + tensor var_6497 = const()[name = tensor("op_6497"), val = tensor([2, 20, 64, -1])]; + tensor var_6498_cast = reshape(shape = var_6497, x = v_129_cast)[name = tensor("op_6498_cast")]; + tensor attn_weights_257_transpose_x_0 = const()[name = tensor("attn_weights_257_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_257_transpose_y_0 = 
const()[name = tensor("attn_weights_257_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_257_cast = matmul(transpose_x = attn_weights_257_transpose_x_0, transpose_y = attn_weights_257_transpose_y_0, x = var_6494_cast, y = var_6496_cast)[name = tensor("attn_weights_257_cast")]; + tensor attn_weights_259_cast = mul(x = attn_weights_257_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_259_cast")]; + tensor var_6502_cast = softmax(axis = var_4927, x = attn_weights_259_cast)[name = tensor("op_6502_cast")]; + tensor attn_129_transpose_x_0 = const()[name = tensor("attn_129_transpose_x_0"), val = tensor(false)]; + tensor attn_129_transpose_y_0 = const()[name = tensor("attn_129_transpose_y_0"), val = tensor(true)]; + tensor attn_129_cast = matmul(transpose_x = attn_129_transpose_x_0, transpose_y = attn_129_transpose_y_0, x = var_6498_cast, y = var_6502_cast)[name = tensor("attn_129_cast")]; + tensor var_6506 = const()[name = tensor("op_6506"), val = tensor([2, 1280, 1, -1])]; + tensor input_391_cast = reshape(shape = var_6506, x = attn_129_cast)[name = tensor("input_391_cast")]; + tensor var_6511 = const()[name = tensor("op_6511"), val = tensor([1, 1])]; + tensor var_6513 = const()[name = tensor("op_6513"), val = tensor([1, 1])]; + tensor var_6515_pad_type_0 = const()[name = tensor("op_6515_pad_type_0"), val = tensor("custom")]; + tensor var_6515_pad_0 = const()[name = tensor("op_6515_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(565975680))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(566794944))), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(566795072)))]; + tensor var_6515_cast = conv(bias = mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_6513, groups = var_4943, pad = var_6515_pad_0, pad_type = var_6515_pad_type_0, strides = var_6511, weight = mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_391_cast)[name = tensor("op_6515_cast")]; + tensor inputs_195_cast = add(x = var_6515_cast, y = inputs_193_cast)[name = tensor("inputs_195_cast")]; + tensor var_6519 = const()[name = tensor("op_6519"), val = tensor([1])]; + tensor channels_mean_195_cast = reduce_mean(axes = var_6519, keep_dims = var_4938, x = inputs_195_cast)[name = tensor("channels_mean_195_cast")]; + tensor zero_mean_195_cast = sub(x = inputs_195_cast, y = channels_mean_195_cast)[name = tensor("zero_mean_195_cast")]; + tensor zero_mean_sq_195_cast = mul(x = zero_mean_195_cast, y = zero_mean_195_cast)[name = tensor("zero_mean_sq_195_cast")]; + tensor var_6523 = const()[name = tensor("op_6523"), val = tensor([1])]; + tensor var_6524_cast = reduce_mean(axes = var_6523, keep_dims = var_4938, x = zero_mean_sq_195_cast)[name = tensor("op_6524_cast")]; + tensor var_6525_to_fp16 = const()[name = tensor("op_6525_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6526_cast = add(x = var_6524_cast, y = var_6525_to_fp16)[name = 
tensor("op_6526_cast")]; + tensor denom_195_epsilon_0_to_fp16 = const()[name = tensor("denom_195_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_195_cast = rsqrt(epsilon = denom_195_epsilon_0_to_fp16, x = var_6526_cast)[name = tensor("denom_195_cast")]; + tensor out_195_cast = mul(x = zero_mean_195_cast, y = denom_195_cast)[name = tensor("out_195_cast")]; + tensor var_6530_to_fp16 = const()[name = tensor("op_6530_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(566797696)))]; + tensor var_6531_cast = add(x = out_195_cast, y = var_6530_to_fp16)[name = tensor("op_6531_cast")]; + tensor var_6533_to_fp16 = const()[name = tensor("op_6533_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(566800320)))]; + tensor hidden_states_259_cast = mul(x = var_6531_cast, y = var_6533_to_fp16)[name = tensor("hidden_states_259_cast")]; + tensor var_6540 = const()[name = tensor("op_6540"), val = tensor([1, 1])]; + tensor var_6542 = const()[name = tensor("op_6542"), val = tensor([1, 1])]; + tensor q_131_pad_type_0 = const()[name = tensor("q_131_pad_type_0"), val = tensor("custom")]; + tensor q_131_pad_0 = const()[name = tensor("q_131_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(566802944))), lut = tensor([-0x1.60cp-7, 0x1.604p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_131_cast = conv(dilations = var_6542, groups = var_4943, pad = q_131_pad_0, pad_type = q_131_pad_type_0, strides = var_6540, weight = mid_block_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_259_cast)[name = tensor("q_131_cast")]; + tensor var_6546 = const()[name = tensor("op_6546"), val = tensor([1, 1])]; + tensor var_6548 = const()[name = tensor("op_6548"), val = tensor([1, 1])]; + tensor k_131_pad_type_0 = const()[name = tensor("k_131_pad_type_0"), val = tensor("custom")]; + tensor k_131_pad_0 = const()[name = tensor("k_131_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567007808))), lut = tensor([-0x1.b4p-8, 0x1.b3cp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_131_cast = conv(dilations = var_6548, groups = var_4943, pad = k_131_pad_0, pad_type = k_131_pad_type_0, strides = var_6546, weight = mid_block_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_131_cast")]; + tensor var_6552 = const()[name = tensor("op_6552"), val = tensor([1, 1])]; + tensor var_6554 = const()[name = tensor("op_6554"), val = tensor([1, 1])]; + tensor v_131_pad_type_0 = const()[name = tensor("v_131_pad_type_0"), val = tensor("custom")]; + tensor v_131_pad_0 = const()[name = tensor("v_131_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567335552))), lut = 
tensor([-0x1.e54p-8, 0x1.e38p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_131_cast = conv(dilations = var_6554, groups = var_4943, pad = v_131_pad_0, pad_type = v_131_pad_type_0, strides = var_6552, weight = mid_block_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_131_cast")]; + tensor var_6558 = const()[name = tensor("op_6558"), val = tensor([2, 20, 64, -1])]; + tensor var_6559_cast = reshape(shape = var_6558, x = q_131_cast)[name = tensor("op_6559_cast")]; + tensor var_6560 = const()[name = tensor("op_6560"), val = tensor([2, 20, 64, -1])]; + tensor var_6561_cast = reshape(shape = var_6560, x = k_131_cast)[name = tensor("op_6561_cast")]; + tensor var_6562 = const()[name = tensor("op_6562"), val = tensor([2, 20, 64, -1])]; + tensor var_6563_cast = reshape(shape = var_6562, x = v_131_cast)[name = tensor("op_6563_cast")]; + tensor attn_weights_261_transpose_x_0 = const()[name = tensor("attn_weights_261_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_261_transpose_y_0 = const()[name = tensor("attn_weights_261_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_261_cast = matmul(transpose_x = attn_weights_261_transpose_x_0, transpose_y = attn_weights_261_transpose_y_0, x = var_6559_cast, y = var_6561_cast)[name = tensor("attn_weights_261_cast")]; + tensor attn_weights_263_cast = mul(x = attn_weights_261_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_263_cast")]; + tensor var_6567_cast = softmax(axis = var_4927, x = attn_weights_263_cast)[name = tensor("op_6567_cast")]; + tensor attn_131_transpose_x_0 = const()[name = tensor("attn_131_transpose_x_0"), val = tensor(false)]; + tensor attn_131_transpose_y_0 = const()[name = tensor("attn_131_transpose_y_0"), val = tensor(true)]; + tensor attn_131_cast = matmul(transpose_x = attn_131_transpose_x_0, transpose_y = attn_131_transpose_y_0, x = var_6563_cast, y = var_6567_cast)[name = tensor("attn_131_cast")]; + tensor var_6571 = const()[name = tensor("op_6571"), val = tensor([2, 1280, 1, -1])]; + tensor input_393_cast = reshape(shape = var_6571, x = attn_131_cast)[name = tensor("input_393_cast")]; + tensor var_6576 = const()[name = tensor("op_6576"), val = tensor([1, 1])]; + tensor var_6578 = const()[name = tensor("op_6578"), val = tensor([1, 1])]; + tensor var_6580_pad_type_0 = const()[name = tensor("op_6580_pad_type_0"), val = tensor("custom")]; + tensor var_6580_pad_0 = const()[name = tensor("op_6580_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567663296))), lut = tensor([-0x1.42p-8, 0x1.41cp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567868160)))]; + tensor var_6580_cast = conv(bias = mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_6578, groups = var_4943, pad = var_6580_pad_0, pad_type = var_6580_pad_type_0, strides = var_6576, weight = 
mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_393_cast)[name = tensor("op_6580_cast")]; + tensor inputs_197_cast = add(x = var_6580_cast, y = inputs_195_cast)[name = tensor("inputs_197_cast")]; + tensor var_6584 = const()[name = tensor("op_6584"), val = tensor([1])]; + tensor channels_mean_197_cast = reduce_mean(axes = var_6584, keep_dims = var_4938, x = inputs_197_cast)[name = tensor("channels_mean_197_cast")]; + tensor zero_mean_197_cast = sub(x = inputs_197_cast, y = channels_mean_197_cast)[name = tensor("zero_mean_197_cast")]; + tensor zero_mean_sq_197_cast = mul(x = zero_mean_197_cast, y = zero_mean_197_cast)[name = tensor("zero_mean_sq_197_cast")]; + tensor var_6588 = const()[name = tensor("op_6588"), val = tensor([1])]; + tensor var_6589_cast = reduce_mean(axes = var_6588, keep_dims = var_4938, x = zero_mean_sq_197_cast)[name = tensor("op_6589_cast")]; + tensor var_6590_to_fp16 = const()[name = tensor("op_6590_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6591_cast = add(x = var_6589_cast, y = var_6590_to_fp16)[name = tensor("op_6591_cast")]; + tensor denom_197_epsilon_0_to_fp16 = const()[name = tensor("denom_197_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_197_cast = rsqrt(epsilon = denom_197_epsilon_0_to_fp16, x = var_6591_cast)[name = tensor("denom_197_cast")]; + tensor out_197_cast = mul(x = zero_mean_197_cast, y = denom_197_cast)[name = tensor("out_197_cast")]; + tensor var_6595_to_fp16 = const()[name = tensor("op_6595_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567870784)))]; + tensor var_6596_cast = add(x = out_197_cast, y = var_6595_to_fp16)[name = tensor("op_6596_cast")]; + tensor var_6598_to_fp16 = const()[name = tensor("op_6598_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567873408)))]; + tensor input_395_cast = mul(x = var_6596_cast, y = var_6598_to_fp16)[name = tensor("input_395_cast")]; + tensor var_6606 = const()[name = tensor("op_6606"), val = tensor([1, 1])]; + tensor var_6608 = const()[name = tensor("op_6608"), val = tensor([1, 1])]; + tensor var_6610_pad_type_0 = const()[name = tensor("op_6610_pad_type_0"), val = tensor("custom")]; + tensor var_6610_pad_0 = const()[name = tensor("op_6610_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567876032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574429696))), name = tensor("mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574429824)))]; + tensor var_6610_cast = conv(bias = mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_6608, groups = var_4943, pad = var_6610_pad_0, pad_type = var_6610_pad_type_0, strides = var_6606, weight = mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_395_cast)[name = tensor("op_6610_cast")]; + tensor var_6611_split_sizes_0 = const()[name = 
tensor("op_6611_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6611_axis_0 = const()[name = tensor("op_6611_axis_0"), val = tensor(1)]; + tensor var_6611_cast_0, tensor var_6611_cast_1 = split(axis = var_6611_axis_0, split_sizes = var_6611_split_sizes_0, x = var_6610_cast)[name = tensor("op_6611_cast")]; + tensor var_6613_mode_0 = const()[name = tensor("op_6613_mode_0"), val = tensor("EXACT")]; + tensor var_6613_cast = gelu(mode = var_6613_mode_0, x = var_6611_cast_1)[name = tensor("op_6613_cast")]; + tensor input_397_cast = mul(x = var_6611_cast_0, y = var_6613_cast)[name = tensor("input_397_cast")]; + tensor var_6617 = const()[name = tensor("op_6617"), val = tensor([1, 1])]; + tensor var_6619 = const()[name = tensor("op_6619"), val = tensor([1, 1])]; + tensor var_6621_pad_type_0 = const()[name = tensor("op_6621_pad_type_0"), val = tensor("custom")]; + tensor var_6621_pad_0 = const()[name = tensor("op_6621_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574450368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(577727232))), name = tensor("mid_block_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(577727360)))]; + tensor var_6621_cast = conv(bias = mid_block_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_6619, groups = var_4943, pad = var_6621_pad_0, pad_type = var_6621_pad_type_0, strides = var_6617, weight = mid_block_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_397_cast)[name = tensor("op_6621_cast")]; + tensor inputs_199_cast = add(x = var_6621_cast, y = inputs_197_cast)[name = tensor("inputs_199_cast")]; + tensor var_6631 = const()[name = tensor("op_6631"), val = tensor([1])]; + tensor channels_mean_199_cast = reduce_mean(axes = var_6631, keep_dims = var_4938, x = inputs_199_cast)[name = tensor("channels_mean_199_cast")]; + tensor zero_mean_199_cast = sub(x = inputs_199_cast, y = channels_mean_199_cast)[name = tensor("zero_mean_199_cast")]; + tensor zero_mean_sq_199_cast = mul(x = zero_mean_199_cast, y = zero_mean_199_cast)[name = tensor("zero_mean_sq_199_cast")]; + tensor var_6635 = const()[name = tensor("op_6635"), val = tensor([1])]; + tensor var_6636_cast = reduce_mean(axes = var_6635, keep_dims = var_4938, x = zero_mean_sq_199_cast)[name = tensor("op_6636_cast")]; + tensor var_6637_to_fp16 = const()[name = tensor("op_6637_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6638_cast = add(x = var_6636_cast, y = var_6637_to_fp16)[name = tensor("op_6638_cast")]; + tensor denom_199_epsilon_0_to_fp16 = const()[name = tensor("denom_199_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_199_cast = rsqrt(epsilon = denom_199_epsilon_0_to_fp16, x = var_6638_cast)[name = tensor("denom_199_cast")]; + tensor out_199_cast = mul(x = zero_mean_199_cast, y = denom_199_cast)[name = tensor("out_199_cast")]; + tensor var_6642_to_fp16 = const()[name = tensor("op_6642_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(577729984)))]; + tensor var_6643_cast = add(x = out_199_cast, y = var_6642_to_fp16)[name = tensor("op_6643_cast")]; + tensor var_6645_to_fp16 = const()[name = tensor("op_6645_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(577732608)))]; + tensor hidden_states_263_cast = mul(x = var_6643_cast, y = var_6645_to_fp16)[name = tensor("hidden_states_263_cast")]; + tensor var_6652 = const()[name = tensor("op_6652"), val = tensor([1, 1])]; + tensor var_6654 = const()[name = tensor("op_6654"), val = tensor([1, 1])]; + tensor q_133_pad_type_0 = const()[name = tensor("q_133_pad_type_0"), val = tensor("custom")]; + tensor q_133_pad_0 = const()[name = tensor("q_133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(577735232))), lut = tensor([-0x1.338p-5, -0x1.718p-7, 0x1.724p-7, 0x1.334p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_133_cast = conv(dilations = var_6654, groups = var_4943, pad = q_133_pad_0, pad_type = q_133_pad_type_0, strides = var_6652, weight = mid_block_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_263_cast)[name = tensor("q_133_cast")]; + tensor var_6658 = const()[name = tensor("op_6658"), val = tensor([1, 1])]; + tensor var_6660 = const()[name = tensor("op_6660"), val = tensor([1, 1])]; + tensor k_133_pad_type_0 = const()[name = tensor("k_133_pad_type_0"), val = tensor("custom")]; + tensor k_133_pad_0 = const()[name = tensor("k_133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(578144896))), lut = tensor([-0x1.318p-5, -0x1.6e8p-7, 0x1.734p-7, 0x1.328p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_133_cast = conv(dilations = var_6660, groups = var_4943, pad = k_133_pad_0, pad_type = k_133_pad_type_0, strides = var_6658, weight = mid_block_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_263_cast)[name = tensor("k_133_cast")]; + tensor var_6664 = const()[name = tensor("op_6664"), val = tensor([1, 1])]; + tensor var_6666 = const()[name = tensor("op_6666"), val = tensor([1, 1])]; + tensor v_133_pad_type_0 = const()[name = tensor("v_133_pad_type_0"), val = tensor("custom")]; + tensor v_133_pad_0 = const()[name = tensor("v_133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(578554560))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(579373824))), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_133_cast = conv(dilations = var_6666, groups = var_4943, pad = v_133_pad_0, pad_type = v_133_pad_type_0, strides = var_6664, weight = mid_block_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = 
hidden_states_263_cast)[name = tensor("v_133_cast")]; + tensor var_6670 = const()[name = tensor("op_6670"), val = tensor([2, 20, 64, -1])]; + tensor var_6671_cast = reshape(shape = var_6670, x = q_133_cast)[name = tensor("op_6671_cast")]; + tensor var_6672 = const()[name = tensor("op_6672"), val = tensor([2, 20, 64, -1])]; + tensor var_6673_cast = reshape(shape = var_6672, x = k_133_cast)[name = tensor("op_6673_cast")]; + tensor var_6674 = const()[name = tensor("op_6674"), val = tensor([2, 20, 64, -1])]; + tensor var_6675_cast = reshape(shape = var_6674, x = v_133_cast)[name = tensor("op_6675_cast")]; + tensor attn_weights_265_transpose_x_0 = const()[name = tensor("attn_weights_265_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_265_transpose_y_0 = const()[name = tensor("attn_weights_265_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_265_cast = matmul(transpose_x = attn_weights_265_transpose_x_0, transpose_y = attn_weights_265_transpose_y_0, x = var_6671_cast, y = var_6673_cast)[name = tensor("attn_weights_265_cast")]; + tensor attn_weights_267_cast = mul(x = attn_weights_265_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_267_cast")]; + tensor var_6679_cast = softmax(axis = var_4927, x = attn_weights_267_cast)[name = tensor("op_6679_cast")]; + tensor attn_133_transpose_x_0 = const()[name = tensor("attn_133_transpose_x_0"), val = tensor(false)]; + tensor attn_133_transpose_y_0 = const()[name = tensor("attn_133_transpose_y_0"), val = tensor(true)]; + tensor attn_133_cast = matmul(transpose_x = attn_133_transpose_x_0, transpose_y = attn_133_transpose_y_0, x = var_6675_cast, y = var_6679_cast)[name = tensor("attn_133_cast")]; + tensor var_6683 = const()[name = tensor("op_6683"), val = tensor([2, 1280, 1, -1])]; + tensor input_399_cast = reshape(shape = var_6683, x = attn_133_cast)[name = tensor("input_399_cast")]; + tensor var_6688 = const()[name = tensor("op_6688"), val = tensor([1, 1])]; + tensor var_6690 = const()[name = tensor("op_6690"), val = tensor([1, 1])]; + tensor var_6692_pad_type_0 = const()[name = tensor("op_6692_pad_type_0"), val = tensor("custom")]; + tensor var_6692_pad_0 = const()[name = tensor("op_6692_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(579373952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(580193216))), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(580193344)))]; + tensor var_6692_cast = conv(bias = mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_6690, groups = var_4943, pad = var_6692_pad_0, pad_type = var_6692_pad_type_0, strides = var_6688, weight = mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_399_cast)[name = tensor("op_6692_cast")]; + tensor inputs_201_cast = add(x = var_6692_cast, y = inputs_199_cast)[name = tensor("inputs_201_cast")]; + tensor var_6696 = const()[name = tensor("op_6696"), val = tensor([1])]; + tensor 
channels_mean_201_cast = reduce_mean(axes = var_6696, keep_dims = var_4938, x = inputs_201_cast)[name = tensor("channels_mean_201_cast")]; + tensor zero_mean_201_cast = sub(x = inputs_201_cast, y = channels_mean_201_cast)[name = tensor("zero_mean_201_cast")]; + tensor zero_mean_sq_201_cast = mul(x = zero_mean_201_cast, y = zero_mean_201_cast)[name = tensor("zero_mean_sq_201_cast")]; + tensor var_6700 = const()[name = tensor("op_6700"), val = tensor([1])]; + tensor var_6701_cast = reduce_mean(axes = var_6700, keep_dims = var_4938, x = zero_mean_sq_201_cast)[name = tensor("op_6701_cast")]; + tensor var_6702_to_fp16 = const()[name = tensor("op_6702_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6703_cast = add(x = var_6701_cast, y = var_6702_to_fp16)[name = tensor("op_6703_cast")]; + tensor denom_201_epsilon_0_to_fp16 = const()[name = tensor("denom_201_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_201_cast = rsqrt(epsilon = denom_201_epsilon_0_to_fp16, x = var_6703_cast)[name = tensor("denom_201_cast")]; + tensor out_201_cast = mul(x = zero_mean_201_cast, y = denom_201_cast)[name = tensor("out_201_cast")]; + tensor var_6707_to_fp16 = const()[name = tensor("op_6707_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(580195968)))]; + tensor var_6708_cast = add(x = out_201_cast, y = var_6707_to_fp16)[name = tensor("op_6708_cast")]; + tensor var_6710_to_fp16 = const()[name = tensor("op_6710_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(580198592)))]; + tensor hidden_states_265_cast = mul(x = var_6708_cast, y = var_6710_to_fp16)[name = tensor("hidden_states_265_cast")]; + tensor var_6717 = const()[name = tensor("op_6717"), val = tensor([1, 1])]; + tensor var_6719 = const()[name = tensor("op_6719"), val = tensor([1, 1])]; + tensor q_135_pad_type_0 = const()[name = tensor("q_135_pad_type_0"), val = tensor("custom")]; + tensor q_135_pad_0 = const()[name = tensor("q_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(580201216))), lut = tensor([-0x1.5ccp-7, 0x1.5ccp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_135_cast = conv(dilations = var_6719, groups = var_4943, pad = q_135_pad_0, pad_type = q_135_pad_type_0, strides = var_6717, weight = mid_block_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_265_cast)[name = tensor("q_135_cast")]; + tensor var_6723 = const()[name = tensor("op_6723"), val = tensor([1, 1])]; + tensor var_6725 = const()[name = tensor("op_6725"), val = tensor([1, 1])]; + tensor k_135_pad_type_0 = const()[name = tensor("k_135_pad_type_0"), val = tensor("custom")]; + tensor k_135_pad_0 = const()[name = tensor("k_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(580406080))), lut = tensor([-0x1.aa4p-8, 0x1.abp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_135_cast = conv(dilations = var_6725, groups = var_4943, pad = k_135_pad_0, 
pad_type = k_135_pad_type_0, strides = var_6723, weight = mid_block_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_135_cast")]; + tensor var_6729 = const()[name = tensor("op_6729"), val = tensor([1, 1])]; + tensor var_6731 = const()[name = tensor("op_6731"), val = tensor([1, 1])]; + tensor v_135_pad_type_0 = const()[name = tensor("v_135_pad_type_0"), val = tensor("custom")]; + tensor v_135_pad_0 = const()[name = tensor("v_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(580733824))), lut = tensor([-0x1.cdcp-8, 0x1.cd4p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_135_cast = conv(dilations = var_6731, groups = var_4943, pad = v_135_pad_0, pad_type = v_135_pad_type_0, strides = var_6729, weight = mid_block_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_135_cast")]; + tensor var_6735 = const()[name = tensor("op_6735"), val = tensor([2, 20, 64, -1])]; + tensor var_6736_cast = reshape(shape = var_6735, x = q_135_cast)[name = tensor("op_6736_cast")]; + tensor var_6737 = const()[name = tensor("op_6737"), val = tensor([2, 20, 64, -1])]; + tensor var_6738_cast = reshape(shape = var_6737, x = k_135_cast)[name = tensor("op_6738_cast")]; + tensor var_6739 = const()[name = tensor("op_6739"), val = tensor([2, 20, 64, -1])]; + tensor var_6740_cast = reshape(shape = var_6739, x = v_135_cast)[name = tensor("op_6740_cast")]; + tensor attn_weights_269_transpose_x_0 = const()[name = tensor("attn_weights_269_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_269_transpose_y_0 = const()[name = tensor("attn_weights_269_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_269_cast = matmul(transpose_x = attn_weights_269_transpose_x_0, transpose_y = attn_weights_269_transpose_y_0, x = var_6736_cast, y = var_6738_cast)[name = tensor("attn_weights_269_cast")]; + tensor attn_weights_271_cast = mul(x = attn_weights_269_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_271_cast")]; + tensor var_6744_cast = softmax(axis = var_4927, x = attn_weights_271_cast)[name = tensor("op_6744_cast")]; + tensor attn_135_transpose_x_0 = const()[name = tensor("attn_135_transpose_x_0"), val = tensor(false)]; + tensor attn_135_transpose_y_0 = const()[name = tensor("attn_135_transpose_y_0"), val = tensor(true)]; + tensor attn_135_cast = matmul(transpose_x = attn_135_transpose_x_0, transpose_y = attn_135_transpose_y_0, x = var_6740_cast, y = var_6744_cast)[name = tensor("attn_135_cast")]; + tensor var_6748 = const()[name = tensor("op_6748"), val = tensor([2, 1280, 1, -1])]; + tensor input_401_cast = reshape(shape = var_6748, x = attn_135_cast)[name = tensor("input_401_cast")]; + tensor var_6753 = const()[name = tensor("op_6753"), val = tensor([1, 1])]; + tensor var_6755 = const()[name = tensor("op_6755"), val = tensor([1, 1])]; + tensor var_6757_pad_type_0 = const()[name = tensor("op_6757_pad_type_0"), val = tensor("custom")]; + tensor var_6757_pad_0 = const()[name = tensor("op_6757_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(581061568))), lut = tensor([-0x1.48cp-8, 0x1.49p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(581266432)))]; + tensor var_6757_cast = conv(bias = mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_6755, groups = var_4943, pad = var_6757_pad_0, pad_type = var_6757_pad_type_0, strides = var_6753, weight = mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_401_cast)[name = tensor("op_6757_cast")]; + tensor inputs_203_cast = add(x = var_6757_cast, y = inputs_201_cast)[name = tensor("inputs_203_cast")]; + tensor var_6761 = const()[name = tensor("op_6761"), val = tensor([1])]; + tensor channels_mean_203_cast = reduce_mean(axes = var_6761, keep_dims = var_4938, x = inputs_203_cast)[name = tensor("channels_mean_203_cast")]; + tensor zero_mean_203_cast = sub(x = inputs_203_cast, y = channels_mean_203_cast)[name = tensor("zero_mean_203_cast")]; + tensor zero_mean_sq_203_cast = mul(x = zero_mean_203_cast, y = zero_mean_203_cast)[name = tensor("zero_mean_sq_203_cast")]; + tensor var_6765 = const()[name = tensor("op_6765"), val = tensor([1])]; + tensor var_6766_cast = reduce_mean(axes = var_6765, keep_dims = var_4938, x = zero_mean_sq_203_cast)[name = tensor("op_6766_cast")]; + tensor var_6767_to_fp16 = const()[name = tensor("op_6767_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6768_cast = add(x = var_6766_cast, y = var_6767_to_fp16)[name = tensor("op_6768_cast")]; + tensor denom_203_epsilon_0_to_fp16 = const()[name = tensor("denom_203_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_203_cast = rsqrt(epsilon = denom_203_epsilon_0_to_fp16, x = var_6768_cast)[name = tensor("denom_203_cast")]; + tensor out_203_cast = mul(x = zero_mean_203_cast, y = denom_203_cast)[name = tensor("out_203_cast")]; + tensor var_6772_to_fp16 = const()[name = tensor("op_6772_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(581269056)))]; + tensor var_6773_cast = add(x = out_203_cast, y = var_6772_to_fp16)[name = tensor("op_6773_cast")]; + tensor var_6775_to_fp16 = const()[name = tensor("op_6775_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(581271680)))]; + tensor input_403_cast = mul(x = var_6773_cast, y = var_6775_to_fp16)[name = tensor("input_403_cast")]; + tensor var_6783 = const()[name = tensor("op_6783"), val = tensor([1, 1])]; + tensor var_6785 = const()[name = tensor("op_6785"), val = tensor([1, 1])]; + tensor var_6787_pad_type_0 = const()[name = tensor("op_6787_pad_type_0"), val = tensor("custom")]; + tensor var_6787_pad_0 = const()[name = tensor("op_6787_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(581274304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(587827968))), name = 
tensor("mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(587828096)))]; + tensor var_6787_cast = conv(bias = mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_6785, groups = var_4943, pad = var_6787_pad_0, pad_type = var_6787_pad_type_0, strides = var_6783, weight = mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_403_cast)[name = tensor("op_6787_cast")]; + tensor var_6788_split_sizes_0 = const()[name = tensor("op_6788_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6788_axis_0 = const()[name = tensor("op_6788_axis_0"), val = tensor(1)]; + tensor var_6788_cast_0, tensor var_6788_cast_1 = split(axis = var_6788_axis_0, split_sizes = var_6788_split_sizes_0, x = var_6787_cast)[name = tensor("op_6788_cast")]; + tensor var_6790_mode_0 = const()[name = tensor("op_6790_mode_0"), val = tensor("EXACT")]; + tensor var_6790_cast = gelu(mode = var_6790_mode_0, x = var_6788_cast_1)[name = tensor("op_6790_cast")]; + tensor input_405_cast = mul(x = var_6788_cast_0, y = var_6790_cast)[name = tensor("input_405_cast")]; + tensor var_6794 = const()[name = tensor("op_6794"), val = tensor([1, 1])]; + tensor var_6796 = const()[name = tensor("op_6796"), val = tensor([1, 1])]; + tensor var_6798_pad_type_0 = const()[name = tensor("op_6798_pad_type_0"), val = tensor("custom")]; + tensor var_6798_pad_0 = const()[name = tensor("op_6798_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(587848640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591125504))), name = tensor("mid_block_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591125632)))]; + tensor var_6798_cast = conv(bias = mid_block_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_6796, groups = var_4943, pad = var_6798_pad_0, pad_type = var_6798_pad_type_0, strides = var_6794, weight = mid_block_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_405_cast)[name = tensor("op_6798_cast")]; + tensor hidden_states_269_cast = add(x = var_6798_cast, y = inputs_203_cast)[name = tensor("hidden_states_269_cast")]; + tensor var_6800 = const()[name = tensor("op_6800"), val = tensor([2, 1280, 32, 32])]; + tensor input_407_cast = reshape(shape = var_6800, x = hidden_states_269_cast)[name = tensor("input_407_cast")]; + tensor var_6804 = const()[name = tensor("op_6804"), val = tensor([1, 1])]; + tensor var_6806 = const()[name = tensor("op_6806"), val = tensor([1, 1])]; + tensor hidden_states_271_pad_type_0 = const()[name = tensor("hidden_states_271_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_271_pad_0 = const()[name = 
tensor("hidden_states_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591128256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591947520))), name = tensor("mid_block_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591947648)))]; + tensor hidden_states_271_cast = conv(bias = mid_block_attentions_0_proj_out_bias_to_fp16, dilations = var_6806, groups = var_4943, pad = hidden_states_271_pad_0, pad_type = hidden_states_271_pad_type_0, strides = var_6804, weight = mid_block_attentions_0_proj_out_weight_to_fp16_palettized, x = input_407_cast)[name = tensor("hidden_states_271_cast")]; + tensor input_409_cast = add(x = hidden_states_271_cast, y = hidden_states_205_cast)[name = tensor("input_409_cast")]; + tensor reshape_76_shape_0 = const()[name = tensor("reshape_76_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_76_cast = reshape(shape = reshape_76_shape_0, x = input_409_cast)[name = tensor("reshape_76_cast")]; + tensor reduce_mean_57_axes_0 = const()[name = tensor("reduce_mean_57_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_57_keep_dims_0 = const()[name = tensor("reduce_mean_57_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_57_cast = reduce_mean(axes = reduce_mean_57_axes_0, keep_dims = reduce_mean_57_keep_dims_0, x = reshape_76_cast)[name = tensor("reduce_mean_57_cast")]; + tensor sub_38_cast = sub(x = reshape_76_cast, y = reduce_mean_57_cast)[name = tensor("sub_38_cast")]; + tensor square_19_cast = square(x = sub_38_cast)[name = tensor("square_19_cast")]; + tensor reduce_mean_59_axes_0 = const()[name = tensor("reduce_mean_59_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_59_keep_dims_0 = const()[name = tensor("reduce_mean_59_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_59_cast = reduce_mean(axes = reduce_mean_59_axes_0, keep_dims = reduce_mean_59_keep_dims_0, x = square_19_cast)[name = tensor("reduce_mean_59_cast")]; + tensor add_38_y_0_to_fp16 = const()[name = tensor("add_38_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_38_cast = add(x = reduce_mean_59_cast, y = add_38_y_0_to_fp16)[name = tensor("add_38_cast")]; + tensor sqrt_19_cast = sqrt(x = add_38_cast)[name = tensor("sqrt_19_cast")]; + tensor real_div_19_cast = real_div(x = sub_38_cast, y = sqrt_19_cast)[name = tensor("real_div_19_cast")]; + tensor reshape_77_shape_0 = const()[name = tensor("reshape_77_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_77_cast = reshape(shape = reshape_77_shape_0, x = real_div_19_cast)[name = tensor("reshape_77_cast")]; + tensor add_39_gamma_0_to_fp16 = const()[name = tensor("add_39_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591950272)))]; + tensor add_39_beta_0_to_fp16 = const()[name = tensor("add_39_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591952896)))]; + tensor add_39_epsilon_0_to_fp16 = const()[name = tensor("add_39_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_39_cast = batch_norm(beta = add_39_beta_0_to_fp16, 
epsilon = add_39_epsilon_0_to_fp16, gamma = add_39_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_77_cast)[name = tensor("add_39_cast")]; + tensor input_413_cast = silu(x = add_39_cast)[name = tensor("input_413_cast")]; + tensor var_6821 = const()[name = tensor("op_6821"), val = tensor([1, 1])]; + tensor var_6823 = const()[name = tensor("op_6823"), val = tensor([1, 1])]; + tensor hidden_states_273_pad_type_0 = const()[name = tensor("hidden_states_273_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_273_pad_0 = const()[name = tensor("hidden_states_273_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor mid_block_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591955520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(603014784))), name = tensor("mid_block_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor mid_block_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("mid_block_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(603014976)))]; + tensor hidden_states_273_cast = conv(bias = mid_block_resnets_1_conv1_bias_to_fp16, dilations = var_6823, groups = var_4943, pad = hidden_states_273_pad_0, pad_type = hidden_states_273_pad_type_0, strides = var_6821, weight = mid_block_resnets_1_conv1_weight_to_fp16_palettized, x = input_413_cast)[name = tensor("hidden_states_273_cast")]; + tensor var_6829 = const()[name = tensor("op_6829"), val = tensor([1, 1])]; + tensor var_6831 = const()[name = tensor("op_6831"), val = tensor([1, 1])]; + tensor temb_15_pad_type_0 = const()[name = tensor("temb_15_pad_type_0"), val = tensor("custom")]; + tensor temb_15_pad_0 = const()[name = tensor("temb_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(603017600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(603836864))), name = tensor("mid_block_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("mid_block_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(603836992)))]; + tensor temb_15_cast = conv(bias = mid_block_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_6831, groups = var_4943, pad = temb_15_pad_0, pad_type = temb_15_pad_type_0, strides = var_6829, weight = mid_block_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_15_cast")]; + tensor input_417_cast = add(x = hidden_states_273_cast, y = temb_15_cast)[name = tensor("input_417_cast")]; + tensor reshape_80_shape_0 = const()[name = tensor("reshape_80_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_80_cast = reshape(shape = reshape_80_shape_0, x = input_417_cast)[name = tensor("reshape_80_cast")]; + tensor reduce_mean_60_axes_0 = const()[name = tensor("reduce_mean_60_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_60_keep_dims_0 = const()[name = tensor("reduce_mean_60_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_60_cast = reduce_mean(axes = 
reduce_mean_60_axes_0, keep_dims = reduce_mean_60_keep_dims_0, x = reshape_80_cast)[name = tensor("reduce_mean_60_cast")]; + tensor sub_40_cast = sub(x = reshape_80_cast, y = reduce_mean_60_cast)[name = tensor("sub_40_cast")]; + tensor square_20_cast = square(x = sub_40_cast)[name = tensor("square_20_cast")]; + tensor reduce_mean_62_axes_0 = const()[name = tensor("reduce_mean_62_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_62_keep_dims_0 = const()[name = tensor("reduce_mean_62_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_62_cast = reduce_mean(axes = reduce_mean_62_axes_0, keep_dims = reduce_mean_62_keep_dims_0, x = square_20_cast)[name = tensor("reduce_mean_62_cast")]; + tensor add_40_y_0_to_fp16 = const()[name = tensor("add_40_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_40_cast = add(x = reduce_mean_62_cast, y = add_40_y_0_to_fp16)[name = tensor("add_40_cast")]; + tensor sqrt_20_cast = sqrt(x = add_40_cast)[name = tensor("sqrt_20_cast")]; + tensor real_div_20_cast = real_div(x = sub_40_cast, y = sqrt_20_cast)[name = tensor("real_div_20_cast")]; + tensor reshape_81_shape_0 = const()[name = tensor("reshape_81_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_81_cast = reshape(shape = reshape_81_shape_0, x = real_div_20_cast)[name = tensor("reshape_81_cast")]; + tensor add_41_gamma_0_to_fp16 = const()[name = tensor("add_41_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(603839616)))]; + tensor add_41_beta_0_to_fp16 = const()[name = tensor("add_41_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(603842240)))]; + tensor add_41_epsilon_0_to_fp16 = const()[name = tensor("add_41_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_41_cast = batch_norm(beta = add_41_beta_0_to_fp16, epsilon = add_41_epsilon_0_to_fp16, gamma = add_41_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_81_cast)[name = tensor("add_41_cast")]; + tensor input_421_cast = silu(x = add_41_cast)[name = tensor("input_421_cast")]; + tensor var_6841 = const()[name = tensor("op_6841"), val = tensor([1, 1])]; + tensor var_6843 = const()[name = tensor("op_6843"), val = tensor([1, 1])]; + tensor hidden_states_275_pad_type_0 = const()[name = tensor("hidden_states_275_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_275_pad_0 = const()[name = tensor("hidden_states_275_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor mid_block_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(603844864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(614904128))), name = tensor("mid_block_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor mid_block_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("mid_block_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(614904320)))]; + tensor hidden_states_275_cast = conv(bias = mid_block_resnets_1_conv2_bias_to_fp16, dilations = var_6843, groups = var_4943, pad = hidden_states_275_pad_0, pad_type = hidden_states_275_pad_type_0, strides = var_6841, weight = mid_block_resnets_1_conv2_weight_to_fp16_palettized, x = input_421_cast)[name = tensor("hidden_states_275_cast")]; + tensor hidden_states_277_cast = add(x = input_409_cast, y = 
hidden_states_275_cast)[name = tensor("hidden_states_277_cast")]; + tensor var_6849 = const()[name = tensor("op_6849"), val = tensor(3)]; + tensor var_6860 = const()[name = tensor("op_6860"), val = tensor(true)]; + tensor var_6865 = const()[name = tensor("op_6865"), val = tensor(1)]; + tensor input_423_interleave_0 = const()[name = tensor("input_423_interleave_0"), val = tensor(false)]; + tensor input_423_cast = concat(axis = var_6865, interleave = input_423_interleave_0, values = (hidden_states_277_cast, input_311_cast))[name = tensor("input_423_cast")]; + tensor reshape_84_shape_0 = const()[name = tensor("reshape_84_shape_0"), val = tensor([2, 32, 80, 32, 32])]; + tensor reshape_84_cast = reshape(shape = reshape_84_shape_0, x = input_423_cast)[name = tensor("reshape_84_cast")]; + tensor reduce_mean_63_axes_0 = const()[name = tensor("reduce_mean_63_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_63_keep_dims_0 = const()[name = tensor("reduce_mean_63_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_63_cast = reduce_mean(axes = reduce_mean_63_axes_0, keep_dims = reduce_mean_63_keep_dims_0, x = reshape_84_cast)[name = tensor("reduce_mean_63_cast")]; + tensor sub_42_cast = sub(x = reshape_84_cast, y = reduce_mean_63_cast)[name = tensor("sub_42_cast")]; + tensor square_21_cast = square(x = sub_42_cast)[name = tensor("square_21_cast")]; + tensor reduce_mean_65_axes_0 = const()[name = tensor("reduce_mean_65_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_65_keep_dims_0 = const()[name = tensor("reduce_mean_65_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_65_cast = reduce_mean(axes = reduce_mean_65_axes_0, keep_dims = reduce_mean_65_keep_dims_0, x = square_21_cast)[name = tensor("reduce_mean_65_cast")]; + tensor add_42_y_0_to_fp16 = const()[name = tensor("add_42_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_42_cast = add(x = reduce_mean_65_cast, y = add_42_y_0_to_fp16)[name = tensor("add_42_cast")]; + tensor sqrt_21_cast = sqrt(x = add_42_cast)[name = tensor("sqrt_21_cast")]; + tensor real_div_21_cast = real_div(x = sub_42_cast, y = sqrt_21_cast)[name = tensor("real_div_21_cast")]; + tensor reshape_85_shape_0 = const()[name = tensor("reshape_85_shape_0"), val = tensor([2, 2560, 32, 32])]; + tensor reshape_85_cast = reshape(shape = reshape_85_shape_0, x = real_div_21_cast)[name = tensor("reshape_85_cast")]; + tensor add_43_mean_0_to_fp16 = const()[name = tensor("add_43_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(614906944)))]; + tensor add_43_variance_0_to_fp16 = const()[name = tensor("add_43_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(614912128)))]; + tensor add_43_gamma_0_to_fp16 = const()[name = tensor("add_43_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(614917312)))]; + tensor add_43_beta_0_to_fp16 = const()[name = tensor("add_43_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(614922496)))]; + tensor add_43_epsilon_0_to_fp16 = const()[name = tensor("add_43_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_43_cast = batch_norm(beta = add_43_beta_0_to_fp16, epsilon = add_43_epsilon_0_to_fp16, gamma = add_43_gamma_0_to_fp16, mean = add_43_mean_0_to_fp16, variance = add_43_variance_0_to_fp16, x = reshape_85_cast)[name = tensor("add_43_cast")]; + tensor input_427_cast = silu(x = add_43_cast)[name = 
tensor("input_427_cast")]; + tensor var_6894 = const()[name = tensor("op_6894"), val = tensor([1, 1])]; + tensor var_6896 = const()[name = tensor("op_6896"), val = tensor([1, 1])]; + tensor hidden_states_279_pad_type_0 = const()[name = tensor("hidden_states_279_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_279_pad_0 = const()[name = tensor("hidden_states_279_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(614927680))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637046144))), name = tensor("up_blocks_0_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 3, 3])]; + tensor up_blocks_0_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637046336)))]; + tensor hidden_states_279_cast = conv(bias = up_blocks_0_resnets_0_conv1_bias_to_fp16, dilations = var_6896, groups = var_6865, pad = hidden_states_279_pad_0, pad_type = hidden_states_279_pad_type_0, strides = var_6894, weight = up_blocks_0_resnets_0_conv1_weight_to_fp16_palettized, x = input_427_cast)[name = tensor("hidden_states_279_cast")]; + tensor var_6902 = const()[name = tensor("op_6902"), val = tensor([1, 1])]; + tensor var_6904 = const()[name = tensor("op_6904"), val = tensor([1, 1])]; + tensor temb_17_pad_type_0 = const()[name = tensor("temb_17_pad_type_0"), val = tensor("custom")]; + tensor temb_17_pad_0 = const()[name = tensor("temb_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637048960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637868224))), name = tensor("up_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637868352)))]; + tensor temb_17_cast = conv(bias = up_blocks_0_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_6904, groups = var_6865, pad = temb_17_pad_0, pad_type = temb_17_pad_type_0, strides = var_6902, weight = up_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_17_cast")]; + tensor input_431_cast = add(x = hidden_states_279_cast, y = temb_17_cast)[name = tensor("input_431_cast")]; + tensor reshape_88_shape_0 = const()[name = tensor("reshape_88_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_88_cast = reshape(shape = reshape_88_shape_0, x = input_431_cast)[name = tensor("reshape_88_cast")]; + tensor reduce_mean_66_axes_0 = const()[name = tensor("reduce_mean_66_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_66_keep_dims_0 = const()[name = tensor("reduce_mean_66_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_66_cast = reduce_mean(axes = reduce_mean_66_axes_0, keep_dims = reduce_mean_66_keep_dims_0, x = reshape_88_cast)[name = tensor("reduce_mean_66_cast")]; + tensor sub_44_cast = sub(x = reshape_88_cast, y = reduce_mean_66_cast)[name = tensor("sub_44_cast")]; + 
tensor square_22_cast = square(x = sub_44_cast)[name = tensor("square_22_cast")]; + tensor reduce_mean_68_axes_0 = const()[name = tensor("reduce_mean_68_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_68_keep_dims_0 = const()[name = tensor("reduce_mean_68_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_68_cast = reduce_mean(axes = reduce_mean_68_axes_0, keep_dims = reduce_mean_68_keep_dims_0, x = square_22_cast)[name = tensor("reduce_mean_68_cast")]; + tensor add_44_y_0_to_fp16 = const()[name = tensor("add_44_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_44_cast = add(x = reduce_mean_68_cast, y = add_44_y_0_to_fp16)[name = tensor("add_44_cast")]; + tensor sqrt_22_cast = sqrt(x = add_44_cast)[name = tensor("sqrt_22_cast")]; + tensor real_div_22_cast = real_div(x = sub_44_cast, y = sqrt_22_cast)[name = tensor("real_div_22_cast")]; + tensor reshape_89_shape_0 = const()[name = tensor("reshape_89_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_89_cast = reshape(shape = reshape_89_shape_0, x = real_div_22_cast)[name = tensor("reshape_89_cast")]; + tensor add_45_gamma_0_to_fp16 = const()[name = tensor("add_45_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637870976)))]; + tensor add_45_beta_0_to_fp16 = const()[name = tensor("add_45_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637873600)))]; + tensor add_45_epsilon_0_to_fp16 = const()[name = tensor("add_45_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_45_cast = batch_norm(beta = add_45_beta_0_to_fp16, epsilon = add_45_epsilon_0_to_fp16, gamma = add_45_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_89_cast)[name = tensor("add_45_cast")]; + tensor input_435_cast = silu(x = add_45_cast)[name = tensor("input_435_cast")]; + tensor var_6914 = const()[name = tensor("op_6914"), val = tensor([1, 1])]; + tensor var_6916 = const()[name = tensor("op_6916"), val = tensor([1, 1])]; + tensor hidden_states_281_pad_type_0 = const()[name = tensor("hidden_states_281_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_281_pad_0 = const()[name = tensor("hidden_states_281_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637876224))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(648935488))), name = tensor("up_blocks_0_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor up_blocks_0_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(648935680)))]; + tensor hidden_states_281_cast = conv(bias = up_blocks_0_resnets_0_conv2_bias_to_fp16, dilations = var_6916, groups = var_6865, pad = hidden_states_281_pad_0, pad_type = hidden_states_281_pad_type_0, strides = var_6914, weight = up_blocks_0_resnets_0_conv2_weight_to_fp16_palettized, x = input_435_cast)[name = tensor("hidden_states_281_cast")]; + tensor var_6921 = const()[name = tensor("op_6921"), val = tensor([1, 1])]; + tensor var_6923 = const()[name = tensor("op_6923"), val = tensor([1, 1])]; + tensor x_5_pad_type_0 = const()[name = tensor("x_5_pad_type_0"), val = tensor("custom")]; + tensor x_5_pad_0 = const()[name = 
tensor("x_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_0_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(648938304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651395968))), name = tensor("up_blocks_0_resnets_0_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 1, 1])]; + tensor up_blocks_0_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651396160)))]; + tensor x_5_cast = conv(bias = up_blocks_0_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_6923, groups = var_6865, pad = x_5_pad_0, pad_type = x_5_pad_type_0, strides = var_6921, weight = up_blocks_0_resnets_0_conv_shortcut_weight_to_fp16_palettized, x = input_423_cast)[name = tensor("x_5_cast")]; + tensor hidden_states_283_cast = add(x = x_5_cast, y = hidden_states_281_cast)[name = tensor("hidden_states_283_cast")]; + tensor reshape_92_shape_0 = const()[name = tensor("reshape_92_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_92_cast = reshape(shape = reshape_92_shape_0, x = hidden_states_283_cast)[name = tensor("reshape_92_cast")]; + tensor reduce_mean_69_axes_0 = const()[name = tensor("reduce_mean_69_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_69_keep_dims_0 = const()[name = tensor("reduce_mean_69_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_69_cast = reduce_mean(axes = reduce_mean_69_axes_0, keep_dims = reduce_mean_69_keep_dims_0, x = reshape_92_cast)[name = tensor("reduce_mean_69_cast")]; + tensor sub_46_cast = sub(x = reshape_92_cast, y = reduce_mean_69_cast)[name = tensor("sub_46_cast")]; + tensor square_23_cast = square(x = sub_46_cast)[name = tensor("square_23_cast")]; + tensor reduce_mean_71_axes_0 = const()[name = tensor("reduce_mean_71_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_71_keep_dims_0 = const()[name = tensor("reduce_mean_71_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_71_cast = reduce_mean(axes = reduce_mean_71_axes_0, keep_dims = reduce_mean_71_keep_dims_0, x = square_23_cast)[name = tensor("reduce_mean_71_cast")]; + tensor add_46_y_0_to_fp16 = const()[name = tensor("add_46_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_46_cast = add(x = reduce_mean_71_cast, y = add_46_y_0_to_fp16)[name = tensor("add_46_cast")]; + tensor sqrt_23_cast = sqrt(x = add_46_cast)[name = tensor("sqrt_23_cast")]; + tensor real_div_23_cast = real_div(x = sub_46_cast, y = sqrt_23_cast)[name = tensor("real_div_23_cast")]; + tensor reshape_93_shape_0 = const()[name = tensor("reshape_93_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_93_cast = reshape(shape = reshape_93_shape_0, x = real_div_23_cast)[name = tensor("reshape_93_cast")]; + tensor add_47_gamma_0_to_fp16 = const()[name = tensor("add_47_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651398784)))]; + tensor add_47_beta_0_to_fp16 = const()[name = tensor("add_47_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651401408)))]; + tensor add_47_epsilon_0_to_fp16 = const()[name = tensor("add_47_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_47_cast = batch_norm(beta = add_47_beta_0_to_fp16, epsilon = add_47_epsilon_0_to_fp16, gamma 
= add_47_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_93_cast)[name = tensor("add_47_cast")]; + tensor var_6961 = const()[name = tensor("op_6961"), val = tensor([1, 1])]; + tensor var_6963 = const()[name = tensor("op_6963"), val = tensor([1, 1])]; + tensor hidden_states_285_pad_type_0 = const()[name = tensor("hidden_states_285_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_285_pad_0 = const()[name = tensor("hidden_states_285_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651404032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(652632896))), name = tensor("up_blocks_0_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(652633088)))]; + tensor hidden_states_285_cast = conv(bias = up_blocks_0_attentions_0_proj_in_bias_to_fp16, dilations = var_6963, groups = var_6865, pad = hidden_states_285_pad_0, pad_type = hidden_states_285_pad_type_0, strides = var_6961, weight = up_blocks_0_attentions_0_proj_in_weight_to_fp16_palettized, x = add_47_cast)[name = tensor("hidden_states_285_cast")]; + tensor var_6968 = const()[name = tensor("op_6968"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_205_cast = reshape(shape = var_6968, x = hidden_states_285_cast)[name = tensor("inputs_205_cast")]; + tensor var_6978 = const()[name = tensor("op_6978"), val = tensor([1])]; + tensor channels_mean_205_cast = reduce_mean(axes = var_6978, keep_dims = var_6860, x = inputs_205_cast)[name = tensor("channels_mean_205_cast")]; + tensor zero_mean_205_cast = sub(x = inputs_205_cast, y = channels_mean_205_cast)[name = tensor("zero_mean_205_cast")]; + tensor zero_mean_sq_205_cast = mul(x = zero_mean_205_cast, y = zero_mean_205_cast)[name = tensor("zero_mean_sq_205_cast")]; + tensor var_6982 = const()[name = tensor("op_6982"), val = tensor([1])]; + tensor var_6983_cast = reduce_mean(axes = var_6982, keep_dims = var_6860, x = zero_mean_sq_205_cast)[name = tensor("op_6983_cast")]; + tensor var_6984_to_fp16 = const()[name = tensor("op_6984_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6985_cast = add(x = var_6983_cast, y = var_6984_to_fp16)[name = tensor("op_6985_cast")]; + tensor denom_205_epsilon_0_to_fp16 = const()[name = tensor("denom_205_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_205_cast = rsqrt(epsilon = denom_205_epsilon_0_to_fp16, x = var_6985_cast)[name = tensor("denom_205_cast")]; + tensor out_205_cast = mul(x = zero_mean_205_cast, y = denom_205_cast)[name = tensor("out_205_cast")]; + tensor var_6989_to_fp16 = const()[name = tensor("op_6989_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(652635712)))]; + tensor var_6990_cast = add(x = out_205_cast, y = var_6989_to_fp16)[name = tensor("op_6990_cast")]; + tensor var_6992_to_fp16 = const()[name = tensor("op_6992_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(652638336)))]; + tensor hidden_states_287_cast = mul(x = var_6990_cast, y = var_6992_to_fp16)[name = tensor("hidden_states_287_cast")]; + tensor var_6999 = 
const()[name = tensor("op_6999"), val = tensor([1, 1])]; + tensor var_7001 = const()[name = tensor("op_7001"), val = tensor([1, 1])]; + tensor q_137_pad_type_0 = const()[name = tensor("q_137_pad_type_0"), val = tensor("custom")]; + tensor q_137_pad_0 = const()[name = tensor("q_137_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(652640960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(653460224))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_137_cast = conv(dilations = var_7001, groups = var_6865, pad = q_137_pad_0, pad_type = q_137_pad_type_0, strides = var_6999, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_287_cast)[name = tensor("q_137_cast")]; + tensor var_7005 = const()[name = tensor("op_7005"), val = tensor([1, 1])]; + tensor var_7007 = const()[name = tensor("op_7007"), val = tensor([1, 1])]; + tensor k_137_pad_type_0 = const()[name = tensor("k_137_pad_type_0"), val = tensor("custom")]; + tensor k_137_pad_0 = const()[name = tensor("k_137_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(653460352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(654279616))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_137_cast = conv(dilations = var_7007, groups = var_6865, pad = k_137_pad_0, pad_type = k_137_pad_type_0, strides = var_7005, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_287_cast)[name = tensor("k_137_cast")]; + tensor var_7011 = const()[name = tensor("op_7011"), val = tensor([1, 1])]; + tensor var_7013 = const()[name = tensor("op_7013"), val = tensor([1, 1])]; + tensor v_137_pad_type_0 = const()[name = tensor("v_137_pad_type_0"), val = tensor("custom")]; + tensor v_137_pad_0 = const()[name = tensor("v_137_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(654279744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(655099008))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_137_cast = conv(dilations = var_7013, groups = var_6865, pad = v_137_pad_0, pad_type = v_137_pad_type_0, strides = var_7011, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_287_cast)[name = tensor("v_137_cast")]; + tensor var_7017 = const()[name = tensor("op_7017"), val = tensor([2, 20, 64, -1])]; + tensor var_7018_cast = reshape(shape = var_7017, x = q_137_cast)[name = tensor("op_7018_cast")]; + tensor var_7019 = const()[name = tensor("op_7019"), val = tensor([2, 20, 64, -1])]; + tensor var_7020_cast = 
reshape(shape = var_7019, x = k_137_cast)[name = tensor("op_7020_cast")]; + tensor var_7021 = const()[name = tensor("op_7021"), val = tensor([2, 20, 64, -1])]; + tensor var_7022_cast = reshape(shape = var_7021, x = v_137_cast)[name = tensor("op_7022_cast")]; + tensor attn_weights_273_transpose_x_0 = const()[name = tensor("attn_weights_273_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_273_transpose_y_0 = const()[name = tensor("attn_weights_273_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_273_cast = matmul(transpose_x = attn_weights_273_transpose_x_0, transpose_y = attn_weights_273_transpose_y_0, x = var_7018_cast, y = var_7020_cast)[name = tensor("attn_weights_273_cast")]; + tensor var_6856_to_fp16 = const()[name = tensor("op_6856_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_275_cast = mul(x = attn_weights_273_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_275_cast")]; + tensor var_7026_cast = softmax(axis = var_6849, x = attn_weights_275_cast)[name = tensor("op_7026_cast")]; + tensor attn_137_transpose_x_0 = const()[name = tensor("attn_137_transpose_x_0"), val = tensor(false)]; + tensor attn_137_transpose_y_0 = const()[name = tensor("attn_137_transpose_y_0"), val = tensor(true)]; + tensor attn_137_cast = matmul(transpose_x = attn_137_transpose_x_0, transpose_y = attn_137_transpose_y_0, x = var_7022_cast, y = var_7026_cast)[name = tensor("attn_137_cast")]; + tensor var_7030 = const()[name = tensor("op_7030"), val = tensor([2, 1280, 1, -1])]; + tensor input_439_cast = reshape(shape = var_7030, x = attn_137_cast)[name = tensor("input_439_cast")]; + tensor var_7035 = const()[name = tensor("op_7035"), val = tensor([1, 1])]; + tensor var_7037 = const()[name = tensor("op_7037"), val = tensor([1, 1])]; + tensor var_7039_pad_type_0 = const()[name = tensor("op_7039_pad_type_0"), val = tensor("custom")]; + tensor var_7039_pad_0 = const()[name = tensor("op_7039_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(655099136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(656328000))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(656328192)))]; + tensor var_7039_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_7037, groups = var_6865, pad = var_7039_pad_0, pad_type = var_7039_pad_type_0, strides = var_7035, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_439_cast)[name = tensor("op_7039_cast")]; + tensor inputs_207_cast = add(x = var_7039_cast, y = inputs_205_cast)[name = tensor("inputs_207_cast")]; + tensor var_7043 = const()[name = tensor("op_7043"), val = tensor([1])]; + tensor channels_mean_207_cast = reduce_mean(axes = var_7043, keep_dims = var_6860, x = inputs_207_cast)[name = tensor("channels_mean_207_cast")]; + tensor zero_mean_207_cast = sub(x = inputs_207_cast, y = channels_mean_207_cast)[name = tensor("zero_mean_207_cast")]; + 
tensor zero_mean_sq_207_cast = mul(x = zero_mean_207_cast, y = zero_mean_207_cast)[name = tensor("zero_mean_sq_207_cast")]; + tensor var_7047 = const()[name = tensor("op_7047"), val = tensor([1])]; + tensor var_7048_cast = reduce_mean(axes = var_7047, keep_dims = var_6860, x = zero_mean_sq_207_cast)[name = tensor("op_7048_cast")]; + tensor var_7049_to_fp16 = const()[name = tensor("op_7049_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7050_cast = add(x = var_7048_cast, y = var_7049_to_fp16)[name = tensor("op_7050_cast")]; + tensor denom_207_epsilon_0_to_fp16 = const()[name = tensor("denom_207_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_207_cast = rsqrt(epsilon = denom_207_epsilon_0_to_fp16, x = var_7050_cast)[name = tensor("denom_207_cast")]; + tensor out_207_cast = mul(x = zero_mean_207_cast, y = denom_207_cast)[name = tensor("out_207_cast")]; + tensor var_7054_to_fp16 = const()[name = tensor("op_7054_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(656330816)))]; + tensor var_7055_cast = add(x = out_207_cast, y = var_7054_to_fp16)[name = tensor("op_7055_cast")]; + tensor var_7057_to_fp16 = const()[name = tensor("op_7057_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(656333440)))]; + tensor hidden_states_289_cast = mul(x = var_7055_cast, y = var_7057_to_fp16)[name = tensor("hidden_states_289_cast")]; + tensor var_7064 = const()[name = tensor("op_7064"), val = tensor([1, 1])]; + tensor var_7066 = const()[name = tensor("op_7066"), val = tensor([1, 1])]; + tensor q_139_pad_type_0 = const()[name = tensor("q_139_pad_type_0"), val = tensor("custom")]; + tensor q_139_pad_0 = const()[name = tensor("q_139_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(656336064))), lut = tensor([-0x1.f74p-7, 0x1.f98p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_139_cast = conv(dilations = var_7066, groups = var_6865, pad = q_139_pad_0, pad_type = q_139_pad_type_0, strides = var_7064, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_289_cast)[name = tensor("q_139_cast")]; + tensor var_7070 = const()[name = tensor("op_7070"), val = tensor([1, 1])]; + tensor var_7072 = const()[name = tensor("op_7072"), val = tensor([1, 1])]; + tensor k_139_pad_type_0 = const()[name = tensor("k_139_pad_type_0"), val = tensor("custom")]; + tensor k_139_pad_0 = const()[name = tensor("k_139_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(656540928))), lut = tensor([-0x1.d24p-7, 0x1.d68p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_139_cast = conv(dilations = var_7072, groups = var_6865, pad = k_139_pad_0, pad_type = k_139_pad_type_0, strides = var_7070, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_139_cast")]; + tensor var_7076 = const()[name = tensor("op_7076"), 
val = tensor([1, 1])]; + tensor var_7078 = const()[name = tensor("op_7078"), val = tensor([1, 1])]; + tensor v_139_pad_type_0 = const()[name = tensor("v_139_pad_type_0"), val = tensor("custom")]; + tensor v_139_pad_0 = const()[name = tensor("v_139_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(656868672))), lut = tensor([-0x1.ce8p-6, -0x1.0bcp-7, 0x1.09cp-7, 0x1.cd8p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_139_cast = conv(dilations = var_7078, groups = var_6865, pad = v_139_pad_0, pad_type = v_139_pad_type_0, strides = var_7076, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_139_cast")]; + tensor var_7082 = const()[name = tensor("op_7082"), val = tensor([2, 20, 64, -1])]; + tensor var_7083_cast = reshape(shape = var_7082, x = q_139_cast)[name = tensor("op_7083_cast")]; + tensor var_7084 = const()[name = tensor("op_7084"), val = tensor([2, 20, 64, -1])]; + tensor var_7085_cast = reshape(shape = var_7084, x = k_139_cast)[name = tensor("op_7085_cast")]; + tensor var_7086 = const()[name = tensor("op_7086"), val = tensor([2, 20, 64, -1])]; + tensor var_7087_cast = reshape(shape = var_7086, x = v_139_cast)[name = tensor("op_7087_cast")]; + tensor attn_weights_277_transpose_x_0 = const()[name = tensor("attn_weights_277_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_277_transpose_y_0 = const()[name = tensor("attn_weights_277_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_277_cast = matmul(transpose_x = attn_weights_277_transpose_x_0, transpose_y = attn_weights_277_transpose_y_0, x = var_7083_cast, y = var_7085_cast)[name = tensor("attn_weights_277_cast")]; + tensor attn_weights_279_cast = mul(x = attn_weights_277_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_279_cast")]; + tensor var_7091_cast = softmax(axis = var_6849, x = attn_weights_279_cast)[name = tensor("op_7091_cast")]; + tensor attn_139_transpose_x_0 = const()[name = tensor("attn_139_transpose_x_0"), val = tensor(false)]; + tensor attn_139_transpose_y_0 = const()[name = tensor("attn_139_transpose_y_0"), val = tensor(true)]; + tensor attn_139_cast = matmul(transpose_x = attn_139_transpose_x_0, transpose_y = attn_139_transpose_y_0, x = var_7087_cast, y = var_7091_cast)[name = tensor("attn_139_cast")]; + tensor var_7095 = const()[name = tensor("op_7095"), val = tensor([2, 1280, 1, -1])]; + tensor input_441_cast = reshape(shape = var_7095, x = attn_139_cast)[name = tensor("input_441_cast")]; + tensor var_7100 = const()[name = tensor("op_7100"), val = tensor([1, 1])]; + tensor var_7102 = const()[name = tensor("op_7102"), val = tensor([1, 1])]; + tensor var_7104_pad_type_0 = const()[name = tensor("op_7104_pad_type_0"), val = tensor("custom")]; + tensor var_7104_pad_0 = const()[name = tensor("op_7104_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657524096))), lut = tensor([-0x1.f6cp-8, 0x1.f54p-8]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = 
tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657728960)))]; + tensor var_7104_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_7102, groups = var_6865, pad = var_7104_pad_0, pad_type = var_7104_pad_type_0, strides = var_7100, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_441_cast)[name = tensor("op_7104_cast")]; + tensor inputs_209_cast = add(x = var_7104_cast, y = inputs_207_cast)[name = tensor("inputs_209_cast")]; + tensor var_7108 = const()[name = tensor("op_7108"), val = tensor([1])]; + tensor channels_mean_209_cast = reduce_mean(axes = var_7108, keep_dims = var_6860, x = inputs_209_cast)[name = tensor("channels_mean_209_cast")]; + tensor zero_mean_209_cast = sub(x = inputs_209_cast, y = channels_mean_209_cast)[name = tensor("zero_mean_209_cast")]; + tensor zero_mean_sq_209_cast = mul(x = zero_mean_209_cast, y = zero_mean_209_cast)[name = tensor("zero_mean_sq_209_cast")]; + tensor var_7112 = const()[name = tensor("op_7112"), val = tensor([1])]; + tensor var_7113_cast = reduce_mean(axes = var_7112, keep_dims = var_6860, x = zero_mean_sq_209_cast)[name = tensor("op_7113_cast")]; + tensor var_7114_to_fp16 = const()[name = tensor("op_7114_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7115_cast = add(x = var_7113_cast, y = var_7114_to_fp16)[name = tensor("op_7115_cast")]; + tensor denom_209_epsilon_0_to_fp16 = const()[name = tensor("denom_209_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_209_cast = rsqrt(epsilon = denom_209_epsilon_0_to_fp16, x = var_7115_cast)[name = tensor("denom_209_cast")]; + tensor out_209_cast = mul(x = zero_mean_209_cast, y = denom_209_cast)[name = tensor("out_209_cast")]; + tensor var_7119_to_fp16 = const()[name = tensor("op_7119_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657731584)))]; + tensor var_7120_cast = add(x = out_209_cast, y = var_7119_to_fp16)[name = tensor("op_7120_cast")]; + tensor var_7122_to_fp16 = const()[name = tensor("op_7122_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657734208)))]; + tensor input_443_cast = mul(x = var_7120_cast, y = var_7122_to_fp16)[name = tensor("input_443_cast")]; + tensor var_7130 = const()[name = tensor("op_7130"), val = tensor([1, 1])]; + tensor var_7132 = const()[name = tensor("op_7132"), val = tensor([1, 1])]; + tensor var_7134_pad_type_0 = const()[name = tensor("op_7134_pad_type_0"), val = tensor("custom")]; + tensor var_7134_pad_0 = const()[name = tensor("op_7134_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657736832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(667567296))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(667567488)))]; + tensor var_7134_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_7132, groups = var_6865, pad = var_7134_pad_0, pad_type = var_7134_pad_type_0, strides = var_7130, weight = up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_443_cast)[name = tensor("op_7134_cast")]; + tensor var_7135_split_sizes_0 = const()[name = tensor("op_7135_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7135_axis_0 = const()[name = tensor("op_7135_axis_0"), val = tensor(1)]; + tensor var_7135_cast_0, tensor var_7135_cast_1 = split(axis = var_7135_axis_0, split_sizes = var_7135_split_sizes_0, x = var_7134_cast)[name = tensor("op_7135_cast")]; + tensor var_7137_mode_0 = const()[name = tensor("op_7137_mode_0"), val = tensor("EXACT")]; + tensor var_7137_cast = gelu(mode = var_7137_mode_0, x = var_7135_cast_1)[name = tensor("op_7137_cast")]; + tensor input_445_cast = mul(x = var_7135_cast_0, y = var_7137_cast)[name = tensor("input_445_cast")]; + tensor var_7141 = const()[name = tensor("op_7141"), val = tensor([1, 1])]; + tensor var_7143 = const()[name = tensor("op_7143"), val = tensor([1, 1])]; + tensor var_7145_pad_type_0 = const()[name = tensor("op_7145_pad_type_0"), val = tensor("custom")]; + tensor var_7145_pad_0 = const()[name = tensor("op_7145_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(667588032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(672503296))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(672503488)))]; + tensor var_7145_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_7143, groups = var_6865, pad = var_7145_pad_0, pad_type = var_7145_pad_type_0, strides = var_7141, weight = up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_445_cast)[name = tensor("op_7145_cast")]; + tensor inputs_211_cast = add(x = var_7145_cast, y = inputs_209_cast)[name = tensor("inputs_211_cast")]; + tensor var_7155 = const()[name = tensor("op_7155"), val = tensor([1])]; + tensor channels_mean_211_cast = reduce_mean(axes = var_7155, keep_dims = var_6860, x = inputs_211_cast)[name = tensor("channels_mean_211_cast")]; + tensor zero_mean_211_cast = sub(x = inputs_211_cast, y = channels_mean_211_cast)[name = tensor("zero_mean_211_cast")]; + tensor zero_mean_sq_211_cast = mul(x = zero_mean_211_cast, y = zero_mean_211_cast)[name = tensor("zero_mean_sq_211_cast")]; + tensor var_7159 = const()[name = tensor("op_7159"), val = tensor([1])]; + tensor var_7160_cast = reduce_mean(axes = var_7159, keep_dims = var_6860, x = zero_mean_sq_211_cast)[name = tensor("op_7160_cast")]; + tensor var_7161_to_fp16 = const()[name = tensor("op_7161_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor var_7162_cast = add(x = var_7160_cast, y = var_7161_to_fp16)[name = tensor("op_7162_cast")]; + tensor denom_211_epsilon_0_to_fp16 = const()[name = tensor("denom_211_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_211_cast = rsqrt(epsilon = denom_211_epsilon_0_to_fp16, x = var_7162_cast)[name = tensor("denom_211_cast")]; + tensor out_211_cast = mul(x = zero_mean_211_cast, y = denom_211_cast)[name = tensor("out_211_cast")]; + tensor var_7166_to_fp16 = const()[name = tensor("op_7166_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(672506112)))]; + tensor var_7167_cast = add(x = out_211_cast, y = var_7166_to_fp16)[name = tensor("op_7167_cast")]; + tensor var_7169_to_fp16 = const()[name = tensor("op_7169_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(672508736)))]; + tensor hidden_states_293_cast = mul(x = var_7167_cast, y = var_7169_to_fp16)[name = tensor("hidden_states_293_cast")]; + tensor var_7176 = const()[name = tensor("op_7176"), val = tensor([1, 1])]; + tensor var_7178 = const()[name = tensor("op_7178"), val = tensor([1, 1])]; + tensor q_141_pad_type_0 = const()[name = tensor("q_141_pad_type_0"), val = tensor("custom")]; + tensor q_141_pad_0 = const()[name = tensor("q_141_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(672511360))), lut = tensor([-0x1.5b4p-5, -0x1.a14p-7, 0x1.a2cp-7, 0x1.5cp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_141_cast = conv(dilations = var_7178, groups = var_6865, pad = q_141_pad_0, pad_type = q_141_pad_type_0, strides = var_7176, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_293_cast)[name = tensor("q_141_cast")]; + tensor var_7182 = const()[name = tensor("op_7182"), val = tensor([1, 1])]; + tensor var_7184 = const()[name = tensor("op_7184"), val = tensor([1, 1])]; + tensor k_141_pad_type_0 = const()[name = tensor("k_141_pad_type_0"), val = tensor("custom")]; + tensor k_141_pad_0 = const()[name = tensor("k_141_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(672921024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(673740288))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_141_cast = conv(dilations = var_7184, groups = var_6865, pad = k_141_pad_0, pad_type = k_141_pad_type_0, strides = var_7182, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_293_cast)[name = tensor("k_141_cast")]; + tensor var_7188 = const()[name = tensor("op_7188"), val = tensor([1, 1])]; + tensor var_7190 = const()[name = tensor("op_7190"), val = tensor([1, 1])]; + tensor v_141_pad_type_0 = const()[name = tensor("v_141_pad_type_0"), val = tensor("custom")]; + tensor v_141_pad_0 = const()[name = tensor("v_141_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(673740416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(674559680))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_141_cast = conv(dilations = var_7190, groups = var_6865, pad = v_141_pad_0, pad_type = v_141_pad_type_0, strides = var_7188, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_293_cast)[name = tensor("v_141_cast")]; + tensor var_7194 = const()[name = tensor("op_7194"), val = tensor([2, 20, 64, -1])]; + tensor var_7195_cast = reshape(shape = var_7194, x = q_141_cast)[name = tensor("op_7195_cast")]; + tensor var_7196 = const()[name = tensor("op_7196"), val = tensor([2, 20, 64, -1])]; + tensor var_7197_cast = reshape(shape = var_7196, x = k_141_cast)[name = tensor("op_7197_cast")]; + tensor var_7198 = const()[name = tensor("op_7198"), val = tensor([2, 20, 64, -1])]; + tensor var_7199_cast = reshape(shape = var_7198, x = v_141_cast)[name = tensor("op_7199_cast")]; + tensor attn_weights_281_transpose_x_0 = const()[name = tensor("attn_weights_281_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_281_transpose_y_0 = const()[name = tensor("attn_weights_281_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_281_cast = matmul(transpose_x = attn_weights_281_transpose_x_0, transpose_y = attn_weights_281_transpose_y_0, x = var_7195_cast, y = var_7197_cast)[name = tensor("attn_weights_281_cast")]; + tensor attn_weights_283_cast = mul(x = attn_weights_281_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_283_cast")]; + tensor var_7203_cast = softmax(axis = var_6849, x = attn_weights_283_cast)[name = tensor("op_7203_cast")]; + tensor attn_141_transpose_x_0 = const()[name = tensor("attn_141_transpose_x_0"), val = tensor(false)]; + tensor attn_141_transpose_y_0 = const()[name = tensor("attn_141_transpose_y_0"), val = tensor(true)]; + tensor attn_141_cast = matmul(transpose_x = attn_141_transpose_x_0, transpose_y = attn_141_transpose_y_0, x = var_7199_cast, y = var_7203_cast)[name = tensor("attn_141_cast")]; + tensor var_7207 = const()[name = tensor("op_7207"), val = tensor([2, 1280, 1, -1])]; + tensor input_447_cast = reshape(shape = var_7207, x = attn_141_cast)[name = tensor("input_447_cast")]; + tensor var_7212 = const()[name = tensor("op_7212"), val = tensor([1, 1])]; + tensor var_7214 = const()[name = tensor("op_7214"), val = tensor([1, 1])]; + tensor var_7216_pad_type_0 = const()[name = tensor("op_7216_pad_type_0"), val = tensor("custom")]; + tensor var_7216_pad_0 = const()[name = tensor("op_7216_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(674559808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(675379072))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(675379200)))]; + tensor var_7216_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_7214, groups = var_6865, pad = var_7216_pad_0, pad_type = var_7216_pad_type_0, strides = var_7212, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_447_cast)[name = tensor("op_7216_cast")]; + tensor inputs_213_cast = add(x = var_7216_cast, y = inputs_211_cast)[name = tensor("inputs_213_cast")]; + tensor var_7220 = const()[name = tensor("op_7220"), val = tensor([1])]; + tensor channels_mean_213_cast = reduce_mean(axes = var_7220, keep_dims = var_6860, x = inputs_213_cast)[name = tensor("channels_mean_213_cast")]; + tensor zero_mean_213_cast = sub(x = inputs_213_cast, y = channels_mean_213_cast)[name = tensor("zero_mean_213_cast")]; + tensor zero_mean_sq_213_cast = mul(x = zero_mean_213_cast, y = zero_mean_213_cast)[name = tensor("zero_mean_sq_213_cast")]; + tensor var_7224 = const()[name = tensor("op_7224"), val = tensor([1])]; + tensor var_7225_cast = reduce_mean(axes = var_7224, keep_dims = var_6860, x = zero_mean_sq_213_cast)[name = tensor("op_7225_cast")]; + tensor var_7226_to_fp16 = const()[name = tensor("op_7226_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7227_cast = add(x = var_7225_cast, y = var_7226_to_fp16)[name = tensor("op_7227_cast")]; + tensor denom_213_epsilon_0_to_fp16 = const()[name = tensor("denom_213_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_213_cast = rsqrt(epsilon = denom_213_epsilon_0_to_fp16, x = var_7227_cast)[name = tensor("denom_213_cast")]; + tensor out_213_cast = mul(x = zero_mean_213_cast, y = denom_213_cast)[name = tensor("out_213_cast")]; + tensor var_7231_to_fp16 = const()[name = tensor("op_7231_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(675381824)))]; + tensor var_7232_cast = add(x = out_213_cast, y = var_7231_to_fp16)[name = tensor("op_7232_cast")]; + tensor var_7234_to_fp16 = const()[name = tensor("op_7234_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(675384448)))]; + tensor hidden_states_295_cast = mul(x = var_7232_cast, y = var_7234_to_fp16)[name = tensor("hidden_states_295_cast")]; + tensor var_7241 = const()[name = tensor("op_7241"), val = tensor([1, 1])]; + tensor var_7243 = const()[name = tensor("op_7243"), val = tensor([1, 1])]; + tensor q_143_pad_type_0 = const()[name = tensor("q_143_pad_type_0"), val = tensor("custom")]; + tensor q_143_pad_0 = const()[name = tensor("q_143_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(675387072))), lut = tensor([-0x1.298p-6, 0x1.28cp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_143_cast = conv(dilations = var_7243, groups = var_6865, pad = q_143_pad_0, pad_type = q_143_pad_type_0, strides = var_7241, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_295_cast)[name = tensor("q_143_cast")]; + tensor var_7247 = const()[name = tensor("op_7247"), val = 
tensor([1, 1])]; + tensor var_7249 = const()[name = tensor("op_7249"), val = tensor([1, 1])]; + tensor k_143_pad_type_0 = const()[name = tensor("k_143_pad_type_0"), val = tensor("custom")]; + tensor k_143_pad_0 = const()[name = tensor("k_143_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(675591936))), lut = tensor([-0x1.0c4p-6, 0x1.0bp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_143_cast = conv(dilations = var_7249, groups = var_6865, pad = k_143_pad_0, pad_type = k_143_pad_type_0, strides = var_7247, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_143_cast")]; + tensor var_7253 = const()[name = tensor("op_7253"), val = tensor([1, 1])]; + tensor var_7255 = const()[name = tensor("op_7255"), val = tensor([1, 1])]; + tensor v_143_pad_type_0 = const()[name = tensor("v_143_pad_type_0"), val = tensor("custom")]; + tensor v_143_pad_0 = const()[name = tensor("v_143_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(675919680))), lut = tensor([-0x1.0ecp-5, -0x1.35cp-7, 0x1.35cp-7, 0x1.0e8p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_143_cast = conv(dilations = var_7255, groups = var_6865, pad = v_143_pad_0, pad_type = v_143_pad_type_0, strides = var_7253, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_143_cast")]; + tensor var_7259 = const()[name = tensor("op_7259"), val = tensor([2, 20, 64, -1])]; + tensor var_7260_cast = reshape(shape = var_7259, x = q_143_cast)[name = tensor("op_7260_cast")]; + tensor var_7261 = const()[name = tensor("op_7261"), val = tensor([2, 20, 64, -1])]; + tensor var_7262_cast = reshape(shape = var_7261, x = k_143_cast)[name = tensor("op_7262_cast")]; + tensor var_7263 = const()[name = tensor("op_7263"), val = tensor([2, 20, 64, -1])]; + tensor var_7264_cast = reshape(shape = var_7263, x = v_143_cast)[name = tensor("op_7264_cast")]; + tensor attn_weights_285_transpose_x_0 = const()[name = tensor("attn_weights_285_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_285_transpose_y_0 = const()[name = tensor("attn_weights_285_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_285_cast = matmul(transpose_x = attn_weights_285_transpose_x_0, transpose_y = attn_weights_285_transpose_y_0, x = var_7260_cast, y = var_7262_cast)[name = tensor("attn_weights_285_cast")]; + tensor attn_weights_287_cast = mul(x = attn_weights_285_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_287_cast")]; + tensor var_7268_cast = softmax(axis = var_6849, x = attn_weights_287_cast)[name = tensor("op_7268_cast")]; + tensor attn_143_transpose_x_0 = const()[name = tensor("attn_143_transpose_x_0"), val = tensor(false)]; + tensor attn_143_transpose_y_0 = const()[name = tensor("attn_143_transpose_y_0"), val = tensor(true)]; + tensor attn_143_cast = matmul(transpose_x = attn_143_transpose_x_0, 
transpose_y = attn_143_transpose_y_0, x = var_7264_cast, y = var_7268_cast)[name = tensor("attn_143_cast")]; + tensor var_7272 = const()[name = tensor("op_7272"), val = tensor([2, 1280, 1, -1])]; + tensor input_449_cast = reshape(shape = var_7272, x = attn_143_cast)[name = tensor("input_449_cast")]; + tensor var_7277 = const()[name = tensor("op_7277"), val = tensor([1, 1])]; + tensor var_7279 = const()[name = tensor("op_7279"), val = tensor([1, 1])]; + tensor var_7281_pad_type_0 = const()[name = tensor("op_7281_pad_type_0"), val = tensor("custom")]; + tensor var_7281_pad_0 = const()[name = tensor("op_7281_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676575104))), lut = tensor([-0x1.2fp-7, 0x1.2f4p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676779968)))]; + tensor var_7281_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_7279, groups = var_6865, pad = var_7281_pad_0, pad_type = var_7281_pad_type_0, strides = var_7277, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_449_cast)[name = tensor("op_7281_cast")]; + tensor inputs_215_cast = add(x = var_7281_cast, y = inputs_213_cast)[name = tensor("inputs_215_cast")]; + tensor var_7285 = const()[name = tensor("op_7285"), val = tensor([1])]; + tensor channels_mean_215_cast = reduce_mean(axes = var_7285, keep_dims = var_6860, x = inputs_215_cast)[name = tensor("channels_mean_215_cast")]; + tensor zero_mean_215_cast = sub(x = inputs_215_cast, y = channels_mean_215_cast)[name = tensor("zero_mean_215_cast")]; + tensor zero_mean_sq_215_cast = mul(x = zero_mean_215_cast, y = zero_mean_215_cast)[name = tensor("zero_mean_sq_215_cast")]; + tensor var_7289 = const()[name = tensor("op_7289"), val = tensor([1])]; + tensor var_7290_cast = reduce_mean(axes = var_7289, keep_dims = var_6860, x = zero_mean_sq_215_cast)[name = tensor("op_7290_cast")]; + tensor var_7291_to_fp16 = const()[name = tensor("op_7291_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7292_cast = add(x = var_7290_cast, y = var_7291_to_fp16)[name = tensor("op_7292_cast")]; + tensor denom_215_epsilon_0_to_fp16 = const()[name = tensor("denom_215_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_215_cast = rsqrt(epsilon = denom_215_epsilon_0_to_fp16, x = var_7292_cast)[name = tensor("denom_215_cast")]; + tensor out_215_cast = mul(x = zero_mean_215_cast, y = denom_215_cast)[name = tensor("out_215_cast")]; + tensor var_7296_to_fp16 = const()[name = tensor("op_7296_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676782592)))]; + tensor var_7297_cast = add(x = out_215_cast, y = var_7296_to_fp16)[name = tensor("op_7297_cast")]; + tensor var_7299_to_fp16 = const()[name = tensor("op_7299_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676785216)))]; + tensor input_451_cast = mul(x = var_7297_cast, y = 
var_7299_to_fp16)[name = tensor("input_451_cast")]; + tensor var_7307 = const()[name = tensor("op_7307"), val = tensor([1, 1])]; + tensor var_7309 = const()[name = tensor("op_7309"), val = tensor([1, 1])]; + tensor var_7311_pad_type_0 = const()[name = tensor("op_7311_pad_type_0"), val = tensor("custom")]; + tensor var_7311_pad_0 = const()[name = tensor("op_7311_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676787840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(683341504))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(683341632)))]; + tensor var_7311_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_7309, groups = var_6865, pad = var_7311_pad_0, pad_type = var_7311_pad_type_0, strides = var_7307, weight = up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_451_cast)[name = tensor("op_7311_cast")]; + tensor var_7312_split_sizes_0 = const()[name = tensor("op_7312_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7312_axis_0 = const()[name = tensor("op_7312_axis_0"), val = tensor(1)]; + tensor var_7312_cast_0, tensor var_7312_cast_1 = split(axis = var_7312_axis_0, split_sizes = var_7312_split_sizes_0, x = var_7311_cast)[name = tensor("op_7312_cast")]; + tensor var_7314_mode_0 = const()[name = tensor("op_7314_mode_0"), val = tensor("EXACT")]; + tensor var_7314_cast = gelu(mode = var_7314_mode_0, x = var_7312_cast_1)[name = tensor("op_7314_cast")]; + tensor input_453_cast = mul(x = var_7312_cast_0, y = var_7314_cast)[name = tensor("input_453_cast")]; + tensor var_7318 = const()[name = tensor("op_7318"), val = tensor([1, 1])]; + tensor var_7320 = const()[name = tensor("op_7320"), val = tensor([1, 1])]; + tensor var_7322_pad_type_0 = const()[name = tensor("op_7322_pad_type_0"), val = tensor("custom")]; + tensor var_7322_pad_0 = const()[name = tensor("op_7322_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(683362176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(686639040))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(686639168)))]; + tensor var_7322_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_7320, groups = var_6865, pad = var_7322_pad_0, pad_type = var_7322_pad_type_0, strides = var_7318, weight = 
up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_453_cast)[name = tensor("op_7322_cast")]; + tensor inputs_217_cast = add(x = var_7322_cast, y = inputs_215_cast)[name = tensor("inputs_217_cast")]; + tensor var_7332 = const()[name = tensor("op_7332"), val = tensor([1])]; + tensor channels_mean_217_cast = reduce_mean(axes = var_7332, keep_dims = var_6860, x = inputs_217_cast)[name = tensor("channels_mean_217_cast")]; + tensor zero_mean_217_cast = sub(x = inputs_217_cast, y = channels_mean_217_cast)[name = tensor("zero_mean_217_cast")]; + tensor zero_mean_sq_217_cast = mul(x = zero_mean_217_cast, y = zero_mean_217_cast)[name = tensor("zero_mean_sq_217_cast")]; + tensor var_7336 = const()[name = tensor("op_7336"), val = tensor([1])]; + tensor var_7337_cast = reduce_mean(axes = var_7336, keep_dims = var_6860, x = zero_mean_sq_217_cast)[name = tensor("op_7337_cast")]; + tensor var_7338_to_fp16 = const()[name = tensor("op_7338_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7339_cast = add(x = var_7337_cast, y = var_7338_to_fp16)[name = tensor("op_7339_cast")]; + tensor denom_217_epsilon_0_to_fp16 = const()[name = tensor("denom_217_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_217_cast = rsqrt(epsilon = denom_217_epsilon_0_to_fp16, x = var_7339_cast)[name = tensor("denom_217_cast")]; + tensor out_217_cast = mul(x = zero_mean_217_cast, y = denom_217_cast)[name = tensor("out_217_cast")]; + tensor var_7343_to_fp16 = const()[name = tensor("op_7343_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(686641792)))]; + tensor var_7344_cast = add(x = out_217_cast, y = var_7343_to_fp16)[name = tensor("op_7344_cast")]; + tensor var_7346_to_fp16 = const()[name = tensor("op_7346_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(686644416)))]; + tensor hidden_states_299_cast = mul(x = var_7344_cast, y = var_7346_to_fp16)[name = tensor("hidden_states_299_cast")]; + tensor var_7353 = const()[name = tensor("op_7353"), val = tensor([1, 1])]; + tensor var_7355 = const()[name = tensor("op_7355"), val = tensor([1, 1])]; + tensor q_145_pad_type_0 = const()[name = tensor("q_145_pad_type_0"), val = tensor("custom")]; + tensor q_145_pad_0 = const()[name = tensor("q_145_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(686647040))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(687466304))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_145_cast = conv(dilations = var_7355, groups = var_6865, pad = q_145_pad_0, pad_type = q_145_pad_type_0, strides = var_7353, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_299_cast)[name = tensor("q_145_cast")]; + tensor var_7359 = const()[name = tensor("op_7359"), val = tensor([1, 1])]; + tensor var_7361 = const()[name = tensor("op_7361"), val = tensor([1, 1])]; + tensor k_145_pad_type_0 = const()[name = tensor("k_145_pad_type_0"), val = tensor("custom")]; + tensor k_145_pad_0 = const()[name = tensor("k_145_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(687466432))), lut = tensor([-0x1.628p-5, -0x1.aa4p-7, 0x1.abcp-7, 0x1.638p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_145_cast = conv(dilations = var_7361, groups = var_6865, pad = k_145_pad_0, pad_type = k_145_pad_type_0, strides = var_7359, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_299_cast)[name = tensor("k_145_cast")]; + tensor var_7365 = const()[name = tensor("op_7365"), val = tensor([1, 1])]; + tensor var_7367 = const()[name = tensor("op_7367"), val = tensor([1, 1])]; + tensor v_145_pad_type_0 = const()[name = tensor("v_145_pad_type_0"), val = tensor("custom")]; + tensor v_145_pad_0 = const()[name = tensor("v_145_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(687876096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(688695360))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_145_cast = conv(dilations = var_7367, groups = var_6865, pad = v_145_pad_0, pad_type = v_145_pad_type_0, strides = var_7365, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_299_cast)[name = tensor("v_145_cast")]; + tensor var_7371 = const()[name = tensor("op_7371"), val = tensor([2, 20, 64, -1])]; + tensor var_7372_cast = reshape(shape = var_7371, x = q_145_cast)[name = tensor("op_7372_cast")]; + tensor var_7373 = const()[name = tensor("op_7373"), val = tensor([2, 20, 64, -1])]; + tensor var_7374_cast = reshape(shape = var_7373, x = k_145_cast)[name = tensor("op_7374_cast")]; + tensor var_7375 = const()[name = tensor("op_7375"), val = tensor([2, 20, 64, -1])]; + tensor var_7376_cast = reshape(shape = var_7375, x = v_145_cast)[name = tensor("op_7376_cast")]; + tensor attn_weights_289_transpose_x_0 = const()[name = tensor("attn_weights_289_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_289_transpose_y_0 = const()[name = tensor("attn_weights_289_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_289_cast = matmul(transpose_x = attn_weights_289_transpose_x_0, transpose_y = attn_weights_289_transpose_y_0, x = var_7372_cast, y = var_7374_cast)[name = tensor("attn_weights_289_cast")]; + tensor attn_weights_291_cast = mul(x = attn_weights_289_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_291_cast")]; + tensor var_7380_cast = softmax(axis = var_6849, x = attn_weights_291_cast)[name = tensor("op_7380_cast")]; + tensor attn_145_transpose_x_0 = const()[name = tensor("attn_145_transpose_x_0"), val = tensor(false)]; + tensor attn_145_transpose_y_0 = const()[name = tensor("attn_145_transpose_y_0"), val = tensor(true)]; + tensor attn_145_cast = matmul(transpose_x = attn_145_transpose_x_0, transpose_y = attn_145_transpose_y_0, x = var_7376_cast, y = var_7380_cast)[name = tensor("attn_145_cast")]; + tensor var_7384 = const()[name = tensor("op_7384"), val = tensor([2, 1280, 1, -1])]; + tensor input_455_cast = reshape(shape = var_7384, x = attn_145_cast)[name = tensor("input_455_cast")]; + tensor var_7389 = 
const()[name = tensor("op_7389"), val = tensor([1, 1])]; + tensor var_7391 = const()[name = tensor("op_7391"), val = tensor([1, 1])]; + tensor var_7393_pad_type_0 = const()[name = tensor("op_7393_pad_type_0"), val = tensor("custom")]; + tensor var_7393_pad_0 = const()[name = tensor("op_7393_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(688695488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(689924352))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(689924544)))]; + tensor var_7393_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_7391, groups = var_6865, pad = var_7393_pad_0, pad_type = var_7393_pad_type_0, strides = var_7389, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_455_cast)[name = tensor("op_7393_cast")]; + tensor inputs_219_cast = add(x = var_7393_cast, y = inputs_217_cast)[name = tensor("inputs_219_cast")]; + tensor var_7397 = const()[name = tensor("op_7397"), val = tensor([1])]; + tensor channels_mean_219_cast = reduce_mean(axes = var_7397, keep_dims = var_6860, x = inputs_219_cast)[name = tensor("channels_mean_219_cast")]; + tensor zero_mean_219_cast = sub(x = inputs_219_cast, y = channels_mean_219_cast)[name = tensor("zero_mean_219_cast")]; + tensor zero_mean_sq_219_cast = mul(x = zero_mean_219_cast, y = zero_mean_219_cast)[name = tensor("zero_mean_sq_219_cast")]; + tensor var_7401 = const()[name = tensor("op_7401"), val = tensor([1])]; + tensor var_7402_cast = reduce_mean(axes = var_7401, keep_dims = var_6860, x = zero_mean_sq_219_cast)[name = tensor("op_7402_cast")]; + tensor var_7403_to_fp16 = const()[name = tensor("op_7403_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7404_cast = add(x = var_7402_cast, y = var_7403_to_fp16)[name = tensor("op_7404_cast")]; + tensor denom_219_epsilon_0_to_fp16 = const()[name = tensor("denom_219_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_219_cast = rsqrt(epsilon = denom_219_epsilon_0_to_fp16, x = var_7404_cast)[name = tensor("denom_219_cast")]; + tensor out_219_cast = mul(x = zero_mean_219_cast, y = denom_219_cast)[name = tensor("out_219_cast")]; + tensor var_7408_to_fp16 = const()[name = tensor("op_7408_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(689927168)))]; + tensor var_7409_cast = add(x = out_219_cast, y = var_7408_to_fp16)[name = tensor("op_7409_cast")]; + tensor var_7411_to_fp16 = const()[name = tensor("op_7411_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(689929792)))]; + tensor hidden_states_301_cast = mul(x = var_7409_cast, y = var_7411_to_fp16)[name = tensor("hidden_states_301_cast")]; + tensor var_7418 = const()[name = tensor("op_7418"), val = tensor([1, 1])]; + tensor var_7420 = const()[name = tensor("op_7420"), val = tensor([1, 1])]; + tensor q_147_pad_type_0 = const()[name = 
tensor("q_147_pad_type_0"), val = tensor("custom")]; + tensor q_147_pad_0 = const()[name = tensor("q_147_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(689932416))), lut = tensor([-0x1.39cp-5, -0x1.76cp-7, 0x1.744p-7, 0x1.38cp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_147_cast = conv(dilations = var_7420, groups = var_6865, pad = q_147_pad_0, pad_type = q_147_pad_type_0, strides = var_7418, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_301_cast)[name = tensor("q_147_cast")]; + tensor var_7424 = const()[name = tensor("op_7424"), val = tensor([1, 1])]; + tensor var_7426 = const()[name = tensor("op_7426"), val = tensor([1, 1])]; + tensor k_147_pad_type_0 = const()[name = tensor("k_147_pad_type_0"), val = tensor("custom")]; + tensor k_147_pad_0 = const()[name = tensor("k_147_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(690342080))), lut = tensor([-0x1.1a8p-5, -0x1.4e4p-7, 0x1.4acp-7, 0x1.19cp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_147_cast = conv(dilations = var_7426, groups = var_6865, pad = k_147_pad_0, pad_type = k_147_pad_type_0, strides = var_7424, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_147_cast")]; + tensor var_7430 = const()[name = tensor("op_7430"), val = tensor([1, 1])]; + tensor var_7432 = const()[name = tensor("op_7432"), val = tensor([1, 1])]; + tensor v_147_pad_type_0 = const()[name = tensor("v_147_pad_type_0"), val = tensor("custom")]; + tensor v_147_pad_0 = const()[name = tensor("v_147_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(690997504))), lut = tensor([-0x1.25cp-5, -0x1.544p-7, 0x1.518p-7, 0x1.254p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_147_cast = conv(dilations = var_7432, groups = var_6865, pad = v_147_pad_0, pad_type = v_147_pad_type_0, strides = var_7430, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_147_cast")]; + tensor var_7436 = const()[name = tensor("op_7436"), val = tensor([2, 20, 64, -1])]; + tensor var_7437_cast = reshape(shape = var_7436, x = q_147_cast)[name = tensor("op_7437_cast")]; + tensor var_7438 = const()[name = tensor("op_7438"), val = tensor([2, 20, 64, -1])]; + tensor var_7439_cast = reshape(shape = var_7438, x = k_147_cast)[name = tensor("op_7439_cast")]; + tensor var_7440 = const()[name = tensor("op_7440"), val = tensor([2, 20, 64, -1])]; + tensor var_7441_cast = reshape(shape = var_7440, x = v_147_cast)[name = tensor("op_7441_cast")]; + tensor 
attn_weights_293_transpose_x_0 = const()[name = tensor("attn_weights_293_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_293_transpose_y_0 = const()[name = tensor("attn_weights_293_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_293_cast = matmul(transpose_x = attn_weights_293_transpose_x_0, transpose_y = attn_weights_293_transpose_y_0, x = var_7437_cast, y = var_7439_cast)[name = tensor("attn_weights_293_cast")]; + tensor attn_weights_295_cast = mul(x = attn_weights_293_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_295_cast")]; + tensor var_7445_cast = softmax(axis = var_6849, x = attn_weights_295_cast)[name = tensor("op_7445_cast")]; + tensor attn_147_transpose_x_0 = const()[name = tensor("attn_147_transpose_x_0"), val = tensor(false)]; + tensor attn_147_transpose_y_0 = const()[name = tensor("attn_147_transpose_y_0"), val = tensor(true)]; + tensor attn_147_cast = matmul(transpose_x = attn_147_transpose_x_0, transpose_y = attn_147_transpose_y_0, x = var_7441_cast, y = var_7445_cast)[name = tensor("attn_147_cast")]; + tensor var_7449 = const()[name = tensor("op_7449"), val = tensor([2, 1280, 1, -1])]; + tensor input_457_cast = reshape(shape = var_7449, x = attn_147_cast)[name = tensor("input_457_cast")]; + tensor var_7454 = const()[name = tensor("op_7454"), val = tensor([1, 1])]; + tensor var_7456 = const()[name = tensor("op_7456"), val = tensor([1, 1])]; + tensor var_7458_pad_type_0 = const()[name = tensor("op_7458_pad_type_0"), val = tensor("custom")]; + tensor var_7458_pad_0 = const()[name = tensor("op_7458_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(691652928))), lut = tensor([-0x1.52cp-7, 0x1.548p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(691857792)))]; + tensor var_7458_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_7456, groups = var_6865, pad = var_7458_pad_0, pad_type = var_7458_pad_type_0, strides = var_7454, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_457_cast)[name = tensor("op_7458_cast")]; + tensor inputs_221_cast = add(x = var_7458_cast, y = inputs_219_cast)[name = tensor("inputs_221_cast")]; + tensor var_7462 = const()[name = tensor("op_7462"), val = tensor([1])]; + tensor channels_mean_221_cast = reduce_mean(axes = var_7462, keep_dims = var_6860, x = inputs_221_cast)[name = tensor("channels_mean_221_cast")]; + tensor zero_mean_221_cast = sub(x = inputs_221_cast, y = channels_mean_221_cast)[name = tensor("zero_mean_221_cast")]; + tensor zero_mean_sq_221_cast = mul(x = zero_mean_221_cast, y = zero_mean_221_cast)[name = tensor("zero_mean_sq_221_cast")]; + tensor var_7466 = const()[name = tensor("op_7466"), val = tensor([1])]; + tensor var_7467_cast = reduce_mean(axes = var_7466, keep_dims = var_6860, x = zero_mean_sq_221_cast)[name = tensor("op_7467_cast")]; + tensor var_7468_to_fp16 = const()[name = tensor("op_7468_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor var_7469_cast = add(x = var_7467_cast, y = var_7468_to_fp16)[name = tensor("op_7469_cast")]; + tensor denom_221_epsilon_0_to_fp16 = const()[name = tensor("denom_221_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_221_cast = rsqrt(epsilon = denom_221_epsilon_0_to_fp16, x = var_7469_cast)[name = tensor("denom_221_cast")]; + tensor out_221_cast = mul(x = zero_mean_221_cast, y = denom_221_cast)[name = tensor("out_221_cast")]; + tensor var_7473_to_fp16 = const()[name = tensor("op_7473_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(691860416)))]; + tensor var_7474_cast = add(x = out_221_cast, y = var_7473_to_fp16)[name = tensor("op_7474_cast")]; + tensor var_7476_to_fp16 = const()[name = tensor("op_7476_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(691863040)))]; + tensor input_459_cast = mul(x = var_7474_cast, y = var_7476_to_fp16)[name = tensor("input_459_cast")]; + tensor var_7484 = const()[name = tensor("op_7484"), val = tensor([1, 1])]; + tensor var_7486 = const()[name = tensor("op_7486"), val = tensor([1, 1])]; + tensor var_7488_pad_type_0 = const()[name = tensor("op_7488_pad_type_0"), val = tensor("custom")]; + tensor var_7488_pad_0 = const()[name = tensor("op_7488_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(691865664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(698419328))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(698419456)))]; + tensor var_7488_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_7486, groups = var_6865, pad = var_7488_pad_0, pad_type = var_7488_pad_type_0, strides = var_7484, weight = up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_459_cast)[name = tensor("op_7488_cast")]; + tensor var_7489_split_sizes_0 = const()[name = tensor("op_7489_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7489_axis_0 = const()[name = tensor("op_7489_axis_0"), val = tensor(1)]; + tensor var_7489_cast_0, tensor var_7489_cast_1 = split(axis = var_7489_axis_0, split_sizes = var_7489_split_sizes_0, x = var_7488_cast)[name = tensor("op_7489_cast")]; + tensor var_7491_mode_0 = const()[name = tensor("op_7491_mode_0"), val = tensor("EXACT")]; + tensor var_7491_cast = gelu(mode = var_7491_mode_0, x = var_7489_cast_1)[name = tensor("op_7491_cast")]; + tensor input_461_cast = mul(x = var_7489_cast_0, y = var_7491_cast)[name = tensor("input_461_cast")]; + tensor var_7495 = const()[name = tensor("op_7495"), val = tensor([1, 1])]; + tensor var_7497 = const()[name = tensor("op_7497"), val = tensor([1, 1])]; + tensor var_7499_pad_type_0 = const()[name = tensor("op_7499_pad_type_0"), val = tensor("custom")]; + tensor var_7499_pad_0 = const()[name = tensor("op_7499_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(698440000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703355264))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703355456)))]; + tensor var_7499_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_7497, groups = var_6865, pad = var_7499_pad_0, pad_type = var_7499_pad_type_0, strides = var_7495, weight = up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_461_cast)[name = tensor("op_7499_cast")]; + tensor inputs_223_cast = add(x = var_7499_cast, y = inputs_221_cast)[name = tensor("inputs_223_cast")]; + tensor var_7509 = const()[name = tensor("op_7509"), val = tensor([1])]; + tensor channels_mean_223_cast = reduce_mean(axes = var_7509, keep_dims = var_6860, x = inputs_223_cast)[name = tensor("channels_mean_223_cast")]; + tensor zero_mean_223_cast = sub(x = inputs_223_cast, y = channels_mean_223_cast)[name = tensor("zero_mean_223_cast")]; + tensor zero_mean_sq_223_cast = mul(x = zero_mean_223_cast, y = zero_mean_223_cast)[name = tensor("zero_mean_sq_223_cast")]; + tensor var_7513 = const()[name = tensor("op_7513"), val = tensor([1])]; + tensor var_7514_cast = reduce_mean(axes = var_7513, keep_dims = var_6860, x = zero_mean_sq_223_cast)[name = tensor("op_7514_cast")]; + tensor var_7515_to_fp16 = const()[name = tensor("op_7515_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7516_cast = add(x = var_7514_cast, y = var_7515_to_fp16)[name = tensor("op_7516_cast")]; + tensor denom_223_epsilon_0_to_fp16 = const()[name = tensor("denom_223_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_223_cast = rsqrt(epsilon = denom_223_epsilon_0_to_fp16, x = var_7516_cast)[name = tensor("denom_223_cast")]; + tensor out_223_cast = mul(x = zero_mean_223_cast, y = denom_223_cast)[name = tensor("out_223_cast")]; + tensor var_7520_to_fp16 = const()[name = tensor("op_7520_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703358080)))]; + tensor var_7521_cast = add(x = out_223_cast, y = var_7520_to_fp16)[name = tensor("op_7521_cast")]; + tensor var_7523_to_fp16 = const()[name = tensor("op_7523_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703360704)))]; + tensor hidden_states_305_cast = mul(x = var_7521_cast, y = var_7523_to_fp16)[name = tensor("hidden_states_305_cast")]; + tensor var_7530 = const()[name = tensor("op_7530"), val = tensor([1, 1])]; + tensor var_7532 = const()[name = tensor("op_7532"), val = tensor([1, 1])]; + tensor q_149_pad_type_0 = const()[name = tensor("q_149_pad_type_0"), val = tensor("custom")]; + tensor q_149_pad_0 = const()[name = tensor("q_149_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703363328))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(704182592))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_149_cast = conv(dilations = var_7532, groups = var_6865, pad = q_149_pad_0, pad_type = q_149_pad_type_0, strides = var_7530, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_305_cast)[name = tensor("q_149_cast")]; + tensor var_7536 = const()[name = tensor("op_7536"), val = tensor([1, 1])]; + tensor var_7538 = const()[name = tensor("op_7538"), val = tensor([1, 1])]; + tensor k_149_pad_type_0 = const()[name = tensor("k_149_pad_type_0"), val = tensor("custom")]; + tensor k_149_pad_0 = const()[name = tensor("k_149_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(704182720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(705001984))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_149_cast = conv(dilations = var_7538, groups = var_6865, pad = k_149_pad_0, pad_type = k_149_pad_type_0, strides = var_7536, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_305_cast)[name = tensor("k_149_cast")]; + tensor var_7542 = const()[name = tensor("op_7542"), val = tensor([1, 1])]; + tensor var_7544 = const()[name = tensor("op_7544"), val = tensor([1, 1])]; + tensor v_149_pad_type_0 = const()[name = tensor("v_149_pad_type_0"), val = tensor("custom")]; + tensor v_149_pad_0 = const()[name = tensor("v_149_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(705002112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(705821376))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_149_cast = conv(dilations = var_7544, groups = var_6865, pad = v_149_pad_0, pad_type = v_149_pad_type_0, strides = var_7542, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_305_cast)[name = tensor("v_149_cast")]; + tensor var_7548 = const()[name = tensor("op_7548"), val = tensor([2, 20, 64, -1])]; + tensor var_7549_cast = reshape(shape = var_7548, x = q_149_cast)[name = tensor("op_7549_cast")]; + tensor var_7550 = const()[name = tensor("op_7550"), val = tensor([2, 20, 64, -1])]; + tensor var_7551_cast = reshape(shape = var_7550, x = k_149_cast)[name = tensor("op_7551_cast")]; + tensor var_7552 = const()[name = tensor("op_7552"), val = tensor([2, 20, 64, -1])]; + tensor var_7553_cast = reshape(shape = var_7552, x = v_149_cast)[name = tensor("op_7553_cast")]; + tensor attn_weights_297_transpose_x_0 = const()[name = tensor("attn_weights_297_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_297_transpose_y_0 = const()[name = tensor("attn_weights_297_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_297_cast = matmul(transpose_x = 
attn_weights_297_transpose_x_0, transpose_y = attn_weights_297_transpose_y_0, x = var_7549_cast, y = var_7551_cast)[name = tensor("attn_weights_297_cast")]; + tensor attn_weights_299_cast = mul(x = attn_weights_297_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_299_cast")]; + tensor var_7557_cast = softmax(axis = var_6849, x = attn_weights_299_cast)[name = tensor("op_7557_cast")]; + tensor attn_149_transpose_x_0 = const()[name = tensor("attn_149_transpose_x_0"), val = tensor(false)]; + tensor attn_149_transpose_y_0 = const()[name = tensor("attn_149_transpose_y_0"), val = tensor(true)]; + tensor attn_149_cast = matmul(transpose_x = attn_149_transpose_x_0, transpose_y = attn_149_transpose_y_0, x = var_7553_cast, y = var_7557_cast)[name = tensor("attn_149_cast")]; + tensor var_7561 = const()[name = tensor("op_7561"), val = tensor([2, 1280, 1, -1])]; + tensor input_463_cast = reshape(shape = var_7561, x = attn_149_cast)[name = tensor("input_463_cast")]; + tensor var_7566 = const()[name = tensor("op_7566"), val = tensor([1, 1])]; + tensor var_7568 = const()[name = tensor("op_7568"), val = tensor([1, 1])]; + tensor var_7570_pad_type_0 = const()[name = tensor("op_7570_pad_type_0"), val = tensor("custom")]; + tensor var_7570_pad_0 = const()[name = tensor("op_7570_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(705821504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(707050368))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(707050560)))]; + tensor var_7570_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_7568, groups = var_6865, pad = var_7570_pad_0, pad_type = var_7570_pad_type_0, strides = var_7566, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_463_cast)[name = tensor("op_7570_cast")]; + tensor inputs_225_cast = add(x = var_7570_cast, y = inputs_223_cast)[name = tensor("inputs_225_cast")]; + tensor var_7574 = const()[name = tensor("op_7574"), val = tensor([1])]; + tensor channels_mean_225_cast = reduce_mean(axes = var_7574, keep_dims = var_6860, x = inputs_225_cast)[name = tensor("channels_mean_225_cast")]; + tensor zero_mean_225_cast = sub(x = inputs_225_cast, y = channels_mean_225_cast)[name = tensor("zero_mean_225_cast")]; + tensor zero_mean_sq_225_cast = mul(x = zero_mean_225_cast, y = zero_mean_225_cast)[name = tensor("zero_mean_sq_225_cast")]; + tensor var_7578 = const()[name = tensor("op_7578"), val = tensor([1])]; + tensor var_7579_cast = reduce_mean(axes = var_7578, keep_dims = var_6860, x = zero_mean_sq_225_cast)[name = tensor("op_7579_cast")]; + tensor var_7580_to_fp16 = const()[name = tensor("op_7580_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7581_cast = add(x = var_7579_cast, y = var_7580_to_fp16)[name = tensor("op_7581_cast")]; + tensor denom_225_epsilon_0_to_fp16 = const()[name = tensor("denom_225_epsilon_0_to_fp16"), val = 
tensor(0x1p-24)]; + tensor denom_225_cast = rsqrt(epsilon = denom_225_epsilon_0_to_fp16, x = var_7581_cast)[name = tensor("denom_225_cast")]; + tensor out_225_cast = mul(x = zero_mean_225_cast, y = denom_225_cast)[name = tensor("out_225_cast")]; + tensor var_7585_to_fp16 = const()[name = tensor("op_7585_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(707053184)))]; + tensor var_7586_cast = add(x = out_225_cast, y = var_7585_to_fp16)[name = tensor("op_7586_cast")]; + tensor var_7588_to_fp16 = const()[name = tensor("op_7588_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(707055808)))]; + tensor hidden_states_307_cast = mul(x = var_7586_cast, y = var_7588_to_fp16)[name = tensor("hidden_states_307_cast")]; + tensor var_7595 = const()[name = tensor("op_7595"), val = tensor([1, 1])]; + tensor var_7597 = const()[name = tensor("op_7597"), val = tensor([1, 1])]; + tensor q_151_pad_type_0 = const()[name = tensor("q_151_pad_type_0"), val = tensor("custom")]; + tensor q_151_pad_0 = const()[name = tensor("q_151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(707058432))), lut = tensor([-0x1.3fcp-6, 0x1.3fcp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_151_cast = conv(dilations = var_7597, groups = var_6865, pad = q_151_pad_0, pad_type = q_151_pad_type_0, strides = var_7595, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_307_cast)[name = tensor("q_151_cast")]; + tensor var_7601 = const()[name = tensor("op_7601"), val = tensor([1, 1])]; + tensor var_7603 = const()[name = tensor("op_7603"), val = tensor([1, 1])]; + tensor k_151_pad_type_0 = const()[name = tensor("k_151_pad_type_0"), val = tensor("custom")]; + tensor k_151_pad_0 = const()[name = tensor("k_151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(707263296))), lut = tensor([-0x1.114p-6, 0x1.114p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_151_cast = conv(dilations = var_7603, groups = var_6865, pad = k_151_pad_0, pad_type = k_151_pad_type_0, strides = var_7601, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_151_cast")]; + tensor var_7607 = const()[name = tensor("op_7607"), val = tensor([1, 1])]; + tensor var_7609 = const()[name = tensor("op_7609"), val = tensor([1, 1])]; + tensor v_151_pad_type_0 = const()[name = tensor("v_151_pad_type_0"), val = tensor("custom")]; + tensor v_151_pad_0 = const()[name = tensor("v_151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(707591040))), lut = tensor([-0x1.24p-6, 0x1.228p-6]), name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_151_cast = conv(dilations = var_7609, groups = var_6865, pad = v_151_pad_0, pad_type = v_151_pad_type_0, strides = var_7607, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_151_cast")]; + tensor var_7613 = const()[name = tensor("op_7613"), val = tensor([2, 20, 64, -1])]; + tensor var_7614_cast = reshape(shape = var_7613, x = q_151_cast)[name = tensor("op_7614_cast")]; + tensor var_7615 = const()[name = tensor("op_7615"), val = tensor([2, 20, 64, -1])]; + tensor var_7616_cast = reshape(shape = var_7615, x = k_151_cast)[name = tensor("op_7616_cast")]; + tensor var_7617 = const()[name = tensor("op_7617"), val = tensor([2, 20, 64, -1])]; + tensor var_7618_cast = reshape(shape = var_7617, x = v_151_cast)[name = tensor("op_7618_cast")]; + tensor attn_weights_301_transpose_x_0 = const()[name = tensor("attn_weights_301_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_301_transpose_y_0 = const()[name = tensor("attn_weights_301_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_301_cast = matmul(transpose_x = attn_weights_301_transpose_x_0, transpose_y = attn_weights_301_transpose_y_0, x = var_7614_cast, y = var_7616_cast)[name = tensor("attn_weights_301_cast")]; + tensor attn_weights_303_cast = mul(x = attn_weights_301_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_303_cast")]; + tensor var_7622_cast = softmax(axis = var_6849, x = attn_weights_303_cast)[name = tensor("op_7622_cast")]; + tensor attn_151_transpose_x_0 = const()[name = tensor("attn_151_transpose_x_0"), val = tensor(false)]; + tensor attn_151_transpose_y_0 = const()[name = tensor("attn_151_transpose_y_0"), val = tensor(true)]; + tensor attn_151_cast = matmul(transpose_x = attn_151_transpose_x_0, transpose_y = attn_151_transpose_y_0, x = var_7618_cast, y = var_7622_cast)[name = tensor("attn_151_cast")]; + tensor var_7626 = const()[name = tensor("op_7626"), val = tensor([2, 1280, 1, -1])]; + tensor input_465_cast = reshape(shape = var_7626, x = attn_151_cast)[name = tensor("input_465_cast")]; + tensor var_7631 = const()[name = tensor("op_7631"), val = tensor([1, 1])]; + tensor var_7633 = const()[name = tensor("op_7633"), val = tensor([1, 1])]; + tensor var_7635_pad_type_0 = const()[name = tensor("op_7635_pad_type_0"), val = tensor("custom")]; + tensor var_7635_pad_0 = const()[name = tensor("op_7635_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(707918784))), lut = tensor([-0x1.574p-7, 0x1.56cp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(708123648)))]; + tensor var_7635_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_7633, groups = var_6865, pad = var_7635_pad_0, pad_type = var_7635_pad_type_0, strides = var_7631, weight = 
up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_465_cast)[name = tensor("op_7635_cast")]; + tensor inputs_227_cast = add(x = var_7635_cast, y = inputs_225_cast)[name = tensor("inputs_227_cast")]; + tensor var_7639 = const()[name = tensor("op_7639"), val = tensor([1])]; + tensor channels_mean_227_cast = reduce_mean(axes = var_7639, keep_dims = var_6860, x = inputs_227_cast)[name = tensor("channels_mean_227_cast")]; + tensor zero_mean_227_cast = sub(x = inputs_227_cast, y = channels_mean_227_cast)[name = tensor("zero_mean_227_cast")]; + tensor zero_mean_sq_227_cast = mul(x = zero_mean_227_cast, y = zero_mean_227_cast)[name = tensor("zero_mean_sq_227_cast")]; + tensor var_7643 = const()[name = tensor("op_7643"), val = tensor([1])]; + tensor var_7644_cast = reduce_mean(axes = var_7643, keep_dims = var_6860, x = zero_mean_sq_227_cast)[name = tensor("op_7644_cast")]; + tensor var_7645_to_fp16 = const()[name = tensor("op_7645_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7646_cast = add(x = var_7644_cast, y = var_7645_to_fp16)[name = tensor("op_7646_cast")]; + tensor denom_227_epsilon_0_to_fp16 = const()[name = tensor("denom_227_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_227_cast = rsqrt(epsilon = denom_227_epsilon_0_to_fp16, x = var_7646_cast)[name = tensor("denom_227_cast")]; + tensor out_227_cast = mul(x = zero_mean_227_cast, y = denom_227_cast)[name = tensor("out_227_cast")]; + tensor var_7650_to_fp16 = const()[name = tensor("op_7650_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(708126272)))]; + tensor var_7651_cast = add(x = out_227_cast, y = var_7650_to_fp16)[name = tensor("op_7651_cast")]; + tensor var_7653_to_fp16 = const()[name = tensor("op_7653_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(708128896)))]; + tensor input_467_cast = mul(x = var_7651_cast, y = var_7653_to_fp16)[name = tensor("input_467_cast")]; + tensor var_7661 = const()[name = tensor("op_7661"), val = tensor([1, 1])]; + tensor var_7663 = const()[name = tensor("op_7663"), val = tensor([1, 1])]; + tensor var_7665_pad_type_0 = const()[name = tensor("op_7665_pad_type_0"), val = tensor("custom")]; + tensor var_7665_pad_0 = const()[name = tensor("op_7665_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(708131520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(714685184))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(714685312)))]; + tensor var_7665_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_7663, groups = var_6865, pad = var_7665_pad_0, pad_type = var_7665_pad_type_0, strides = var_7661, weight = up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_467_cast)[name = tensor("op_7665_cast")]; + tensor var_7666_split_sizes_0 = const()[name = 
tensor("op_7666_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7666_axis_0 = const()[name = tensor("op_7666_axis_0"), val = tensor(1)]; + tensor var_7666_cast_0, tensor var_7666_cast_1 = split(axis = var_7666_axis_0, split_sizes = var_7666_split_sizes_0, x = var_7665_cast)[name = tensor("op_7666_cast")]; + tensor var_7668_mode_0 = const()[name = tensor("op_7668_mode_0"), val = tensor("EXACT")]; + tensor var_7668_cast = gelu(mode = var_7668_mode_0, x = var_7666_cast_1)[name = tensor("op_7668_cast")]; + tensor input_469_cast = mul(x = var_7666_cast_0, y = var_7668_cast)[name = tensor("input_469_cast")]; + tensor var_7672 = const()[name = tensor("op_7672"), val = tensor([1, 1])]; + tensor var_7674 = const()[name = tensor("op_7674"), val = tensor([1, 1])]; + tensor var_7676_pad_type_0 = const()[name = tensor("op_7676_pad_type_0"), val = tensor("custom")]; + tensor var_7676_pad_0 = const()[name = tensor("op_7676_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(714705856))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717982720))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717982848)))]; + tensor var_7676_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_7674, groups = var_6865, pad = var_7676_pad_0, pad_type = var_7676_pad_type_0, strides = var_7672, weight = up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_469_cast)[name = tensor("op_7676_cast")]; + tensor inputs_229_cast = add(x = var_7676_cast, y = inputs_227_cast)[name = tensor("inputs_229_cast")]; + tensor var_7686 = const()[name = tensor("op_7686"), val = tensor([1])]; + tensor channels_mean_229_cast = reduce_mean(axes = var_7686, keep_dims = var_6860, x = inputs_229_cast)[name = tensor("channels_mean_229_cast")]; + tensor zero_mean_229_cast = sub(x = inputs_229_cast, y = channels_mean_229_cast)[name = tensor("zero_mean_229_cast")]; + tensor zero_mean_sq_229_cast = mul(x = zero_mean_229_cast, y = zero_mean_229_cast)[name = tensor("zero_mean_sq_229_cast")]; + tensor var_7690 = const()[name = tensor("op_7690"), val = tensor([1])]; + tensor var_7691_cast = reduce_mean(axes = var_7690, keep_dims = var_6860, x = zero_mean_sq_229_cast)[name = tensor("op_7691_cast")]; + tensor var_7692_to_fp16 = const()[name = tensor("op_7692_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7693_cast = add(x = var_7691_cast, y = var_7692_to_fp16)[name = tensor("op_7693_cast")]; + tensor denom_229_epsilon_0_to_fp16 = const()[name = tensor("denom_229_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_229_cast = rsqrt(epsilon = denom_229_epsilon_0_to_fp16, x = var_7693_cast)[name = tensor("denom_229_cast")]; + tensor out_229_cast = mul(x = zero_mean_229_cast, y = denom_229_cast)[name = tensor("out_229_cast")]; + tensor var_7697_to_fp16 = const()[name = tensor("op_7697_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(717985472)))]; + tensor var_7698_cast = add(x = out_229_cast, y = var_7697_to_fp16)[name = tensor("op_7698_cast")]; + tensor var_7700_to_fp16 = const()[name = tensor("op_7700_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717988096)))]; + tensor hidden_states_311_cast = mul(x = var_7698_cast, y = var_7700_to_fp16)[name = tensor("hidden_states_311_cast")]; + tensor var_7707 = const()[name = tensor("op_7707"), val = tensor([1, 1])]; + tensor var_7709 = const()[name = tensor("op_7709"), val = tensor([1, 1])]; + tensor q_153_pad_type_0 = const()[name = tensor("q_153_pad_type_0"), val = tensor("custom")]; + tensor q_153_pad_0 = const()[name = tensor("q_153_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717990720))), lut = tensor([-0x1.6ap-5, -0x1.b3cp-7, 0x1.b1p-7, 0x1.69p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_153_cast = conv(dilations = var_7709, groups = var_6865, pad = q_153_pad_0, pad_type = q_153_pad_type_0, strides = var_7707, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_311_cast)[name = tensor("q_153_cast")]; + tensor var_7713 = const()[name = tensor("op_7713"), val = tensor([1, 1])]; + tensor var_7715 = const()[name = tensor("op_7715"), val = tensor([1, 1])]; + tensor k_153_pad_type_0 = const()[name = tensor("k_153_pad_type_0"), val = tensor("custom")]; + tensor k_153_pad_0 = const()[name = tensor("k_153_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(718400384))), lut = tensor([-0x1.6ap-5, -0x1.b1cp-7, 0x1.b64p-7, 0x1.6bp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_153_cast = conv(dilations = var_7715, groups = var_6865, pad = k_153_pad_0, pad_type = k_153_pad_type_0, strides = var_7713, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_311_cast)[name = tensor("k_153_cast")]; + tensor var_7719 = const()[name = tensor("op_7719"), val = tensor([1, 1])]; + tensor var_7721 = const()[name = tensor("op_7721"), val = tensor([1, 1])]; + tensor v_153_pad_type_0 = const()[name = tensor("v_153_pad_type_0"), val = tensor("custom")]; + tensor v_153_pad_0 = const()[name = tensor("v_153_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(718810048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(719629312))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_153_cast = conv(dilations = var_7721, groups = var_6865, pad = v_153_pad_0, pad_type = v_153_pad_type_0, strides = var_7719, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x 
= hidden_states_311_cast)[name = tensor("v_153_cast")]; + tensor var_7725 = const()[name = tensor("op_7725"), val = tensor([2, 20, 64, -1])]; + tensor var_7726_cast = reshape(shape = var_7725, x = q_153_cast)[name = tensor("op_7726_cast")]; + tensor var_7727 = const()[name = tensor("op_7727"), val = tensor([2, 20, 64, -1])]; + tensor var_7728_cast = reshape(shape = var_7727, x = k_153_cast)[name = tensor("op_7728_cast")]; + tensor var_7729 = const()[name = tensor("op_7729"), val = tensor([2, 20, 64, -1])]; + tensor var_7730_cast = reshape(shape = var_7729, x = v_153_cast)[name = tensor("op_7730_cast")]; + tensor attn_weights_305_transpose_x_0 = const()[name = tensor("attn_weights_305_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_305_transpose_y_0 = const()[name = tensor("attn_weights_305_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_305_cast = matmul(transpose_x = attn_weights_305_transpose_x_0, transpose_y = attn_weights_305_transpose_y_0, x = var_7726_cast, y = var_7728_cast)[name = tensor("attn_weights_305_cast")]; + tensor attn_weights_307_cast = mul(x = attn_weights_305_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_307_cast")]; + tensor var_7734_cast = softmax(axis = var_6849, x = attn_weights_307_cast)[name = tensor("op_7734_cast")]; + tensor attn_153_transpose_x_0 = const()[name = tensor("attn_153_transpose_x_0"), val = tensor(false)]; + tensor attn_153_transpose_y_0 = const()[name = tensor("attn_153_transpose_y_0"), val = tensor(true)]; + tensor attn_153_cast = matmul(transpose_x = attn_153_transpose_x_0, transpose_y = attn_153_transpose_y_0, x = var_7730_cast, y = var_7734_cast)[name = tensor("attn_153_cast")]; + tensor var_7738 = const()[name = tensor("op_7738"), val = tensor([2, 1280, 1, -1])]; + tensor input_471_cast = reshape(shape = var_7738, x = attn_153_cast)[name = tensor("input_471_cast")]; + tensor var_7743 = const()[name = tensor("op_7743"), val = tensor([1, 1])]; + tensor var_7745 = const()[name = tensor("op_7745"), val = tensor([1, 1])]; + tensor var_7747_pad_type_0 = const()[name = tensor("op_7747_pad_type_0"), val = tensor("custom")]; + tensor var_7747_pad_0 = const()[name = tensor("op_7747_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(719629440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(720858304))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(720858496)))]; + tensor var_7747_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_7745, groups = var_6865, pad = var_7747_pad_0, pad_type = var_7747_pad_type_0, strides = var_7743, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_471_cast)[name = tensor("op_7747_cast")]; + tensor inputs_231_cast = add(x = var_7747_cast, y = inputs_229_cast)[name = tensor("inputs_231_cast")]; + tensor var_7751 = const()[name = tensor("op_7751"), val = tensor([1])]; + tensor 
channels_mean_231_cast = reduce_mean(axes = var_7751, keep_dims = var_6860, x = inputs_231_cast)[name = tensor("channels_mean_231_cast")]; + tensor zero_mean_231_cast = sub(x = inputs_231_cast, y = channels_mean_231_cast)[name = tensor("zero_mean_231_cast")]; + tensor zero_mean_sq_231_cast = mul(x = zero_mean_231_cast, y = zero_mean_231_cast)[name = tensor("zero_mean_sq_231_cast")]; + tensor var_7755 = const()[name = tensor("op_7755"), val = tensor([1])]; + tensor var_7756_cast = reduce_mean(axes = var_7755, keep_dims = var_6860, x = zero_mean_sq_231_cast)[name = tensor("op_7756_cast")]; + tensor var_7757_to_fp16 = const()[name = tensor("op_7757_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7758_cast = add(x = var_7756_cast, y = var_7757_to_fp16)[name = tensor("op_7758_cast")]; + tensor denom_231_epsilon_0_to_fp16 = const()[name = tensor("denom_231_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_231_cast = rsqrt(epsilon = denom_231_epsilon_0_to_fp16, x = var_7758_cast)[name = tensor("denom_231_cast")]; + tensor out_231_cast = mul(x = zero_mean_231_cast, y = denom_231_cast)[name = tensor("out_231_cast")]; + tensor var_7762_to_fp16 = const()[name = tensor("op_7762_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(720861120)))]; + tensor var_7763_cast = add(x = out_231_cast, y = var_7762_to_fp16)[name = tensor("op_7763_cast")]; + tensor var_7765_to_fp16 = const()[name = tensor("op_7765_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(720863744)))]; + tensor hidden_states_313_cast = mul(x = var_7763_cast, y = var_7765_to_fp16)[name = tensor("hidden_states_313_cast")]; + tensor var_7772 = const()[name = tensor("op_7772"), val = tensor([1, 1])]; + tensor var_7774 = const()[name = tensor("op_7774"), val = tensor([1, 1])]; + tensor q_155_pad_type_0 = const()[name = tensor("q_155_pad_type_0"), val = tensor("custom")]; + tensor q_155_pad_0 = const()[name = tensor("q_155_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(720866368))), lut = tensor([-0x1.22cp-6, 0x1.234p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_155_cast = conv(dilations = var_7774, groups = var_6865, pad = q_155_pad_0, pad_type = q_155_pad_type_0, strides = var_7772, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_313_cast)[name = tensor("q_155_cast")]; + tensor var_7778 = const()[name = tensor("op_7778"), val = tensor([1, 1])]; + tensor var_7780 = const()[name = tensor("op_7780"), val = tensor([1, 1])]; + tensor k_155_pad_type_0 = const()[name = tensor("k_155_pad_type_0"), val = tensor("custom")]; + tensor k_155_pad_0 = const()[name = tensor("k_155_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721071232))), lut = tensor([-0x1.da8p-7, 0x1.da8p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_155_cast = conv(dilations = var_7780, groups = var_6865, pad = 
k_155_pad_0, pad_type = k_155_pad_type_0, strides = var_7778, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_155_cast")]; + tensor var_7784 = const()[name = tensor("op_7784"), val = tensor([1, 1])]; + tensor var_7786 = const()[name = tensor("op_7786"), val = tensor([1, 1])]; + tensor v_155_pad_type_0 = const()[name = tensor("v_155_pad_type_0"), val = tensor("custom")]; + tensor v_155_pad_0 = const()[name = tensor("v_155_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721398976))), lut = tensor([-0x1.14cp-6, 0x1.15p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_155_cast = conv(dilations = var_7786, groups = var_6865, pad = v_155_pad_0, pad_type = v_155_pad_type_0, strides = var_7784, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_155_cast")]; + tensor var_7790 = const()[name = tensor("op_7790"), val = tensor([2, 20, 64, -1])]; + tensor var_7791_cast = reshape(shape = var_7790, x = q_155_cast)[name = tensor("op_7791_cast")]; + tensor var_7792 = const()[name = tensor("op_7792"), val = tensor([2, 20, 64, -1])]; + tensor var_7793_cast = reshape(shape = var_7792, x = k_155_cast)[name = tensor("op_7793_cast")]; + tensor var_7794 = const()[name = tensor("op_7794"), val = tensor([2, 20, 64, -1])]; + tensor var_7795_cast = reshape(shape = var_7794, x = v_155_cast)[name = tensor("op_7795_cast")]; + tensor attn_weights_309_transpose_x_0 = const()[name = tensor("attn_weights_309_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_309_transpose_y_0 = const()[name = tensor("attn_weights_309_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_309_cast = matmul(transpose_x = attn_weights_309_transpose_x_0, transpose_y = attn_weights_309_transpose_y_0, x = var_7791_cast, y = var_7793_cast)[name = tensor("attn_weights_309_cast")]; + tensor attn_weights_311_cast = mul(x = attn_weights_309_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_311_cast")]; + tensor var_7799_cast = softmax(axis = var_6849, x = attn_weights_311_cast)[name = tensor("op_7799_cast")]; + tensor attn_155_transpose_x_0 = const()[name = tensor("attn_155_transpose_x_0"), val = tensor(false)]; + tensor attn_155_transpose_y_0 = const()[name = tensor("attn_155_transpose_y_0"), val = tensor(true)]; + tensor attn_155_cast = matmul(transpose_x = attn_155_transpose_x_0, transpose_y = attn_155_transpose_y_0, x = var_7795_cast, y = var_7799_cast)[name = tensor("attn_155_cast")]; + tensor var_7803 = const()[name = tensor("op_7803"), val = tensor([2, 1280, 1, -1])]; + tensor input_473_cast = reshape(shape = var_7803, x = attn_155_cast)[name = tensor("input_473_cast")]; + tensor var_7808 = const()[name = tensor("op_7808"), val = tensor([1, 1])]; + tensor var_7810 = const()[name = tensor("op_7810"), val = tensor([1, 1])]; + tensor var_7812_pad_type_0 = const()[name = tensor("op_7812_pad_type_0"), val = tensor("custom")]; + tensor var_7812_pad_0 = const()[name = tensor("op_7812_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721726720))), lut = tensor([-0x1.4acp-7, 0x1.494p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721931584)))]; + tensor var_7812_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_7810, groups = var_6865, pad = var_7812_pad_0, pad_type = var_7812_pad_type_0, strides = var_7808, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_473_cast)[name = tensor("op_7812_cast")]; + tensor inputs_233_cast = add(x = var_7812_cast, y = inputs_231_cast)[name = tensor("inputs_233_cast")]; + tensor var_7816 = const()[name = tensor("op_7816"), val = tensor([1])]; + tensor channels_mean_233_cast = reduce_mean(axes = var_7816, keep_dims = var_6860, x = inputs_233_cast)[name = tensor("channels_mean_233_cast")]; + tensor zero_mean_233_cast = sub(x = inputs_233_cast, y = channels_mean_233_cast)[name = tensor("zero_mean_233_cast")]; + tensor zero_mean_sq_233_cast = mul(x = zero_mean_233_cast, y = zero_mean_233_cast)[name = tensor("zero_mean_sq_233_cast")]; + tensor var_7820 = const()[name = tensor("op_7820"), val = tensor([1])]; + tensor var_7821_cast = reduce_mean(axes = var_7820, keep_dims = var_6860, x = zero_mean_sq_233_cast)[name = tensor("op_7821_cast")]; + tensor var_7822_to_fp16 = const()[name = tensor("op_7822_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7823_cast = add(x = var_7821_cast, y = var_7822_to_fp16)[name = tensor("op_7823_cast")]; + tensor denom_233_epsilon_0_to_fp16 = const()[name = tensor("denom_233_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_233_cast = rsqrt(epsilon = denom_233_epsilon_0_to_fp16, x = var_7823_cast)[name = tensor("denom_233_cast")]; + tensor out_233_cast = mul(x = zero_mean_233_cast, y = denom_233_cast)[name = tensor("out_233_cast")]; + tensor var_7827_to_fp16 = const()[name = tensor("op_7827_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721934208)))]; + tensor var_7828_cast = add(x = out_233_cast, y = var_7827_to_fp16)[name = tensor("op_7828_cast")]; + tensor var_7830_to_fp16 = const()[name = tensor("op_7830_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721936832)))]; + tensor input_475_cast = mul(x = var_7828_cast, y = var_7830_to_fp16)[name = tensor("input_475_cast")]; + tensor var_7838 = const()[name = tensor("op_7838"), val = tensor([1, 1])]; + tensor var_7840 = const()[name = tensor("op_7840"), val = tensor([1, 1])]; + tensor var_7842_pad_type_0 = const()[name = tensor("op_7842_pad_type_0"), val = tensor("custom")]; + tensor var_7842_pad_0 = const()[name = tensor("op_7842_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721939456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(728493120))), name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(728493248)))]; + tensor var_7842_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_7840, groups = var_6865, pad = var_7842_pad_0, pad_type = var_7842_pad_type_0, strides = var_7838, weight = up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_475_cast)[name = tensor("op_7842_cast")]; + tensor var_7843_split_sizes_0 = const()[name = tensor("op_7843_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7843_axis_0 = const()[name = tensor("op_7843_axis_0"), val = tensor(1)]; + tensor var_7843_cast_0, tensor var_7843_cast_1 = split(axis = var_7843_axis_0, split_sizes = var_7843_split_sizes_0, x = var_7842_cast)[name = tensor("op_7843_cast")]; + tensor var_7845_mode_0 = const()[name = tensor("op_7845_mode_0"), val = tensor("EXACT")]; + tensor var_7845_cast = gelu(mode = var_7845_mode_0, x = var_7843_cast_1)[name = tensor("op_7845_cast")]; + tensor input_477_cast = mul(x = var_7843_cast_0, y = var_7845_cast)[name = tensor("input_477_cast")]; + tensor var_7849 = const()[name = tensor("op_7849"), val = tensor([1, 1])]; + tensor var_7851 = const()[name = tensor("op_7851"), val = tensor([1, 1])]; + tensor var_7853_pad_type_0 = const()[name = tensor("op_7853_pad_type_0"), val = tensor("custom")]; + tensor var_7853_pad_0 = const()[name = tensor("op_7853_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(728513792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(731790656))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(731790784)))]; + tensor var_7853_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_7851, groups = var_6865, pad = var_7853_pad_0, pad_type = var_7853_pad_type_0, strides = var_7849, weight = up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_477_cast)[name = tensor("op_7853_cast")]; + tensor inputs_235_cast = add(x = var_7853_cast, y = inputs_233_cast)[name = tensor("inputs_235_cast")]; + tensor var_7863 = const()[name = tensor("op_7863"), val = tensor([1])]; + tensor channels_mean_235_cast = reduce_mean(axes = var_7863, keep_dims = var_6860, x = inputs_235_cast)[name = tensor("channels_mean_235_cast")]; + tensor zero_mean_235_cast = sub(x = inputs_235_cast, y = channels_mean_235_cast)[name = tensor("zero_mean_235_cast")]; + tensor zero_mean_sq_235_cast = mul(x = zero_mean_235_cast, y = zero_mean_235_cast)[name = tensor("zero_mean_sq_235_cast")]; + tensor var_7867 = const()[name = tensor("op_7867"), val = 
tensor([1])]; + tensor var_7868_cast = reduce_mean(axes = var_7867, keep_dims = var_6860, x = zero_mean_sq_235_cast)[name = tensor("op_7868_cast")]; + tensor var_7869_to_fp16 = const()[name = tensor("op_7869_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7870_cast = add(x = var_7868_cast, y = var_7869_to_fp16)[name = tensor("op_7870_cast")]; + tensor denom_235_epsilon_0_to_fp16 = const()[name = tensor("denom_235_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_235_cast = rsqrt(epsilon = denom_235_epsilon_0_to_fp16, x = var_7870_cast)[name = tensor("denom_235_cast")]; + tensor out_235_cast = mul(x = zero_mean_235_cast, y = denom_235_cast)[name = tensor("out_235_cast")]; + tensor var_7874_to_fp16 = const()[name = tensor("op_7874_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(731793408)))]; + tensor var_7875_cast = add(x = out_235_cast, y = var_7874_to_fp16)[name = tensor("op_7875_cast")]; + tensor var_7877_to_fp16 = const()[name = tensor("op_7877_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(731796032)))]; + tensor hidden_states_317_cast = mul(x = var_7875_cast, y = var_7877_to_fp16)[name = tensor("hidden_states_317_cast")]; + tensor var_7884 = const()[name = tensor("op_7884"), val = tensor([1, 1])]; + tensor var_7886 = const()[name = tensor("op_7886"), val = tensor([1, 1])]; + tensor q_157_pad_type_0 = const()[name = tensor("q_157_pad_type_0"), val = tensor("custom")]; + tensor q_157_pad_0 = const()[name = tensor("q_157_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(731798656))), lut = tensor([-0x1.66cp-5, -0x1.b04p-7, 0x1.adcp-7, 0x1.66cp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_157_cast = conv(dilations = var_7886, groups = var_6865, pad = q_157_pad_0, pad_type = q_157_pad_type_0, strides = var_7884, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_317_cast)[name = tensor("q_157_cast")]; + tensor var_7890 = const()[name = tensor("op_7890"), val = tensor([1, 1])]; + tensor var_7892 = const()[name = tensor("op_7892"), val = tensor([1, 1])]; + tensor k_157_pad_type_0 = const()[name = tensor("k_157_pad_type_0"), val = tensor("custom")]; + tensor k_157_pad_0 = const()[name = tensor("k_157_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(732208320))), lut = tensor([-0x1.684p-5, -0x1.b38p-7, 0x1.accp-7, 0x1.66cp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_157_cast = conv(dilations = var_7892, groups = var_6865, pad = k_157_pad_0, pad_type = k_157_pad_type_0, strides = var_7890, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_317_cast)[name = tensor("k_157_cast")]; + tensor var_7896 = const()[name = tensor("op_7896"), val = tensor([1, 1])]; + tensor var_7898 = const()[name = tensor("op_7898"), val = tensor([1, 1])]; + tensor v_157_pad_type_0 = 
const()[name = tensor("v_157_pad_type_0"), val = tensor("custom")]; + tensor v_157_pad_0 = const()[name = tensor("v_157_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(732617984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(733437248))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_157_cast = conv(dilations = var_7898, groups = var_6865, pad = v_157_pad_0, pad_type = v_157_pad_type_0, strides = var_7896, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_317_cast)[name = tensor("v_157_cast")]; + tensor var_7902 = const()[name = tensor("op_7902"), val = tensor([2, 20, 64, -1])]; + tensor var_7903_cast = reshape(shape = var_7902, x = q_157_cast)[name = tensor("op_7903_cast")]; + tensor var_7904 = const()[name = tensor("op_7904"), val = tensor([2, 20, 64, -1])]; + tensor var_7905_cast = reshape(shape = var_7904, x = k_157_cast)[name = tensor("op_7905_cast")]; + tensor var_7906 = const()[name = tensor("op_7906"), val = tensor([2, 20, 64, -1])]; + tensor var_7907_cast = reshape(shape = var_7906, x = v_157_cast)[name = tensor("op_7907_cast")]; + tensor attn_weights_313_transpose_x_0 = const()[name = tensor("attn_weights_313_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_313_transpose_y_0 = const()[name = tensor("attn_weights_313_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_313_cast = matmul(transpose_x = attn_weights_313_transpose_x_0, transpose_y = attn_weights_313_transpose_y_0, x = var_7903_cast, y = var_7905_cast)[name = tensor("attn_weights_313_cast")]; + tensor attn_weights_315_cast = mul(x = attn_weights_313_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_315_cast")]; + tensor var_7911_cast = softmax(axis = var_6849, x = attn_weights_315_cast)[name = tensor("op_7911_cast")]; + tensor attn_157_transpose_x_0 = const()[name = tensor("attn_157_transpose_x_0"), val = tensor(false)]; + tensor attn_157_transpose_y_0 = const()[name = tensor("attn_157_transpose_y_0"), val = tensor(true)]; + tensor attn_157_cast = matmul(transpose_x = attn_157_transpose_x_0, transpose_y = attn_157_transpose_y_0, x = var_7907_cast, y = var_7911_cast)[name = tensor("attn_157_cast")]; + tensor var_7915 = const()[name = tensor("op_7915"), val = tensor([2, 1280, 1, -1])]; + tensor input_479_cast = reshape(shape = var_7915, x = attn_157_cast)[name = tensor("input_479_cast")]; + tensor var_7920 = const()[name = tensor("op_7920"), val = tensor([1, 1])]; + tensor var_7922 = const()[name = tensor("op_7922"), val = tensor([1, 1])]; + tensor var_7924_pad_type_0 = const()[name = tensor("op_7924_pad_type_0"), val = tensor("custom")]; + tensor var_7924_pad_0 = const()[name = tensor("op_7924_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(733437376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(734666240))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + 
tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(734666432)))]; + tensor var_7924_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_7922, groups = var_6865, pad = var_7924_pad_0, pad_type = var_7924_pad_type_0, strides = var_7920, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_479_cast)[name = tensor("op_7924_cast")]; + tensor inputs_237_cast = add(x = var_7924_cast, y = inputs_235_cast)[name = tensor("inputs_237_cast")]; + tensor var_7928 = const()[name = tensor("op_7928"), val = tensor([1])]; + tensor channels_mean_237_cast = reduce_mean(axes = var_7928, keep_dims = var_6860, x = inputs_237_cast)[name = tensor("channels_mean_237_cast")]; + tensor zero_mean_237_cast = sub(x = inputs_237_cast, y = channels_mean_237_cast)[name = tensor("zero_mean_237_cast")]; + tensor zero_mean_sq_237_cast = mul(x = zero_mean_237_cast, y = zero_mean_237_cast)[name = tensor("zero_mean_sq_237_cast")]; + tensor var_7932 = const()[name = tensor("op_7932"), val = tensor([1])]; + tensor var_7933_cast = reduce_mean(axes = var_7932, keep_dims = var_6860, x = zero_mean_sq_237_cast)[name = tensor("op_7933_cast")]; + tensor var_7934_to_fp16 = const()[name = tensor("op_7934_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7935_cast = add(x = var_7933_cast, y = var_7934_to_fp16)[name = tensor("op_7935_cast")]; + tensor denom_237_epsilon_0_to_fp16 = const()[name = tensor("denom_237_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_237_cast = rsqrt(epsilon = denom_237_epsilon_0_to_fp16, x = var_7935_cast)[name = tensor("denom_237_cast")]; + tensor out_237_cast = mul(x = zero_mean_237_cast, y = denom_237_cast)[name = tensor("out_237_cast")]; + tensor var_7939_to_fp16 = const()[name = tensor("op_7939_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(734669056)))]; + tensor var_7940_cast = add(x = out_237_cast, y = var_7939_to_fp16)[name = tensor("op_7940_cast")]; + tensor var_7942_to_fp16 = const()[name = tensor("op_7942_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(734671680)))]; + tensor hidden_states_319_cast = mul(x = var_7940_cast, y = var_7942_to_fp16)[name = tensor("hidden_states_319_cast")]; + tensor var_7949 = const()[name = tensor("op_7949"), val = tensor([1, 1])]; + tensor var_7951 = const()[name = tensor("op_7951"), val = tensor([1, 1])]; + tensor q_159_pad_type_0 = const()[name = tensor("q_159_pad_type_0"), val = tensor("custom")]; + tensor q_159_pad_0 = const()[name = tensor("q_159_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(734674304))), lut = tensor([-0x1.144p-6, 0x1.148p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_159_cast = conv(dilations = var_7951, groups = var_6865, pad = q_159_pad_0, pad_type = q_159_pad_type_0, strides = var_7949, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = 
hidden_states_319_cast)[name = tensor("q_159_cast")]; + tensor var_7955 = const()[name = tensor("op_7955"), val = tensor([1, 1])]; + tensor var_7957 = const()[name = tensor("op_7957"), val = tensor([1, 1])]; + tensor k_159_pad_type_0 = const()[name = tensor("k_159_pad_type_0"), val = tensor("custom")]; + tensor k_159_pad_0 = const()[name = tensor("k_159_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(734879168))), lut = tensor([-0x1.b5cp-7, 0x1.b6p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_159_cast = conv(dilations = var_7957, groups = var_6865, pad = k_159_pad_0, pad_type = k_159_pad_type_0, strides = var_7955, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_159_cast")]; + tensor var_7961 = const()[name = tensor("op_7961"), val = tensor([1, 1])]; + tensor var_7963 = const()[name = tensor("op_7963"), val = tensor([1, 1])]; + tensor v_159_pad_type_0 = const()[name = tensor("v_159_pad_type_0"), val = tensor("custom")]; + tensor v_159_pad_0 = const()[name = tensor("v_159_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(735206912))), lut = tensor([-0x1.0cp-6, 0x1.0c8p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_159_cast = conv(dilations = var_7963, groups = var_6865, pad = v_159_pad_0, pad_type = v_159_pad_type_0, strides = var_7961, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_159_cast")]; + tensor var_7967 = const()[name = tensor("op_7967"), val = tensor([2, 20, 64, -1])]; + tensor var_7968_cast = reshape(shape = var_7967, x = q_159_cast)[name = tensor("op_7968_cast")]; + tensor var_7969 = const()[name = tensor("op_7969"), val = tensor([2, 20, 64, -1])]; + tensor var_7970_cast = reshape(shape = var_7969, x = k_159_cast)[name = tensor("op_7970_cast")]; + tensor var_7971 = const()[name = tensor("op_7971"), val = tensor([2, 20, 64, -1])]; + tensor var_7972_cast = reshape(shape = var_7971, x = v_159_cast)[name = tensor("op_7972_cast")]; + tensor attn_weights_317_transpose_x_0 = const()[name = tensor("attn_weights_317_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_317_transpose_y_0 = const()[name = tensor("attn_weights_317_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_317_cast = matmul(transpose_x = attn_weights_317_transpose_x_0, transpose_y = attn_weights_317_transpose_y_0, x = var_7968_cast, y = var_7970_cast)[name = tensor("attn_weights_317_cast")]; + tensor attn_weights_319_cast = mul(x = attn_weights_317_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_319_cast")]; + tensor var_7976_cast = softmax(axis = var_6849, x = attn_weights_319_cast)[name = tensor("op_7976_cast")]; + tensor attn_159_transpose_x_0 = const()[name = tensor("attn_159_transpose_x_0"), val = tensor(false)]; + tensor attn_159_transpose_y_0 = const()[name = tensor("attn_159_transpose_y_0"), val = 
tensor(true)]; + tensor attn_159_cast = matmul(transpose_x = attn_159_transpose_x_0, transpose_y = attn_159_transpose_y_0, x = var_7972_cast, y = var_7976_cast)[name = tensor("attn_159_cast")]; + tensor var_7980 = const()[name = tensor("op_7980"), val = tensor([2, 1280, 1, -1])]; + tensor input_481_cast = reshape(shape = var_7980, x = attn_159_cast)[name = tensor("input_481_cast")]; + tensor var_7985 = const()[name = tensor("op_7985"), val = tensor([1, 1])]; + tensor var_7987 = const()[name = tensor("op_7987"), val = tensor([1, 1])]; + tensor var_7989_pad_type_0 = const()[name = tensor("op_7989_pad_type_0"), val = tensor("custom")]; + tensor var_7989_pad_0 = const()[name = tensor("op_7989_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(735534656))), lut = tensor([-0x1.46p-7, 0x1.45cp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(735739520)))]; + tensor var_7989_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_7987, groups = var_6865, pad = var_7989_pad_0, pad_type = var_7989_pad_type_0, strides = var_7985, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_481_cast)[name = tensor("op_7989_cast")]; + tensor inputs_239_cast = add(x = var_7989_cast, y = inputs_237_cast)[name = tensor("inputs_239_cast")]; + tensor var_7993 = const()[name = tensor("op_7993"), val = tensor([1])]; + tensor channels_mean_239_cast = reduce_mean(axes = var_7993, keep_dims = var_6860, x = inputs_239_cast)[name = tensor("channels_mean_239_cast")]; + tensor zero_mean_239_cast = sub(x = inputs_239_cast, y = channels_mean_239_cast)[name = tensor("zero_mean_239_cast")]; + tensor zero_mean_sq_239_cast = mul(x = zero_mean_239_cast, y = zero_mean_239_cast)[name = tensor("zero_mean_sq_239_cast")]; + tensor var_7997 = const()[name = tensor("op_7997"), val = tensor([1])]; + tensor var_7998_cast = reduce_mean(axes = var_7997, keep_dims = var_6860, x = zero_mean_sq_239_cast)[name = tensor("op_7998_cast")]; + tensor var_7999_to_fp16 = const()[name = tensor("op_7999_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8000_cast = add(x = var_7998_cast, y = var_7999_to_fp16)[name = tensor("op_8000_cast")]; + tensor denom_239_epsilon_0_to_fp16 = const()[name = tensor("denom_239_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_239_cast = rsqrt(epsilon = denom_239_epsilon_0_to_fp16, x = var_8000_cast)[name = tensor("denom_239_cast")]; + tensor out_239_cast = mul(x = zero_mean_239_cast, y = denom_239_cast)[name = tensor("out_239_cast")]; + tensor var_8004_to_fp16 = const()[name = tensor("op_8004_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(735742144)))]; + tensor var_8005_cast = add(x = out_239_cast, y = var_8004_to_fp16)[name = tensor("op_8005_cast")]; + tensor var_8007_to_fp16 = const()[name = tensor("op_8007_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(735744768)))]; + tensor input_483_cast = mul(x = var_8005_cast, y = var_8007_to_fp16)[name = tensor("input_483_cast")]; + tensor var_8015 = const()[name = tensor("op_8015"), val = tensor([1, 1])]; + tensor var_8017 = const()[name = tensor("op_8017"), val = tensor([1, 1])]; + tensor var_8019_pad_type_0 = const()[name = tensor("op_8019_pad_type_0"), val = tensor("custom")]; + tensor var_8019_pad_0 = const()[name = tensor("op_8019_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(735747392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(742301056))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(742301184)))]; + tensor var_8019_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_8017, groups = var_6865, pad = var_8019_pad_0, pad_type = var_8019_pad_type_0, strides = var_8015, weight = up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_483_cast)[name = tensor("op_8019_cast")]; + tensor var_8020_split_sizes_0 = const()[name = tensor("op_8020_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8020_axis_0 = const()[name = tensor("op_8020_axis_0"), val = tensor(1)]; + tensor var_8020_cast_0, tensor var_8020_cast_1 = split(axis = var_8020_axis_0, split_sizes = var_8020_split_sizes_0, x = var_8019_cast)[name = tensor("op_8020_cast")]; + tensor var_8022_mode_0 = const()[name = tensor("op_8022_mode_0"), val = tensor("EXACT")]; + tensor var_8022_cast = gelu(mode = var_8022_mode_0, x = var_8020_cast_1)[name = tensor("op_8022_cast")]; + tensor input_485_cast = mul(x = var_8020_cast_0, y = var_8022_cast)[name = tensor("input_485_cast")]; + tensor var_8026 = const()[name = tensor("op_8026"), val = tensor([1, 1])]; + tensor var_8028 = const()[name = tensor("op_8028"), val = tensor([1, 1])]; + tensor var_8030_pad_type_0 = const()[name = tensor("op_8030_pad_type_0"), val = tensor("custom")]; + tensor var_8030_pad_0 = const()[name = tensor("op_8030_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(742321728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(745598592))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(745598720)))]; + tensor var_8030_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_8028, groups = var_6865, pad = var_8030_pad_0, pad_type = 
var_8030_pad_type_0, strides = var_8026, weight = up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_485_cast)[name = tensor("op_8030_cast")]; + tensor inputs_241_cast = add(x = var_8030_cast, y = inputs_239_cast)[name = tensor("inputs_241_cast")]; + tensor var_8040 = const()[name = tensor("op_8040"), val = tensor([1])]; + tensor channels_mean_241_cast = reduce_mean(axes = var_8040, keep_dims = var_6860, x = inputs_241_cast)[name = tensor("channels_mean_241_cast")]; + tensor zero_mean_241_cast = sub(x = inputs_241_cast, y = channels_mean_241_cast)[name = tensor("zero_mean_241_cast")]; + tensor zero_mean_sq_241_cast = mul(x = zero_mean_241_cast, y = zero_mean_241_cast)[name = tensor("zero_mean_sq_241_cast")]; + tensor var_8044 = const()[name = tensor("op_8044"), val = tensor([1])]; + tensor var_8045_cast = reduce_mean(axes = var_8044, keep_dims = var_6860, x = zero_mean_sq_241_cast)[name = tensor("op_8045_cast")]; + tensor var_8046_to_fp16 = const()[name = tensor("op_8046_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8047_cast = add(x = var_8045_cast, y = var_8046_to_fp16)[name = tensor("op_8047_cast")]; + tensor denom_241_epsilon_0_to_fp16 = const()[name = tensor("denom_241_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_241_cast = rsqrt(epsilon = denom_241_epsilon_0_to_fp16, x = var_8047_cast)[name = tensor("denom_241_cast")]; + tensor out_241_cast = mul(x = zero_mean_241_cast, y = denom_241_cast)[name = tensor("out_241_cast")]; + tensor var_8051_to_fp16 = const()[name = tensor("op_8051_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(745601344)))]; + tensor var_8052_cast = add(x = out_241_cast, y = var_8051_to_fp16)[name = tensor("op_8052_cast")]; + tensor var_8054_to_fp16 = const()[name = tensor("op_8054_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(745603968)))]; + tensor hidden_states_323_cast = mul(x = var_8052_cast, y = var_8054_to_fp16)[name = tensor("hidden_states_323_cast")]; + tensor var_8061 = const()[name = tensor("op_8061"), val = tensor([1, 1])]; + tensor var_8063 = const()[name = tensor("op_8063"), val = tensor([1, 1])]; + tensor q_161_pad_type_0 = const()[name = tensor("q_161_pad_type_0"), val = tensor("custom")]; + tensor q_161_pad_0 = const()[name = tensor("q_161_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(745606592))), lut = tensor([-0x1.7a8p-6, 0x1.7cp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_161_cast = conv(dilations = var_8063, groups = var_6865, pad = q_161_pad_0, pad_type = q_161_pad_type_0, strides = var_8061, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_323_cast)[name = tensor("q_161_cast")]; + tensor var_8067 = const()[name = tensor("op_8067"), val = tensor([1, 1])]; + tensor var_8069 = const()[name = tensor("op_8069"), val = tensor([1, 1])]; + tensor k_161_pad_type_0 = const()[name = tensor("k_161_pad_type_0"), val = tensor("custom")]; + tensor k_161_pad_0 = const()[name = tensor("k_161_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(745811456))), lut = tensor([-0x1.7b8p-6, 0x1.7bcp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_161_cast = conv(dilations = var_8069, groups = var_6865, pad = k_161_pad_0, pad_type = k_161_pad_type_0, strides = var_8067, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_323_cast)[name = tensor("k_161_cast")]; + tensor var_8073 = const()[name = tensor("op_8073"), val = tensor([1, 1])]; + tensor var_8075 = const()[name = tensor("op_8075"), val = tensor([1, 1])]; + tensor v_161_pad_type_0 = const()[name = tensor("v_161_pad_type_0"), val = tensor("custom")]; + tensor v_161_pad_0 = const()[name = tensor("v_161_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(746016320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(746835584))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_161_cast = conv(dilations = var_8075, groups = var_6865, pad = v_161_pad_0, pad_type = v_161_pad_type_0, strides = var_8073, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_323_cast)[name = tensor("v_161_cast")]; + tensor var_8079 = const()[name = tensor("op_8079"), val = tensor([2, 20, 64, -1])]; + tensor var_8080_cast = reshape(shape = var_8079, x = q_161_cast)[name = tensor("op_8080_cast")]; + tensor var_8081 = const()[name = tensor("op_8081"), val = tensor([2, 20, 64, -1])]; + tensor var_8082_cast = reshape(shape = var_8081, x = k_161_cast)[name = tensor("op_8082_cast")]; + tensor var_8083 = const()[name = tensor("op_8083"), val = tensor([2, 20, 64, -1])]; + tensor var_8084_cast = reshape(shape = var_8083, x = v_161_cast)[name = tensor("op_8084_cast")]; + tensor attn_weights_321_transpose_x_0 = const()[name = tensor("attn_weights_321_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_321_transpose_y_0 = const()[name = tensor("attn_weights_321_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_321_cast = matmul(transpose_x = attn_weights_321_transpose_x_0, transpose_y = attn_weights_321_transpose_y_0, x = var_8080_cast, y = var_8082_cast)[name = tensor("attn_weights_321_cast")]; + tensor attn_weights_323_cast = mul(x = attn_weights_321_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_323_cast")]; + tensor var_8088_cast = softmax(axis = var_6849, x = attn_weights_323_cast)[name = tensor("op_8088_cast")]; + tensor attn_161_transpose_x_0 = const()[name = tensor("attn_161_transpose_x_0"), val = tensor(false)]; + tensor attn_161_transpose_y_0 = const()[name = tensor("attn_161_transpose_y_0"), val = tensor(true)]; + tensor attn_161_cast = matmul(transpose_x = attn_161_transpose_x_0, transpose_y = attn_161_transpose_y_0, x = var_8084_cast, y = var_8088_cast)[name = tensor("attn_161_cast")]; + tensor var_8092 = const()[name = tensor("op_8092"), val = tensor([2, 1280, 1, -1])]; + tensor input_487_cast = reshape(shape = var_8092, x = attn_161_cast)[name = tensor("input_487_cast")]; + tensor var_8097 = const()[name = 
tensor("op_8097"), val = tensor([1, 1])]; + tensor var_8099 = const()[name = tensor("op_8099"), val = tensor([1, 1])]; + tensor var_8101_pad_type_0 = const()[name = tensor("op_8101_pad_type_0"), val = tensor("custom")]; + tensor var_8101_pad_0 = const()[name = tensor("op_8101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(746835712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748064576))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748064768)))]; + tensor var_8101_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_8099, groups = var_6865, pad = var_8101_pad_0, pad_type = var_8101_pad_type_0, strides = var_8097, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_487_cast)[name = tensor("op_8101_cast")]; + tensor inputs_243_cast = add(x = var_8101_cast, y = inputs_241_cast)[name = tensor("inputs_243_cast")]; + tensor var_8105 = const()[name = tensor("op_8105"), val = tensor([1])]; + tensor channels_mean_243_cast = reduce_mean(axes = var_8105, keep_dims = var_6860, x = inputs_243_cast)[name = tensor("channels_mean_243_cast")]; + tensor zero_mean_243_cast = sub(x = inputs_243_cast, y = channels_mean_243_cast)[name = tensor("zero_mean_243_cast")]; + tensor zero_mean_sq_243_cast = mul(x = zero_mean_243_cast, y = zero_mean_243_cast)[name = tensor("zero_mean_sq_243_cast")]; + tensor var_8109 = const()[name = tensor("op_8109"), val = tensor([1])]; + tensor var_8110_cast = reduce_mean(axes = var_8109, keep_dims = var_6860, x = zero_mean_sq_243_cast)[name = tensor("op_8110_cast")]; + tensor var_8111_to_fp16 = const()[name = tensor("op_8111_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8112_cast = add(x = var_8110_cast, y = var_8111_to_fp16)[name = tensor("op_8112_cast")]; + tensor denom_243_epsilon_0_to_fp16 = const()[name = tensor("denom_243_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_243_cast = rsqrt(epsilon = denom_243_epsilon_0_to_fp16, x = var_8112_cast)[name = tensor("denom_243_cast")]; + tensor out_243_cast = mul(x = zero_mean_243_cast, y = denom_243_cast)[name = tensor("out_243_cast")]; + tensor var_8116_to_fp16 = const()[name = tensor("op_8116_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748067392)))]; + tensor var_8117_cast = add(x = out_243_cast, y = var_8116_to_fp16)[name = tensor("op_8117_cast")]; + tensor var_8119_to_fp16 = const()[name = tensor("op_8119_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748070016)))]; + tensor hidden_states_325_cast = mul(x = var_8117_cast, y = var_8119_to_fp16)[name = tensor("hidden_states_325_cast")]; + tensor var_8126 = const()[name = tensor("op_8126"), val = tensor([1, 1])]; + tensor var_8128 = const()[name = tensor("op_8128"), val = tensor([1, 1])]; + tensor q_163_pad_type_0 = const()[name = 
tensor("q_163_pad_type_0"), val = tensor("custom")]; + tensor q_163_pad_0 = const()[name = tensor("q_163_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748072640))), lut = tensor([-0x1.f4cp-7, 0x1.f54p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_163_cast = conv(dilations = var_8128, groups = var_6865, pad = q_163_pad_0, pad_type = q_163_pad_type_0, strides = var_8126, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_325_cast)[name = tensor("q_163_cast")]; + tensor var_8132 = const()[name = tensor("op_8132"), val = tensor([1, 1])]; + tensor var_8134 = const()[name = tensor("op_8134"), val = tensor([1, 1])]; + tensor k_163_pad_type_0 = const()[name = tensor("k_163_pad_type_0"), val = tensor("custom")]; + tensor k_163_pad_0 = const()[name = tensor("k_163_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748277504))), lut = tensor([-0x1.7dcp-7, 0x1.7dp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_163_cast = conv(dilations = var_8134, groups = var_6865, pad = k_163_pad_0, pad_type = k_163_pad_type_0, strides = var_8132, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_163_cast")]; + tensor var_8138 = const()[name = tensor("op_8138"), val = tensor([1, 1])]; + tensor var_8140 = const()[name = tensor("op_8140"), val = tensor([1, 1])]; + tensor v_163_pad_type_0 = const()[name = tensor("v_163_pad_type_0"), val = tensor("custom")]; + tensor v_163_pad_0 = const()[name = tensor("v_163_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748605248))), lut = tensor([-0x1.ed4p-7, 0x1.ea4p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_163_cast = conv(dilations = var_8140, groups = var_6865, pad = v_163_pad_0, pad_type = v_163_pad_type_0, strides = var_8138, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_163_cast")]; + tensor var_8144 = const()[name = tensor("op_8144"), val = tensor([2, 20, 64, -1])]; + tensor var_8145_cast = reshape(shape = var_8144, x = q_163_cast)[name = tensor("op_8145_cast")]; + tensor var_8146 = const()[name = tensor("op_8146"), val = tensor([2, 20, 64, -1])]; + tensor var_8147_cast = reshape(shape = var_8146, x = k_163_cast)[name = tensor("op_8147_cast")]; + tensor var_8148 = const()[name = tensor("op_8148"), val = tensor([2, 20, 64, -1])]; + tensor var_8149_cast = reshape(shape = var_8148, x = v_163_cast)[name = tensor("op_8149_cast")]; + tensor attn_weights_325_transpose_x_0 = const()[name = tensor("attn_weights_325_transpose_x_0"), val = 
tensor(true)]; + tensor attn_weights_325_transpose_y_0 = const()[name = tensor("attn_weights_325_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_325_cast = matmul(transpose_x = attn_weights_325_transpose_x_0, transpose_y = attn_weights_325_transpose_y_0, x = var_8145_cast, y = var_8147_cast)[name = tensor("attn_weights_325_cast")]; + tensor attn_weights_327_cast = mul(x = attn_weights_325_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_327_cast")]; + tensor var_8153_cast = softmax(axis = var_6849, x = attn_weights_327_cast)[name = tensor("op_8153_cast")]; + tensor attn_163_transpose_x_0 = const()[name = tensor("attn_163_transpose_x_0"), val = tensor(false)]; + tensor attn_163_transpose_y_0 = const()[name = tensor("attn_163_transpose_y_0"), val = tensor(true)]; + tensor attn_163_cast = matmul(transpose_x = attn_163_transpose_x_0, transpose_y = attn_163_transpose_y_0, x = var_8149_cast, y = var_8153_cast)[name = tensor("attn_163_cast")]; + tensor var_8157 = const()[name = tensor("op_8157"), val = tensor([2, 1280, 1, -1])]; + tensor input_489_cast = reshape(shape = var_8157, x = attn_163_cast)[name = tensor("input_489_cast")]; + tensor var_8162 = const()[name = tensor("op_8162"), val = tensor([1, 1])]; + tensor var_8164 = const()[name = tensor("op_8164"), val = tensor([1, 1])]; + tensor var_8166_pad_type_0 = const()[name = tensor("op_8166_pad_type_0"), val = tensor("custom")]; + tensor var_8166_pad_0 = const()[name = tensor("op_8166_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748932992))), lut = tensor([-0x1.2bp-7, 0x1.2b4p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(749137856)))]; + tensor var_8166_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_8164, groups = var_6865, pad = var_8166_pad_0, pad_type = var_8166_pad_type_0, strides = var_8162, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_489_cast)[name = tensor("op_8166_cast")]; + tensor inputs_245_cast = add(x = var_8166_cast, y = inputs_243_cast)[name = tensor("inputs_245_cast")]; + tensor var_8170 = const()[name = tensor("op_8170"), val = tensor([1])]; + tensor channels_mean_245_cast = reduce_mean(axes = var_8170, keep_dims = var_6860, x = inputs_245_cast)[name = tensor("channels_mean_245_cast")]; + tensor zero_mean_245_cast = sub(x = inputs_245_cast, y = channels_mean_245_cast)[name = tensor("zero_mean_245_cast")]; + tensor zero_mean_sq_245_cast = mul(x = zero_mean_245_cast, y = zero_mean_245_cast)[name = tensor("zero_mean_sq_245_cast")]; + tensor var_8174 = const()[name = tensor("op_8174"), val = tensor([1])]; + tensor var_8175_cast = reduce_mean(axes = var_8174, keep_dims = var_6860, x = zero_mean_sq_245_cast)[name = tensor("op_8175_cast")]; + tensor var_8176_to_fp16 = const()[name = tensor("op_8176_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8177_cast = add(x = var_8175_cast, y = var_8176_to_fp16)[name = 
tensor("op_8177_cast")]; + tensor denom_245_epsilon_0_to_fp16 = const()[name = tensor("denom_245_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_245_cast = rsqrt(epsilon = denom_245_epsilon_0_to_fp16, x = var_8177_cast)[name = tensor("denom_245_cast")]; + tensor out_245_cast = mul(x = zero_mean_245_cast, y = denom_245_cast)[name = tensor("out_245_cast")]; + tensor var_8181_to_fp16 = const()[name = tensor("op_8181_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(749140480)))]; + tensor var_8182_cast = add(x = out_245_cast, y = var_8181_to_fp16)[name = tensor("op_8182_cast")]; + tensor var_8184_to_fp16 = const()[name = tensor("op_8184_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(749143104)))]; + tensor input_491_cast = mul(x = var_8182_cast, y = var_8184_to_fp16)[name = tensor("input_491_cast")]; + tensor var_8192 = const()[name = tensor("op_8192"), val = tensor([1, 1])]; + tensor var_8194 = const()[name = tensor("op_8194"), val = tensor([1, 1])]; + tensor var_8196_pad_type_0 = const()[name = tensor("op_8196_pad_type_0"), val = tensor("custom")]; + tensor var_8196_pad_0 = const()[name = tensor("op_8196_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(749145728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(755699392))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(755699520)))]; + tensor var_8196_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_8194, groups = var_6865, pad = var_8196_pad_0, pad_type = var_8196_pad_type_0, strides = var_8192, weight = up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_491_cast)[name = tensor("op_8196_cast")]; + tensor var_8197_split_sizes_0 = const()[name = tensor("op_8197_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8197_axis_0 = const()[name = tensor("op_8197_axis_0"), val = tensor(1)]; + tensor var_8197_cast_0, tensor var_8197_cast_1 = split(axis = var_8197_axis_0, split_sizes = var_8197_split_sizes_0, x = var_8196_cast)[name = tensor("op_8197_cast")]; + tensor var_8199_mode_0 = const()[name = tensor("op_8199_mode_0"), val = tensor("EXACT")]; + tensor var_8199_cast = gelu(mode = var_8199_mode_0, x = var_8197_cast_1)[name = tensor("op_8199_cast")]; + tensor input_493_cast = mul(x = var_8197_cast_0, y = var_8199_cast)[name = tensor("input_493_cast")]; + tensor var_8203 = const()[name = tensor("op_8203"), val = tensor([1, 1])]; + tensor var_8205 = const()[name = tensor("op_8205"), val = tensor([1, 1])]; + tensor var_8207_pad_type_0 = const()[name = tensor("op_8207_pad_type_0"), val = tensor("custom")]; + tensor var_8207_pad_0 = const()[name = tensor("op_8207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(755720064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(758996928))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(758997056)))]; + tensor var_8207_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_8205, groups = var_6865, pad = var_8207_pad_0, pad_type = var_8207_pad_type_0, strides = var_8203, weight = up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_493_cast)[name = tensor("op_8207_cast")]; + tensor inputs_247_cast = add(x = var_8207_cast, y = inputs_245_cast)[name = tensor("inputs_247_cast")]; + tensor var_8217 = const()[name = tensor("op_8217"), val = tensor([1])]; + tensor channels_mean_247_cast = reduce_mean(axes = var_8217, keep_dims = var_6860, x = inputs_247_cast)[name = tensor("channels_mean_247_cast")]; + tensor zero_mean_247_cast = sub(x = inputs_247_cast, y = channels_mean_247_cast)[name = tensor("zero_mean_247_cast")]; + tensor zero_mean_sq_247_cast = mul(x = zero_mean_247_cast, y = zero_mean_247_cast)[name = tensor("zero_mean_sq_247_cast")]; + tensor var_8221 = const()[name = tensor("op_8221"), val = tensor([1])]; + tensor var_8222_cast = reduce_mean(axes = var_8221, keep_dims = var_6860, x = zero_mean_sq_247_cast)[name = tensor("op_8222_cast")]; + tensor var_8223_to_fp16 = const()[name = tensor("op_8223_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8224_cast = add(x = var_8222_cast, y = var_8223_to_fp16)[name = tensor("op_8224_cast")]; + tensor denom_247_epsilon_0_to_fp16 = const()[name = tensor("denom_247_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_247_cast = rsqrt(epsilon = denom_247_epsilon_0_to_fp16, x = var_8224_cast)[name = tensor("denom_247_cast")]; + tensor out_247_cast = mul(x = zero_mean_247_cast, y = denom_247_cast)[name = tensor("out_247_cast")]; + tensor var_8228_to_fp16 = const()[name = tensor("op_8228_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(758999680)))]; + tensor var_8229_cast = add(x = out_247_cast, y = var_8228_to_fp16)[name = tensor("op_8229_cast")]; + tensor var_8231_to_fp16 = const()[name = tensor("op_8231_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(759002304)))]; + tensor hidden_states_329_cast = mul(x = var_8229_cast, y = var_8231_to_fp16)[name = tensor("hidden_states_329_cast")]; + tensor var_8238 = const()[name = tensor("op_8238"), val = tensor([1, 1])]; + tensor var_8240 = const()[name = tensor("op_8240"), val = tensor([1, 1])]; + tensor q_165_pad_type_0 = const()[name = tensor("q_165_pad_type_0"), val = tensor("custom")]; + tensor q_165_pad_0 = const()[name = tensor("q_165_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(759004928))), lut = tensor([-0x1.67p-5, -0x1.afp-7, 0x1.afcp-7, 0x1.668p-5]), name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_165_cast = conv(dilations = var_8240, groups = var_6865, pad = q_165_pad_0, pad_type = q_165_pad_type_0, strides = var_8238, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_329_cast)[name = tensor("q_165_cast")]; + tensor var_8244 = const()[name = tensor("op_8244"), val = tensor([1, 1])]; + tensor var_8246 = const()[name = tensor("op_8246"), val = tensor([1, 1])]; + tensor k_165_pad_type_0 = const()[name = tensor("k_165_pad_type_0"), val = tensor("custom")]; + tensor k_165_pad_0 = const()[name = tensor("k_165_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(759414592))), lut = tensor([-0x1.678p-5, -0x1.b0cp-7, 0x1.b04p-7, 0x1.678p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_165_cast = conv(dilations = var_8246, groups = var_6865, pad = k_165_pad_0, pad_type = k_165_pad_type_0, strides = var_8244, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_329_cast)[name = tensor("k_165_cast")]; + tensor var_8250 = const()[name = tensor("op_8250"), val = tensor([1, 1])]; + tensor var_8252 = const()[name = tensor("op_8252"), val = tensor([1, 1])]; + tensor v_165_pad_type_0 = const()[name = tensor("v_165_pad_type_0"), val = tensor("custom")]; + tensor v_165_pad_0 = const()[name = tensor("v_165_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(759824256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(760643520))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_165_cast = conv(dilations = var_8252, groups = var_6865, pad = v_165_pad_0, pad_type = v_165_pad_type_0, strides = var_8250, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_329_cast)[name = tensor("v_165_cast")]; + tensor var_8256 = const()[name = tensor("op_8256"), val = tensor([2, 20, 64, -1])]; + tensor var_8257_cast = reshape(shape = var_8256, x = q_165_cast)[name = tensor("op_8257_cast")]; + tensor var_8258 = const()[name = tensor("op_8258"), val = tensor([2, 20, 64, -1])]; + tensor var_8259_cast = reshape(shape = var_8258, x = k_165_cast)[name = tensor("op_8259_cast")]; + tensor var_8260 = const()[name = tensor("op_8260"), val = tensor([2, 20, 64, -1])]; + tensor var_8261_cast = reshape(shape = var_8260, x = v_165_cast)[name = tensor("op_8261_cast")]; + tensor attn_weights_329_transpose_x_0 = const()[name = tensor("attn_weights_329_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_329_transpose_y_0 = const()[name = tensor("attn_weights_329_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_329_cast = matmul(transpose_x = attn_weights_329_transpose_x_0, transpose_y = attn_weights_329_transpose_y_0, x = var_8257_cast, y = var_8259_cast)[name = 
tensor("attn_weights_329_cast")]; + tensor attn_weights_331_cast = mul(x = attn_weights_329_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_331_cast")]; + tensor var_8265_cast = softmax(axis = var_6849, x = attn_weights_331_cast)[name = tensor("op_8265_cast")]; + tensor attn_165_transpose_x_0 = const()[name = tensor("attn_165_transpose_x_0"), val = tensor(false)]; + tensor attn_165_transpose_y_0 = const()[name = tensor("attn_165_transpose_y_0"), val = tensor(true)]; + tensor attn_165_cast = matmul(transpose_x = attn_165_transpose_x_0, transpose_y = attn_165_transpose_y_0, x = var_8261_cast, y = var_8265_cast)[name = tensor("attn_165_cast")]; + tensor var_8269 = const()[name = tensor("op_8269"), val = tensor([2, 1280, 1, -1])]; + tensor input_495_cast = reshape(shape = var_8269, x = attn_165_cast)[name = tensor("input_495_cast")]; + tensor var_8274 = const()[name = tensor("op_8274"), val = tensor([1, 1])]; + tensor var_8276 = const()[name = tensor("op_8276"), val = tensor([1, 1])]; + tensor var_8278_pad_type_0 = const()[name = tensor("op_8278_pad_type_0"), val = tensor("custom")]; + tensor var_8278_pad_0 = const()[name = tensor("op_8278_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(760643648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(761462912))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(761463040)))]; + tensor var_8278_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_8276, groups = var_6865, pad = var_8278_pad_0, pad_type = var_8278_pad_type_0, strides = var_8274, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_495_cast)[name = tensor("op_8278_cast")]; + tensor inputs_249_cast = add(x = var_8278_cast, y = inputs_247_cast)[name = tensor("inputs_249_cast")]; + tensor var_8282 = const()[name = tensor("op_8282"), val = tensor([1])]; + tensor channels_mean_249_cast = reduce_mean(axes = var_8282, keep_dims = var_6860, x = inputs_249_cast)[name = tensor("channels_mean_249_cast")]; + tensor zero_mean_249_cast = sub(x = inputs_249_cast, y = channels_mean_249_cast)[name = tensor("zero_mean_249_cast")]; + tensor zero_mean_sq_249_cast = mul(x = zero_mean_249_cast, y = zero_mean_249_cast)[name = tensor("zero_mean_sq_249_cast")]; + tensor var_8286 = const()[name = tensor("op_8286"), val = tensor([1])]; + tensor var_8287_cast = reduce_mean(axes = var_8286, keep_dims = var_6860, x = zero_mean_sq_249_cast)[name = tensor("op_8287_cast")]; + tensor var_8288_to_fp16 = const()[name = tensor("op_8288_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8289_cast = add(x = var_8287_cast, y = var_8288_to_fp16)[name = tensor("op_8289_cast")]; + tensor denom_249_epsilon_0_to_fp16 = const()[name = tensor("denom_249_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_249_cast = rsqrt(epsilon = denom_249_epsilon_0_to_fp16, x = var_8289_cast)[name = 
tensor("denom_249_cast")]; + tensor out_249_cast = mul(x = zero_mean_249_cast, y = denom_249_cast)[name = tensor("out_249_cast")]; + tensor var_8293_to_fp16 = const()[name = tensor("op_8293_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(761465664)))]; + tensor var_8294_cast = add(x = out_249_cast, y = var_8293_to_fp16)[name = tensor("op_8294_cast")]; + tensor var_8296_to_fp16 = const()[name = tensor("op_8296_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(761468288)))]; + tensor hidden_states_331_cast = mul(x = var_8294_cast, y = var_8296_to_fp16)[name = tensor("hidden_states_331_cast")]; + tensor var_8303 = const()[name = tensor("op_8303"), val = tensor([1, 1])]; + tensor var_8305 = const()[name = tensor("op_8305"), val = tensor([1, 1])]; + tensor q_167_pad_type_0 = const()[name = tensor("q_167_pad_type_0"), val = tensor("custom")]; + tensor q_167_pad_0 = const()[name = tensor("q_167_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(761470912))), lut = tensor([-0x1.d44p-7, 0x1.d3cp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_167_cast = conv(dilations = var_8305, groups = var_6865, pad = q_167_pad_0, pad_type = q_167_pad_type_0, strides = var_8303, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_331_cast)[name = tensor("q_167_cast")]; + tensor var_8309 = const()[name = tensor("op_8309"), val = tensor([1, 1])]; + tensor var_8311 = const()[name = tensor("op_8311"), val = tensor([1, 1])]; + tensor k_167_pad_type_0 = const()[name = tensor("k_167_pad_type_0"), val = tensor("custom")]; + tensor k_167_pad_0 = const()[name = tensor("k_167_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(761675776))), lut = tensor([-0x1.59cp-7, 0x1.59p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_167_cast = conv(dilations = var_8311, groups = var_6865, pad = k_167_pad_0, pad_type = k_167_pad_type_0, strides = var_8309, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_167_cast")]; + tensor var_8315 = const()[name = tensor("op_8315"), val = tensor([1, 1])]; + tensor var_8317 = const()[name = tensor("op_8317"), val = tensor([1, 1])]; + tensor v_167_pad_type_0 = const()[name = tensor("v_167_pad_type_0"), val = tensor("custom")]; + tensor v_167_pad_0 = const()[name = tensor("v_167_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762003520))), lut = tensor([-0x1.c78p-7, 0x1.c88p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_167_cast = conv(dilations = var_8317, groups 
= var_6865, pad = v_167_pad_0, pad_type = v_167_pad_type_0, strides = var_8315, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_167_cast")]; + tensor var_8321 = const()[name = tensor("op_8321"), val = tensor([2, 20, 64, -1])]; + tensor var_8322_cast = reshape(shape = var_8321, x = q_167_cast)[name = tensor("op_8322_cast")]; + tensor var_8323 = const()[name = tensor("op_8323"), val = tensor([2, 20, 64, -1])]; + tensor var_8324_cast = reshape(shape = var_8323, x = k_167_cast)[name = tensor("op_8324_cast")]; + tensor var_8325 = const()[name = tensor("op_8325"), val = tensor([2, 20, 64, -1])]; + tensor var_8326_cast = reshape(shape = var_8325, x = v_167_cast)[name = tensor("op_8326_cast")]; + tensor attn_weights_333_transpose_x_0 = const()[name = tensor("attn_weights_333_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_333_transpose_y_0 = const()[name = tensor("attn_weights_333_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_333_cast = matmul(transpose_x = attn_weights_333_transpose_x_0, transpose_y = attn_weights_333_transpose_y_0, x = var_8322_cast, y = var_8324_cast)[name = tensor("attn_weights_333_cast")]; + tensor attn_weights_335_cast = mul(x = attn_weights_333_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_335_cast")]; + tensor var_8330_cast = softmax(axis = var_6849, x = attn_weights_335_cast)[name = tensor("op_8330_cast")]; + tensor attn_167_transpose_x_0 = const()[name = tensor("attn_167_transpose_x_0"), val = tensor(false)]; + tensor attn_167_transpose_y_0 = const()[name = tensor("attn_167_transpose_y_0"), val = tensor(true)]; + tensor attn_167_cast = matmul(transpose_x = attn_167_transpose_x_0, transpose_y = attn_167_transpose_y_0, x = var_8326_cast, y = var_8330_cast)[name = tensor("attn_167_cast")]; + tensor var_8334 = const()[name = tensor("op_8334"), val = tensor([2, 1280, 1, -1])]; + tensor input_497_cast = reshape(shape = var_8334, x = attn_167_cast)[name = tensor("input_497_cast")]; + tensor var_8339 = const()[name = tensor("op_8339"), val = tensor([1, 1])]; + tensor var_8341 = const()[name = tensor("op_8341"), val = tensor([1, 1])]; + tensor var_8343_pad_type_0 = const()[name = tensor("op_8343_pad_type_0"), val = tensor("custom")]; + tensor var_8343_pad_0 = const()[name = tensor("op_8343_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762331264))), lut = tensor([-0x1.1a4p-7, 0x1.194p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762536128)))]; + tensor var_8343_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_8341, groups = var_6865, pad = var_8343_pad_0, pad_type = var_8343_pad_type_0, strides = var_8339, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_497_cast)[name = tensor("op_8343_cast")]; + tensor inputs_251_cast = add(x = var_8343_cast, y = inputs_249_cast)[name = 
tensor("inputs_251_cast")]; + tensor var_8347 = const()[name = tensor("op_8347"), val = tensor([1])]; + tensor channels_mean_251_cast = reduce_mean(axes = var_8347, keep_dims = var_6860, x = inputs_251_cast)[name = tensor("channels_mean_251_cast")]; + tensor zero_mean_251_cast = sub(x = inputs_251_cast, y = channels_mean_251_cast)[name = tensor("zero_mean_251_cast")]; + tensor zero_mean_sq_251_cast = mul(x = zero_mean_251_cast, y = zero_mean_251_cast)[name = tensor("zero_mean_sq_251_cast")]; + tensor var_8351 = const()[name = tensor("op_8351"), val = tensor([1])]; + tensor var_8352_cast = reduce_mean(axes = var_8351, keep_dims = var_6860, x = zero_mean_sq_251_cast)[name = tensor("op_8352_cast")]; + tensor var_8353_to_fp16 = const()[name = tensor("op_8353_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8354_cast = add(x = var_8352_cast, y = var_8353_to_fp16)[name = tensor("op_8354_cast")]; + tensor denom_251_epsilon_0_to_fp16 = const()[name = tensor("denom_251_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_251_cast = rsqrt(epsilon = denom_251_epsilon_0_to_fp16, x = var_8354_cast)[name = tensor("denom_251_cast")]; + tensor out_251_cast = mul(x = zero_mean_251_cast, y = denom_251_cast)[name = tensor("out_251_cast")]; + tensor var_8358_to_fp16 = const()[name = tensor("op_8358_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762538752)))]; + tensor var_8359_cast = add(x = out_251_cast, y = var_8358_to_fp16)[name = tensor("op_8359_cast")]; + tensor var_8361_to_fp16 = const()[name = tensor("op_8361_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762541376)))]; + tensor input_499_cast = mul(x = var_8359_cast, y = var_8361_to_fp16)[name = tensor("input_499_cast")]; + tensor var_8369 = const()[name = tensor("op_8369"), val = tensor([1, 1])]; + tensor var_8371 = const()[name = tensor("op_8371"), val = tensor([1, 1])]; + tensor var_8373_pad_type_0 = const()[name = tensor("op_8373_pad_type_0"), val = tensor("custom")]; + tensor var_8373_pad_0 = const()[name = tensor("op_8373_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762544000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769097664))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769097792)))]; + tensor var_8373_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_8371, groups = var_6865, pad = var_8373_pad_0, pad_type = var_8373_pad_type_0, strides = var_8369, weight = up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_499_cast)[name = tensor("op_8373_cast")]; + tensor var_8374_split_sizes_0 = const()[name = tensor("op_8374_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8374_axis_0 = const()[name = tensor("op_8374_axis_0"), val = tensor(1)]; + tensor var_8374_cast_0, tensor var_8374_cast_1 = split(axis = 
var_8374_axis_0, split_sizes = var_8374_split_sizes_0, x = var_8373_cast)[name = tensor("op_8374_cast")]; + tensor var_8376_mode_0 = const()[name = tensor("op_8376_mode_0"), val = tensor("EXACT")]; + tensor var_8376_cast = gelu(mode = var_8376_mode_0, x = var_8374_cast_1)[name = tensor("op_8376_cast")]; + tensor input_501_cast = mul(x = var_8374_cast_0, y = var_8376_cast)[name = tensor("input_501_cast")]; + tensor var_8380 = const()[name = tensor("op_8380"), val = tensor([1, 1])]; + tensor var_8382 = const()[name = tensor("op_8382"), val = tensor([1, 1])]; + tensor var_8384_pad_type_0 = const()[name = tensor("op_8384_pad_type_0"), val = tensor("custom")]; + tensor var_8384_pad_0 = const()[name = tensor("op_8384_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769118336))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(772395200))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(772395328)))]; + tensor var_8384_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_8382, groups = var_6865, pad = var_8384_pad_0, pad_type = var_8384_pad_type_0, strides = var_8380, weight = up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_501_cast)[name = tensor("op_8384_cast")]; + tensor inputs_253_cast = add(x = var_8384_cast, y = inputs_251_cast)[name = tensor("inputs_253_cast")]; + tensor var_8394 = const()[name = tensor("op_8394"), val = tensor([1])]; + tensor channels_mean_253_cast = reduce_mean(axes = var_8394, keep_dims = var_6860, x = inputs_253_cast)[name = tensor("channels_mean_253_cast")]; + tensor zero_mean_253_cast = sub(x = inputs_253_cast, y = channels_mean_253_cast)[name = tensor("zero_mean_253_cast")]; + tensor zero_mean_sq_253_cast = mul(x = zero_mean_253_cast, y = zero_mean_253_cast)[name = tensor("zero_mean_sq_253_cast")]; + tensor var_8398 = const()[name = tensor("op_8398"), val = tensor([1])]; + tensor var_8399_cast = reduce_mean(axes = var_8398, keep_dims = var_6860, x = zero_mean_sq_253_cast)[name = tensor("op_8399_cast")]; + tensor var_8400_to_fp16 = const()[name = tensor("op_8400_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8401_cast = add(x = var_8399_cast, y = var_8400_to_fp16)[name = tensor("op_8401_cast")]; + tensor denom_253_epsilon_0_to_fp16 = const()[name = tensor("denom_253_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_253_cast = rsqrt(epsilon = denom_253_epsilon_0_to_fp16, x = var_8401_cast)[name = tensor("denom_253_cast")]; + tensor out_253_cast = mul(x = zero_mean_253_cast, y = denom_253_cast)[name = tensor("out_253_cast")]; + tensor var_8405_to_fp16 = const()[name = tensor("op_8405_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(772397952)))]; + tensor var_8406_cast = add(x = out_253_cast, y = var_8405_to_fp16)[name = tensor("op_8406_cast")]; + tensor var_8408_to_fp16 = const()[name = tensor("op_8408_to_fp16"), val = tensor(BLOBFILE(path 
= tensor("@model_path/weights/weight.bin"), offset = tensor(772400576)))]; + tensor hidden_states_335_cast = mul(x = var_8406_cast, y = var_8408_to_fp16)[name = tensor("hidden_states_335_cast")]; + tensor var_8415 = const()[name = tensor("op_8415"), val = tensor([1, 1])]; + tensor var_8417 = const()[name = tensor("op_8417"), val = tensor([1, 1])]; + tensor q_169_pad_type_0 = const()[name = tensor("q_169_pad_type_0"), val = tensor("custom")]; + tensor q_169_pad_0 = const()[name = tensor("q_169_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(772403200))), lut = tensor([-0x1.6ap-5, -0x1.b7p-7, 0x1.afp-7, 0x1.684p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_169_cast = conv(dilations = var_8417, groups = var_6865, pad = q_169_pad_0, pad_type = q_169_pad_type_0, strides = var_8415, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_335_cast)[name = tensor("q_169_cast")]; + tensor var_8421 = const()[name = tensor("op_8421"), val = tensor([1, 1])]; + tensor var_8423 = const()[name = tensor("op_8423"), val = tensor([1, 1])]; + tensor k_169_pad_type_0 = const()[name = tensor("k_169_pad_type_0"), val = tensor("custom")]; + tensor k_169_pad_0 = const()[name = tensor("k_169_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(772812864))), lut = tensor([-0x1.69p-5, -0x1.b34p-7, 0x1.b2cp-7, 0x1.688p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_169_cast = conv(dilations = var_8423, groups = var_6865, pad = k_169_pad_0, pad_type = k_169_pad_type_0, strides = var_8421, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_335_cast)[name = tensor("k_169_cast")]; + tensor var_8427 = const()[name = tensor("op_8427"), val = tensor([1, 1])]; + tensor var_8429 = const()[name = tensor("op_8429"), val = tensor([1, 1])]; + tensor v_169_pad_type_0 = const()[name = tensor("v_169_pad_type_0"), val = tensor("custom")]; + tensor v_169_pad_0 = const()[name = tensor("v_169_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(773222528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(774041792))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_169_cast = conv(dilations = var_8429, groups = var_6865, pad = v_169_pad_0, pad_type = v_169_pad_type_0, strides = var_8427, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_335_cast)[name = tensor("v_169_cast")]; + tensor var_8433 = const()[name = tensor("op_8433"), val = tensor([2, 20, 64, -1])]; + tensor var_8434_cast = reshape(shape = var_8433, x = q_169_cast)[name = 
tensor("op_8434_cast")]; + tensor var_8435 = const()[name = tensor("op_8435"), val = tensor([2, 20, 64, -1])]; + tensor var_8436_cast = reshape(shape = var_8435, x = k_169_cast)[name = tensor("op_8436_cast")]; + tensor var_8437 = const()[name = tensor("op_8437"), val = tensor([2, 20, 64, -1])]; + tensor var_8438_cast = reshape(shape = var_8437, x = v_169_cast)[name = tensor("op_8438_cast")]; + tensor attn_weights_337_transpose_x_0 = const()[name = tensor("attn_weights_337_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_337_transpose_y_0 = const()[name = tensor("attn_weights_337_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_337_cast = matmul(transpose_x = attn_weights_337_transpose_x_0, transpose_y = attn_weights_337_transpose_y_0, x = var_8434_cast, y = var_8436_cast)[name = tensor("attn_weights_337_cast")]; + tensor attn_weights_339_cast = mul(x = attn_weights_337_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_339_cast")]; + tensor var_8442_cast = softmax(axis = var_6849, x = attn_weights_339_cast)[name = tensor("op_8442_cast")]; + tensor attn_169_transpose_x_0 = const()[name = tensor("attn_169_transpose_x_0"), val = tensor(false)]; + tensor attn_169_transpose_y_0 = const()[name = tensor("attn_169_transpose_y_0"), val = tensor(true)]; + tensor attn_169_cast = matmul(transpose_x = attn_169_transpose_x_0, transpose_y = attn_169_transpose_y_0, x = var_8438_cast, y = var_8442_cast)[name = tensor("attn_169_cast")]; + tensor var_8446 = const()[name = tensor("op_8446"), val = tensor([2, 1280, 1, -1])]; + tensor input_503_cast = reshape(shape = var_8446, x = attn_169_cast)[name = tensor("input_503_cast")]; + tensor var_8451 = const()[name = tensor("op_8451"), val = tensor([1, 1])]; + tensor var_8453 = const()[name = tensor("op_8453"), val = tensor([1, 1])]; + tensor var_8455_pad_type_0 = const()[name = tensor("op_8455_pad_type_0"), val = tensor("custom")]; + tensor var_8455_pad_0 = const()[name = tensor("op_8455_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(774041920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(774861184))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(774861312)))]; + tensor var_8455_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_8453, groups = var_6865, pad = var_8455_pad_0, pad_type = var_8455_pad_type_0, strides = var_8451, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_503_cast)[name = tensor("op_8455_cast")]; + tensor inputs_255_cast = add(x = var_8455_cast, y = inputs_253_cast)[name = tensor("inputs_255_cast")]; + tensor var_8459 = const()[name = tensor("op_8459"), val = tensor([1])]; + tensor channels_mean_255_cast = reduce_mean(axes = var_8459, keep_dims = var_6860, x = inputs_255_cast)[name = tensor("channels_mean_255_cast")]; + tensor zero_mean_255_cast = sub(x = inputs_255_cast, y = 
channels_mean_255_cast)[name = tensor("zero_mean_255_cast")]; + tensor zero_mean_sq_255_cast = mul(x = zero_mean_255_cast, y = zero_mean_255_cast)[name = tensor("zero_mean_sq_255_cast")]; + tensor var_8463 = const()[name = tensor("op_8463"), val = tensor([1])]; + tensor var_8464_cast = reduce_mean(axes = var_8463, keep_dims = var_6860, x = zero_mean_sq_255_cast)[name = tensor("op_8464_cast")]; + tensor var_8465_to_fp16 = const()[name = tensor("op_8465_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8466_cast = add(x = var_8464_cast, y = var_8465_to_fp16)[name = tensor("op_8466_cast")]; + tensor denom_255_epsilon_0_to_fp16 = const()[name = tensor("denom_255_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_255_cast = rsqrt(epsilon = denom_255_epsilon_0_to_fp16, x = var_8466_cast)[name = tensor("denom_255_cast")]; + tensor out_255_cast = mul(x = zero_mean_255_cast, y = denom_255_cast)[name = tensor("out_255_cast")]; + tensor var_8470_to_fp16 = const()[name = tensor("op_8470_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(774863936)))]; + tensor var_8471_cast = add(x = out_255_cast, y = var_8470_to_fp16)[name = tensor("op_8471_cast")]; + tensor var_8473_to_fp16 = const()[name = tensor("op_8473_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(774866560)))]; + tensor hidden_states_337_cast = mul(x = var_8471_cast, y = var_8473_to_fp16)[name = tensor("hidden_states_337_cast")]; + tensor var_8480 = const()[name = tensor("op_8480"), val = tensor([1, 1])]; + tensor var_8482 = const()[name = tensor("op_8482"), val = tensor([1, 1])]; + tensor q_171_pad_type_0 = const()[name = tensor("q_171_pad_type_0"), val = tensor("custom")]; + tensor q_171_pad_0 = const()[name = tensor("q_171_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(774869184))), lut = tensor([-0x1.b48p-7, 0x1.b54p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_171_cast = conv(dilations = var_8482, groups = var_6865, pad = q_171_pad_0, pad_type = q_171_pad_type_0, strides = var_8480, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_337_cast)[name = tensor("q_171_cast")]; + tensor var_8486 = const()[name = tensor("op_8486"), val = tensor([1, 1])]; + tensor var_8488 = const()[name = tensor("op_8488"), val = tensor([1, 1])]; + tensor k_171_pad_type_0 = const()[name = tensor("k_171_pad_type_0"), val = tensor("custom")]; + tensor k_171_pad_0 = const()[name = tensor("k_171_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(775074048))), lut = tensor([-0x1.38p-7, 0x1.37p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_171_cast = conv(dilations = var_8488, groups = var_6865, pad = k_171_pad_0, pad_type = k_171_pad_type_0, strides = var_8486, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = 
tensor("k_171_cast")]; + tensor var_8492 = const()[name = tensor("op_8492"), val = tensor([1, 1])]; + tensor var_8494 = const()[name = tensor("op_8494"), val = tensor([1, 1])]; + tensor v_171_pad_type_0 = const()[name = tensor("v_171_pad_type_0"), val = tensor("custom")]; + tensor v_171_pad_0 = const()[name = tensor("v_171_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(775401792))), lut = tensor([-0x1.a1p-7, 0x1.a28p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_171_cast = conv(dilations = var_8494, groups = var_6865, pad = v_171_pad_0, pad_type = v_171_pad_type_0, strides = var_8492, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_171_cast")]; + tensor var_8498 = const()[name = tensor("op_8498"), val = tensor([2, 20, 64, -1])]; + tensor var_8499_cast = reshape(shape = var_8498, x = q_171_cast)[name = tensor("op_8499_cast")]; + tensor var_8500 = const()[name = tensor("op_8500"), val = tensor([2, 20, 64, -1])]; + tensor var_8501_cast = reshape(shape = var_8500, x = k_171_cast)[name = tensor("op_8501_cast")]; + tensor var_8502 = const()[name = tensor("op_8502"), val = tensor([2, 20, 64, -1])]; + tensor var_8503_cast = reshape(shape = var_8502, x = v_171_cast)[name = tensor("op_8503_cast")]; + tensor attn_weights_341_transpose_x_0 = const()[name = tensor("attn_weights_341_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_341_transpose_y_0 = const()[name = tensor("attn_weights_341_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_341_cast = matmul(transpose_x = attn_weights_341_transpose_x_0, transpose_y = attn_weights_341_transpose_y_0, x = var_8499_cast, y = var_8501_cast)[name = tensor("attn_weights_341_cast")]; + tensor attn_weights_343_cast = mul(x = attn_weights_341_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_343_cast")]; + tensor var_8507_cast = softmax(axis = var_6849, x = attn_weights_343_cast)[name = tensor("op_8507_cast")]; + tensor attn_171_transpose_x_0 = const()[name = tensor("attn_171_transpose_x_0"), val = tensor(false)]; + tensor attn_171_transpose_y_0 = const()[name = tensor("attn_171_transpose_y_0"), val = tensor(true)]; + tensor attn_171_cast = matmul(transpose_x = attn_171_transpose_x_0, transpose_y = attn_171_transpose_y_0, x = var_8503_cast, y = var_8507_cast)[name = tensor("attn_171_cast")]; + tensor var_8511 = const()[name = tensor("op_8511"), val = tensor([2, 1280, 1, -1])]; + tensor input_505_cast = reshape(shape = var_8511, x = attn_171_cast)[name = tensor("input_505_cast")]; + tensor var_8516 = const()[name = tensor("op_8516"), val = tensor([1, 1])]; + tensor var_8518 = const()[name = tensor("op_8518"), val = tensor([1, 1])]; + tensor var_8520_pad_type_0 = const()[name = tensor("op_8520_pad_type_0"), val = tensor("custom")]; + tensor var_8520_pad_0 = const()[name = tensor("op_8520_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(775729536))), lut = tensor([-0x1.048p-7, 0x1.04cp-7]), name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(775934400)))]; + tensor var_8520_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_8518, groups = var_6865, pad = var_8520_pad_0, pad_type = var_8520_pad_type_0, strides = var_8516, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_505_cast)[name = tensor("op_8520_cast")]; + tensor inputs_257_cast = add(x = var_8520_cast, y = inputs_255_cast)[name = tensor("inputs_257_cast")]; + tensor var_8524 = const()[name = tensor("op_8524"), val = tensor([1])]; + tensor channels_mean_257_cast = reduce_mean(axes = var_8524, keep_dims = var_6860, x = inputs_257_cast)[name = tensor("channels_mean_257_cast")]; + tensor zero_mean_257_cast = sub(x = inputs_257_cast, y = channels_mean_257_cast)[name = tensor("zero_mean_257_cast")]; + tensor zero_mean_sq_257_cast = mul(x = zero_mean_257_cast, y = zero_mean_257_cast)[name = tensor("zero_mean_sq_257_cast")]; + tensor var_8528 = const()[name = tensor("op_8528"), val = tensor([1])]; + tensor var_8529_cast = reduce_mean(axes = var_8528, keep_dims = var_6860, x = zero_mean_sq_257_cast)[name = tensor("op_8529_cast")]; + tensor var_8530_to_fp16 = const()[name = tensor("op_8530_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8531_cast = add(x = var_8529_cast, y = var_8530_to_fp16)[name = tensor("op_8531_cast")]; + tensor denom_257_epsilon_0_to_fp16 = const()[name = tensor("denom_257_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_257_cast = rsqrt(epsilon = denom_257_epsilon_0_to_fp16, x = var_8531_cast)[name = tensor("denom_257_cast")]; + tensor out_257_cast = mul(x = zero_mean_257_cast, y = denom_257_cast)[name = tensor("out_257_cast")]; + tensor var_8535_to_fp16 = const()[name = tensor("op_8535_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(775937024)))]; + tensor var_8536_cast = add(x = out_257_cast, y = var_8535_to_fp16)[name = tensor("op_8536_cast")]; + tensor var_8538_to_fp16 = const()[name = tensor("op_8538_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(775939648)))]; + tensor input_507_cast = mul(x = var_8536_cast, y = var_8538_to_fp16)[name = tensor("input_507_cast")]; + tensor var_8546 = const()[name = tensor("op_8546"), val = tensor([1, 1])]; + tensor var_8548 = const()[name = tensor("op_8548"), val = tensor([1, 1])]; + tensor var_8550_pad_type_0 = const()[name = tensor("op_8550_pad_type_0"), val = tensor("custom")]; + tensor var_8550_pad_0 = const()[name = tensor("op_8550_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(775942272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(782495936))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor 
up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(782496064)))]; + tensor var_8550_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_8548, groups = var_6865, pad = var_8550_pad_0, pad_type = var_8550_pad_type_0, strides = var_8546, weight = up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_507_cast)[name = tensor("op_8550_cast")]; + tensor var_8551_split_sizes_0 = const()[name = tensor("op_8551_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8551_axis_0 = const()[name = tensor("op_8551_axis_0"), val = tensor(1)]; + tensor var_8551_cast_0, tensor var_8551_cast_1 = split(axis = var_8551_axis_0, split_sizes = var_8551_split_sizes_0, x = var_8550_cast)[name = tensor("op_8551_cast")]; + tensor var_8553_mode_0 = const()[name = tensor("op_8553_mode_0"), val = tensor("EXACT")]; + tensor var_8553_cast = gelu(mode = var_8553_mode_0, x = var_8551_cast_1)[name = tensor("op_8553_cast")]; + tensor input_509_cast = mul(x = var_8551_cast_0, y = var_8553_cast)[name = tensor("input_509_cast")]; + tensor var_8557 = const()[name = tensor("op_8557"), val = tensor([1, 1])]; + tensor var_8559 = const()[name = tensor("op_8559"), val = tensor([1, 1])]; + tensor var_8561_pad_type_0 = const()[name = tensor("op_8561_pad_type_0"), val = tensor("custom")]; + tensor var_8561_pad_0 = const()[name = tensor("op_8561_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(782516608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(785793472))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(785793600)))]; + tensor var_8561_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_8559, groups = var_6865, pad = var_8561_pad_0, pad_type = var_8561_pad_type_0, strides = var_8557, weight = up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_509_cast)[name = tensor("op_8561_cast")]; + tensor inputs_259_cast = add(x = var_8561_cast, y = inputs_257_cast)[name = tensor("inputs_259_cast")]; + tensor var_8571 = const()[name = tensor("op_8571"), val = tensor([1])]; + tensor channels_mean_259_cast = reduce_mean(axes = var_8571, keep_dims = var_6860, x = inputs_259_cast)[name = tensor("channels_mean_259_cast")]; + tensor zero_mean_259_cast = sub(x = inputs_259_cast, y = channels_mean_259_cast)[name = tensor("zero_mean_259_cast")]; + tensor zero_mean_sq_259_cast = mul(x = zero_mean_259_cast, y = zero_mean_259_cast)[name = tensor("zero_mean_sq_259_cast")]; + tensor var_8575 = const()[name = tensor("op_8575"), val = tensor([1])]; + tensor var_8576_cast = reduce_mean(axes = var_8575, keep_dims = var_6860, x = zero_mean_sq_259_cast)[name = tensor("op_8576_cast")]; + 
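The `constexpr_lut_to_dense` statements above are where mixed-bit palettization surfaces in the generated MIL: each palettized weight is stored in `weights/weight.bin` as a packed index blob plus a small look-up table, and the runtime expands it to a dense fp16 tensor before the `conv` that consumes it. A 2-entry `lut` implies 1-bit indices and a 4-entry `lut` implies 2-bit indices, which is how different layers end up with different effective bit widths under one recipe. The sketch below is a minimal NumPy illustration of that decode, not the Core ML runtime's actual decoder; the helper name and the MSB-first bit-packing order are assumptions made for the example.

```python
# Hedged sketch of constexpr_lut_to_dense semantics: decode packed palette
# indices into a dense fp16 weight tensor (packing order is an assumption).
import numpy as np

def lut_to_dense(packed: np.ndarray, lut: np.ndarray, shape: tuple) -> np.ndarray:
    """Expand a packed index blob + look-up table into a dense weight."""
    nbits = int(np.log2(len(lut)))            # 2 entries -> 1 bit, 4 -> 2 bits, ...
    bits = np.unpackbits(packed)              # bitstream, MSB-first per byte
    n = int(np.prod(shape))
    idx = bits[: n * nbits].reshape(n, nbits)
    idx = (idx * (1 << np.arange(nbits - 1, -1, -1))).sum(axis=1)
    return lut[idx].astype(np.float16).reshape(shape)

# Example: the 1-bit attn2.to_out palette above, lut = [-0x1.048p-7, 0x1.04cp-7].
lut = np.array([-float.fromhex("0x1.048p-7"),
                float.fromhex("0x1.04cp-7")], dtype=np.float16)
packed = np.random.randint(0, 256, size=(1280 * 1280) // 8, dtype=np.uint8)
w = lut_to_dense(packed, lut, (1280, 1280, 1, 1))
assert set(np.unique(w)) <= set(lut)          # every decoded value is a palette entry
```

For reading the rest of the dump: the recurring `channels_mean_*` / `zero_mean_*` / `denom_*` / `out_*` sequences are layer normalization spelled out in primitive ops, out = (x - mean(x)) * rsqrt(var(x) + eps), with the learned per-channel shift and scale loaded from `weight.bin` and applied by the following `add`/`mul`. The `reshape` into `[2, 32, C/32, H, W]` followed by `batch_norm` is the analogous expansion of a 32-group group norm, and the `split` + `gelu` + `mul` triple in each `ff_net_0_proj` block is the GEGLU feed-forward activation.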
tensor var_8577_to_fp16 = const()[name = tensor("op_8577_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8578_cast = add(x = var_8576_cast, y = var_8577_to_fp16)[name = tensor("op_8578_cast")]; + tensor denom_259_epsilon_0_to_fp16 = const()[name = tensor("denom_259_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_259_cast = rsqrt(epsilon = denom_259_epsilon_0_to_fp16, x = var_8578_cast)[name = tensor("denom_259_cast")]; + tensor out_259_cast = mul(x = zero_mean_259_cast, y = denom_259_cast)[name = tensor("out_259_cast")]; + tensor var_8582_to_fp16 = const()[name = tensor("op_8582_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(785796224)))]; + tensor var_8583_cast = add(x = out_259_cast, y = var_8582_to_fp16)[name = tensor("op_8583_cast")]; + tensor var_8585_to_fp16 = const()[name = tensor("op_8585_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(785798848)))]; + tensor hidden_states_341_cast = mul(x = var_8583_cast, y = var_8585_to_fp16)[name = tensor("hidden_states_341_cast")]; + tensor var_8592 = const()[name = tensor("op_8592"), val = tensor([1, 1])]; + tensor var_8594 = const()[name = tensor("op_8594"), val = tensor([1, 1])]; + tensor q_173_pad_type_0 = const()[name = tensor("q_173_pad_type_0"), val = tensor("custom")]; + tensor q_173_pad_0 = const()[name = tensor("q_173_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(785801472))), lut = tensor([-0x1.6b4p-5, -0x1.b6cp-7, 0x1.b3cp-7, 0x1.6a4p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_173_cast = conv(dilations = var_8594, groups = var_6865, pad = q_173_pad_0, pad_type = q_173_pad_type_0, strides = var_8592, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_341_cast)[name = tensor("q_173_cast")]; + tensor var_8598 = const()[name = tensor("op_8598"), val = tensor([1, 1])]; + tensor var_8600 = const()[name = tensor("op_8600"), val = tensor([1, 1])]; + tensor k_173_pad_type_0 = const()[name = tensor("k_173_pad_type_0"), val = tensor("custom")]; + tensor k_173_pad_0 = const()[name = tensor("k_173_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(786211136))), lut = tensor([-0x1.69cp-5, -0x1.b24p-7, 0x1.b88p-7, 0x1.6b8p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_173_cast = conv(dilations = var_8600, groups = var_6865, pad = k_173_pad_0, pad_type = k_173_pad_type_0, strides = var_8598, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_341_cast)[name = tensor("k_173_cast")]; + tensor var_8604 = const()[name = tensor("op_8604"), val = tensor([1, 1])]; + tensor var_8606 = const()[name = tensor("op_8606"), val = tensor([1, 1])]; + tensor v_173_pad_type_0 = const()[name = tensor("v_173_pad_type_0"), val = tensor("custom")]; + tensor v_173_pad_0 = const()[name = tensor("v_173_pad_0"), val = tensor([0, 0, 0, 
0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(786620800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(787440064))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_173_cast = conv(dilations = var_8606, groups = var_6865, pad = v_173_pad_0, pad_type = v_173_pad_type_0, strides = var_8604, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_341_cast)[name = tensor("v_173_cast")]; + tensor var_8610 = const()[name = tensor("op_8610"), val = tensor([2, 20, 64, -1])]; + tensor var_8611_cast = reshape(shape = var_8610, x = q_173_cast)[name = tensor("op_8611_cast")]; + tensor var_8612 = const()[name = tensor("op_8612"), val = tensor([2, 20, 64, -1])]; + tensor var_8613_cast = reshape(shape = var_8612, x = k_173_cast)[name = tensor("op_8613_cast")]; + tensor var_8614 = const()[name = tensor("op_8614"), val = tensor([2, 20, 64, -1])]; + tensor var_8615_cast = reshape(shape = var_8614, x = v_173_cast)[name = tensor("op_8615_cast")]; + tensor attn_weights_345_transpose_x_0 = const()[name = tensor("attn_weights_345_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_345_transpose_y_0 = const()[name = tensor("attn_weights_345_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_345_cast = matmul(transpose_x = attn_weights_345_transpose_x_0, transpose_y = attn_weights_345_transpose_y_0, x = var_8611_cast, y = var_8613_cast)[name = tensor("attn_weights_345_cast")]; + tensor attn_weights_347_cast = mul(x = attn_weights_345_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_347_cast")]; + tensor var_8619_cast = softmax(axis = var_6849, x = attn_weights_347_cast)[name = tensor("op_8619_cast")]; + tensor attn_173_transpose_x_0 = const()[name = tensor("attn_173_transpose_x_0"), val = tensor(false)]; + tensor attn_173_transpose_y_0 = const()[name = tensor("attn_173_transpose_y_0"), val = tensor(true)]; + tensor attn_173_cast = matmul(transpose_x = attn_173_transpose_x_0, transpose_y = attn_173_transpose_y_0, x = var_8615_cast, y = var_8619_cast)[name = tensor("attn_173_cast")]; + tensor var_8623 = const()[name = tensor("op_8623"), val = tensor([2, 1280, 1, -1])]; + tensor input_511_cast = reshape(shape = var_8623, x = attn_173_cast)[name = tensor("input_511_cast")]; + tensor var_8628 = const()[name = tensor("op_8628"), val = tensor([1, 1])]; + tensor var_8630 = const()[name = tensor("op_8630"), val = tensor([1, 1])]; + tensor var_8632_pad_type_0 = const()[name = tensor("op_8632_pad_type_0"), val = tensor("custom")]; + tensor var_8632_pad_0 = const()[name = tensor("op_8632_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(787440192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788669056))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788669248)))]; + tensor var_8632_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_8630, groups = var_6865, pad = var_8632_pad_0, pad_type = var_8632_pad_type_0, strides = var_8628, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_511_cast)[name = tensor("op_8632_cast")]; + tensor inputs_261_cast = add(x = var_8632_cast, y = inputs_259_cast)[name = tensor("inputs_261_cast")]; + tensor var_8636 = const()[name = tensor("op_8636"), val = tensor([1])]; + tensor channels_mean_261_cast = reduce_mean(axes = var_8636, keep_dims = var_6860, x = inputs_261_cast)[name = tensor("channels_mean_261_cast")]; + tensor zero_mean_261_cast = sub(x = inputs_261_cast, y = channels_mean_261_cast)[name = tensor("zero_mean_261_cast")]; + tensor zero_mean_sq_261_cast = mul(x = zero_mean_261_cast, y = zero_mean_261_cast)[name = tensor("zero_mean_sq_261_cast")]; + tensor var_8640 = const()[name = tensor("op_8640"), val = tensor([1])]; + tensor var_8641_cast = reduce_mean(axes = var_8640, keep_dims = var_6860, x = zero_mean_sq_261_cast)[name = tensor("op_8641_cast")]; + tensor var_8642_to_fp16 = const()[name = tensor("op_8642_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8643_cast = add(x = var_8641_cast, y = var_8642_to_fp16)[name = tensor("op_8643_cast")]; + tensor denom_261_epsilon_0_to_fp16 = const()[name = tensor("denom_261_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_261_cast = rsqrt(epsilon = denom_261_epsilon_0_to_fp16, x = var_8643_cast)[name = tensor("denom_261_cast")]; + tensor out_261_cast = mul(x = zero_mean_261_cast, y = denom_261_cast)[name = tensor("out_261_cast")]; + tensor var_8647_to_fp16 = const()[name = tensor("op_8647_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788671872)))]; + tensor var_8648_cast = add(x = out_261_cast, y = var_8647_to_fp16)[name = tensor("op_8648_cast")]; + tensor var_8650_to_fp16 = const()[name = tensor("op_8650_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788674496)))]; + tensor hidden_states_343_cast = mul(x = var_8648_cast, y = var_8650_to_fp16)[name = tensor("hidden_states_343_cast")]; + tensor var_8657 = const()[name = tensor("op_8657"), val = tensor([1, 1])]; + tensor var_8659 = const()[name = tensor("op_8659"), val = tensor([1, 1])]; + tensor q_175_pad_type_0 = const()[name = tensor("q_175_pad_type_0"), val = tensor("custom")]; + tensor q_175_pad_0 = const()[name = tensor("q_175_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788677120))), lut = tensor([-0x1.944p-7, 0x1.958p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_175_cast = conv(dilations = var_8659, groups = var_6865, pad = q_175_pad_0, pad_type = q_175_pad_type_0, strides = var_8657, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_343_cast)[name = tensor("q_175_cast")]; + tensor var_8663 = const()[name = tensor("op_8663"), val = 
tensor([1, 1])]; + tensor var_8665 = const()[name = tensor("op_8665"), val = tensor([1, 1])]; + tensor k_175_pad_type_0 = const()[name = tensor("k_175_pad_type_0"), val = tensor("custom")]; + tensor k_175_pad_0 = const()[name = tensor("k_175_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788881984))), lut = tensor([-0x1.128p-7, 0x1.124p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_175_cast = conv(dilations = var_8665, groups = var_6865, pad = k_175_pad_0, pad_type = k_175_pad_type_0, strides = var_8663, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_175_cast")]; + tensor var_8669 = const()[name = tensor("op_8669"), val = tensor([1, 1])]; + tensor var_8671 = const()[name = tensor("op_8671"), val = tensor([1, 1])]; + tensor v_175_pad_type_0 = const()[name = tensor("v_175_pad_type_0"), val = tensor("custom")]; + tensor v_175_pad_0 = const()[name = tensor("v_175_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(789209728))), lut = tensor([-0x1.57p-7, 0x1.57cp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_175_cast = conv(dilations = var_8671, groups = var_6865, pad = v_175_pad_0, pad_type = v_175_pad_type_0, strides = var_8669, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_175_cast")]; + tensor var_8675 = const()[name = tensor("op_8675"), val = tensor([2, 20, 64, -1])]; + tensor var_8676_cast = reshape(shape = var_8675, x = q_175_cast)[name = tensor("op_8676_cast")]; + tensor var_8677 = const()[name = tensor("op_8677"), val = tensor([2, 20, 64, -1])]; + tensor var_8678_cast = reshape(shape = var_8677, x = k_175_cast)[name = tensor("op_8678_cast")]; + tensor var_8679 = const()[name = tensor("op_8679"), val = tensor([2, 20, 64, -1])]; + tensor var_8680_cast = reshape(shape = var_8679, x = v_175_cast)[name = tensor("op_8680_cast")]; + tensor attn_weights_349_transpose_x_0 = const()[name = tensor("attn_weights_349_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_349_transpose_y_0 = const()[name = tensor("attn_weights_349_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_349_cast = matmul(transpose_x = attn_weights_349_transpose_x_0, transpose_y = attn_weights_349_transpose_y_0, x = var_8676_cast, y = var_8678_cast)[name = tensor("attn_weights_349_cast")]; + tensor attn_weights_351_cast = mul(x = attn_weights_349_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_351_cast")]; + tensor var_8684_cast = softmax(axis = var_6849, x = attn_weights_351_cast)[name = tensor("op_8684_cast")]; + tensor attn_175_transpose_x_0 = const()[name = tensor("attn_175_transpose_x_0"), val = tensor(false)]; + tensor attn_175_transpose_y_0 = const()[name = tensor("attn_175_transpose_y_0"), val = tensor(true)]; + tensor attn_175_cast = matmul(transpose_x = attn_175_transpose_x_0, transpose_y = 
attn_175_transpose_y_0, x = var_8680_cast, y = var_8684_cast)[name = tensor("attn_175_cast")]; + tensor var_8688 = const()[name = tensor("op_8688"), val = tensor([2, 1280, 1, -1])]; + tensor input_513_cast = reshape(shape = var_8688, x = attn_175_cast)[name = tensor("input_513_cast")]; + tensor var_8693 = const()[name = tensor("op_8693"), val = tensor([1, 1])]; + tensor var_8695 = const()[name = tensor("op_8695"), val = tensor([1, 1])]; + tensor var_8697_pad_type_0 = const()[name = tensor("op_8697_pad_type_0"), val = tensor("custom")]; + tensor var_8697_pad_0 = const()[name = tensor("op_8697_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(789537472))), lut = tensor([-0x1.bep-8, 0x1.bdcp-8]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(789742336)))]; + tensor var_8697_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_8695, groups = var_6865, pad = var_8697_pad_0, pad_type = var_8697_pad_type_0, strides = var_8693, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_513_cast)[name = tensor("op_8697_cast")]; + tensor inputs_263_cast = add(x = var_8697_cast, y = inputs_261_cast)[name = tensor("inputs_263_cast")]; + tensor var_8701 = const()[name = tensor("op_8701"), val = tensor([1])]; + tensor channels_mean_263_cast = reduce_mean(axes = var_8701, keep_dims = var_6860, x = inputs_263_cast)[name = tensor("channels_mean_263_cast")]; + tensor zero_mean_263_cast = sub(x = inputs_263_cast, y = channels_mean_263_cast)[name = tensor("zero_mean_263_cast")]; + tensor zero_mean_sq_263_cast = mul(x = zero_mean_263_cast, y = zero_mean_263_cast)[name = tensor("zero_mean_sq_263_cast")]; + tensor var_8705 = const()[name = tensor("op_8705"), val = tensor([1])]; + tensor var_8706_cast = reduce_mean(axes = var_8705, keep_dims = var_6860, x = zero_mean_sq_263_cast)[name = tensor("op_8706_cast")]; + tensor var_8707_to_fp16 = const()[name = tensor("op_8707_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8708_cast = add(x = var_8706_cast, y = var_8707_to_fp16)[name = tensor("op_8708_cast")]; + tensor denom_263_epsilon_0_to_fp16 = const()[name = tensor("denom_263_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_263_cast = rsqrt(epsilon = denom_263_epsilon_0_to_fp16, x = var_8708_cast)[name = tensor("denom_263_cast")]; + tensor out_263_cast = mul(x = zero_mean_263_cast, y = denom_263_cast)[name = tensor("out_263_cast")]; + tensor var_8712_to_fp16 = const()[name = tensor("op_8712_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(789744960)))]; + tensor var_8713_cast = add(x = out_263_cast, y = var_8712_to_fp16)[name = tensor("op_8713_cast")]; + tensor var_8715_to_fp16 = const()[name = tensor("op_8715_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(789747584)))]; + tensor input_515_cast = mul(x = var_8713_cast, y = 
var_8715_to_fp16)[name = tensor("input_515_cast")]; + tensor var_8723 = const()[name = tensor("op_8723"), val = tensor([1, 1])]; + tensor var_8725 = const()[name = tensor("op_8725"), val = tensor([1, 1])]; + tensor var_8727_pad_type_0 = const()[name = tensor("op_8727_pad_type_0"), val = tensor("custom")]; + tensor var_8727_pad_0 = const()[name = tensor("op_8727_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(789750208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(796303872))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(796304000)))]; + tensor var_8727_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_8725, groups = var_6865, pad = var_8727_pad_0, pad_type = var_8727_pad_type_0, strides = var_8723, weight = up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_515_cast)[name = tensor("op_8727_cast")]; + tensor var_8728_split_sizes_0 = const()[name = tensor("op_8728_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8728_axis_0 = const()[name = tensor("op_8728_axis_0"), val = tensor(1)]; + tensor var_8728_cast_0, tensor var_8728_cast_1 = split(axis = var_8728_axis_0, split_sizes = var_8728_split_sizes_0, x = var_8727_cast)[name = tensor("op_8728_cast")]; + tensor var_8730_mode_0 = const()[name = tensor("op_8730_mode_0"), val = tensor("EXACT")]; + tensor var_8730_cast = gelu(mode = var_8730_mode_0, x = var_8728_cast_1)[name = tensor("op_8730_cast")]; + tensor input_517_cast = mul(x = var_8728_cast_0, y = var_8730_cast)[name = tensor("input_517_cast")]; + tensor var_8734 = const()[name = tensor("op_8734"), val = tensor([1, 1])]; + tensor var_8736 = const()[name = tensor("op_8736"), val = tensor([1, 1])]; + tensor var_8738_pad_type_0 = const()[name = tensor("op_8738_pad_type_0"), val = tensor("custom")]; + tensor var_8738_pad_0 = const()[name = tensor("op_8738_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(796324544))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(801239808))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(801240000)))]; + tensor var_8738_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_8736, groups = var_6865, pad = var_8738_pad_0, pad_type = var_8738_pad_type_0, strides = var_8734, weight = 
up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_517_cast)[name = tensor("op_8738_cast")]; + tensor hidden_states_347_cast = add(x = var_8738_cast, y = inputs_263_cast)[name = tensor("hidden_states_347_cast")]; + tensor var_8740 = const()[name = tensor("op_8740"), val = tensor([2, 1280, 32, 32])]; + tensor input_519_cast = reshape(shape = var_8740, x = hidden_states_347_cast)[name = tensor("input_519_cast")]; + tensor var_8744 = const()[name = tensor("op_8744"), val = tensor([1, 1])]; + tensor var_8746 = const()[name = tensor("op_8746"), val = tensor([1, 1])]; + tensor hidden_states_349_pad_type_0 = const()[name = tensor("hidden_states_349_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_349_pad_0 = const()[name = tensor("hidden_states_349_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(801242624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(802471488))), name = tensor("up_blocks_0_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(802471680)))]; + tensor hidden_states_349_cast = conv(bias = up_blocks_0_attentions_0_proj_out_bias_to_fp16, dilations = var_8746, groups = var_6865, pad = hidden_states_349_pad_0, pad_type = hidden_states_349_pad_type_0, strides = var_8744, weight = up_blocks_0_attentions_0_proj_out_weight_to_fp16_palettized, x = input_519_cast)[name = tensor("hidden_states_349_cast")]; + tensor hidden_states_351_cast = add(x = hidden_states_349_cast, y = hidden_states_283_cast)[name = tensor("hidden_states_351_cast")]; + tensor input_521_interleave_0 = const()[name = tensor("input_521_interleave_0"), val = tensor(false)]; + tensor input_521_cast = concat(axis = var_6865, interleave = input_521_interleave_0, values = (hidden_states_351_cast, input_213_cast))[name = tensor("input_521_cast")]; + tensor reshape_96_shape_0 = const()[name = tensor("reshape_96_shape_0"), val = tensor([2, 32, 80, 32, 32])]; + tensor reshape_96_cast = reshape(shape = reshape_96_shape_0, x = input_521_cast)[name = tensor("reshape_96_cast")]; + tensor reduce_mean_72_axes_0 = const()[name = tensor("reduce_mean_72_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_72_keep_dims_0 = const()[name = tensor("reduce_mean_72_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_72_cast = reduce_mean(axes = reduce_mean_72_axes_0, keep_dims = reduce_mean_72_keep_dims_0, x = reshape_96_cast)[name = tensor("reduce_mean_72_cast")]; + tensor sub_48_cast = sub(x = reshape_96_cast, y = reduce_mean_72_cast)[name = tensor("sub_48_cast")]; + tensor square_24_cast = square(x = sub_48_cast)[name = tensor("square_24_cast")]; + tensor reduce_mean_74_axes_0 = const()[name = tensor("reduce_mean_74_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_74_keep_dims_0 = const()[name = tensor("reduce_mean_74_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_74_cast = reduce_mean(axes = reduce_mean_74_axes_0, keep_dims = reduce_mean_74_keep_dims_0, x = square_24_cast)[name = tensor("reduce_mean_74_cast")]; + tensor add_48_y_0_to_fp16 = const()[name = tensor("add_48_y_0_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor add_48_cast = add(x = reduce_mean_74_cast, y = add_48_y_0_to_fp16)[name = tensor("add_48_cast")]; + tensor sqrt_24_cast = sqrt(x = add_48_cast)[name = tensor("sqrt_24_cast")]; + tensor real_div_24_cast = real_div(x = sub_48_cast, y = sqrt_24_cast)[name = tensor("real_div_24_cast")]; + tensor reshape_97_shape_0 = const()[name = tensor("reshape_97_shape_0"), val = tensor([2, 2560, 32, 32])]; + tensor reshape_97_cast = reshape(shape = reshape_97_shape_0, x = real_div_24_cast)[name = tensor("reshape_97_cast")]; + tensor add_49_gamma_0_to_fp16 = const()[name = tensor("add_49_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(802474304)))]; + tensor add_49_beta_0_to_fp16 = const()[name = tensor("add_49_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(802479488)))]; + tensor add_49_epsilon_0_to_fp16 = const()[name = tensor("add_49_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_49_cast = batch_norm(beta = add_49_beta_0_to_fp16, epsilon = add_49_epsilon_0_to_fp16, gamma = add_49_gamma_0_to_fp16, mean = add_43_mean_0_to_fp16, variance = add_43_variance_0_to_fp16, x = reshape_97_cast)[name = tensor("add_49_cast")]; + tensor input_525_cast = silu(x = add_49_cast)[name = tensor("input_525_cast")]; + tensor var_8764 = const()[name = tensor("op_8764"), val = tensor([1, 1])]; + tensor var_8766 = const()[name = tensor("op_8766"), val = tensor([1, 1])]; + tensor hidden_states_353_pad_type_0 = const()[name = tensor("hidden_states_353_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_353_pad_0 = const()[name = tensor("hidden_states_353_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(802484672))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(824603136))), name = tensor("up_blocks_0_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 3, 3])]; + tensor up_blocks_0_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(824603328)))]; + tensor hidden_states_353_cast = conv(bias = up_blocks_0_resnets_1_conv1_bias_to_fp16, dilations = var_8766, groups = var_6865, pad = hidden_states_353_pad_0, pad_type = hidden_states_353_pad_type_0, strides = var_8764, weight = up_blocks_0_resnets_1_conv1_weight_to_fp16_palettized, x = input_525_cast)[name = tensor("hidden_states_353_cast")]; + tensor var_8772 = const()[name = tensor("op_8772"), val = tensor([1, 1])]; + tensor var_8774 = const()[name = tensor("op_8774"), val = tensor([1, 1])]; + tensor temb_19_pad_type_0 = const()[name = tensor("temb_19_pad_type_0"), val = tensor("custom")]; + tensor temb_19_pad_0 = const()[name = tensor("temb_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(824605952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(825834816))), name = tensor("up_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_resnets_1_time_emb_proj_bias_to_fp16 = 
const()[name = tensor("up_blocks_0_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(825835008)))]; + tensor temb_19_cast = conv(bias = up_blocks_0_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_8774, groups = var_6865, pad = temb_19_pad_0, pad_type = temb_19_pad_type_0, strides = var_8772, weight = up_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_19_cast")]; + tensor input_529_cast = add(x = hidden_states_353_cast, y = temb_19_cast)[name = tensor("input_529_cast")]; + tensor reshape_100_shape_0 = const()[name = tensor("reshape_100_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_100_cast = reshape(shape = reshape_100_shape_0, x = input_529_cast)[name = tensor("reshape_100_cast")]; + tensor reduce_mean_75_axes_0 = const()[name = tensor("reduce_mean_75_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_75_keep_dims_0 = const()[name = tensor("reduce_mean_75_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_75_cast = reduce_mean(axes = reduce_mean_75_axes_0, keep_dims = reduce_mean_75_keep_dims_0, x = reshape_100_cast)[name = tensor("reduce_mean_75_cast")]; + tensor sub_50_cast = sub(x = reshape_100_cast, y = reduce_mean_75_cast)[name = tensor("sub_50_cast")]; + tensor square_25_cast = square(x = sub_50_cast)[name = tensor("square_25_cast")]; + tensor reduce_mean_77_axes_0 = const()[name = tensor("reduce_mean_77_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_77_keep_dims_0 = const()[name = tensor("reduce_mean_77_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_77_cast = reduce_mean(axes = reduce_mean_77_axes_0, keep_dims = reduce_mean_77_keep_dims_0, x = square_25_cast)[name = tensor("reduce_mean_77_cast")]; + tensor add_50_y_0_to_fp16 = const()[name = tensor("add_50_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_50_cast = add(x = reduce_mean_77_cast, y = add_50_y_0_to_fp16)[name = tensor("add_50_cast")]; + tensor sqrt_25_cast = sqrt(x = add_50_cast)[name = tensor("sqrt_25_cast")]; + tensor real_div_25_cast = real_div(x = sub_50_cast, y = sqrt_25_cast)[name = tensor("real_div_25_cast")]; + tensor reshape_101_shape_0 = const()[name = tensor("reshape_101_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_101_cast = reshape(shape = reshape_101_shape_0, x = real_div_25_cast)[name = tensor("reshape_101_cast")]; + tensor add_51_gamma_0_to_fp16 = const()[name = tensor("add_51_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(825837632)))]; + tensor add_51_beta_0_to_fp16 = const()[name = tensor("add_51_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(825840256)))]; + tensor add_51_epsilon_0_to_fp16 = const()[name = tensor("add_51_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_51_cast = batch_norm(beta = add_51_beta_0_to_fp16, epsilon = add_51_epsilon_0_to_fp16, gamma = add_51_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_101_cast)[name = tensor("add_51_cast")]; + tensor input_533_cast = silu(x = add_51_cast)[name = tensor("input_533_cast")]; + tensor var_8784 = const()[name = tensor("op_8784"), val = tensor([1, 1])]; + tensor var_8786 = const()[name = tensor("op_8786"), val = tensor([1, 1])]; + tensor hidden_states_355_pad_type_0 = const()[name = tensor("hidden_states_355_pad_type_0"), val = tensor("custom")]; + tensor 
hidden_states_355_pad_0 = const()[name = tensor("hidden_states_355_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(825842880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(836902144))), name = tensor("up_blocks_0_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor up_blocks_0_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(836902336)))]; + tensor hidden_states_355_cast = conv(bias = up_blocks_0_resnets_1_conv2_bias_to_fp16, dilations = var_8786, groups = var_6865, pad = hidden_states_355_pad_0, pad_type = hidden_states_355_pad_type_0, strides = var_8784, weight = up_blocks_0_resnets_1_conv2_weight_to_fp16_palettized, x = input_533_cast)[name = tensor("hidden_states_355_cast")]; + tensor var_8791 = const()[name = tensor("op_8791"), val = tensor([1, 1])]; + tensor var_8793 = const()[name = tensor("op_8793"), val = tensor([1, 1])]; + tensor x_7_pad_type_0 = const()[name = tensor("x_7_pad_type_0"), val = tensor("custom")]; + tensor x_7_pad_0 = const()[name = tensor("x_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_1_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(836904960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(839362624))), name = tensor("up_blocks_0_resnets_1_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 1, 1])]; + tensor up_blocks_0_resnets_1_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_1_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(839362816)))]; + tensor x_7_cast = conv(bias = up_blocks_0_resnets_1_conv_shortcut_bias_to_fp16, dilations = var_8793, groups = var_6865, pad = x_7_pad_0, pad_type = x_7_pad_type_0, strides = var_8791, weight = up_blocks_0_resnets_1_conv_shortcut_weight_to_fp16_palettized, x = input_521_cast)[name = tensor("x_7_cast")]; + tensor hidden_states_357_cast = add(x = x_7_cast, y = hidden_states_355_cast)[name = tensor("hidden_states_357_cast")]; + tensor reshape_104_shape_0 = const()[name = tensor("reshape_104_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_104_cast = reshape(shape = reshape_104_shape_0, x = hidden_states_357_cast)[name = tensor("reshape_104_cast")]; + tensor reduce_mean_78_axes_0 = const()[name = tensor("reduce_mean_78_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_78_keep_dims_0 = const()[name = tensor("reduce_mean_78_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_78_cast = reduce_mean(axes = reduce_mean_78_axes_0, keep_dims = reduce_mean_78_keep_dims_0, x = reshape_104_cast)[name = tensor("reduce_mean_78_cast")]; + tensor sub_52_cast = sub(x = reshape_104_cast, y = reduce_mean_78_cast)[name = tensor("sub_52_cast")]; + tensor square_26_cast = square(x = sub_52_cast)[name = tensor("square_26_cast")]; + tensor reduce_mean_80_axes_0 = const()[name = tensor("reduce_mean_80_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_80_keep_dims_0 = const()[name = tensor("reduce_mean_80_keep_dims_0"), val = tensor(true)]; + tensor 
reduce_mean_80_cast = reduce_mean(axes = reduce_mean_80_axes_0, keep_dims = reduce_mean_80_keep_dims_0, x = square_26_cast)[name = tensor("reduce_mean_80_cast")]; + tensor add_52_y_0_to_fp16 = const()[name = tensor("add_52_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_52_cast = add(x = reduce_mean_80_cast, y = add_52_y_0_to_fp16)[name = tensor("add_52_cast")]; + tensor sqrt_26_cast = sqrt(x = add_52_cast)[name = tensor("sqrt_26_cast")]; + tensor real_div_26_cast = real_div(x = sub_52_cast, y = sqrt_26_cast)[name = tensor("real_div_26_cast")]; + tensor reshape_105_shape_0 = const()[name = tensor("reshape_105_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_105_cast = reshape(shape = reshape_105_shape_0, x = real_div_26_cast)[name = tensor("reshape_105_cast")]; + tensor add_53_gamma_0_to_fp16 = const()[name = tensor("add_53_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(839365440)))]; + tensor add_53_beta_0_to_fp16 = const()[name = tensor("add_53_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(839368064)))]; + tensor add_53_epsilon_0_to_fp16 = const()[name = tensor("add_53_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_53_cast = batch_norm(beta = add_53_beta_0_to_fp16, epsilon = add_53_epsilon_0_to_fp16, gamma = add_53_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_105_cast)[name = tensor("add_53_cast")]; + tensor var_8831 = const()[name = tensor("op_8831"), val = tensor([1, 1])]; + tensor var_8833 = const()[name = tensor("op_8833"), val = tensor([1, 1])]; + tensor hidden_states_359_pad_type_0 = const()[name = tensor("hidden_states_359_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_359_pad_0 = const()[name = tensor("hidden_states_359_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(839370688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(840599552))), name = tensor("up_blocks_0_attentions_1_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(840599744)))]; + tensor hidden_states_359_cast = conv(bias = up_blocks_0_attentions_1_proj_in_bias_to_fp16, dilations = var_8833, groups = var_6865, pad = hidden_states_359_pad_0, pad_type = hidden_states_359_pad_type_0, strides = var_8831, weight = up_blocks_0_attentions_1_proj_in_weight_to_fp16_palettized, x = add_53_cast)[name = tensor("hidden_states_359_cast")]; + tensor var_8838 = const()[name = tensor("op_8838"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_265_cast = reshape(shape = var_8838, x = hidden_states_359_cast)[name = tensor("inputs_265_cast")]; + tensor var_8848 = const()[name = tensor("op_8848"), val = tensor([1])]; + tensor channels_mean_265_cast = reduce_mean(axes = var_8848, keep_dims = var_6860, x = inputs_265_cast)[name = tensor("channels_mean_265_cast")]; + tensor zero_mean_265_cast = sub(x = inputs_265_cast, y = channels_mean_265_cast)[name = tensor("zero_mean_265_cast")]; + tensor zero_mean_sq_265_cast = mul(x = zero_mean_265_cast, y = zero_mean_265_cast)[name = 
tensor("zero_mean_sq_265_cast")]; + tensor var_8852 = const()[name = tensor("op_8852"), val = tensor([1])]; + tensor var_8853_cast = reduce_mean(axes = var_8852, keep_dims = var_6860, x = zero_mean_sq_265_cast)[name = tensor("op_8853_cast")]; + tensor var_8854_to_fp16 = const()[name = tensor("op_8854_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8855_cast = add(x = var_8853_cast, y = var_8854_to_fp16)[name = tensor("op_8855_cast")]; + tensor denom_265_epsilon_0_to_fp16 = const()[name = tensor("denom_265_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_265_cast = rsqrt(epsilon = denom_265_epsilon_0_to_fp16, x = var_8855_cast)[name = tensor("denom_265_cast")]; + tensor out_265_cast = mul(x = zero_mean_265_cast, y = denom_265_cast)[name = tensor("out_265_cast")]; + tensor var_8859_to_fp16 = const()[name = tensor("op_8859_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(840602368)))]; + tensor var_8860_cast = add(x = out_265_cast, y = var_8859_to_fp16)[name = tensor("op_8860_cast")]; + tensor var_8862_to_fp16 = const()[name = tensor("op_8862_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(840604992)))]; + tensor hidden_states_361_cast = mul(x = var_8860_cast, y = var_8862_to_fp16)[name = tensor("hidden_states_361_cast")]; + tensor var_8869 = const()[name = tensor("op_8869"), val = tensor([1, 1])]; + tensor var_8871 = const()[name = tensor("op_8871"), val = tensor([1, 1])]; + tensor q_177_pad_type_0 = const()[name = tensor("q_177_pad_type_0"), val = tensor("custom")]; + tensor q_177_pad_0 = const()[name = tensor("q_177_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(840607616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(841426880))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_177_cast = conv(dilations = var_8871, groups = var_6865, pad = q_177_pad_0, pad_type = q_177_pad_type_0, strides = var_8869, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_361_cast)[name = tensor("q_177_cast")]; + tensor var_8875 = const()[name = tensor("op_8875"), val = tensor([1, 1])]; + tensor var_8877 = const()[name = tensor("op_8877"), val = tensor([1, 1])]; + tensor k_177_pad_type_0 = const()[name = tensor("k_177_pad_type_0"), val = tensor("custom")]; + tensor k_177_pad_0 = const()[name = tensor("k_177_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(841427008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(842246272))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_177_cast = conv(dilations = var_8877, groups = var_6865, pad = k_177_pad_0, pad_type = k_177_pad_type_0, strides = var_8875, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_361_cast)[name = tensor("k_177_cast")]; + tensor var_8881 = 
const()[name = tensor("op_8881"), val = tensor([1, 1])]; + tensor var_8883 = const()[name = tensor("op_8883"), val = tensor([1, 1])]; + tensor v_177_pad_type_0 = const()[name = tensor("v_177_pad_type_0"), val = tensor("custom")]; + tensor v_177_pad_0 = const()[name = tensor("v_177_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(842246400))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(843475264))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_177_cast = conv(dilations = var_8883, groups = var_6865, pad = v_177_pad_0, pad_type = v_177_pad_type_0, strides = var_8881, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_361_cast)[name = tensor("v_177_cast")]; + tensor var_8887 = const()[name = tensor("op_8887"), val = tensor([2, 20, 64, -1])]; + tensor var_8888_cast = reshape(shape = var_8887, x = q_177_cast)[name = tensor("op_8888_cast")]; + tensor var_8889 = const()[name = tensor("op_8889"), val = tensor([2, 20, 64, -1])]; + tensor var_8890_cast = reshape(shape = var_8889, x = k_177_cast)[name = tensor("op_8890_cast")]; + tensor var_8891 = const()[name = tensor("op_8891"), val = tensor([2, 20, 64, -1])]; + tensor var_8892_cast = reshape(shape = var_8891, x = v_177_cast)[name = tensor("op_8892_cast")]; + tensor attn_weights_353_transpose_x_0 = const()[name = tensor("attn_weights_353_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_353_transpose_y_0 = const()[name = tensor("attn_weights_353_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_353_cast = matmul(transpose_x = attn_weights_353_transpose_x_0, transpose_y = attn_weights_353_transpose_y_0, x = var_8888_cast, y = var_8890_cast)[name = tensor("attn_weights_353_cast")]; + tensor attn_weights_355_cast = mul(x = attn_weights_353_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_355_cast")]; + tensor var_8896_cast = softmax(axis = var_6849, x = attn_weights_355_cast)[name = tensor("op_8896_cast")]; + tensor attn_177_transpose_x_0 = const()[name = tensor("attn_177_transpose_x_0"), val = tensor(false)]; + tensor attn_177_transpose_y_0 = const()[name = tensor("attn_177_transpose_y_0"), val = tensor(true)]; + tensor attn_177_cast = matmul(transpose_x = attn_177_transpose_x_0, transpose_y = attn_177_transpose_y_0, x = var_8892_cast, y = var_8896_cast)[name = tensor("attn_177_cast")]; + tensor var_8900 = const()[name = tensor("op_8900"), val = tensor([2, 1280, 1, -1])]; + tensor input_537_cast = reshape(shape = var_8900, x = attn_177_cast)[name = tensor("input_537_cast")]; + tensor var_8905 = const()[name = tensor("op_8905"), val = tensor([1, 1])]; + tensor var_8907 = const()[name = tensor("op_8907"), val = tensor([1, 1])]; + tensor var_8909_pad_type_0 = const()[name = tensor("op_8909_pad_type_0"), val = tensor("custom")]; + tensor var_8909_pad_0 = const()[name = tensor("op_8909_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(843475456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(844704320))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844704512)))]; + tensor var_8909_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_8907, groups = var_6865, pad = var_8909_pad_0, pad_type = var_8909_pad_type_0, strides = var_8905, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_537_cast)[name = tensor("op_8909_cast")]; + tensor inputs_267_cast = add(x = var_8909_cast, y = inputs_265_cast)[name = tensor("inputs_267_cast")]; + tensor var_8913 = const()[name = tensor("op_8913"), val = tensor([1])]; + tensor channels_mean_267_cast = reduce_mean(axes = var_8913, keep_dims = var_6860, x = inputs_267_cast)[name = tensor("channels_mean_267_cast")]; + tensor zero_mean_267_cast = sub(x = inputs_267_cast, y = channels_mean_267_cast)[name = tensor("zero_mean_267_cast")]; + tensor zero_mean_sq_267_cast = mul(x = zero_mean_267_cast, y = zero_mean_267_cast)[name = tensor("zero_mean_sq_267_cast")]; + tensor var_8917 = const()[name = tensor("op_8917"), val = tensor([1])]; + tensor var_8918_cast = reduce_mean(axes = var_8917, keep_dims = var_6860, x = zero_mean_sq_267_cast)[name = tensor("op_8918_cast")]; + tensor var_8919_to_fp16 = const()[name = tensor("op_8919_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8920_cast = add(x = var_8918_cast, y = var_8919_to_fp16)[name = tensor("op_8920_cast")]; + tensor denom_267_epsilon_0_to_fp16 = const()[name = tensor("denom_267_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_267_cast = rsqrt(epsilon = denom_267_epsilon_0_to_fp16, x = var_8920_cast)[name = tensor("denom_267_cast")]; + tensor out_267_cast = mul(x = zero_mean_267_cast, y = denom_267_cast)[name = tensor("out_267_cast")]; + tensor var_8924_to_fp16 = const()[name = tensor("op_8924_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844707136)))]; + tensor var_8925_cast = add(x = out_267_cast, y = var_8924_to_fp16)[name = tensor("op_8925_cast")]; + tensor var_8927_to_fp16 = const()[name = tensor("op_8927_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844709760)))]; + tensor hidden_states_363_cast = mul(x = var_8925_cast, y = var_8927_to_fp16)[name = tensor("hidden_states_363_cast")]; + tensor var_8934 = const()[name = tensor("op_8934"), val = tensor([1, 1])]; + tensor var_8936 = const()[name = tensor("op_8936"), val = tensor([1, 1])]; + tensor q_179_pad_type_0 = const()[name = tensor("q_179_pad_type_0"), val = tensor("custom")]; + tensor q_179_pad_0 = const()[name = tensor("q_179_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844712384))), lut = tensor([-0x1.d34p-6, -0x1.1dp-7, 0x1.1c8p-7, 0x1.d38p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_179_cast = conv(dilations = var_8936, groups = var_6865, 
pad = q_179_pad_0, pad_type = q_179_pad_type_0, strides = var_8934, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_363_cast)[name = tensor("q_179_cast")]; + tensor var_8940 = const()[name = tensor("op_8940"), val = tensor([1, 1])]; + tensor var_8942 = const()[name = tensor("op_8942"), val = tensor([1, 1])]; + tensor k_179_pad_type_0 = const()[name = tensor("k_179_pad_type_0"), val = tensor("custom")]; + tensor k_179_pad_0 = const()[name = tensor("k_179_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(845122048))), lut = tensor([-0x1.b84p-6, -0x1.05p-7, 0x1.07p-7, 0x1.b9cp-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_179_cast = conv(dilations = var_8942, groups = var_6865, pad = k_179_pad_0, pad_type = k_179_pad_type_0, strides = var_8940, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_179_cast")]; + tensor var_8946 = const()[name = tensor("op_8946"), val = tensor([1, 1])]; + tensor var_8948 = const()[name = tensor("op_8948"), val = tensor([1, 1])]; + tensor v_179_pad_type_0 = const()[name = tensor("v_179_pad_type_0"), val = tensor("custom")]; + tensor v_179_pad_0 = const()[name = tensor("v_179_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(845777472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847088256))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_179_cast = conv(dilations = var_8948, groups = var_6865, pad = v_179_pad_0, pad_type = v_179_pad_type_0, strides = var_8946, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_179_cast")]; + tensor var_8952 = const()[name = tensor("op_8952"), val = tensor([2, 20, 64, -1])]; + tensor var_8953_cast = reshape(shape = var_8952, x = q_179_cast)[name = tensor("op_8953_cast")]; + tensor var_8954 = const()[name = tensor("op_8954"), val = tensor([2, 20, 64, -1])]; + tensor var_8955_cast = reshape(shape = var_8954, x = k_179_cast)[name = tensor("op_8955_cast")]; + tensor var_8956 = const()[name = tensor("op_8956"), val = tensor([2, 20, 64, -1])]; + tensor var_8957_cast = reshape(shape = var_8956, x = v_179_cast)[name = tensor("op_8957_cast")]; + tensor attn_weights_357_transpose_x_0 = const()[name = tensor("attn_weights_357_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_357_transpose_y_0 = const()[name = tensor("attn_weights_357_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_357_cast = matmul(transpose_x = attn_weights_357_transpose_x_0, transpose_y = attn_weights_357_transpose_y_0, x = var_8953_cast, y = var_8955_cast)[name = tensor("attn_weights_357_cast")]; + tensor attn_weights_359_cast = mul(x = attn_weights_357_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_359_cast")]; + tensor var_8961_cast = softmax(axis = var_6849, x = 
attn_weights_359_cast)[name = tensor("op_8961_cast")]; + tensor attn_179_transpose_x_0 = const()[name = tensor("attn_179_transpose_x_0"), val = tensor(false)]; + tensor attn_179_transpose_y_0 = const()[name = tensor("attn_179_transpose_y_0"), val = tensor(true)]; + tensor attn_179_cast = matmul(transpose_x = attn_179_transpose_x_0, transpose_y = attn_179_transpose_y_0, x = var_8957_cast, y = var_8961_cast)[name = tensor("attn_179_cast")]; + tensor var_8965 = const()[name = tensor("op_8965"), val = tensor([2, 1280, 1, -1])]; + tensor input_539_cast = reshape(shape = var_8965, x = attn_179_cast)[name = tensor("input_539_cast")]; + tensor var_8970 = const()[name = tensor("op_8970"), val = tensor([1, 1])]; + tensor var_8972 = const()[name = tensor("op_8972"), val = tensor([1, 1])]; + tensor var_8974_pad_type_0 = const()[name = tensor("op_8974_pad_type_0"), val = tensor("custom")]; + tensor var_8974_pad_0 = const()[name = tensor("op_8974_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847088384))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847907648))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847907776)))]; + tensor var_8974_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_8972, groups = var_6865, pad = var_8974_pad_0, pad_type = var_8974_pad_type_0, strides = var_8970, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_539_cast)[name = tensor("op_8974_cast")]; + tensor inputs_269_cast = add(x = var_8974_cast, y = inputs_267_cast)[name = tensor("inputs_269_cast")]; + tensor var_8978 = const()[name = tensor("op_8978"), val = tensor([1])]; + tensor channels_mean_269_cast = reduce_mean(axes = var_8978, keep_dims = var_6860, x = inputs_269_cast)[name = tensor("channels_mean_269_cast")]; + tensor zero_mean_269_cast = sub(x = inputs_269_cast, y = channels_mean_269_cast)[name = tensor("zero_mean_269_cast")]; + tensor zero_mean_sq_269_cast = mul(x = zero_mean_269_cast, y = zero_mean_269_cast)[name = tensor("zero_mean_sq_269_cast")]; + tensor var_8982 = const()[name = tensor("op_8982"), val = tensor([1])]; + tensor var_8983_cast = reduce_mean(axes = var_8982, keep_dims = var_6860, x = zero_mean_sq_269_cast)[name = tensor("op_8983_cast")]; + tensor var_8984_to_fp16 = const()[name = tensor("op_8984_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8985_cast = add(x = var_8983_cast, y = var_8984_to_fp16)[name = tensor("op_8985_cast")]; + tensor denom_269_epsilon_0_to_fp16 = const()[name = tensor("denom_269_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_269_cast = rsqrt(epsilon = denom_269_epsilon_0_to_fp16, x = var_8985_cast)[name = tensor("denom_269_cast")]; + tensor out_269_cast = mul(x = zero_mean_269_cast, y = denom_269_cast)[name = tensor("out_269_cast")]; + tensor var_8989_to_fp16 = const()[name = tensor("op_8989_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(847910400)))]; + tensor var_8990_cast = add(x = out_269_cast, y = var_8989_to_fp16)[name = tensor("op_8990_cast")]; + tensor var_8992_to_fp16 = const()[name = tensor("op_8992_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847913024)))]; + tensor input_541_cast = mul(x = var_8990_cast, y = var_8992_to_fp16)[name = tensor("input_541_cast")]; + tensor var_9000 = const()[name = tensor("op_9000"), val = tensor([1, 1])]; + tensor var_9002 = const()[name = tensor("op_9002"), val = tensor([1, 1])]; + tensor var_9004_pad_type_0 = const()[name = tensor("op_9004_pad_type_0"), val = tensor("custom")]; + tensor var_9004_pad_0 = const()[name = tensor("op_9004_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847915648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(857746112))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(857746304)))]; + tensor var_9004_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_9002, groups = var_6865, pad = var_9004_pad_0, pad_type = var_9004_pad_type_0, strides = var_9000, weight = up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_541_cast)[name = tensor("op_9004_cast")]; + tensor var_9005_split_sizes_0 = const()[name = tensor("op_9005_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9005_axis_0 = const()[name = tensor("op_9005_axis_0"), val = tensor(1)]; + tensor var_9005_cast_0, tensor var_9005_cast_1 = split(axis = var_9005_axis_0, split_sizes = var_9005_split_sizes_0, x = var_9004_cast)[name = tensor("op_9005_cast")]; + tensor var_9007_mode_0 = const()[name = tensor("op_9007_mode_0"), val = tensor("EXACT")]; + tensor var_9007_cast = gelu(mode = var_9007_mode_0, x = var_9005_cast_1)[name = tensor("op_9007_cast")]; + tensor input_543_cast = mul(x = var_9005_cast_0, y = var_9007_cast)[name = tensor("input_543_cast")]; + tensor var_9011 = const()[name = tensor("op_9011"), val = tensor([1, 1])]; + tensor var_9013 = const()[name = tensor("op_9013"), val = tensor([1, 1])]; + tensor var_9015_pad_type_0 = const()[name = tensor("op_9015_pad_type_0"), val = tensor("custom")]; + tensor var_9015_pad_0 = const()[name = tensor("op_9015_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(857766848))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(862682112))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(862682304)))]; + tensor var_9015_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_9013, groups = var_6865, pad = var_9015_pad_0, pad_type = var_9015_pad_type_0, strides = var_9011, weight = up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_543_cast)[name = tensor("op_9015_cast")]; + tensor inputs_271_cast = add(x = var_9015_cast, y = inputs_269_cast)[name = tensor("inputs_271_cast")]; + tensor var_9025 = const()[name = tensor("op_9025"), val = tensor([1])]; + tensor channels_mean_271_cast = reduce_mean(axes = var_9025, keep_dims = var_6860, x = inputs_271_cast)[name = tensor("channels_mean_271_cast")]; + tensor zero_mean_271_cast = sub(x = inputs_271_cast, y = channels_mean_271_cast)[name = tensor("zero_mean_271_cast")]; + tensor zero_mean_sq_271_cast = mul(x = zero_mean_271_cast, y = zero_mean_271_cast)[name = tensor("zero_mean_sq_271_cast")]; + tensor var_9029 = const()[name = tensor("op_9029"), val = tensor([1])]; + tensor var_9030_cast = reduce_mean(axes = var_9029, keep_dims = var_6860, x = zero_mean_sq_271_cast)[name = tensor("op_9030_cast")]; + tensor var_9031_to_fp16 = const()[name = tensor("op_9031_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9032_cast = add(x = var_9030_cast, y = var_9031_to_fp16)[name = tensor("op_9032_cast")]; + tensor denom_271_epsilon_0_to_fp16 = const()[name = tensor("denom_271_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_271_cast = rsqrt(epsilon = denom_271_epsilon_0_to_fp16, x = var_9032_cast)[name = tensor("denom_271_cast")]; + tensor out_271_cast = mul(x = zero_mean_271_cast, y = denom_271_cast)[name = tensor("out_271_cast")]; + tensor var_9036_to_fp16 = const()[name = tensor("op_9036_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(862684928)))]; + tensor var_9037_cast = add(x = out_271_cast, y = var_9036_to_fp16)[name = tensor("op_9037_cast")]; + tensor var_9039_to_fp16 = const()[name = tensor("op_9039_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(862687552)))]; + tensor hidden_states_367_cast = mul(x = var_9037_cast, y = var_9039_to_fp16)[name = tensor("hidden_states_367_cast")]; + tensor var_9046 = const()[name = tensor("op_9046"), val = tensor([1, 1])]; + tensor var_9048 = const()[name = tensor("op_9048"), val = tensor([1, 1])]; + tensor q_181_pad_type_0 = const()[name = tensor("q_181_pad_type_0"), val = tensor("custom")]; + tensor q_181_pad_0 = const()[name = tensor("q_181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(862690176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(863509440))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_181_cast = conv(dilations = var_9048, groups = var_6865, pad = q_181_pad_0, pad_type = q_181_pad_type_0, strides = var_9046, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_367_cast)[name = tensor("q_181_cast")]; + tensor var_9052 = 
const()[name = tensor("op_9052"), val = tensor([1, 1])]; + tensor var_9054 = const()[name = tensor("op_9054"), val = tensor([1, 1])]; + tensor k_181_pad_type_0 = const()[name = tensor("k_181_pad_type_0"), val = tensor("custom")]; + tensor k_181_pad_0 = const()[name = tensor("k_181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(863509568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(864328832))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_181_cast = conv(dilations = var_9054, groups = var_6865, pad = k_181_pad_0, pad_type = k_181_pad_type_0, strides = var_9052, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_367_cast)[name = tensor("k_181_cast")]; + tensor var_9058 = const()[name = tensor("op_9058"), val = tensor([1, 1])]; + tensor var_9060 = const()[name = tensor("op_9060"), val = tensor([1, 1])]; + tensor v_181_pad_type_0 = const()[name = tensor("v_181_pad_type_0"), val = tensor("custom")]; + tensor v_181_pad_0 = const()[name = tensor("v_181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(864328960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(865557824))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_181_cast = conv(dilations = var_9060, groups = var_6865, pad = v_181_pad_0, pad_type = v_181_pad_type_0, strides = var_9058, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_367_cast)[name = tensor("v_181_cast")]; + tensor var_9064 = const()[name = tensor("op_9064"), val = tensor([2, 20, 64, -1])]; + tensor var_9065_cast = reshape(shape = var_9064, x = q_181_cast)[name = tensor("op_9065_cast")]; + tensor var_9066 = const()[name = tensor("op_9066"), val = tensor([2, 20, 64, -1])]; + tensor var_9067_cast = reshape(shape = var_9066, x = k_181_cast)[name = tensor("op_9067_cast")]; + tensor var_9068 = const()[name = tensor("op_9068"), val = tensor([2, 20, 64, -1])]; + tensor var_9069_cast = reshape(shape = var_9068, x = v_181_cast)[name = tensor("op_9069_cast")]; + tensor attn_weights_361_transpose_x_0 = const()[name = tensor("attn_weights_361_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_361_transpose_y_0 = const()[name = tensor("attn_weights_361_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_361_cast = matmul(transpose_x = attn_weights_361_transpose_x_0, transpose_y = attn_weights_361_transpose_y_0, x = var_9065_cast, y = var_9067_cast)[name = tensor("attn_weights_361_cast")]; + tensor attn_weights_363_cast = mul(x = attn_weights_361_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_363_cast")]; + tensor var_9073_cast = softmax(axis = var_6849, x = attn_weights_363_cast)[name = tensor("op_9073_cast")]; + tensor attn_181_transpose_x_0 = const()[name = tensor("attn_181_transpose_x_0"), val = tensor(false)]; + tensor attn_181_transpose_y_0 = const()[name 
= tensor("attn_181_transpose_y_0"), val = tensor(true)]; + tensor attn_181_cast = matmul(transpose_x = attn_181_transpose_x_0, transpose_y = attn_181_transpose_y_0, x = var_9069_cast, y = var_9073_cast)[name = tensor("attn_181_cast")]; + tensor var_9077 = const()[name = tensor("op_9077"), val = tensor([2, 1280, 1, -1])]; + tensor input_545_cast = reshape(shape = var_9077, x = attn_181_cast)[name = tensor("input_545_cast")]; + tensor var_9082 = const()[name = tensor("op_9082"), val = tensor([1, 1])]; + tensor var_9084 = const()[name = tensor("op_9084"), val = tensor([1, 1])]; + tensor var_9086_pad_type_0 = const()[name = tensor("op_9086_pad_type_0"), val = tensor("custom")]; + tensor var_9086_pad_0 = const()[name = tensor("op_9086_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(865558016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(866786880))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(866787072)))]; + tensor var_9086_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_9084, groups = var_6865, pad = var_9086_pad_0, pad_type = var_9086_pad_type_0, strides = var_9082, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_545_cast)[name = tensor("op_9086_cast")]; + tensor inputs_273_cast = add(x = var_9086_cast, y = inputs_271_cast)[name = tensor("inputs_273_cast")]; + tensor var_9090 = const()[name = tensor("op_9090"), val = tensor([1])]; + tensor channels_mean_273_cast = reduce_mean(axes = var_9090, keep_dims = var_6860, x = inputs_273_cast)[name = tensor("channels_mean_273_cast")]; + tensor zero_mean_273_cast = sub(x = inputs_273_cast, y = channels_mean_273_cast)[name = tensor("zero_mean_273_cast")]; + tensor zero_mean_sq_273_cast = mul(x = zero_mean_273_cast, y = zero_mean_273_cast)[name = tensor("zero_mean_sq_273_cast")]; + tensor var_9094 = const()[name = tensor("op_9094"), val = tensor([1])]; + tensor var_9095_cast = reduce_mean(axes = var_9094, keep_dims = var_6860, x = zero_mean_sq_273_cast)[name = tensor("op_9095_cast")]; + tensor var_9096_to_fp16 = const()[name = tensor("op_9096_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9097_cast = add(x = var_9095_cast, y = var_9096_to_fp16)[name = tensor("op_9097_cast")]; + tensor denom_273_epsilon_0_to_fp16 = const()[name = tensor("denom_273_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_273_cast = rsqrt(epsilon = denom_273_epsilon_0_to_fp16, x = var_9097_cast)[name = tensor("denom_273_cast")]; + tensor out_273_cast = mul(x = zero_mean_273_cast, y = denom_273_cast)[name = tensor("out_273_cast")]; + tensor var_9101_to_fp16 = const()[name = tensor("op_9101_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(866789696)))]; + tensor var_9102_cast = add(x = out_273_cast, y = var_9101_to_fp16)[name = tensor("op_9102_cast")]; + tensor var_9104_to_fp16 = const()[name = 
tensor("op_9104_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(866792320)))]; + tensor hidden_states_369_cast = mul(x = var_9102_cast, y = var_9104_to_fp16)[name = tensor("hidden_states_369_cast")]; + tensor var_9111 = const()[name = tensor("op_9111"), val = tensor([1, 1])]; + tensor var_9113 = const()[name = tensor("op_9113"), val = tensor([1, 1])]; + tensor q_183_pad_type_0 = const()[name = tensor("q_183_pad_type_0"), val = tensor("custom")]; + tensor q_183_pad_0 = const()[name = tensor("q_183_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(866794944))), lut = tensor([-0x1.2bp-5, -0x1.64cp-7, 0x1.60cp-7, 0x1.2ap-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_183_cast = conv(dilations = var_9113, groups = var_6865, pad = q_183_pad_0, pad_type = q_183_pad_type_0, strides = var_9111, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_369_cast)[name = tensor("q_183_cast")]; + tensor var_9117 = const()[name = tensor("op_9117"), val = tensor([1, 1])]; + tensor var_9119 = const()[name = tensor("op_9119"), val = tensor([1, 1])]; + tensor k_183_pad_type_0 = const()[name = tensor("k_183_pad_type_0"), val = tensor("custom")]; + tensor k_183_pad_0 = const()[name = tensor("k_183_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(867204608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(868515392))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_183_cast = conv(dilations = var_9119, groups = var_6865, pad = k_183_pad_0, pad_type = k_183_pad_type_0, strides = var_9117, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_183_cast")]; + tensor var_9123 = const()[name = tensor("op_9123"), val = tensor([1, 1])]; + tensor var_9125 = const()[name = tensor("op_9125"), val = tensor([1, 1])]; + tensor v_183_pad_type_0 = const()[name = tensor("v_183_pad_type_0"), val = tensor("custom")]; + tensor v_183_pad_0 = const()[name = tensor("v_183_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(868515520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(869826304))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_183_cast = conv(dilations = var_9125, groups = var_6865, pad = v_183_pad_0, pad_type = v_183_pad_type_0, strides = var_9123, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_183_cast")]; + tensor var_9129 = const()[name = tensor("op_9129"), val = tensor([2, 
20, 64, -1])]; + tensor var_9130_cast = reshape(shape = var_9129, x = q_183_cast)[name = tensor("op_9130_cast")]; + tensor var_9131 = const()[name = tensor("op_9131"), val = tensor([2, 20, 64, -1])]; + tensor var_9132_cast = reshape(shape = var_9131, x = k_183_cast)[name = tensor("op_9132_cast")]; + tensor var_9133 = const()[name = tensor("op_9133"), val = tensor([2, 20, 64, -1])]; + tensor var_9134_cast = reshape(shape = var_9133, x = v_183_cast)[name = tensor("op_9134_cast")]; + tensor attn_weights_365_transpose_x_0 = const()[name = tensor("attn_weights_365_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_365_transpose_y_0 = const()[name = tensor("attn_weights_365_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_365_cast = matmul(transpose_x = attn_weights_365_transpose_x_0, transpose_y = attn_weights_365_transpose_y_0, x = var_9130_cast, y = var_9132_cast)[name = tensor("attn_weights_365_cast")]; + tensor attn_weights_367_cast = mul(x = attn_weights_365_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_367_cast")]; + tensor var_9138_cast = softmax(axis = var_6849, x = attn_weights_367_cast)[name = tensor("op_9138_cast")]; + tensor attn_183_transpose_x_0 = const()[name = tensor("attn_183_transpose_x_0"), val = tensor(false)]; + tensor attn_183_transpose_y_0 = const()[name = tensor("attn_183_transpose_y_0"), val = tensor(true)]; + tensor attn_183_cast = matmul(transpose_x = attn_183_transpose_x_0, transpose_y = attn_183_transpose_y_0, x = var_9134_cast, y = var_9138_cast)[name = tensor("attn_183_cast")]; + tensor var_9142 = const()[name = tensor("op_9142"), val = tensor([2, 1280, 1, -1])]; + tensor input_547_cast = reshape(shape = var_9142, x = attn_183_cast)[name = tensor("input_547_cast")]; + tensor var_9147 = const()[name = tensor("op_9147"), val = tensor([1, 1])]; + tensor var_9149 = const()[name = tensor("op_9149"), val = tensor([1, 1])]; + tensor var_9151_pad_type_0 = const()[name = tensor("op_9151_pad_type_0"), val = tensor("custom")]; + tensor var_9151_pad_0 = const()[name = tensor("op_9151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(869826432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(870645696))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(870645824)))]; + tensor var_9151_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_9149, groups = var_6865, pad = var_9151_pad_0, pad_type = var_9151_pad_type_0, strides = var_9147, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_547_cast)[name = tensor("op_9151_cast")]; + tensor inputs_275_cast = add(x = var_9151_cast, y = inputs_273_cast)[name = tensor("inputs_275_cast")]; + tensor var_9155 = const()[name = tensor("op_9155"), val = tensor([1])]; + tensor channels_mean_275_cast = reduce_mean(axes = var_9155, keep_dims = var_6860, x = inputs_275_cast)[name = 
tensor("channels_mean_275_cast")]; + tensor zero_mean_275_cast = sub(x = inputs_275_cast, y = channels_mean_275_cast)[name = tensor("zero_mean_275_cast")]; + tensor zero_mean_sq_275_cast = mul(x = zero_mean_275_cast, y = zero_mean_275_cast)[name = tensor("zero_mean_sq_275_cast")]; + tensor var_9159 = const()[name = tensor("op_9159"), val = tensor([1])]; + tensor var_9160_cast = reduce_mean(axes = var_9159, keep_dims = var_6860, x = zero_mean_sq_275_cast)[name = tensor("op_9160_cast")]; + tensor var_9161_to_fp16 = const()[name = tensor("op_9161_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9162_cast = add(x = var_9160_cast, y = var_9161_to_fp16)[name = tensor("op_9162_cast")]; + tensor denom_275_epsilon_0_to_fp16 = const()[name = tensor("denom_275_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_275_cast = rsqrt(epsilon = denom_275_epsilon_0_to_fp16, x = var_9162_cast)[name = tensor("denom_275_cast")]; + tensor out_275_cast = mul(x = zero_mean_275_cast, y = denom_275_cast)[name = tensor("out_275_cast")]; + tensor var_9166_to_fp16 = const()[name = tensor("op_9166_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(870648448)))]; + tensor var_9167_cast = add(x = out_275_cast, y = var_9166_to_fp16)[name = tensor("op_9167_cast")]; + tensor var_9169_to_fp16 = const()[name = tensor("op_9169_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(870651072)))]; + tensor input_549_cast = mul(x = var_9167_cast, y = var_9169_to_fp16)[name = tensor("input_549_cast")]; + tensor var_9177 = const()[name = tensor("op_9177"), val = tensor([1, 1])]; + tensor var_9179 = const()[name = tensor("op_9179"), val = tensor([1, 1])]; + tensor var_9181_pad_type_0 = const()[name = tensor("op_9181_pad_type_0"), val = tensor("custom")]; + tensor var_9181_pad_0 = const()[name = tensor("op_9181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(870653696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(880484160))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(880484352)))]; + tensor var_9181_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_9179, groups = var_6865, pad = var_9181_pad_0, pad_type = var_9181_pad_type_0, strides = var_9177, weight = up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_549_cast)[name = tensor("op_9181_cast")]; + tensor var_9182_split_sizes_0 = const()[name = tensor("op_9182_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9182_axis_0 = const()[name = tensor("op_9182_axis_0"), val = tensor(1)]; + tensor var_9182_cast_0, tensor var_9182_cast_1 = split(axis = var_9182_axis_0, split_sizes = var_9182_split_sizes_0, x = var_9181_cast)[name = tensor("op_9182_cast")]; + tensor var_9184_mode_0 = const()[name = tensor("op_9184_mode_0"), val = tensor("EXACT")]; + tensor var_9184_cast = 
gelu(mode = var_9184_mode_0, x = var_9182_cast_1)[name = tensor("op_9184_cast")]; + tensor input_551_cast = mul(x = var_9182_cast_0, y = var_9184_cast)[name = tensor("input_551_cast")]; + tensor var_9188 = const()[name = tensor("op_9188"), val = tensor([1, 1])]; + tensor var_9190 = const()[name = tensor("op_9190"), val = tensor([1, 1])]; + tensor var_9192_pad_type_0 = const()[name = tensor("op_9192_pad_type_0"), val = tensor("custom")]; + tensor var_9192_pad_0 = const()[name = tensor("op_9192_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(880504896))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(885420160))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(885420352)))]; + tensor var_9192_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_9190, groups = var_6865, pad = var_9192_pad_0, pad_type = var_9192_pad_type_0, strides = var_9188, weight = up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_551_cast)[name = tensor("op_9192_cast")]; + tensor inputs_277_cast = add(x = var_9192_cast, y = inputs_275_cast)[name = tensor("inputs_277_cast")]; + tensor var_9202 = const()[name = tensor("op_9202"), val = tensor([1])]; + tensor channels_mean_277_cast = reduce_mean(axes = var_9202, keep_dims = var_6860, x = inputs_277_cast)[name = tensor("channels_mean_277_cast")]; + tensor zero_mean_277_cast = sub(x = inputs_277_cast, y = channels_mean_277_cast)[name = tensor("zero_mean_277_cast")]; + tensor zero_mean_sq_277_cast = mul(x = zero_mean_277_cast, y = zero_mean_277_cast)[name = tensor("zero_mean_sq_277_cast")]; + tensor var_9206 = const()[name = tensor("op_9206"), val = tensor([1])]; + tensor var_9207_cast = reduce_mean(axes = var_9206, keep_dims = var_6860, x = zero_mean_sq_277_cast)[name = tensor("op_9207_cast")]; + tensor var_9208_to_fp16 = const()[name = tensor("op_9208_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9209_cast = add(x = var_9207_cast, y = var_9208_to_fp16)[name = tensor("op_9209_cast")]; + tensor denom_277_epsilon_0_to_fp16 = const()[name = tensor("denom_277_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_277_cast = rsqrt(epsilon = denom_277_epsilon_0_to_fp16, x = var_9209_cast)[name = tensor("denom_277_cast")]; + tensor out_277_cast = mul(x = zero_mean_277_cast, y = denom_277_cast)[name = tensor("out_277_cast")]; + tensor var_9213_to_fp16 = const()[name = tensor("op_9213_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(885422976)))]; + tensor var_9214_cast = add(x = out_277_cast, y = var_9213_to_fp16)[name = tensor("op_9214_cast")]; + tensor var_9216_to_fp16 = const()[name = tensor("op_9216_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(885425600)))]; + tensor hidden_states_373_cast = mul(x = var_9214_cast, y = var_9216_to_fp16)[name = tensor("hidden_states_373_cast")]; + tensor var_9223 = 
const()[name = tensor("op_9223"), val = tensor([1, 1])]; + tensor var_9225 = const()[name = tensor("op_9225"), val = tensor([1, 1])]; + tensor q_185_pad_type_0 = const()[name = tensor("q_185_pad_type_0"), val = tensor("custom")]; + tensor q_185_pad_0 = const()[name = tensor("q_185_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(885428224))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(886247488))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_185_cast = conv(dilations = var_9225, groups = var_6865, pad = q_185_pad_0, pad_type = q_185_pad_type_0, strides = var_9223, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_373_cast)[name = tensor("q_185_cast")]; + tensor var_9229 = const()[name = tensor("op_9229"), val = tensor([1, 1])]; + tensor var_9231 = const()[name = tensor("op_9231"), val = tensor([1, 1])]; + tensor k_185_pad_type_0 = const()[name = tensor("k_185_pad_type_0"), val = tensor("custom")]; + tensor k_185_pad_0 = const()[name = tensor("k_185_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(886247616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887066880))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_185_cast = conv(dilations = var_9231, groups = var_6865, pad = k_185_pad_0, pad_type = k_185_pad_type_0, strides = var_9229, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_373_cast)[name = tensor("k_185_cast")]; + tensor var_9235 = const()[name = tensor("op_9235"), val = tensor([1, 1])]; + tensor var_9237 = const()[name = tensor("op_9237"), val = tensor([1, 1])]; + tensor v_185_pad_type_0 = const()[name = tensor("v_185_pad_type_0"), val = tensor("custom")]; + tensor v_185_pad_0 = const()[name = tensor("v_185_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887067008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(888295872))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_185_cast = conv(dilations = var_9237, groups = var_6865, pad = v_185_pad_0, pad_type = v_185_pad_type_0, strides = var_9235, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_373_cast)[name = tensor("v_185_cast")]; + tensor var_9241 = const()[name = tensor("op_9241"), val = tensor([2, 20, 64, -1])]; + tensor var_9242_cast = reshape(shape = var_9241, x = q_185_cast)[name = tensor("op_9242_cast")]; + tensor var_9243 = const()[name = tensor("op_9243"), val = tensor([2, 20, 64, -1])]; + tensor var_9244_cast = 
reshape(shape = var_9243, x = k_185_cast)[name = tensor("op_9244_cast")]; + tensor var_9245 = const()[name = tensor("op_9245"), val = tensor([2, 20, 64, -1])]; + tensor var_9246_cast = reshape(shape = var_9245, x = v_185_cast)[name = tensor("op_9246_cast")]; + tensor attn_weights_369_transpose_x_0 = const()[name = tensor("attn_weights_369_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_369_transpose_y_0 = const()[name = tensor("attn_weights_369_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_369_cast = matmul(transpose_x = attn_weights_369_transpose_x_0, transpose_y = attn_weights_369_transpose_y_0, x = var_9242_cast, y = var_9244_cast)[name = tensor("attn_weights_369_cast")]; + tensor attn_weights_371_cast = mul(x = attn_weights_369_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_371_cast")]; + tensor var_9250_cast = softmax(axis = var_6849, x = attn_weights_371_cast)[name = tensor("op_9250_cast")]; + tensor attn_185_transpose_x_0 = const()[name = tensor("attn_185_transpose_x_0"), val = tensor(false)]; + tensor attn_185_transpose_y_0 = const()[name = tensor("attn_185_transpose_y_0"), val = tensor(true)]; + tensor attn_185_cast = matmul(transpose_x = attn_185_transpose_x_0, transpose_y = attn_185_transpose_y_0, x = var_9246_cast, y = var_9250_cast)[name = tensor("attn_185_cast")]; + tensor var_9254 = const()[name = tensor("op_9254"), val = tensor([2, 1280, 1, -1])]; + tensor input_553_cast = reshape(shape = var_9254, x = attn_185_cast)[name = tensor("input_553_cast")]; + tensor var_9259 = const()[name = tensor("op_9259"), val = tensor([1, 1])]; + tensor var_9261 = const()[name = tensor("op_9261"), val = tensor([1, 1])]; + tensor var_9263_pad_type_0 = const()[name = tensor("op_9263_pad_type_0"), val = tensor("custom")]; + tensor var_9263_pad_0 = const()[name = tensor("op_9263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(888296064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(889524928))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(889525120)))]; + tensor var_9263_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_9261, groups = var_6865, pad = var_9263_pad_0, pad_type = var_9263_pad_type_0, strides = var_9259, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_553_cast)[name = tensor("op_9263_cast")]; + tensor inputs_279_cast = add(x = var_9263_cast, y = inputs_277_cast)[name = tensor("inputs_279_cast")]; + tensor var_9267 = const()[name = tensor("op_9267"), val = tensor([1])]; + tensor channels_mean_279_cast = reduce_mean(axes = var_9267, keep_dims = var_6860, x = inputs_279_cast)[name = tensor("channels_mean_279_cast")]; + tensor zero_mean_279_cast = sub(x = inputs_279_cast, y = channels_mean_279_cast)[name = tensor("zero_mean_279_cast")]; + tensor zero_mean_sq_279_cast = mul(x = zero_mean_279_cast, y = zero_mean_279_cast)[name = 
tensor("zero_mean_sq_279_cast")]; + tensor var_9271 = const()[name = tensor("op_9271"), val = tensor([1])]; + tensor var_9272_cast = reduce_mean(axes = var_9271, keep_dims = var_6860, x = zero_mean_sq_279_cast)[name = tensor("op_9272_cast")]; + tensor var_9273_to_fp16 = const()[name = tensor("op_9273_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9274_cast = add(x = var_9272_cast, y = var_9273_to_fp16)[name = tensor("op_9274_cast")]; + tensor denom_279_epsilon_0_to_fp16 = const()[name = tensor("denom_279_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_279_cast = rsqrt(epsilon = denom_279_epsilon_0_to_fp16, x = var_9274_cast)[name = tensor("denom_279_cast")]; + tensor out_279_cast = mul(x = zero_mean_279_cast, y = denom_279_cast)[name = tensor("out_279_cast")]; + tensor var_9278_to_fp16 = const()[name = tensor("op_9278_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(889527744)))]; + tensor var_9279_cast = add(x = out_279_cast, y = var_9278_to_fp16)[name = tensor("op_9279_cast")]; + tensor var_9281_to_fp16 = const()[name = tensor("op_9281_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(889530368)))]; + tensor hidden_states_375_cast = mul(x = var_9279_cast, y = var_9281_to_fp16)[name = tensor("hidden_states_375_cast")]; + tensor var_9288 = const()[name = tensor("op_9288"), val = tensor([1, 1])]; + tensor var_9290 = const()[name = tensor("op_9290"), val = tensor([1, 1])]; + tensor q_187_pad_type_0 = const()[name = tensor("q_187_pad_type_0"), val = tensor("custom")]; + tensor q_187_pad_0 = const()[name = tensor("q_187_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(889532992))), lut = tensor([-0x1.348p-5, -0x1.6fcp-7, 0x1.71cp-7, 0x1.354p-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_187_cast = conv(dilations = var_9290, groups = var_6865, pad = q_187_pad_0, pad_type = q_187_pad_type_0, strides = var_9288, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_375_cast)[name = tensor("q_187_cast")]; + tensor var_9294 = const()[name = tensor("op_9294"), val = tensor([1, 1])]; + tensor var_9296 = const()[name = tensor("op_9296"), val = tensor([1, 1])]; + tensor k_187_pad_type_0 = const()[name = tensor("k_187_pad_type_0"), val = tensor("custom")]; + tensor k_187_pad_0 = const()[name = tensor("k_187_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(889942656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(891253440))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_187_cast = conv(dilations = var_9296, groups = var_6865, pad = k_187_pad_0, pad_type = k_187_pad_type_0, strides = var_9294, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_187_cast")]; + tensor var_9300 = const()[name = tensor("op_9300"), val 
= tensor([1, 1])]; + tensor var_9302 = const()[name = tensor("op_9302"), val = tensor([1, 1])]; + tensor v_187_pad_type_0 = const()[name = tensor("v_187_pad_type_0"), val = tensor("custom")]; + tensor v_187_pad_0 = const()[name = tensor("v_187_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(891253568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(892564352))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_187_cast = conv(dilations = var_9302, groups = var_6865, pad = v_187_pad_0, pad_type = v_187_pad_type_0, strides = var_9300, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_187_cast")]; + tensor var_9306 = const()[name = tensor("op_9306"), val = tensor([2, 20, 64, -1])]; + tensor var_9307_cast = reshape(shape = var_9306, x = q_187_cast)[name = tensor("op_9307_cast")]; + tensor var_9308 = const()[name = tensor("op_9308"), val = tensor([2, 20, 64, -1])]; + tensor var_9309_cast = reshape(shape = var_9308, x = k_187_cast)[name = tensor("op_9309_cast")]; + tensor var_9310 = const()[name = tensor("op_9310"), val = tensor([2, 20, 64, -1])]; + tensor var_9311_cast = reshape(shape = var_9310, x = v_187_cast)[name = tensor("op_9311_cast")]; + tensor attn_weights_373_transpose_x_0 = const()[name = tensor("attn_weights_373_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_373_transpose_y_0 = const()[name = tensor("attn_weights_373_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_373_cast = matmul(transpose_x = attn_weights_373_transpose_x_0, transpose_y = attn_weights_373_transpose_y_0, x = var_9307_cast, y = var_9309_cast)[name = tensor("attn_weights_373_cast")]; + tensor attn_weights_375_cast = mul(x = attn_weights_373_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_375_cast")]; + tensor var_9315_cast = softmax(axis = var_6849, x = attn_weights_375_cast)[name = tensor("op_9315_cast")]; + tensor attn_187_transpose_x_0 = const()[name = tensor("attn_187_transpose_x_0"), val = tensor(false)]; + tensor attn_187_transpose_y_0 = const()[name = tensor("attn_187_transpose_y_0"), val = tensor(true)]; + tensor attn_187_cast = matmul(transpose_x = attn_187_transpose_x_0, transpose_y = attn_187_transpose_y_0, x = var_9311_cast, y = var_9315_cast)[name = tensor("attn_187_cast")]; + tensor var_9319 = const()[name = tensor("op_9319"), val = tensor([2, 1280, 1, -1])]; + tensor input_555_cast = reshape(shape = var_9319, x = attn_187_cast)[name = tensor("input_555_cast")]; + tensor var_9324 = const()[name = tensor("op_9324"), val = tensor([1, 1])]; + tensor var_9326 = const()[name = tensor("op_9326"), val = tensor([1, 1])]; + tensor var_9328_pad_type_0 = const()[name = tensor("op_9328_pad_type_0"), val = tensor("custom")]; + tensor var_9328_pad_0 = const()[name = tensor("op_9328_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(892564480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893383744))), name = 
tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893383872)))]; + tensor var_9328_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_9326, groups = var_6865, pad = var_9328_pad_0, pad_type = var_9328_pad_type_0, strides = var_9324, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_555_cast)[name = tensor("op_9328_cast")]; + tensor inputs_281_cast = add(x = var_9328_cast, y = inputs_279_cast)[name = tensor("inputs_281_cast")]; + tensor var_9332 = const()[name = tensor("op_9332"), val = tensor([1])]; + tensor channels_mean_281_cast = reduce_mean(axes = var_9332, keep_dims = var_6860, x = inputs_281_cast)[name = tensor("channels_mean_281_cast")]; + tensor zero_mean_281_cast = sub(x = inputs_281_cast, y = channels_mean_281_cast)[name = tensor("zero_mean_281_cast")]; + tensor zero_mean_sq_281_cast = mul(x = zero_mean_281_cast, y = zero_mean_281_cast)[name = tensor("zero_mean_sq_281_cast")]; + tensor var_9336 = const()[name = tensor("op_9336"), val = tensor([1])]; + tensor var_9337_cast = reduce_mean(axes = var_9336, keep_dims = var_6860, x = zero_mean_sq_281_cast)[name = tensor("op_9337_cast")]; + tensor var_9338_to_fp16 = const()[name = tensor("op_9338_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9339_cast = add(x = var_9337_cast, y = var_9338_to_fp16)[name = tensor("op_9339_cast")]; + tensor denom_281_epsilon_0_to_fp16 = const()[name = tensor("denom_281_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_281_cast = rsqrt(epsilon = denom_281_epsilon_0_to_fp16, x = var_9339_cast)[name = tensor("denom_281_cast")]; + tensor out_281_cast = mul(x = zero_mean_281_cast, y = denom_281_cast)[name = tensor("out_281_cast")]; + tensor var_9343_to_fp16 = const()[name = tensor("op_9343_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893386496)))]; + tensor var_9344_cast = add(x = out_281_cast, y = var_9343_to_fp16)[name = tensor("op_9344_cast")]; + tensor var_9346_to_fp16 = const()[name = tensor("op_9346_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893389120)))]; + tensor input_557_cast = mul(x = var_9344_cast, y = var_9346_to_fp16)[name = tensor("input_557_cast")]; + tensor var_9354 = const()[name = tensor("op_9354"), val = tensor([1, 1])]; + tensor var_9356 = const()[name = tensor("op_9356"), val = tensor([1, 1])]; + tensor var_9358_pad_type_0 = const()[name = tensor("op_9358_pad_type_0"), val = tensor("custom")]; + tensor var_9358_pad_0 = const()[name = tensor("op_9358_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893391744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(903222208))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor 
up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(903222400)))]; + tensor var_9358_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_9356, groups = var_6865, pad = var_9358_pad_0, pad_type = var_9358_pad_type_0, strides = var_9354, weight = up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_557_cast)[name = tensor("op_9358_cast")]; + tensor var_9359_split_sizes_0 = const()[name = tensor("op_9359_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9359_axis_0 = const()[name = tensor("op_9359_axis_0"), val = tensor(1)]; + tensor var_9359_cast_0, tensor var_9359_cast_1 = split(axis = var_9359_axis_0, split_sizes = var_9359_split_sizes_0, x = var_9358_cast)[name = tensor("op_9359_cast")]; + tensor var_9361_mode_0 = const()[name = tensor("op_9361_mode_0"), val = tensor("EXACT")]; + tensor var_9361_cast = gelu(mode = var_9361_mode_0, x = var_9359_cast_1)[name = tensor("op_9361_cast")]; + tensor input_559_cast = mul(x = var_9359_cast_0, y = var_9361_cast)[name = tensor("input_559_cast")]; + tensor var_9365 = const()[name = tensor("op_9365"), val = tensor([1, 1])]; + tensor var_9367 = const()[name = tensor("op_9367"), val = tensor([1, 1])]; + tensor var_9369_pad_type_0 = const()[name = tensor("op_9369_pad_type_0"), val = tensor("custom")]; + tensor var_9369_pad_0 = const()[name = tensor("op_9369_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(903242944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908158208))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908158400)))]; + tensor var_9369_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_9367, groups = var_6865, pad = var_9369_pad_0, pad_type = var_9369_pad_type_0, strides = var_9365, weight = up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_559_cast)[name = tensor("op_9369_cast")]; + tensor inputs_283_cast = add(x = var_9369_cast, y = inputs_281_cast)[name = tensor("inputs_283_cast")]; + tensor var_9379 = const()[name = tensor("op_9379"), val = tensor([1])]; + tensor channels_mean_283_cast = reduce_mean(axes = var_9379, keep_dims = var_6860, x = inputs_283_cast)[name = tensor("channels_mean_283_cast")]; + tensor zero_mean_283_cast = sub(x = inputs_283_cast, y = channels_mean_283_cast)[name = tensor("zero_mean_283_cast")]; + tensor zero_mean_sq_283_cast = mul(x = zero_mean_283_cast, y = zero_mean_283_cast)[name = tensor("zero_mean_sq_283_cast")]; + tensor var_9383 = const()[name = tensor("op_9383"), val = tensor([1])]; + tensor var_9384_cast = reduce_mean(axes = var_9383, keep_dims = var_6860, x = zero_mean_sq_283_cast)[name = tensor("op_9384_cast")]; + 
tensor var_9385_to_fp16 = const()[name = tensor("op_9385_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9386_cast = add(x = var_9384_cast, y = var_9385_to_fp16)[name = tensor("op_9386_cast")]; + tensor denom_283_epsilon_0_to_fp16 = const()[name = tensor("denom_283_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_283_cast = rsqrt(epsilon = denom_283_epsilon_0_to_fp16, x = var_9386_cast)[name = tensor("denom_283_cast")]; + tensor out_283_cast = mul(x = zero_mean_283_cast, y = denom_283_cast)[name = tensor("out_283_cast")]; + tensor var_9390_to_fp16 = const()[name = tensor("op_9390_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908161024)))]; + tensor var_9391_cast = add(x = out_283_cast, y = var_9390_to_fp16)[name = tensor("op_9391_cast")]; + tensor var_9393_to_fp16 = const()[name = tensor("op_9393_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908163648)))]; + tensor hidden_states_379_cast = mul(x = var_9391_cast, y = var_9393_to_fp16)[name = tensor("hidden_states_379_cast")]; + tensor var_9400 = const()[name = tensor("op_9400"), val = tensor([1, 1])]; + tensor var_9402 = const()[name = tensor("op_9402"), val = tensor([1, 1])]; + tensor q_189_pad_type_0 = const()[name = tensor("q_189_pad_type_0"), val = tensor("custom")]; + tensor q_189_pad_0 = const()[name = tensor("q_189_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908166272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908985536))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_189_cast = conv(dilations = var_9402, groups = var_6865, pad = q_189_pad_0, pad_type = q_189_pad_type_0, strides = var_9400, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_379_cast)[name = tensor("q_189_cast")]; + tensor var_9406 = const()[name = tensor("op_9406"), val = tensor([1, 1])]; + tensor var_9408 = const()[name = tensor("op_9408"), val = tensor([1, 1])]; + tensor k_189_pad_type_0 = const()[name = tensor("k_189_pad_type_0"), val = tensor("custom")]; + tensor k_189_pad_0 = const()[name = tensor("k_189_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908985664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(909804928))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_189_cast = conv(dilations = var_9408, groups = var_6865, pad = k_189_pad_0, pad_type = k_189_pad_type_0, strides = var_9406, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_379_cast)[name = tensor("k_189_cast")]; + tensor var_9412 = const()[name = tensor("op_9412"), val = tensor([1, 1])]; + tensor var_9414 = const()[name = tensor("op_9414"), val = tensor([1, 1])]; + tensor v_189_pad_type_0 = const()[name = tensor("v_189_pad_type_0"), val = tensor("custom")]; + tensor 
v_189_pad_0 = const()[name = tensor("v_189_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(909805056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(911033920))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_189_cast = conv(dilations = var_9414, groups = var_6865, pad = v_189_pad_0, pad_type = v_189_pad_type_0, strides = var_9412, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_379_cast)[name = tensor("v_189_cast")]; + tensor var_9418 = const()[name = tensor("op_9418"), val = tensor([2, 20, 64, -1])]; + tensor var_9419_cast = reshape(shape = var_9418, x = q_189_cast)[name = tensor("op_9419_cast")]; + tensor var_9420 = const()[name = tensor("op_9420"), val = tensor([2, 20, 64, -1])]; + tensor var_9421_cast = reshape(shape = var_9420, x = k_189_cast)[name = tensor("op_9421_cast")]; + tensor var_9422 = const()[name = tensor("op_9422"), val = tensor([2, 20, 64, -1])]; + tensor var_9423_cast = reshape(shape = var_9422, x = v_189_cast)[name = tensor("op_9423_cast")]; + tensor attn_weights_377_transpose_x_0 = const()[name = tensor("attn_weights_377_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_377_transpose_y_0 = const()[name = tensor("attn_weights_377_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_377_cast = matmul(transpose_x = attn_weights_377_transpose_x_0, transpose_y = attn_weights_377_transpose_y_0, x = var_9419_cast, y = var_9421_cast)[name = tensor("attn_weights_377_cast")]; + tensor attn_weights_379_cast = mul(x = attn_weights_377_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_379_cast")]; + tensor var_9427_cast = softmax(axis = var_6849, x = attn_weights_379_cast)[name = tensor("op_9427_cast")]; + tensor attn_189_transpose_x_0 = const()[name = tensor("attn_189_transpose_x_0"), val = tensor(false)]; + tensor attn_189_transpose_y_0 = const()[name = tensor("attn_189_transpose_y_0"), val = tensor(true)]; + tensor attn_189_cast = matmul(transpose_x = attn_189_transpose_x_0, transpose_y = attn_189_transpose_y_0, x = var_9423_cast, y = var_9427_cast)[name = tensor("attn_189_cast")]; + tensor var_9431 = const()[name = tensor("op_9431"), val = tensor([2, 1280, 1, -1])]; + tensor input_561_cast = reshape(shape = var_9431, x = attn_189_cast)[name = tensor("input_561_cast")]; + tensor var_9436 = const()[name = tensor("op_9436"), val = tensor([1, 1])]; + tensor var_9438 = const()[name = tensor("op_9438"), val = tensor([1, 1])]; + tensor var_9440_pad_type_0 = const()[name = tensor("op_9440_pad_type_0"), val = tensor("custom")]; + tensor var_9440_pad_0 = const()[name = tensor("op_9440_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(911034112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(912262976))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor 
up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(912263168)))]; + tensor var_9440_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_9438, groups = var_6865, pad = var_9440_pad_0, pad_type = var_9440_pad_type_0, strides = var_9436, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_561_cast)[name = tensor("op_9440_cast")]; + tensor inputs_285_cast = add(x = var_9440_cast, y = inputs_283_cast)[name = tensor("inputs_285_cast")]; + tensor var_9444 = const()[name = tensor("op_9444"), val = tensor([1])]; + tensor channels_mean_285_cast = reduce_mean(axes = var_9444, keep_dims = var_6860, x = inputs_285_cast)[name = tensor("channels_mean_285_cast")]; + tensor zero_mean_285_cast = sub(x = inputs_285_cast, y = channels_mean_285_cast)[name = tensor("zero_mean_285_cast")]; + tensor zero_mean_sq_285_cast = mul(x = zero_mean_285_cast, y = zero_mean_285_cast)[name = tensor("zero_mean_sq_285_cast")]; + tensor var_9448 = const()[name = tensor("op_9448"), val = tensor([1])]; + tensor var_9449_cast = reduce_mean(axes = var_9448, keep_dims = var_6860, x = zero_mean_sq_285_cast)[name = tensor("op_9449_cast")]; + tensor var_9450_to_fp16 = const()[name = tensor("op_9450_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9451_cast = add(x = var_9449_cast, y = var_9450_to_fp16)[name = tensor("op_9451_cast")]; + tensor denom_285_epsilon_0_to_fp16 = const()[name = tensor("denom_285_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_285_cast = rsqrt(epsilon = denom_285_epsilon_0_to_fp16, x = var_9451_cast)[name = tensor("denom_285_cast")]; + tensor out_285_cast = mul(x = zero_mean_285_cast, y = denom_285_cast)[name = tensor("out_285_cast")]; + tensor var_9455_to_fp16 = const()[name = tensor("op_9455_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(912265792)))]; + tensor var_9456_cast = add(x = out_285_cast, y = var_9455_to_fp16)[name = tensor("op_9456_cast")]; + tensor var_9458_to_fp16 = const()[name = tensor("op_9458_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(912268416)))]; + tensor hidden_states_381_cast = mul(x = var_9456_cast, y = var_9458_to_fp16)[name = tensor("hidden_states_381_cast")]; + tensor var_9465 = const()[name = tensor("op_9465"), val = tensor([1, 1])]; + tensor var_9467 = const()[name = tensor("op_9467"), val = tensor([1, 1])]; + tensor q_191_pad_type_0 = const()[name = tensor("q_191_pad_type_0"), val = tensor("custom")]; + tensor q_191_pad_0 = const()[name = tensor("q_191_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(912271040))), lut = tensor([-0x1.42cp-5, -0x1.84p-7, 0x1.774p-7, 0x1.3fp-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_191_cast = conv(dilations = var_9467, groups = var_6865, pad = q_191_pad_0, pad_type = q_191_pad_type_0, strides = var_9465, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = 
hidden_states_381_cast)[name = tensor("q_191_cast")]; + tensor var_9471 = const()[name = tensor("op_9471"), val = tensor([1, 1])]; + tensor var_9473 = const()[name = tensor("op_9473"), val = tensor([1, 1])]; + tensor k_191_pad_type_0 = const()[name = tensor("k_191_pad_type_0"), val = tensor("custom")]; + tensor k_191_pad_0 = const()[name = tensor("k_191_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(912680704))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(913991488))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_191_cast = conv(dilations = var_9473, groups = var_6865, pad = k_191_pad_0, pad_type = k_191_pad_type_0, strides = var_9471, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_191_cast")]; + tensor var_9477 = const()[name = tensor("op_9477"), val = tensor([1, 1])]; + tensor var_9479 = const()[name = tensor("op_9479"), val = tensor([1, 1])]; + tensor v_191_pad_type_0 = const()[name = tensor("v_191_pad_type_0"), val = tensor("custom")]; + tensor v_191_pad_0 = const()[name = tensor("v_191_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(913991616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(915302400))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_191_cast = conv(dilations = var_9479, groups = var_6865, pad = v_191_pad_0, pad_type = v_191_pad_type_0, strides = var_9477, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_191_cast")]; + tensor var_9483 = const()[name = tensor("op_9483"), val = tensor([2, 20, 64, -1])]; + tensor var_9484_cast = reshape(shape = var_9483, x = q_191_cast)[name = tensor("op_9484_cast")]; + tensor var_9485 = const()[name = tensor("op_9485"), val = tensor([2, 20, 64, -1])]; + tensor var_9486_cast = reshape(shape = var_9485, x = k_191_cast)[name = tensor("op_9486_cast")]; + tensor var_9487 = const()[name = tensor("op_9487"), val = tensor([2, 20, 64, -1])]; + tensor var_9488_cast = reshape(shape = var_9487, x = v_191_cast)[name = tensor("op_9488_cast")]; + tensor attn_weights_381_transpose_x_0 = const()[name = tensor("attn_weights_381_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_381_transpose_y_0 = const()[name = tensor("attn_weights_381_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_381_cast = matmul(transpose_x = attn_weights_381_transpose_x_0, transpose_y = attn_weights_381_transpose_y_0, x = var_9484_cast, y = var_9486_cast)[name = tensor("attn_weights_381_cast")]; + tensor attn_weights_383_cast = mul(x = attn_weights_381_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_383_cast")]; + tensor var_9492_cast = softmax(axis = var_6849, x = attn_weights_383_cast)[name = tensor("op_9492_cast")]; + tensor attn_191_transpose_x_0 = const()[name = 
tensor("attn_191_transpose_x_0"), val = tensor(false)]; + tensor attn_191_transpose_y_0 = const()[name = tensor("attn_191_transpose_y_0"), val = tensor(true)]; + tensor attn_191_cast = matmul(transpose_x = attn_191_transpose_x_0, transpose_y = attn_191_transpose_y_0, x = var_9488_cast, y = var_9492_cast)[name = tensor("attn_191_cast")]; + tensor var_9496 = const()[name = tensor("op_9496"), val = tensor([2, 1280, 1, -1])]; + tensor input_563_cast = reshape(shape = var_9496, x = attn_191_cast)[name = tensor("input_563_cast")]; + tensor var_9501 = const()[name = tensor("op_9501"), val = tensor([1, 1])]; + tensor var_9503 = const()[name = tensor("op_9503"), val = tensor([1, 1])]; + tensor var_9505_pad_type_0 = const()[name = tensor("op_9505_pad_type_0"), val = tensor("custom")]; + tensor var_9505_pad_0 = const()[name = tensor("op_9505_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(915302528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(916121792))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(916121920)))]; + tensor var_9505_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_9503, groups = var_6865, pad = var_9505_pad_0, pad_type = var_9505_pad_type_0, strides = var_9501, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_563_cast)[name = tensor("op_9505_cast")]; + tensor inputs_287_cast = add(x = var_9505_cast, y = inputs_285_cast)[name = tensor("inputs_287_cast")]; + tensor var_9509 = const()[name = tensor("op_9509"), val = tensor([1])]; + tensor channels_mean_287_cast = reduce_mean(axes = var_9509, keep_dims = var_6860, x = inputs_287_cast)[name = tensor("channels_mean_287_cast")]; + tensor zero_mean_287_cast = sub(x = inputs_287_cast, y = channels_mean_287_cast)[name = tensor("zero_mean_287_cast")]; + tensor zero_mean_sq_287_cast = mul(x = zero_mean_287_cast, y = zero_mean_287_cast)[name = tensor("zero_mean_sq_287_cast")]; + tensor var_9513 = const()[name = tensor("op_9513"), val = tensor([1])]; + tensor var_9514_cast = reduce_mean(axes = var_9513, keep_dims = var_6860, x = zero_mean_sq_287_cast)[name = tensor("op_9514_cast")]; + tensor var_9515_to_fp16 = const()[name = tensor("op_9515_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9516_cast = add(x = var_9514_cast, y = var_9515_to_fp16)[name = tensor("op_9516_cast")]; + tensor denom_287_epsilon_0_to_fp16 = const()[name = tensor("denom_287_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_287_cast = rsqrt(epsilon = denom_287_epsilon_0_to_fp16, x = var_9516_cast)[name = tensor("denom_287_cast")]; + tensor out_287_cast = mul(x = zero_mean_287_cast, y = denom_287_cast)[name = tensor("out_287_cast")]; + tensor var_9520_to_fp16 = const()[name = tensor("op_9520_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(916124544)))]; + tensor var_9521_cast = add(x = 
out_287_cast, y = var_9520_to_fp16)[name = tensor("op_9521_cast")]; + tensor var_9523_to_fp16 = const()[name = tensor("op_9523_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(916127168)))]; + tensor input_565_cast = mul(x = var_9521_cast, y = var_9523_to_fp16)[name = tensor("input_565_cast")]; + tensor var_9531 = const()[name = tensor("op_9531"), val = tensor([1, 1])]; + tensor var_9533 = const()[name = tensor("op_9533"), val = tensor([1, 1])]; + tensor var_9535_pad_type_0 = const()[name = tensor("op_9535_pad_type_0"), val = tensor("custom")]; + tensor var_9535_pad_0 = const()[name = tensor("op_9535_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(916129792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(925960256))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(925960448)))]; + tensor var_9535_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_9533, groups = var_6865, pad = var_9535_pad_0, pad_type = var_9535_pad_type_0, strides = var_9531, weight = up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_565_cast)[name = tensor("op_9535_cast")]; + tensor var_9536_split_sizes_0 = const()[name = tensor("op_9536_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9536_axis_0 = const()[name = tensor("op_9536_axis_0"), val = tensor(1)]; + tensor var_9536_cast_0, tensor var_9536_cast_1 = split(axis = var_9536_axis_0, split_sizes = var_9536_split_sizes_0, x = var_9535_cast)[name = tensor("op_9536_cast")]; + tensor var_9538_mode_0 = const()[name = tensor("op_9538_mode_0"), val = tensor("EXACT")]; + tensor var_9538_cast = gelu(mode = var_9538_mode_0, x = var_9536_cast_1)[name = tensor("op_9538_cast")]; + tensor input_567_cast = mul(x = var_9536_cast_0, y = var_9538_cast)[name = tensor("input_567_cast")]; + tensor var_9542 = const()[name = tensor("op_9542"), val = tensor([1, 1])]; + tensor var_9544 = const()[name = tensor("op_9544"), val = tensor([1, 1])]; + tensor var_9546_pad_type_0 = const()[name = tensor("op_9546_pad_type_0"), val = tensor("custom")]; + tensor var_9546_pad_0 = const()[name = tensor("op_9546_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(925980992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(930896256))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset 
= tensor(930896448)))]; + tensor var_9546_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_9544, groups = var_6865, pad = var_9546_pad_0, pad_type = var_9546_pad_type_0, strides = var_9542, weight = up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_567_cast)[name = tensor("op_9546_cast")]; + tensor inputs_289_cast = add(x = var_9546_cast, y = inputs_287_cast)[name = tensor("inputs_289_cast")]; + tensor var_9556 = const()[name = tensor("op_9556"), val = tensor([1])]; + tensor channels_mean_289_cast = reduce_mean(axes = var_9556, keep_dims = var_6860, x = inputs_289_cast)[name = tensor("channels_mean_289_cast")]; + tensor zero_mean_289_cast = sub(x = inputs_289_cast, y = channels_mean_289_cast)[name = tensor("zero_mean_289_cast")]; + tensor zero_mean_sq_289_cast = mul(x = zero_mean_289_cast, y = zero_mean_289_cast)[name = tensor("zero_mean_sq_289_cast")]; + tensor var_9560 = const()[name = tensor("op_9560"), val = tensor([1])]; + tensor var_9561_cast = reduce_mean(axes = var_9560, keep_dims = var_6860, x = zero_mean_sq_289_cast)[name = tensor("op_9561_cast")]; + tensor var_9562_to_fp16 = const()[name = tensor("op_9562_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9563_cast = add(x = var_9561_cast, y = var_9562_to_fp16)[name = tensor("op_9563_cast")]; + tensor denom_289_epsilon_0_to_fp16 = const()[name = tensor("denom_289_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_289_cast = rsqrt(epsilon = denom_289_epsilon_0_to_fp16, x = var_9563_cast)[name = tensor("denom_289_cast")]; + tensor out_289_cast = mul(x = zero_mean_289_cast, y = denom_289_cast)[name = tensor("out_289_cast")]; + tensor var_9567_to_fp16 = const()[name = tensor("op_9567_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(930899072)))]; + tensor var_9568_cast = add(x = out_289_cast, y = var_9567_to_fp16)[name = tensor("op_9568_cast")]; + tensor var_9570_to_fp16 = const()[name = tensor("op_9570_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(930901696)))]; + tensor hidden_states_385_cast = mul(x = var_9568_cast, y = var_9570_to_fp16)[name = tensor("hidden_states_385_cast")]; + tensor var_9577 = const()[name = tensor("op_9577"), val = tensor([1, 1])]; + tensor var_9579 = const()[name = tensor("op_9579"), val = tensor([1, 1])]; + tensor q_193_pad_type_0 = const()[name = tensor("q_193_pad_type_0"), val = tensor("custom")]; + tensor q_193_pad_0 = const()[name = tensor("q_193_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(930904320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(931723584))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_193_cast = conv(dilations = var_9579, groups = var_6865, pad = q_193_pad_0, pad_type = q_193_pad_type_0, strides = var_9577, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_385_cast)[name = tensor("q_193_cast")]; + tensor var_9583 = const()[name = tensor("op_9583"), val = tensor([1, 1])]; + tensor var_9585 = const()[name = tensor("op_9585"), val = tensor([1, 1])]; + tensor k_193_pad_type_0 = 
const()[name = tensor("k_193_pad_type_0"), val = tensor("custom")]; + tensor k_193_pad_0 = const()[name = tensor("k_193_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(931723712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(932542976))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_193_cast = conv(dilations = var_9585, groups = var_6865, pad = k_193_pad_0, pad_type = k_193_pad_type_0, strides = var_9583, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_385_cast)[name = tensor("k_193_cast")]; + tensor var_9589 = const()[name = tensor("op_9589"), val = tensor([1, 1])]; + tensor var_9591 = const()[name = tensor("op_9591"), val = tensor([1, 1])]; + tensor v_193_pad_type_0 = const()[name = tensor("v_193_pad_type_0"), val = tensor("custom")]; + tensor v_193_pad_0 = const()[name = tensor("v_193_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(932543104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(933771968))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_193_cast = conv(dilations = var_9591, groups = var_6865, pad = v_193_pad_0, pad_type = v_193_pad_type_0, strides = var_9589, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_385_cast)[name = tensor("v_193_cast")]; + tensor var_9595 = const()[name = tensor("op_9595"), val = tensor([2, 20, 64, -1])]; + tensor var_9596_cast = reshape(shape = var_9595, x = q_193_cast)[name = tensor("op_9596_cast")]; + tensor var_9597 = const()[name = tensor("op_9597"), val = tensor([2, 20, 64, -1])]; + tensor var_9598_cast = reshape(shape = var_9597, x = k_193_cast)[name = tensor("op_9598_cast")]; + tensor var_9599 = const()[name = tensor("op_9599"), val = tensor([2, 20, 64, -1])]; + tensor var_9600_cast = reshape(shape = var_9599, x = v_193_cast)[name = tensor("op_9600_cast")]; + tensor attn_weights_385_transpose_x_0 = const()[name = tensor("attn_weights_385_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_385_transpose_y_0 = const()[name = tensor("attn_weights_385_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_385_cast = matmul(transpose_x = attn_weights_385_transpose_x_0, transpose_y = attn_weights_385_transpose_y_0, x = var_9596_cast, y = var_9598_cast)[name = tensor("attn_weights_385_cast")]; + tensor attn_weights_387_cast = mul(x = attn_weights_385_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_387_cast")]; + tensor var_9604_cast = softmax(axis = var_6849, x = attn_weights_387_cast)[name = tensor("op_9604_cast")]; + tensor attn_193_transpose_x_0 = const()[name = tensor("attn_193_transpose_x_0"), val = tensor(false)]; + tensor attn_193_transpose_y_0 = const()[name = tensor("attn_193_transpose_y_0"), val = tensor(true)]; + tensor attn_193_cast = matmul(transpose_x = attn_193_transpose_x_0, transpose_y = 
attn_193_transpose_y_0, x = var_9600_cast, y = var_9604_cast)[name = tensor("attn_193_cast")]; + tensor var_9608 = const()[name = tensor("op_9608"), val = tensor([2, 1280, 1, -1])]; + tensor input_569_cast = reshape(shape = var_9608, x = attn_193_cast)[name = tensor("input_569_cast")]; + tensor var_9613 = const()[name = tensor("op_9613"), val = tensor([1, 1])]; + tensor var_9615 = const()[name = tensor("op_9615"), val = tensor([1, 1])]; + tensor var_9617_pad_type_0 = const()[name = tensor("op_9617_pad_type_0"), val = tensor("custom")]; + tensor var_9617_pad_0 = const()[name = tensor("op_9617_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(933772160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935001024))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935001216)))]; + tensor var_9617_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_9615, groups = var_6865, pad = var_9617_pad_0, pad_type = var_9617_pad_type_0, strides = var_9613, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_569_cast)[name = tensor("op_9617_cast")]; + tensor inputs_291_cast = add(x = var_9617_cast, y = inputs_289_cast)[name = tensor("inputs_291_cast")]; + tensor var_9621 = const()[name = tensor("op_9621"), val = tensor([1])]; + tensor channels_mean_291_cast = reduce_mean(axes = var_9621, keep_dims = var_6860, x = inputs_291_cast)[name = tensor("channels_mean_291_cast")]; + tensor zero_mean_291_cast = sub(x = inputs_291_cast, y = channels_mean_291_cast)[name = tensor("zero_mean_291_cast")]; + tensor zero_mean_sq_291_cast = mul(x = zero_mean_291_cast, y = zero_mean_291_cast)[name = tensor("zero_mean_sq_291_cast")]; + tensor var_9625 = const()[name = tensor("op_9625"), val = tensor([1])]; + tensor var_9626_cast = reduce_mean(axes = var_9625, keep_dims = var_6860, x = zero_mean_sq_291_cast)[name = tensor("op_9626_cast")]; + tensor var_9627_to_fp16 = const()[name = tensor("op_9627_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9628_cast = add(x = var_9626_cast, y = var_9627_to_fp16)[name = tensor("op_9628_cast")]; + tensor denom_291_epsilon_0_to_fp16 = const()[name = tensor("denom_291_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_291_cast = rsqrt(epsilon = denom_291_epsilon_0_to_fp16, x = var_9628_cast)[name = tensor("denom_291_cast")]; + tensor out_291_cast = mul(x = zero_mean_291_cast, y = denom_291_cast)[name = tensor("out_291_cast")]; + tensor var_9632_to_fp16 = const()[name = tensor("op_9632_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935003840)))]; + tensor var_9633_cast = add(x = out_291_cast, y = var_9632_to_fp16)[name = tensor("op_9633_cast")]; + tensor var_9635_to_fp16 = const()[name = tensor("op_9635_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935006464)))]; + tensor 
hidden_states_387_cast = mul(x = var_9633_cast, y = var_9635_to_fp16)[name = tensor("hidden_states_387_cast")]; + tensor var_9642 = const()[name = tensor("op_9642"), val = tensor([1, 1])]; + tensor var_9644 = const()[name = tensor("op_9644"), val = tensor([1, 1])]; + tensor q_195_pad_type_0 = const()[name = tensor("q_195_pad_type_0"), val = tensor("custom")]; + tensor q_195_pad_0 = const()[name = tensor("q_195_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935009088))), lut = tensor([-0x1.264p-5, -0x1.604p-7, 0x1.658p-7, 0x1.278p-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_195_cast = conv(dilations = var_9644, groups = var_6865, pad = q_195_pad_0, pad_type = q_195_pad_type_0, strides = var_9642, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_387_cast)[name = tensor("q_195_cast")]; + tensor var_9648 = const()[name = tensor("op_9648"), val = tensor([1, 1])]; + tensor var_9650 = const()[name = tensor("op_9650"), val = tensor([1, 1])]; + tensor k_195_pad_type_0 = const()[name = tensor("k_195_pad_type_0"), val = tensor("custom")]; + tensor k_195_pad_0 = const()[name = tensor("k_195_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935418752))), lut = tensor([-0x1.fa4p-6, -0x1.2dcp-7, 0x1.274p-7, 0x1.f7p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_195_cast = conv(dilations = var_9650, groups = var_6865, pad = k_195_pad_0, pad_type = k_195_pad_type_0, strides = var_9648, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_195_cast")]; + tensor var_9654 = const()[name = tensor("op_9654"), val = tensor([1, 1])]; + tensor var_9656 = const()[name = tensor("op_9656"), val = tensor([1, 1])]; + tensor v_195_pad_type_0 = const()[name = tensor("v_195_pad_type_0"), val = tensor("custom")]; + tensor v_195_pad_0 = const()[name = tensor("v_195_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(936074176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(937384960))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_195_cast = conv(dilations = var_9656, groups = var_6865, pad = v_195_pad_0, pad_type = v_195_pad_type_0, strides = var_9654, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_195_cast")]; + tensor var_9660 = const()[name = tensor("op_9660"), val = tensor([2, 20, 64, -1])]; + tensor var_9661_cast = reshape(shape = var_9660, x = q_195_cast)[name = tensor("op_9661_cast")]; + tensor var_9662 = const()[name = tensor("op_9662"), val = 
tensor([2, 20, 64, -1])]; + tensor var_9663_cast = reshape(shape = var_9662, x = k_195_cast)[name = tensor("op_9663_cast")]; + tensor var_9664 = const()[name = tensor("op_9664"), val = tensor([2, 20, 64, -1])]; + tensor var_9665_cast = reshape(shape = var_9664, x = v_195_cast)[name = tensor("op_9665_cast")]; + tensor attn_weights_389_transpose_x_0 = const()[name = tensor("attn_weights_389_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_389_transpose_y_0 = const()[name = tensor("attn_weights_389_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_389_cast = matmul(transpose_x = attn_weights_389_transpose_x_0, transpose_y = attn_weights_389_transpose_y_0, x = var_9661_cast, y = var_9663_cast)[name = tensor("attn_weights_389_cast")]; + tensor attn_weights_391_cast = mul(x = attn_weights_389_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_391_cast")]; + tensor var_9669_cast = softmax(axis = var_6849, x = attn_weights_391_cast)[name = tensor("op_9669_cast")]; + tensor attn_195_transpose_x_0 = const()[name = tensor("attn_195_transpose_x_0"), val = tensor(false)]; + tensor attn_195_transpose_y_0 = const()[name = tensor("attn_195_transpose_y_0"), val = tensor(true)]; + tensor attn_195_cast = matmul(transpose_x = attn_195_transpose_x_0, transpose_y = attn_195_transpose_y_0, x = var_9665_cast, y = var_9669_cast)[name = tensor("attn_195_cast")]; + tensor var_9673 = const()[name = tensor("op_9673"), val = tensor([2, 1280, 1, -1])]; + tensor input_571_cast = reshape(shape = var_9673, x = attn_195_cast)[name = tensor("input_571_cast")]; + tensor var_9678 = const()[name = tensor("op_9678"), val = tensor([1, 1])]; + tensor var_9680 = const()[name = tensor("op_9680"), val = tensor([1, 1])]; + tensor var_9682_pad_type_0 = const()[name = tensor("op_9682_pad_type_0"), val = tensor("custom")]; + tensor var_9682_pad_0 = const()[name = tensor("op_9682_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(937385088))), lut = tensor([-0x1.3a8p-6, -0x1.79p-8, 0x1.798p-8, 0x1.3bp-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(937794752)))]; + tensor var_9682_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_9680, groups = var_6865, pad = var_9682_pad_0, pad_type = var_9682_pad_type_0, strides = var_9678, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_571_cast)[name = tensor("op_9682_cast")]; + tensor inputs_293_cast = add(x = var_9682_cast, y = inputs_291_cast)[name = tensor("inputs_293_cast")]; + tensor var_9686 = const()[name = tensor("op_9686"), val = tensor([1])]; + tensor channels_mean_293_cast = reduce_mean(axes = var_9686, keep_dims = var_6860, x = inputs_293_cast)[name = tensor("channels_mean_293_cast")]; + tensor zero_mean_293_cast = sub(x = inputs_293_cast, y = channels_mean_293_cast)[name = tensor("zero_mean_293_cast")]; + tensor zero_mean_sq_293_cast = mul(x = zero_mean_293_cast, y = 
zero_mean_293_cast)[name = tensor("zero_mean_sq_293_cast")]; + tensor var_9690 = const()[name = tensor("op_9690"), val = tensor([1])]; + tensor var_9691_cast = reduce_mean(axes = var_9690, keep_dims = var_6860, x = zero_mean_sq_293_cast)[name = tensor("op_9691_cast")]; + tensor var_9692_to_fp16 = const()[name = tensor("op_9692_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9693_cast = add(x = var_9691_cast, y = var_9692_to_fp16)[name = tensor("op_9693_cast")]; + tensor denom_293_epsilon_0_to_fp16 = const()[name = tensor("denom_293_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_293_cast = rsqrt(epsilon = denom_293_epsilon_0_to_fp16, x = var_9693_cast)[name = tensor("denom_293_cast")]; + tensor out_293_cast = mul(x = zero_mean_293_cast, y = denom_293_cast)[name = tensor("out_293_cast")]; + tensor var_9697_to_fp16 = const()[name = tensor("op_9697_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(937797376)))]; + tensor var_9698_cast = add(x = out_293_cast, y = var_9697_to_fp16)[name = tensor("op_9698_cast")]; + tensor var_9700_to_fp16 = const()[name = tensor("op_9700_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(937800000)))]; + tensor input_573_cast = mul(x = var_9698_cast, y = var_9700_to_fp16)[name = tensor("input_573_cast")]; + tensor var_9708 = const()[name = tensor("op_9708"), val = tensor([1, 1])]; + tensor var_9710 = const()[name = tensor("op_9710"), val = tensor([1, 1])]; + tensor var_9712_pad_type_0 = const()[name = tensor("op_9712_pad_type_0"), val = tensor("custom")]; + tensor var_9712_pad_0 = const()[name = tensor("op_9712_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(937802624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(947633088))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(947633280)))]; + tensor var_9712_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_9710, groups = var_6865, pad = var_9712_pad_0, pad_type = var_9712_pad_type_0, strides = var_9708, weight = up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_573_cast)[name = tensor("op_9712_cast")]; + tensor var_9713_split_sizes_0 = const()[name = tensor("op_9713_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9713_axis_0 = const()[name = tensor("op_9713_axis_0"), val = tensor(1)]; + tensor var_9713_cast_0, tensor var_9713_cast_1 = split(axis = var_9713_axis_0, split_sizes = var_9713_split_sizes_0, x = var_9712_cast)[name = tensor("op_9713_cast")]; + tensor var_9715_mode_0 = const()[name = tensor("op_9715_mode_0"), val = tensor("EXACT")]; + tensor var_9715_cast = gelu(mode = var_9715_mode_0, x = var_9713_cast_1)[name = tensor("op_9715_cast")]; + tensor input_575_cast = mul(x = var_9713_cast_0, y = var_9715_cast)[name = tensor("input_575_cast")]; + tensor var_9719 = const()[name = 
tensor("op_9719"), val = tensor([1, 1])]; + tensor var_9721 = const()[name = tensor("op_9721"), val = tensor([1, 1])]; + tensor var_9723_pad_type_0 = const()[name = tensor("op_9723_pad_type_0"), val = tensor("custom")]; + tensor var_9723_pad_0 = const()[name = tensor("op_9723_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(947653824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(952569088))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(952569280)))]; + tensor var_9723_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_9721, groups = var_6865, pad = var_9723_pad_0, pad_type = var_9723_pad_type_0, strides = var_9719, weight = up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_575_cast)[name = tensor("op_9723_cast")]; + tensor inputs_295_cast = add(x = var_9723_cast, y = inputs_293_cast)[name = tensor("inputs_295_cast")]; + tensor var_9733 = const()[name = tensor("op_9733"), val = tensor([1])]; + tensor channels_mean_295_cast = reduce_mean(axes = var_9733, keep_dims = var_6860, x = inputs_295_cast)[name = tensor("channels_mean_295_cast")]; + tensor zero_mean_295_cast = sub(x = inputs_295_cast, y = channels_mean_295_cast)[name = tensor("zero_mean_295_cast")]; + tensor zero_mean_sq_295_cast = mul(x = zero_mean_295_cast, y = zero_mean_295_cast)[name = tensor("zero_mean_sq_295_cast")]; + tensor var_9737 = const()[name = tensor("op_9737"), val = tensor([1])]; + tensor var_9738_cast = reduce_mean(axes = var_9737, keep_dims = var_6860, x = zero_mean_sq_295_cast)[name = tensor("op_9738_cast")]; + tensor var_9739_to_fp16 = const()[name = tensor("op_9739_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9740_cast = add(x = var_9738_cast, y = var_9739_to_fp16)[name = tensor("op_9740_cast")]; + tensor denom_295_epsilon_0_to_fp16 = const()[name = tensor("denom_295_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_295_cast = rsqrt(epsilon = denom_295_epsilon_0_to_fp16, x = var_9740_cast)[name = tensor("denom_295_cast")]; + tensor out_295_cast = mul(x = zero_mean_295_cast, y = denom_295_cast)[name = tensor("out_295_cast")]; + tensor var_9744_to_fp16 = const()[name = tensor("op_9744_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(952571904)))]; + tensor var_9745_cast = add(x = out_295_cast, y = var_9744_to_fp16)[name = tensor("op_9745_cast")]; + tensor var_9747_to_fp16 = const()[name = tensor("op_9747_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(952574528)))]; + tensor hidden_states_391_cast = mul(x = var_9745_cast, y = var_9747_to_fp16)[name = tensor("hidden_states_391_cast")]; + tensor var_9754 = const()[name = tensor("op_9754"), val = tensor([1, 1])]; + tensor var_9756 = const()[name = tensor("op_9756"), val = tensor([1, 1])]; + tensor q_197_pad_type_0 = const()[name = tensor("q_197_pad_type_0"), val = 
tensor("custom")]; + tensor q_197_pad_0 = const()[name = tensor("q_197_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(952577152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953396416))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_197_cast = conv(dilations = var_9756, groups = var_6865, pad = q_197_pad_0, pad_type = q_197_pad_type_0, strides = var_9754, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_391_cast)[name = tensor("q_197_cast")]; + tensor var_9760 = const()[name = tensor("op_9760"), val = tensor([1, 1])]; + tensor var_9762 = const()[name = tensor("op_9762"), val = tensor([1, 1])]; + tensor k_197_pad_type_0 = const()[name = tensor("k_197_pad_type_0"), val = tensor("custom")]; + tensor k_197_pad_0 = const()[name = tensor("k_197_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953396544))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(954215808))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_197_cast = conv(dilations = var_9762, groups = var_6865, pad = k_197_pad_0, pad_type = k_197_pad_type_0, strides = var_9760, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_391_cast)[name = tensor("k_197_cast")]; + tensor var_9766 = const()[name = tensor("op_9766"), val = tensor([1, 1])]; + tensor var_9768 = const()[name = tensor("op_9768"), val = tensor([1, 1])]; + tensor v_197_pad_type_0 = const()[name = tensor("v_197_pad_type_0"), val = tensor("custom")]; + tensor v_197_pad_0 = const()[name = tensor("v_197_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(954215936))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(955444800))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_197_cast = conv(dilations = var_9768, groups = var_6865, pad = v_197_pad_0, pad_type = v_197_pad_type_0, strides = var_9766, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_391_cast)[name = tensor("v_197_cast")]; + tensor var_9772 = const()[name = tensor("op_9772"), val = tensor([2, 20, 64, -1])]; + tensor var_9773_cast = reshape(shape = var_9772, x = q_197_cast)[name = tensor("op_9773_cast")]; + tensor var_9774 = const()[name = tensor("op_9774"), val = tensor([2, 20, 64, -1])]; + tensor var_9775_cast = reshape(shape = var_9774, x = k_197_cast)[name = tensor("op_9775_cast")]; + tensor var_9776 = const()[name = tensor("op_9776"), val = tensor([2, 20, 64, -1])]; + tensor var_9777_cast = reshape(shape = var_9776, x = 
v_197_cast)[name = tensor("op_9777_cast")]; + tensor attn_weights_393_transpose_x_0 = const()[name = tensor("attn_weights_393_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_393_transpose_y_0 = const()[name = tensor("attn_weights_393_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_393_cast = matmul(transpose_x = attn_weights_393_transpose_x_0, transpose_y = attn_weights_393_transpose_y_0, x = var_9773_cast, y = var_9775_cast)[name = tensor("attn_weights_393_cast")]; + tensor attn_weights_395_cast = mul(x = attn_weights_393_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_395_cast")]; + tensor var_9781_cast = softmax(axis = var_6849, x = attn_weights_395_cast)[name = tensor("op_9781_cast")]; + tensor attn_197_transpose_x_0 = const()[name = tensor("attn_197_transpose_x_0"), val = tensor(false)]; + tensor attn_197_transpose_y_0 = const()[name = tensor("attn_197_transpose_y_0"), val = tensor(true)]; + tensor attn_197_cast = matmul(transpose_x = attn_197_transpose_x_0, transpose_y = attn_197_transpose_y_0, x = var_9777_cast, y = var_9781_cast)[name = tensor("attn_197_cast")]; + tensor var_9785 = const()[name = tensor("op_9785"), val = tensor([2, 1280, 1, -1])]; + tensor input_577_cast = reshape(shape = var_9785, x = attn_197_cast)[name = tensor("input_577_cast")]; + tensor var_9790 = const()[name = tensor("op_9790"), val = tensor([1, 1])]; + tensor var_9792 = const()[name = tensor("op_9792"), val = tensor([1, 1])]; + tensor var_9794_pad_type_0 = const()[name = tensor("op_9794_pad_type_0"), val = tensor("custom")]; + tensor var_9794_pad_0 = const()[name = tensor("op_9794_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(955444992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956673856))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956674048)))]; + tensor var_9794_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_9792, groups = var_6865, pad = var_9794_pad_0, pad_type = var_9794_pad_type_0, strides = var_9790, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_577_cast)[name = tensor("op_9794_cast")]; + tensor inputs_297_cast = add(x = var_9794_cast, y = inputs_295_cast)[name = tensor("inputs_297_cast")]; + tensor var_9798 = const()[name = tensor("op_9798"), val = tensor([1])]; + tensor channels_mean_297_cast = reduce_mean(axes = var_9798, keep_dims = var_6860, x = inputs_297_cast)[name = tensor("channels_mean_297_cast")]; + tensor zero_mean_297_cast = sub(x = inputs_297_cast, y = channels_mean_297_cast)[name = tensor("zero_mean_297_cast")]; + tensor zero_mean_sq_297_cast = mul(x = zero_mean_297_cast, y = zero_mean_297_cast)[name = tensor("zero_mean_sq_297_cast")]; + tensor var_9802 = const()[name = tensor("op_9802"), val = tensor([1])]; + tensor var_9803_cast = reduce_mean(axes = var_9802, keep_dims = var_6860, x = zero_mean_sq_297_cast)[name = 
tensor("op_9803_cast")]; + tensor var_9804_to_fp16 = const()[name = tensor("op_9804_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9805_cast = add(x = var_9803_cast, y = var_9804_to_fp16)[name = tensor("op_9805_cast")]; + tensor denom_297_epsilon_0_to_fp16 = const()[name = tensor("denom_297_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_297_cast = rsqrt(epsilon = denom_297_epsilon_0_to_fp16, x = var_9805_cast)[name = tensor("denom_297_cast")]; + tensor out_297_cast = mul(x = zero_mean_297_cast, y = denom_297_cast)[name = tensor("out_297_cast")]; + tensor var_9809_to_fp16 = const()[name = tensor("op_9809_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956676672)))]; + tensor var_9810_cast = add(x = out_297_cast, y = var_9809_to_fp16)[name = tensor("op_9810_cast")]; + tensor var_9812_to_fp16 = const()[name = tensor("op_9812_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956679296)))]; + tensor hidden_states_393_cast = mul(x = var_9810_cast, y = var_9812_to_fp16)[name = tensor("hidden_states_393_cast")]; + tensor var_9819 = const()[name = tensor("op_9819"), val = tensor([1, 1])]; + tensor var_9821 = const()[name = tensor("op_9821"), val = tensor([1, 1])]; + tensor q_199_pad_type_0 = const()[name = tensor("q_199_pad_type_0"), val = tensor("custom")]; + tensor q_199_pad_0 = const()[name = tensor("q_199_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956681920))), lut = tensor([-0x1.164p-5, -0x1.4ecp-7, 0x1.5p-7, 0x1.16p-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_199_cast = conv(dilations = var_9821, groups = var_6865, pad = q_199_pad_0, pad_type = q_199_pad_type_0, strides = var_9819, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_393_cast)[name = tensor("q_199_cast")]; + tensor var_9825 = const()[name = tensor("op_9825"), val = tensor([1, 1])]; + tensor var_9827 = const()[name = tensor("op_9827"), val = tensor([1, 1])]; + tensor k_199_pad_type_0 = const()[name = tensor("k_199_pad_type_0"), val = tensor("custom")]; + tensor k_199_pad_0 = const()[name = tensor("k_199_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(957091584))), lut = tensor([-0x1.cfcp-6, -0x1.12p-7, 0x1.0f8p-7, 0x1.cep-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_199_cast = conv(dilations = var_9827, groups = var_6865, pad = k_199_pad_0, pad_type = k_199_pad_type_0, strides = var_9825, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_199_cast")]; + tensor var_9831 = const()[name = tensor("op_9831"), val = tensor([1, 1])]; + tensor var_9833 = const()[name = tensor("op_9833"), val = tensor([1, 1])]; + tensor v_199_pad_type_0 = const()[name = tensor("v_199_pad_type_0"), val = tensor("custom")]; + tensor v_199_pad_0 = const()[name = tensor("v_199_pad_0"), val = 
tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(957747008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(959057792))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_199_cast = conv(dilations = var_9833, groups = var_6865, pad = v_199_pad_0, pad_type = v_199_pad_type_0, strides = var_9831, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_199_cast")]; + tensor var_9837 = const()[name = tensor("op_9837"), val = tensor([2, 20, 64, -1])]; + tensor var_9838_cast = reshape(shape = var_9837, x = q_199_cast)[name = tensor("op_9838_cast")]; + tensor var_9839 = const()[name = tensor("op_9839"), val = tensor([2, 20, 64, -1])]; + tensor var_9840_cast = reshape(shape = var_9839, x = k_199_cast)[name = tensor("op_9840_cast")]; + tensor var_9841 = const()[name = tensor("op_9841"), val = tensor([2, 20, 64, -1])]; + tensor var_9842_cast = reshape(shape = var_9841, x = v_199_cast)[name = tensor("op_9842_cast")]; + tensor attn_weights_397_transpose_x_0 = const()[name = tensor("attn_weights_397_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_397_transpose_y_0 = const()[name = tensor("attn_weights_397_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_397_cast = matmul(transpose_x = attn_weights_397_transpose_x_0, transpose_y = attn_weights_397_transpose_y_0, x = var_9838_cast, y = var_9840_cast)[name = tensor("attn_weights_397_cast")]; + tensor attn_weights_399_cast = mul(x = attn_weights_397_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_399_cast")]; + tensor var_9846_cast = softmax(axis = var_6849, x = attn_weights_399_cast)[name = tensor("op_9846_cast")]; + tensor attn_199_transpose_x_0 = const()[name = tensor("attn_199_transpose_x_0"), val = tensor(false)]; + tensor attn_199_transpose_y_0 = const()[name = tensor("attn_199_transpose_y_0"), val = tensor(true)]; + tensor attn_199_cast = matmul(transpose_x = attn_199_transpose_x_0, transpose_y = attn_199_transpose_y_0, x = var_9842_cast, y = var_9846_cast)[name = tensor("attn_199_cast")]; + tensor var_9850 = const()[name = tensor("op_9850"), val = tensor([2, 1280, 1, -1])]; + tensor input_579_cast = reshape(shape = var_9850, x = attn_199_cast)[name = tensor("input_579_cast")]; + tensor var_9855 = const()[name = tensor("op_9855"), val = tensor([1, 1])]; + tensor var_9857 = const()[name = tensor("op_9857"), val = tensor([1, 1])]; + tensor var_9859_pad_type_0 = const()[name = tensor("op_9859_pad_type_0"), val = tensor("custom")]; + tensor var_9859_pad_0 = const()[name = tensor("op_9859_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(959057920))), lut = tensor([-0x1.2b8p-6, -0x1.664p-8, 0x1.644p-8, 0x1.2b4p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(959467584)))]; + tensor var_9859_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_9857, groups = var_6865, pad = var_9859_pad_0, pad_type = var_9859_pad_type_0, strides = var_9855, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_579_cast)[name = tensor("op_9859_cast")]; + tensor inputs_299_cast = add(x = var_9859_cast, y = inputs_297_cast)[name = tensor("inputs_299_cast")]; + tensor var_9863 = const()[name = tensor("op_9863"), val = tensor([1])]; + tensor channels_mean_299_cast = reduce_mean(axes = var_9863, keep_dims = var_6860, x = inputs_299_cast)[name = tensor("channels_mean_299_cast")]; + tensor zero_mean_299_cast = sub(x = inputs_299_cast, y = channels_mean_299_cast)[name = tensor("zero_mean_299_cast")]; + tensor zero_mean_sq_299_cast = mul(x = zero_mean_299_cast, y = zero_mean_299_cast)[name = tensor("zero_mean_sq_299_cast")]; + tensor var_9867 = const()[name = tensor("op_9867"), val = tensor([1])]; + tensor var_9868_cast = reduce_mean(axes = var_9867, keep_dims = var_6860, x = zero_mean_sq_299_cast)[name = tensor("op_9868_cast")]; + tensor var_9869_to_fp16 = const()[name = tensor("op_9869_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9870_cast = add(x = var_9868_cast, y = var_9869_to_fp16)[name = tensor("op_9870_cast")]; + tensor denom_299_epsilon_0_to_fp16 = const()[name = tensor("denom_299_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_299_cast = rsqrt(epsilon = denom_299_epsilon_0_to_fp16, x = var_9870_cast)[name = tensor("denom_299_cast")]; + tensor out_299_cast = mul(x = zero_mean_299_cast, y = denom_299_cast)[name = tensor("out_299_cast")]; + tensor var_9874_to_fp16 = const()[name = tensor("op_9874_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(959470208)))]; + tensor var_9875_cast = add(x = out_299_cast, y = var_9874_to_fp16)[name = tensor("op_9875_cast")]; + tensor var_9877_to_fp16 = const()[name = tensor("op_9877_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(959472832)))]; + tensor input_581_cast = mul(x = var_9875_cast, y = var_9877_to_fp16)[name = tensor("input_581_cast")]; + tensor var_9885 = const()[name = tensor("op_9885"), val = tensor([1, 1])]; + tensor var_9887 = const()[name = tensor("op_9887"), val = tensor([1, 1])]; + tensor var_9889_pad_type_0 = const()[name = tensor("op_9889_pad_type_0"), val = tensor("custom")]; + tensor var_9889_pad_0 = const()[name = tensor("op_9889_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(959475456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(969305920))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(969306112)))]; + tensor 
var_9889_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_9887, groups = var_6865, pad = var_9889_pad_0, pad_type = var_9889_pad_type_0, strides = var_9885, weight = up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_581_cast)[name = tensor("op_9889_cast")]; + tensor var_9890_split_sizes_0 = const()[name = tensor("op_9890_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9890_axis_0 = const()[name = tensor("op_9890_axis_0"), val = tensor(1)]; + tensor var_9890_cast_0, tensor var_9890_cast_1 = split(axis = var_9890_axis_0, split_sizes = var_9890_split_sizes_0, x = var_9889_cast)[name = tensor("op_9890_cast")]; + tensor var_9892_mode_0 = const()[name = tensor("op_9892_mode_0"), val = tensor("EXACT")]; + tensor var_9892_cast = gelu(mode = var_9892_mode_0, x = var_9890_cast_1)[name = tensor("op_9892_cast")]; + tensor input_583_cast = mul(x = var_9890_cast_0, y = var_9892_cast)[name = tensor("input_583_cast")]; + tensor var_9896 = const()[name = tensor("op_9896"), val = tensor([1, 1])]; + tensor var_9898 = const()[name = tensor("op_9898"), val = tensor([1, 1])]; + tensor var_9900_pad_type_0 = const()[name = tensor("op_9900_pad_type_0"), val = tensor("custom")]; + tensor var_9900_pad_0 = const()[name = tensor("op_9900_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(969326656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(974241920))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(974242112)))]; + tensor var_9900_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_9898, groups = var_6865, pad = var_9900_pad_0, pad_type = var_9900_pad_type_0, strides = var_9896, weight = up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_583_cast)[name = tensor("op_9900_cast")]; + tensor inputs_301_cast = add(x = var_9900_cast, y = inputs_299_cast)[name = tensor("inputs_301_cast")]; + tensor var_9910 = const()[name = tensor("op_9910"), val = tensor([1])]; + tensor channels_mean_301_cast = reduce_mean(axes = var_9910, keep_dims = var_6860, x = inputs_301_cast)[name = tensor("channels_mean_301_cast")]; + tensor zero_mean_301_cast = sub(x = inputs_301_cast, y = channels_mean_301_cast)[name = tensor("zero_mean_301_cast")]; + tensor zero_mean_sq_301_cast = mul(x = zero_mean_301_cast, y = zero_mean_301_cast)[name = tensor("zero_mean_sq_301_cast")]; + tensor var_9914 = const()[name = tensor("op_9914"), val = tensor([1])]; + tensor var_9915_cast = reduce_mean(axes = var_9914, keep_dims = var_6860, x = zero_mean_sq_301_cast)[name = tensor("op_9915_cast")]; + tensor var_9916_to_fp16 = const()[name = tensor("op_9916_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9917_cast = add(x = var_9915_cast, y = var_9916_to_fp16)[name = tensor("op_9917_cast")]; + tensor denom_301_epsilon_0_to_fp16 = const()[name = 
tensor("denom_301_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_301_cast = rsqrt(epsilon = denom_301_epsilon_0_to_fp16, x = var_9917_cast)[name = tensor("denom_301_cast")]; + tensor out_301_cast = mul(x = zero_mean_301_cast, y = denom_301_cast)[name = tensor("out_301_cast")]; + tensor var_9921_to_fp16 = const()[name = tensor("op_9921_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(974244736)))]; + tensor var_9922_cast = add(x = out_301_cast, y = var_9921_to_fp16)[name = tensor("op_9922_cast")]; + tensor var_9924_to_fp16 = const()[name = tensor("op_9924_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(974247360)))]; + tensor hidden_states_397_cast = mul(x = var_9922_cast, y = var_9924_to_fp16)[name = tensor("hidden_states_397_cast")]; + tensor var_9931 = const()[name = tensor("op_9931"), val = tensor([1, 1])]; + tensor var_9933 = const()[name = tensor("op_9933"), val = tensor([1, 1])]; + tensor q_201_pad_type_0 = const()[name = tensor("q_201_pad_type_0"), val = tensor("custom")]; + tensor q_201_pad_0 = const()[name = tensor("q_201_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(974249984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(975069248))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_201_cast = conv(dilations = var_9933, groups = var_6865, pad = q_201_pad_0, pad_type = q_201_pad_type_0, strides = var_9931, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_397_cast)[name = tensor("q_201_cast")]; + tensor var_9937 = const()[name = tensor("op_9937"), val = tensor([1, 1])]; + tensor var_9939 = const()[name = tensor("op_9939"), val = tensor([1, 1])]; + tensor k_201_pad_type_0 = const()[name = tensor("k_201_pad_type_0"), val = tensor("custom")]; + tensor k_201_pad_0 = const()[name = tensor("k_201_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(975069376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(975888640))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_201_cast = conv(dilations = var_9939, groups = var_6865, pad = k_201_pad_0, pad_type = k_201_pad_type_0, strides = var_9937, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_397_cast)[name = tensor("k_201_cast")]; + tensor var_9943 = const()[name = tensor("op_9943"), val = tensor([1, 1])]; + tensor var_9945 = const()[name = tensor("op_9945"), val = tensor([1, 1])]; + tensor v_201_pad_type_0 = const()[name = tensor("v_201_pad_type_0"), val = tensor("custom")]; + tensor v_201_pad_0 = const()[name = tensor("v_201_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(975888768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(977117632))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_201_cast = conv(dilations = var_9945, groups = var_6865, pad = v_201_pad_0, pad_type = v_201_pad_type_0, strides = var_9943, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_397_cast)[name = tensor("v_201_cast")]; + tensor var_9949 = const()[name = tensor("op_9949"), val = tensor([2, 20, 64, -1])]; + tensor var_9950_cast = reshape(shape = var_9949, x = q_201_cast)[name = tensor("op_9950_cast")]; + tensor var_9951 = const()[name = tensor("op_9951"), val = tensor([2, 20, 64, -1])]; + tensor var_9952_cast = reshape(shape = var_9951, x = k_201_cast)[name = tensor("op_9952_cast")]; + tensor var_9953 = const()[name = tensor("op_9953"), val = tensor([2, 20, 64, -1])]; + tensor var_9954_cast = reshape(shape = var_9953, x = v_201_cast)[name = tensor("op_9954_cast")]; + tensor attn_weights_401_transpose_x_0 = const()[name = tensor("attn_weights_401_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_401_transpose_y_0 = const()[name = tensor("attn_weights_401_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_401_cast = matmul(transpose_x = attn_weights_401_transpose_x_0, transpose_y = attn_weights_401_transpose_y_0, x = var_9950_cast, y = var_9952_cast)[name = tensor("attn_weights_401_cast")]; + tensor attn_weights_403_cast = mul(x = attn_weights_401_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_403_cast")]; + tensor var_9958_cast = softmax(axis = var_6849, x = attn_weights_403_cast)[name = tensor("op_9958_cast")]; + tensor attn_201_transpose_x_0 = const()[name = tensor("attn_201_transpose_x_0"), val = tensor(false)]; + tensor attn_201_transpose_y_0 = const()[name = tensor("attn_201_transpose_y_0"), val = tensor(true)]; + tensor attn_201_cast = matmul(transpose_x = attn_201_transpose_x_0, transpose_y = attn_201_transpose_y_0, x = var_9954_cast, y = var_9958_cast)[name = tensor("attn_201_cast")]; + tensor var_9962 = const()[name = tensor("op_9962"), val = tensor([2, 1280, 1, -1])]; + tensor input_585_cast = reshape(shape = var_9962, x = attn_201_cast)[name = tensor("input_585_cast")]; + tensor var_9967 = const()[name = tensor("op_9967"), val = tensor([1, 1])]; + tensor var_9969 = const()[name = tensor("op_9969"), val = tensor([1, 1])]; + tensor var_9971_pad_type_0 = const()[name = tensor("op_9971_pad_type_0"), val = tensor("custom")]; + tensor var_9971_pad_0 = const()[name = tensor("op_9971_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(977117824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(978346688))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(978346880)))]; + tensor var_9971_cast = 
conv(bias = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_9969, groups = var_6865, pad = var_9971_pad_0, pad_type = var_9971_pad_type_0, strides = var_9967, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_585_cast)[name = tensor("op_9971_cast")]; + tensor inputs_303_cast = add(x = var_9971_cast, y = inputs_301_cast)[name = tensor("inputs_303_cast")]; + tensor var_9975 = const()[name = tensor("op_9975"), val = tensor([1])]; + tensor channels_mean_303_cast = reduce_mean(axes = var_9975, keep_dims = var_6860, x = inputs_303_cast)[name = tensor("channels_mean_303_cast")]; + tensor zero_mean_303_cast = sub(x = inputs_303_cast, y = channels_mean_303_cast)[name = tensor("zero_mean_303_cast")]; + tensor zero_mean_sq_303_cast = mul(x = zero_mean_303_cast, y = zero_mean_303_cast)[name = tensor("zero_mean_sq_303_cast")]; + tensor var_9979 = const()[name = tensor("op_9979"), val = tensor([1])]; + tensor var_9980_cast = reduce_mean(axes = var_9979, keep_dims = var_6860, x = zero_mean_sq_303_cast)[name = tensor("op_9980_cast")]; + tensor var_9981_to_fp16 = const()[name = tensor("op_9981_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9982_cast = add(x = var_9980_cast, y = var_9981_to_fp16)[name = tensor("op_9982_cast")]; + tensor denom_303_epsilon_0_to_fp16 = const()[name = tensor("denom_303_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_303_cast = rsqrt(epsilon = denom_303_epsilon_0_to_fp16, x = var_9982_cast)[name = tensor("denom_303_cast")]; + tensor out_303_cast = mul(x = zero_mean_303_cast, y = denom_303_cast)[name = tensor("out_303_cast")]; + tensor var_9986_to_fp16 = const()[name = tensor("op_9986_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(978349504)))]; + tensor var_9987_cast = add(x = out_303_cast, y = var_9986_to_fp16)[name = tensor("op_9987_cast")]; + tensor var_9989_to_fp16 = const()[name = tensor("op_9989_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(978352128)))]; + tensor hidden_states_399_cast = mul(x = var_9987_cast, y = var_9989_to_fp16)[name = tensor("hidden_states_399_cast")]; + tensor var_9996 = const()[name = tensor("op_9996"), val = tensor([1, 1])]; + tensor var_9998 = const()[name = tensor("op_9998"), val = tensor([1, 1])]; + tensor q_203_pad_type_0 = const()[name = tensor("q_203_pad_type_0"), val = tensor("custom")]; + tensor q_203_pad_0 = const()[name = tensor("q_203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(978354752))), lut = tensor([-0x1.048p-5, -0x1.3ap-7, 0x1.3cp-7, 0x1.05p-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_203_cast = conv(dilations = var_9998, groups = var_6865, pad = q_203_pad_0, pad_type = q_203_pad_type_0, strides = var_9996, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_399_cast)[name = tensor("q_203_cast")]; + tensor var_10002 = const()[name = tensor("op_10002"), val = tensor([1, 1])]; + tensor var_10004 = const()[name = tensor("op_10004"), val = tensor([1, 1])]; + tensor k_203_pad_type_0 = const()[name = tensor("k_203_pad_type_0"), val = tensor("custom")]; + 
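The `q_203` projection above is palettized down to 2 bits: its look-up table holds only 4 fp16 centroids, so the converter emits it inline rather than as a `weight.bin` blob reference, and each weight is stored as a 2-bit index that `constexpr_lut_to_dense` expands at load time. Below is a minimal NumPy sketch of that expansion; the 4-indices-per-byte, LSB-first packing is an assumption for illustration (the real layout inside `weight.bin` is a Core ML implementation detail), and `lut_to_dense` is a hypothetical helper name.

```python
import numpy as np

def lut_to_dense(packed: np.ndarray, lut: np.ndarray, shape: tuple) -> np.ndarray:
    """Expand 2-bit palette indices into a dense weight tensor.

    Assumes four 2-bit indices per byte, least-significant bits first;
    the actual packing used by Core ML is not documented here.
    """
    n = int(np.prod(shape))
    shifts = np.array([0, 2, 4, 6], dtype=np.uint8)
    indices = ((packed[:, None] >> shifts) & 0b11).reshape(-1)[:n]
    return lut[indices].reshape(shape)

# The four centroids of q_203, copied from the listing
# (hex-float literals are exact fp16 values).
lut = np.array(
    [float.fromhex(s) for s in
     ("-0x1.048p-5", "-0x1.3ap-7", "0x1.3cp-7", "0x1.05p-5")],
    dtype=np.float16,
)

# Random bytes standing in for the real index blob.
packed = np.random.default_rng(0).integers(0, 256, size=1280 * 1280 // 4, dtype=np.uint8)
dense = lut_to_dense(packed, lut, (1280, 1280, 1, 1))
print(dense.dtype, dense.shape)  # float16 (1280, 1280, 1, 1)
```

As a rough sanity check, 1280 × 1280 weights at 2 bits each come to 409,600 bytes, consistent with the 409,664-byte gap between this layer's index blob (offset 978354752) and the next one (978764416) once a little alignment or header overhead is allowed for.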
tensor k_203_pad_0 = const()[name = tensor("k_203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(978764416))), lut = tensor([-0x1.ac4p-6, -0x1.f84p-8, 0x1.f88p-8, 0x1.ac4p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_203_cast = conv(dilations = var_10004, groups = var_6865, pad = k_203_pad_0, pad_type = k_203_pad_type_0, strides = var_10002, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_203_cast")]; + tensor var_10008 = const()[name = tensor("op_10008"), val = tensor([1, 1])]; + tensor var_10010 = const()[name = tensor("op_10010"), val = tensor([1, 1])]; + tensor v_203_pad_type_0 = const()[name = tensor("v_203_pad_type_0"), val = tensor("custom")]; + tensor v_203_pad_0 = const()[name = tensor("v_203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(979419840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(980730624))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_203_cast = conv(dilations = var_10010, groups = var_6865, pad = v_203_pad_0, pad_type = v_203_pad_type_0, strides = var_10008, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_203_cast")]; + tensor var_10014 = const()[name = tensor("op_10014"), val = tensor([2, 20, 64, -1])]; + tensor var_10015_cast = reshape(shape = var_10014, x = q_203_cast)[name = tensor("op_10015_cast")]; + tensor var_10016 = const()[name = tensor("op_10016"), val = tensor([2, 20, 64, -1])]; + tensor var_10017_cast = reshape(shape = var_10016, x = k_203_cast)[name = tensor("op_10017_cast")]; + tensor var_10018 = const()[name = tensor("op_10018"), val = tensor([2, 20, 64, -1])]; + tensor var_10019_cast = reshape(shape = var_10018, x = v_203_cast)[name = tensor("op_10019_cast")]; + tensor attn_weights_405_transpose_x_0 = const()[name = tensor("attn_weights_405_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_405_transpose_y_0 = const()[name = tensor("attn_weights_405_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_405_cast = matmul(transpose_x = attn_weights_405_transpose_x_0, transpose_y = attn_weights_405_transpose_y_0, x = var_10015_cast, y = var_10017_cast)[name = tensor("attn_weights_405_cast")]; + tensor attn_weights_407_cast = mul(x = attn_weights_405_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_407_cast")]; + tensor var_10023_cast = softmax(axis = var_6849, x = attn_weights_407_cast)[name = tensor("op_10023_cast")]; + tensor attn_203_transpose_x_0 = const()[name = tensor("attn_203_transpose_x_0"), val = tensor(false)]; + tensor attn_203_transpose_y_0 = const()[name = tensor("attn_203_transpose_y_0"), val = tensor(true)]; + tensor attn_203_cast = matmul(transpose_x = attn_203_transpose_x_0, transpose_y = attn_203_transpose_y_0, x = var_10019_cast, y = var_10023_cast)[name = tensor("attn_203_cast")]; + 
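The ops just completed form one cross-attention in full: `q_203` is projected from the UNet hidden states while `k_203` and `v_203` project the 2048-channel `encoder_hidden_states` (hence the `[1280, 2048, 1, 1]` weight shapes), all three are reshaped to `[2, 20, 64, -1]` (a classifier-free-guidance batch of two, 20 heads of 64 channels each), and attention reduces to two batched matmuls around a softmax over the key axis. A NumPy sketch of the same arithmetic follows; the sequence lengths are illustrative, and treating `var_6856_to_fp16` (defined earlier in the program) as the usual 1/√64 scale is an assumption.

```python
import numpy as np

B, H, D = 2, 20, 64      # CFG batch, heads, channels per head
Lq, Lk = 1024, 77        # latent and text tokens (illustrative sizes)

rng = np.random.default_rng(0)
q = rng.standard_normal((B, H, D, Lq), dtype=np.float32)
k = rng.standard_normal((B, H, D, Lk), dtype=np.float32)
v = rng.standard_normal((B, H, D, Lk), dtype=np.float32)

def softmax(x: np.ndarray, axis: int) -> np.ndarray:
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

# matmul(transpose_x=True): scores[b, h] = q[b, h].T @ k[b, h]
scores = np.einsum("bhdq,bhdk->bhqk", q, k) / np.sqrt(D)
weights = softmax(scores, axis=-1)

# matmul(transpose_y=True): out[b, h] = v[b, h] @ weights[b, h].T
out = np.einsum("bhdk,bhqk->bhdq", v, weights)
ctx = out.reshape(B, H * D, 1, Lq)   # back to the [2, 1280, 1, -1] layout
print(ctx.shape)                      # (2, 1280, 1, 1024)
```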
tensor var_10027 = const()[name = tensor("op_10027"), val = tensor([2, 1280, 1, -1])]; + tensor input_587_cast = reshape(shape = var_10027, x = attn_203_cast)[name = tensor("input_587_cast")]; + tensor var_10032 = const()[name = tensor("op_10032"), val = tensor([1, 1])]; + tensor var_10034 = const()[name = tensor("op_10034"), val = tensor([1, 1])]; + tensor var_10036_pad_type_0 = const()[name = tensor("op_10036_pad_type_0"), val = tensor("custom")]; + tensor var_10036_pad_0 = const()[name = tensor("op_10036_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(980730752))), lut = tensor([-0x1.1bcp-6, -0x1.55p-8, 0x1.52p-8, 0x1.1acp-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(981140416)))]; + tensor var_10036_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_10034, groups = var_6865, pad = var_10036_pad_0, pad_type = var_10036_pad_type_0, strides = var_10032, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_587_cast)[name = tensor("op_10036_cast")]; + tensor inputs_305_cast = add(x = var_10036_cast, y = inputs_303_cast)[name = tensor("inputs_305_cast")]; + tensor var_10040 = const()[name = tensor("op_10040"), val = tensor([1])]; + tensor channels_mean_305_cast = reduce_mean(axes = var_10040, keep_dims = var_6860, x = inputs_305_cast)[name = tensor("channels_mean_305_cast")]; + tensor zero_mean_305_cast = sub(x = inputs_305_cast, y = channels_mean_305_cast)[name = tensor("zero_mean_305_cast")]; + tensor zero_mean_sq_305_cast = mul(x = zero_mean_305_cast, y = zero_mean_305_cast)[name = tensor("zero_mean_sq_305_cast")]; + tensor var_10044 = const()[name = tensor("op_10044"), val = tensor([1])]; + tensor var_10045_cast = reduce_mean(axes = var_10044, keep_dims = var_6860, x = zero_mean_sq_305_cast)[name = tensor("op_10045_cast")]; + tensor var_10046_to_fp16 = const()[name = tensor("op_10046_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10047_cast = add(x = var_10045_cast, y = var_10046_to_fp16)[name = tensor("op_10047_cast")]; + tensor denom_305_epsilon_0_to_fp16 = const()[name = tensor("denom_305_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_305_cast = rsqrt(epsilon = denom_305_epsilon_0_to_fp16, x = var_10047_cast)[name = tensor("denom_305_cast")]; + tensor out_305_cast = mul(x = zero_mean_305_cast, y = denom_305_cast)[name = tensor("out_305_cast")]; + tensor var_10051_to_fp16 = const()[name = tensor("op_10051_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(981143040)))]; + tensor var_10052_cast = add(x = out_305_cast, y = var_10051_to_fp16)[name = tensor("op_10052_cast")]; + tensor var_10054_to_fp16 = const()[name = tensor("op_10054_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(981145664)))]; + tensor input_589_cast = mul(x = var_10052_cast, y = var_10054_to_fp16)[name = 
tensor("input_589_cast")]; + tensor var_10062 = const()[name = tensor("op_10062"), val = tensor([1, 1])]; + tensor var_10064 = const()[name = tensor("op_10064"), val = tensor([1, 1])]; + tensor var_10066_pad_type_0 = const()[name = tensor("op_10066_pad_type_0"), val = tensor("custom")]; + tensor var_10066_pad_0 = const()[name = tensor("op_10066_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(981148288))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(990978752))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(990978944)))]; + tensor var_10066_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_10064, groups = var_6865, pad = var_10066_pad_0, pad_type = var_10066_pad_type_0, strides = var_10062, weight = up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_589_cast)[name = tensor("op_10066_cast")]; + tensor var_10067_split_sizes_0 = const()[name = tensor("op_10067_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10067_axis_0 = const()[name = tensor("op_10067_axis_0"), val = tensor(1)]; + tensor var_10067_cast_0, tensor var_10067_cast_1 = split(axis = var_10067_axis_0, split_sizes = var_10067_split_sizes_0, x = var_10066_cast)[name = tensor("op_10067_cast")]; + tensor var_10069_mode_0 = const()[name = tensor("op_10069_mode_0"), val = tensor("EXACT")]; + tensor var_10069_cast = gelu(mode = var_10069_mode_0, x = var_10067_cast_1)[name = tensor("op_10069_cast")]; + tensor input_591_cast = mul(x = var_10067_cast_0, y = var_10069_cast)[name = tensor("input_591_cast")]; + tensor var_10073 = const()[name = tensor("op_10073"), val = tensor([1, 1])]; + tensor var_10075 = const()[name = tensor("op_10075"), val = tensor([1, 1])]; + tensor var_10077_pad_type_0 = const()[name = tensor("op_10077_pad_type_0"), val = tensor("custom")]; + tensor var_10077_pad_0 = const()[name = tensor("op_10077_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(990999488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(995914752))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(995914944)))]; + tensor var_10077_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_10075, groups = var_6865, pad = var_10077_pad_0, pad_type = var_10077_pad_type_0, strides = var_10073, weight = 
up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_591_cast)[name = tensor("op_10077_cast")]; + tensor inputs_307_cast = add(x = var_10077_cast, y = inputs_305_cast)[name = tensor("inputs_307_cast")]; + tensor var_10087 = const()[name = tensor("op_10087"), val = tensor([1])]; + tensor channels_mean_307_cast = reduce_mean(axes = var_10087, keep_dims = var_6860, x = inputs_307_cast)[name = tensor("channels_mean_307_cast")]; + tensor zero_mean_307_cast = sub(x = inputs_307_cast, y = channels_mean_307_cast)[name = tensor("zero_mean_307_cast")]; + tensor zero_mean_sq_307_cast = mul(x = zero_mean_307_cast, y = zero_mean_307_cast)[name = tensor("zero_mean_sq_307_cast")]; + tensor var_10091 = const()[name = tensor("op_10091"), val = tensor([1])]; + tensor var_10092_cast = reduce_mean(axes = var_10091, keep_dims = var_6860, x = zero_mean_sq_307_cast)[name = tensor("op_10092_cast")]; + tensor var_10093_to_fp16 = const()[name = tensor("op_10093_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10094_cast = add(x = var_10092_cast, y = var_10093_to_fp16)[name = tensor("op_10094_cast")]; + tensor denom_307_epsilon_0_to_fp16 = const()[name = tensor("denom_307_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_307_cast = rsqrt(epsilon = denom_307_epsilon_0_to_fp16, x = var_10094_cast)[name = tensor("denom_307_cast")]; + tensor out_307_cast = mul(x = zero_mean_307_cast, y = denom_307_cast)[name = tensor("out_307_cast")]; + tensor var_10098_to_fp16 = const()[name = tensor("op_10098_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(995917568)))]; + tensor var_10099_cast = add(x = out_307_cast, y = var_10098_to_fp16)[name = tensor("op_10099_cast")]; + tensor var_10101_to_fp16 = const()[name = tensor("op_10101_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(995920192)))]; + tensor hidden_states_403_cast = mul(x = var_10099_cast, y = var_10101_to_fp16)[name = tensor("hidden_states_403_cast")]; + tensor var_10108 = const()[name = tensor("op_10108"), val = tensor([1, 1])]; + tensor var_10110 = const()[name = tensor("op_10110"), val = tensor([1, 1])]; + tensor q_205_pad_type_0 = const()[name = tensor("q_205_pad_type_0"), val = tensor("custom")]; + tensor q_205_pad_0 = const()[name = tensor("q_205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(995922816))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(996742080))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_205_cast = conv(dilations = var_10110, groups = var_6865, pad = q_205_pad_0, pad_type = q_205_pad_type_0, strides = var_10108, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_403_cast)[name = tensor("q_205_cast")]; + tensor var_10114 = const()[name = tensor("op_10114"), val = tensor([1, 1])]; + tensor var_10116 = const()[name = tensor("op_10116"), val = tensor([1, 1])]; + tensor k_205_pad_type_0 = const()[name = tensor("k_205_pad_type_0"), val = tensor("custom")]; + tensor k_205_pad_0 = const()[name = tensor("k_205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(996742208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(997561472))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_205_cast = conv(dilations = var_10116, groups = var_6865, pad = k_205_pad_0, pad_type = k_205_pad_type_0, strides = var_10114, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_403_cast)[name = tensor("k_205_cast")]; + tensor var_10120 = const()[name = tensor("op_10120"), val = tensor([1, 1])]; + tensor var_10122 = const()[name = tensor("op_10122"), val = tensor([1, 1])]; + tensor v_205_pad_type_0 = const()[name = tensor("v_205_pad_type_0"), val = tensor("custom")]; + tensor v_205_pad_0 = const()[name = tensor("v_205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(997561600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(998790464))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_205_cast = conv(dilations = var_10122, groups = var_6865, pad = v_205_pad_0, pad_type = v_205_pad_type_0, strides = var_10120, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_403_cast)[name = tensor("v_205_cast")]; + tensor var_10126 = const()[name = tensor("op_10126"), val = tensor([2, 20, 64, -1])]; + tensor var_10127_cast = reshape(shape = var_10126, x = q_205_cast)[name = tensor("op_10127_cast")]; + tensor var_10128 = const()[name = tensor("op_10128"), val = tensor([2, 20, 64, -1])]; + tensor var_10129_cast = reshape(shape = var_10128, x = k_205_cast)[name = tensor("op_10129_cast")]; + tensor var_10130 = const()[name = tensor("op_10130"), val = tensor([2, 20, 64, -1])]; + tensor var_10131_cast = reshape(shape = var_10130, x = v_205_cast)[name = tensor("op_10131_cast")]; + tensor attn_weights_409_transpose_x_0 = const()[name = tensor("attn_weights_409_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_409_transpose_y_0 = const()[name = tensor("attn_weights_409_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_409_cast = matmul(transpose_x = attn_weights_409_transpose_x_0, transpose_y = attn_weights_409_transpose_y_0, x = var_10127_cast, y = var_10129_cast)[name = tensor("attn_weights_409_cast")]; + tensor attn_weights_411_cast = mul(x = attn_weights_409_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_411_cast")]; + tensor var_10135_cast = softmax(axis = var_6849, x = attn_weights_411_cast)[name = tensor("op_10135_cast")]; + tensor attn_205_transpose_x_0 = const()[name = tensor("attn_205_transpose_x_0"), val = tensor(false)]; + tensor attn_205_transpose_y_0 = const()[name = tensor("attn_205_transpose_y_0"), val = tensor(true)]; + tensor attn_205_cast = matmul(transpose_x = attn_205_transpose_x_0, transpose_y = attn_205_transpose_y_0, x = var_10131_cast, y = var_10135_cast)[name = tensor("attn_205_cast")]; + tensor var_10139 = const()[name = tensor("op_10139"), val = 
tensor([2, 1280, 1, -1])]; + tensor input_593_cast = reshape(shape = var_10139, x = attn_205_cast)[name = tensor("input_593_cast")]; + tensor var_10144 = const()[name = tensor("op_10144"), val = tensor([1, 1])]; + tensor var_10146 = const()[name = tensor("op_10146"), val = tensor([1, 1])]; + tensor var_10148_pad_type_0 = const()[name = tensor("op_10148_pad_type_0"), val = tensor("custom")]; + tensor var_10148_pad_0 = const()[name = tensor("op_10148_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(998790656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1000019520))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1000019712)))]; + tensor var_10148_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_10146, groups = var_6865, pad = var_10148_pad_0, pad_type = var_10148_pad_type_0, strides = var_10144, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_593_cast)[name = tensor("op_10148_cast")]; + tensor inputs_309_cast = add(x = var_10148_cast, y = inputs_307_cast)[name = tensor("inputs_309_cast")]; + tensor var_10152 = const()[name = tensor("op_10152"), val = tensor([1])]; + tensor channels_mean_309_cast = reduce_mean(axes = var_10152, keep_dims = var_6860, x = inputs_309_cast)[name = tensor("channels_mean_309_cast")]; + tensor zero_mean_309_cast = sub(x = inputs_309_cast, y = channels_mean_309_cast)[name = tensor("zero_mean_309_cast")]; + tensor zero_mean_sq_309_cast = mul(x = zero_mean_309_cast, y = zero_mean_309_cast)[name = tensor("zero_mean_sq_309_cast")]; + tensor var_10156 = const()[name = tensor("op_10156"), val = tensor([1])]; + tensor var_10157_cast = reduce_mean(axes = var_10156, keep_dims = var_6860, x = zero_mean_sq_309_cast)[name = tensor("op_10157_cast")]; + tensor var_10158_to_fp16 = const()[name = tensor("op_10158_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10159_cast = add(x = var_10157_cast, y = var_10158_to_fp16)[name = tensor("op_10159_cast")]; + tensor denom_309_epsilon_0_to_fp16 = const()[name = tensor("denom_309_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_309_cast = rsqrt(epsilon = denom_309_epsilon_0_to_fp16, x = var_10159_cast)[name = tensor("denom_309_cast")]; + tensor out_309_cast = mul(x = zero_mean_309_cast, y = denom_309_cast)[name = tensor("out_309_cast")]; + tensor var_10163_to_fp16 = const()[name = tensor("op_10163_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1000022336)))]; + tensor var_10164_cast = add(x = out_309_cast, y = var_10163_to_fp16)[name = tensor("op_10164_cast")]; + tensor var_10166_to_fp16 = const()[name = tensor("op_10166_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1000024960)))]; + tensor hidden_states_405_cast = mul(x = var_10164_cast, y = var_10166_to_fp16)[name = tensor("hidden_states_405_cast")]; + 
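Every residual branch in this listing is preceded by the same unrolled LayerNorm: a channel mean, a subtraction, a squared mean for the variance, `+ 0x1.5p-17` (≈ 1e-5), an `rsqrt` carrying a second epsilon of `0x1p-24` (the smallest positive fp16), then the learned affine. Notably, the affine adds the per-channel bias before multiplying by the scale; reading that bias as a folded `beta / gamma` term is an inference from the op order, not something the listing states. A sketch of the exact arithmetic:

```python
import numpy as np

EPS_VAR = float.fromhex("0x1.5p-17")   # ~1.0e-5, added to the variance
EPS_RSQRT = float.fromhex("0x1p-24")   # epsilon argument of rsqrt

def decomposed_layer_norm(x, gamma, beta_over_gamma, axis=1):
    """LayerNorm as unrolled in the MIL ops above.

    out = (x_hat + beta') * gamma, which equals gamma * x_hat + beta
    when beta' = beta / gamma (assumed folded in at conversion time).
    """
    mean = x.mean(axis=axis, keepdims=True)
    zero_mean = x - mean
    var = (zero_mean * zero_mean).mean(axis=axis, keepdims=True)
    denom = 1.0 / np.sqrt(var + EPS_VAR + EPS_RSQRT)   # rsqrt with epsilon
    x_hat = zero_mean * denom
    return (x_hat + beta_over_gamma) * gamma

x = np.random.default_rng(0).standard_normal((2, 1280, 1, 1024)).astype(np.float32)
gamma = np.ones((1, 1280, 1, 1), np.float32)
beta_over_gamma = np.zeros((1, 1280, 1, 1), np.float32)
print(decomposed_layer_norm(x, gamma, beta_over_gamma).shape)  # (2, 1280, 1, 1024)
```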
tensor var_10173 = const()[name = tensor("op_10173"), val = tensor([1, 1])]; + tensor var_10175 = const()[name = tensor("op_10175"), val = tensor([1, 1])]; + tensor q_207_pad_type_0 = const()[name = tensor("q_207_pad_type_0"), val = tensor("custom")]; + tensor q_207_pad_0 = const()[name = tensor("q_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1000027584))), lut = tensor([-0x1.d34p-6, -0x1.1dp-7, 0x1.1ep-7, 0x1.d2cp-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_207_cast = conv(dilations = var_10175, groups = var_6865, pad = q_207_pad_0, pad_type = q_207_pad_type_0, strides = var_10173, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_405_cast)[name = tensor("q_207_cast")]; + tensor var_10179 = const()[name = tensor("op_10179"), val = tensor([1, 1])]; + tensor var_10181 = const()[name = tensor("op_10181"), val = tensor([1, 1])]; + tensor k_207_pad_type_0 = const()[name = tensor("k_207_pad_type_0"), val = tensor("custom")]; + tensor k_207_pad_0 = const()[name = tensor("k_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1000437248))), lut = tensor([-0x1.664p-6, -0x1.a54p-8, 0x1.b0cp-8, 0x1.69p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_207_cast = conv(dilations = var_10181, groups = var_6865, pad = k_207_pad_0, pad_type = k_207_pad_type_0, strides = var_10179, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_207_cast")]; + tensor var_10185 = const()[name = tensor("op_10185"), val = tensor([1, 1])]; + tensor var_10187 = const()[name = tensor("op_10187"), val = tensor([1, 1])]; + tensor v_207_pad_type_0 = const()[name = tensor("v_207_pad_type_0"), val = tensor("custom")]; + tensor v_207_pad_0 = const()[name = tensor("v_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1001092672))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002403456))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_207_cast = conv(dilations = var_10187, groups = var_6865, pad = v_207_pad_0, pad_type = v_207_pad_type_0, strides = var_10185, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_207_cast")]; + tensor var_10191 = const()[name = tensor("op_10191"), val = tensor([2, 20, 64, -1])]; + tensor var_10192_cast = reshape(shape = var_10191, x = q_207_cast)[name = tensor("op_10192_cast")]; + tensor var_10193 = const()[name = tensor("op_10193"), val = tensor([2, 20, 64, -1])]; + tensor var_10194_cast = reshape(shape = var_10193, x = 
k_207_cast)[name = tensor("op_10194_cast")]; + tensor var_10195 = const()[name = tensor("op_10195"), val = tensor([2, 20, 64, -1])]; + tensor var_10196_cast = reshape(shape = var_10195, x = v_207_cast)[name = tensor("op_10196_cast")]; + tensor attn_weights_413_transpose_x_0 = const()[name = tensor("attn_weights_413_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_413_transpose_y_0 = const()[name = tensor("attn_weights_413_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_413_cast = matmul(transpose_x = attn_weights_413_transpose_x_0, transpose_y = attn_weights_413_transpose_y_0, x = var_10192_cast, y = var_10194_cast)[name = tensor("attn_weights_413_cast")]; + tensor attn_weights_415_cast = mul(x = attn_weights_413_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_415_cast")]; + tensor var_10200_cast = softmax(axis = var_6849, x = attn_weights_415_cast)[name = tensor("op_10200_cast")]; + tensor attn_207_transpose_x_0 = const()[name = tensor("attn_207_transpose_x_0"), val = tensor(false)]; + tensor attn_207_transpose_y_0 = const()[name = tensor("attn_207_transpose_y_0"), val = tensor(true)]; + tensor attn_207_cast = matmul(transpose_x = attn_207_transpose_x_0, transpose_y = attn_207_transpose_y_0, x = var_10196_cast, y = var_10200_cast)[name = tensor("attn_207_cast")]; + tensor var_10204 = const()[name = tensor("op_10204"), val = tensor([2, 1280, 1, -1])]; + tensor input_595_cast = reshape(shape = var_10204, x = attn_207_cast)[name = tensor("input_595_cast")]; + tensor var_10209 = const()[name = tensor("op_10209"), val = tensor([1, 1])]; + tensor var_10211 = const()[name = tensor("op_10211"), val = tensor([1, 1])]; + tensor var_10213_pad_type_0 = const()[name = tensor("op_10213_pad_type_0"), val = tensor("custom")]; + tensor var_10213_pad_0 = const()[name = tensor("op_10213_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002403584))), lut = tensor([-0x1.09p-6, -0x1.3ep-8, 0x1.3b8p-8, 0x1.084p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002813248)))]; + tensor var_10213_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_10211, groups = var_6865, pad = var_10213_pad_0, pad_type = var_10213_pad_type_0, strides = var_10209, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_595_cast)[name = tensor("op_10213_cast")]; + tensor inputs_311_cast = add(x = var_10213_cast, y = inputs_309_cast)[name = tensor("inputs_311_cast")]; + tensor var_10217 = const()[name = tensor("op_10217"), val = tensor([1])]; + tensor channels_mean_311_cast = reduce_mean(axes = var_10217, keep_dims = var_6860, x = inputs_311_cast)[name = tensor("channels_mean_311_cast")]; + tensor zero_mean_311_cast = sub(x = inputs_311_cast, y = channels_mean_311_cast)[name = tensor("zero_mean_311_cast")]; + tensor zero_mean_sq_311_cast = mul(x = zero_mean_311_cast, y = zero_mean_311_cast)[name = tensor("zero_mean_sq_311_cast")]; 
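The feed-forward that closes each transformer block (its ops continue just below: a 1×1 conv up to 10240 channels, a `[5120, 5120]` split, an EXACT-mode GELU on one half, and an elementwise multiply before projecting back to 1280) is the GEGLU gating used throughout these blocks. A sketch with random stand-in weights; since the convs are 1×1, they are written here as plain per-token linear maps:

```python
import math
import numpy as np

def gelu_exact(x: np.ndarray) -> np.ndarray:
    # mode = "EXACT": x * Phi(x), with Phi the Gaussian CDF.
    erf = np.vectorize(math.erf)
    return 0.5 * x * (1.0 + erf(x / math.sqrt(2.0)))

def geglu_ff(x, w_proj, b_proj, w_out, b_out):
    """GEGLU feed-forward as unrolled in the listing. x: (B, 1280, 1, L)."""
    h = np.einsum("oc,bcxl->boxl", w_proj, x) + b_proj       # (B, 10240, 1, L)
    a, g = np.split(h, 2, axis=1)                            # two 5120-channel halves
    gated = a * gelu_exact(g)                                # first half * GELU(second half)
    return np.einsum("oc,bcxl->boxl", w_out, gated) + b_out  # back to 1280 channels

B, L = 2, 64
rng = np.random.default_rng(0)
x = rng.standard_normal((B, 1280, 1, L)).astype(np.float32)
w_proj = 0.02 * rng.standard_normal((10240, 1280)).astype(np.float32)
b_proj = np.zeros((1, 10240, 1, 1), np.float32)
w_out = 0.02 * rng.standard_normal((1280, 5120)).astype(np.float32)
b_out = np.zeros((1, 1280, 1, 1), np.float32)
print(geglu_ff(x, w_proj, b_proj, w_out, b_out).shape)  # (2, 1280, 1, 64)
```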
+ tensor var_10221 = const()[name = tensor("op_10221"), val = tensor([1])]; + tensor var_10222_cast = reduce_mean(axes = var_10221, keep_dims = var_6860, x = zero_mean_sq_311_cast)[name = tensor("op_10222_cast")]; + tensor var_10223_to_fp16 = const()[name = tensor("op_10223_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10224_cast = add(x = var_10222_cast, y = var_10223_to_fp16)[name = tensor("op_10224_cast")]; + tensor denom_311_epsilon_0_to_fp16 = const()[name = tensor("denom_311_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_311_cast = rsqrt(epsilon = denom_311_epsilon_0_to_fp16, x = var_10224_cast)[name = tensor("denom_311_cast")]; + tensor out_311_cast = mul(x = zero_mean_311_cast, y = denom_311_cast)[name = tensor("out_311_cast")]; + tensor var_10228_to_fp16 = const()[name = tensor("op_10228_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002815872)))]; + tensor var_10229_cast = add(x = out_311_cast, y = var_10228_to_fp16)[name = tensor("op_10229_cast")]; + tensor var_10231_to_fp16 = const()[name = tensor("op_10231_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002818496)))]; + tensor input_597_cast = mul(x = var_10229_cast, y = var_10231_to_fp16)[name = tensor("input_597_cast")]; + tensor var_10239 = const()[name = tensor("op_10239"), val = tensor([1, 1])]; + tensor var_10241 = const()[name = tensor("op_10241"), val = tensor([1, 1])]; + tensor var_10243_pad_type_0 = const()[name = tensor("op_10243_pad_type_0"), val = tensor("custom")]; + tensor var_10243_pad_0 = const()[name = tensor("op_10243_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002821120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1012651584))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1012651776)))]; + tensor var_10243_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_10241, groups = var_6865, pad = var_10243_pad_0, pad_type = var_10243_pad_type_0, strides = var_10239, weight = up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_597_cast)[name = tensor("op_10243_cast")]; + tensor var_10244_split_sizes_0 = const()[name = tensor("op_10244_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10244_axis_0 = const()[name = tensor("op_10244_axis_0"), val = tensor(1)]; + tensor var_10244_cast_0, tensor var_10244_cast_1 = split(axis = var_10244_axis_0, split_sizes = var_10244_split_sizes_0, x = var_10243_cast)[name = tensor("op_10244_cast")]; + tensor var_10246_mode_0 = const()[name = tensor("op_10246_mode_0"), val = tensor("EXACT")]; + tensor var_10246_cast = gelu(mode = var_10246_mode_0, x = var_10244_cast_1)[name = tensor("op_10246_cast")]; + tensor input_599_cast = mul(x = var_10244_cast_0, y = var_10246_cast)[name = tensor("input_599_cast")]; + tensor var_10250 = const()[name = 
tensor("op_10250"), val = tensor([1, 1])]; + tensor var_10252 = const()[name = tensor("op_10252"), val = tensor([1, 1])]; + tensor var_10254_pad_type_0 = const()[name = tensor("op_10254_pad_type_0"), val = tensor("custom")]; + tensor var_10254_pad_0 = const()[name = tensor("op_10254_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1012672320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1017587584))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1017587776)))]; + tensor var_10254_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_10252, groups = var_6865, pad = var_10254_pad_0, pad_type = var_10254_pad_type_0, strides = var_10250, weight = up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_599_cast)[name = tensor("op_10254_cast")]; + tensor inputs_313_cast = add(x = var_10254_cast, y = inputs_311_cast)[name = tensor("inputs_313_cast")]; + tensor var_10264 = const()[name = tensor("op_10264"), val = tensor([1])]; + tensor channels_mean_313_cast = reduce_mean(axes = var_10264, keep_dims = var_6860, x = inputs_313_cast)[name = tensor("channels_mean_313_cast")]; + tensor zero_mean_313_cast = sub(x = inputs_313_cast, y = channels_mean_313_cast)[name = tensor("zero_mean_313_cast")]; + tensor zero_mean_sq_313_cast = mul(x = zero_mean_313_cast, y = zero_mean_313_cast)[name = tensor("zero_mean_sq_313_cast")]; + tensor var_10268 = const()[name = tensor("op_10268"), val = tensor([1])]; + tensor var_10269_cast = reduce_mean(axes = var_10268, keep_dims = var_6860, x = zero_mean_sq_313_cast)[name = tensor("op_10269_cast")]; + tensor var_10270_to_fp16 = const()[name = tensor("op_10270_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10271_cast = add(x = var_10269_cast, y = var_10270_to_fp16)[name = tensor("op_10271_cast")]; + tensor denom_313_epsilon_0_to_fp16 = const()[name = tensor("denom_313_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_313_cast = rsqrt(epsilon = denom_313_epsilon_0_to_fp16, x = var_10271_cast)[name = tensor("denom_313_cast")]; + tensor out_313_cast = mul(x = zero_mean_313_cast, y = denom_313_cast)[name = tensor("out_313_cast")]; + tensor var_10275_to_fp16 = const()[name = tensor("op_10275_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1017590400)))]; + tensor var_10276_cast = add(x = out_313_cast, y = var_10275_to_fp16)[name = tensor("op_10276_cast")]; + tensor var_10278_to_fp16 = const()[name = tensor("op_10278_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1017593024)))]; + tensor hidden_states_409_cast = mul(x = var_10276_cast, y = var_10278_to_fp16)[name = tensor("hidden_states_409_cast")]; + tensor var_10285 = const()[name = tensor("op_10285"), val = tensor([1, 1])]; + tensor var_10287 = const()[name = tensor("op_10287"), val = tensor([1, 1])]; + tensor q_209_pad_type_0 = const()[name = 
tensor("q_209_pad_type_0"), val = tensor("custom")]; + tensor q_209_pad_0 = const()[name = tensor("q_209_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1017595648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1018414912))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_209_cast = conv(dilations = var_10287, groups = var_6865, pad = q_209_pad_0, pad_type = q_209_pad_type_0, strides = var_10285, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_409_cast)[name = tensor("q_209_cast")]; + tensor var_10291 = const()[name = tensor("op_10291"), val = tensor([1, 1])]; + tensor var_10293 = const()[name = tensor("op_10293"), val = tensor([1, 1])]; + tensor k_209_pad_type_0 = const()[name = tensor("k_209_pad_type_0"), val = tensor("custom")]; + tensor k_209_pad_0 = const()[name = tensor("k_209_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1018415040))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1019234304))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_209_cast = conv(dilations = var_10293, groups = var_6865, pad = k_209_pad_0, pad_type = k_209_pad_type_0, strides = var_10291, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_409_cast)[name = tensor("k_209_cast")]; + tensor var_10297 = const()[name = tensor("op_10297"), val = tensor([1, 1])]; + tensor var_10299 = const()[name = tensor("op_10299"), val = tensor([1, 1])]; + tensor v_209_pad_type_0 = const()[name = tensor("v_209_pad_type_0"), val = tensor("custom")]; + tensor v_209_pad_0 = const()[name = tensor("v_209_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1019234432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1020463296))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_209_cast = conv(dilations = var_10299, groups = var_6865, pad = v_209_pad_0, pad_type = v_209_pad_type_0, strides = var_10297, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_409_cast)[name = tensor("v_209_cast")]; + tensor var_10303 = const()[name = tensor("op_10303"), val = tensor([2, 20, 64, -1])]; + tensor var_10304_cast = reshape(shape = var_10303, x = q_209_cast)[name = tensor("op_10304_cast")]; + tensor var_10305 = const()[name = tensor("op_10305"), val = tensor([2, 20, 64, -1])]; + tensor var_10306_cast = reshape(shape = var_10305, x = k_209_cast)[name = tensor("op_10306_cast")]; + tensor var_10307 = const()[name = tensor("op_10307"), val = tensor([2, 20, 64, 
-1])]; + tensor var_10308_cast = reshape(shape = var_10307, x = v_209_cast)[name = tensor("op_10308_cast")]; + tensor attn_weights_417_transpose_x_0 = const()[name = tensor("attn_weights_417_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_417_transpose_y_0 = const()[name = tensor("attn_weights_417_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_417_cast = matmul(transpose_x = attn_weights_417_transpose_x_0, transpose_y = attn_weights_417_transpose_y_0, x = var_10304_cast, y = var_10306_cast)[name = tensor("attn_weights_417_cast")]; + tensor attn_weights_419_cast = mul(x = attn_weights_417_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_419_cast")]; + tensor var_10312_cast = softmax(axis = var_6849, x = attn_weights_419_cast)[name = tensor("op_10312_cast")]; + tensor attn_209_transpose_x_0 = const()[name = tensor("attn_209_transpose_x_0"), val = tensor(false)]; + tensor attn_209_transpose_y_0 = const()[name = tensor("attn_209_transpose_y_0"), val = tensor(true)]; + tensor attn_209_cast = matmul(transpose_x = attn_209_transpose_x_0, transpose_y = attn_209_transpose_y_0, x = var_10308_cast, y = var_10312_cast)[name = tensor("attn_209_cast")]; + tensor var_10316 = const()[name = tensor("op_10316"), val = tensor([2, 1280, 1, -1])]; + tensor input_601_cast = reshape(shape = var_10316, x = attn_209_cast)[name = tensor("input_601_cast")]; + tensor var_10321 = const()[name = tensor("op_10321"), val = tensor([1, 1])]; + tensor var_10323 = const()[name = tensor("op_10323"), val = tensor([1, 1])]; + tensor var_10325_pad_type_0 = const()[name = tensor("op_10325_pad_type_0"), val = tensor("custom")]; + tensor var_10325_pad_0 = const()[name = tensor("op_10325_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1020463488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021692352))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021692544)))]; + tensor var_10325_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_10323, groups = var_6865, pad = var_10325_pad_0, pad_type = var_10325_pad_type_0, strides = var_10321, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_601_cast)[name = tensor("op_10325_cast")]; + tensor inputs_315_cast = add(x = var_10325_cast, y = inputs_313_cast)[name = tensor("inputs_315_cast")]; + tensor var_10329 = const()[name = tensor("op_10329"), val = tensor([1])]; + tensor channels_mean_315_cast = reduce_mean(axes = var_10329, keep_dims = var_6860, x = inputs_315_cast)[name = tensor("channels_mean_315_cast")]; + tensor zero_mean_315_cast = sub(x = inputs_315_cast, y = channels_mean_315_cast)[name = tensor("zero_mean_315_cast")]; + tensor zero_mean_sq_315_cast = mul(x = zero_mean_315_cast, y = zero_mean_315_cast)[name = tensor("zero_mean_sq_315_cast")]; + tensor var_10333 = const()[name = tensor("op_10333"), val = tensor([1])]; + tensor 
var_10334_cast = reduce_mean(axes = var_10333, keep_dims = var_6860, x = zero_mean_sq_315_cast)[name = tensor("op_10334_cast")]; + tensor var_10335_to_fp16 = const()[name = tensor("op_10335_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10336_cast = add(x = var_10334_cast, y = var_10335_to_fp16)[name = tensor("op_10336_cast")]; + tensor denom_315_epsilon_0_to_fp16 = const()[name = tensor("denom_315_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_315_cast = rsqrt(epsilon = denom_315_epsilon_0_to_fp16, x = var_10336_cast)[name = tensor("denom_315_cast")]; + tensor out_315_cast = mul(x = zero_mean_315_cast, y = denom_315_cast)[name = tensor("out_315_cast")]; + tensor var_10340_to_fp16 = const()[name = tensor("op_10340_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021695168)))]; + tensor var_10341_cast = add(x = out_315_cast, y = var_10340_to_fp16)[name = tensor("op_10341_cast")]; + tensor var_10343_to_fp16 = const()[name = tensor("op_10343_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021697792)))]; + tensor hidden_states_411_cast = mul(x = var_10341_cast, y = var_10343_to_fp16)[name = tensor("hidden_states_411_cast")]; + tensor var_10350 = const()[name = tensor("op_10350"), val = tensor([1, 1])]; + tensor var_10352 = const()[name = tensor("op_10352"), val = tensor([1, 1])]; + tensor q_211_pad_type_0 = const()[name = tensor("q_211_pad_type_0"), val = tensor("custom")]; + tensor q_211_pad_0 = const()[name = tensor("q_211_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021700416))), lut = tensor([-0x1.a5p-6, -0x1.048p-7, 0x1.054p-7, 0x1.a54p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_211_cast = conv(dilations = var_10352, groups = var_6865, pad = q_211_pad_0, pad_type = q_211_pad_type_0, strides = var_10350, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_411_cast)[name = tensor("q_211_cast")]; + tensor var_10356 = const()[name = tensor("op_10356"), val = tensor([1, 1])]; + tensor var_10358 = const()[name = tensor("op_10358"), val = tensor([1, 1])]; + tensor k_211_pad_type_0 = const()[name = tensor("k_211_pad_type_0"), val = tensor("custom")]; + tensor k_211_pad_0 = const()[name = tensor("k_211_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1022110080))), lut = tensor([-0x1.404p-6, -0x1.808p-8, 0x1.7ccp-8, 0x1.3f4p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_211_cast = conv(dilations = var_10358, groups = var_6865, pad = k_211_pad_0, pad_type = k_211_pad_type_0, strides = var_10356, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_211_cast")]; + tensor var_10362 = const()[name = tensor("op_10362"), val = tensor([1, 1])]; + tensor var_10364 = const()[name = tensor("op_10364"), val = tensor([1, 1])]; + tensor 
v_211_pad_type_0 = const()[name = tensor("v_211_pad_type_0"), val = tensor("custom")]; + tensor v_211_pad_0 = const()[name = tensor("v_211_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1022765504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1024076288))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_211_cast = conv(dilations = var_10364, groups = var_6865, pad = v_211_pad_0, pad_type = v_211_pad_type_0, strides = var_10362, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_211_cast")]; + tensor var_10368 = const()[name = tensor("op_10368"), val = tensor([2, 20, 64, -1])]; + tensor var_10369_cast = reshape(shape = var_10368, x = q_211_cast)[name = tensor("op_10369_cast")]; + tensor var_10370 = const()[name = tensor("op_10370"), val = tensor([2, 20, 64, -1])]; + tensor var_10371_cast = reshape(shape = var_10370, x = k_211_cast)[name = tensor("op_10371_cast")]; + tensor var_10372 = const()[name = tensor("op_10372"), val = tensor([2, 20, 64, -1])]; + tensor var_10373_cast = reshape(shape = var_10372, x = v_211_cast)[name = tensor("op_10373_cast")]; + tensor attn_weights_421_transpose_x_0 = const()[name = tensor("attn_weights_421_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_421_transpose_y_0 = const()[name = tensor("attn_weights_421_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_421_cast = matmul(transpose_x = attn_weights_421_transpose_x_0, transpose_y = attn_weights_421_transpose_y_0, x = var_10369_cast, y = var_10371_cast)[name = tensor("attn_weights_421_cast")]; + tensor attn_weights_423_cast = mul(x = attn_weights_421_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_423_cast")]; + tensor var_10377_cast = softmax(axis = var_6849, x = attn_weights_423_cast)[name = tensor("op_10377_cast")]; + tensor attn_211_transpose_x_0 = const()[name = tensor("attn_211_transpose_x_0"), val = tensor(false)]; + tensor attn_211_transpose_y_0 = const()[name = tensor("attn_211_transpose_y_0"), val = tensor(true)]; + tensor attn_211_cast = matmul(transpose_x = attn_211_transpose_x_0, transpose_y = attn_211_transpose_y_0, x = var_10373_cast, y = var_10377_cast)[name = tensor("attn_211_cast")]; + tensor var_10381 = const()[name = tensor("op_10381"), val = tensor([2, 1280, 1, -1])]; + tensor input_603_cast = reshape(shape = var_10381, x = attn_211_cast)[name = tensor("input_603_cast")]; + tensor var_10386 = const()[name = tensor("op_10386"), val = tensor([1, 1])]; + tensor var_10388 = const()[name = tensor("op_10388"), val = tensor([1, 1])]; + tensor var_10390_pad_type_0 = const()[name = tensor("op_10390_pad_type_0"), val = tensor("custom")]; + tensor var_10390_pad_0 = const()[name = tensor("op_10390_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1024076416))), lut = tensor([-0x1.f0cp-7, -0x1.2bp-8, 0x1.2b8p-8, 0x1.f08p-7]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = 
tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1024486080)))]; + tensor var_10390_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_10388, groups = var_6865, pad = var_10390_pad_0, pad_type = var_10390_pad_type_0, strides = var_10386, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_603_cast)[name = tensor("op_10390_cast")]; + tensor inputs_317_cast = add(x = var_10390_cast, y = inputs_315_cast)[name = tensor("inputs_317_cast")]; + tensor var_10394 = const()[name = tensor("op_10394"), val = tensor([1])]; + tensor channels_mean_317_cast = reduce_mean(axes = var_10394, keep_dims = var_6860, x = inputs_317_cast)[name = tensor("channels_mean_317_cast")]; + tensor zero_mean_317_cast = sub(x = inputs_317_cast, y = channels_mean_317_cast)[name = tensor("zero_mean_317_cast")]; + tensor zero_mean_sq_317_cast = mul(x = zero_mean_317_cast, y = zero_mean_317_cast)[name = tensor("zero_mean_sq_317_cast")]; + tensor var_10398 = const()[name = tensor("op_10398"), val = tensor([1])]; + tensor var_10399_cast = reduce_mean(axes = var_10398, keep_dims = var_6860, x = zero_mean_sq_317_cast)[name = tensor("op_10399_cast")]; + tensor var_10400_to_fp16 = const()[name = tensor("op_10400_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10401_cast = add(x = var_10399_cast, y = var_10400_to_fp16)[name = tensor("op_10401_cast")]; + tensor denom_317_epsilon_0_to_fp16 = const()[name = tensor("denom_317_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_317_cast = rsqrt(epsilon = denom_317_epsilon_0_to_fp16, x = var_10401_cast)[name = tensor("denom_317_cast")]; + tensor out_317_cast = mul(x = zero_mean_317_cast, y = denom_317_cast)[name = tensor("out_317_cast")]; + tensor var_10405_to_fp16 = const()[name = tensor("op_10405_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1024488704)))]; + tensor var_10406_cast = add(x = out_317_cast, y = var_10405_to_fp16)[name = tensor("op_10406_cast")]; + tensor var_10408_to_fp16 = const()[name = tensor("op_10408_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1024491328)))]; + tensor input_605_cast = mul(x = var_10406_cast, y = var_10408_to_fp16)[name = tensor("input_605_cast")]; + tensor var_10416 = const()[name = tensor("op_10416"), val = tensor([1, 1])]; + tensor var_10418 = const()[name = tensor("op_10418"), val = tensor([1, 1])]; + tensor var_10420_pad_type_0 = const()[name = tensor("op_10420_pad_type_0"), val = tensor("custom")]; + tensor var_10420_pad_0 = const()[name = tensor("op_10420_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1024493952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1034324416))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1034324608)))]; + tensor var_10420_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_10418, groups = var_6865, pad = var_10420_pad_0, pad_type = var_10420_pad_type_0, strides = var_10416, weight = up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_605_cast)[name = tensor("op_10420_cast")]; + tensor var_10421_split_sizes_0 = const()[name = tensor("op_10421_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10421_axis_0 = const()[name = tensor("op_10421_axis_0"), val = tensor(1)]; + tensor var_10421_cast_0, tensor var_10421_cast_1 = split(axis = var_10421_axis_0, split_sizes = var_10421_split_sizes_0, x = var_10420_cast)[name = tensor("op_10421_cast")]; + tensor var_10423_mode_0 = const()[name = tensor("op_10423_mode_0"), val = tensor("EXACT")]; + tensor var_10423_cast = gelu(mode = var_10423_mode_0, x = var_10421_cast_1)[name = tensor("op_10423_cast")]; + tensor input_607_cast = mul(x = var_10421_cast_0, y = var_10423_cast)[name = tensor("input_607_cast")]; + tensor var_10427 = const()[name = tensor("op_10427"), val = tensor([1, 1])]; + tensor var_10429 = const()[name = tensor("op_10429"), val = tensor([1, 1])]; + tensor var_10431_pad_type_0 = const()[name = tensor("op_10431_pad_type_0"), val = tensor("custom")]; + tensor var_10431_pad_0 = const()[name = tensor("op_10431_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1034345152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039260416))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039260608)))]; + tensor var_10431_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_10429, groups = var_6865, pad = var_10431_pad_0, pad_type = var_10431_pad_type_0, strides = var_10427, weight = up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_607_cast)[name = tensor("op_10431_cast")]; + tensor inputs_319_cast = add(x = var_10431_cast, y = inputs_317_cast)[name = tensor("inputs_319_cast")]; + tensor var_10441 = const()[name = tensor("op_10441"), val = tensor([1])]; + tensor channels_mean_319_cast = reduce_mean(axes = var_10441, keep_dims = var_6860, x = inputs_319_cast)[name = tensor("channels_mean_319_cast")]; + tensor zero_mean_319_cast = sub(x = inputs_319_cast, y = channels_mean_319_cast)[name = tensor("zero_mean_319_cast")]; + tensor zero_mean_sq_319_cast = mul(x = zero_mean_319_cast, y = zero_mean_319_cast)[name = tensor("zero_mean_sq_319_cast")]; + tensor var_10445 = const()[name = tensor("op_10445"), val = tensor([1])]; + tensor var_10446_cast = reduce_mean(axes = var_10445, keep_dims = var_6860, x = zero_mean_sq_319_cast)[name = tensor("op_10446_cast")]; + tensor var_10447_to_fp16 = const()[name 
= tensor("op_10447_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10448_cast = add(x = var_10446_cast, y = var_10447_to_fp16)[name = tensor("op_10448_cast")]; + tensor denom_319_epsilon_0_to_fp16 = const()[name = tensor("denom_319_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_319_cast = rsqrt(epsilon = denom_319_epsilon_0_to_fp16, x = var_10448_cast)[name = tensor("denom_319_cast")]; + tensor out_319_cast = mul(x = zero_mean_319_cast, y = denom_319_cast)[name = tensor("out_319_cast")]; + tensor var_10452_to_fp16 = const()[name = tensor("op_10452_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039263232)))]; + tensor var_10453_cast = add(x = out_319_cast, y = var_10452_to_fp16)[name = tensor("op_10453_cast")]; + tensor var_10455_to_fp16 = const()[name = tensor("op_10455_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039265856)))]; + tensor hidden_states_415_cast = mul(x = var_10453_cast, y = var_10455_to_fp16)[name = tensor("hidden_states_415_cast")]; + tensor var_10462 = const()[name = tensor("op_10462"), val = tensor([1, 1])]; + tensor var_10464 = const()[name = tensor("op_10464"), val = tensor([1, 1])]; + tensor q_213_pad_type_0 = const()[name = tensor("q_213_pad_type_0"), val = tensor("custom")]; + tensor q_213_pad_0 = const()[name = tensor("q_213_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039268480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040087744))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_213_cast = conv(dilations = var_10464, groups = var_6865, pad = q_213_pad_0, pad_type = q_213_pad_type_0, strides = var_10462, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_415_cast)[name = tensor("q_213_cast")]; + tensor var_10468 = const()[name = tensor("op_10468"), val = tensor([1, 1])]; + tensor var_10470 = const()[name = tensor("op_10470"), val = tensor([1, 1])]; + tensor k_213_pad_type_0 = const()[name = tensor("k_213_pad_type_0"), val = tensor("custom")]; + tensor k_213_pad_0 = const()[name = tensor("k_213_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040087872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040907136))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_213_cast = conv(dilations = var_10470, groups = var_6865, pad = k_213_pad_0, pad_type = k_213_pad_type_0, strides = var_10468, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_415_cast)[name = tensor("k_213_cast")]; + tensor var_10474 = const()[name = tensor("op_10474"), val = tensor([1, 1])]; + tensor var_10476 = const()[name = tensor("op_10476"), val = tensor([1, 1])]; + tensor v_213_pad_type_0 = const()[name = tensor("v_213_pad_type_0"), val = tensor("custom")]; + tensor 
v_213_pad_0 = const()[name = tensor("v_213_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040907264))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1042136128))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_213_cast = conv(dilations = var_10476, groups = var_6865, pad = v_213_pad_0, pad_type = v_213_pad_type_0, strides = var_10474, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_415_cast)[name = tensor("v_213_cast")]; + tensor var_10480 = const()[name = tensor("op_10480"), val = tensor([2, 20, 64, -1])]; + tensor var_10481_cast = reshape(shape = var_10480, x = q_213_cast)[name = tensor("op_10481_cast")]; + tensor var_10482 = const()[name = tensor("op_10482"), val = tensor([2, 20, 64, -1])]; + tensor var_10483_cast = reshape(shape = var_10482, x = k_213_cast)[name = tensor("op_10483_cast")]; + tensor var_10484 = const()[name = tensor("op_10484"), val = tensor([2, 20, 64, -1])]; + tensor var_10485_cast = reshape(shape = var_10484, x = v_213_cast)[name = tensor("op_10485_cast")]; + tensor attn_weights_425_transpose_x_0 = const()[name = tensor("attn_weights_425_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_425_transpose_y_0 = const()[name = tensor("attn_weights_425_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_425_cast = matmul(transpose_x = attn_weights_425_transpose_x_0, transpose_y = attn_weights_425_transpose_y_0, x = var_10481_cast, y = var_10483_cast)[name = tensor("attn_weights_425_cast")]; + tensor attn_weights_427_cast = mul(x = attn_weights_425_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_427_cast")]; + tensor var_10489_cast = softmax(axis = var_6849, x = attn_weights_427_cast)[name = tensor("op_10489_cast")]; + tensor attn_213_transpose_x_0 = const()[name = tensor("attn_213_transpose_x_0"), val = tensor(false)]; + tensor attn_213_transpose_y_0 = const()[name = tensor("attn_213_transpose_y_0"), val = tensor(true)]; + tensor attn_213_cast = matmul(transpose_x = attn_213_transpose_x_0, transpose_y = attn_213_transpose_y_0, x = var_10485_cast, y = var_10489_cast)[name = tensor("attn_213_cast")]; + tensor var_10493 = const()[name = tensor("op_10493"), val = tensor([2, 1280, 1, -1])]; + tensor input_609_cast = reshape(shape = var_10493, x = attn_213_cast)[name = tensor("input_609_cast")]; + tensor var_10498 = const()[name = tensor("op_10498"), val = tensor([1, 1])]; + tensor var_10500 = const()[name = tensor("op_10500"), val = tensor([1, 1])]; + tensor var_10502_pad_type_0 = const()[name = tensor("op_10502_pad_type_0"), val = tensor("custom")]; + tensor var_10502_pad_0 = const()[name = tensor("op_10502_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1042136320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1043365184))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor 
up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1043365376)))]; + tensor var_10502_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_10500, groups = var_6865, pad = var_10502_pad_0, pad_type = var_10502_pad_type_0, strides = var_10498, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_609_cast)[name = tensor("op_10502_cast")]; + tensor inputs_321_cast = add(x = var_10502_cast, y = inputs_319_cast)[name = tensor("inputs_321_cast")]; + tensor var_10506 = const()[name = tensor("op_10506"), val = tensor([1])]; + tensor channels_mean_321_cast = reduce_mean(axes = var_10506, keep_dims = var_6860, x = inputs_321_cast)[name = tensor("channels_mean_321_cast")]; + tensor zero_mean_321_cast = sub(x = inputs_321_cast, y = channels_mean_321_cast)[name = tensor("zero_mean_321_cast")]; + tensor zero_mean_sq_321_cast = mul(x = zero_mean_321_cast, y = zero_mean_321_cast)[name = tensor("zero_mean_sq_321_cast")]; + tensor var_10510 = const()[name = tensor("op_10510"), val = tensor([1])]; + tensor var_10511_cast = reduce_mean(axes = var_10510, keep_dims = var_6860, x = zero_mean_sq_321_cast)[name = tensor("op_10511_cast")]; + tensor var_10512_to_fp16 = const()[name = tensor("op_10512_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10513_cast = add(x = var_10511_cast, y = var_10512_to_fp16)[name = tensor("op_10513_cast")]; + tensor denom_321_epsilon_0_to_fp16 = const()[name = tensor("denom_321_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_321_cast = rsqrt(epsilon = denom_321_epsilon_0_to_fp16, x = var_10513_cast)[name = tensor("denom_321_cast")]; + tensor out_321_cast = mul(x = zero_mean_321_cast, y = denom_321_cast)[name = tensor("out_321_cast")]; + tensor var_10517_to_fp16 = const()[name = tensor("op_10517_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1043368000)))]; + tensor var_10518_cast = add(x = out_321_cast, y = var_10517_to_fp16)[name = tensor("op_10518_cast")]; + tensor var_10520_to_fp16 = const()[name = tensor("op_10520_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1043370624)))]; + tensor hidden_states_417_cast = mul(x = var_10518_cast, y = var_10520_to_fp16)[name = tensor("hidden_states_417_cast")]; + tensor var_10527 = const()[name = tensor("op_10527"), val = tensor([1, 1])]; + tensor var_10529 = const()[name = tensor("op_10529"), val = tensor([1, 1])]; + tensor q_215_pad_type_0 = const()[name = tensor("q_215_pad_type_0"), val = tensor("custom")]; + tensor q_215_pad_0 = const()[name = tensor("q_215_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1043373248))), lut = tensor([-0x1.8bcp-6, -0x1.eep-8, 0x1.ef4p-8, 0x1.8c4p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_215_cast = conv(dilations = var_10529, groups = var_6865, pad = q_215_pad_0, pad_type = q_215_pad_type_0, strides = var_10527, weight = 
up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_417_cast)[name = tensor("q_215_cast")]; + tensor var_10533 = const()[name = tensor("op_10533"), val = tensor([1, 1])]; + tensor var_10535 = const()[name = tensor("op_10535"), val = tensor([1, 1])]; + tensor k_215_pad_type_0 = const()[name = tensor("k_215_pad_type_0"), val = tensor("custom")]; + tensor k_215_pad_0 = const()[name = tensor("k_215_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1043782912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1045093696))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_215_cast = conv(dilations = var_10535, groups = var_6865, pad = k_215_pad_0, pad_type = k_215_pad_type_0, strides = var_10533, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_215_cast")]; + tensor var_10539 = const()[name = tensor("op_10539"), val = tensor([1, 1])]; + tensor var_10541 = const()[name = tensor("op_10541"), val = tensor([1, 1])]; + tensor v_215_pad_type_0 = const()[name = tensor("v_215_pad_type_0"), val = tensor("custom")]; + tensor v_215_pad_0 = const()[name = tensor("v_215_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1045093824))), lut = tensor([-0x1.45cp-6, -0x1.8p-8, 0x1.7fp-8, 0x1.454p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_215_cast = conv(dilations = var_10541, groups = var_6865, pad = v_215_pad_0, pad_type = v_215_pad_type_0, strides = var_10539, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_215_cast")]; + tensor var_10545 = const()[name = tensor("op_10545"), val = tensor([2, 20, 64, -1])]; + tensor var_10546_cast = reshape(shape = var_10545, x = q_215_cast)[name = tensor("op_10546_cast")]; + tensor var_10547 = const()[name = tensor("op_10547"), val = tensor([2, 20, 64, -1])]; + tensor var_10548_cast = reshape(shape = var_10547, x = k_215_cast)[name = tensor("op_10548_cast")]; + tensor var_10549 = const()[name = tensor("op_10549"), val = tensor([2, 20, 64, -1])]; + tensor var_10550_cast = reshape(shape = var_10549, x = v_215_cast)[name = tensor("op_10550_cast")]; + tensor attn_weights_429_transpose_x_0 = const()[name = tensor("attn_weights_429_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_429_transpose_y_0 = const()[name = tensor("attn_weights_429_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_429_cast = matmul(transpose_x = attn_weights_429_transpose_x_0, transpose_y = attn_weights_429_transpose_y_0, x = var_10546_cast, y = var_10548_cast)[name = tensor("attn_weights_429_cast")]; + tensor attn_weights_431_cast = mul(x = attn_weights_429_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_431_cast")]; + tensor var_10554_cast = softmax(axis = var_6849, x = attn_weights_431_cast)[name = 
tensor("op_10554_cast")]; + tensor attn_215_transpose_x_0 = const()[name = tensor("attn_215_transpose_x_0"), val = tensor(false)]; + tensor attn_215_transpose_y_0 = const()[name = tensor("attn_215_transpose_y_0"), val = tensor(true)]; + tensor attn_215_cast = matmul(transpose_x = attn_215_transpose_x_0, transpose_y = attn_215_transpose_y_0, x = var_10550_cast, y = var_10554_cast)[name = tensor("attn_215_cast")]; + tensor var_10558 = const()[name = tensor("op_10558"), val = tensor([2, 1280, 1, -1])]; + tensor input_611_cast = reshape(shape = var_10558, x = attn_215_cast)[name = tensor("input_611_cast")]; + tensor var_10563 = const()[name = tensor("op_10563"), val = tensor([1, 1])]; + tensor var_10565 = const()[name = tensor("op_10565"), val = tensor([1, 1])]; + tensor var_10567_pad_type_0 = const()[name = tensor("op_10567_pad_type_0"), val = tensor("custom")]; + tensor var_10567_pad_0 = const()[name = tensor("op_10567_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1045749248))), lut = tensor([-0x1.9fcp-7, -0x1.f98p-9, 0x1.f8cp-9, 0x1.ap-7]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1046158912)))]; + tensor var_10567_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_10565, groups = var_6865, pad = var_10567_pad_0, pad_type = var_10567_pad_type_0, strides = var_10563, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_611_cast)[name = tensor("op_10567_cast")]; + tensor inputs_323_cast = add(x = var_10567_cast, y = inputs_321_cast)[name = tensor("inputs_323_cast")]; + tensor var_10571 = const()[name = tensor("op_10571"), val = tensor([1])]; + tensor channels_mean_323_cast = reduce_mean(axes = var_10571, keep_dims = var_6860, x = inputs_323_cast)[name = tensor("channels_mean_323_cast")]; + tensor zero_mean_323_cast = sub(x = inputs_323_cast, y = channels_mean_323_cast)[name = tensor("zero_mean_323_cast")]; + tensor zero_mean_sq_323_cast = mul(x = zero_mean_323_cast, y = zero_mean_323_cast)[name = tensor("zero_mean_sq_323_cast")]; + tensor var_10575 = const()[name = tensor("op_10575"), val = tensor([1])]; + tensor var_10576_cast = reduce_mean(axes = var_10575, keep_dims = var_6860, x = zero_mean_sq_323_cast)[name = tensor("op_10576_cast")]; + tensor var_10577_to_fp16 = const()[name = tensor("op_10577_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10578_cast = add(x = var_10576_cast, y = var_10577_to_fp16)[name = tensor("op_10578_cast")]; + tensor denom_323_epsilon_0_to_fp16 = const()[name = tensor("denom_323_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_323_cast = rsqrt(epsilon = denom_323_epsilon_0_to_fp16, x = var_10578_cast)[name = tensor("denom_323_cast")]; + tensor out_323_cast = mul(x = zero_mean_323_cast, y = denom_323_cast)[name = tensor("out_323_cast")]; + tensor var_10582_to_fp16 = const()[name = tensor("op_10582_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1046161536)))]; + tensor var_10583_cast = add(x = out_323_cast, y = var_10582_to_fp16)[name = tensor("op_10583_cast")]; + tensor var_10585_to_fp16 = const()[name = tensor("op_10585_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1046164160)))]; + tensor input_613_cast = mul(x = var_10583_cast, y = var_10585_to_fp16)[name = tensor("input_613_cast")]; + tensor var_10593 = const()[name = tensor("op_10593"), val = tensor([1, 1])]; + tensor var_10595 = const()[name = tensor("op_10595"), val = tensor([1, 1])]; + tensor var_10597_pad_type_0 = const()[name = tensor("op_10597_pad_type_0"), val = tensor("custom")]; + tensor var_10597_pad_0 = const()[name = tensor("op_10597_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1046166784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1055997248))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1055997440)))]; + tensor var_10597_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_10595, groups = var_6865, pad = var_10597_pad_0, pad_type = var_10597_pad_type_0, strides = var_10593, weight = up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_613_cast)[name = tensor("op_10597_cast")]; + tensor var_10598_split_sizes_0 = const()[name = tensor("op_10598_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10598_axis_0 = const()[name = tensor("op_10598_axis_0"), val = tensor(1)]; + tensor var_10598_cast_0, tensor var_10598_cast_1 = split(axis = var_10598_axis_0, split_sizes = var_10598_split_sizes_0, x = var_10597_cast)[name = tensor("op_10598_cast")]; + tensor var_10600_mode_0 = const()[name = tensor("op_10600_mode_0"), val = tensor("EXACT")]; + tensor var_10600_cast = gelu(mode = var_10600_mode_0, x = var_10598_cast_1)[name = tensor("op_10600_cast")]; + tensor input_615_cast = mul(x = var_10598_cast_0, y = var_10600_cast)[name = tensor("input_615_cast")]; + tensor var_10604 = const()[name = tensor("op_10604"), val = tensor([1, 1])]; + tensor var_10606 = const()[name = tensor("op_10606"), val = tensor([1, 1])]; + tensor var_10608_pad_type_0 = const()[name = tensor("op_10608_pad_type_0"), val = tensor("custom")]; + tensor var_10608_pad_0 = const()[name = tensor("op_10608_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1056017984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1060933248))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name 
= tensor("up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1060933440)))]; + tensor var_10608_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_10606, groups = var_6865, pad = var_10608_pad_0, pad_type = var_10608_pad_type_0, strides = var_10604, weight = up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_615_cast)[name = tensor("op_10608_cast")]; + tensor hidden_states_421_cast = add(x = var_10608_cast, y = inputs_323_cast)[name = tensor("hidden_states_421_cast")]; + tensor var_10610 = const()[name = tensor("op_10610"), val = tensor([2, 1280, 32, 32])]; + tensor input_617_cast = reshape(shape = var_10610, x = hidden_states_421_cast)[name = tensor("input_617_cast")]; + tensor var_10614 = const()[name = tensor("op_10614"), val = tensor([1, 1])]; + tensor var_10616 = const()[name = tensor("op_10616"), val = tensor([1, 1])]; + tensor hidden_states_423_pad_type_0 = const()[name = tensor("hidden_states_423_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_423_pad_0 = const()[name = tensor("hidden_states_423_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1060936064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062574528))), name = tensor("up_blocks_0_attentions_1_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062575104)))]; + tensor hidden_states_423_cast = conv(bias = up_blocks_0_attentions_1_proj_out_bias_to_fp16, dilations = var_10616, groups = var_6865, pad = hidden_states_423_pad_0, pad_type = hidden_states_423_pad_type_0, strides = var_10614, weight = up_blocks_0_attentions_1_proj_out_weight_to_fp16_palettized, x = input_617_cast)[name = tensor("hidden_states_423_cast")]; + tensor hidden_states_425_cast = add(x = hidden_states_423_cast, y = hidden_states_357_cast)[name = tensor("hidden_states_425_cast")]; + tensor input_619_interleave_0 = const()[name = tensor("input_619_interleave_0"), val = tensor(false)]; + tensor input_619_cast = concat(axis = var_6865, interleave = input_619_interleave_0, values = (hidden_states_425_cast, input_115_cast))[name = tensor("input_619_cast")]; + tensor reshape_108_shape_0 = const()[name = tensor("reshape_108_shape_0"), val = tensor([2, 32, 60, 32, 32])]; + tensor reshape_108_cast = reshape(shape = reshape_108_shape_0, x = input_619_cast)[name = tensor("reshape_108_cast")]; + tensor reduce_mean_81_axes_0 = const()[name = tensor("reduce_mean_81_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_81_keep_dims_0 = const()[name = tensor("reduce_mean_81_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_81_cast = reduce_mean(axes = reduce_mean_81_axes_0, keep_dims = reduce_mean_81_keep_dims_0, x = reshape_108_cast)[name = tensor("reduce_mean_81_cast")]; + tensor sub_54_cast = sub(x = reshape_108_cast, y = reduce_mean_81_cast)[name = tensor("sub_54_cast")]; + tensor square_27_cast = square(x = sub_54_cast)[name = tensor("square_27_cast")]; + tensor reduce_mean_83_axes_0 = 
const()[name = tensor("reduce_mean_83_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_83_keep_dims_0 = const()[name = tensor("reduce_mean_83_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_83_cast = reduce_mean(axes = reduce_mean_83_axes_0, keep_dims = reduce_mean_83_keep_dims_0, x = square_27_cast)[name = tensor("reduce_mean_83_cast")]; + tensor add_54_y_0_to_fp16 = const()[name = tensor("add_54_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_54_cast = add(x = reduce_mean_83_cast, y = add_54_y_0_to_fp16)[name = tensor("add_54_cast")]; + tensor sqrt_27_cast = sqrt(x = add_54_cast)[name = tensor("sqrt_27_cast")]; + tensor real_div_27_cast = real_div(x = sub_54_cast, y = sqrt_27_cast)[name = tensor("real_div_27_cast")]; + tensor reshape_109_shape_0 = const()[name = tensor("reshape_109_shape_0"), val = tensor([2, 1920, 32, 32])]; + tensor reshape_109_cast = reshape(shape = reshape_109_shape_0, x = real_div_27_cast)[name = tensor("reshape_109_cast")]; + tensor add_55_mean_0_to_fp16 = const()[name = tensor("add_55_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062577728)))]; + tensor add_55_variance_0_to_fp16 = const()[name = tensor("add_55_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062581632)))]; + tensor add_55_gamma_0_to_fp16 = const()[name = tensor("add_55_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062585536)))]; + tensor add_55_beta_0_to_fp16 = const()[name = tensor("add_55_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062589440)))]; + tensor add_55_epsilon_0_to_fp16 = const()[name = tensor("add_55_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_55_cast = batch_norm(beta = add_55_beta_0_to_fp16, epsilon = add_55_epsilon_0_to_fp16, gamma = add_55_gamma_0_to_fp16, mean = add_55_mean_0_to_fp16, variance = add_55_variance_0_to_fp16, x = reshape_109_cast)[name = tensor("add_55_cast")]; + tensor input_623_cast = silu(x = add_55_cast)[name = tensor("input_623_cast")]; + tensor var_10634 = const()[name = tensor("op_10634"), val = tensor([1, 1])]; + tensor var_10636 = const()[name = tensor("op_10636"), val = tensor([1, 1])]; + tensor hidden_states_427_pad_type_0 = const()[name = tensor("hidden_states_427_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_427_pad_0 = const()[name = tensor("hidden_states_427_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_2_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062593344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1079182208))), name = tensor("up_blocks_0_resnets_2_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1920, 3, 3])]; + tensor up_blocks_0_resnets_2_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_2_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1079182400)))]; + tensor hidden_states_427_cast = conv(bias = up_blocks_0_resnets_2_conv1_bias_to_fp16, dilations = var_10636, groups = var_6865, pad = hidden_states_427_pad_0, pad_type = hidden_states_427_pad_type_0, strides = var_10634, weight = up_blocks_0_resnets_2_conv1_weight_to_fp16_palettized, x = input_623_cast)[name = tensor("hidden_states_427_cast")]; + 
tensor var_10642 = const()[name = tensor("op_10642"), val = tensor([1, 1])]; + tensor var_10644 = const()[name = tensor("op_10644"), val = tensor([1, 1])]; + tensor temb_21_pad_type_0 = const()[name = tensor("temb_21_pad_type_0"), val = tensor("custom")]; + tensor temb_21_pad_0 = const()[name = tensor("temb_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_2_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1079185024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080413888))), name = tensor("up_blocks_0_resnets_2_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_resnets_2_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_2_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080414080)))]; + tensor temb_21_cast = conv(bias = up_blocks_0_resnets_2_time_emb_proj_bias_to_fp16, dilations = var_10644, groups = var_6865, pad = temb_21_pad_0, pad_type = temb_21_pad_type_0, strides = var_10642, weight = up_blocks_0_resnets_2_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_21_cast")]; + tensor input_627_cast = add(x = hidden_states_427_cast, y = temb_21_cast)[name = tensor("input_627_cast")]; + tensor reshape_112_shape_0 = const()[name = tensor("reshape_112_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_112_cast = reshape(shape = reshape_112_shape_0, x = input_627_cast)[name = tensor("reshape_112_cast")]; + tensor reduce_mean_84_axes_0 = const()[name = tensor("reduce_mean_84_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_84_keep_dims_0 = const()[name = tensor("reduce_mean_84_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_84_cast = reduce_mean(axes = reduce_mean_84_axes_0, keep_dims = reduce_mean_84_keep_dims_0, x = reshape_112_cast)[name = tensor("reduce_mean_84_cast")]; + tensor sub_56_cast = sub(x = reshape_112_cast, y = reduce_mean_84_cast)[name = tensor("sub_56_cast")]; + tensor square_28_cast = square(x = sub_56_cast)[name = tensor("square_28_cast")]; + tensor reduce_mean_86_axes_0 = const()[name = tensor("reduce_mean_86_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_86_keep_dims_0 = const()[name = tensor("reduce_mean_86_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_86_cast = reduce_mean(axes = reduce_mean_86_axes_0, keep_dims = reduce_mean_86_keep_dims_0, x = square_28_cast)[name = tensor("reduce_mean_86_cast")]; + tensor add_56_y_0_to_fp16 = const()[name = tensor("add_56_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_56_cast = add(x = reduce_mean_86_cast, y = add_56_y_0_to_fp16)[name = tensor("add_56_cast")]; + tensor sqrt_28_cast = sqrt(x = add_56_cast)[name = tensor("sqrt_28_cast")]; + tensor real_div_28_cast = real_div(x = sub_56_cast, y = sqrt_28_cast)[name = tensor("real_div_28_cast")]; + tensor reshape_113_shape_0 = const()[name = tensor("reshape_113_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_113_cast = reshape(shape = reshape_113_shape_0, x = real_div_28_cast)[name = tensor("reshape_113_cast")]; + tensor add_57_gamma_0_to_fp16 = const()[name = tensor("add_57_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080416704)))]; + tensor add_57_beta_0_to_fp16 = const()[name = tensor("add_57_beta_0_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080419328)))]; + tensor add_57_epsilon_0_to_fp16 = const()[name = tensor("add_57_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_57_cast = batch_norm(beta = add_57_beta_0_to_fp16, epsilon = add_57_epsilon_0_to_fp16, gamma = add_57_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_113_cast)[name = tensor("add_57_cast")]; + tensor input_631_cast = silu(x = add_57_cast)[name = tensor("input_631_cast")]; + tensor var_10654 = const()[name = tensor("op_10654"), val = tensor([1, 1])]; + tensor var_10656 = const()[name = tensor("op_10656"), val = tensor([1, 1])]; + tensor hidden_states_429_pad_type_0 = const()[name = tensor("hidden_states_429_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_429_pad_0 = const()[name = tensor("hidden_states_429_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_2_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080421952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1091481216))), name = tensor("up_blocks_0_resnets_2_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor up_blocks_0_resnets_2_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_2_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1091481408)))]; + tensor hidden_states_429_cast = conv(bias = up_blocks_0_resnets_2_conv2_bias_to_fp16, dilations = var_10656, groups = var_6865, pad = hidden_states_429_pad_0, pad_type = hidden_states_429_pad_type_0, strides = var_10654, weight = up_blocks_0_resnets_2_conv2_weight_to_fp16_palettized, x = input_631_cast)[name = tensor("hidden_states_429_cast")]; + tensor var_10661 = const()[name = tensor("op_10661"), val = tensor([1, 1])]; + tensor var_10663 = const()[name = tensor("op_10663"), val = tensor([1, 1])]; + tensor x_9_pad_type_0 = const()[name = tensor("x_9_pad_type_0"), val = tensor("custom")]; + tensor x_9_pad_0 = const()[name = tensor("x_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_2_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1091484032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1093941696))), name = tensor("up_blocks_0_resnets_2_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([1280, 1920, 1, 1])]; + tensor up_blocks_0_resnets_2_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_2_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1093942272)))]; + tensor x_9_cast = conv(bias = up_blocks_0_resnets_2_conv_shortcut_bias_to_fp16, dilations = var_10663, groups = var_6865, pad = x_9_pad_0, pad_type = x_9_pad_type_0, strides = var_10661, weight = up_blocks_0_resnets_2_conv_shortcut_weight_to_fp16_palettized, x = input_619_cast)[name = tensor("x_9_cast")]; + tensor hidden_states_431_cast = add(x = x_9_cast, y = hidden_states_429_cast)[name = tensor("hidden_states_431_cast")]; + tensor reshape_116_shape_0 = const()[name = tensor("reshape_116_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_116_cast = reshape(shape = reshape_116_shape_0, x = hidden_states_431_cast)[name 
= tensor("reshape_116_cast")]; + tensor reduce_mean_87_axes_0 = const()[name = tensor("reduce_mean_87_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_87_keep_dims_0 = const()[name = tensor("reduce_mean_87_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_87_cast = reduce_mean(axes = reduce_mean_87_axes_0, keep_dims = reduce_mean_87_keep_dims_0, x = reshape_116_cast)[name = tensor("reduce_mean_87_cast")]; + tensor sub_58_cast = sub(x = reshape_116_cast, y = reduce_mean_87_cast)[name = tensor("sub_58_cast")]; + tensor square_29_cast = square(x = sub_58_cast)[name = tensor("square_29_cast")]; + tensor reduce_mean_89_axes_0 = const()[name = tensor("reduce_mean_89_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_89_keep_dims_0 = const()[name = tensor("reduce_mean_89_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_89_cast = reduce_mean(axes = reduce_mean_89_axes_0, keep_dims = reduce_mean_89_keep_dims_0, x = square_29_cast)[name = tensor("reduce_mean_89_cast")]; + tensor add_58_y_0_to_fp16 = const()[name = tensor("add_58_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_58_cast = add(x = reduce_mean_89_cast, y = add_58_y_0_to_fp16)[name = tensor("add_58_cast")]; + tensor sqrt_29_cast = sqrt(x = add_58_cast)[name = tensor("sqrt_29_cast")]; + tensor real_div_29_cast = real_div(x = sub_58_cast, y = sqrt_29_cast)[name = tensor("real_div_29_cast")]; + tensor reshape_117_shape_0 = const()[name = tensor("reshape_117_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_117_cast = reshape(shape = reshape_117_shape_0, x = real_div_29_cast)[name = tensor("reshape_117_cast")]; + tensor add_59_gamma_0_to_fp16 = const()[name = tensor("add_59_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1093944896)))]; + tensor add_59_beta_0_to_fp16 = const()[name = tensor("add_59_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1093947520)))]; + tensor add_59_epsilon_0_to_fp16 = const()[name = tensor("add_59_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_59_cast = batch_norm(beta = add_59_beta_0_to_fp16, epsilon = add_59_epsilon_0_to_fp16, gamma = add_59_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_117_cast)[name = tensor("add_59_cast")]; + tensor var_10701 = const()[name = tensor("op_10701"), val = tensor([1, 1])]; + tensor var_10703 = const()[name = tensor("op_10703"), val = tensor([1, 1])]; + tensor hidden_states_433_pad_type_0 = const()[name = tensor("hidden_states_433_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_433_pad_0 = const()[name = tensor("hidden_states_433_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1093950144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1095179008))), name = tensor("up_blocks_0_attentions_2_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1095179200)))]; + tensor hidden_states_433_cast = conv(bias = up_blocks_0_attentions_2_proj_in_bias_to_fp16, dilations = var_10703, groups = var_6865, pad = 
hidden_states_433_pad_0, pad_type = hidden_states_433_pad_type_0, strides = var_10701, weight = up_blocks_0_attentions_2_proj_in_weight_to_fp16_palettized, x = add_59_cast)[name = tensor("hidden_states_433_cast")]; + tensor var_10708 = const()[name = tensor("op_10708"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_325_cast = reshape(shape = var_10708, x = hidden_states_433_cast)[name = tensor("inputs_325_cast")]; + tensor var_10718 = const()[name = tensor("op_10718"), val = tensor([1])]; + tensor channels_mean_325_cast = reduce_mean(axes = var_10718, keep_dims = var_6860, x = inputs_325_cast)[name = tensor("channels_mean_325_cast")]; + tensor zero_mean_325_cast = sub(x = inputs_325_cast, y = channels_mean_325_cast)[name = tensor("zero_mean_325_cast")]; + tensor zero_mean_sq_325_cast = mul(x = zero_mean_325_cast, y = zero_mean_325_cast)[name = tensor("zero_mean_sq_325_cast")]; + tensor var_10722 = const()[name = tensor("op_10722"), val = tensor([1])]; + tensor var_10723_cast = reduce_mean(axes = var_10722, keep_dims = var_6860, x = zero_mean_sq_325_cast)[name = tensor("op_10723_cast")]; + tensor var_10724_to_fp16 = const()[name = tensor("op_10724_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10725_cast = add(x = var_10723_cast, y = var_10724_to_fp16)[name = tensor("op_10725_cast")]; + tensor denom_325_epsilon_0_to_fp16 = const()[name = tensor("denom_325_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_325_cast = rsqrt(epsilon = denom_325_epsilon_0_to_fp16, x = var_10725_cast)[name = tensor("denom_325_cast")]; + tensor out_325_cast = mul(x = zero_mean_325_cast, y = denom_325_cast)[name = tensor("out_325_cast")]; + tensor var_10729_to_fp16 = const()[name = tensor("op_10729_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1095181824)))]; + tensor var_10730_cast = add(x = out_325_cast, y = var_10729_to_fp16)[name = tensor("op_10730_cast")]; + tensor var_10732_to_fp16 = const()[name = tensor("op_10732_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1095184448)))]; + tensor hidden_states_435_cast = mul(x = var_10730_cast, y = var_10732_to_fp16)[name = tensor("hidden_states_435_cast")]; + tensor var_10739 = const()[name = tensor("op_10739"), val = tensor([1, 1])]; + tensor var_10741 = const()[name = tensor("op_10741"), val = tensor([1, 1])]; + tensor q_217_pad_type_0 = const()[name = tensor("q_217_pad_type_0"), val = tensor("custom")]; + tensor q_217_pad_0 = const()[name = tensor("q_217_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1095187072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096006336))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_217_cast = conv(dilations = var_10741, groups = var_6865, pad = q_217_pad_0, pad_type = q_217_pad_type_0, strides = var_10739, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_435_cast)[name = tensor("q_217_cast")]; + tensor var_10745 = const()[name = tensor("op_10745"), val = tensor([1, 1])]; + tensor var_10747 = const()[name = tensor("op_10747"), val = tensor([1, 1])]; + tensor k_217_pad_type_0 = const()[name = 
tensor("k_217_pad_type_0"), val = tensor("custom")]; + tensor k_217_pad_0 = const()[name = tensor("k_217_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096006464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096825728))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_217_cast = conv(dilations = var_10747, groups = var_6865, pad = k_217_pad_0, pad_type = k_217_pad_type_0, strides = var_10745, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_435_cast)[name = tensor("k_217_cast")]; + tensor var_10751 = const()[name = tensor("op_10751"), val = tensor([1, 1])]; + tensor var_10753 = const()[name = tensor("op_10753"), val = tensor([1, 1])]; + tensor v_217_pad_type_0 = const()[name = tensor("v_217_pad_type_0"), val = tensor("custom")]; + tensor v_217_pad_0 = const()[name = tensor("v_217_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096825856))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1098054720))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_217_cast = conv(dilations = var_10753, groups = var_6865, pad = v_217_pad_0, pad_type = v_217_pad_type_0, strides = var_10751, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_435_cast)[name = tensor("v_217_cast")]; + tensor var_10757 = const()[name = tensor("op_10757"), val = tensor([2, 20, 64, -1])]; + tensor var_10758_cast = reshape(shape = var_10757, x = q_217_cast)[name = tensor("op_10758_cast")]; + tensor var_10759 = const()[name = tensor("op_10759"), val = tensor([2, 20, 64, -1])]; + tensor var_10760_cast = reshape(shape = var_10759, x = k_217_cast)[name = tensor("op_10760_cast")]; + tensor var_10761 = const()[name = tensor("op_10761"), val = tensor([2, 20, 64, -1])]; + tensor var_10762_cast = reshape(shape = var_10761, x = v_217_cast)[name = tensor("op_10762_cast")]; + tensor attn_weights_433_transpose_x_0 = const()[name = tensor("attn_weights_433_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_433_transpose_y_0 = const()[name = tensor("attn_weights_433_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_433_cast = matmul(transpose_x = attn_weights_433_transpose_x_0, transpose_y = attn_weights_433_transpose_y_0, x = var_10758_cast, y = var_10760_cast)[name = tensor("attn_weights_433_cast")]; + tensor attn_weights_435_cast = mul(x = attn_weights_433_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_435_cast")]; + tensor var_10766_cast = softmax(axis = var_6849, x = attn_weights_435_cast)[name = tensor("op_10766_cast")]; + tensor attn_217_transpose_x_0 = const()[name = tensor("attn_217_transpose_x_0"), val = tensor(false)]; + tensor attn_217_transpose_y_0 = const()[name = tensor("attn_217_transpose_y_0"), val = tensor(true)]; + tensor attn_217_cast = matmul(transpose_x = attn_217_transpose_x_0, transpose_y = 
attn_217_transpose_y_0, x = var_10762_cast, y = var_10766_cast)[name = tensor("attn_217_cast")]; + tensor var_10770 = const()[name = tensor("op_10770"), val = tensor([2, 1280, 1, -1])]; + tensor input_635_cast = reshape(shape = var_10770, x = attn_217_cast)[name = tensor("input_635_cast")]; + tensor var_10775 = const()[name = tensor("op_10775"), val = tensor([1, 1])]; + tensor var_10777 = const()[name = tensor("op_10777"), val = tensor([1, 1])]; + tensor var_10779_pad_type_0 = const()[name = tensor("op_10779_pad_type_0"), val = tensor("custom")]; + tensor var_10779_pad_0 = const()[name = tensor("op_10779_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1098054912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1099283776))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1099283968)))]; + tensor var_10779_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_10777, groups = var_6865, pad = var_10779_pad_0, pad_type = var_10779_pad_type_0, strides = var_10775, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_635_cast)[name = tensor("op_10779_cast")]; + tensor inputs_327_cast = add(x = var_10779_cast, y = inputs_325_cast)[name = tensor("inputs_327_cast")]; + tensor var_10783 = const()[name = tensor("op_10783"), val = tensor([1])]; + tensor channels_mean_327_cast = reduce_mean(axes = var_10783, keep_dims = var_6860, x = inputs_327_cast)[name = tensor("channels_mean_327_cast")]; + tensor zero_mean_327_cast = sub(x = inputs_327_cast, y = channels_mean_327_cast)[name = tensor("zero_mean_327_cast")]; + tensor zero_mean_sq_327_cast = mul(x = zero_mean_327_cast, y = zero_mean_327_cast)[name = tensor("zero_mean_sq_327_cast")]; + tensor var_10787 = const()[name = tensor("op_10787"), val = tensor([1])]; + tensor var_10788_cast = reduce_mean(axes = var_10787, keep_dims = var_6860, x = zero_mean_sq_327_cast)[name = tensor("op_10788_cast")]; + tensor var_10789_to_fp16 = const()[name = tensor("op_10789_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10790_cast = add(x = var_10788_cast, y = var_10789_to_fp16)[name = tensor("op_10790_cast")]; + tensor denom_327_epsilon_0_to_fp16 = const()[name = tensor("denom_327_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_327_cast = rsqrt(epsilon = denom_327_epsilon_0_to_fp16, x = var_10790_cast)[name = tensor("denom_327_cast")]; + tensor out_327_cast = mul(x = zero_mean_327_cast, y = denom_327_cast)[name = tensor("out_327_cast")]; + tensor var_10794_to_fp16 = const()[name = tensor("op_10794_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1099286592)))]; + tensor var_10795_cast = add(x = out_327_cast, y = var_10794_to_fp16)[name = tensor("op_10795_cast")]; + tensor var_10797_to_fp16 = const()[name = tensor("op_10797_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1099289216)))]; + tensor hidden_states_437_cast = mul(x = var_10795_cast, y = var_10797_to_fp16)[name = tensor("hidden_states_437_cast")]; + tensor var_10804 = const()[name = tensor("op_10804"), val = tensor([1, 1])]; + tensor var_10806 = const()[name = tensor("op_10806"), val = tensor([1, 1])]; + tensor q_219_pad_type_0 = const()[name = tensor("q_219_pad_type_0"), val = tensor("custom")]; + tensor q_219_pad_0 = const()[name = tensor("q_219_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1099291840))), lut = tensor([-0x1.c78p-6, -0x1.154p-7, 0x1.148p-7, 0x1.c7p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_219_cast = conv(dilations = var_10806, groups = var_6865, pad = q_219_pad_0, pad_type = q_219_pad_type_0, strides = var_10804, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_437_cast)[name = tensor("q_219_cast")]; + tensor var_10810 = const()[name = tensor("op_10810"), val = tensor([1, 1])]; + tensor var_10812 = const()[name = tensor("op_10812"), val = tensor([1, 1])]; + tensor k_219_pad_type_0 = const()[name = tensor("k_219_pad_type_0"), val = tensor("custom")]; + tensor k_219_pad_0 = const()[name = tensor("k_219_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1099701504))), lut = tensor([-0x1.a8p-6, -0x1.ef8p-8, 0x1.f2cp-8, 0x1.a9p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_219_cast = conv(dilations = var_10812, groups = var_6865, pad = k_219_pad_0, pad_type = k_219_pad_type_0, strides = var_10810, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_219_cast")]; + tensor var_10816 = const()[name = tensor("op_10816"), val = tensor([1, 1])]; + tensor var_10818 = const()[name = tensor("op_10818"), val = tensor([1, 1])]; + tensor v_219_pad_type_0 = const()[name = tensor("v_219_pad_type_0"), val = tensor("custom")]; + tensor v_219_pad_0 = const()[name = tensor("v_219_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1100356928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1101667712))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_219_cast = conv(dilations = var_10818, groups = var_6865, pad = v_219_pad_0, pad_type = v_219_pad_type_0, strides = var_10816, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_219_cast")]; + tensor var_10822 = const()[name = tensor("op_10822"), val = tensor([2, 20, 64, -1])]; + tensor var_10823_cast = reshape(shape = var_10822, 
x = q_219_cast)[name = tensor("op_10823_cast")]; + tensor var_10824 = const()[name = tensor("op_10824"), val = tensor([2, 20, 64, -1])]; + tensor var_10825_cast = reshape(shape = var_10824, x = k_219_cast)[name = tensor("op_10825_cast")]; + tensor var_10826 = const()[name = tensor("op_10826"), val = tensor([2, 20, 64, -1])]; + tensor var_10827_cast = reshape(shape = var_10826, x = v_219_cast)[name = tensor("op_10827_cast")]; + tensor attn_weights_437_transpose_x_0 = const()[name = tensor("attn_weights_437_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_437_transpose_y_0 = const()[name = tensor("attn_weights_437_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_437_cast = matmul(transpose_x = attn_weights_437_transpose_x_0, transpose_y = attn_weights_437_transpose_y_0, x = var_10823_cast, y = var_10825_cast)[name = tensor("attn_weights_437_cast")]; + tensor attn_weights_439_cast = mul(x = attn_weights_437_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_439_cast")]; + tensor var_10831_cast = softmax(axis = var_6849, x = attn_weights_439_cast)[name = tensor("op_10831_cast")]; + tensor attn_219_transpose_x_0 = const()[name = tensor("attn_219_transpose_x_0"), val = tensor(false)]; + tensor attn_219_transpose_y_0 = const()[name = tensor("attn_219_transpose_y_0"), val = tensor(true)]; + tensor attn_219_cast = matmul(transpose_x = attn_219_transpose_x_0, transpose_y = attn_219_transpose_y_0, x = var_10827_cast, y = var_10831_cast)[name = tensor("attn_219_cast")]; + tensor var_10835 = const()[name = tensor("op_10835"), val = tensor([2, 1280, 1, -1])]; + tensor input_637_cast = reshape(shape = var_10835, x = attn_219_cast)[name = tensor("input_637_cast")]; + tensor var_10840 = const()[name = tensor("op_10840"), val = tensor([1, 1])]; + tensor var_10842 = const()[name = tensor("op_10842"), val = tensor([1, 1])]; + tensor var_10844_pad_type_0 = const()[name = tensor("op_10844_pad_type_0"), val = tensor("custom")]; + tensor var_10844_pad_0 = const()[name = tensor("op_10844_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1101667840))), lut = tensor([-0x1.48cp-7, -0x1.828p-9, 0x1.83cp-9, 0x1.49cp-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1102077504)))]; + tensor var_10844_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_10842, groups = var_6865, pad = var_10844_pad_0, pad_type = var_10844_pad_type_0, strides = var_10840, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_637_cast)[name = tensor("op_10844_cast")]; + tensor inputs_329_cast = add(x = var_10844_cast, y = inputs_327_cast)[name = tensor("inputs_329_cast")]; + tensor var_10848 = const()[name = tensor("op_10848"), val = tensor([1])]; + tensor channels_mean_329_cast = reduce_mean(axes = var_10848, keep_dims = var_6860, x = inputs_329_cast)[name = tensor("channels_mean_329_cast")]; + tensor zero_mean_329_cast = sub(x = 
inputs_329_cast, y = channels_mean_329_cast)[name = tensor("zero_mean_329_cast")]; + tensor zero_mean_sq_329_cast = mul(x = zero_mean_329_cast, y = zero_mean_329_cast)[name = tensor("zero_mean_sq_329_cast")]; + tensor var_10852 = const()[name = tensor("op_10852"), val = tensor([1])]; + tensor var_10853_cast = reduce_mean(axes = var_10852, keep_dims = var_6860, x = zero_mean_sq_329_cast)[name = tensor("op_10853_cast")]; + tensor var_10854_to_fp16 = const()[name = tensor("op_10854_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10855_cast = add(x = var_10853_cast, y = var_10854_to_fp16)[name = tensor("op_10855_cast")]; + tensor denom_329_epsilon_0_to_fp16 = const()[name = tensor("denom_329_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_329_cast = rsqrt(epsilon = denom_329_epsilon_0_to_fp16, x = var_10855_cast)[name = tensor("denom_329_cast")]; + tensor out_329_cast = mul(x = zero_mean_329_cast, y = denom_329_cast)[name = tensor("out_329_cast")]; + tensor var_10859_to_fp16 = const()[name = tensor("op_10859_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1102080128)))]; + tensor var_10860_cast = add(x = out_329_cast, y = var_10859_to_fp16)[name = tensor("op_10860_cast")]; + tensor var_10862_to_fp16 = const()[name = tensor("op_10862_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1102082752)))]; + tensor input_639_cast = mul(x = var_10860_cast, y = var_10862_to_fp16)[name = tensor("input_639_cast")]; + tensor var_10870 = const()[name = tensor("op_10870"), val = tensor([1, 1])]; + tensor var_10872 = const()[name = tensor("op_10872"), val = tensor([1, 1])]; + tensor var_10874_pad_type_0 = const()[name = tensor("op_10874_pad_type_0"), val = tensor("custom")]; + tensor var_10874_pad_0 = const()[name = tensor("op_10874_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1102085376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1111915840))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1111916032)))]; + tensor var_10874_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_10872, groups = var_6865, pad = var_10874_pad_0, pad_type = var_10874_pad_type_0, strides = var_10870, weight = up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_639_cast)[name = tensor("op_10874_cast")]; + tensor var_10875_split_sizes_0 = const()[name = tensor("op_10875_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10875_axis_0 = const()[name = tensor("op_10875_axis_0"), val = tensor(1)]; + tensor var_10875_cast_0, tensor var_10875_cast_1 = split(axis = var_10875_axis_0, split_sizes = var_10875_split_sizes_0, x = var_10874_cast)[name = tensor("op_10875_cast")]; + tensor var_10877_mode_0 = const()[name = tensor("op_10877_mode_0"), val = tensor("EXACT")]; + tensor var_10877_cast = gelu(mode = 
var_10877_mode_0, x = var_10875_cast_1)[name = tensor("op_10877_cast")]; + tensor input_641_cast = mul(x = var_10875_cast_0, y = var_10877_cast)[name = tensor("input_641_cast")]; + tensor var_10881 = const()[name = tensor("op_10881"), val = tensor([1, 1])]; + tensor var_10883 = const()[name = tensor("op_10883"), val = tensor([1, 1])]; + tensor var_10885_pad_type_0 = const()[name = tensor("op_10885_pad_type_0"), val = tensor("custom")]; + tensor var_10885_pad_0 = const()[name = tensor("op_10885_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1111936576))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1116851840))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1116852032)))]; + tensor var_10885_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_10883, groups = var_6865, pad = var_10885_pad_0, pad_type = var_10885_pad_type_0, strides = var_10881, weight = up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_641_cast)[name = tensor("op_10885_cast")]; + tensor inputs_331_cast = add(x = var_10885_cast, y = inputs_329_cast)[name = tensor("inputs_331_cast")]; + tensor var_10895 = const()[name = tensor("op_10895"), val = tensor([1])]; + tensor channels_mean_331_cast = reduce_mean(axes = var_10895, keep_dims = var_6860, x = inputs_331_cast)[name = tensor("channels_mean_331_cast")]; + tensor zero_mean_331_cast = sub(x = inputs_331_cast, y = channels_mean_331_cast)[name = tensor("zero_mean_331_cast")]; + tensor zero_mean_sq_331_cast = mul(x = zero_mean_331_cast, y = zero_mean_331_cast)[name = tensor("zero_mean_sq_331_cast")]; + tensor var_10899 = const()[name = tensor("op_10899"), val = tensor([1])]; + tensor var_10900_cast = reduce_mean(axes = var_10899, keep_dims = var_6860, x = zero_mean_sq_331_cast)[name = tensor("op_10900_cast")]; + tensor var_10901_to_fp16 = const()[name = tensor("op_10901_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10902_cast = add(x = var_10900_cast, y = var_10901_to_fp16)[name = tensor("op_10902_cast")]; + tensor denom_331_epsilon_0_to_fp16 = const()[name = tensor("denom_331_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_331_cast = rsqrt(epsilon = denom_331_epsilon_0_to_fp16, x = var_10902_cast)[name = tensor("denom_331_cast")]; + tensor out_331_cast = mul(x = zero_mean_331_cast, y = denom_331_cast)[name = tensor("out_331_cast")]; + tensor var_10906_to_fp16 = const()[name = tensor("op_10906_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1116854656)))]; + tensor var_10907_cast = add(x = out_331_cast, y = var_10906_to_fp16)[name = tensor("op_10907_cast")]; + tensor var_10909_to_fp16 = const()[name = tensor("op_10909_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1116857280)))]; + tensor hidden_states_441_cast = mul(x = var_10907_cast, y = var_10909_to_fp16)[name = 
tensor("hidden_states_441_cast")]; + tensor var_10916 = const()[name = tensor("op_10916"), val = tensor([1, 1])]; + tensor var_10918 = const()[name = tensor("op_10918"), val = tensor([1, 1])]; + tensor q_221_pad_type_0 = const()[name = tensor("q_221_pad_type_0"), val = tensor("custom")]; + tensor q_221_pad_0 = const()[name = tensor("q_221_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1116859904))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1117679168))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_221_cast = conv(dilations = var_10918, groups = var_6865, pad = q_221_pad_0, pad_type = q_221_pad_type_0, strides = var_10916, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_441_cast)[name = tensor("q_221_cast")]; + tensor var_10922 = const()[name = tensor("op_10922"), val = tensor([1, 1])]; + tensor var_10924 = const()[name = tensor("op_10924"), val = tensor([1, 1])]; + tensor k_221_pad_type_0 = const()[name = tensor("k_221_pad_type_0"), val = tensor("custom")]; + tensor k_221_pad_0 = const()[name = tensor("k_221_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1117679296))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1118498560))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_221_cast = conv(dilations = var_10924, groups = var_6865, pad = k_221_pad_0, pad_type = k_221_pad_type_0, strides = var_10922, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_441_cast)[name = tensor("k_221_cast")]; + tensor var_10928 = const()[name = tensor("op_10928"), val = tensor([1, 1])]; + tensor var_10930 = const()[name = tensor("op_10930"), val = tensor([1, 1])]; + tensor v_221_pad_type_0 = const()[name = tensor("v_221_pad_type_0"), val = tensor("custom")]; + tensor v_221_pad_0 = const()[name = tensor("v_221_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1118498688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1119727552))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_221_cast = conv(dilations = var_10930, groups = var_6865, pad = v_221_pad_0, pad_type = v_221_pad_type_0, strides = var_10928, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_441_cast)[name = tensor("v_221_cast")]; + tensor var_10934 = const()[name = tensor("op_10934"), val = tensor([2, 20, 64, -1])]; + tensor var_10935_cast = reshape(shape = var_10934, x = q_221_cast)[name = tensor("op_10935_cast")]; + tensor var_10936 = const()[name = 
tensor("op_10936"), val = tensor([2, 20, 64, -1])]; + tensor var_10937_cast = reshape(shape = var_10936, x = k_221_cast)[name = tensor("op_10937_cast")]; + tensor var_10938 = const()[name = tensor("op_10938"), val = tensor([2, 20, 64, -1])]; + tensor var_10939_cast = reshape(shape = var_10938, x = v_221_cast)[name = tensor("op_10939_cast")]; + tensor attn_weights_441_transpose_x_0 = const()[name = tensor("attn_weights_441_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_441_transpose_y_0 = const()[name = tensor("attn_weights_441_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_441_cast = matmul(transpose_x = attn_weights_441_transpose_x_0, transpose_y = attn_weights_441_transpose_y_0, x = var_10935_cast, y = var_10937_cast)[name = tensor("attn_weights_441_cast")]; + tensor attn_weights_443_cast = mul(x = attn_weights_441_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_443_cast")]; + tensor var_10943_cast = softmax(axis = var_6849, x = attn_weights_443_cast)[name = tensor("op_10943_cast")]; + tensor attn_221_transpose_x_0 = const()[name = tensor("attn_221_transpose_x_0"), val = tensor(false)]; + tensor attn_221_transpose_y_0 = const()[name = tensor("attn_221_transpose_y_0"), val = tensor(true)]; + tensor attn_221_cast = matmul(transpose_x = attn_221_transpose_x_0, transpose_y = attn_221_transpose_y_0, x = var_10939_cast, y = var_10943_cast)[name = tensor("attn_221_cast")]; + tensor var_10947 = const()[name = tensor("op_10947"), val = tensor([2, 1280, 1, -1])]; + tensor input_643_cast = reshape(shape = var_10947, x = attn_221_cast)[name = tensor("input_643_cast")]; + tensor var_10952 = const()[name = tensor("op_10952"), val = tensor([1, 1])]; + tensor var_10954 = const()[name = tensor("op_10954"), val = tensor([1, 1])]; + tensor var_10956_pad_type_0 = const()[name = tensor("op_10956_pad_type_0"), val = tensor("custom")]; + tensor var_10956_pad_0 = const()[name = tensor("op_10956_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1119727744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1120956608))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1120956800)))]; + tensor var_10956_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_10954, groups = var_6865, pad = var_10956_pad_0, pad_type = var_10956_pad_type_0, strides = var_10952, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_643_cast)[name = tensor("op_10956_cast")]; + tensor inputs_333_cast = add(x = var_10956_cast, y = inputs_331_cast)[name = tensor("inputs_333_cast")]; + tensor var_10960 = const()[name = tensor("op_10960"), val = tensor([1])]; + tensor channels_mean_333_cast = reduce_mean(axes = var_10960, keep_dims = var_6860, x = inputs_333_cast)[name = tensor("channels_mean_333_cast")]; + tensor zero_mean_333_cast = sub(x = inputs_333_cast, y = channels_mean_333_cast)[name = 
tensor("zero_mean_333_cast")]; + tensor zero_mean_sq_333_cast = mul(x = zero_mean_333_cast, y = zero_mean_333_cast)[name = tensor("zero_mean_sq_333_cast")]; + tensor var_10964 = const()[name = tensor("op_10964"), val = tensor([1])]; + tensor var_10965_cast = reduce_mean(axes = var_10964, keep_dims = var_6860, x = zero_mean_sq_333_cast)[name = tensor("op_10965_cast")]; + tensor var_10966_to_fp16 = const()[name = tensor("op_10966_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10967_cast = add(x = var_10965_cast, y = var_10966_to_fp16)[name = tensor("op_10967_cast")]; + tensor denom_333_epsilon_0_to_fp16 = const()[name = tensor("denom_333_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_333_cast = rsqrt(epsilon = denom_333_epsilon_0_to_fp16, x = var_10967_cast)[name = tensor("denom_333_cast")]; + tensor out_333_cast = mul(x = zero_mean_333_cast, y = denom_333_cast)[name = tensor("out_333_cast")]; + tensor var_10971_to_fp16 = const()[name = tensor("op_10971_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1120959424)))]; + tensor var_10972_cast = add(x = out_333_cast, y = var_10971_to_fp16)[name = tensor("op_10972_cast")]; + tensor var_10974_to_fp16 = const()[name = tensor("op_10974_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1120962048)))]; + tensor hidden_states_443_cast = mul(x = var_10972_cast, y = var_10974_to_fp16)[name = tensor("hidden_states_443_cast")]; + tensor var_10981 = const()[name = tensor("op_10981"), val = tensor([1, 1])]; + tensor var_10983 = const()[name = tensor("op_10983"), val = tensor([1, 1])]; + tensor q_223_pad_type_0 = const()[name = tensor("q_223_pad_type_0"), val = tensor("custom")]; + tensor q_223_pad_0 = const()[name = tensor("q_223_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1120964672))), lut = tensor([-0x1.304p-5, -0x1.668p-7, 0x1.624p-7, 0x1.2f8p-5]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_223_cast = conv(dilations = var_10983, groups = var_6865, pad = q_223_pad_0, pad_type = q_223_pad_type_0, strides = var_10981, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_443_cast)[name = tensor("q_223_cast")]; + tensor var_10987 = const()[name = tensor("op_10987"), val = tensor([1, 1])]; + tensor var_10989 = const()[name = tensor("op_10989"), val = tensor([1, 1])]; + tensor k_223_pad_type_0 = const()[name = tensor("k_223_pad_type_0"), val = tensor("custom")]; + tensor k_223_pad_0 = const()[name = tensor("k_223_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1121374336))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1122685120))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_223_cast = conv(dilations = var_10989, groups = var_6865, pad = k_223_pad_0, pad_type = k_223_pad_type_0, strides = var_10987, weight = 
up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_223_cast")]; + tensor var_10993 = const()[name = tensor("op_10993"), val = tensor([1, 1])]; + tensor var_10995 = const()[name = tensor("op_10995"), val = tensor([1, 1])]; + tensor v_223_pad_type_0 = const()[name = tensor("v_223_pad_type_0"), val = tensor("custom")]; + tensor v_223_pad_0 = const()[name = tensor("v_223_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1122685248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1123996032))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_223_cast = conv(dilations = var_10995, groups = var_6865, pad = v_223_pad_0, pad_type = v_223_pad_type_0, strides = var_10993, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_223_cast")]; + tensor var_10999 = const()[name = tensor("op_10999"), val = tensor([2, 20, 64, -1])]; + tensor var_11000_cast = reshape(shape = var_10999, x = q_223_cast)[name = tensor("op_11000_cast")]; + tensor var_11001 = const()[name = tensor("op_11001"), val = tensor([2, 20, 64, -1])]; + tensor var_11002_cast = reshape(shape = var_11001, x = k_223_cast)[name = tensor("op_11002_cast")]; + tensor var_11003 = const()[name = tensor("op_11003"), val = tensor([2, 20, 64, -1])]; + tensor var_11004_cast = reshape(shape = var_11003, x = v_223_cast)[name = tensor("op_11004_cast")]; + tensor attn_weights_445_transpose_x_0 = const()[name = tensor("attn_weights_445_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_445_transpose_y_0 = const()[name = tensor("attn_weights_445_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_445_cast = matmul(transpose_x = attn_weights_445_transpose_x_0, transpose_y = attn_weights_445_transpose_y_0, x = var_11000_cast, y = var_11002_cast)[name = tensor("attn_weights_445_cast")]; + tensor attn_weights_447_cast = mul(x = attn_weights_445_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_447_cast")]; + tensor var_11008_cast = softmax(axis = var_6849, x = attn_weights_447_cast)[name = tensor("op_11008_cast")]; + tensor attn_223_transpose_x_0 = const()[name = tensor("attn_223_transpose_x_0"), val = tensor(false)]; + tensor attn_223_transpose_y_0 = const()[name = tensor("attn_223_transpose_y_0"), val = tensor(true)]; + tensor attn_223_cast = matmul(transpose_x = attn_223_transpose_x_0, transpose_y = attn_223_transpose_y_0, x = var_11004_cast, y = var_11008_cast)[name = tensor("attn_223_cast")]; + tensor var_11012 = const()[name = tensor("op_11012"), val = tensor([2, 1280, 1, -1])]; + tensor input_645_cast = reshape(shape = var_11012, x = attn_223_cast)[name = tensor("input_645_cast")]; + tensor var_11017 = const()[name = tensor("op_11017"), val = tensor([1, 1])]; + tensor var_11019 = const()[name = tensor("op_11019"), val = tensor([1, 1])]; + tensor var_11021_pad_type_0 = const()[name = tensor("op_11021_pad_type_0"), val = tensor("custom")]; + tensor var_11021_pad_0 = const()[name = tensor("op_11021_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1123996160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1124815424))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1124815552)))]; + tensor var_11021_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_11019, groups = var_6865, pad = var_11021_pad_0, pad_type = var_11021_pad_type_0, strides = var_11017, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_645_cast)[name = tensor("op_11021_cast")]; + tensor inputs_335_cast = add(x = var_11021_cast, y = inputs_333_cast)[name = tensor("inputs_335_cast")]; + tensor var_11025 = const()[name = tensor("op_11025"), val = tensor([1])]; + tensor channels_mean_335_cast = reduce_mean(axes = var_11025, keep_dims = var_6860, x = inputs_335_cast)[name = tensor("channels_mean_335_cast")]; + tensor zero_mean_335_cast = sub(x = inputs_335_cast, y = channels_mean_335_cast)[name = tensor("zero_mean_335_cast")]; + tensor zero_mean_sq_335_cast = mul(x = zero_mean_335_cast, y = zero_mean_335_cast)[name = tensor("zero_mean_sq_335_cast")]; + tensor var_11029 = const()[name = tensor("op_11029"), val = tensor([1])]; + tensor var_11030_cast = reduce_mean(axes = var_11029, keep_dims = var_6860, x = zero_mean_sq_335_cast)[name = tensor("op_11030_cast")]; + tensor var_11031_to_fp16 = const()[name = tensor("op_11031_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11032_cast = add(x = var_11030_cast, y = var_11031_to_fp16)[name = tensor("op_11032_cast")]; + tensor denom_335_epsilon_0_to_fp16 = const()[name = tensor("denom_335_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_335_cast = rsqrt(epsilon = denom_335_epsilon_0_to_fp16, x = var_11032_cast)[name = tensor("denom_335_cast")]; + tensor out_335_cast = mul(x = zero_mean_335_cast, y = denom_335_cast)[name = tensor("out_335_cast")]; + tensor var_11036_to_fp16 = const()[name = tensor("op_11036_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1124818176)))]; + tensor var_11037_cast = add(x = out_335_cast, y = var_11036_to_fp16)[name = tensor("op_11037_cast")]; + tensor var_11039_to_fp16 = const()[name = tensor("op_11039_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1124820800)))]; + tensor input_647_cast = mul(x = var_11037_cast, y = var_11039_to_fp16)[name = tensor("input_647_cast")]; + tensor var_11047 = const()[name = tensor("op_11047"), val = tensor([1, 1])]; + tensor var_11049 = const()[name = tensor("op_11049"), val = tensor([1, 1])]; + tensor var_11051_pad_type_0 = const()[name = tensor("op_11051_pad_type_0"), val = tensor("custom")]; + tensor var_11051_pad_0 = const()[name = tensor("op_11051_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1124823424))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1134653888))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1134654080)))]; + tensor var_11051_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_11049, groups = var_6865, pad = var_11051_pad_0, pad_type = var_11051_pad_type_0, strides = var_11047, weight = up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_647_cast)[name = tensor("op_11051_cast")]; + tensor var_11052_split_sizes_0 = const()[name = tensor("op_11052_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11052_axis_0 = const()[name = tensor("op_11052_axis_0"), val = tensor(1)]; + tensor var_11052_cast_0, tensor var_11052_cast_1 = split(axis = var_11052_axis_0, split_sizes = var_11052_split_sizes_0, x = var_11051_cast)[name = tensor("op_11052_cast")]; + tensor var_11054_mode_0 = const()[name = tensor("op_11054_mode_0"), val = tensor("EXACT")]; + tensor var_11054_cast = gelu(mode = var_11054_mode_0, x = var_11052_cast_1)[name = tensor("op_11054_cast")]; + tensor input_649_cast = mul(x = var_11052_cast_0, y = var_11054_cast)[name = tensor("input_649_cast")]; + tensor var_11058 = const()[name = tensor("op_11058"), val = tensor([1, 1])]; + tensor var_11060 = const()[name = tensor("op_11060"), val = tensor([1, 1])]; + tensor var_11062_pad_type_0 = const()[name = tensor("op_11062_pad_type_0"), val = tensor("custom")]; + tensor var_11062_pad_0 = const()[name = tensor("op_11062_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1134674624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1139589888))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1139590080)))]; + tensor var_11062_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_11060, groups = var_6865, pad = var_11062_pad_0, pad_type = var_11062_pad_type_0, strides = var_11058, weight = up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_649_cast)[name = tensor("op_11062_cast")]; + tensor inputs_337_cast = add(x = var_11062_cast, y = inputs_335_cast)[name = tensor("inputs_337_cast")]; + tensor var_11072 = const()[name = tensor("op_11072"), val = tensor([1])]; + tensor channels_mean_337_cast = reduce_mean(axes = var_11072, keep_dims = var_6860, x = inputs_337_cast)[name = tensor("channels_mean_337_cast")]; + tensor zero_mean_337_cast = sub(x = inputs_337_cast, y = channels_mean_337_cast)[name = tensor("zero_mean_337_cast")]; + tensor zero_mean_sq_337_cast = mul(x = 
zero_mean_337_cast, y = zero_mean_337_cast)[name = tensor("zero_mean_sq_337_cast")]; + tensor var_11076 = const()[name = tensor("op_11076"), val = tensor([1])]; + tensor var_11077_cast = reduce_mean(axes = var_11076, keep_dims = var_6860, x = zero_mean_sq_337_cast)[name = tensor("op_11077_cast")]; + tensor var_11078_to_fp16 = const()[name = tensor("op_11078_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11079_cast = add(x = var_11077_cast, y = var_11078_to_fp16)[name = tensor("op_11079_cast")]; + tensor denom_337_epsilon_0_to_fp16 = const()[name = tensor("denom_337_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_337_cast = rsqrt(epsilon = denom_337_epsilon_0_to_fp16, x = var_11079_cast)[name = tensor("denom_337_cast")]; + tensor out_337_cast = mul(x = zero_mean_337_cast, y = denom_337_cast)[name = tensor("out_337_cast")]; + tensor var_11083_to_fp16 = const()[name = tensor("op_11083_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1139592704)))]; + tensor var_11084_cast = add(x = out_337_cast, y = var_11083_to_fp16)[name = tensor("op_11084_cast")]; + tensor var_11086_to_fp16 = const()[name = tensor("op_11086_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1139595328)))]; + tensor hidden_states_447_cast = mul(x = var_11084_cast, y = var_11086_to_fp16)[name = tensor("hidden_states_447_cast")]; + tensor var_11093 = const()[name = tensor("op_11093"), val = tensor([1, 1])]; + tensor var_11095 = const()[name = tensor("op_11095"), val = tensor([1, 1])]; + tensor q_225_pad_type_0 = const()[name = tensor("q_225_pad_type_0"), val = tensor("custom")]; + tensor q_225_pad_0 = const()[name = tensor("q_225_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1139597952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1140417216))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_225_cast = conv(dilations = var_11095, groups = var_6865, pad = q_225_pad_0, pad_type = q_225_pad_type_0, strides = var_11093, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_447_cast)[name = tensor("q_225_cast")]; + tensor var_11099 = const()[name = tensor("op_11099"), val = tensor([1, 1])]; + tensor var_11101 = const()[name = tensor("op_11101"), val = tensor([1, 1])]; + tensor k_225_pad_type_0 = const()[name = tensor("k_225_pad_type_0"), val = tensor("custom")]; + tensor k_225_pad_0 = const()[name = tensor("k_225_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1140417344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1141236608))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_225_cast = conv(dilations = var_11101, groups = var_6865, pad = k_225_pad_0, pad_type = k_225_pad_type_0, strides = var_11099, weight = 
up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_447_cast)[name = tensor("k_225_cast")]; + tensor var_11105 = const()[name = tensor("op_11105"), val = tensor([1, 1])]; + tensor var_11107 = const()[name = tensor("op_11107"), val = tensor([1, 1])]; + tensor v_225_pad_type_0 = const()[name = tensor("v_225_pad_type_0"), val = tensor("custom")]; + tensor v_225_pad_0 = const()[name = tensor("v_225_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1141236736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1142465600))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_225_cast = conv(dilations = var_11107, groups = var_6865, pad = v_225_pad_0, pad_type = v_225_pad_type_0, strides = var_11105, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_447_cast)[name = tensor("v_225_cast")]; + tensor var_11111 = const()[name = tensor("op_11111"), val = tensor([2, 20, 64, -1])]; + tensor var_11112_cast = reshape(shape = var_11111, x = q_225_cast)[name = tensor("op_11112_cast")]; + tensor var_11113 = const()[name = tensor("op_11113"), val = tensor([2, 20, 64, -1])]; + tensor var_11114_cast = reshape(shape = var_11113, x = k_225_cast)[name = tensor("op_11114_cast")]; + tensor var_11115 = const()[name = tensor("op_11115"), val = tensor([2, 20, 64, -1])]; + tensor var_11116_cast = reshape(shape = var_11115, x = v_225_cast)[name = tensor("op_11116_cast")]; + tensor attn_weights_449_transpose_x_0 = const()[name = tensor("attn_weights_449_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_449_transpose_y_0 = const()[name = tensor("attn_weights_449_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_449_cast = matmul(transpose_x = attn_weights_449_transpose_x_0, transpose_y = attn_weights_449_transpose_y_0, x = var_11112_cast, y = var_11114_cast)[name = tensor("attn_weights_449_cast")]; + tensor attn_weights_451_cast = mul(x = attn_weights_449_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_451_cast")]; + tensor var_11120_cast = softmax(axis = var_6849, x = attn_weights_451_cast)[name = tensor("op_11120_cast")]; + tensor attn_225_transpose_x_0 = const()[name = tensor("attn_225_transpose_x_0"), val = tensor(false)]; + tensor attn_225_transpose_y_0 = const()[name = tensor("attn_225_transpose_y_0"), val = tensor(true)]; + tensor attn_225_cast = matmul(transpose_x = attn_225_transpose_x_0, transpose_y = attn_225_transpose_y_0, x = var_11116_cast, y = var_11120_cast)[name = tensor("attn_225_cast")]; + tensor var_11124 = const()[name = tensor("op_11124"), val = tensor([2, 1280, 1, -1])]; + tensor input_651_cast = reshape(shape = var_11124, x = attn_225_cast)[name = tensor("input_651_cast")]; + tensor var_11129 = const()[name = tensor("op_11129"), val = tensor([1, 1])]; + tensor var_11131 = const()[name = tensor("op_11131"), val = tensor([1, 1])]; + tensor var_11133_pad_type_0 = const()[name = tensor("op_11133_pad_type_0"), val = tensor("custom")]; + tensor var_11133_pad_0 = const()[name = tensor("op_11133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1142465792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1143694656))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1143694848)))]; + tensor var_11133_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_11131, groups = var_6865, pad = var_11133_pad_0, pad_type = var_11133_pad_type_0, strides = var_11129, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_651_cast)[name = tensor("op_11133_cast")]; + tensor inputs_339_cast = add(x = var_11133_cast, y = inputs_337_cast)[name = tensor("inputs_339_cast")]; + tensor var_11137 = const()[name = tensor("op_11137"), val = tensor([1])]; + tensor channels_mean_339_cast = reduce_mean(axes = var_11137, keep_dims = var_6860, x = inputs_339_cast)[name = tensor("channels_mean_339_cast")]; + tensor zero_mean_339_cast = sub(x = inputs_339_cast, y = channels_mean_339_cast)[name = tensor("zero_mean_339_cast")]; + tensor zero_mean_sq_339_cast = mul(x = zero_mean_339_cast, y = zero_mean_339_cast)[name = tensor("zero_mean_sq_339_cast")]; + tensor var_11141 = const()[name = tensor("op_11141"), val = tensor([1])]; + tensor var_11142_cast = reduce_mean(axes = var_11141, keep_dims = var_6860, x = zero_mean_sq_339_cast)[name = tensor("op_11142_cast")]; + tensor var_11143_to_fp16 = const()[name = tensor("op_11143_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11144_cast = add(x = var_11142_cast, y = var_11143_to_fp16)[name = tensor("op_11144_cast")]; + tensor denom_339_epsilon_0_to_fp16 = const()[name = tensor("denom_339_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_339_cast = rsqrt(epsilon = denom_339_epsilon_0_to_fp16, x = var_11144_cast)[name = tensor("denom_339_cast")]; + tensor out_339_cast = mul(x = zero_mean_339_cast, y = denom_339_cast)[name = tensor("out_339_cast")]; + tensor var_11148_to_fp16 = const()[name = tensor("op_11148_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1143697472)))]; + tensor var_11149_cast = add(x = out_339_cast, y = var_11148_to_fp16)[name = tensor("op_11149_cast")]; + tensor var_11151_to_fp16 = const()[name = tensor("op_11151_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1143700096)))]; + tensor hidden_states_449_cast = mul(x = var_11149_cast, y = var_11151_to_fp16)[name = tensor("hidden_states_449_cast")]; + tensor var_11158 = const()[name = tensor("op_11158"), val = tensor([1, 1])]; + tensor var_11160 = const()[name = tensor("op_11160"), val = tensor([1, 1])]; + tensor q_227_pad_type_0 = const()[name = tensor("q_227_pad_type_0"), val = tensor("custom")]; + tensor q_227_pad_0 = const()[name = tensor("q_227_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1143702720))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1144521984))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_227_cast = conv(dilations = var_11160, groups = var_6865, pad = q_227_pad_0, pad_type = q_227_pad_type_0, strides = var_11158, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_449_cast)[name = tensor("q_227_cast")]; + tensor var_11164 = const()[name = tensor("op_11164"), val = tensor([1, 1])]; + tensor var_11166 = const()[name = tensor("op_11166"), val = tensor([1, 1])]; + tensor k_227_pad_type_0 = const()[name = tensor("k_227_pad_type_0"), val = tensor("custom")]; + tensor k_227_pad_0 = const()[name = tensor("k_227_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1144522112))), lut = tensor([-0x1.fdcp-6, -0x1.22cp-7, 0x1.24cp-7, 0x1.ff4p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_227_cast = conv(dilations = var_11166, groups = var_6865, pad = k_227_pad_0, pad_type = k_227_pad_type_0, strides = var_11164, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_227_cast")]; + tensor var_11170 = const()[name = tensor("op_11170"), val = tensor([1, 1])]; + tensor var_11172 = const()[name = tensor("op_11172"), val = tensor([1, 1])]; + tensor v_227_pad_type_0 = const()[name = tensor("v_227_pad_type_0"), val = tensor("custom")]; + tensor v_227_pad_0 = const()[name = tensor("v_227_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1145177536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1146488320))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_227_cast = conv(dilations = var_11172, groups = var_6865, pad = v_227_pad_0, pad_type = v_227_pad_type_0, strides = var_11170, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_227_cast")]; + tensor var_11176 = const()[name = tensor("op_11176"), val = tensor([2, 20, 64, -1])]; + tensor var_11177_cast = reshape(shape = var_11176, x = q_227_cast)[name = tensor("op_11177_cast")]; + tensor var_11178 = const()[name = tensor("op_11178"), val = tensor([2, 20, 64, -1])]; + tensor var_11179_cast = reshape(shape = var_11178, x = k_227_cast)[name = tensor("op_11179_cast")]; + tensor var_11180 = const()[name = tensor("op_11180"), val = tensor([2, 20, 64, -1])]; + tensor var_11181_cast = reshape(shape = var_11180, x = v_227_cast)[name = tensor("op_11181_cast")]; + tensor attn_weights_453_transpose_x_0 = const()[name = tensor("attn_weights_453_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_453_transpose_y_0 = const()[name = tensor("attn_weights_453_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_453_cast = matmul(transpose_x = 
attn_weights_453_transpose_x_0, transpose_y = attn_weights_453_transpose_y_0, x = var_11177_cast, y = var_11179_cast)[name = tensor("attn_weights_453_cast")]; + tensor attn_weights_455_cast = mul(x = attn_weights_453_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_455_cast")]; + tensor var_11185_cast = softmax(axis = var_6849, x = attn_weights_455_cast)[name = tensor("op_11185_cast")]; + tensor attn_227_transpose_x_0 = const()[name = tensor("attn_227_transpose_x_0"), val = tensor(false)]; + tensor attn_227_transpose_y_0 = const()[name = tensor("attn_227_transpose_y_0"), val = tensor(true)]; + tensor attn_227_cast = matmul(transpose_x = attn_227_transpose_x_0, transpose_y = attn_227_transpose_y_0, x = var_11181_cast, y = var_11185_cast)[name = tensor("attn_227_cast")]; + tensor var_11189 = const()[name = tensor("op_11189"), val = tensor([2, 1280, 1, -1])]; + tensor input_653_cast = reshape(shape = var_11189, x = attn_227_cast)[name = tensor("input_653_cast")]; + tensor var_11194 = const()[name = tensor("op_11194"), val = tensor([1, 1])]; + tensor var_11196 = const()[name = tensor("op_11196"), val = tensor([1, 1])]; + tensor var_11198_pad_type_0 = const()[name = tensor("op_11198_pad_type_0"), val = tensor("custom")]; + tensor var_11198_pad_0 = const()[name = tensor("op_11198_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1146488448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1147307712))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1147307840)))]; + tensor var_11198_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_11196, groups = var_6865, pad = var_11198_pad_0, pad_type = var_11198_pad_type_0, strides = var_11194, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_653_cast)[name = tensor("op_11198_cast")]; + tensor inputs_341_cast = add(x = var_11198_cast, y = inputs_339_cast)[name = tensor("inputs_341_cast")]; + tensor var_11202 = const()[name = tensor("op_11202"), val = tensor([1])]; + tensor channels_mean_341_cast = reduce_mean(axes = var_11202, keep_dims = var_6860, x = inputs_341_cast)[name = tensor("channels_mean_341_cast")]; + tensor zero_mean_341_cast = sub(x = inputs_341_cast, y = channels_mean_341_cast)[name = tensor("zero_mean_341_cast")]; + tensor zero_mean_sq_341_cast = mul(x = zero_mean_341_cast, y = zero_mean_341_cast)[name = tensor("zero_mean_sq_341_cast")]; + tensor var_11206 = const()[name = tensor("op_11206"), val = tensor([1])]; + tensor var_11207_cast = reduce_mean(axes = var_11206, keep_dims = var_6860, x = zero_mean_sq_341_cast)[name = tensor("op_11207_cast")]; + tensor var_11208_to_fp16 = const()[name = tensor("op_11208_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11209_cast = add(x = var_11207_cast, y = var_11208_to_fp16)[name = tensor("op_11209_cast")]; + tensor denom_341_epsilon_0_to_fp16 = const()[name = 
tensor("denom_341_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_341_cast = rsqrt(epsilon = denom_341_epsilon_0_to_fp16, x = var_11209_cast)[name = tensor("denom_341_cast")]; + tensor out_341_cast = mul(x = zero_mean_341_cast, y = denom_341_cast)[name = tensor("out_341_cast")]; + tensor var_11213_to_fp16 = const()[name = tensor("op_11213_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1147310464)))]; + tensor var_11214_cast = add(x = out_341_cast, y = var_11213_to_fp16)[name = tensor("op_11214_cast")]; + tensor var_11216_to_fp16 = const()[name = tensor("op_11216_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1147313088)))]; + tensor input_655_cast = mul(x = var_11214_cast, y = var_11216_to_fp16)[name = tensor("input_655_cast")]; + tensor var_11224 = const()[name = tensor("op_11224"), val = tensor([1, 1])]; + tensor var_11226 = const()[name = tensor("op_11226"), val = tensor([1, 1])]; + tensor var_11228_pad_type_0 = const()[name = tensor("op_11228_pad_type_0"), val = tensor("custom")]; + tensor var_11228_pad_0 = const()[name = tensor("op_11228_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1147315712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1157146176))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1157146368)))]; + tensor var_11228_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_11226, groups = var_6865, pad = var_11228_pad_0, pad_type = var_11228_pad_type_0, strides = var_11224, weight = up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_655_cast)[name = tensor("op_11228_cast")]; + tensor var_11229_split_sizes_0 = const()[name = tensor("op_11229_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11229_axis_0 = const()[name = tensor("op_11229_axis_0"), val = tensor(1)]; + tensor var_11229_cast_0, tensor var_11229_cast_1 = split(axis = var_11229_axis_0, split_sizes = var_11229_split_sizes_0, x = var_11228_cast)[name = tensor("op_11229_cast")]; + tensor var_11231_mode_0 = const()[name = tensor("op_11231_mode_0"), val = tensor("EXACT")]; + tensor var_11231_cast = gelu(mode = var_11231_mode_0, x = var_11229_cast_1)[name = tensor("op_11231_cast")]; + tensor input_657_cast = mul(x = var_11229_cast_0, y = var_11231_cast)[name = tensor("input_657_cast")]; + tensor var_11235 = const()[name = tensor("op_11235"), val = tensor([1, 1])]; + tensor var_11237 = const()[name = tensor("op_11237"), val = tensor([1, 1])]; + tensor var_11239_pad_type_0 = const()[name = tensor("op_11239_pad_type_0"), val = tensor("custom")]; + tensor var_11239_pad_0 = const()[name = tensor("op_11239_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1157166912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1162082176))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1162082368)))]; + tensor var_11239_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_11237, groups = var_6865, pad = var_11239_pad_0, pad_type = var_11239_pad_type_0, strides = var_11235, weight = up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_657_cast)[name = tensor("op_11239_cast")]; + tensor inputs_343_cast = add(x = var_11239_cast, y = inputs_341_cast)[name = tensor("inputs_343_cast")]; + tensor var_11249 = const()[name = tensor("op_11249"), val = tensor([1])]; + tensor channels_mean_343_cast = reduce_mean(axes = var_11249, keep_dims = var_6860, x = inputs_343_cast)[name = tensor("channels_mean_343_cast")]; + tensor zero_mean_343_cast = sub(x = inputs_343_cast, y = channels_mean_343_cast)[name = tensor("zero_mean_343_cast")]; + tensor zero_mean_sq_343_cast = mul(x = zero_mean_343_cast, y = zero_mean_343_cast)[name = tensor("zero_mean_sq_343_cast")]; + tensor var_11253 = const()[name = tensor("op_11253"), val = tensor([1])]; + tensor var_11254_cast = reduce_mean(axes = var_11253, keep_dims = var_6860, x = zero_mean_sq_343_cast)[name = tensor("op_11254_cast")]; + tensor var_11255_to_fp16 = const()[name = tensor("op_11255_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11256_cast = add(x = var_11254_cast, y = var_11255_to_fp16)[name = tensor("op_11256_cast")]; + tensor denom_343_epsilon_0_to_fp16 = const()[name = tensor("denom_343_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_343_cast = rsqrt(epsilon = denom_343_epsilon_0_to_fp16, x = var_11256_cast)[name = tensor("denom_343_cast")]; + tensor out_343_cast = mul(x = zero_mean_343_cast, y = denom_343_cast)[name = tensor("out_343_cast")]; + tensor var_11260_to_fp16 = const()[name = tensor("op_11260_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1162084992)))]; + tensor var_11261_cast = add(x = out_343_cast, y = var_11260_to_fp16)[name = tensor("op_11261_cast")]; + tensor var_11263_to_fp16 = const()[name = tensor("op_11263_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1162087616)))]; + tensor hidden_states_453_cast = mul(x = var_11261_cast, y = var_11263_to_fp16)[name = tensor("hidden_states_453_cast")]; + tensor var_11270 = const()[name = tensor("op_11270"), val = tensor([1, 1])]; + tensor var_11272 = const()[name = tensor("op_11272"), val = tensor([1, 1])]; + tensor q_229_pad_type_0 = const()[name = tensor("q_229_pad_type_0"), val = tensor("custom")]; + tensor q_229_pad_0 = const()[name = tensor("q_229_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1162090240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1162909504))), name = 
tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_229_cast = conv(dilations = var_11272, groups = var_6865, pad = q_229_pad_0, pad_type = q_229_pad_type_0, strides = var_11270, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_453_cast)[name = tensor("q_229_cast")]; + tensor var_11276 = const()[name = tensor("op_11276"), val = tensor([1, 1])]; + tensor var_11278 = const()[name = tensor("op_11278"), val = tensor([1, 1])]; + tensor k_229_pad_type_0 = const()[name = tensor("k_229_pad_type_0"), val = tensor("custom")]; + tensor k_229_pad_0 = const()[name = tensor("k_229_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1162909632))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1163728896))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_229_cast = conv(dilations = var_11278, groups = var_6865, pad = k_229_pad_0, pad_type = k_229_pad_type_0, strides = var_11276, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_453_cast)[name = tensor("k_229_cast")]; + tensor var_11282 = const()[name = tensor("op_11282"), val = tensor([1, 1])]; + tensor var_11284 = const()[name = tensor("op_11284"), val = tensor([1, 1])]; + tensor v_229_pad_type_0 = const()[name = tensor("v_229_pad_type_0"), val = tensor("custom")]; + tensor v_229_pad_0 = const()[name = tensor("v_229_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1163729024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1164957888))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_229_cast = conv(dilations = var_11284, groups = var_6865, pad = v_229_pad_0, pad_type = v_229_pad_type_0, strides = var_11282, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_453_cast)[name = tensor("v_229_cast")]; + tensor var_11288 = const()[name = tensor("op_11288"), val = tensor([2, 20, 64, -1])]; + tensor var_11289_cast = reshape(shape = var_11288, x = q_229_cast)[name = tensor("op_11289_cast")]; + tensor var_11290 = const()[name = tensor("op_11290"), val = tensor([2, 20, 64, -1])]; + tensor var_11291_cast = reshape(shape = var_11290, x = k_229_cast)[name = tensor("op_11291_cast")]; + tensor var_11292 = const()[name = tensor("op_11292"), val = tensor([2, 20, 64, -1])]; + tensor var_11293_cast = reshape(shape = var_11292, x = v_229_cast)[name = tensor("op_11293_cast")]; + tensor attn_weights_457_transpose_x_0 = const()[name = tensor("attn_weights_457_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_457_transpose_y_0 = const()[name = tensor("attn_weights_457_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_457_cast = matmul(transpose_x = attn_weights_457_transpose_x_0, transpose_y = 
attn_weights_457_transpose_y_0, x = var_11289_cast, y = var_11291_cast)[name = tensor("attn_weights_457_cast")]; + tensor attn_weights_459_cast = mul(x = attn_weights_457_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_459_cast")]; + tensor var_11297_cast = softmax(axis = var_6849, x = attn_weights_459_cast)[name = tensor("op_11297_cast")]; + tensor attn_229_transpose_x_0 = const()[name = tensor("attn_229_transpose_x_0"), val = tensor(false)]; + tensor attn_229_transpose_y_0 = const()[name = tensor("attn_229_transpose_y_0"), val = tensor(true)]; + tensor attn_229_cast = matmul(transpose_x = attn_229_transpose_x_0, transpose_y = attn_229_transpose_y_0, x = var_11293_cast, y = var_11297_cast)[name = tensor("attn_229_cast")]; + tensor var_11301 = const()[name = tensor("op_11301"), val = tensor([2, 1280, 1, -1])]; + tensor input_659_cast = reshape(shape = var_11301, x = attn_229_cast)[name = tensor("input_659_cast")]; + tensor var_11306 = const()[name = tensor("op_11306"), val = tensor([1, 1])]; + tensor var_11308 = const()[name = tensor("op_11308"), val = tensor([1, 1])]; + tensor var_11310_pad_type_0 = const()[name = tensor("op_11310_pad_type_0"), val = tensor("custom")]; + tensor var_11310_pad_0 = const()[name = tensor("op_11310_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1164958080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1166186944))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1166187136)))]; + tensor var_11310_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_11308, groups = var_6865, pad = var_11310_pad_0, pad_type = var_11310_pad_type_0, strides = var_11306, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_659_cast)[name = tensor("op_11310_cast")]; + tensor inputs_345_cast = add(x = var_11310_cast, y = inputs_343_cast)[name = tensor("inputs_345_cast")]; + tensor var_11314 = const()[name = tensor("op_11314"), val = tensor([1])]; + tensor channels_mean_345_cast = reduce_mean(axes = var_11314, keep_dims = var_6860, x = inputs_345_cast)[name = tensor("channels_mean_345_cast")]; + tensor zero_mean_345_cast = sub(x = inputs_345_cast, y = channels_mean_345_cast)[name = tensor("zero_mean_345_cast")]; + tensor zero_mean_sq_345_cast = mul(x = zero_mean_345_cast, y = zero_mean_345_cast)[name = tensor("zero_mean_sq_345_cast")]; + tensor var_11318 = const()[name = tensor("op_11318"), val = tensor([1])]; + tensor var_11319_cast = reduce_mean(axes = var_11318, keep_dims = var_6860, x = zero_mean_sq_345_cast)[name = tensor("op_11319_cast")]; + tensor var_11320_to_fp16 = const()[name = tensor("op_11320_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11321_cast = add(x = var_11319_cast, y = var_11320_to_fp16)[name = tensor("op_11321_cast")]; + tensor denom_345_epsilon_0_to_fp16 = const()[name = tensor("denom_345_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + 
tensor denom_345_cast = rsqrt(epsilon = denom_345_epsilon_0_to_fp16, x = var_11321_cast)[name = tensor("denom_345_cast")]; + tensor out_345_cast = mul(x = zero_mean_345_cast, y = denom_345_cast)[name = tensor("out_345_cast")]; + tensor var_11325_to_fp16 = const()[name = tensor("op_11325_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1166189760)))]; + tensor var_11326_cast = add(x = out_345_cast, y = var_11325_to_fp16)[name = tensor("op_11326_cast")]; + tensor var_11328_to_fp16 = const()[name = tensor("op_11328_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1166192384)))]; + tensor hidden_states_455_cast = mul(x = var_11326_cast, y = var_11328_to_fp16)[name = tensor("hidden_states_455_cast")]; + tensor var_11335 = const()[name = tensor("op_11335"), val = tensor([1, 1])]; + tensor var_11337 = const()[name = tensor("op_11337"), val = tensor([1, 1])]; + tensor q_231_pad_type_0 = const()[name = tensor("q_231_pad_type_0"), val = tensor("custom")]; + tensor q_231_pad_0 = const()[name = tensor("q_231_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1166195008))), lut = tensor([-0x1.028p-5, -0x1.36cp-7, 0x1.348p-7, 0x1.024p-5]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_231_cast = conv(dilations = var_11337, groups = var_6865, pad = q_231_pad_0, pad_type = q_231_pad_type_0, strides = var_11335, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_455_cast)[name = tensor("q_231_cast")]; + tensor var_11341 = const()[name = tensor("op_11341"), val = tensor([1, 1])]; + tensor var_11343 = const()[name = tensor("op_11343"), val = tensor([1, 1])]; + tensor k_231_pad_type_0 = const()[name = tensor("k_231_pad_type_0"), val = tensor("custom")]; + tensor k_231_pad_0 = const()[name = tensor("k_231_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1166604672))), lut = tensor([-0x1.b3cp-6, -0x1.f7cp-8, 0x1.fb8p-8, 0x1.b6p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_231_cast = conv(dilations = var_11343, groups = var_6865, pad = k_231_pad_0, pad_type = k_231_pad_type_0, strides = var_11341, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_231_cast")]; + tensor var_11347 = const()[name = tensor("op_11347"), val = tensor([1, 1])]; + tensor var_11349 = const()[name = tensor("op_11349"), val = tensor([1, 1])]; + tensor v_231_pad_type_0 = const()[name = tensor("v_231_pad_type_0"), val = tensor("custom")]; + tensor v_231_pad_0 = const()[name = tensor("v_231_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1167260096))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1168570880))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_231_cast = conv(dilations = var_11349, groups = var_6865, pad = v_231_pad_0, pad_type = v_231_pad_type_0, strides = var_11347, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_231_cast")]; + tensor var_11353 = const()[name = tensor("op_11353"), val = tensor([2, 20, 64, -1])]; + tensor var_11354_cast = reshape(shape = var_11353, x = q_231_cast)[name = tensor("op_11354_cast")]; + tensor var_11355 = const()[name = tensor("op_11355"), val = tensor([2, 20, 64, -1])]; + tensor var_11356_cast = reshape(shape = var_11355, x = k_231_cast)[name = tensor("op_11356_cast")]; + tensor var_11357 = const()[name = tensor("op_11357"), val = tensor([2, 20, 64, -1])]; + tensor var_11358_cast = reshape(shape = var_11357, x = v_231_cast)[name = tensor("op_11358_cast")]; + tensor attn_weights_461_transpose_x_0 = const()[name = tensor("attn_weights_461_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_461_transpose_y_0 = const()[name = tensor("attn_weights_461_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_461_cast = matmul(transpose_x = attn_weights_461_transpose_x_0, transpose_y = attn_weights_461_transpose_y_0, x = var_11354_cast, y = var_11356_cast)[name = tensor("attn_weights_461_cast")]; + tensor attn_weights_463_cast = mul(x = attn_weights_461_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_463_cast")]; + tensor var_11362_cast = softmax(axis = var_6849, x = attn_weights_463_cast)[name = tensor("op_11362_cast")]; + tensor attn_231_transpose_x_0 = const()[name = tensor("attn_231_transpose_x_0"), val = tensor(false)]; + tensor attn_231_transpose_y_0 = const()[name = tensor("attn_231_transpose_y_0"), val = tensor(true)]; + tensor attn_231_cast = matmul(transpose_x = attn_231_transpose_x_0, transpose_y = attn_231_transpose_y_0, x = var_11358_cast, y = var_11362_cast)[name = tensor("attn_231_cast")]; + tensor var_11366 = const()[name = tensor("op_11366"), val = tensor([2, 1280, 1, -1])]; + tensor input_661_cast = reshape(shape = var_11366, x = attn_231_cast)[name = tensor("input_661_cast")]; + tensor var_11371 = const()[name = tensor("op_11371"), val = tensor([1, 1])]; + tensor var_11373 = const()[name = tensor("op_11373"), val = tensor([1, 1])]; + tensor var_11375_pad_type_0 = const()[name = tensor("op_11375_pad_type_0"), val = tensor("custom")]; + tensor var_11375_pad_0 = const()[name = tensor("op_11375_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1168571008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1169390272))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1169390400)))]; + tensor var_11375_cast = conv(bias = 
up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_11373, groups = var_6865, pad = var_11375_pad_0, pad_type = var_11375_pad_type_0, strides = var_11371, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_661_cast)[name = tensor("op_11375_cast")]; + tensor inputs_347_cast = add(x = var_11375_cast, y = inputs_345_cast)[name = tensor("inputs_347_cast")]; + tensor var_11379 = const()[name = tensor("op_11379"), val = tensor([1])]; + tensor channels_mean_347_cast = reduce_mean(axes = var_11379, keep_dims = var_6860, x = inputs_347_cast)[name = tensor("channels_mean_347_cast")]; + tensor zero_mean_347_cast = sub(x = inputs_347_cast, y = channels_mean_347_cast)[name = tensor("zero_mean_347_cast")]; + tensor zero_mean_sq_347_cast = mul(x = zero_mean_347_cast, y = zero_mean_347_cast)[name = tensor("zero_mean_sq_347_cast")]; + tensor var_11383 = const()[name = tensor("op_11383"), val = tensor([1])]; + tensor var_11384_cast = reduce_mean(axes = var_11383, keep_dims = var_6860, x = zero_mean_sq_347_cast)[name = tensor("op_11384_cast")]; + tensor var_11385_to_fp16 = const()[name = tensor("op_11385_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11386_cast = add(x = var_11384_cast, y = var_11385_to_fp16)[name = tensor("op_11386_cast")]; + tensor denom_347_epsilon_0_to_fp16 = const()[name = tensor("denom_347_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_347_cast = rsqrt(epsilon = denom_347_epsilon_0_to_fp16, x = var_11386_cast)[name = tensor("denom_347_cast")]; + tensor out_347_cast = mul(x = zero_mean_347_cast, y = denom_347_cast)[name = tensor("out_347_cast")]; + tensor var_11390_to_fp16 = const()[name = tensor("op_11390_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1169393024)))]; + tensor var_11391_cast = add(x = out_347_cast, y = var_11390_to_fp16)[name = tensor("op_11391_cast")]; + tensor var_11393_to_fp16 = const()[name = tensor("op_11393_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1169395648)))]; + tensor input_663_cast = mul(x = var_11391_cast, y = var_11393_to_fp16)[name = tensor("input_663_cast")]; + tensor var_11401 = const()[name = tensor("op_11401"), val = tensor([1, 1])]; + tensor var_11403 = const()[name = tensor("op_11403"), val = tensor([1, 1])]; + tensor var_11405_pad_type_0 = const()[name = tensor("op_11405_pad_type_0"), val = tensor("custom")]; + tensor var_11405_pad_0 = const()[name = tensor("op_11405_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1169398272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1179228736))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1179228928)))]; + tensor var_11405_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_11403, groups = var_6865, pad = var_11405_pad_0, pad_type = 
var_11405_pad_type_0, strides = var_11401, weight = up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_663_cast)[name = tensor("op_11405_cast")]; + tensor var_11406_split_sizes_0 = const()[name = tensor("op_11406_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11406_axis_0 = const()[name = tensor("op_11406_axis_0"), val = tensor(1)]; + tensor var_11406_cast_0, tensor var_11406_cast_1 = split(axis = var_11406_axis_0, split_sizes = var_11406_split_sizes_0, x = var_11405_cast)[name = tensor("op_11406_cast")]; + tensor var_11408_mode_0 = const()[name = tensor("op_11408_mode_0"), val = tensor("EXACT")]; + tensor var_11408_cast = gelu(mode = var_11408_mode_0, x = var_11406_cast_1)[name = tensor("op_11408_cast")]; + tensor input_665_cast = mul(x = var_11406_cast_0, y = var_11408_cast)[name = tensor("input_665_cast")]; + tensor var_11412 = const()[name = tensor("op_11412"), val = tensor([1, 1])]; + tensor var_11414 = const()[name = tensor("op_11414"), val = tensor([1, 1])]; + tensor var_11416_pad_type_0 = const()[name = tensor("op_11416_pad_type_0"), val = tensor("custom")]; + tensor var_11416_pad_0 = const()[name = tensor("op_11416_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1179249472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1184164736))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1184164928)))]; + tensor var_11416_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_11414, groups = var_6865, pad = var_11416_pad_0, pad_type = var_11416_pad_type_0, strides = var_11412, weight = up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_665_cast)[name = tensor("op_11416_cast")]; + tensor inputs_349_cast = add(x = var_11416_cast, y = inputs_347_cast)[name = tensor("inputs_349_cast")]; + tensor var_11426 = const()[name = tensor("op_11426"), val = tensor([1])]; + tensor channels_mean_349_cast = reduce_mean(axes = var_11426, keep_dims = var_6860, x = inputs_349_cast)[name = tensor("channels_mean_349_cast")]; + tensor zero_mean_349_cast = sub(x = inputs_349_cast, y = channels_mean_349_cast)[name = tensor("zero_mean_349_cast")]; + tensor zero_mean_sq_349_cast = mul(x = zero_mean_349_cast, y = zero_mean_349_cast)[name = tensor("zero_mean_sq_349_cast")]; + tensor var_11430 = const()[name = tensor("op_11430"), val = tensor([1])]; + tensor var_11431_cast = reduce_mean(axes = var_11430, keep_dims = var_6860, x = zero_mean_sq_349_cast)[name = tensor("op_11431_cast")]; + tensor var_11432_to_fp16 = const()[name = tensor("op_11432_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11433_cast = add(x = var_11431_cast, y = var_11432_to_fp16)[name = tensor("op_11433_cast")]; + tensor denom_349_epsilon_0_to_fp16 = const()[name = tensor("denom_349_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_349_cast = rsqrt(epsilon = denom_349_epsilon_0_to_fp16, x = var_11433_cast)[name 
= tensor("denom_349_cast")]; + tensor out_349_cast = mul(x = zero_mean_349_cast, y = denom_349_cast)[name = tensor("out_349_cast")]; + tensor var_11437_to_fp16 = const()[name = tensor("op_11437_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1184167552)))]; + tensor var_11438_cast = add(x = out_349_cast, y = var_11437_to_fp16)[name = tensor("op_11438_cast")]; + tensor var_11440_to_fp16 = const()[name = tensor("op_11440_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1184170176)))]; + tensor hidden_states_459_cast = mul(x = var_11438_cast, y = var_11440_to_fp16)[name = tensor("hidden_states_459_cast")]; + tensor var_11447 = const()[name = tensor("op_11447"), val = tensor([1, 1])]; + tensor var_11449 = const()[name = tensor("op_11449"), val = tensor([1, 1])]; + tensor q_233_pad_type_0 = const()[name = tensor("q_233_pad_type_0"), val = tensor("custom")]; + tensor q_233_pad_0 = const()[name = tensor("q_233_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1184172800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1184992064))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_233_cast = conv(dilations = var_11449, groups = var_6865, pad = q_233_pad_0, pad_type = q_233_pad_type_0, strides = var_11447, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_459_cast)[name = tensor("q_233_cast")]; + tensor var_11453 = const()[name = tensor("op_11453"), val = tensor([1, 1])]; + tensor var_11455 = const()[name = tensor("op_11455"), val = tensor([1, 1])]; + tensor k_233_pad_type_0 = const()[name = tensor("k_233_pad_type_0"), val = tensor("custom")]; + tensor k_233_pad_0 = const()[name = tensor("k_233_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1184992192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1185811456))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_233_cast = conv(dilations = var_11455, groups = var_6865, pad = k_233_pad_0, pad_type = k_233_pad_type_0, strides = var_11453, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_459_cast)[name = tensor("k_233_cast")]; + tensor var_11459 = const()[name = tensor("op_11459"), val = tensor([1, 1])]; + tensor var_11461 = const()[name = tensor("op_11461"), val = tensor([1, 1])]; + tensor v_233_pad_type_0 = const()[name = tensor("v_233_pad_type_0"), val = tensor("custom")]; + tensor v_233_pad_0 = const()[name = tensor("v_233_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1185811584))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1187040448))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_233_cast = conv(dilations = var_11461, groups = var_6865, pad = v_233_pad_0, pad_type = v_233_pad_type_0, strides = var_11459, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_459_cast)[name = tensor("v_233_cast")]; + tensor var_11465 = const()[name = tensor("op_11465"), val = tensor([2, 20, 64, -1])]; + tensor var_11466_cast = reshape(shape = var_11465, x = q_233_cast)[name = tensor("op_11466_cast")]; + tensor var_11467 = const()[name = tensor("op_11467"), val = tensor([2, 20, 64, -1])]; + tensor var_11468_cast = reshape(shape = var_11467, x = k_233_cast)[name = tensor("op_11468_cast")]; + tensor var_11469 = const()[name = tensor("op_11469"), val = tensor([2, 20, 64, -1])]; + tensor var_11470_cast = reshape(shape = var_11469, x = v_233_cast)[name = tensor("op_11470_cast")]; + tensor attn_weights_465_transpose_x_0 = const()[name = tensor("attn_weights_465_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_465_transpose_y_0 = const()[name = tensor("attn_weights_465_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_465_cast = matmul(transpose_x = attn_weights_465_transpose_x_0, transpose_y = attn_weights_465_transpose_y_0, x = var_11466_cast, y = var_11468_cast)[name = tensor("attn_weights_465_cast")]; + tensor attn_weights_467_cast = mul(x = attn_weights_465_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_467_cast")]; + tensor var_11474_cast = softmax(axis = var_6849, x = attn_weights_467_cast)[name = tensor("op_11474_cast")]; + tensor attn_233_transpose_x_0 = const()[name = tensor("attn_233_transpose_x_0"), val = tensor(false)]; + tensor attn_233_transpose_y_0 = const()[name = tensor("attn_233_transpose_y_0"), val = tensor(true)]; + tensor attn_233_cast = matmul(transpose_x = attn_233_transpose_x_0, transpose_y = attn_233_transpose_y_0, x = var_11470_cast, y = var_11474_cast)[name = tensor("attn_233_cast")]; + tensor var_11478 = const()[name = tensor("op_11478"), val = tensor([2, 1280, 1, -1])]; + tensor input_667_cast = reshape(shape = var_11478, x = attn_233_cast)[name = tensor("input_667_cast")]; + tensor var_11483 = const()[name = tensor("op_11483"), val = tensor([1, 1])]; + tensor var_11485 = const()[name = tensor("op_11485"), val = tensor([1, 1])]; + tensor var_11487_pad_type_0 = const()[name = tensor("op_11487_pad_type_0"), val = tensor("custom")]; + tensor var_11487_pad_0 = const()[name = tensor("op_11487_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1187040640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1188269504))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1188269696)))]; + tensor var_11487_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_11485, 
groups = var_6865, pad = var_11487_pad_0, pad_type = var_11487_pad_type_0, strides = var_11483, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_667_cast)[name = tensor("op_11487_cast")]; + tensor inputs_351_cast = add(x = var_11487_cast, y = inputs_349_cast)[name = tensor("inputs_351_cast")]; + tensor var_11491 = const()[name = tensor("op_11491"), val = tensor([1])]; + tensor channels_mean_351_cast = reduce_mean(axes = var_11491, keep_dims = var_6860, x = inputs_351_cast)[name = tensor("channels_mean_351_cast")]; + tensor zero_mean_351_cast = sub(x = inputs_351_cast, y = channels_mean_351_cast)[name = tensor("zero_mean_351_cast")]; + tensor zero_mean_sq_351_cast = mul(x = zero_mean_351_cast, y = zero_mean_351_cast)[name = tensor("zero_mean_sq_351_cast")]; + tensor var_11495 = const()[name = tensor("op_11495"), val = tensor([1])]; + tensor var_11496_cast = reduce_mean(axes = var_11495, keep_dims = var_6860, x = zero_mean_sq_351_cast)[name = tensor("op_11496_cast")]; + tensor var_11497_to_fp16 = const()[name = tensor("op_11497_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11498_cast = add(x = var_11496_cast, y = var_11497_to_fp16)[name = tensor("op_11498_cast")]; + tensor denom_351_epsilon_0_to_fp16 = const()[name = tensor("denom_351_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_351_cast = rsqrt(epsilon = denom_351_epsilon_0_to_fp16, x = var_11498_cast)[name = tensor("denom_351_cast")]; + tensor out_351_cast = mul(x = zero_mean_351_cast, y = denom_351_cast)[name = tensor("out_351_cast")]; + tensor var_11502_to_fp16 = const()[name = tensor("op_11502_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1188272320)))]; + tensor var_11503_cast = add(x = out_351_cast, y = var_11502_to_fp16)[name = tensor("op_11503_cast")]; + tensor var_11505_to_fp16 = const()[name = tensor("op_11505_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1188274944)))]; + tensor hidden_states_461_cast = mul(x = var_11503_cast, y = var_11505_to_fp16)[name = tensor("hidden_states_461_cast")]; + tensor var_11512 = const()[name = tensor("op_11512"), val = tensor([1, 1])]; + tensor var_11514 = const()[name = tensor("op_11514"), val = tensor([1, 1])]; + tensor q_235_pad_type_0 = const()[name = tensor("q_235_pad_type_0"), val = tensor("custom")]; + tensor q_235_pad_0 = const()[name = tensor("q_235_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1188277568))), lut = tensor([-0x1.c8cp-6, -0x1.17p-7, 0x1.178p-7, 0x1.c9p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_235_cast = conv(dilations = var_11514, groups = var_6865, pad = q_235_pad_0, pad_type = q_235_pad_type_0, strides = var_11512, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_461_cast)[name = tensor("q_235_cast")]; + tensor var_11518 = const()[name = tensor("op_11518"), val = tensor([1, 1])]; + tensor var_11520 = const()[name = tensor("op_11520"), val = tensor([1, 1])]; + tensor k_235_pad_type_0 = const()[name = tensor("k_235_pad_type_0"), val = tensor("custom")]; + tensor k_235_pad_0 = const()[name = tensor("k_235_pad_0"), val = 
tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1188687232))), lut = tensor([-0x1.718p-6, -0x1.b18p-8, 0x1.b68p-8, 0x1.728p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_235_cast = conv(dilations = var_11520, groups = var_6865, pad = k_235_pad_0, pad_type = k_235_pad_type_0, strides = var_11518, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_235_cast")]; + tensor var_11524 = const()[name = tensor("op_11524"), val = tensor([1, 1])]; + tensor var_11526 = const()[name = tensor("op_11526"), val = tensor([1, 1])]; + tensor v_235_pad_type_0 = const()[name = tensor("v_235_pad_type_0"), val = tensor("custom")]; + tensor v_235_pad_0 = const()[name = tensor("v_235_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1189342656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1190653440))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_235_cast = conv(dilations = var_11526, groups = var_6865, pad = v_235_pad_0, pad_type = v_235_pad_type_0, strides = var_11524, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_235_cast")]; + tensor var_11530 = const()[name = tensor("op_11530"), val = tensor([2, 20, 64, -1])]; + tensor var_11531_cast = reshape(shape = var_11530, x = q_235_cast)[name = tensor("op_11531_cast")]; + tensor var_11532 = const()[name = tensor("op_11532"), val = tensor([2, 20, 64, -1])]; + tensor var_11533_cast = reshape(shape = var_11532, x = k_235_cast)[name = tensor("op_11533_cast")]; + tensor var_11534 = const()[name = tensor("op_11534"), val = tensor([2, 20, 64, -1])]; + tensor var_11535_cast = reshape(shape = var_11534, x = v_235_cast)[name = tensor("op_11535_cast")]; + tensor attn_weights_469_transpose_x_0 = const()[name = tensor("attn_weights_469_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_469_transpose_y_0 = const()[name = tensor("attn_weights_469_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_469_cast = matmul(transpose_x = attn_weights_469_transpose_x_0, transpose_y = attn_weights_469_transpose_y_0, x = var_11531_cast, y = var_11533_cast)[name = tensor("attn_weights_469_cast")]; + tensor attn_weights_471_cast = mul(x = attn_weights_469_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_471_cast")]; + tensor var_11539_cast = softmax(axis = var_6849, x = attn_weights_471_cast)[name = tensor("op_11539_cast")]; + tensor attn_235_transpose_x_0 = const()[name = tensor("attn_235_transpose_x_0"), val = tensor(false)]; + tensor attn_235_transpose_y_0 = const()[name = tensor("attn_235_transpose_y_0"), val = tensor(true)]; + tensor attn_235_cast = matmul(transpose_x = attn_235_transpose_x_0, transpose_y = attn_235_transpose_y_0, x = var_11535_cast, y = var_11539_cast)[name = tensor("attn_235_cast")]; + tensor var_11543 = const()[name = tensor("op_11543"), val = 
tensor([2, 1280, 1, -1])]; + tensor input_669_cast = reshape(shape = var_11543, x = attn_235_cast)[name = tensor("input_669_cast")]; + tensor var_11548 = const()[name = tensor("op_11548"), val = tensor([1, 1])]; + tensor var_11550 = const()[name = tensor("op_11550"), val = tensor([1, 1])]; + tensor var_11552_pad_type_0 = const()[name = tensor("op_11552_pad_type_0"), val = tensor("custom")]; + tensor var_11552_pad_0 = const()[name = tensor("op_11552_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1190653568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1191472832))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1191472960)))]; + tensor var_11552_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_11550, groups = var_6865, pad = var_11552_pad_0, pad_type = var_11552_pad_type_0, strides = var_11548, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_669_cast)[name = tensor("op_11552_cast")]; + tensor inputs_353_cast = add(x = var_11552_cast, y = inputs_351_cast)[name = tensor("inputs_353_cast")]; + tensor var_11556 = const()[name = tensor("op_11556"), val = tensor([1])]; + tensor channels_mean_353_cast = reduce_mean(axes = var_11556, keep_dims = var_6860, x = inputs_353_cast)[name = tensor("channels_mean_353_cast")]; + tensor zero_mean_353_cast = sub(x = inputs_353_cast, y = channels_mean_353_cast)[name = tensor("zero_mean_353_cast")]; + tensor zero_mean_sq_353_cast = mul(x = zero_mean_353_cast, y = zero_mean_353_cast)[name = tensor("zero_mean_sq_353_cast")]; + tensor var_11560 = const()[name = tensor("op_11560"), val = tensor([1])]; + tensor var_11561_cast = reduce_mean(axes = var_11560, keep_dims = var_6860, x = zero_mean_sq_353_cast)[name = tensor("op_11561_cast")]; + tensor var_11562_to_fp16 = const()[name = tensor("op_11562_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11563_cast = add(x = var_11561_cast, y = var_11562_to_fp16)[name = tensor("op_11563_cast")]; + tensor denom_353_epsilon_0_to_fp16 = const()[name = tensor("denom_353_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_353_cast = rsqrt(epsilon = denom_353_epsilon_0_to_fp16, x = var_11563_cast)[name = tensor("denom_353_cast")]; + tensor out_353_cast = mul(x = zero_mean_353_cast, y = denom_353_cast)[name = tensor("out_353_cast")]; + tensor var_11567_to_fp16 = const()[name = tensor("op_11567_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1191475584)))]; + tensor var_11568_cast = add(x = out_353_cast, y = var_11567_to_fp16)[name = tensor("op_11568_cast")]; + tensor var_11570_to_fp16 = const()[name = tensor("op_11570_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1191478208)))]; + tensor input_671_cast = mul(x = var_11568_cast, y = var_11570_to_fp16)[name = tensor("input_671_cast")]; + tensor 
var_11578 = const()[name = tensor("op_11578"), val = tensor([1, 1])]; + tensor var_11580 = const()[name = tensor("op_11580"), val = tensor([1, 1])]; + tensor var_11582_pad_type_0 = const()[name = tensor("op_11582_pad_type_0"), val = tensor("custom")]; + tensor var_11582_pad_0 = const()[name = tensor("op_11582_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1191480832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1201311296))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1201311488)))]; + tensor var_11582_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_11580, groups = var_6865, pad = var_11582_pad_0, pad_type = var_11582_pad_type_0, strides = var_11578, weight = up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_671_cast)[name = tensor("op_11582_cast")]; + tensor var_11583_split_sizes_0 = const()[name = tensor("op_11583_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11583_axis_0 = const()[name = tensor("op_11583_axis_0"), val = tensor(1)]; + tensor var_11583_cast_0, tensor var_11583_cast_1 = split(axis = var_11583_axis_0, split_sizes = var_11583_split_sizes_0, x = var_11582_cast)[name = tensor("op_11583_cast")]; + tensor var_11585_mode_0 = const()[name = tensor("op_11585_mode_0"), val = tensor("EXACT")]; + tensor var_11585_cast = gelu(mode = var_11585_mode_0, x = var_11583_cast_1)[name = tensor("op_11585_cast")]; + tensor input_673_cast = mul(x = var_11583_cast_0, y = var_11585_cast)[name = tensor("input_673_cast")]; + tensor var_11589 = const()[name = tensor("op_11589"), val = tensor([1, 1])]; + tensor var_11591 = const()[name = tensor("op_11591"), val = tensor([1, 1])]; + tensor var_11593_pad_type_0 = const()[name = tensor("op_11593_pad_type_0"), val = tensor("custom")]; + tensor var_11593_pad_0 = const()[name = tensor("op_11593_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1201332032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1206247296))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1206247488)))]; + tensor var_11593_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_11591, groups = var_6865, pad = var_11593_pad_0, pad_type = var_11593_pad_type_0, strides = var_11589, weight = 
up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_673_cast)[name = tensor("op_11593_cast")]; + tensor inputs_355_cast = add(x = var_11593_cast, y = inputs_353_cast)[name = tensor("inputs_355_cast")]; + tensor var_11603 = const()[name = tensor("op_11603"), val = tensor([1])]; + tensor channels_mean_355_cast = reduce_mean(axes = var_11603, keep_dims = var_6860, x = inputs_355_cast)[name = tensor("channels_mean_355_cast")]; + tensor zero_mean_355_cast = sub(x = inputs_355_cast, y = channels_mean_355_cast)[name = tensor("zero_mean_355_cast")]; + tensor zero_mean_sq_355_cast = mul(x = zero_mean_355_cast, y = zero_mean_355_cast)[name = tensor("zero_mean_sq_355_cast")]; + tensor var_11607 = const()[name = tensor("op_11607"), val = tensor([1])]; + tensor var_11608_cast = reduce_mean(axes = var_11607, keep_dims = var_6860, x = zero_mean_sq_355_cast)[name = tensor("op_11608_cast")]; + tensor var_11609_to_fp16 = const()[name = tensor("op_11609_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11610_cast = add(x = var_11608_cast, y = var_11609_to_fp16)[name = tensor("op_11610_cast")]; + tensor denom_355_epsilon_0_to_fp16 = const()[name = tensor("denom_355_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_355_cast = rsqrt(epsilon = denom_355_epsilon_0_to_fp16, x = var_11610_cast)[name = tensor("denom_355_cast")]; + tensor out_355_cast = mul(x = zero_mean_355_cast, y = denom_355_cast)[name = tensor("out_355_cast")]; + tensor var_11614_to_fp16 = const()[name = tensor("op_11614_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1206250112)))]; + tensor var_11615_cast = add(x = out_355_cast, y = var_11614_to_fp16)[name = tensor("op_11615_cast")]; + tensor var_11617_to_fp16 = const()[name = tensor("op_11617_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1206252736)))]; + tensor hidden_states_465_cast = mul(x = var_11615_cast, y = var_11617_to_fp16)[name = tensor("hidden_states_465_cast")]; + tensor var_11624 = const()[name = tensor("op_11624"), val = tensor([1, 1])]; + tensor var_11626 = const()[name = tensor("op_11626"), val = tensor([1, 1])]; + tensor q_237_pad_type_0 = const()[name = tensor("q_237_pad_type_0"), val = tensor("custom")]; + tensor q_237_pad_0 = const()[name = tensor("q_237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1206255360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1207074624))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_237_cast = conv(dilations = var_11626, groups = var_6865, pad = q_237_pad_0, pad_type = q_237_pad_type_0, strides = var_11624, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_465_cast)[name = tensor("q_237_cast")]; + tensor var_11630 = const()[name = tensor("op_11630"), val = tensor([1, 1])]; + tensor var_11632 = const()[name = tensor("op_11632"), val = tensor([1, 1])]; + tensor k_237_pad_type_0 = const()[name = tensor("k_237_pad_type_0"), val = tensor("custom")]; + tensor k_237_pad_0 = const()[name = tensor("k_237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1207074752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1207894016))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_237_cast = conv(dilations = var_11632, groups = var_6865, pad = k_237_pad_0, pad_type = k_237_pad_type_0, strides = var_11630, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_465_cast)[name = tensor("k_237_cast")]; + tensor var_11636 = const()[name = tensor("op_11636"), val = tensor([1, 1])]; + tensor var_11638 = const()[name = tensor("op_11638"), val = tensor([1, 1])]; + tensor v_237_pad_type_0 = const()[name = tensor("v_237_pad_type_0"), val = tensor("custom")]; + tensor v_237_pad_0 = const()[name = tensor("v_237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1207894144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1209123008))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_237_cast = conv(dilations = var_11638, groups = var_6865, pad = v_237_pad_0, pad_type = v_237_pad_type_0, strides = var_11636, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_465_cast)[name = tensor("v_237_cast")]; + tensor var_11642 = const()[name = tensor("op_11642"), val = tensor([2, 20, 64, -1])]; + tensor var_11643_cast = reshape(shape = var_11642, x = q_237_cast)[name = tensor("op_11643_cast")]; + tensor var_11644 = const()[name = tensor("op_11644"), val = tensor([2, 20, 64, -1])]; + tensor var_11645_cast = reshape(shape = var_11644, x = k_237_cast)[name = tensor("op_11645_cast")]; + tensor var_11646 = const()[name = tensor("op_11646"), val = tensor([2, 20, 64, -1])]; + tensor var_11647_cast = reshape(shape = var_11646, x = v_237_cast)[name = tensor("op_11647_cast")]; + tensor attn_weights_473_transpose_x_0 = const()[name = tensor("attn_weights_473_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_473_transpose_y_0 = const()[name = tensor("attn_weights_473_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_473_cast = matmul(transpose_x = attn_weights_473_transpose_x_0, transpose_y = attn_weights_473_transpose_y_0, x = var_11643_cast, y = var_11645_cast)[name = tensor("attn_weights_473_cast")]; + tensor attn_weights_475_cast = mul(x = attn_weights_473_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_475_cast")]; + tensor var_11651_cast = softmax(axis = var_6849, x = attn_weights_475_cast)[name = tensor("op_11651_cast")]; + tensor attn_237_transpose_x_0 = const()[name = tensor("attn_237_transpose_x_0"), val = tensor(false)]; + tensor attn_237_transpose_y_0 = const()[name = tensor("attn_237_transpose_y_0"), val = tensor(true)]; + tensor attn_237_cast = matmul(transpose_x = attn_237_transpose_x_0, transpose_y = attn_237_transpose_y_0, x = var_11647_cast, y = var_11651_cast)[name = tensor("attn_237_cast")]; + tensor var_11655 = const()[name = tensor("op_11655"), val 
= tensor([2, 1280, 1, -1])]; + tensor input_675_cast = reshape(shape = var_11655, x = attn_237_cast)[name = tensor("input_675_cast")]; + tensor var_11660 = const()[name = tensor("op_11660"), val = tensor([1, 1])]; + tensor var_11662 = const()[name = tensor("op_11662"), val = tensor([1, 1])]; + tensor var_11664_pad_type_0 = const()[name = tensor("op_11664_pad_type_0"), val = tensor("custom")]; + tensor var_11664_pad_0 = const()[name = tensor("op_11664_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1209123200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1210352064))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1210352256)))]; + tensor var_11664_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_11662, groups = var_6865, pad = var_11664_pad_0, pad_type = var_11664_pad_type_0, strides = var_11660, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_675_cast)[name = tensor("op_11664_cast")]; + tensor inputs_357_cast = add(x = var_11664_cast, y = inputs_355_cast)[name = tensor("inputs_357_cast")]; + tensor var_11668 = const()[name = tensor("op_11668"), val = tensor([1])]; + tensor channels_mean_357_cast = reduce_mean(axes = var_11668, keep_dims = var_6860, x = inputs_357_cast)[name = tensor("channels_mean_357_cast")]; + tensor zero_mean_357_cast = sub(x = inputs_357_cast, y = channels_mean_357_cast)[name = tensor("zero_mean_357_cast")]; + tensor zero_mean_sq_357_cast = mul(x = zero_mean_357_cast, y = zero_mean_357_cast)[name = tensor("zero_mean_sq_357_cast")]; + tensor var_11672 = const()[name = tensor("op_11672"), val = tensor([1])]; + tensor var_11673_cast = reduce_mean(axes = var_11672, keep_dims = var_6860, x = zero_mean_sq_357_cast)[name = tensor("op_11673_cast")]; + tensor var_11674_to_fp16 = const()[name = tensor("op_11674_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11675_cast = add(x = var_11673_cast, y = var_11674_to_fp16)[name = tensor("op_11675_cast")]; + tensor denom_357_epsilon_0_to_fp16 = const()[name = tensor("denom_357_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_357_cast = rsqrt(epsilon = denom_357_epsilon_0_to_fp16, x = var_11675_cast)[name = tensor("denom_357_cast")]; + tensor out_357_cast = mul(x = zero_mean_357_cast, y = denom_357_cast)[name = tensor("out_357_cast")]; + tensor var_11679_to_fp16 = const()[name = tensor("op_11679_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1210354880)))]; + tensor var_11680_cast = add(x = out_357_cast, y = var_11679_to_fp16)[name = tensor("op_11680_cast")]; + tensor var_11682_to_fp16 = const()[name = tensor("op_11682_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1210357504)))]; + tensor hidden_states_467_cast = mul(x = var_11680_cast, y = var_11682_to_fp16)[name = 
tensor("hidden_states_467_cast")]; + tensor var_11689 = const()[name = tensor("op_11689"), val = tensor([1, 1])]; + tensor var_11691 = const()[name = tensor("op_11691"), val = tensor([1, 1])]; + tensor q_239_pad_type_0 = const()[name = tensor("q_239_pad_type_0"), val = tensor("custom")]; + tensor q_239_pad_0 = const()[name = tensor("q_239_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1210360128))), lut = tensor([-0x1.b08p-6, -0x1.0c4p-7, 0x1.09p-7, 0x1.af4p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_239_cast = conv(dilations = var_11691, groups = var_6865, pad = q_239_pad_0, pad_type = q_239_pad_type_0, strides = var_11689, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_467_cast)[name = tensor("q_239_cast")]; + tensor var_11695 = const()[name = tensor("op_11695"), val = tensor([1, 1])]; + tensor var_11697 = const()[name = tensor("op_11697"), val = tensor([1, 1])]; + tensor k_239_pad_type_0 = const()[name = tensor("k_239_pad_type_0"), val = tensor("custom")]; + tensor k_239_pad_0 = const()[name = tensor("k_239_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1210769792))), lut = tensor([-0x1.50cp-6, -0x1.90cp-8, 0x1.8e4p-8, 0x1.504p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_239_cast = conv(dilations = var_11697, groups = var_6865, pad = k_239_pad_0, pad_type = k_239_pad_type_0, strides = var_11695, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_239_cast")]; + tensor var_11701 = const()[name = tensor("op_11701"), val = tensor([1, 1])]; + tensor var_11703 = const()[name = tensor("op_11703"), val = tensor([1, 1])]; + tensor v_239_pad_type_0 = const()[name = tensor("v_239_pad_type_0"), val = tensor("custom")]; + tensor v_239_pad_0 = const()[name = tensor("v_239_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1211425216))), lut = tensor([-0x1.80cp-6, -0x1.ac8p-8, 0x1.b34p-8, 0x1.82cp-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_239_cast = conv(dilations = var_11703, groups = var_6865, pad = v_239_pad_0, pad_type = v_239_pad_type_0, strides = var_11701, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_239_cast")]; + tensor var_11707 = const()[name = tensor("op_11707"), val = tensor([2, 20, 64, -1])]; + tensor var_11708_cast = reshape(shape = var_11707, x = q_239_cast)[name = tensor("op_11708_cast")]; + tensor var_11709 = const()[name = tensor("op_11709"), val = tensor([2, 20, 64, -1])]; + tensor var_11710_cast = reshape(shape = var_11709, x = 
k_239_cast)[name = tensor("op_11710_cast")]; + tensor var_11711 = const()[name = tensor("op_11711"), val = tensor([2, 20, 64, -1])]; + tensor var_11712_cast = reshape(shape = var_11711, x = v_239_cast)[name = tensor("op_11712_cast")]; + tensor attn_weights_477_transpose_x_0 = const()[name = tensor("attn_weights_477_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_477_transpose_y_0 = const()[name = tensor("attn_weights_477_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_477_cast = matmul(transpose_x = attn_weights_477_transpose_x_0, transpose_y = attn_weights_477_transpose_y_0, x = var_11708_cast, y = var_11710_cast)[name = tensor("attn_weights_477_cast")]; + tensor attn_weights_479_cast = mul(x = attn_weights_477_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_479_cast")]; + tensor var_11716_cast = softmax(axis = var_6849, x = attn_weights_479_cast)[name = tensor("op_11716_cast")]; + tensor attn_239_transpose_x_0 = const()[name = tensor("attn_239_transpose_x_0"), val = tensor(false)]; + tensor attn_239_transpose_y_0 = const()[name = tensor("attn_239_transpose_y_0"), val = tensor(true)]; + tensor attn_239_cast = matmul(transpose_x = attn_239_transpose_x_0, transpose_y = attn_239_transpose_y_0, x = var_11712_cast, y = var_11716_cast)[name = tensor("attn_239_cast")]; + tensor var_11720 = const()[name = tensor("op_11720"), val = tensor([2, 1280, 1, -1])]; + tensor input_677_cast = reshape(shape = var_11720, x = attn_239_cast)[name = tensor("input_677_cast")]; + tensor var_11725 = const()[name = tensor("op_11725"), val = tensor([1, 1])]; + tensor var_11727 = const()[name = tensor("op_11727"), val = tensor([1, 1])]; + tensor var_11729_pad_type_0 = const()[name = tensor("op_11729_pad_type_0"), val = tensor("custom")]; + tensor var_11729_pad_0 = const()[name = tensor("op_11729_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1212080640))), lut = tensor([-0x1.7e8p-8, 0x1.7fp-8]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1212285504)))]; + tensor var_11729_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_11727, groups = var_6865, pad = var_11729_pad_0, pad_type = var_11729_pad_type_0, strides = var_11725, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_677_cast)[name = tensor("op_11729_cast")]; + tensor inputs_359_cast = add(x = var_11729_cast, y = inputs_357_cast)[name = tensor("inputs_359_cast")]; + tensor var_11733 = const()[name = tensor("op_11733"), val = tensor([1])]; + tensor channels_mean_359_cast = reduce_mean(axes = var_11733, keep_dims = var_6860, x = inputs_359_cast)[name = tensor("channels_mean_359_cast")]; + tensor zero_mean_359_cast = sub(x = inputs_359_cast, y = channels_mean_359_cast)[name = tensor("zero_mean_359_cast")]; + tensor zero_mean_sq_359_cast = mul(x = zero_mean_359_cast, y = zero_mean_359_cast)[name = tensor("zero_mean_sq_359_cast")]; + tensor var_11737 = 
const()[name = tensor("op_11737"), val = tensor([1])]; + tensor var_11738_cast = reduce_mean(axes = var_11737, keep_dims = var_6860, x = zero_mean_sq_359_cast)[name = tensor("op_11738_cast")]; + tensor var_11739_to_fp16 = const()[name = tensor("op_11739_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11740_cast = add(x = var_11738_cast, y = var_11739_to_fp16)[name = tensor("op_11740_cast")]; + tensor denom_359_epsilon_0_to_fp16 = const()[name = tensor("denom_359_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_359_cast = rsqrt(epsilon = denom_359_epsilon_0_to_fp16, x = var_11740_cast)[name = tensor("denom_359_cast")]; + tensor out_359_cast = mul(x = zero_mean_359_cast, y = denom_359_cast)[name = tensor("out_359_cast")]; + tensor var_11744_to_fp16 = const()[name = tensor("op_11744_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1212288128)))]; + tensor var_11745_cast = add(x = out_359_cast, y = var_11744_to_fp16)[name = tensor("op_11745_cast")]; + tensor var_11747_to_fp16 = const()[name = tensor("op_11747_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1212290752)))]; + tensor input_679_cast = mul(x = var_11745_cast, y = var_11747_to_fp16)[name = tensor("input_679_cast")]; + tensor var_11755 = const()[name = tensor("op_11755"), val = tensor([1, 1])]; + tensor var_11757 = const()[name = tensor("op_11757"), val = tensor([1, 1])]; + tensor var_11759_pad_type_0 = const()[name = tensor("op_11759_pad_type_0"), val = tensor("custom")]; + tensor var_11759_pad_0 = const()[name = tensor("op_11759_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1212293376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1222123840))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1222124032)))]; + tensor var_11759_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_11757, groups = var_6865, pad = var_11759_pad_0, pad_type = var_11759_pad_type_0, strides = var_11755, weight = up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_679_cast)[name = tensor("op_11759_cast")]; + tensor var_11760_split_sizes_0 = const()[name = tensor("op_11760_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11760_axis_0 = const()[name = tensor("op_11760_axis_0"), val = tensor(1)]; + tensor var_11760_cast_0, tensor var_11760_cast_1 = split(axis = var_11760_axis_0, split_sizes = var_11760_split_sizes_0, x = var_11759_cast)[name = tensor("op_11760_cast")]; + tensor var_11762_mode_0 = const()[name = tensor("op_11762_mode_0"), val = tensor("EXACT")]; + tensor var_11762_cast = gelu(mode = var_11762_mode_0, x = var_11760_cast_1)[name = tensor("op_11762_cast")]; + tensor input_681_cast = mul(x = var_11760_cast_0, y = var_11762_cast)[name = tensor("input_681_cast")]; + tensor var_11766 = const()[name = tensor("op_11766"), val = 
tensor([1, 1])]; + tensor var_11768 = const()[name = tensor("op_11768"), val = tensor([1, 1])]; + tensor var_11770_pad_type_0 = const()[name = tensor("op_11770_pad_type_0"), val = tensor("custom")]; + tensor var_11770_pad_0 = const()[name = tensor("op_11770_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1222144576))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1227059840))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1227060032)))]; + tensor var_11770_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_11768, groups = var_6865, pad = var_11770_pad_0, pad_type = var_11770_pad_type_0, strides = var_11766, weight = up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_681_cast)[name = tensor("op_11770_cast")]; + tensor inputs_361_cast = add(x = var_11770_cast, y = inputs_359_cast)[name = tensor("inputs_361_cast")]; + tensor var_11780 = const()[name = tensor("op_11780"), val = tensor([1])]; + tensor channels_mean_361_cast = reduce_mean(axes = var_11780, keep_dims = var_6860, x = inputs_361_cast)[name = tensor("channels_mean_361_cast")]; + tensor zero_mean_361_cast = sub(x = inputs_361_cast, y = channels_mean_361_cast)[name = tensor("zero_mean_361_cast")]; + tensor zero_mean_sq_361_cast = mul(x = zero_mean_361_cast, y = zero_mean_361_cast)[name = tensor("zero_mean_sq_361_cast")]; + tensor var_11784 = const()[name = tensor("op_11784"), val = tensor([1])]; + tensor var_11785_cast = reduce_mean(axes = var_11784, keep_dims = var_6860, x = zero_mean_sq_361_cast)[name = tensor("op_11785_cast")]; + tensor var_11786_to_fp16 = const()[name = tensor("op_11786_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11787_cast = add(x = var_11785_cast, y = var_11786_to_fp16)[name = tensor("op_11787_cast")]; + tensor denom_361_epsilon_0_to_fp16 = const()[name = tensor("denom_361_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_361_cast = rsqrt(epsilon = denom_361_epsilon_0_to_fp16, x = var_11787_cast)[name = tensor("denom_361_cast")]; + tensor out_361_cast = mul(x = zero_mean_361_cast, y = denom_361_cast)[name = tensor("out_361_cast")]; + tensor var_11791_to_fp16 = const()[name = tensor("op_11791_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1227062656)))]; + tensor var_11792_cast = add(x = out_361_cast, y = var_11791_to_fp16)[name = tensor("op_11792_cast")]; + tensor var_11794_to_fp16 = const()[name = tensor("op_11794_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1227065280)))]; + tensor hidden_states_471_cast = mul(x = var_11792_cast, y = var_11794_to_fp16)[name = tensor("hidden_states_471_cast")]; + tensor var_11801 = const()[name = tensor("op_11801"), val = tensor([1, 1])]; + tensor var_11803 = const()[name = tensor("op_11803"), val = tensor([1, 1])]; + tensor q_241_pad_type_0 = const()[name = tensor("q_241_pad_type_0"), 
val = tensor("custom")]; + tensor q_241_pad_0 = const()[name = tensor("q_241_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1227067904))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1227887168))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_241_cast = conv(dilations = var_11803, groups = var_6865, pad = q_241_pad_0, pad_type = q_241_pad_type_0, strides = var_11801, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_471_cast)[name = tensor("q_241_cast")]; + tensor var_11807 = const()[name = tensor("op_11807"), val = tensor([1, 1])]; + tensor var_11809 = const()[name = tensor("op_11809"), val = tensor([1, 1])]; + tensor k_241_pad_type_0 = const()[name = tensor("k_241_pad_type_0"), val = tensor("custom")]; + tensor k_241_pad_0 = const()[name = tensor("k_241_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1227887296))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1228706560))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_241_cast = conv(dilations = var_11809, groups = var_6865, pad = k_241_pad_0, pad_type = k_241_pad_type_0, strides = var_11807, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_471_cast)[name = tensor("k_241_cast")]; + tensor var_11813 = const()[name = tensor("op_11813"), val = tensor([1, 1])]; + tensor var_11815 = const()[name = tensor("op_11815"), val = tensor([1, 1])]; + tensor v_241_pad_type_0 = const()[name = tensor("v_241_pad_type_0"), val = tensor("custom")]; + tensor v_241_pad_0 = const()[name = tensor("v_241_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1228706688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1229935552))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_241_cast = conv(dilations = var_11815, groups = var_6865, pad = v_241_pad_0, pad_type = v_241_pad_type_0, strides = var_11813, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_471_cast)[name = tensor("v_241_cast")]; + tensor var_11819 = const()[name = tensor("op_11819"), val = tensor([2, 20, 64, -1])]; + tensor var_11820_cast = reshape(shape = var_11819, x = q_241_cast)[name = tensor("op_11820_cast")]; + tensor var_11821 = const()[name = tensor("op_11821"), val = tensor([2, 20, 64, -1])]; + tensor var_11822_cast = reshape(shape = var_11821, x = k_241_cast)[name = tensor("op_11822_cast")]; + tensor var_11823 = const()[name = tensor("op_11823"), val = tensor([2, 20, 64, -1])]; + tensor 
var_11824_cast = reshape(shape = var_11823, x = v_241_cast)[name = tensor("op_11824_cast")]; + tensor attn_weights_481_transpose_x_0 = const()[name = tensor("attn_weights_481_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_481_transpose_y_0 = const()[name = tensor("attn_weights_481_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_481_cast = matmul(transpose_x = attn_weights_481_transpose_x_0, transpose_y = attn_weights_481_transpose_y_0, x = var_11820_cast, y = var_11822_cast)[name = tensor("attn_weights_481_cast")]; + tensor attn_weights_483_cast = mul(x = attn_weights_481_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_483_cast")]; + tensor var_11828_cast = softmax(axis = var_6849, x = attn_weights_483_cast)[name = tensor("op_11828_cast")]; + tensor attn_241_transpose_x_0 = const()[name = tensor("attn_241_transpose_x_0"), val = tensor(false)]; + tensor attn_241_transpose_y_0 = const()[name = tensor("attn_241_transpose_y_0"), val = tensor(true)]; + tensor attn_241_cast = matmul(transpose_x = attn_241_transpose_x_0, transpose_y = attn_241_transpose_y_0, x = var_11824_cast, y = var_11828_cast)[name = tensor("attn_241_cast")]; + tensor var_11832 = const()[name = tensor("op_11832"), val = tensor([2, 1280, 1, -1])]; + tensor input_683_cast = reshape(shape = var_11832, x = attn_241_cast)[name = tensor("input_683_cast")]; + tensor var_11837 = const()[name = tensor("op_11837"), val = tensor([1, 1])]; + tensor var_11839 = const()[name = tensor("op_11839"), val = tensor([1, 1])]; + tensor var_11841_pad_type_0 = const()[name = tensor("op_11841_pad_type_0"), val = tensor("custom")]; + tensor var_11841_pad_0 = const()[name = tensor("op_11841_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1229935744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231164608))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231164800)))]; + tensor var_11841_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_11839, groups = var_6865, pad = var_11841_pad_0, pad_type = var_11841_pad_type_0, strides = var_11837, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_683_cast)[name = tensor("op_11841_cast")]; + tensor inputs_363_cast = add(x = var_11841_cast, y = inputs_361_cast)[name = tensor("inputs_363_cast")]; + tensor var_11845 = const()[name = tensor("op_11845"), val = tensor([1])]; + tensor channels_mean_363_cast = reduce_mean(axes = var_11845, keep_dims = var_6860, x = inputs_363_cast)[name = tensor("channels_mean_363_cast")]; + tensor zero_mean_363_cast = sub(x = inputs_363_cast, y = channels_mean_363_cast)[name = tensor("zero_mean_363_cast")]; + tensor zero_mean_sq_363_cast = mul(x = zero_mean_363_cast, y = zero_mean_363_cast)[name = tensor("zero_mean_sq_363_cast")]; + tensor var_11849 = const()[name = tensor("op_11849"), val = tensor([1])]; + tensor var_11850_cast = 
reduce_mean(axes = var_11849, keep_dims = var_6860, x = zero_mean_sq_363_cast)[name = tensor("op_11850_cast")]; + tensor var_11851_to_fp16 = const()[name = tensor("op_11851_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11852_cast = add(x = var_11850_cast, y = var_11851_to_fp16)[name = tensor("op_11852_cast")]; + tensor denom_363_epsilon_0_to_fp16 = const()[name = tensor("denom_363_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_363_cast = rsqrt(epsilon = denom_363_epsilon_0_to_fp16, x = var_11852_cast)[name = tensor("denom_363_cast")]; + tensor out_363_cast = mul(x = zero_mean_363_cast, y = denom_363_cast)[name = tensor("out_363_cast")]; + tensor var_11856_to_fp16 = const()[name = tensor("op_11856_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231167424)))]; + tensor var_11857_cast = add(x = out_363_cast, y = var_11856_to_fp16)[name = tensor("op_11857_cast")]; + tensor var_11859_to_fp16 = const()[name = tensor("op_11859_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231170048)))]; + tensor hidden_states_473_cast = mul(x = var_11857_cast, y = var_11859_to_fp16)[name = tensor("hidden_states_473_cast")]; + tensor var_11866 = const()[name = tensor("op_11866"), val = tensor([1, 1])]; + tensor var_11868 = const()[name = tensor("op_11868"), val = tensor([1, 1])]; + tensor q_243_pad_type_0 = const()[name = tensor("q_243_pad_type_0"), val = tensor("custom")]; + tensor q_243_pad_0 = const()[name = tensor("q_243_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231172672))), lut = tensor([-0x1.81cp-6, -0x1.e1p-8, 0x1.e7p-8, 0x1.83cp-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_243_cast = conv(dilations = var_11868, groups = var_6865, pad = q_243_pad_0, pad_type = q_243_pad_type_0, strides = var_11866, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_473_cast)[name = tensor("q_243_cast")]; + tensor var_11872 = const()[name = tensor("op_11872"), val = tensor([1, 1])]; + tensor var_11874 = const()[name = tensor("op_11874"), val = tensor([1, 1])]; + tensor k_243_pad_type_0 = const()[name = tensor("k_243_pad_type_0"), val = tensor("custom")]; + tensor k_243_pad_0 = const()[name = tensor("k_243_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231582336))), lut = tensor([-0x1.22p-6, -0x1.5dp-8, 0x1.5f8p-8, 0x1.224p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_243_cast = conv(dilations = var_11874, groups = var_6865, pad = k_243_pad_0, pad_type = k_243_pad_type_0, strides = var_11872, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_243_cast")]; + tensor var_11878 = const()[name = tensor("op_11878"), val = tensor([1, 1])]; + tensor var_11880 = const()[name = tensor("op_11880"), val = tensor([1, 1])]; + tensor v_243_pad_type_0 = 
const()[name = tensor("v_243_pad_type_0"), val = tensor("custom")]; + tensor v_243_pad_0 = const()[name = tensor("v_243_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1232237760))), lut = tensor([-0x1.4cp-6, -0x1.7bp-8, 0x1.798p-8, 0x1.4b8p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_243_cast = conv(dilations = var_11880, groups = var_6865, pad = v_243_pad_0, pad_type = v_243_pad_type_0, strides = var_11878, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_243_cast")]; + tensor var_11884 = const()[name = tensor("op_11884"), val = tensor([2, 20, 64, -1])]; + tensor var_11885_cast = reshape(shape = var_11884, x = q_243_cast)[name = tensor("op_11885_cast")]; + tensor var_11886 = const()[name = tensor("op_11886"), val = tensor([2, 20, 64, -1])]; + tensor var_11887_cast = reshape(shape = var_11886, x = k_243_cast)[name = tensor("op_11887_cast")]; + tensor var_11888 = const()[name = tensor("op_11888"), val = tensor([2, 20, 64, -1])]; + tensor var_11889_cast = reshape(shape = var_11888, x = v_243_cast)[name = tensor("op_11889_cast")]; + tensor attn_weights_485_transpose_x_0 = const()[name = tensor("attn_weights_485_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_485_transpose_y_0 = const()[name = tensor("attn_weights_485_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_485_cast = matmul(transpose_x = attn_weights_485_transpose_x_0, transpose_y = attn_weights_485_transpose_y_0, x = var_11885_cast, y = var_11887_cast)[name = tensor("attn_weights_485_cast")]; + tensor attn_weights_487_cast = mul(x = attn_weights_485_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_487_cast")]; + tensor var_11893_cast = softmax(axis = var_6849, x = attn_weights_487_cast)[name = tensor("op_11893_cast")]; + tensor attn_243_transpose_x_0 = const()[name = tensor("attn_243_transpose_x_0"), val = tensor(false)]; + tensor attn_243_transpose_y_0 = const()[name = tensor("attn_243_transpose_y_0"), val = tensor(true)]; + tensor attn_243_cast = matmul(transpose_x = attn_243_transpose_x_0, transpose_y = attn_243_transpose_y_0, x = var_11889_cast, y = var_11893_cast)[name = tensor("attn_243_cast")]; + tensor var_11897 = const()[name = tensor("op_11897"), val = tensor([2, 1280, 1, -1])]; + tensor input_685_cast = reshape(shape = var_11897, x = attn_243_cast)[name = tensor("input_685_cast")]; + tensor var_11902 = const()[name = tensor("op_11902"), val = tensor([1, 1])]; + tensor var_11904 = const()[name = tensor("op_11904"), val = tensor([1, 1])]; + tensor var_11906_pad_type_0 = const()[name = tensor("op_11906_pad_type_0"), val = tensor("custom")]; + tensor var_11906_pad_0 = const()[name = tensor("op_11906_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1232893184))), lut = tensor([-0x1.4bcp-7, -0x1.8a8p-9, 0x1.84cp-9, 0x1.4a8p-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor 
up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1233302848)))]; + tensor var_11906_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_11904, groups = var_6865, pad = var_11906_pad_0, pad_type = var_11906_pad_type_0, strides = var_11902, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_685_cast)[name = tensor("op_11906_cast")]; + tensor inputs_365_cast = add(x = var_11906_cast, y = inputs_363_cast)[name = tensor("inputs_365_cast")]; + tensor var_11910 = const()[name = tensor("op_11910"), val = tensor([1])]; + tensor channels_mean_365_cast = reduce_mean(axes = var_11910, keep_dims = var_6860, x = inputs_365_cast)[name = tensor("channels_mean_365_cast")]; + tensor zero_mean_365_cast = sub(x = inputs_365_cast, y = channels_mean_365_cast)[name = tensor("zero_mean_365_cast")]; + tensor zero_mean_sq_365_cast = mul(x = zero_mean_365_cast, y = zero_mean_365_cast)[name = tensor("zero_mean_sq_365_cast")]; + tensor var_11914 = const()[name = tensor("op_11914"), val = tensor([1])]; + tensor var_11915_cast = reduce_mean(axes = var_11914, keep_dims = var_6860, x = zero_mean_sq_365_cast)[name = tensor("op_11915_cast")]; + tensor var_11916_to_fp16 = const()[name = tensor("op_11916_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11917_cast = add(x = var_11915_cast, y = var_11916_to_fp16)[name = tensor("op_11917_cast")]; + tensor denom_365_epsilon_0_to_fp16 = const()[name = tensor("denom_365_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_365_cast = rsqrt(epsilon = denom_365_epsilon_0_to_fp16, x = var_11917_cast)[name = tensor("denom_365_cast")]; + tensor out_365_cast = mul(x = zero_mean_365_cast, y = denom_365_cast)[name = tensor("out_365_cast")]; + tensor var_11921_to_fp16 = const()[name = tensor("op_11921_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1233305472)))]; + tensor var_11922_cast = add(x = out_365_cast, y = var_11921_to_fp16)[name = tensor("op_11922_cast")]; + tensor var_11924_to_fp16 = const()[name = tensor("op_11924_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1233308096)))]; + tensor input_687_cast = mul(x = var_11922_cast, y = var_11924_to_fp16)[name = tensor("input_687_cast")]; + tensor var_11932 = const()[name = tensor("op_11932"), val = tensor([1, 1])]; + tensor var_11934 = const()[name = tensor("op_11934"), val = tensor([1, 1])]; + tensor var_11936_pad_type_0 = const()[name = tensor("op_11936_pad_type_0"), val = tensor("custom")]; + tensor var_11936_pad_0 = const()[name = tensor("op_11936_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1233310720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1243141184))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1243141376)))]; + tensor var_11936_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_11934, groups = var_6865, pad = var_11936_pad_0, pad_type = var_11936_pad_type_0, strides = var_11932, weight = up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_687_cast)[name = tensor("op_11936_cast")]; + tensor var_11937_split_sizes_0 = const()[name = tensor("op_11937_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11937_axis_0 = const()[name = tensor("op_11937_axis_0"), val = tensor(1)]; + tensor var_11937_cast_0, tensor var_11937_cast_1 = split(axis = var_11937_axis_0, split_sizes = var_11937_split_sizes_0, x = var_11936_cast)[name = tensor("op_11937_cast")]; + tensor var_11939_mode_0 = const()[name = tensor("op_11939_mode_0"), val = tensor("EXACT")]; + tensor var_11939_cast = gelu(mode = var_11939_mode_0, x = var_11937_cast_1)[name = tensor("op_11939_cast")]; + tensor input_689_cast = mul(x = var_11937_cast_0, y = var_11939_cast)[name = tensor("input_689_cast")]; + tensor var_11943 = const()[name = tensor("op_11943"), val = tensor([1, 1])]; + tensor var_11945 = const()[name = tensor("op_11945"), val = tensor([1, 1])]; + tensor var_11947_pad_type_0 = const()[name = tensor("op_11947_pad_type_0"), val = tensor("custom")]; + tensor var_11947_pad_0 = const()[name = tensor("op_11947_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1243161920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1248077184))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1248077376)))]; + tensor var_11947_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_11945, groups = var_6865, pad = var_11947_pad_0, pad_type = var_11947_pad_type_0, strides = var_11943, weight = up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_689_cast)[name = tensor("op_11947_cast")]; + tensor inputs_367_cast = add(x = var_11947_cast, y = inputs_365_cast)[name = tensor("inputs_367_cast")]; + tensor var_11957 = const()[name = tensor("op_11957"), val = tensor([1])]; + tensor channels_mean_367_cast = reduce_mean(axes = var_11957, keep_dims = var_6860, x = inputs_367_cast)[name = tensor("channels_mean_367_cast")]; + tensor zero_mean_367_cast = sub(x = inputs_367_cast, y = channels_mean_367_cast)[name = tensor("zero_mean_367_cast")]; + tensor zero_mean_sq_367_cast = mul(x = zero_mean_367_cast, y = zero_mean_367_cast)[name = tensor("zero_mean_sq_367_cast")]; + tensor var_11961 = const()[name = tensor("op_11961"), val = tensor([1])]; + tensor var_11962_cast = reduce_mean(axes = var_11961, keep_dims = var_6860, x = zero_mean_sq_367_cast)[name = tensor("op_11962_cast")]; + tensor var_11963_to_fp16 = const()[name 
= tensor("op_11963_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11964_cast = add(x = var_11962_cast, y = var_11963_to_fp16)[name = tensor("op_11964_cast")]; + tensor denom_367_epsilon_0_to_fp16 = const()[name = tensor("denom_367_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_367_cast = rsqrt(epsilon = denom_367_epsilon_0_to_fp16, x = var_11964_cast)[name = tensor("denom_367_cast")]; + tensor out_367_cast = mul(x = zero_mean_367_cast, y = denom_367_cast)[name = tensor("out_367_cast")]; + tensor var_11968_to_fp16 = const()[name = tensor("op_11968_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1248080000)))]; + tensor var_11969_cast = add(x = out_367_cast, y = var_11968_to_fp16)[name = tensor("op_11969_cast")]; + tensor var_11971_to_fp16 = const()[name = tensor("op_11971_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1248082624)))]; + tensor hidden_states_477_cast = mul(x = var_11969_cast, y = var_11971_to_fp16)[name = tensor("hidden_states_477_cast")]; + tensor var_11978 = const()[name = tensor("op_11978"), val = tensor([1, 1])]; + tensor var_11980 = const()[name = tensor("op_11980"), val = tensor([1, 1])]; + tensor q_245_pad_type_0 = const()[name = tensor("q_245_pad_type_0"), val = tensor("custom")]; + tensor q_245_pad_0 = const()[name = tensor("q_245_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1248085248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1248904512))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_245_cast = conv(dilations = var_11980, groups = var_6865, pad = q_245_pad_0, pad_type = q_245_pad_type_0, strides = var_11978, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_477_cast)[name = tensor("q_245_cast")]; + tensor var_11984 = const()[name = tensor("op_11984"), val = tensor([1, 1])]; + tensor var_11986 = const()[name = tensor("op_11986"), val = tensor([1, 1])]; + tensor k_245_pad_type_0 = const()[name = tensor("k_245_pad_type_0"), val = tensor("custom")]; + tensor k_245_pad_0 = const()[name = tensor("k_245_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1248904640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1249723904))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_245_cast = conv(dilations = var_11986, groups = var_6865, pad = k_245_pad_0, pad_type = k_245_pad_type_0, strides = var_11984, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_477_cast)[name = tensor("k_245_cast")]; + tensor var_11990 = const()[name = tensor("op_11990"), val = tensor([1, 1])]; + tensor var_11992 = const()[name = tensor("op_11992"), val = tensor([1, 1])]; + tensor v_245_pad_type_0 = const()[name = tensor("v_245_pad_type_0"), val = tensor("custom")]; + tensor 
v_245_pad_0 = const()[name = tensor("v_245_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1249724032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1250952896))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_245_cast = conv(dilations = var_11992, groups = var_6865, pad = v_245_pad_0, pad_type = v_245_pad_type_0, strides = var_11990, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_477_cast)[name = tensor("v_245_cast")]; + tensor var_11996 = const()[name = tensor("op_11996"), val = tensor([2, 20, 64, -1])]; + tensor var_11997_cast = reshape(shape = var_11996, x = q_245_cast)[name = tensor("op_11997_cast")]; + tensor var_11998 = const()[name = tensor("op_11998"), val = tensor([2, 20, 64, -1])]; + tensor var_11999_cast = reshape(shape = var_11998, x = k_245_cast)[name = tensor("op_11999_cast")]; + tensor var_12000 = const()[name = tensor("op_12000"), val = tensor([2, 20, 64, -1])]; + tensor var_12001_cast = reshape(shape = var_12000, x = v_245_cast)[name = tensor("op_12001_cast")]; + tensor attn_weights_489_transpose_x_0 = const()[name = tensor("attn_weights_489_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_489_transpose_y_0 = const()[name = tensor("attn_weights_489_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_489_cast = matmul(transpose_x = attn_weights_489_transpose_x_0, transpose_y = attn_weights_489_transpose_y_0, x = var_11997_cast, y = var_11999_cast)[name = tensor("attn_weights_489_cast")]; + tensor attn_weights_491_cast = mul(x = attn_weights_489_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_491_cast")]; + tensor var_12005_cast = softmax(axis = var_6849, x = attn_weights_491_cast)[name = tensor("op_12005_cast")]; + tensor attn_245_transpose_x_0 = const()[name = tensor("attn_245_transpose_x_0"), val = tensor(false)]; + tensor attn_245_transpose_y_0 = const()[name = tensor("attn_245_transpose_y_0"), val = tensor(true)]; + tensor attn_245_cast = matmul(transpose_x = attn_245_transpose_x_0, transpose_y = attn_245_transpose_y_0, x = var_12001_cast, y = var_12005_cast)[name = tensor("attn_245_cast")]; + tensor var_12009 = const()[name = tensor("op_12009"), val = tensor([2, 1280, 1, -1])]; + tensor input_691_cast = reshape(shape = var_12009, x = attn_245_cast)[name = tensor("input_691_cast")]; + tensor var_12014 = const()[name = tensor("op_12014"), val = tensor([1, 1])]; + tensor var_12016 = const()[name = tensor("op_12016"), val = tensor([1, 1])]; + tensor var_12018_pad_type_0 = const()[name = tensor("op_12018_pad_type_0"), val = tensor("custom")]; + tensor var_12018_pad_0 = const()[name = tensor("op_12018_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1250953088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1252181952))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor 
up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1252182144)))]; + tensor var_12018_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_12016, groups = var_6865, pad = var_12018_pad_0, pad_type = var_12018_pad_type_0, strides = var_12014, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_691_cast)[name = tensor("op_12018_cast")]; + tensor inputs_369_cast = add(x = var_12018_cast, y = inputs_367_cast)[name = tensor("inputs_369_cast")]; + tensor var_12022 = const()[name = tensor("op_12022"), val = tensor([1])]; + tensor channels_mean_369_cast = reduce_mean(axes = var_12022, keep_dims = var_6860, x = inputs_369_cast)[name = tensor("channels_mean_369_cast")]; + tensor zero_mean_369_cast = sub(x = inputs_369_cast, y = channels_mean_369_cast)[name = tensor("zero_mean_369_cast")]; + tensor zero_mean_sq_369_cast = mul(x = zero_mean_369_cast, y = zero_mean_369_cast)[name = tensor("zero_mean_sq_369_cast")]; + tensor var_12026 = const()[name = tensor("op_12026"), val = tensor([1])]; + tensor var_12027_cast = reduce_mean(axes = var_12026, keep_dims = var_6860, x = zero_mean_sq_369_cast)[name = tensor("op_12027_cast")]; + tensor var_12028_to_fp16 = const()[name = tensor("op_12028_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12029_cast = add(x = var_12027_cast, y = var_12028_to_fp16)[name = tensor("op_12029_cast")]; + tensor denom_369_epsilon_0_to_fp16 = const()[name = tensor("denom_369_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_369_cast = rsqrt(epsilon = denom_369_epsilon_0_to_fp16, x = var_12029_cast)[name = tensor("denom_369_cast")]; + tensor out_369_cast = mul(x = zero_mean_369_cast, y = denom_369_cast)[name = tensor("out_369_cast")]; + tensor var_12033_to_fp16 = const()[name = tensor("op_12033_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1252184768)))]; + tensor var_12034_cast = add(x = out_369_cast, y = var_12033_to_fp16)[name = tensor("op_12034_cast")]; + tensor var_12036_to_fp16 = const()[name = tensor("op_12036_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1252187392)))]; + tensor hidden_states_479_cast = mul(x = var_12034_cast, y = var_12036_to_fp16)[name = tensor("hidden_states_479_cast")]; + tensor var_12043 = const()[name = tensor("op_12043"), val = tensor([1, 1])]; + tensor var_12045 = const()[name = tensor("op_12045"), val = tensor([1, 1])]; + tensor q_247_pad_type_0 = const()[name = tensor("q_247_pad_type_0"), val = tensor("custom")]; + tensor q_247_pad_0 = const()[name = tensor("q_247_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1252190016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1253418880))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_247_cast = conv(dilations = var_12045, groups = var_6865, pad = q_247_pad_0, pad_type = q_247_pad_type_0, strides = var_12043, weight = 
up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_479_cast)[name = tensor("q_247_cast")]; + tensor var_12049 = const()[name = tensor("op_12049"), val = tensor([1, 1])]; + tensor var_12051 = const()[name = tensor("op_12051"), val = tensor([1, 1])]; + tensor k_247_pad_type_0 = const()[name = tensor("k_247_pad_type_0"), val = tensor("custom")]; + tensor k_247_pad_0 = const()[name = tensor("k_247_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1253419072))), lut = tensor([-0x1.128p-6, -0x1.4dp-8, 0x1.4e8p-8, 0x1.12cp-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_247_cast = conv(dilations = var_12051, groups = var_6865, pad = k_247_pad_0, pad_type = k_247_pad_type_0, strides = var_12049, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_247_cast")]; + tensor var_12055 = const()[name = tensor("op_12055"), val = tensor([1, 1])]; + tensor var_12057 = const()[name = tensor("op_12057"), val = tensor([1, 1])]; + tensor v_247_pad_type_0 = const()[name = tensor("v_247_pad_type_0"), val = tensor("custom")]; + tensor v_247_pad_0 = const()[name = tensor("v_247_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1254074496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1255385280))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_247_cast = conv(dilations = var_12057, groups = var_6865, pad = v_247_pad_0, pad_type = v_247_pad_type_0, strides = var_12055, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_247_cast")]; + tensor var_12061 = const()[name = tensor("op_12061"), val = tensor([2, 20, 64, -1])]; + tensor var_12062_cast = reshape(shape = var_12061, x = q_247_cast)[name = tensor("op_12062_cast")]; + tensor var_12063 = const()[name = tensor("op_12063"), val = tensor([2, 20, 64, -1])]; + tensor var_12064_cast = reshape(shape = var_12063, x = k_247_cast)[name = tensor("op_12064_cast")]; + tensor var_12065 = const()[name = tensor("op_12065"), val = tensor([2, 20, 64, -1])]; + tensor var_12066_cast = reshape(shape = var_12065, x = v_247_cast)[name = tensor("op_12066_cast")]; + tensor attn_weights_493_transpose_x_0 = const()[name = tensor("attn_weights_493_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_493_transpose_y_0 = const()[name = tensor("attn_weights_493_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_493_cast = matmul(transpose_x = attn_weights_493_transpose_x_0, transpose_y = attn_weights_493_transpose_y_0, x = var_12062_cast, y = var_12064_cast)[name = tensor("attn_weights_493_cast")]; + tensor attn_weights_495_cast = mul(x = attn_weights_493_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_495_cast")]; + tensor var_12070_cast = softmax(axis = var_6849, x = attn_weights_495_cast)[name = 
tensor("op_12070_cast")]; + tensor attn_247_transpose_x_0 = const()[name = tensor("attn_247_transpose_x_0"), val = tensor(false)]; + tensor attn_247_transpose_y_0 = const()[name = tensor("attn_247_transpose_y_0"), val = tensor(true)]; + tensor attn_247_cast = matmul(transpose_x = attn_247_transpose_x_0, transpose_y = attn_247_transpose_y_0, x = var_12066_cast, y = var_12070_cast)[name = tensor("attn_247_cast")]; + tensor var_12074 = const()[name = tensor("op_12074"), val = tensor([2, 1280, 1, -1])]; + tensor input_693_cast = reshape(shape = var_12074, x = attn_247_cast)[name = tensor("input_693_cast")]; + tensor var_12079 = const()[name = tensor("op_12079"), val = tensor([1, 1])]; + tensor var_12081 = const()[name = tensor("op_12081"), val = tensor([1, 1])]; + tensor var_12083_pad_type_0 = const()[name = tensor("op_12083_pad_type_0"), val = tensor("custom")]; + tensor var_12083_pad_0 = const()[name = tensor("op_12083_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1255385408))), lut = tensor([-0x1.614p-8, 0x1.6p-8]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1255590272)))]; + tensor var_12083_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_12081, groups = var_6865, pad = var_12083_pad_0, pad_type = var_12083_pad_type_0, strides = var_12079, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_693_cast)[name = tensor("op_12083_cast")]; + tensor inputs_371_cast = add(x = var_12083_cast, y = inputs_369_cast)[name = tensor("inputs_371_cast")]; + tensor var_12087 = const()[name = tensor("op_12087"), val = tensor([1])]; + tensor channels_mean_371_cast = reduce_mean(axes = var_12087, keep_dims = var_6860, x = inputs_371_cast)[name = tensor("channels_mean_371_cast")]; + tensor zero_mean_371_cast = sub(x = inputs_371_cast, y = channels_mean_371_cast)[name = tensor("zero_mean_371_cast")]; + tensor zero_mean_sq_371_cast = mul(x = zero_mean_371_cast, y = zero_mean_371_cast)[name = tensor("zero_mean_sq_371_cast")]; + tensor var_12091 = const()[name = tensor("op_12091"), val = tensor([1])]; + tensor var_12092_cast = reduce_mean(axes = var_12091, keep_dims = var_6860, x = zero_mean_sq_371_cast)[name = tensor("op_12092_cast")]; + tensor var_12093_to_fp16 = const()[name = tensor("op_12093_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12094_cast = add(x = var_12092_cast, y = var_12093_to_fp16)[name = tensor("op_12094_cast")]; + tensor denom_371_epsilon_0_to_fp16 = const()[name = tensor("denom_371_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_371_cast = rsqrt(epsilon = denom_371_epsilon_0_to_fp16, x = var_12094_cast)[name = tensor("denom_371_cast")]; + tensor out_371_cast = mul(x = zero_mean_371_cast, y = denom_371_cast)[name = tensor("out_371_cast")]; + tensor var_12098_to_fp16 = const()[name = tensor("op_12098_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1255592896)))]; + tensor var_12099_cast = add(x = out_371_cast, y = var_12098_to_fp16)[name = tensor("op_12099_cast")]; + tensor var_12101_to_fp16 = const()[name = tensor("op_12101_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1255595520)))]; + tensor input_695_cast = mul(x = var_12099_cast, y = var_12101_to_fp16)[name = tensor("input_695_cast")]; + tensor var_12109 = const()[name = tensor("op_12109"), val = tensor([1, 1])]; + tensor var_12111 = const()[name = tensor("op_12111"), val = tensor([1, 1])]; + tensor var_12113_pad_type_0 = const()[name = tensor("op_12113_pad_type_0"), val = tensor("custom")]; + tensor var_12113_pad_0 = const()[name = tensor("op_12113_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1255598144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1265428608))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1265428800)))]; + tensor var_12113_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_12111, groups = var_6865, pad = var_12113_pad_0, pad_type = var_12113_pad_type_0, strides = var_12109, weight = up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_695_cast)[name = tensor("op_12113_cast")]; + tensor var_12114_split_sizes_0 = const()[name = tensor("op_12114_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_12114_axis_0 = const()[name = tensor("op_12114_axis_0"), val = tensor(1)]; + tensor var_12114_cast_0, tensor var_12114_cast_1 = split(axis = var_12114_axis_0, split_sizes = var_12114_split_sizes_0, x = var_12113_cast)[name = tensor("op_12114_cast")]; + tensor var_12116_mode_0 = const()[name = tensor("op_12116_mode_0"), val = tensor("EXACT")]; + tensor var_12116_cast = gelu(mode = var_12116_mode_0, x = var_12114_cast_1)[name = tensor("op_12116_cast")]; + tensor input_697_cast = mul(x = var_12114_cast_0, y = var_12116_cast)[name = tensor("input_697_cast")]; + tensor var_12120 = const()[name = tensor("op_12120"), val = tensor([1, 1])]; + tensor var_12122 = const()[name = tensor("op_12122"), val = tensor([1, 1])]; + tensor var_12124_pad_type_0 = const()[name = tensor("op_12124_pad_type_0"), val = tensor("custom")]; + tensor var_12124_pad_0 = const()[name = tensor("op_12124_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1265449344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1270364608))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1270364800)))]; + tensor var_12124_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_12122, groups = var_6865, pad = var_12124_pad_0, pad_type = var_12124_pad_type_0, strides = var_12120, weight = up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_697_cast)[name = tensor("op_12124_cast")]; + tensor inputs_373_cast = add(x = var_12124_cast, y = inputs_371_cast)[name = tensor("inputs_373_cast")]; + tensor var_12134 = const()[name = tensor("op_12134"), val = tensor([1])]; + tensor channels_mean_373_cast = reduce_mean(axes = var_12134, keep_dims = var_6860, x = inputs_373_cast)[name = tensor("channels_mean_373_cast")]; + tensor zero_mean_373_cast = sub(x = inputs_373_cast, y = channels_mean_373_cast)[name = tensor("zero_mean_373_cast")]; + tensor zero_mean_sq_373_cast = mul(x = zero_mean_373_cast, y = zero_mean_373_cast)[name = tensor("zero_mean_sq_373_cast")]; + tensor var_12138 = const()[name = tensor("op_12138"), val = tensor([1])]; + tensor var_12139_cast = reduce_mean(axes = var_12138, keep_dims = var_6860, x = zero_mean_sq_373_cast)[name = tensor("op_12139_cast")]; + tensor var_12140_to_fp16 = const()[name = tensor("op_12140_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12141_cast = add(x = var_12139_cast, y = var_12140_to_fp16)[name = tensor("op_12141_cast")]; + tensor denom_373_epsilon_0_to_fp16 = const()[name = tensor("denom_373_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_373_cast = rsqrt(epsilon = denom_373_epsilon_0_to_fp16, x = var_12141_cast)[name = tensor("denom_373_cast")]; + tensor out_373_cast = mul(x = zero_mean_373_cast, y = denom_373_cast)[name = tensor("out_373_cast")]; + tensor var_12145_to_fp16 = const()[name = tensor("op_12145_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1270367424)))]; + tensor var_12146_cast = add(x = out_373_cast, y = var_12145_to_fp16)[name = tensor("op_12146_cast")]; + tensor var_12148_to_fp16 = const()[name = tensor("op_12148_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1270370048)))]; + tensor hidden_states_483_cast = mul(x = var_12146_cast, y = var_12148_to_fp16)[name = tensor("hidden_states_483_cast")]; + tensor var_12155 = const()[name = tensor("op_12155"), val = tensor([1, 1])]; + tensor var_12157 = const()[name = tensor("op_12157"), val = tensor([1, 1])]; + tensor q_249_pad_type_0 = const()[name = tensor("q_249_pad_type_0"), val = tensor("custom")]; + tensor q_249_pad_0 = const()[name = tensor("q_249_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1270372672))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1271191936))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_249_cast = conv(dilations = var_12157, groups = var_6865, pad = q_249_pad_0, pad_type = q_249_pad_type_0, strides = var_12155, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_483_cast)[name = 
tensor("q_249_cast")]; + tensor var_12161 = const()[name = tensor("op_12161"), val = tensor([1, 1])]; + tensor var_12163 = const()[name = tensor("op_12163"), val = tensor([1, 1])]; + tensor k_249_pad_type_0 = const()[name = tensor("k_249_pad_type_0"), val = tensor("custom")]; + tensor k_249_pad_0 = const()[name = tensor("k_249_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1271192064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1272011328))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_249_cast = conv(dilations = var_12163, groups = var_6865, pad = k_249_pad_0, pad_type = k_249_pad_type_0, strides = var_12161, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_483_cast)[name = tensor("k_249_cast")]; + tensor var_12167 = const()[name = tensor("op_12167"), val = tensor([1, 1])]; + tensor var_12169 = const()[name = tensor("op_12169"), val = tensor([1, 1])]; + tensor v_249_pad_type_0 = const()[name = tensor("v_249_pad_type_0"), val = tensor("custom")]; + tensor v_249_pad_0 = const()[name = tensor("v_249_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1272011456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1273240320))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_249_cast = conv(dilations = var_12169, groups = var_6865, pad = v_249_pad_0, pad_type = v_249_pad_type_0, strides = var_12167, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_483_cast)[name = tensor("v_249_cast")]; + tensor var_12173 = const()[name = tensor("op_12173"), val = tensor([2, 20, 64, -1])]; + tensor var_12174_cast = reshape(shape = var_12173, x = q_249_cast)[name = tensor("op_12174_cast")]; + tensor var_12175 = const()[name = tensor("op_12175"), val = tensor([2, 20, 64, -1])]; + tensor var_12176_cast = reshape(shape = var_12175, x = k_249_cast)[name = tensor("op_12176_cast")]; + tensor var_12177 = const()[name = tensor("op_12177"), val = tensor([2, 20, 64, -1])]; + tensor var_12178_cast = reshape(shape = var_12177, x = v_249_cast)[name = tensor("op_12178_cast")]; + tensor attn_weights_497_transpose_x_0 = const()[name = tensor("attn_weights_497_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_497_transpose_y_0 = const()[name = tensor("attn_weights_497_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_497_cast = matmul(transpose_x = attn_weights_497_transpose_x_0, transpose_y = attn_weights_497_transpose_y_0, x = var_12174_cast, y = var_12176_cast)[name = tensor("attn_weights_497_cast")]; + tensor attn_weights_499_cast = mul(x = attn_weights_497_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_499_cast")]; + tensor var_12182_cast = softmax(axis = var_6849, x = attn_weights_499_cast)[name = tensor("op_12182_cast")]; + tensor attn_249_transpose_x_0 = const()[name = 
tensor("attn_249_transpose_x_0"), val = tensor(false)]; + tensor attn_249_transpose_y_0 = const()[name = tensor("attn_249_transpose_y_0"), val = tensor(true)]; + tensor attn_249_cast = matmul(transpose_x = attn_249_transpose_x_0, transpose_y = attn_249_transpose_y_0, x = var_12178_cast, y = var_12182_cast)[name = tensor("attn_249_cast")]; + tensor var_12186 = const()[name = tensor("op_12186"), val = tensor([2, 1280, 1, -1])]; + tensor input_699_cast = reshape(shape = var_12186, x = attn_249_cast)[name = tensor("input_699_cast")]; + tensor var_12191 = const()[name = tensor("op_12191"), val = tensor([1, 1])]; + tensor var_12193 = const()[name = tensor("op_12193"), val = tensor([1, 1])]; + tensor var_12195_pad_type_0 = const()[name = tensor("op_12195_pad_type_0"), val = tensor("custom")]; + tensor var_12195_pad_0 = const()[name = tensor("op_12195_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1273240512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1274469376))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1274469568)))]; + tensor var_12195_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_12193, groups = var_6865, pad = var_12195_pad_0, pad_type = var_12195_pad_type_0, strides = var_12191, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_699_cast)[name = tensor("op_12195_cast")]; + tensor inputs_375_cast = add(x = var_12195_cast, y = inputs_373_cast)[name = tensor("inputs_375_cast")]; + tensor var_12199 = const()[name = tensor("op_12199"), val = tensor([1])]; + tensor channels_mean_375_cast = reduce_mean(axes = var_12199, keep_dims = var_6860, x = inputs_375_cast)[name = tensor("channels_mean_375_cast")]; + tensor zero_mean_375_cast = sub(x = inputs_375_cast, y = channels_mean_375_cast)[name = tensor("zero_mean_375_cast")]; + tensor zero_mean_sq_375_cast = mul(x = zero_mean_375_cast, y = zero_mean_375_cast)[name = tensor("zero_mean_sq_375_cast")]; + tensor var_12203 = const()[name = tensor("op_12203"), val = tensor([1])]; + tensor var_12204_cast = reduce_mean(axes = var_12203, keep_dims = var_6860, x = zero_mean_sq_375_cast)[name = tensor("op_12204_cast")]; + tensor var_12205_to_fp16 = const()[name = tensor("op_12205_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12206_cast = add(x = var_12204_cast, y = var_12205_to_fp16)[name = tensor("op_12206_cast")]; + tensor denom_375_epsilon_0_to_fp16 = const()[name = tensor("denom_375_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_375_cast = rsqrt(epsilon = denom_375_epsilon_0_to_fp16, x = var_12206_cast)[name = tensor("denom_375_cast")]; + tensor out_375_cast = mul(x = zero_mean_375_cast, y = denom_375_cast)[name = tensor("out_375_cast")]; + tensor var_12210_to_fp16 = const()[name = tensor("op_12210_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1274472192)))]; 
+ tensor var_12211_cast = add(x = out_375_cast, y = var_12210_to_fp16)[name = tensor("op_12211_cast")]; + tensor var_12213_to_fp16 = const()[name = tensor("op_12213_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1274474816)))]; + tensor hidden_states_485_cast = mul(x = var_12211_cast, y = var_12213_to_fp16)[name = tensor("hidden_states_485_cast")]; + tensor var_12220 = const()[name = tensor("op_12220"), val = tensor([1, 1])]; + tensor var_12222 = const()[name = tensor("op_12222"), val = tensor([1, 1])]; + tensor q_251_pad_type_0 = const()[name = tensor("q_251_pad_type_0"), val = tensor("custom")]; + tensor q_251_pad_0 = const()[name = tensor("q_251_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1274477440))), lut = tensor([-0x1.648p-6, -0x1.c48p-8, 0x1.c1p-8, 0x1.644p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_251_cast = conv(dilations = var_12222, groups = var_6865, pad = q_251_pad_0, pad_type = q_251_pad_type_0, strides = var_12220, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_485_cast)[name = tensor("q_251_cast")]; + tensor var_12226 = const()[name = tensor("op_12226"), val = tensor([1, 1])]; + tensor var_12228 = const()[name = tensor("op_12228"), val = tensor([1, 1])]; + tensor k_251_pad_type_0 = const()[name = tensor("k_251_pad_type_0"), val = tensor("custom")]; + tensor k_251_pad_0 = const()[name = tensor("k_251_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1274887104))), lut = tensor([-0x1.efp-7, -0x1.328p-8, 0x1.318p-8, 0x1.ee8p-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_251_cast = conv(dilations = var_12228, groups = var_6865, pad = k_251_pad_0, pad_type = k_251_pad_type_0, strides = var_12226, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_251_cast")]; + tensor var_12232 = const()[name = tensor("op_12232"), val = tensor([1, 1])]; + tensor var_12234 = const()[name = tensor("op_12234"), val = tensor([1, 1])]; + tensor v_251_pad_type_0 = const()[name = tensor("v_251_pad_type_0"), val = tensor("custom")]; + tensor v_251_pad_0 = const()[name = tensor("v_251_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1275542528))), lut = tensor([-0x1.1b8p-6, -0x1.4bcp-8, 0x1.478p-8, 0x1.1a8p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_251_cast = conv(dilations = var_12234, groups = var_6865, pad = v_251_pad_0, pad_type = v_251_pad_type_0, strides = var_12232, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = 
encoder_hidden_states)[name = tensor("v_251_cast")]; + tensor var_12238 = const()[name = tensor("op_12238"), val = tensor([2, 20, 64, -1])]; + tensor var_12239_cast = reshape(shape = var_12238, x = q_251_cast)[name = tensor("op_12239_cast")]; + tensor var_12240 = const()[name = tensor("op_12240"), val = tensor([2, 20, 64, -1])]; + tensor var_12241_cast = reshape(shape = var_12240, x = k_251_cast)[name = tensor("op_12241_cast")]; + tensor var_12242 = const()[name = tensor("op_12242"), val = tensor([2, 20, 64, -1])]; + tensor var_12243_cast = reshape(shape = var_12242, x = v_251_cast)[name = tensor("op_12243_cast")]; + tensor attn_weights_501_transpose_x_0 = const()[name = tensor("attn_weights_501_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_501_transpose_y_0 = const()[name = tensor("attn_weights_501_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_501_cast = matmul(transpose_x = attn_weights_501_transpose_x_0, transpose_y = attn_weights_501_transpose_y_0, x = var_12239_cast, y = var_12241_cast)[name = tensor("attn_weights_501_cast")]; + tensor attn_weights_503_cast = mul(x = attn_weights_501_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_503_cast")]; + tensor var_12247_cast = softmax(axis = var_6849, x = attn_weights_503_cast)[name = tensor("op_12247_cast")]; + tensor attn_251_transpose_x_0 = const()[name = tensor("attn_251_transpose_x_0"), val = tensor(false)]; + tensor attn_251_transpose_y_0 = const()[name = tensor("attn_251_transpose_y_0"), val = tensor(true)]; + tensor attn_251_cast = matmul(transpose_x = attn_251_transpose_x_0, transpose_y = attn_251_transpose_y_0, x = var_12243_cast, y = var_12247_cast)[name = tensor("attn_251_cast")]; + tensor var_12251 = const()[name = tensor("op_12251"), val = tensor([2, 1280, 1, -1])]; + tensor input_701_cast = reshape(shape = var_12251, x = attn_251_cast)[name = tensor("input_701_cast")]; + tensor var_12256 = const()[name = tensor("op_12256"), val = tensor([1, 1])]; + tensor var_12258 = const()[name = tensor("op_12258"), val = tensor([1, 1])]; + tensor var_12260_pad_type_0 = const()[name = tensor("op_12260_pad_type_0"), val = tensor("custom")]; + tensor var_12260_pad_0 = const()[name = tensor("op_12260_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1276197952))), lut = tensor([-0x1.478p-8, 0x1.488p-8]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1276402816)))]; + tensor var_12260_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_12258, groups = var_6865, pad = var_12260_pad_0, pad_type = var_12260_pad_type_0, strides = var_12256, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_701_cast)[name = tensor("op_12260_cast")]; + tensor inputs_377_cast = add(x = var_12260_cast, y = inputs_375_cast)[name = tensor("inputs_377_cast")]; + tensor var_12264 = const()[name = tensor("op_12264"), val = tensor([1])]; + tensor channels_mean_377_cast 
= reduce_mean(axes = var_12264, keep_dims = var_6860, x = inputs_377_cast)[name = tensor("channels_mean_377_cast")]; + tensor zero_mean_377_cast = sub(x = inputs_377_cast, y = channels_mean_377_cast)[name = tensor("zero_mean_377_cast")]; + tensor zero_mean_sq_377_cast = mul(x = zero_mean_377_cast, y = zero_mean_377_cast)[name = tensor("zero_mean_sq_377_cast")]; + tensor var_12268 = const()[name = tensor("op_12268"), val = tensor([1])]; + tensor var_12269_cast = reduce_mean(axes = var_12268, keep_dims = var_6860, x = zero_mean_sq_377_cast)[name = tensor("op_12269_cast")]; + tensor var_12270_to_fp16 = const()[name = tensor("op_12270_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12271_cast = add(x = var_12269_cast, y = var_12270_to_fp16)[name = tensor("op_12271_cast")]; + tensor denom_377_epsilon_0_to_fp16 = const()[name = tensor("denom_377_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_377_cast = rsqrt(epsilon = denom_377_epsilon_0_to_fp16, x = var_12271_cast)[name = tensor("denom_377_cast")]; + tensor out_377_cast = mul(x = zero_mean_377_cast, y = denom_377_cast)[name = tensor("out_377_cast")]; + tensor var_12275_to_fp16 = const()[name = tensor("op_12275_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1276405440)))]; + tensor var_12276_cast = add(x = out_377_cast, y = var_12275_to_fp16)[name = tensor("op_12276_cast")]; + tensor var_12278_to_fp16 = const()[name = tensor("op_12278_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1276408064)))]; + tensor input_703_cast = mul(x = var_12276_cast, y = var_12278_to_fp16)[name = tensor("input_703_cast")]; + tensor var_12286 = const()[name = tensor("op_12286"), val = tensor([1, 1])]; + tensor var_12288 = const()[name = tensor("op_12288"), val = tensor([1, 1])]; + tensor var_12290_pad_type_0 = const()[name = tensor("op_12290_pad_type_0"), val = tensor("custom")]; + tensor var_12290_pad_0 = const()[name = tensor("op_12290_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1276410688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1286241152))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1286241344)))]; + tensor var_12290_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_12288, groups = var_6865, pad = var_12290_pad_0, pad_type = var_12290_pad_type_0, strides = var_12286, weight = up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_703_cast)[name = tensor("op_12290_cast")]; + tensor var_12291_split_sizes_0 = const()[name = tensor("op_12291_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_12291_axis_0 = const()[name = tensor("op_12291_axis_0"), val = tensor(1)]; + tensor var_12291_cast_0, tensor var_12291_cast_1 = split(axis = var_12291_axis_0, split_sizes = var_12291_split_sizes_0, x = var_12290_cast)[name = 
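The `channels_mean`/`zero_mean`/`rsqrt` chain above is a layer norm spelled out as primitive ops: mean over the channel axis, variance via an explicit square, `rsqrt(var + 0x1.5p-17)` (about 1e-5; the `rsqrt` op also carries its own small guard epsilon `0x1p-24`), then the affine. Note the affine is folded as `(x_hat + b) * w`, which is algebraically the usual `w * x_hat + beta` with `b = beta / w`. A minimal numpy sketch of the same computation:

```python
import numpy as np

def layer_norm_like(x, b, w, eps=float.fromhex("0x1.5p-17")):  # eps ~= 1e-5
    mean = x.mean(axis=1, keepdims=True)          # reduce_mean over channels
    zm = x - mean                                 # zero_mean_*
    var = (zm * zm).mean(axis=1, keepdims=True)   # zero_mean_sq_* -> reduce_mean
    xhat = zm / np.sqrt(var + eps)                # rsqrt(var + 0x1.5p-17)
    return (xhat + b) * w                         # affine folded as (xhat + b) * w
```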
tensor("op_12291_cast")]; + tensor var_12293_mode_0 = const()[name = tensor("op_12293_mode_0"), val = tensor("EXACT")]; + tensor var_12293_cast = gelu(mode = var_12293_mode_0, x = var_12291_cast_1)[name = tensor("op_12293_cast")]; + tensor input_705_cast = mul(x = var_12291_cast_0, y = var_12293_cast)[name = tensor("input_705_cast")]; + tensor var_12297 = const()[name = tensor("op_12297"), val = tensor([1, 1])]; + tensor var_12299 = const()[name = tensor("op_12299"), val = tensor([1, 1])]; + tensor var_12301_pad_type_0 = const()[name = tensor("op_12301_pad_type_0"), val = tensor("custom")]; + tensor var_12301_pad_0 = const()[name = tensor("op_12301_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1286261888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1291177152))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1291177344)))]; + tensor var_12301_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_12299, groups = var_6865, pad = var_12301_pad_0, pad_type = var_12301_pad_type_0, strides = var_12297, weight = up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_705_cast)[name = tensor("op_12301_cast")]; + tensor inputs_379_cast = add(x = var_12301_cast, y = inputs_377_cast)[name = tensor("inputs_379_cast")]; + tensor var_12311 = const()[name = tensor("op_12311"), val = tensor([1])]; + tensor channels_mean_379_cast = reduce_mean(axes = var_12311, keep_dims = var_6860, x = inputs_379_cast)[name = tensor("channels_mean_379_cast")]; + tensor zero_mean_379_cast = sub(x = inputs_379_cast, y = channels_mean_379_cast)[name = tensor("zero_mean_379_cast")]; + tensor zero_mean_sq_379_cast = mul(x = zero_mean_379_cast, y = zero_mean_379_cast)[name = tensor("zero_mean_sq_379_cast")]; + tensor var_12315 = const()[name = tensor("op_12315"), val = tensor([1])]; + tensor var_12316_cast = reduce_mean(axes = var_12315, keep_dims = var_6860, x = zero_mean_sq_379_cast)[name = tensor("op_12316_cast")]; + tensor var_12317_to_fp16 = const()[name = tensor("op_12317_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12318_cast = add(x = var_12316_cast, y = var_12317_to_fp16)[name = tensor("op_12318_cast")]; + tensor denom_379_epsilon_0_to_fp16 = const()[name = tensor("denom_379_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_379_cast = rsqrt(epsilon = denom_379_epsilon_0_to_fp16, x = var_12318_cast)[name = tensor("denom_379_cast")]; + tensor out_379_cast = mul(x = zero_mean_379_cast, y = denom_379_cast)[name = tensor("out_379_cast")]; + tensor var_12322_to_fp16 = const()[name = tensor("op_12322_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1291179968)))]; + tensor var_12323_cast = add(x = out_379_cast, y = var_12322_to_fp16)[name = tensor("op_12323_cast")]; + tensor var_12325_to_fp16 = const()[name = tensor("op_12325_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1291182592)))]; + tensor hidden_states_489_cast = mul(x = var_12323_cast, y = var_12325_to_fp16)[name = tensor("hidden_states_489_cast")]; + tensor var_12332 = const()[name = tensor("op_12332"), val = tensor([1, 1])]; + tensor var_12334 = const()[name = tensor("op_12334"), val = tensor([1, 1])]; + tensor q_253_pad_type_0 = const()[name = tensor("q_253_pad_type_0"), val = tensor("custom")]; + tensor q_253_pad_0 = const()[name = tensor("q_253_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1291185216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1292004480))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_253_cast = conv(dilations = var_12334, groups = var_6865, pad = q_253_pad_0, pad_type = q_253_pad_type_0, strides = var_12332, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_489_cast)[name = tensor("q_253_cast")]; + tensor var_12338 = const()[name = tensor("op_12338"), val = tensor([1, 1])]; + tensor var_12340 = const()[name = tensor("op_12340"), val = tensor([1, 1])]; + tensor k_253_pad_type_0 = const()[name = tensor("k_253_pad_type_0"), val = tensor("custom")]; + tensor k_253_pad_0 = const()[name = tensor("k_253_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1292004608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1292823872))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_253_cast = conv(dilations = var_12340, groups = var_6865, pad = k_253_pad_0, pad_type = k_253_pad_type_0, strides = var_12338, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_489_cast)[name = tensor("k_253_cast")]; + tensor var_12344 = const()[name = tensor("op_12344"), val = tensor([1, 1])]; + tensor var_12346 = const()[name = tensor("op_12346"), val = tensor([1, 1])]; + tensor v_253_pad_type_0 = const()[name = tensor("v_253_pad_type_0"), val = tensor("custom")]; + tensor v_253_pad_0 = const()[name = tensor("v_253_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1292824000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1294052864))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_253_cast = conv(dilations = var_12346, groups = var_6865, pad = v_253_pad_0, pad_type = v_253_pad_type_0, strides = var_12344, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_489_cast)[name = tensor("v_253_cast")]; + tensor var_12350 = const()[name = tensor("op_12350"), val = 
tensor([2, 20, 64, -1])]; + tensor var_12351_cast = reshape(shape = var_12350, x = q_253_cast)[name = tensor("op_12351_cast")]; + tensor var_12352 = const()[name = tensor("op_12352"), val = tensor([2, 20, 64, -1])]; + tensor var_12353_cast = reshape(shape = var_12352, x = k_253_cast)[name = tensor("op_12353_cast")]; + tensor var_12354 = const()[name = tensor("op_12354"), val = tensor([2, 20, 64, -1])]; + tensor var_12355_cast = reshape(shape = var_12354, x = v_253_cast)[name = tensor("op_12355_cast")]; + tensor attn_weights_505_transpose_x_0 = const()[name = tensor("attn_weights_505_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_505_transpose_y_0 = const()[name = tensor("attn_weights_505_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_505_cast = matmul(transpose_x = attn_weights_505_transpose_x_0, transpose_y = attn_weights_505_transpose_y_0, x = var_12351_cast, y = var_12353_cast)[name = tensor("attn_weights_505_cast")]; + tensor attn_weights_507_cast = mul(x = attn_weights_505_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_507_cast")]; + tensor var_12359_cast = softmax(axis = var_6849, x = attn_weights_507_cast)[name = tensor("op_12359_cast")]; + tensor attn_253_transpose_x_0 = const()[name = tensor("attn_253_transpose_x_0"), val = tensor(false)]; + tensor attn_253_transpose_y_0 = const()[name = tensor("attn_253_transpose_y_0"), val = tensor(true)]; + tensor attn_253_cast = matmul(transpose_x = attn_253_transpose_x_0, transpose_y = attn_253_transpose_y_0, x = var_12355_cast, y = var_12359_cast)[name = tensor("attn_253_cast")]; + tensor var_12363 = const()[name = tensor("op_12363"), val = tensor([2, 1280, 1, -1])]; + tensor input_707_cast = reshape(shape = var_12363, x = attn_253_cast)[name = tensor("input_707_cast")]; + tensor var_12368 = const()[name = tensor("op_12368"), val = tensor([1, 1])]; + tensor var_12370 = const()[name = tensor("op_12370"), val = tensor([1, 1])]; + tensor var_12372_pad_type_0 = const()[name = tensor("op_12372_pad_type_0"), val = tensor("custom")]; + tensor var_12372_pad_0 = const()[name = tensor("op_12372_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1294053056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1295281920))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1295282112)))]; + tensor var_12372_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_12370, groups = var_6865, pad = var_12372_pad_0, pad_type = var_12372_pad_type_0, strides = var_12368, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_707_cast)[name = tensor("op_12372_cast")]; + tensor inputs_381_cast = add(x = var_12372_cast, y = inputs_379_cast)[name = tensor("inputs_381_cast")]; + tensor var_12376 = const()[name = tensor("op_12376"), val = tensor([1])]; + tensor channels_mean_381_cast = reduce_mean(axes = var_12376, keep_dims = var_6860, 
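The attention motif repeated throughout these blocks reshapes the 1280-channel q/k/v to `[2, 20, 64, -1]`, i.e. (batch, heads, head_dim, sequence) in channels-first layout, then runs two `matmul` ops with the transpose flags shown. A numpy sketch of the same data flow; the scale constant (`var_6856_to_fp16`) and softmax axis (`var_6849`) are defined earlier in the file and not visible here, so the `scale` argument and `axis=-1` are assumptions (for 64-dim heads the customary scale is 1/sqrt(64) = 0.125, which is exactly the `0x1p-3` constant the 640-channel blocks below use inline):

```python
import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

def attention(q, k, v, scale):
    # q, k, v: (2, 20, 64, S) after reshape(shape=[2, 20, 64, -1]).
    w = np.einsum("bhds,bhdt->bhst", q, k) * scale   # matmul, transpose_x = true
    w = softmax(w, axis=-1)                          # over key positions (assumed)
    o = np.einsum("bhdt,bhst->bhds", v, w)           # matmul, transpose_y = true
    return o.reshape(2, 20 * 64, 1, -1)              # back to (2, 1280, 1, S)
```

For the `attn2` (cross-attention) variants, k and v are computed from `encoder_hidden_states`, the 2048-dim text-encoder output, rather than from the image features.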
x = inputs_381_cast)[name = tensor("channels_mean_381_cast")]; + tensor zero_mean_381_cast = sub(x = inputs_381_cast, y = channels_mean_381_cast)[name = tensor("zero_mean_381_cast")]; + tensor zero_mean_sq_381_cast = mul(x = zero_mean_381_cast, y = zero_mean_381_cast)[name = tensor("zero_mean_sq_381_cast")]; + tensor var_12380 = const()[name = tensor("op_12380"), val = tensor([1])]; + tensor var_12381_cast = reduce_mean(axes = var_12380, keep_dims = var_6860, x = zero_mean_sq_381_cast)[name = tensor("op_12381_cast")]; + tensor var_12382_to_fp16 = const()[name = tensor("op_12382_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12383_cast = add(x = var_12381_cast, y = var_12382_to_fp16)[name = tensor("op_12383_cast")]; + tensor denom_381_epsilon_0_to_fp16 = const()[name = tensor("denom_381_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_381_cast = rsqrt(epsilon = denom_381_epsilon_0_to_fp16, x = var_12383_cast)[name = tensor("denom_381_cast")]; + tensor out_381_cast = mul(x = zero_mean_381_cast, y = denom_381_cast)[name = tensor("out_381_cast")]; + tensor var_12387_to_fp16 = const()[name = tensor("op_12387_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1295284736)))]; + tensor var_12388_cast = add(x = out_381_cast, y = var_12387_to_fp16)[name = tensor("op_12388_cast")]; + tensor var_12390_to_fp16 = const()[name = tensor("op_12390_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1295287360)))]; + tensor hidden_states_491_cast = mul(x = var_12388_cast, y = var_12390_to_fp16)[name = tensor("hidden_states_491_cast")]; + tensor var_12397 = const()[name = tensor("op_12397"), val = tensor([1, 1])]; + tensor var_12399 = const()[name = tensor("op_12399"), val = tensor([1, 1])]; + tensor q_255_pad_type_0 = const()[name = tensor("q_255_pad_type_0"), val = tensor("custom")]; + tensor q_255_pad_0 = const()[name = tensor("q_255_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1295289984))), lut = tensor([-0x1.698p-6, -0x1.c78p-8, 0x1.c54p-8, 0x1.688p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_255_cast = conv(dilations = var_12399, groups = var_6865, pad = q_255_pad_0, pad_type = q_255_pad_type_0, strides = var_12397, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_491_cast)[name = tensor("q_255_cast")]; + tensor var_12403 = const()[name = tensor("op_12403"), val = tensor([1, 1])]; + tensor var_12405 = const()[name = tensor("op_12405"), val = tensor([1, 1])]; + tensor k_255_pad_type_0 = const()[name = tensor("k_255_pad_type_0"), val = tensor("custom")]; + tensor k_255_pad_0 = const()[name = tensor("k_255_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1295699648))), lut = tensor([-0x1.fdcp-8, 0x1.fep-8]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_255_cast = conv(dilations = var_12405, groups = var_6865, pad = k_255_pad_0, 
pad_type = k_255_pad_type_0, strides = var_12403, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_255_cast")]; + tensor var_12409 = const()[name = tensor("op_12409"), val = tensor([1, 1])]; + tensor var_12411 = const()[name = tensor("op_12411"), val = tensor([1, 1])]; + tensor v_255_pad_type_0 = const()[name = tensor("v_255_pad_type_0"), val = tensor("custom")]; + tensor v_255_pad_0 = const()[name = tensor("v_255_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1296027392))), lut = tensor([-0x1.e5cp-7, -0x1.21cp-8, 0x1.20cp-8, 0x1.e54p-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_255_cast = conv(dilations = var_12411, groups = var_6865, pad = v_255_pad_0, pad_type = v_255_pad_type_0, strides = var_12409, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_255_cast")]; + tensor var_12415 = const()[name = tensor("op_12415"), val = tensor([2, 20, 64, -1])]; + tensor var_12416_cast = reshape(shape = var_12415, x = q_255_cast)[name = tensor("op_12416_cast")]; + tensor var_12417 = const()[name = tensor("op_12417"), val = tensor([2, 20, 64, -1])]; + tensor var_12418_cast = reshape(shape = var_12417, x = k_255_cast)[name = tensor("op_12418_cast")]; + tensor var_12419 = const()[name = tensor("op_12419"), val = tensor([2, 20, 64, -1])]; + tensor var_12420_cast = reshape(shape = var_12419, x = v_255_cast)[name = tensor("op_12420_cast")]; + tensor attn_weights_509_transpose_x_0 = const()[name = tensor("attn_weights_509_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_509_transpose_y_0 = const()[name = tensor("attn_weights_509_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_509_cast = matmul(transpose_x = attn_weights_509_transpose_x_0, transpose_y = attn_weights_509_transpose_y_0, x = var_12416_cast, y = var_12418_cast)[name = tensor("attn_weights_509_cast")]; + tensor attn_weights_511_cast = mul(x = attn_weights_509_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_511_cast")]; + tensor var_12424_cast = softmax(axis = var_6849, x = attn_weights_511_cast)[name = tensor("op_12424_cast")]; + tensor attn_255_transpose_x_0 = const()[name = tensor("attn_255_transpose_x_0"), val = tensor(false)]; + tensor attn_255_transpose_y_0 = const()[name = tensor("attn_255_transpose_y_0"), val = tensor(true)]; + tensor attn_255_cast = matmul(transpose_x = attn_255_transpose_x_0, transpose_y = attn_255_transpose_y_0, x = var_12420_cast, y = var_12424_cast)[name = tensor("attn_255_cast")]; + tensor var_12428 = const()[name = tensor("op_12428"), val = tensor([2, 1280, 1, -1])]; + tensor input_709_cast = reshape(shape = var_12428, x = attn_255_cast)[name = tensor("input_709_cast")]; + tensor var_12433 = const()[name = tensor("op_12433"), val = tensor([1, 1])]; + tensor var_12435 = const()[name = tensor("op_12435"), val = tensor([1, 1])]; + tensor var_12437_pad_type_0 = const()[name = tensor("op_12437_pad_type_0"), val = tensor("custom")]; + tensor var_12437_pad_0 = const()[name = tensor("op_12437_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1296682816))), lut = tensor([-0x1.414p-8, 0x1.42p-8]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1296887680)))]; + tensor var_12437_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_12435, groups = var_6865, pad = var_12437_pad_0, pad_type = var_12437_pad_type_0, strides = var_12433, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_709_cast)[name = tensor("op_12437_cast")]; + tensor inputs_383_cast = add(x = var_12437_cast, y = inputs_381_cast)[name = tensor("inputs_383_cast")]; + tensor var_12441 = const()[name = tensor("op_12441"), val = tensor([1])]; + tensor channels_mean_383_cast = reduce_mean(axes = var_12441, keep_dims = var_6860, x = inputs_383_cast)[name = tensor("channels_mean_383_cast")]; + tensor zero_mean_383_cast = sub(x = inputs_383_cast, y = channels_mean_383_cast)[name = tensor("zero_mean_383_cast")]; + tensor zero_mean_sq_383_cast = mul(x = zero_mean_383_cast, y = zero_mean_383_cast)[name = tensor("zero_mean_sq_383_cast")]; + tensor var_12445 = const()[name = tensor("op_12445"), val = tensor([1])]; + tensor var_12446_cast = reduce_mean(axes = var_12445, keep_dims = var_6860, x = zero_mean_sq_383_cast)[name = tensor("op_12446_cast")]; + tensor var_12447_to_fp16 = const()[name = tensor("op_12447_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12448_cast = add(x = var_12446_cast, y = var_12447_to_fp16)[name = tensor("op_12448_cast")]; + tensor denom_383_epsilon_0_to_fp16 = const()[name = tensor("denom_383_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_383_cast = rsqrt(epsilon = denom_383_epsilon_0_to_fp16, x = var_12448_cast)[name = tensor("denom_383_cast")]; + tensor out_383_cast = mul(x = zero_mean_383_cast, y = denom_383_cast)[name = tensor("out_383_cast")]; + tensor var_12452_to_fp16 = const()[name = tensor("op_12452_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1296890304)))]; + tensor var_12453_cast = add(x = out_383_cast, y = var_12452_to_fp16)[name = tensor("op_12453_cast")]; + tensor var_12455_to_fp16 = const()[name = tensor("op_12455_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1296892928)))]; + tensor input_711_cast = mul(x = var_12453_cast, y = var_12455_to_fp16)[name = tensor("input_711_cast")]; + tensor var_12463 = const()[name = tensor("op_12463"), val = tensor([1, 1])]; + tensor var_12465 = const()[name = tensor("op_12465"), val = tensor([1, 1])]; + tensor var_12467_pad_type_0 = const()[name = tensor("op_12467_pad_type_0"), val = tensor("custom")]; + tensor var_12467_pad_0 = const()[name = tensor("op_12467_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1296895552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1306726016))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1306726208)))]; + tensor var_12467_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_12465, groups = var_6865, pad = var_12467_pad_0, pad_type = var_12467_pad_type_0, strides = var_12463, weight = up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_711_cast)[name = tensor("op_12467_cast")]; + tensor var_12468_split_sizes_0 = const()[name = tensor("op_12468_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_12468_axis_0 = const()[name = tensor("op_12468_axis_0"), val = tensor(1)]; + tensor var_12468_cast_0, tensor var_12468_cast_1 = split(axis = var_12468_axis_0, split_sizes = var_12468_split_sizes_0, x = var_12467_cast)[name = tensor("op_12468_cast")]; + tensor var_12470_mode_0 = const()[name = tensor("op_12470_mode_0"), val = tensor("EXACT")]; + tensor var_12470_cast = gelu(mode = var_12470_mode_0, x = var_12468_cast_1)[name = tensor("op_12470_cast")]; + tensor input_713_cast = mul(x = var_12468_cast_0, y = var_12470_cast)[name = tensor("input_713_cast")]; + tensor var_12474 = const()[name = tensor("op_12474"), val = tensor([1, 1])]; + tensor var_12476 = const()[name = tensor("op_12476"), val = tensor([1, 1])]; + tensor var_12478_pad_type_0 = const()[name = tensor("op_12478_pad_type_0"), val = tensor("custom")]; + tensor var_12478_pad_0 = const()[name = tensor("op_12478_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1306746752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1311662016))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1311662208)))]; + tensor var_12478_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_12476, groups = var_6865, pad = var_12478_pad_0, pad_type = var_12478_pad_type_0, strides = var_12474, weight = up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_713_cast)[name = tensor("op_12478_cast")]; + tensor hidden_states_495_cast = add(x = var_12478_cast, y = inputs_383_cast)[name = tensor("hidden_states_495_cast")]; + tensor var_12480 = const()[name = tensor("op_12480"), val = tensor([2, 1280, 32, 32])]; + tensor input_715_cast = reshape(shape = var_12480, x = hidden_states_495_cast)[name = tensor("input_715_cast")]; + tensor var_12484 = const()[name = tensor("op_12484"), val = tensor([1, 1])]; + tensor var_12486 = const()[name = tensor("op_12486"), val = 
tensor([1, 1])]; + tensor hidden_states_497_pad_type_0 = const()[name = tensor("hidden_states_497_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_497_pad_0 = const()[name = tensor("hidden_states_497_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1311664832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1312893696))), name = tensor("up_blocks_0_attentions_2_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1312893888)))]; + tensor hidden_states_497_cast = conv(bias = up_blocks_0_attentions_2_proj_out_bias_to_fp16, dilations = var_12486, groups = var_6865, pad = hidden_states_497_pad_0, pad_type = hidden_states_497_pad_type_0, strides = var_12484, weight = up_blocks_0_attentions_2_proj_out_weight_to_fp16_palettized, x = input_715_cast)[name = tensor("hidden_states_497_cast")]; + tensor input_717_cast = add(x = hidden_states_497_cast, y = hidden_states_431_cast)[name = tensor("input_717_cast")]; + tensor input_719_scale_factor_height_0 = const()[name = tensor("input_719_scale_factor_height_0"), val = tensor(0x1p+1)]; + tensor input_719_scale_factor_width_0 = const()[name = tensor("input_719_scale_factor_width_0"), val = tensor(0x1p+1)]; + tensor input_719_cast = upsample_nearest_neighbor(scale_factor_height = input_719_scale_factor_height_0, scale_factor_width = input_719_scale_factor_width_0, x = input_717_cast)[name = tensor("input_719_cast")]; + tensor var_12495 = const()[name = tensor("op_12495"), val = tensor([1, 1])]; + tensor var_12497 = const()[name = tensor("op_12497"), val = tensor([1, 1])]; + tensor hidden_states_499_pad_type_0 = const()[name = tensor("hidden_states_499_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_499_pad_0 = const()[name = tensor("hidden_states_499_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_upsamplers_0_conv_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1312896512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1327642176))), name = tensor("up_blocks_0_upsamplers_0_conv_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor up_blocks_0_upsamplers_0_conv_bias_to_fp16 = const()[name = tensor("up_blocks_0_upsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1327642752)))]; + tensor hidden_states_499_cast = conv(bias = up_blocks_0_upsamplers_0_conv_bias_to_fp16, dilations = var_12497, groups = var_6865, pad = hidden_states_499_pad_0, pad_type = hidden_states_499_pad_type_0, strides = var_12495, weight = up_blocks_0_upsamplers_0_conv_weight_to_fp16_palettized, x = input_719_cast)[name = tensor("hidden_states_499_cast")]; + tensor var_12502 = const()[name = tensor("op_12502"), val = tensor(3)]; + tensor var_12513 = const()[name = tensor("op_12513"), val = tensor(true)]; + tensor var_12518 = const()[name = tensor("op_12518"), val = tensor(1)]; + tensor input_721_interleave_0 = const()[name = tensor("input_721_interleave_0"), val = 
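After the final transformer block of `up_blocks_0`, the features are upsampled with `upsample_nearest_neighbor` (scale factor `0x1p+1`, i.e. 2.0 in both dimensions) and smoothed by a 3x3 conv with padding `[1, 1, 1, 1]`. A minimal sketch, with a naive stride-1 convolution included only for self-containment:

```python
import numpy as np

def conv2d(x, w, b, pad):
    # Naive channels-first stride-1 conv; illustration only.
    B, C, H, W = x.shape
    O, _, kh, kw = w.shape
    xp = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)))
    out = np.zeros((B, O, H, W), dtype=x.dtype)
    for i in range(kh):
        for j in range(kw):
            out += np.einsum("bchw,oc->bohw",
                             xp[:, :, i:i + H, j:j + W], w[:, :, i, j])
    return out + b.reshape(1, -1, 1, 1)

def upsampler(x, w, b):
    # scale_factor 0x1p+1 == 2.0: nearest-neighbor doubling, then a 3x3 conv.
    x = x.repeat(2, axis=2).repeat(2, axis=3)
    return conv2d(x, w, b, pad=1)
```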
tensor(false)]; + tensor input_721_cast = concat(axis = var_12518, interleave = input_721_interleave_0, values = (hidden_states_499_cast, input_113_cast))[name = tensor("input_721_cast")]; + tensor reshape_120_shape_0 = const()[name = tensor("reshape_120_shape_0"), val = tensor([2, 32, 60, 64, 64])]; + tensor reshape_120_cast = reshape(shape = reshape_120_shape_0, x = input_721_cast)[name = tensor("reshape_120_cast")]; + tensor reduce_mean_90_axes_0 = const()[name = tensor("reduce_mean_90_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_90_keep_dims_0 = const()[name = tensor("reduce_mean_90_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_90_cast = reduce_mean(axes = reduce_mean_90_axes_0, keep_dims = reduce_mean_90_keep_dims_0, x = reshape_120_cast)[name = tensor("reduce_mean_90_cast")]; + tensor sub_60_cast = sub(x = reshape_120_cast, y = reduce_mean_90_cast)[name = tensor("sub_60_cast")]; + tensor square_30_cast = square(x = sub_60_cast)[name = tensor("square_30_cast")]; + tensor reduce_mean_92_axes_0 = const()[name = tensor("reduce_mean_92_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_92_keep_dims_0 = const()[name = tensor("reduce_mean_92_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_92_cast = reduce_mean(axes = reduce_mean_92_axes_0, keep_dims = reduce_mean_92_keep_dims_0, x = square_30_cast)[name = tensor("reduce_mean_92_cast")]; + tensor add_60_y_0_to_fp16 = const()[name = tensor("add_60_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_60_cast = add(x = reduce_mean_92_cast, y = add_60_y_0_to_fp16)[name = tensor("add_60_cast")]; + tensor sqrt_30_cast = sqrt(x = add_60_cast)[name = tensor("sqrt_30_cast")]; + tensor real_div_30_cast = real_div(x = sub_60_cast, y = sqrt_30_cast)[name = tensor("real_div_30_cast")]; + tensor reshape_121_shape_0 = const()[name = tensor("reshape_121_shape_0"), val = tensor([2, 1920, 64, 64])]; + tensor reshape_121_cast = reshape(shape = reshape_121_shape_0, x = real_div_30_cast)[name = tensor("reshape_121_cast")]; + tensor add_61_gamma_0_to_fp16 = const()[name = tensor("add_61_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1327645376)))]; + tensor add_61_beta_0_to_fp16 = const()[name = tensor("add_61_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1327649280)))]; + tensor add_61_epsilon_0_to_fp16 = const()[name = tensor("add_61_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_61_cast = batch_norm(beta = add_61_beta_0_to_fp16, epsilon = add_61_epsilon_0_to_fp16, gamma = add_61_gamma_0_to_fp16, mean = add_55_mean_0_to_fp16, variance = add_55_variance_0_to_fp16, x = reshape_121_cast)[name = tensor("add_61_cast")]; + tensor input_725_cast = silu(x = add_61_cast)[name = tensor("input_725_cast")]; + tensor var_12547 = const()[name = tensor("op_12547"), val = tensor([1, 1])]; + tensor var_12549 = const()[name = tensor("op_12549"), val = tensor([1, 1])]; + tensor hidden_states_501_pad_type_0 = const()[name = tensor("hidden_states_501_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_501_pad_0 = const()[name = tensor("hidden_states_501_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1327653184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1335947648))), name = 
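The `concat` above is the UNet skip connection: the upsampled 1280-channel features are joined along the channel axis with a 640-channel encoder tensor (`input_113_cast`), giving 1920 channels. The `reshape_120` to `[2, 32, 60, 64, 64]` followed by mean/variance reduction over axes (2, 3, 4) is a 32-group group norm written in primitives; the trailing `batch_norm` supplies the per-channel gamma/beta (its `mean`/`variance` constants are defined earlier in the file and are presumably zeros and ones, which is an assumption here). A minimal sketch:

```python
import numpy as np

def group_norm32(x, gamma, beta, eps=float.fromhex("0x1.5p-17")):
    B, C, H, W = x.shape                       # e.g. (2, 1920, 64, 64)
    g = x.reshape(B, 32, C // 32, H, W)        # reshape_120: 32 groups of 60
    m = g.mean(axis=(2, 3, 4), keepdims=True)
    v = ((g - m) ** 2).mean(axis=(2, 3, 4), keepdims=True)
    xhat = ((g - m) / np.sqrt(v + eps)).reshape(B, C, H, W)
    return xhat * gamma.reshape(1, C, 1, 1) + beta.reshape(1, C, 1, 1)
```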
tensor("up_blocks_1_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([640, 1920, 3, 3])]; + tensor up_blocks_1_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1335947840)))]; + tensor hidden_states_501_cast = conv(bias = up_blocks_1_resnets_0_conv1_bias_to_fp16, dilations = var_12549, groups = var_12518, pad = hidden_states_501_pad_0, pad_type = hidden_states_501_pad_type_0, strides = var_12547, weight = up_blocks_1_resnets_0_conv1_weight_to_fp16_palettized, x = input_725_cast)[name = tensor("hidden_states_501_cast")]; + tensor var_12555 = const()[name = tensor("op_12555"), val = tensor([1, 1])]; + tensor var_12557 = const()[name = tensor("op_12557"), val = tensor([1, 1])]; + tensor temb_23_pad_type_0 = const()[name = tensor("temb_23_pad_type_0"), val = tensor("custom")]; + tensor temb_23_pad_0 = const()[name = tensor("temb_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1335949184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1336563648))), name = tensor("up_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor up_blocks_1_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1336563840)))]; + tensor temb_23_cast = conv(bias = up_blocks_1_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_12557, groups = var_12518, pad = temb_23_pad_0, pad_type = temb_23_pad_type_0, strides = var_12555, weight = up_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_23_cast")]; + tensor input_729_cast = add(x = hidden_states_501_cast, y = temb_23_cast)[name = tensor("input_729_cast")]; + tensor reshape_124_shape_0 = const()[name = tensor("reshape_124_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_124_cast = reshape(shape = reshape_124_shape_0, x = input_729_cast)[name = tensor("reshape_124_cast")]; + tensor reduce_mean_93_axes_0 = const()[name = tensor("reduce_mean_93_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_93_keep_dims_0 = const()[name = tensor("reduce_mean_93_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_93_cast = reduce_mean(axes = reduce_mean_93_axes_0, keep_dims = reduce_mean_93_keep_dims_0, x = reshape_124_cast)[name = tensor("reduce_mean_93_cast")]; + tensor sub_62_cast = sub(x = reshape_124_cast, y = reduce_mean_93_cast)[name = tensor("sub_62_cast")]; + tensor square_31_cast = square(x = sub_62_cast)[name = tensor("square_31_cast")]; + tensor reduce_mean_95_axes_0 = const()[name = tensor("reduce_mean_95_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_95_keep_dims_0 = const()[name = tensor("reduce_mean_95_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_95_cast = reduce_mean(axes = reduce_mean_95_axes_0, keep_dims = reduce_mean_95_keep_dims_0, x = square_31_cast)[name = tensor("reduce_mean_95_cast")]; + tensor add_62_y_0_to_fp16 = const()[name = tensor("add_62_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_62_cast = add(x = reduce_mean_95_cast, y = add_62_y_0_to_fp16)[name = tensor("add_62_cast")]; + tensor 
sqrt_31_cast = sqrt(x = add_62_cast)[name = tensor("sqrt_31_cast")]; + tensor real_div_31_cast = real_div(x = sub_62_cast, y = sqrt_31_cast)[name = tensor("real_div_31_cast")]; + tensor reshape_125_shape_0 = const()[name = tensor("reshape_125_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_125_cast = reshape(shape = reshape_125_shape_0, x = real_div_31_cast)[name = tensor("reshape_125_cast")]; + tensor add_63_gamma_0_to_fp16 = const()[name = tensor("add_63_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1336565184)))]; + tensor add_63_beta_0_to_fp16 = const()[name = tensor("add_63_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1336566528)))]; + tensor add_63_epsilon_0_to_fp16 = const()[name = tensor("add_63_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_63_cast = batch_norm(beta = add_63_beta_0_to_fp16, epsilon = add_63_epsilon_0_to_fp16, gamma = add_63_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_125_cast)[name = tensor("add_63_cast")]; + tensor input_733_cast = silu(x = add_63_cast)[name = tensor("input_733_cast")]; + tensor var_12567 = const()[name = tensor("op_12567"), val = tensor([1, 1])]; + tensor var_12569 = const()[name = tensor("op_12569"), val = tensor([1, 1])]; + tensor hidden_states_503_pad_type_0 = const()[name = tensor("hidden_states_503_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_503_pad_0 = const()[name = tensor("hidden_states_503_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1336567872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1340254336))), name = tensor("up_blocks_1_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor up_blocks_1_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1340254912)))]; + tensor hidden_states_503_cast = conv(bias = up_blocks_1_resnets_0_conv2_bias_to_fp16, dilations = var_12569, groups = var_12518, pad = hidden_states_503_pad_0, pad_type = hidden_states_503_pad_type_0, strides = var_12567, weight = up_blocks_1_resnets_0_conv2_weight_to_fp16_palettized, x = input_733_cast)[name = tensor("hidden_states_503_cast")]; + tensor var_12574 = const()[name = tensor("op_12574"), val = tensor([1, 1])]; + tensor var_12576 = const()[name = tensor("op_12576"), val = tensor([1, 1])]; + tensor x_11_pad_type_0 = const()[name = tensor("x_11_pad_type_0"), val = tensor("custom")]; + tensor x_11_pad_0 = const()[name = tensor("x_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1340256256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1341485120))), name = tensor("up_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([640, 1920, 1, 1])]; + tensor up_blocks_1_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1341485696)))]; + tensor x_11_cast = conv(bias = up_blocks_1_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_12576, groups = var_12518, pad = x_11_pad_0, pad_type = x_11_pad_type_0, strides = var_12574, weight = up_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized, x = input_721_cast)[name = tensor("x_11_cast")]; + tensor hidden_states_505_cast = add(x = x_11_cast, y = hidden_states_503_cast)[name = tensor("hidden_states_505_cast")]; + tensor reshape_128_shape_0 = const()[name = tensor("reshape_128_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_128_cast = reshape(shape = reshape_128_shape_0, x = hidden_states_505_cast)[name = tensor("reshape_128_cast")]; + tensor reduce_mean_96_axes_0 = const()[name = tensor("reduce_mean_96_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_96_keep_dims_0 = const()[name = tensor("reduce_mean_96_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_96_cast = reduce_mean(axes = reduce_mean_96_axes_0, keep_dims = reduce_mean_96_keep_dims_0, x = reshape_128_cast)[name = tensor("reduce_mean_96_cast")]; + tensor sub_64_cast = sub(x = reshape_128_cast, y = reduce_mean_96_cast)[name = tensor("sub_64_cast")]; + tensor square_32_cast = square(x = sub_64_cast)[name = tensor("square_32_cast")]; + tensor reduce_mean_98_axes_0 = const()[name = tensor("reduce_mean_98_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_98_keep_dims_0 = const()[name = tensor("reduce_mean_98_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_98_cast = reduce_mean(axes = reduce_mean_98_axes_0, keep_dims = reduce_mean_98_keep_dims_0, x = square_32_cast)[name = tensor("reduce_mean_98_cast")]; + tensor add_64_y_0_to_fp16 = const()[name = tensor("add_64_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_64_cast = add(x = reduce_mean_98_cast, y = add_64_y_0_to_fp16)[name = tensor("add_64_cast")]; + tensor sqrt_32_cast = sqrt(x = add_64_cast)[name = tensor("sqrt_32_cast")]; + tensor real_div_32_cast = real_div(x = sub_64_cast, y = sqrt_32_cast)[name = tensor("real_div_32_cast")]; + tensor reshape_129_shape_0 = const()[name = tensor("reshape_129_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_129_cast = reshape(shape = reshape_129_shape_0, x = real_div_32_cast)[name = tensor("reshape_129_cast")]; + tensor add_65_gamma_0_to_fp16 = const()[name = tensor("add_65_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1341487040)))]; + tensor add_65_beta_0_to_fp16 = const()[name = tensor("add_65_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1341488384)))]; + tensor add_65_epsilon_0_to_fp16 = const()[name = tensor("add_65_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_65_cast = batch_norm(beta = add_65_beta_0_to_fp16, epsilon = add_65_epsilon_0_to_fp16, gamma = add_65_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_129_cast)[name = tensor("add_65_cast")]; + tensor var_12598 = const()[name = tensor("op_12598"), val = tensor([1, 1])]; + tensor var_12600 = const()[name = tensor("op_12600"), val = tensor([1, 1])]; + tensor hidden_states_507_pad_type_0 = const()[name = tensor("hidden_states_507_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_507_pad_0 = const()[name = tensor("hidden_states_507_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1341489728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1341899392))), name = tensor("up_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1341899968)))]; + tensor hidden_states_507_cast = conv(bias = up_blocks_1_attentions_0_proj_in_bias_to_fp16, dilations = var_12600, groups = var_12518, pad = hidden_states_507_pad_0, pad_type = hidden_states_507_pad_type_0, strides = var_12598, weight = up_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized, x = add_65_cast)[name = tensor("hidden_states_507_cast")]; + tensor var_12605 = const()[name = tensor("op_12605"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_385_cast = reshape(shape = var_12605, x = hidden_states_507_cast)[name = tensor("inputs_385_cast")]; + tensor var_12615 = const()[name = tensor("op_12615"), val = tensor([1])]; + tensor channels_mean_385_cast = reduce_mean(axes = var_12615, keep_dims = var_12513, x = inputs_385_cast)[name = tensor("channels_mean_385_cast")]; + tensor zero_mean_385_cast = sub(x = inputs_385_cast, y = channels_mean_385_cast)[name = tensor("zero_mean_385_cast")]; + tensor zero_mean_sq_385_cast = mul(x = zero_mean_385_cast, y = zero_mean_385_cast)[name = tensor("zero_mean_sq_385_cast")]; + tensor var_12619 = const()[name = tensor("op_12619"), val = tensor([1])]; + tensor var_12620_cast = reduce_mean(axes = var_12619, keep_dims = var_12513, x = zero_mean_sq_385_cast)[name = tensor("op_12620_cast")]; + tensor var_12621_to_fp16 = const()[name = tensor("op_12621_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12622_cast = add(x = var_12620_cast, y = var_12621_to_fp16)[name = tensor("op_12622_cast")]; + tensor denom_385_epsilon_0_to_fp16 = const()[name = tensor("denom_385_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_385_cast = rsqrt(epsilon = denom_385_epsilon_0_to_fp16, x = var_12622_cast)[name = tensor("denom_385_cast")]; + tensor out_385_cast = mul(x = zero_mean_385_cast, y = denom_385_cast)[name = tensor("out_385_cast")]; + tensor var_12626_to_fp16 = const()[name = tensor("op_12626_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1341901312)))]; + tensor var_12627_cast = add(x = out_385_cast, y = var_12626_to_fp16)[name = tensor("op_12627_cast")]; + tensor var_12629_to_fp16 = const()[name = tensor("op_12629_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1341902656)))]; + tensor hidden_states_509_cast = mul(x = var_12627_cast, y = var_12629_to_fp16)[name = tensor("hidden_states_509_cast")]; + tensor var_12636 = const()[name = tensor("op_12636"), val = tensor([1, 1])]; + tensor var_12638 = const()[name = tensor("op_12638"), val = tensor([1, 1])]; + tensor q_257_pad_type_0 = const()[name = tensor("q_257_pad_type_0"), val = tensor("custom")]; + tensor q_257_pad_0 = const()[name = tensor("q_257_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1341904000))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1342211264))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_257_cast = conv(dilations = var_12638, groups = var_12518, pad = q_257_pad_0, pad_type = q_257_pad_type_0, strides = var_12636, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_509_cast)[name = tensor("q_257_cast")]; + tensor var_12642 = const()[name = tensor("op_12642"), val = tensor([1, 1])]; + tensor var_12644 = const()[name = tensor("op_12644"), val = tensor([1, 1])]; + tensor k_257_pad_type_0 = const()[name = tensor("k_257_pad_type_0"), val = tensor("custom")]; + tensor k_257_pad_0 = const()[name = tensor("k_257_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1342211456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1342518720))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_257_cast = conv(dilations = var_12644, groups = var_12518, pad = k_257_pad_0, pad_type = k_257_pad_type_0, strides = var_12642, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_509_cast)[name = tensor("k_257_cast")]; + tensor var_12648 = const()[name = tensor("op_12648"), val = tensor([1, 1])]; + tensor var_12650 = const()[name = tensor("op_12650"), val = tensor([1, 1])]; + tensor v_257_pad_type_0 = const()[name = tensor("v_257_pad_type_0"), val = tensor("custom")]; + tensor v_257_pad_0 = const()[name = tensor("v_257_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1342518912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1342928576))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_257_cast = conv(dilations = var_12650, groups = var_12518, pad = v_257_pad_0, pad_type = v_257_pad_type_0, strides = var_12648, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_509_cast)[name = tensor("v_257_cast")]; + tensor var_12654 = const()[name = tensor("op_12654"), val = tensor([2, 10, 64, -1])]; + tensor var_12655_cast = reshape(shape = var_12654, x = q_257_cast)[name = tensor("op_12655_cast")]; + tensor var_12656 = const()[name = tensor("op_12656"), val = tensor([2, 10, 64, -1])]; + tensor var_12657_cast = reshape(shape = var_12656, x = k_257_cast)[name = tensor("op_12657_cast")]; + tensor var_12658 = const()[name = tensor("op_12658"), val = tensor([2, 10, 64, -1])]; + tensor var_12659_cast = reshape(shape = var_12658, x = v_257_cast)[name = tensor("op_12659_cast")]; + tensor attn_weights_513_transpose_x_0 = const()[name = tensor("attn_weights_513_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_513_transpose_y_0 = const()[name = tensor("attn_weights_513_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_513_cast = 
matmul(transpose_x = attn_weights_513_transpose_x_0, transpose_y = attn_weights_513_transpose_y_0, x = var_12655_cast, y = var_12657_cast)[name = tensor("attn_weights_513_cast")]; + tensor var_12509_to_fp16 = const()[name = tensor("op_12509_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_515_cast = mul(x = attn_weights_513_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_515_cast")]; + tensor var_12663_cast = softmax(axis = var_12502, x = attn_weights_515_cast)[name = tensor("op_12663_cast")]; + tensor attn_257_transpose_x_0 = const()[name = tensor("attn_257_transpose_x_0"), val = tensor(false)]; + tensor attn_257_transpose_y_0 = const()[name = tensor("attn_257_transpose_y_0"), val = tensor(true)]; + tensor attn_257_cast = matmul(transpose_x = attn_257_transpose_x_0, transpose_y = attn_257_transpose_y_0, x = var_12659_cast, y = var_12663_cast)[name = tensor("attn_257_cast")]; + tensor var_12667 = const()[name = tensor("op_12667"), val = tensor([2, 640, 1, -1])]; + tensor input_737_cast = reshape(shape = var_12667, x = attn_257_cast)[name = tensor("input_737_cast")]; + tensor var_12672 = const()[name = tensor("op_12672"), val = tensor([1, 1])]; + tensor var_12674 = const()[name = tensor("op_12674"), val = tensor([1, 1])]; + tensor var_12676_pad_type_0 = const()[name = tensor("op_12676_pad_type_0"), val = tensor("custom")]; + tensor var_12676_pad_0 = const()[name = tensor("op_12676_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1342929152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1343338816))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1343339392)))]; + tensor var_12676_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_12674, groups = var_12518, pad = var_12676_pad_0, pad_type = var_12676_pad_type_0, strides = var_12672, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_737_cast)[name = tensor("op_12676_cast")]; + tensor inputs_387_cast = add(x = var_12676_cast, y = inputs_385_cast)[name = tensor("inputs_387_cast")]; + tensor var_12680 = const()[name = tensor("op_12680"), val = tensor([1])]; + tensor channels_mean_387_cast = reduce_mean(axes = var_12680, keep_dims = var_12513, x = inputs_387_cast)[name = tensor("channels_mean_387_cast")]; + tensor zero_mean_387_cast = sub(x = inputs_387_cast, y = channels_mean_387_cast)[name = tensor("zero_mean_387_cast")]; + tensor zero_mean_sq_387_cast = mul(x = zero_mean_387_cast, y = zero_mean_387_cast)[name = tensor("zero_mean_sq_387_cast")]; + tensor var_12684 = const()[name = tensor("op_12684"), val = tensor([1])]; + tensor var_12685_cast = reduce_mean(axes = var_12684, keep_dims = var_12513, x = zero_mean_sq_387_cast)[name = tensor("op_12685_cast")]; + tensor var_12686_to_fp16 = const()[name = tensor("op_12686_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12687_cast = add(x = var_12685_cast, y = 
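In `up_blocks_1` the width drops to 640 channels, so attention runs with 10 heads of dim 64 (the `reshape(shape=[2, 10, 64, -1])` ops above) and the scale appears inline as `var_12509_to_fp16 = 0x1p-3`. A quick check, under the usual assumption that the scale is 1/sqrt(head_dim):

```python
import math

head_dim = 64
scale = 1.0 / math.sqrt(head_dim)            # 1/sqrt(64) = 0.125
assert scale == float.fromhex("0x1p-3")      # the op_12509 constant above
heads = 640 // head_dim                      # 10, matching reshape [2, 10, 64, -1]
```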
var_12686_to_fp16)[name = tensor("op_12687_cast")]; + tensor denom_387_epsilon_0_to_fp16 = const()[name = tensor("denom_387_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_387_cast = rsqrt(epsilon = denom_387_epsilon_0_to_fp16, x = var_12687_cast)[name = tensor("denom_387_cast")]; + tensor out_387_cast = mul(x = zero_mean_387_cast, y = denom_387_cast)[name = tensor("out_387_cast")]; + tensor var_12691_to_fp16 = const()[name = tensor("op_12691_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1343340736)))]; + tensor var_12692_cast = add(x = out_387_cast, y = var_12691_to_fp16)[name = tensor("op_12692_cast")]; + tensor var_12694_to_fp16 = const()[name = tensor("op_12694_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1343342080)))]; + tensor hidden_states_511_cast = mul(x = var_12692_cast, y = var_12694_to_fp16)[name = tensor("hidden_states_511_cast")]; + tensor var_12701 = const()[name = tensor("op_12701"), val = tensor([1, 1])]; + tensor var_12703 = const()[name = tensor("op_12703"), val = tensor([1, 1])]; + tensor q_259_pad_type_0 = const()[name = tensor("q_259_pad_type_0"), val = tensor("custom")]; + tensor q_259_pad_0 = const()[name = tensor("q_259_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1343343424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1343753088))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_259_cast = conv(dilations = var_12703, groups = var_12518, pad = q_259_pad_0, pad_type = q_259_pad_type_0, strides = var_12701, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_511_cast)[name = tensor("q_259_cast")]; + tensor var_12707 = const()[name = tensor("op_12707"), val = tensor([1, 1])]; + tensor var_12709 = const()[name = tensor("op_12709"), val = tensor([1, 1])]; + tensor k_259_pad_type_0 = const()[name = tensor("k_259_pad_type_0"), val = tensor("custom")]; + tensor k_259_pad_0 = const()[name = tensor("k_259_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1343753664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1344736768))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_259_cast = conv(dilations = var_12709, groups = var_12518, pad = k_259_pad_0, pad_type = k_259_pad_type_0, strides = var_12707, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_259_cast")]; + tensor var_12713 = const()[name = tensor("op_12713"), val = tensor([1, 1])]; + tensor var_12715 = const()[name = tensor("op_12715"), val = tensor([1, 1])]; + tensor v_259_pad_type_0 = const()[name = tensor("v_259_pad_type_0"), val = tensor("custom")]; + tensor v_259_pad_0 = const()[name = tensor("v_259_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1344736960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1345392384))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_259_cast = conv(dilations = var_12715, groups = var_12518, pad = v_259_pad_0, pad_type = v_259_pad_type_0, strides = var_12713, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_259_cast")]; + tensor var_12719 = const()[name = tensor("op_12719"), val = tensor([2, 10, 64, -1])]; + tensor var_12720_cast = reshape(shape = var_12719, x = q_259_cast)[name = tensor("op_12720_cast")]; + tensor var_12721 = const()[name = tensor("op_12721"), val = tensor([2, 10, 64, -1])]; + tensor var_12722_cast = reshape(shape = var_12721, x = k_259_cast)[name = tensor("op_12722_cast")]; + tensor var_12723 = const()[name = tensor("op_12723"), val = tensor([2, 10, 64, -1])]; + tensor var_12724_cast = reshape(shape = var_12723, x = v_259_cast)[name = tensor("op_12724_cast")]; + tensor attn_weights_517_transpose_x_0 = const()[name = tensor("attn_weights_517_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_517_transpose_y_0 = const()[name = tensor("attn_weights_517_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_517_cast = matmul(transpose_x = attn_weights_517_transpose_x_0, transpose_y = attn_weights_517_transpose_y_0, x = var_12720_cast, y = var_12722_cast)[name = tensor("attn_weights_517_cast")]; + tensor attn_weights_519_cast = mul(x = attn_weights_517_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_519_cast")]; + tensor var_12728_cast = softmax(axis = var_12502, x = attn_weights_519_cast)[name = tensor("op_12728_cast")]; + tensor attn_259_transpose_x_0 = const()[name = tensor("attn_259_transpose_x_0"), val = tensor(false)]; + tensor attn_259_transpose_y_0 = const()[name = tensor("attn_259_transpose_y_0"), val = tensor(true)]; + tensor attn_259_cast = matmul(transpose_x = attn_259_transpose_x_0, transpose_y = attn_259_transpose_y_0, x = var_12724_cast, y = var_12728_cast)[name = tensor("attn_259_cast")]; + tensor var_12732 = const()[name = tensor("op_12732"), val = tensor([2, 640, 1, -1])]; + tensor input_739_cast = reshape(shape = var_12732, x = attn_259_cast)[name = tensor("input_739_cast")]; + tensor var_12737 = const()[name = tensor("op_12737"), val = tensor([1, 1])]; + tensor var_12739 = const()[name = tensor("op_12739"), val = tensor([1, 1])]; + tensor var_12741_pad_type_0 = const()[name = tensor("op_12741_pad_type_0"), val = tensor("custom")]; + tensor var_12741_pad_0 = const()[name = tensor("op_12741_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1345392512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1345699776))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = 
tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1345699968)))]; + tensor var_12741_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_12739, groups = var_12518, pad = var_12741_pad_0, pad_type = var_12741_pad_type_0, strides = var_12737, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_739_cast)[name = tensor("op_12741_cast")]; + tensor inputs_389_cast = add(x = var_12741_cast, y = inputs_387_cast)[name = tensor("inputs_389_cast")]; + tensor var_12745 = const()[name = tensor("op_12745"), val = tensor([1])]; + tensor channels_mean_389_cast = reduce_mean(axes = var_12745, keep_dims = var_12513, x = inputs_389_cast)[name = tensor("channels_mean_389_cast")]; + tensor zero_mean_389_cast = sub(x = inputs_389_cast, y = channels_mean_389_cast)[name = tensor("zero_mean_389_cast")]; + tensor zero_mean_sq_389_cast = mul(x = zero_mean_389_cast, y = zero_mean_389_cast)[name = tensor("zero_mean_sq_389_cast")]; + tensor var_12749 = const()[name = tensor("op_12749"), val = tensor([1])]; + tensor var_12750_cast = reduce_mean(axes = var_12749, keep_dims = var_12513, x = zero_mean_sq_389_cast)[name = tensor("op_12750_cast")]; + tensor var_12751_to_fp16 = const()[name = tensor("op_12751_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12752_cast = add(x = var_12750_cast, y = var_12751_to_fp16)[name = tensor("op_12752_cast")]; + tensor denom_389_epsilon_0_to_fp16 = const()[name = tensor("denom_389_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_389_cast = rsqrt(epsilon = denom_389_epsilon_0_to_fp16, x = var_12752_cast)[name = tensor("denom_389_cast")]; + tensor out_389_cast = mul(x = zero_mean_389_cast, y = denom_389_cast)[name = tensor("out_389_cast")]; + tensor var_12756_to_fp16 = const()[name = tensor("op_12756_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1345701312)))]; + tensor var_12757_cast = add(x = out_389_cast, y = var_12756_to_fp16)[name = tensor("op_12757_cast")]; + tensor var_12759_to_fp16 = const()[name = tensor("op_12759_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1345702656)))]; + tensor input_741_cast = mul(x = var_12757_cast, y = var_12759_to_fp16)[name = tensor("input_741_cast")]; + tensor var_12767 = const()[name = tensor("op_12767"), val = tensor([1, 1])]; + tensor var_12769 = const()[name = tensor("op_12769"), val = tensor([1, 1])]; + tensor var_12771_pad_type_0 = const()[name = tensor("op_12771_pad_type_0"), val = tensor("custom")]; + tensor var_12771_pad_0 = const()[name = tensor("op_12771_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1345704000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1348980864))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1348981440)))]; + tensor var_12771_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_12769, groups = var_12518, pad = var_12771_pad_0, pad_type = var_12771_pad_type_0, strides = var_12767, weight = up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_741_cast)[name = tensor("op_12771_cast")]; + tensor var_12772_split_sizes_0 = const()[name = tensor("op_12772_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_12772_axis_0 = const()[name = tensor("op_12772_axis_0"), val = tensor(1)]; + tensor var_12772_cast_0, tensor var_12772_cast_1 = split(axis = var_12772_axis_0, split_sizes = var_12772_split_sizes_0, x = var_12771_cast)[name = tensor("op_12772_cast")]; + tensor var_12774_mode_0 = const()[name = tensor("op_12774_mode_0"), val = tensor("EXACT")]; + tensor var_12774_cast = gelu(mode = var_12774_mode_0, x = var_12772_cast_1)[name = tensor("op_12774_cast")]; + tensor input_743_cast = mul(x = var_12772_cast_0, y = var_12774_cast)[name = tensor("input_743_cast")]; + tensor var_12778 = const()[name = tensor("op_12778"), val = tensor([1, 1])]; + tensor var_12780 = const()[name = tensor("op_12780"), val = tensor([1, 1])]; + tensor var_12782_pad_type_0 = const()[name = tensor("op_12782_pad_type_0"), val = tensor("custom")]; + tensor var_12782_pad_0 = const()[name = tensor("op_12782_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1348991744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350630208))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350630784)))]; + tensor var_12782_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_12780, groups = var_12518, pad = var_12782_pad_0, pad_type = var_12782_pad_type_0, strides = var_12778, weight = up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_743_cast)[name = tensor("op_12782_cast")]; + tensor inputs_391_cast = add(x = var_12782_cast, y = inputs_389_cast)[name = tensor("inputs_391_cast")]; + tensor var_12792 = const()[name = tensor("op_12792"), val = tensor([1])]; + tensor channels_mean_391_cast = reduce_mean(axes = var_12792, keep_dims = var_12513, x = inputs_391_cast)[name = tensor("channels_mean_391_cast")]; + tensor zero_mean_391_cast = sub(x = inputs_391_cast, y = channels_mean_391_cast)[name = tensor("zero_mean_391_cast")]; + tensor zero_mean_sq_391_cast = mul(x = zero_mean_391_cast, y = zero_mean_391_cast)[name = tensor("zero_mean_sq_391_cast")]; + tensor var_12796 = const()[name = tensor("op_12796"), val = tensor([1])]; + tensor var_12797_cast = reduce_mean(axes = var_12796, keep_dims = var_12513, x = zero_mean_sq_391_cast)[name = tensor("op_12797_cast")]; + tensor var_12798_to_fp16 = const()[name = tensor("op_12798_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12799_cast = add(x = var_12797_cast, y = 
var_12798_to_fp16)[name = tensor("op_12799_cast")]; + tensor denom_391_epsilon_0_to_fp16 = const()[name = tensor("denom_391_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_391_cast = rsqrt(epsilon = denom_391_epsilon_0_to_fp16, x = var_12799_cast)[name = tensor("denom_391_cast")]; + tensor out_391_cast = mul(x = zero_mean_391_cast, y = denom_391_cast)[name = tensor("out_391_cast")]; + tensor var_12803_to_fp16 = const()[name = tensor("op_12803_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350632128)))]; + tensor var_12804_cast = add(x = out_391_cast, y = var_12803_to_fp16)[name = tensor("op_12804_cast")]; + tensor var_12806_to_fp16 = const()[name = tensor("op_12806_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350633472)))]; + tensor hidden_states_515_cast = mul(x = var_12804_cast, y = var_12806_to_fp16)[name = tensor("hidden_states_515_cast")]; + tensor var_12813 = const()[name = tensor("op_12813"), val = tensor([1, 1])]; + tensor var_12815 = const()[name = tensor("op_12815"), val = tensor([1, 1])]; + tensor q_261_pad_type_0 = const()[name = tensor("q_261_pad_type_0"), val = tensor("custom")]; + tensor q_261_pad_0 = const()[name = tensor("q_261_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350634816))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350942080))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_261_cast = conv(dilations = var_12815, groups = var_12518, pad = q_261_pad_0, pad_type = q_261_pad_type_0, strides = var_12813, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_515_cast)[name = tensor("q_261_cast")]; + tensor var_12819 = const()[name = tensor("op_12819"), val = tensor([1, 1])]; + tensor var_12821 = const()[name = tensor("op_12821"), val = tensor([1, 1])]; + tensor k_261_pad_type_0 = const()[name = tensor("k_261_pad_type_0"), val = tensor("custom")]; + tensor k_261_pad_0 = const()[name = tensor("k_261_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350942272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1351249536))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_261_cast = conv(dilations = var_12821, groups = var_12518, pad = k_261_pad_0, pad_type = k_261_pad_type_0, strides = var_12819, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_515_cast)[name = tensor("k_261_cast")]; + tensor var_12825 = const()[name = tensor("op_12825"), val = tensor([1, 1])]; + tensor var_12827 = const()[name = tensor("op_12827"), val = tensor([1, 1])]; + tensor v_261_pad_type_0 = const()[name = tensor("v_261_pad_type_0"), val = tensor("custom")]; + tensor v_261_pad_0 = const()[name = tensor("v_261_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1351249728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1351659392))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_261_cast = conv(dilations = var_12827, groups = var_12518, pad = v_261_pad_0, pad_type = v_261_pad_type_0, strides = var_12825, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_515_cast)[name = tensor("v_261_cast")]; + tensor var_12831 = const()[name = tensor("op_12831"), val = tensor([2, 10, 64, -1])]; + tensor var_12832_cast = reshape(shape = var_12831, x = q_261_cast)[name = tensor("op_12832_cast")]; + tensor var_12833 = const()[name = tensor("op_12833"), val = tensor([2, 10, 64, -1])]; + tensor var_12834_cast = reshape(shape = var_12833, x = k_261_cast)[name = tensor("op_12834_cast")]; + tensor var_12835 = const()[name = tensor("op_12835"), val = tensor([2, 10, 64, -1])]; + tensor var_12836_cast = reshape(shape = var_12835, x = v_261_cast)[name = tensor("op_12836_cast")]; + tensor attn_weights_521_transpose_x_0 = const()[name = tensor("attn_weights_521_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_521_transpose_y_0 = const()[name = tensor("attn_weights_521_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_521_cast = matmul(transpose_x = attn_weights_521_transpose_x_0, transpose_y = attn_weights_521_transpose_y_0, x = var_12832_cast, y = var_12834_cast)[name = tensor("attn_weights_521_cast")]; + tensor attn_weights_523_cast = mul(x = attn_weights_521_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_523_cast")]; + tensor var_12840_cast = softmax(axis = var_12502, x = attn_weights_523_cast)[name = tensor("op_12840_cast")]; + tensor attn_261_transpose_x_0 = const()[name = tensor("attn_261_transpose_x_0"), val = tensor(false)]; + tensor attn_261_transpose_y_0 = const()[name = tensor("attn_261_transpose_y_0"), val = tensor(true)]; + tensor attn_261_cast = matmul(transpose_x = attn_261_transpose_x_0, transpose_y = attn_261_transpose_y_0, x = var_12836_cast, y = var_12840_cast)[name = tensor("attn_261_cast")]; + tensor var_12844 = const()[name = tensor("op_12844"), val = tensor([2, 640, 1, -1])]; + tensor input_745_cast = reshape(shape = var_12844, x = attn_261_cast)[name = tensor("input_745_cast")]; + tensor var_12849 = const()[name = tensor("op_12849"), val = tensor([1, 1])]; + tensor var_12851 = const()[name = tensor("op_12851"), val = tensor([1, 1])]; + tensor var_12853_pad_type_0 = const()[name = tensor("op_12853_pad_type_0"), val = tensor("custom")]; + tensor var_12853_pad_0 = const()[name = tensor("op_12853_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1351659968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352069632))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = 
tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352070208)))]; + tensor var_12853_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_12851, groups = var_12518, pad = var_12853_pad_0, pad_type = var_12853_pad_type_0, strides = var_12849, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_745_cast)[name = tensor("op_12853_cast")]; + tensor inputs_393_cast = add(x = var_12853_cast, y = inputs_391_cast)[name = tensor("inputs_393_cast")]; + tensor var_12857 = const()[name = tensor("op_12857"), val = tensor([1])]; + tensor channels_mean_393_cast = reduce_mean(axes = var_12857, keep_dims = var_12513, x = inputs_393_cast)[name = tensor("channels_mean_393_cast")]; + tensor zero_mean_393_cast = sub(x = inputs_393_cast, y = channels_mean_393_cast)[name = tensor("zero_mean_393_cast")]; + tensor zero_mean_sq_393_cast = mul(x = zero_mean_393_cast, y = zero_mean_393_cast)[name = tensor("zero_mean_sq_393_cast")]; + tensor var_12861 = const()[name = tensor("op_12861"), val = tensor([1])]; + tensor var_12862_cast = reduce_mean(axes = var_12861, keep_dims = var_12513, x = zero_mean_sq_393_cast)[name = tensor("op_12862_cast")]; + tensor var_12863_to_fp16 = const()[name = tensor("op_12863_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12864_cast = add(x = var_12862_cast, y = var_12863_to_fp16)[name = tensor("op_12864_cast")]; + tensor denom_393_epsilon_0_to_fp16 = const()[name = tensor("denom_393_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_393_cast = rsqrt(epsilon = denom_393_epsilon_0_to_fp16, x = var_12864_cast)[name = tensor("denom_393_cast")]; + tensor out_393_cast = mul(x = zero_mean_393_cast, y = denom_393_cast)[name = tensor("out_393_cast")]; + tensor var_12868_to_fp16 = const()[name = tensor("op_12868_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352071552)))]; + tensor var_12869_cast = add(x = out_393_cast, y = var_12868_to_fp16)[name = tensor("op_12869_cast")]; + tensor var_12871_to_fp16 = const()[name = tensor("op_12871_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352072896)))]; + tensor hidden_states_517_cast = mul(x = var_12869_cast, y = var_12871_to_fp16)[name = tensor("hidden_states_517_cast")]; + tensor var_12878 = const()[name = tensor("op_12878"), val = tensor([1, 1])]; + tensor var_12880 = const()[name = tensor("op_12880"), val = tensor([1, 1])]; + tensor q_263_pad_type_0 = const()[name = tensor("q_263_pad_type_0"), val = tensor("custom")]; + tensor q_263_pad_0 = const()[name = tensor("q_263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352074240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352279104))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_263_cast = conv(dilations = var_12880, groups = var_12518, pad = q_263_pad_0, pad_type = q_263_pad_type_0, strides = var_12878, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = 
hidden_states_517_cast)[name = tensor("q_263_cast")]; + tensor var_12884 = const()[name = tensor("op_12884"), val = tensor([1, 1])]; + tensor var_12886 = const()[name = tensor("op_12886"), val = tensor([1, 1])]; + tensor k_263_pad_type_0 = const()[name = tensor("k_263_pad_type_0"), val = tensor("custom")]; + tensor k_263_pad_0 = const()[name = tensor("k_263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352279232))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352934656))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_263_cast = conv(dilations = var_12886, groups = var_12518, pad = k_263_pad_0, pad_type = k_263_pad_type_0, strides = var_12884, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_263_cast")]; + tensor var_12890 = const()[name = tensor("op_12890"), val = tensor([1, 1])]; + tensor var_12892 = const()[name = tensor("op_12892"), val = tensor([1, 1])]; + tensor v_263_pad_type_0 = const()[name = tensor("v_263_pad_type_0"), val = tensor("custom")]; + tensor v_263_pad_0 = const()[name = tensor("v_263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352934784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353590208))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_263_cast = conv(dilations = var_12892, groups = var_12518, pad = v_263_pad_0, pad_type = v_263_pad_type_0, strides = var_12890, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_263_cast")]; + tensor var_12896 = const()[name = tensor("op_12896"), val = tensor([2, 10, 64, -1])]; + tensor var_12897_cast = reshape(shape = var_12896, x = q_263_cast)[name = tensor("op_12897_cast")]; + tensor var_12898 = const()[name = tensor("op_12898"), val = tensor([2, 10, 64, -1])]; + tensor var_12899_cast = reshape(shape = var_12898, x = k_263_cast)[name = tensor("op_12899_cast")]; + tensor var_12900 = const()[name = tensor("op_12900"), val = tensor([2, 10, 64, -1])]; + tensor var_12901_cast = reshape(shape = var_12900, x = v_263_cast)[name = tensor("op_12901_cast")]; + tensor attn_weights_525_transpose_x_0 = const()[name = tensor("attn_weights_525_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_525_transpose_y_0 = const()[name = tensor("attn_weights_525_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_525_cast = matmul(transpose_x = attn_weights_525_transpose_x_0, transpose_y = attn_weights_525_transpose_y_0, x = var_12897_cast, y = var_12899_cast)[name = tensor("attn_weights_525_cast")]; + tensor attn_weights_527_cast = mul(x = attn_weights_525_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_527_cast")]; + tensor var_12905_cast = softmax(axis = var_12502, x = attn_weights_527_cast)[name = tensor("op_12905_cast")]; + tensor attn_263_transpose_x_0 = 
const()[name = tensor("attn_263_transpose_x_0"), val = tensor(false)]; + tensor attn_263_transpose_y_0 = const()[name = tensor("attn_263_transpose_y_0"), val = tensor(true)]; + tensor attn_263_cast = matmul(transpose_x = attn_263_transpose_x_0, transpose_y = attn_263_transpose_y_0, x = var_12901_cast, y = var_12905_cast)[name = tensor("attn_263_cast")]; + tensor var_12909 = const()[name = tensor("op_12909"), val = tensor([2, 640, 1, -1])]; + tensor input_747_cast = reshape(shape = var_12909, x = attn_263_cast)[name = tensor("input_747_cast")]; + tensor var_12914 = const()[name = tensor("op_12914"), val = tensor([1, 1])]; + tensor var_12916 = const()[name = tensor("op_12916"), val = tensor([1, 1])]; + tensor var_12918_pad_type_0 = const()[name = tensor("op_12918_pad_type_0"), val = tensor("custom")]; + tensor var_12918_pad_0 = const()[name = tensor("op_12918_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353590336))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353897600))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353897792)))]; + tensor var_12918_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_12916, groups = var_12518, pad = var_12918_pad_0, pad_type = var_12918_pad_type_0, strides = var_12914, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_747_cast)[name = tensor("op_12918_cast")]; + tensor inputs_395_cast = add(x = var_12918_cast, y = inputs_393_cast)[name = tensor("inputs_395_cast")]; + tensor var_12922 = const()[name = tensor("op_12922"), val = tensor([1])]; + tensor channels_mean_395_cast = reduce_mean(axes = var_12922, keep_dims = var_12513, x = inputs_395_cast)[name = tensor("channels_mean_395_cast")]; + tensor zero_mean_395_cast = sub(x = inputs_395_cast, y = channels_mean_395_cast)[name = tensor("zero_mean_395_cast")]; + tensor zero_mean_sq_395_cast = mul(x = zero_mean_395_cast, y = zero_mean_395_cast)[name = tensor("zero_mean_sq_395_cast")]; + tensor var_12926 = const()[name = tensor("op_12926"), val = tensor([1])]; + tensor var_12927_cast = reduce_mean(axes = var_12926, keep_dims = var_12513, x = zero_mean_sq_395_cast)[name = tensor("op_12927_cast")]; + tensor var_12928_to_fp16 = const()[name = tensor("op_12928_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12929_cast = add(x = var_12927_cast, y = var_12928_to_fp16)[name = tensor("op_12929_cast")]; + tensor denom_395_epsilon_0_to_fp16 = const()[name = tensor("denom_395_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_395_cast = rsqrt(epsilon = denom_395_epsilon_0_to_fp16, x = var_12929_cast)[name = tensor("denom_395_cast")]; + tensor out_395_cast = mul(x = zero_mean_395_cast, y = denom_395_cast)[name = tensor("out_395_cast")]; + tensor var_12933_to_fp16 = const()[name = tensor("op_12933_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1353899136)))]; + tensor var_12934_cast = add(x = out_395_cast, y = var_12933_to_fp16)[name = tensor("op_12934_cast")]; + tensor var_12936_to_fp16 = const()[name = tensor("op_12936_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353900480)))]; + tensor input_749_cast = mul(x = var_12934_cast, y = var_12936_to_fp16)[name = tensor("input_749_cast")]; + tensor var_12944 = const()[name = tensor("op_12944"), val = tensor([1, 1])]; + tensor var_12946 = const()[name = tensor("op_12946"), val = tensor([1, 1])]; + tensor var_12948_pad_type_0 = const()[name = tensor("op_12948_pad_type_0"), val = tensor("custom")]; + tensor var_12948_pad_0 = const()[name = tensor("op_12948_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353901824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1357178688))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1357179264)))]; + tensor var_12948_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_12946, groups = var_12518, pad = var_12948_pad_0, pad_type = var_12948_pad_type_0, strides = var_12944, weight = up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_749_cast)[name = tensor("op_12948_cast")]; + tensor var_12949_split_sizes_0 = const()[name = tensor("op_12949_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_12949_axis_0 = const()[name = tensor("op_12949_axis_0"), val = tensor(1)]; + tensor var_12949_cast_0, tensor var_12949_cast_1 = split(axis = var_12949_axis_0, split_sizes = var_12949_split_sizes_0, x = var_12948_cast)[name = tensor("op_12949_cast")]; + tensor var_12951_mode_0 = const()[name = tensor("op_12951_mode_0"), val = tensor("EXACT")]; + tensor var_12951_cast = gelu(mode = var_12951_mode_0, x = var_12949_cast_1)[name = tensor("op_12951_cast")]; + tensor input_751_cast = mul(x = var_12949_cast_0, y = var_12951_cast)[name = tensor("input_751_cast")]; + tensor var_12955 = const()[name = tensor("op_12955"), val = tensor([1, 1])]; + tensor var_12957 = const()[name = tensor("op_12957"), val = tensor([1, 1])]; + tensor var_12959_pad_type_0 = const()[name = tensor("op_12959_pad_type_0"), val = tensor("custom")]; + tensor var_12959_pad_0 = const()[name = tensor("op_12959_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1357189568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1358828032))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = 
tensor("up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1358828608)))]; + tensor var_12959_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_12957, groups = var_12518, pad = var_12959_pad_0, pad_type = var_12959_pad_type_0, strides = var_12955, weight = up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_751_cast)[name = tensor("op_12959_cast")]; + tensor hidden_states_521_cast = add(x = var_12959_cast, y = inputs_395_cast)[name = tensor("hidden_states_521_cast")]; + tensor var_12961 = const()[name = tensor("op_12961"), val = tensor([2, 640, 64, 64])]; + tensor input_753_cast = reshape(shape = var_12961, x = hidden_states_521_cast)[name = tensor("input_753_cast")]; + tensor var_12965 = const()[name = tensor("op_12965"), val = tensor([1, 1])]; + tensor var_12967 = const()[name = tensor("op_12967"), val = tensor([1, 1])]; + tensor hidden_states_523_pad_type_0 = const()[name = tensor("hidden_states_523_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_523_pad_0 = const()[name = tensor("hidden_states_523_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1358829952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359239616))), name = tensor("up_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359240192)))]; + tensor hidden_states_523_cast = conv(bias = up_blocks_1_attentions_0_proj_out_bias_to_fp16, dilations = var_12967, groups = var_12518, pad = hidden_states_523_pad_0, pad_type = hidden_states_523_pad_type_0, strides = var_12965, weight = up_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized, x = input_753_cast)[name = tensor("hidden_states_523_cast")]; + tensor hidden_states_525_cast = add(x = hidden_states_523_cast, y = hidden_states_505_cast)[name = tensor("hidden_states_525_cast")]; + tensor input_755_interleave_0 = const()[name = tensor("input_755_interleave_0"), val = tensor(false)]; + tensor input_755_cast = concat(axis = var_12518, interleave = input_755_interleave_0, values = (hidden_states_525_cast, input_79_cast))[name = tensor("input_755_cast")]; + tensor reshape_132_shape_0 = const()[name = tensor("reshape_132_shape_0"), val = tensor([2, 32, 40, 64, 64])]; + tensor reshape_132_cast = reshape(shape = reshape_132_shape_0, x = input_755_cast)[name = tensor("reshape_132_cast")]; + tensor reduce_mean_99_axes_0 = const()[name = tensor("reduce_mean_99_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_99_keep_dims_0 = const()[name = tensor("reduce_mean_99_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_99_cast = reduce_mean(axes = reduce_mean_99_axes_0, keep_dims = reduce_mean_99_keep_dims_0, x = reshape_132_cast)[name = tensor("reduce_mean_99_cast")]; + tensor sub_66_cast = sub(x = reshape_132_cast, y = reduce_mean_99_cast)[name = tensor("sub_66_cast")]; + tensor square_33_cast = square(x = sub_66_cast)[name = tensor("square_33_cast")]; + tensor reduce_mean_101_axes_0 = 
const()[name = tensor("reduce_mean_101_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_101_keep_dims_0 = const()[name = tensor("reduce_mean_101_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_101_cast = reduce_mean(axes = reduce_mean_101_axes_0, keep_dims = reduce_mean_101_keep_dims_0, x = square_33_cast)[name = tensor("reduce_mean_101_cast")]; + tensor add_66_y_0_to_fp16 = const()[name = tensor("add_66_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_66_cast = add(x = reduce_mean_101_cast, y = add_66_y_0_to_fp16)[name = tensor("add_66_cast")]; + tensor sqrt_33_cast = sqrt(x = add_66_cast)[name = tensor("sqrt_33_cast")]; + tensor real_div_33_cast = real_div(x = sub_66_cast, y = sqrt_33_cast)[name = tensor("real_div_33_cast")]; + tensor reshape_133_shape_0 = const()[name = tensor("reshape_133_shape_0"), val = tensor([2, 1280, 64, 64])]; + tensor reshape_133_cast = reshape(shape = reshape_133_shape_0, x = real_div_33_cast)[name = tensor("reshape_133_cast")]; + tensor add_67_gamma_0_to_fp16 = const()[name = tensor("add_67_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359241536)))]; + tensor add_67_beta_0_to_fp16 = const()[name = tensor("add_67_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359244160)))]; + tensor add_67_epsilon_0_to_fp16 = const()[name = tensor("add_67_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_67_cast = batch_norm(beta = add_67_beta_0_to_fp16, epsilon = add_67_epsilon_0_to_fp16, gamma = add_67_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_133_cast)[name = tensor("add_67_cast")]; + tensor input_759_cast = silu(x = add_67_cast)[name = tensor("input_759_cast")]; + tensor var_12985 = const()[name = tensor("op_12985"), val = tensor([1, 1])]; + tensor var_12987 = const()[name = tensor("op_12987"), val = tensor([1, 1])]; + tensor hidden_states_527_pad_type_0 = const()[name = tensor("hidden_states_527_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_527_pad_0 = const()[name = tensor("hidden_states_527_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359246784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1366619648))), name = tensor("up_blocks_1_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([640, 1280, 3, 3])]; + tensor up_blocks_1_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1366620224)))]; + tensor hidden_states_527_cast = conv(bias = up_blocks_1_resnets_1_conv1_bias_to_fp16, dilations = var_12987, groups = var_12518, pad = hidden_states_527_pad_0, pad_type = hidden_states_527_pad_type_0, strides = var_12985, weight = up_blocks_1_resnets_1_conv1_weight_to_fp16_palettized, x = input_759_cast)[name = tensor("hidden_states_527_cast")]; + tensor var_12993 = const()[name = tensor("op_12993"), val = tensor([1, 1])]; + tensor var_12995 = const()[name = tensor("op_12995"), val = tensor([1, 1])]; + tensor temb_25_pad_type_0 = const()[name = tensor("temb_25_pad_type_0"), val = tensor("custom")]; + tensor temb_25_pad_0 = const()[name = tensor("temb_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1366621568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1367440832))), name = tensor("up_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor up_blocks_1_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1367441408)))]; + tensor temb_25_cast = conv(bias = up_blocks_1_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_12995, groups = var_12518, pad = temb_25_pad_0, pad_type = temb_25_pad_type_0, strides = var_12993, weight = up_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_25_cast")]; + tensor input_763_cast = add(x = hidden_states_527_cast, y = temb_25_cast)[name = tensor("input_763_cast")]; + tensor reshape_136_shape_0 = const()[name = tensor("reshape_136_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_136_cast = reshape(shape = reshape_136_shape_0, x = input_763_cast)[name = tensor("reshape_136_cast")]; + tensor reduce_mean_102_axes_0 = const()[name = tensor("reduce_mean_102_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_102_keep_dims_0 = const()[name = tensor("reduce_mean_102_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_102_cast = reduce_mean(axes = reduce_mean_102_axes_0, keep_dims = reduce_mean_102_keep_dims_0, x = reshape_136_cast)[name = tensor("reduce_mean_102_cast")]; + tensor sub_68_cast = sub(x = reshape_136_cast, y = reduce_mean_102_cast)[name = tensor("sub_68_cast")]; + tensor square_34_cast = square(x = sub_68_cast)[name = tensor("square_34_cast")]; + tensor reduce_mean_104_axes_0 = const()[name = tensor("reduce_mean_104_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_104_keep_dims_0 = const()[name = tensor("reduce_mean_104_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_104_cast = reduce_mean(axes = reduce_mean_104_axes_0, keep_dims = reduce_mean_104_keep_dims_0, x = square_34_cast)[name = tensor("reduce_mean_104_cast")]; + tensor add_68_y_0_to_fp16 = const()[name = tensor("add_68_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_68_cast = add(x = reduce_mean_104_cast, y = add_68_y_0_to_fp16)[name = tensor("add_68_cast")]; + tensor sqrt_34_cast = sqrt(x = add_68_cast)[name = tensor("sqrt_34_cast")]; + tensor real_div_34_cast = real_div(x = sub_68_cast, y = sqrt_34_cast)[name = tensor("real_div_34_cast")]; + tensor reshape_137_shape_0 = const()[name = tensor("reshape_137_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_137_cast = reshape(shape = reshape_137_shape_0, x = real_div_34_cast)[name = tensor("reshape_137_cast")]; + tensor add_69_gamma_0_to_fp16 = const()[name = tensor("add_69_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1367442752)))]; + tensor add_69_beta_0_to_fp16 = const()[name = tensor("add_69_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1367444096)))]; + tensor add_69_epsilon_0_to_fp16 = const()[name = tensor("add_69_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_69_cast = batch_norm(beta = add_69_beta_0_to_fp16, epsilon = add_69_epsilon_0_to_fp16, gamma = add_69_gamma_0_to_fp16, 
mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_137_cast)[name = tensor("add_69_cast")]; + tensor input_767_cast = silu(x = add_69_cast)[name = tensor("input_767_cast")]; + tensor var_13005 = const()[name = tensor("op_13005"), val = tensor([1, 1])]; + tensor var_13007 = const()[name = tensor("op_13007"), val = tensor([1, 1])]; + tensor hidden_states_529_pad_type_0 = const()[name = tensor("hidden_states_529_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_529_pad_0 = const()[name = tensor("hidden_states_529_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1367445440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1371131904))), name = tensor("up_blocks_1_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor up_blocks_1_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1371132480)))]; + tensor hidden_states_529_cast = conv(bias = up_blocks_1_resnets_1_conv2_bias_to_fp16, dilations = var_13007, groups = var_12518, pad = hidden_states_529_pad_0, pad_type = hidden_states_529_pad_type_0, strides = var_13005, weight = up_blocks_1_resnets_1_conv2_weight_to_fp16_palettized, x = input_767_cast)[name = tensor("hidden_states_529_cast")]; + tensor var_13012 = const()[name = tensor("op_13012"), val = tensor([1, 1])]; + tensor var_13014 = const()[name = tensor("op_13014"), val = tensor([1, 1])]; + tensor x_13_pad_type_0 = const()[name = tensor("x_13_pad_type_0"), val = tensor("custom")]; + tensor x_13_pad_0 = const()[name = tensor("x_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_1_conv_shortcut_weight_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_conv_shortcut_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1371133824)))]; + tensor up_blocks_1_resnets_1_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1372772288)))]; + tensor x_13_cast = conv(bias = up_blocks_1_resnets_1_conv_shortcut_bias_to_fp16, dilations = var_13014, groups = var_12518, pad = x_13_pad_0, pad_type = x_13_pad_type_0, strides = var_13012, weight = up_blocks_1_resnets_1_conv_shortcut_weight_to_fp16, x = input_755_cast)[name = tensor("x_13_cast")]; + tensor hidden_states_531_cast = add(x = x_13_cast, y = hidden_states_529_cast)[name = tensor("hidden_states_531_cast")]; + tensor reshape_140_shape_0 = const()[name = tensor("reshape_140_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_140_cast = reshape(shape = reshape_140_shape_0, x = hidden_states_531_cast)[name = tensor("reshape_140_cast")]; + tensor reduce_mean_105_axes_0 = const()[name = tensor("reduce_mean_105_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_105_keep_dims_0 = const()[name = tensor("reduce_mean_105_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_105_cast = reduce_mean(axes = reduce_mean_105_axes_0, keep_dims = reduce_mean_105_keep_dims_0, x = reshape_140_cast)[name = tensor("reduce_mean_105_cast")]; + tensor sub_70_cast = sub(x = reshape_140_cast, y = reduce_mean_105_cast)[name = 
tensor("sub_70_cast")]; + tensor square_35_cast = square(x = sub_70_cast)[name = tensor("square_35_cast")]; + tensor reduce_mean_107_axes_0 = const()[name = tensor("reduce_mean_107_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_107_keep_dims_0 = const()[name = tensor("reduce_mean_107_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_107_cast = reduce_mean(axes = reduce_mean_107_axes_0, keep_dims = reduce_mean_107_keep_dims_0, x = square_35_cast)[name = tensor("reduce_mean_107_cast")]; + tensor add_70_y_0_to_fp16 = const()[name = tensor("add_70_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_70_cast = add(x = reduce_mean_107_cast, y = add_70_y_0_to_fp16)[name = tensor("add_70_cast")]; + tensor sqrt_35_cast = sqrt(x = add_70_cast)[name = tensor("sqrt_35_cast")]; + tensor real_div_35_cast = real_div(x = sub_70_cast, y = sqrt_35_cast)[name = tensor("real_div_35_cast")]; + tensor reshape_141_shape_0 = const()[name = tensor("reshape_141_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_141_cast = reshape(shape = reshape_141_shape_0, x = real_div_35_cast)[name = tensor("reshape_141_cast")]; + tensor add_71_gamma_0_to_fp16 = const()[name = tensor("add_71_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1372773632)))]; + tensor add_71_beta_0_to_fp16 = const()[name = tensor("add_71_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1372774976)))]; + tensor add_71_epsilon_0_to_fp16 = const()[name = tensor("add_71_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_71_cast = batch_norm(beta = add_71_beta_0_to_fp16, epsilon = add_71_epsilon_0_to_fp16, gamma = add_71_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_141_cast)[name = tensor("add_71_cast")]; + tensor var_13036 = const()[name = tensor("op_13036"), val = tensor([1, 1])]; + tensor var_13038 = const()[name = tensor("op_13038"), val = tensor([1, 1])]; + tensor hidden_states_533_pad_type_0 = const()[name = tensor("hidden_states_533_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_533_pad_0 = const()[name = tensor("hidden_states_533_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1372776320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1373185984))), name = tensor("up_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1373186560)))]; + tensor hidden_states_533_cast = conv(bias = up_blocks_1_attentions_1_proj_in_bias_to_fp16, dilations = var_13038, groups = var_12518, pad = hidden_states_533_pad_0, pad_type = hidden_states_533_pad_type_0, strides = var_13036, weight = up_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized, x = add_71_cast)[name = tensor("hidden_states_533_cast")]; + tensor var_13043 = const()[name = tensor("op_13043"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_397_cast = reshape(shape = var_13043, x = hidden_states_533_cast)[name = tensor("inputs_397_cast")]; + tensor var_13053 = const()[name = tensor("op_13053"), val = tensor([1])]; + 
tensor channels_mean_397_cast = reduce_mean(axes = var_13053, keep_dims = var_12513, x = inputs_397_cast)[name = tensor("channels_mean_397_cast")]; + tensor zero_mean_397_cast = sub(x = inputs_397_cast, y = channels_mean_397_cast)[name = tensor("zero_mean_397_cast")]; + tensor zero_mean_sq_397_cast = mul(x = zero_mean_397_cast, y = zero_mean_397_cast)[name = tensor("zero_mean_sq_397_cast")]; + tensor var_13057 = const()[name = tensor("op_13057"), val = tensor([1])]; + tensor var_13058_cast = reduce_mean(axes = var_13057, keep_dims = var_12513, x = zero_mean_sq_397_cast)[name = tensor("op_13058_cast")]; + tensor var_13059_to_fp16 = const()[name = tensor("op_13059_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13060_cast = add(x = var_13058_cast, y = var_13059_to_fp16)[name = tensor("op_13060_cast")]; + tensor denom_397_epsilon_0_to_fp16 = const()[name = tensor("denom_397_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_397_cast = rsqrt(epsilon = denom_397_epsilon_0_to_fp16, x = var_13060_cast)[name = tensor("denom_397_cast")]; + tensor out_397_cast = mul(x = zero_mean_397_cast, y = denom_397_cast)[name = tensor("out_397_cast")]; + tensor var_13064_to_fp16 = const()[name = tensor("op_13064_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1373187904)))]; + tensor var_13065_cast = add(x = out_397_cast, y = var_13064_to_fp16)[name = tensor("op_13065_cast")]; + tensor var_13067_to_fp16 = const()[name = tensor("op_13067_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1373189248)))]; + tensor hidden_states_535_cast = mul(x = var_13065_cast, y = var_13067_to_fp16)[name = tensor("hidden_states_535_cast")]; + tensor var_13074 = const()[name = tensor("op_13074"), val = tensor([1, 1])]; + tensor var_13076 = const()[name = tensor("op_13076"), val = tensor([1, 1])]; + tensor q_265_pad_type_0 = const()[name = tensor("q_265_pad_type_0"), val = tensor("custom")]; + tensor q_265_pad_0 = const()[name = tensor("q_265_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1373190592))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1373497856))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_265_cast = conv(dilations = var_13076, groups = var_12518, pad = q_265_pad_0, pad_type = q_265_pad_type_0, strides = var_13074, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_535_cast)[name = tensor("q_265_cast")]; + tensor var_13080 = const()[name = tensor("op_13080"), val = tensor([1, 1])]; + tensor var_13082 = const()[name = tensor("op_13082"), val = tensor([1, 1])]; + tensor k_265_pad_type_0 = const()[name = tensor("k_265_pad_type_0"), val = tensor("custom")]; + tensor k_265_pad_0 = const()[name = tensor("k_265_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1373498048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1373805312))), name = 
tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_265_cast = conv(dilations = var_13082, groups = var_12518, pad = k_265_pad_0, pad_type = k_265_pad_type_0, strides = var_13080, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_535_cast)[name = tensor("k_265_cast")]; + tensor var_13086 = const()[name = tensor("op_13086"), val = tensor([1, 1])]; + tensor var_13088 = const()[name = tensor("op_13088"), val = tensor([1, 1])]; + tensor v_265_pad_type_0 = const()[name = tensor("v_265_pad_type_0"), val = tensor("custom")]; + tensor v_265_pad_0 = const()[name = tensor("v_265_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1373805504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374215168))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_265_cast = conv(dilations = var_13088, groups = var_12518, pad = v_265_pad_0, pad_type = v_265_pad_type_0, strides = var_13086, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_535_cast)[name = tensor("v_265_cast")]; + tensor var_13092 = const()[name = tensor("op_13092"), val = tensor([2, 10, 64, -1])]; + tensor var_13093_cast = reshape(shape = var_13092, x = q_265_cast)[name = tensor("op_13093_cast")]; + tensor var_13094 = const()[name = tensor("op_13094"), val = tensor([2, 10, 64, -1])]; + tensor var_13095_cast = reshape(shape = var_13094, x = k_265_cast)[name = tensor("op_13095_cast")]; + tensor var_13096 = const()[name = tensor("op_13096"), val = tensor([2, 10, 64, -1])]; + tensor var_13097_cast = reshape(shape = var_13096, x = v_265_cast)[name = tensor("op_13097_cast")]; + tensor attn_weights_529_transpose_x_0 = const()[name = tensor("attn_weights_529_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_529_transpose_y_0 = const()[name = tensor("attn_weights_529_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_529_cast = matmul(transpose_x = attn_weights_529_transpose_x_0, transpose_y = attn_weights_529_transpose_y_0, x = var_13093_cast, y = var_13095_cast)[name = tensor("attn_weights_529_cast")]; + tensor attn_weights_531_cast = mul(x = attn_weights_529_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_531_cast")]; + tensor var_13101_cast = softmax(axis = var_12502, x = attn_weights_531_cast)[name = tensor("op_13101_cast")]; + tensor attn_265_transpose_x_0 = const()[name = tensor("attn_265_transpose_x_0"), val = tensor(false)]; + tensor attn_265_transpose_y_0 = const()[name = tensor("attn_265_transpose_y_0"), val = tensor(true)]; + tensor attn_265_cast = matmul(transpose_x = attn_265_transpose_x_0, transpose_y = attn_265_transpose_y_0, x = var_13097_cast, y = var_13101_cast)[name = tensor("attn_265_cast")]; + tensor var_13105 = const()[name = tensor("op_13105"), val = tensor([2, 640, 1, -1])]; + tensor input_771_cast = reshape(shape = var_13105, x = attn_265_cast)[name = tensor("input_771_cast")]; + tensor var_13110 = const()[name = tensor("op_13110"), val = tensor([1, 1])]; + tensor var_13112 = const()[name = tensor("op_13112"), val = tensor([1, 1])]; + tensor var_13114_pad_type_0 = 
const()[name = tensor("op_13114_pad_type_0"), val = tensor("custom")]; + tensor var_13114_pad_0 = const()[name = tensor("op_13114_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374215744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374625408))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374625984)))]; + tensor var_13114_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_13112, groups = var_12518, pad = var_13114_pad_0, pad_type = var_13114_pad_type_0, strides = var_13110, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_771_cast)[name = tensor("op_13114_cast")]; + tensor inputs_399_cast = add(x = var_13114_cast, y = inputs_397_cast)[name = tensor("inputs_399_cast")]; + tensor var_13118 = const()[name = tensor("op_13118"), val = tensor([1])]; + tensor channels_mean_399_cast = reduce_mean(axes = var_13118, keep_dims = var_12513, x = inputs_399_cast)[name = tensor("channels_mean_399_cast")]; + tensor zero_mean_399_cast = sub(x = inputs_399_cast, y = channels_mean_399_cast)[name = tensor("zero_mean_399_cast")]; + tensor zero_mean_sq_399_cast = mul(x = zero_mean_399_cast, y = zero_mean_399_cast)[name = tensor("zero_mean_sq_399_cast")]; + tensor var_13122 = const()[name = tensor("op_13122"), val = tensor([1])]; + tensor var_13123_cast = reduce_mean(axes = var_13122, keep_dims = var_12513, x = zero_mean_sq_399_cast)[name = tensor("op_13123_cast")]; + tensor var_13124_to_fp16 = const()[name = tensor("op_13124_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13125_cast = add(x = var_13123_cast, y = var_13124_to_fp16)[name = tensor("op_13125_cast")]; + tensor denom_399_epsilon_0_to_fp16 = const()[name = tensor("denom_399_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_399_cast = rsqrt(epsilon = denom_399_epsilon_0_to_fp16, x = var_13125_cast)[name = tensor("denom_399_cast")]; + tensor out_399_cast = mul(x = zero_mean_399_cast, y = denom_399_cast)[name = tensor("out_399_cast")]; + tensor var_13129_to_fp16 = const()[name = tensor("op_13129_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374627328)))]; + tensor var_13130_cast = add(x = out_399_cast, y = var_13129_to_fp16)[name = tensor("op_13130_cast")]; + tensor var_13132_to_fp16 = const()[name = tensor("op_13132_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374628672)))]; + tensor hidden_states_537_cast = mul(x = var_13130_cast, y = var_13132_to_fp16)[name = tensor("hidden_states_537_cast")]; + tensor var_13139 = const()[name = tensor("op_13139"), val = tensor([1, 1])]; + tensor var_13141 = const()[name = tensor("op_13141"), val = tensor([1, 1])]; + tensor q_267_pad_type_0 = const()[name = tensor("q_267_pad_type_0"), val = tensor("custom")]; + tensor q_267_pad_0 = const()[name = tensor("q_267_pad_0"), val = 
tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374630016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374937280))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_267_cast = conv(dilations = var_13141, groups = var_12518, pad = q_267_pad_0, pad_type = q_267_pad_type_0, strides = var_13139, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_537_cast)[name = tensor("q_267_cast")]; + tensor var_13145 = const()[name = tensor("op_13145"), val = tensor([1, 1])]; + tensor var_13147 = const()[name = tensor("op_13147"), val = tensor([1, 1])]; + tensor k_267_pad_type_0 = const()[name = tensor("k_267_pad_type_0"), val = tensor("custom")]; + tensor k_267_pad_0 = const()[name = tensor("k_267_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374937472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1375920576))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_267_cast = conv(dilations = var_13147, groups = var_12518, pad = k_267_pad_0, pad_type = k_267_pad_type_0, strides = var_13145, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_267_cast")]; + tensor var_13151 = const()[name = tensor("op_13151"), val = tensor([1, 1])]; + tensor var_13153 = const()[name = tensor("op_13153"), val = tensor([1, 1])]; + tensor v_267_pad_type_0 = const()[name = tensor("v_267_pad_type_0"), val = tensor("custom")]; + tensor v_267_pad_0 = const()[name = tensor("v_267_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1375920768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1376903872))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_267_cast = conv(dilations = var_13153, groups = var_12518, pad = v_267_pad_0, pad_type = v_267_pad_type_0, strides = var_13151, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_267_cast")]; + tensor var_13157 = const()[name = tensor("op_13157"), val = tensor([2, 10, 64, -1])]; + tensor var_13158_cast = reshape(shape = var_13157, x = q_267_cast)[name = tensor("op_13158_cast")]; + tensor var_13159 = const()[name = tensor("op_13159"), val = tensor([2, 10, 64, -1])]; + tensor var_13160_cast = reshape(shape = var_13159, x = k_267_cast)[name = tensor("op_13160_cast")]; + tensor var_13161 = const()[name = tensor("op_13161"), val = tensor([2, 10, 64, -1])]; + tensor var_13162_cast = reshape(shape = var_13161, x = v_267_cast)[name = tensor("op_13162_cast")]; + tensor 
attn_weights_533_transpose_x_0 = const()[name = tensor("attn_weights_533_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_533_transpose_y_0 = const()[name = tensor("attn_weights_533_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_533_cast = matmul(transpose_x = attn_weights_533_transpose_x_0, transpose_y = attn_weights_533_transpose_y_0, x = var_13158_cast, y = var_13160_cast)[name = tensor("attn_weights_533_cast")]; + tensor attn_weights_535_cast = mul(x = attn_weights_533_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_535_cast")]; + tensor var_13166_cast = softmax(axis = var_12502, x = attn_weights_535_cast)[name = tensor("op_13166_cast")]; + tensor attn_267_transpose_x_0 = const()[name = tensor("attn_267_transpose_x_0"), val = tensor(false)]; + tensor attn_267_transpose_y_0 = const()[name = tensor("attn_267_transpose_y_0"), val = tensor(true)]; + tensor attn_267_cast = matmul(transpose_x = attn_267_transpose_x_0, transpose_y = attn_267_transpose_y_0, x = var_13162_cast, y = var_13166_cast)[name = tensor("attn_267_cast")]; + tensor var_13170 = const()[name = tensor("op_13170"), val = tensor([2, 640, 1, -1])]; + tensor input_773_cast = reshape(shape = var_13170, x = attn_267_cast)[name = tensor("input_773_cast")]; + tensor var_13175 = const()[name = tensor("op_13175"), val = tensor([1, 1])]; + tensor var_13177 = const()[name = tensor("op_13177"), val = tensor([1, 1])]; + tensor var_13179_pad_type_0 = const()[name = tensor("op_13179_pad_type_0"), val = tensor("custom")]; + tensor var_13179_pad_0 = const()[name = tensor("op_13179_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1376904064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1377211328))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1377211520)))]; + tensor var_13179_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_13177, groups = var_12518, pad = var_13179_pad_0, pad_type = var_13179_pad_type_0, strides = var_13175, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_773_cast)[name = tensor("op_13179_cast")]; + tensor inputs_401_cast = add(x = var_13179_cast, y = inputs_399_cast)[name = tensor("inputs_401_cast")]; + tensor var_13183 = const()[name = tensor("op_13183"), val = tensor([1])]; + tensor channels_mean_401_cast = reduce_mean(axes = var_13183, keep_dims = var_12513, x = inputs_401_cast)[name = tensor("channels_mean_401_cast")]; + tensor zero_mean_401_cast = sub(x = inputs_401_cast, y = channels_mean_401_cast)[name = tensor("zero_mean_401_cast")]; + tensor zero_mean_sq_401_cast = mul(x = zero_mean_401_cast, y = zero_mean_401_cast)[name = tensor("zero_mean_sq_401_cast")]; + tensor var_13187 = const()[name = tensor("op_13187"), val = tensor([1])]; + tensor var_13188_cast = reduce_mean(axes = var_13187, keep_dims = var_12513, x = zero_mean_sq_401_cast)[name = 
tensor("op_13188_cast")]; + tensor var_13189_to_fp16 = const()[name = tensor("op_13189_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13190_cast = add(x = var_13188_cast, y = var_13189_to_fp16)[name = tensor("op_13190_cast")]; + tensor denom_401_epsilon_0_to_fp16 = const()[name = tensor("denom_401_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_401_cast = rsqrt(epsilon = denom_401_epsilon_0_to_fp16, x = var_13190_cast)[name = tensor("denom_401_cast")]; + tensor out_401_cast = mul(x = zero_mean_401_cast, y = denom_401_cast)[name = tensor("out_401_cast")]; + tensor var_13194_to_fp16 = const()[name = tensor("op_13194_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1377212864)))]; + tensor var_13195_cast = add(x = out_401_cast, y = var_13194_to_fp16)[name = tensor("op_13195_cast")]; + tensor var_13197_to_fp16 = const()[name = tensor("op_13197_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1377214208)))]; + tensor input_775_cast = mul(x = var_13195_cast, y = var_13197_to_fp16)[name = tensor("input_775_cast")]; + tensor var_13205 = const()[name = tensor("op_13205"), val = tensor([1, 1])]; + tensor var_13207 = const()[name = tensor("op_13207"), val = tensor([1, 1])]; + tensor var_13209_pad_type_0 = const()[name = tensor("op_13209_pad_type_0"), val = tensor("custom")]; + tensor var_13209_pad_0 = const()[name = tensor("op_13209_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1377215552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1380492416))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1380492992)))]; + tensor var_13209_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_13207, groups = var_12518, pad = var_13209_pad_0, pad_type = var_13209_pad_type_0, strides = var_13205, weight = up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_775_cast)[name = tensor("op_13209_cast")]; + tensor var_13210_split_sizes_0 = const()[name = tensor("op_13210_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_13210_axis_0 = const()[name = tensor("op_13210_axis_0"), val = tensor(1)]; + tensor var_13210_cast_0, tensor var_13210_cast_1 = split(axis = var_13210_axis_0, split_sizes = var_13210_split_sizes_0, x = var_13209_cast)[name = tensor("op_13210_cast")]; + tensor var_13212_mode_0 = const()[name = tensor("op_13212_mode_0"), val = tensor("EXACT")]; + tensor var_13212_cast = gelu(mode = var_13212_mode_0, x = var_13210_cast_1)[name = tensor("op_13212_cast")]; + tensor input_777_cast = mul(x = var_13210_cast_0, y = var_13212_cast)[name = tensor("input_777_cast")]; + tensor var_13216 = const()[name = tensor("op_13216"), val = tensor([1, 1])]; + tensor var_13218 = const()[name = tensor("op_13218"), val = tensor([1, 1])]; + tensor var_13220_pad_type_0 = const()[name = 
tensor("op_13220_pad_type_0"), val = tensor("custom")]; + tensor var_13220_pad_0 = const()[name = tensor("op_13220_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1380503296))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382141760))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382142336)))]; + tensor var_13220_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_13218, groups = var_12518, pad = var_13220_pad_0, pad_type = var_13220_pad_type_0, strides = var_13216, weight = up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_777_cast)[name = tensor("op_13220_cast")]; + tensor inputs_403_cast = add(x = var_13220_cast, y = inputs_401_cast)[name = tensor("inputs_403_cast")]; + tensor var_13230 = const()[name = tensor("op_13230"), val = tensor([1])]; + tensor channels_mean_403_cast = reduce_mean(axes = var_13230, keep_dims = var_12513, x = inputs_403_cast)[name = tensor("channels_mean_403_cast")]; + tensor zero_mean_403_cast = sub(x = inputs_403_cast, y = channels_mean_403_cast)[name = tensor("zero_mean_403_cast")]; + tensor zero_mean_sq_403_cast = mul(x = zero_mean_403_cast, y = zero_mean_403_cast)[name = tensor("zero_mean_sq_403_cast")]; + tensor var_13234 = const()[name = tensor("op_13234"), val = tensor([1])]; + tensor var_13235_cast = reduce_mean(axes = var_13234, keep_dims = var_12513, x = zero_mean_sq_403_cast)[name = tensor("op_13235_cast")]; + tensor var_13236_to_fp16 = const()[name = tensor("op_13236_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13237_cast = add(x = var_13235_cast, y = var_13236_to_fp16)[name = tensor("op_13237_cast")]; + tensor denom_403_epsilon_0_to_fp16 = const()[name = tensor("denom_403_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_403_cast = rsqrt(epsilon = denom_403_epsilon_0_to_fp16, x = var_13237_cast)[name = tensor("denom_403_cast")]; + tensor out_403_cast = mul(x = zero_mean_403_cast, y = denom_403_cast)[name = tensor("out_403_cast")]; + tensor var_13241_to_fp16 = const()[name = tensor("op_13241_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382143680)))]; + tensor var_13242_cast = add(x = out_403_cast, y = var_13241_to_fp16)[name = tensor("op_13242_cast")]; + tensor var_13244_to_fp16 = const()[name = tensor("op_13244_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382145024)))]; + tensor hidden_states_541_cast = mul(x = var_13242_cast, y = var_13244_to_fp16)[name = tensor("hidden_states_541_cast")]; + tensor var_13251 = const()[name = tensor("op_13251"), val = tensor([1, 1])]; + tensor var_13253 = const()[name = tensor("op_13253"), val = tensor([1, 1])]; + tensor q_269_pad_type_0 = const()[name = tensor("q_269_pad_type_0"), val = tensor("custom")]; + tensor q_269_pad_0 = const()[name = tensor("q_269_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382146368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382453632))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_269_cast = conv(dilations = var_13253, groups = var_12518, pad = q_269_pad_0, pad_type = q_269_pad_type_0, strides = var_13251, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_541_cast)[name = tensor("q_269_cast")]; + tensor var_13257 = const()[name = tensor("op_13257"), val = tensor([1, 1])]; + tensor var_13259 = const()[name = tensor("op_13259"), val = tensor([1, 1])]; + tensor k_269_pad_type_0 = const()[name = tensor("k_269_pad_type_0"), val = tensor("custom")]; + tensor k_269_pad_0 = const()[name = tensor("k_269_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382453824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382863488))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_269_cast = conv(dilations = var_13259, groups = var_12518, pad = k_269_pad_0, pad_type = k_269_pad_type_0, strides = var_13257, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_541_cast)[name = tensor("k_269_cast")]; + tensor var_13263 = const()[name = tensor("op_13263"), val = tensor([1, 1])]; + tensor var_13265 = const()[name = tensor("op_13265"), val = tensor([1, 1])]; + tensor v_269_pad_type_0 = const()[name = tensor("v_269_pad_type_0"), val = tensor("custom")]; + tensor v_269_pad_0 = const()[name = tensor("v_269_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382864064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1383273728))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_269_cast = conv(dilations = var_13265, groups = var_12518, pad = v_269_pad_0, pad_type = v_269_pad_type_0, strides = var_13263, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_541_cast)[name = tensor("v_269_cast")]; + tensor var_13269 = const()[name = tensor("op_13269"), val = tensor([2, 10, 64, -1])]; + tensor var_13270_cast = reshape(shape = var_13269, x = q_269_cast)[name = tensor("op_13270_cast")]; + tensor var_13271 = const()[name = tensor("op_13271"), val = tensor([2, 10, 64, -1])]; + tensor var_13272_cast = reshape(shape = var_13271, x = k_269_cast)[name = tensor("op_13272_cast")]; + tensor var_13273 = const()[name = tensor("op_13273"), val = tensor([2, 10, 64, -1])]; + tensor var_13274_cast = reshape(shape = var_13273, x = v_269_cast)[name = tensor("op_13274_cast")]; + tensor attn_weights_537_transpose_x_0 = 
const()[name = tensor("attn_weights_537_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_537_transpose_y_0 = const()[name = tensor("attn_weights_537_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_537_cast = matmul(transpose_x = attn_weights_537_transpose_x_0, transpose_y = attn_weights_537_transpose_y_0, x = var_13270_cast, y = var_13272_cast)[name = tensor("attn_weights_537_cast")]; + tensor attn_weights_539_cast = mul(x = attn_weights_537_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_539_cast")]; + tensor var_13278_cast = softmax(axis = var_12502, x = attn_weights_539_cast)[name = tensor("op_13278_cast")]; + tensor attn_269_transpose_x_0 = const()[name = tensor("attn_269_transpose_x_0"), val = tensor(false)]; + tensor attn_269_transpose_y_0 = const()[name = tensor("attn_269_transpose_y_0"), val = tensor(true)]; + tensor attn_269_cast = matmul(transpose_x = attn_269_transpose_x_0, transpose_y = attn_269_transpose_y_0, x = var_13274_cast, y = var_13278_cast)[name = tensor("attn_269_cast")]; + tensor var_13282 = const()[name = tensor("op_13282"), val = tensor([2, 640, 1, -1])]; + tensor input_779_cast = reshape(shape = var_13282, x = attn_269_cast)[name = tensor("input_779_cast")]; + tensor var_13287 = const()[name = tensor("op_13287"), val = tensor([1, 1])]; + tensor var_13289 = const()[name = tensor("op_13289"), val = tensor([1, 1])]; + tensor var_13291_pad_type_0 = const()[name = tensor("op_13291_pad_type_0"), val = tensor("custom")]; + tensor var_13291_pad_0 = const()[name = tensor("op_13291_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1383274304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1383683968))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1383684544)))]; + tensor var_13291_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_13289, groups = var_12518, pad = var_13291_pad_0, pad_type = var_13291_pad_type_0, strides = var_13287, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_779_cast)[name = tensor("op_13291_cast")]; + tensor inputs_405_cast = add(x = var_13291_cast, y = inputs_403_cast)[name = tensor("inputs_405_cast")]; + tensor var_13295 = const()[name = tensor("op_13295"), val = tensor([1])]; + tensor channels_mean_405_cast = reduce_mean(axes = var_13295, keep_dims = var_12513, x = inputs_405_cast)[name = tensor("channels_mean_405_cast")]; + tensor zero_mean_405_cast = sub(x = inputs_405_cast, y = channels_mean_405_cast)[name = tensor("zero_mean_405_cast")]; + tensor zero_mean_sq_405_cast = mul(x = zero_mean_405_cast, y = zero_mean_405_cast)[name = tensor("zero_mean_sq_405_cast")]; + tensor var_13299 = const()[name = tensor("op_13299"), val = tensor([1])]; + tensor var_13300_cast = reduce_mean(axes = var_13299, keep_dims = var_12513, x = zero_mean_sq_405_cast)[name = tensor("op_13300_cast")]; + tensor 
var_13301_to_fp16 = const()[name = tensor("op_13301_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13302_cast = add(x = var_13300_cast, y = var_13301_to_fp16)[name = tensor("op_13302_cast")]; + tensor denom_405_epsilon_0_to_fp16 = const()[name = tensor("denom_405_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_405_cast = rsqrt(epsilon = denom_405_epsilon_0_to_fp16, x = var_13302_cast)[name = tensor("denom_405_cast")]; + tensor out_405_cast = mul(x = zero_mean_405_cast, y = denom_405_cast)[name = tensor("out_405_cast")]; + tensor var_13306_to_fp16 = const()[name = tensor("op_13306_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1383685888)))]; + tensor var_13307_cast = add(x = out_405_cast, y = var_13306_to_fp16)[name = tensor("op_13307_cast")]; + tensor var_13309_to_fp16 = const()[name = tensor("op_13309_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1383687232)))]; + tensor hidden_states_543_cast = mul(x = var_13307_cast, y = var_13309_to_fp16)[name = tensor("hidden_states_543_cast")]; + tensor var_13316 = const()[name = tensor("op_13316"), val = tensor([1, 1])]; + tensor var_13318 = const()[name = tensor("op_13318"), val = tensor([1, 1])]; + tensor q_271_pad_type_0 = const()[name = tensor("q_271_pad_type_0"), val = tensor("custom")]; + tensor q_271_pad_0 = const()[name = tensor("q_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1383688576))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1383893440))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_271_cast = conv(dilations = var_13318, groups = var_12518, pad = q_271_pad_0, pad_type = q_271_pad_type_0, strides = var_13316, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_543_cast)[name = tensor("q_271_cast")]; + tensor var_13322 = const()[name = tensor("op_13322"), val = tensor([1, 1])]; + tensor var_13324 = const()[name = tensor("op_13324"), val = tensor([1, 1])]; + tensor k_271_pad_type_0 = const()[name = tensor("k_271_pad_type_0"), val = tensor("custom")]; + tensor k_271_pad_0 = const()[name = tensor("k_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1383893568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1384876672))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_271_cast = conv(dilations = var_13324, groups = var_12518, pad = k_271_pad_0, pad_type = k_271_pad_type_0, strides = var_13322, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_271_cast")]; + tensor var_13328 = const()[name = tensor("op_13328"), val = tensor([1, 1])]; + tensor var_13330 = const()[name = tensor("op_13330"), val = tensor([1, 1])]; + tensor v_271_pad_type_0 = const()[name = tensor("v_271_pad_type_0"), val = 
tensor("custom")]; + tensor v_271_pad_0 = const()[name = tensor("v_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1384876864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1385859968))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_271_cast = conv(dilations = var_13330, groups = var_12518, pad = v_271_pad_0, pad_type = v_271_pad_type_0, strides = var_13328, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_271_cast")]; + tensor var_13334 = const()[name = tensor("op_13334"), val = tensor([2, 10, 64, -1])]; + tensor var_13335_cast = reshape(shape = var_13334, x = q_271_cast)[name = tensor("op_13335_cast")]; + tensor var_13336 = const()[name = tensor("op_13336"), val = tensor([2, 10, 64, -1])]; + tensor var_13337_cast = reshape(shape = var_13336, x = k_271_cast)[name = tensor("op_13337_cast")]; + tensor var_13338 = const()[name = tensor("op_13338"), val = tensor([2, 10, 64, -1])]; + tensor var_13339_cast = reshape(shape = var_13338, x = v_271_cast)[name = tensor("op_13339_cast")]; + tensor attn_weights_541_transpose_x_0 = const()[name = tensor("attn_weights_541_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_541_transpose_y_0 = const()[name = tensor("attn_weights_541_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_541_cast = matmul(transpose_x = attn_weights_541_transpose_x_0, transpose_y = attn_weights_541_transpose_y_0, x = var_13335_cast, y = var_13337_cast)[name = tensor("attn_weights_541_cast")]; + tensor attn_weights_543_cast = mul(x = attn_weights_541_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_543_cast")]; + tensor var_13343_cast = softmax(axis = var_12502, x = attn_weights_543_cast)[name = tensor("op_13343_cast")]; + tensor attn_271_transpose_x_0 = const()[name = tensor("attn_271_transpose_x_0"), val = tensor(false)]; + tensor attn_271_transpose_y_0 = const()[name = tensor("attn_271_transpose_y_0"), val = tensor(true)]; + tensor attn_271_cast = matmul(transpose_x = attn_271_transpose_x_0, transpose_y = attn_271_transpose_y_0, x = var_13339_cast, y = var_13343_cast)[name = tensor("attn_271_cast")]; + tensor var_13347 = const()[name = tensor("op_13347"), val = tensor([2, 640, 1, -1])]; + tensor input_781_cast = reshape(shape = var_13347, x = attn_271_cast)[name = tensor("input_781_cast")]; + tensor var_13352 = const()[name = tensor("op_13352"), val = tensor([1, 1])]; + tensor var_13354 = const()[name = tensor("op_13354"), val = tensor([1, 1])]; + tensor var_13356_pad_type_0 = const()[name = tensor("op_13356_pad_type_0"), val = tensor("custom")]; + tensor var_13356_pad_0 = const()[name = tensor("op_13356_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1385860160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386167424))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor 
up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386167616)))]; + tensor var_13356_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_13354, groups = var_12518, pad = var_13356_pad_0, pad_type = var_13356_pad_type_0, strides = var_13352, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_781_cast)[name = tensor("op_13356_cast")]; + tensor inputs_407_cast = add(x = var_13356_cast, y = inputs_405_cast)[name = tensor("inputs_407_cast")]; + tensor var_13360 = const()[name = tensor("op_13360"), val = tensor([1])]; + tensor channels_mean_407_cast = reduce_mean(axes = var_13360, keep_dims = var_12513, x = inputs_407_cast)[name = tensor("channels_mean_407_cast")]; + tensor zero_mean_407_cast = sub(x = inputs_407_cast, y = channels_mean_407_cast)[name = tensor("zero_mean_407_cast")]; + tensor zero_mean_sq_407_cast = mul(x = zero_mean_407_cast, y = zero_mean_407_cast)[name = tensor("zero_mean_sq_407_cast")]; + tensor var_13364 = const()[name = tensor("op_13364"), val = tensor([1])]; + tensor var_13365_cast = reduce_mean(axes = var_13364, keep_dims = var_12513, x = zero_mean_sq_407_cast)[name = tensor("op_13365_cast")]; + tensor var_13366_to_fp16 = const()[name = tensor("op_13366_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13367_cast = add(x = var_13365_cast, y = var_13366_to_fp16)[name = tensor("op_13367_cast")]; + tensor denom_407_epsilon_0_to_fp16 = const()[name = tensor("denom_407_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_407_cast = rsqrt(epsilon = denom_407_epsilon_0_to_fp16, x = var_13367_cast)[name = tensor("denom_407_cast")]; + tensor out_407_cast = mul(x = zero_mean_407_cast, y = denom_407_cast)[name = tensor("out_407_cast")]; + tensor var_13371_to_fp16 = const()[name = tensor("op_13371_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386168960)))]; + tensor var_13372_cast = add(x = out_407_cast, y = var_13371_to_fp16)[name = tensor("op_13372_cast")]; + tensor var_13374_to_fp16 = const()[name = tensor("op_13374_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386170304)))]; + tensor input_783_cast = mul(x = var_13372_cast, y = var_13374_to_fp16)[name = tensor("input_783_cast")]; + tensor var_13382 = const()[name = tensor("op_13382"), val = tensor([1, 1])]; + tensor var_13384 = const()[name = tensor("op_13384"), val = tensor([1, 1])]; + tensor var_13386_pad_type_0 = const()[name = tensor("op_13386_pad_type_0"), val = tensor("custom")]; + tensor var_13386_pad_0 = const()[name = tensor("op_13386_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386171648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1389448512))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = 
tensor("up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1389449088)))]; + tensor var_13386_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_13384, groups = var_12518, pad = var_13386_pad_0, pad_type = var_13386_pad_type_0, strides = var_13382, weight = up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_783_cast)[name = tensor("op_13386_cast")]; + tensor var_13387_split_sizes_0 = const()[name = tensor("op_13387_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_13387_axis_0 = const()[name = tensor("op_13387_axis_0"), val = tensor(1)]; + tensor var_13387_cast_0, tensor var_13387_cast_1 = split(axis = var_13387_axis_0, split_sizes = var_13387_split_sizes_0, x = var_13386_cast)[name = tensor("op_13387_cast")]; + tensor var_13389_mode_0 = const()[name = tensor("op_13389_mode_0"), val = tensor("EXACT")]; + tensor var_13389_cast = gelu(mode = var_13389_mode_0, x = var_13387_cast_1)[name = tensor("op_13389_cast")]; + tensor input_785_cast = mul(x = var_13387_cast_0, y = var_13389_cast)[name = tensor("input_785_cast")]; + tensor var_13393 = const()[name = tensor("op_13393"), val = tensor([1, 1])]; + tensor var_13395 = const()[name = tensor("op_13395"), val = tensor([1, 1])]; + tensor var_13397_pad_type_0 = const()[name = tensor("op_13397_pad_type_0"), val = tensor("custom")]; + tensor var_13397_pad_0 = const()[name = tensor("op_13397_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1389459392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391097856))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391098432)))]; + tensor var_13397_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_13395, groups = var_12518, pad = var_13397_pad_0, pad_type = var_13397_pad_type_0, strides = var_13393, weight = up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_785_cast)[name = tensor("op_13397_cast")]; + tensor hidden_states_547_cast = add(x = var_13397_cast, y = inputs_407_cast)[name = tensor("hidden_states_547_cast")]; + tensor var_13399 = const()[name = tensor("op_13399"), val = tensor([2, 640, 64, 64])]; + tensor input_787_cast = reshape(shape = var_13399, x = hidden_states_547_cast)[name = tensor("input_787_cast")]; + tensor var_13403 = const()[name = tensor("op_13403"), val = tensor([1, 1])]; + tensor var_13405 = const()[name = tensor("op_13405"), val = tensor([1, 1])]; + tensor hidden_states_549_pad_type_0 = const()[name = tensor("hidden_states_549_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_549_pad_0 = const()[name = tensor("hidden_states_549_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391099776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391509440))), name = tensor("up_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391510016)))]; + tensor hidden_states_549_cast = conv(bias = up_blocks_1_attentions_1_proj_out_bias_to_fp16, dilations = var_13405, groups = var_12518, pad = hidden_states_549_pad_0, pad_type = hidden_states_549_pad_type_0, strides = var_13403, weight = up_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized, x = input_787_cast)[name = tensor("hidden_states_549_cast")]; + tensor hidden_states_551_cast = add(x = hidden_states_549_cast, y = hidden_states_531_cast)[name = tensor("hidden_states_551_cast")]; + tensor input_789_interleave_0 = const()[name = tensor("input_789_interleave_0"), val = tensor(false)]; + tensor input_789_cast = concat(axis = var_12518, interleave = input_789_interleave_0, values = (hidden_states_551_cast, input_45_cast))[name = tensor("input_789_cast")]; + tensor reshape_144_shape_0 = const()[name = tensor("reshape_144_shape_0"), val = tensor([2, 32, 30, 64, 64])]; + tensor reshape_144_cast = reshape(shape = reshape_144_shape_0, x = input_789_cast)[name = tensor("reshape_144_cast")]; + tensor reduce_mean_108_axes_0 = const()[name = tensor("reduce_mean_108_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_108_keep_dims_0 = const()[name = tensor("reduce_mean_108_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_108_cast = reduce_mean(axes = reduce_mean_108_axes_0, keep_dims = reduce_mean_108_keep_dims_0, x = reshape_144_cast)[name = tensor("reduce_mean_108_cast")]; + tensor sub_72_cast = sub(x = reshape_144_cast, y = reduce_mean_108_cast)[name = tensor("sub_72_cast")]; + tensor square_36_cast = square(x = sub_72_cast)[name = tensor("square_36_cast")]; + tensor reduce_mean_110_axes_0 = const()[name = tensor("reduce_mean_110_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_110_keep_dims_0 = const()[name = tensor("reduce_mean_110_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_110_cast = reduce_mean(axes = reduce_mean_110_axes_0, keep_dims = reduce_mean_110_keep_dims_0, x = square_36_cast)[name = tensor("reduce_mean_110_cast")]; + tensor add_72_y_0_to_fp16 = const()[name = tensor("add_72_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_72_cast = add(x = reduce_mean_110_cast, y = add_72_y_0_to_fp16)[name = tensor("add_72_cast")]; + tensor sqrt_36_cast = sqrt(x = add_72_cast)[name = tensor("sqrt_36_cast")]; + tensor real_div_36_cast = real_div(x = sub_72_cast, y = sqrt_36_cast)[name = tensor("real_div_36_cast")]; + tensor reshape_145_shape_0 = const()[name = tensor("reshape_145_shape_0"), val = tensor([2, 960, 64, 64])]; + tensor reshape_145_cast = reshape(shape = reshape_145_shape_0, x = real_div_36_cast)[name = tensor("reshape_145_cast")]; + tensor add_73_mean_0_to_fp16 = const()[name = tensor("add_73_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391511360)))]; + tensor add_73_variance_0_to_fp16 = const()[name = tensor("add_73_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391513344)))]; + 
tensor add_73_gamma_0_to_fp16 = const()[name = tensor("add_73_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391515328)))]; + tensor add_73_beta_0_to_fp16 = const()[name = tensor("add_73_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391517312)))]; + tensor add_73_epsilon_0_to_fp16 = const()[name = tensor("add_73_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_73_cast = batch_norm(beta = add_73_beta_0_to_fp16, epsilon = add_73_epsilon_0_to_fp16, gamma = add_73_gamma_0_to_fp16, mean = add_73_mean_0_to_fp16, variance = add_73_variance_0_to_fp16, x = reshape_145_cast)[name = tensor("add_73_cast")]; + tensor input_793_cast = silu(x = add_73_cast)[name = tensor("input_793_cast")]; + tensor var_13423 = const()[name = tensor("op_13423"), val = tensor([1, 1])]; + tensor var_13425 = const()[name = tensor("op_13425"), val = tensor([1, 1])]; + tensor hidden_states_553_pad_type_0 = const()[name = tensor("hidden_states_553_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_553_pad_0 = const()[name = tensor("hidden_states_553_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_2_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391519296))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397048960))), name = tensor("up_blocks_1_resnets_2_conv1_weight_to_fp16_palettized"), shape = tensor([640, 960, 3, 3])]; + tensor up_blocks_1_resnets_2_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397049536)))]; + tensor hidden_states_553_cast = conv(bias = up_blocks_1_resnets_2_conv1_bias_to_fp16, dilations = var_13425, groups = var_12518, pad = hidden_states_553_pad_0, pad_type = hidden_states_553_pad_type_0, strides = var_13423, weight = up_blocks_1_resnets_2_conv1_weight_to_fp16_palettized, x = input_793_cast)[name = tensor("hidden_states_553_cast")]; + tensor var_13431 = const()[name = tensor("op_13431"), val = tensor([1, 1])]; + tensor var_13433 = const()[name = tensor("op_13433"), val = tensor([1, 1])]; + tensor temb_27_pad_type_0 = const()[name = tensor("temb_27_pad_type_0"), val = tensor("custom")]; + tensor temb_27_pad_0 = const()[name = tensor("temb_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_2_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397050880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397870144))), name = tensor("up_blocks_1_resnets_2_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor up_blocks_1_resnets_2_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397870720)))]; + tensor temb_27_cast = conv(bias = up_blocks_1_resnets_2_time_emb_proj_bias_to_fp16, dilations = var_13433, groups = var_12518, pad = temb_27_pad_0, pad_type = temb_27_pad_type_0, strides = var_13431, weight = up_blocks_1_resnets_2_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_27_cast")]; + tensor input_797_cast = add(x 
= hidden_states_553_cast, y = temb_27_cast)[name = tensor("input_797_cast")]; + tensor reshape_148_shape_0 = const()[name = tensor("reshape_148_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_148_cast = reshape(shape = reshape_148_shape_0, x = input_797_cast)[name = tensor("reshape_148_cast")]; + tensor reduce_mean_111_axes_0 = const()[name = tensor("reduce_mean_111_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_111_keep_dims_0 = const()[name = tensor("reduce_mean_111_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_111_cast = reduce_mean(axes = reduce_mean_111_axes_0, keep_dims = reduce_mean_111_keep_dims_0, x = reshape_148_cast)[name = tensor("reduce_mean_111_cast")]; + tensor sub_74_cast = sub(x = reshape_148_cast, y = reduce_mean_111_cast)[name = tensor("sub_74_cast")]; + tensor square_37_cast = square(x = sub_74_cast)[name = tensor("square_37_cast")]; + tensor reduce_mean_113_axes_0 = const()[name = tensor("reduce_mean_113_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_113_keep_dims_0 = const()[name = tensor("reduce_mean_113_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_113_cast = reduce_mean(axes = reduce_mean_113_axes_0, keep_dims = reduce_mean_113_keep_dims_0, x = square_37_cast)[name = tensor("reduce_mean_113_cast")]; + tensor add_74_y_0_to_fp16 = const()[name = tensor("add_74_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_74_cast = add(x = reduce_mean_113_cast, y = add_74_y_0_to_fp16)[name = tensor("add_74_cast")]; + tensor sqrt_37_cast = sqrt(x = add_74_cast)[name = tensor("sqrt_37_cast")]; + tensor real_div_37_cast = real_div(x = sub_74_cast, y = sqrt_37_cast)[name = tensor("real_div_37_cast")]; + tensor reshape_149_shape_0 = const()[name = tensor("reshape_149_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_149_cast = reshape(shape = reshape_149_shape_0, x = real_div_37_cast)[name = tensor("reshape_149_cast")]; + tensor add_75_gamma_0_to_fp16 = const()[name = tensor("add_75_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397872064)))]; + tensor add_75_beta_0_to_fp16 = const()[name = tensor("add_75_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397873408)))]; + tensor add_75_epsilon_0_to_fp16 = const()[name = tensor("add_75_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_75_cast = batch_norm(beta = add_75_beta_0_to_fp16, epsilon = add_75_epsilon_0_to_fp16, gamma = add_75_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_149_cast)[name = tensor("add_75_cast")]; + tensor input_801_cast = silu(x = add_75_cast)[name = tensor("input_801_cast")]; + tensor var_13443 = const()[name = tensor("op_13443"), val = tensor([1, 1])]; + tensor var_13445 = const()[name = tensor("op_13445"), val = tensor([1, 1])]; + tensor hidden_states_555_pad_type_0 = const()[name = tensor("hidden_states_555_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_555_pad_0 = const()[name = tensor("hidden_states_555_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_2_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397874752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1401561216))), name = tensor("up_blocks_1_resnets_2_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor 
up_blocks_1_resnets_2_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1401561792)))]; + tensor hidden_states_555_cast = conv(bias = up_blocks_1_resnets_2_conv2_bias_to_fp16, dilations = var_13445, groups = var_12518, pad = hidden_states_555_pad_0, pad_type = hidden_states_555_pad_type_0, strides = var_13443, weight = up_blocks_1_resnets_2_conv2_weight_to_fp16_palettized, x = input_801_cast)[name = tensor("hidden_states_555_cast")]; + tensor var_13450 = const()[name = tensor("op_13450"), val = tensor([1, 1])]; + tensor var_13452 = const()[name = tensor("op_13452"), val = tensor([1, 1])]; + tensor x_15_pad_type_0 = const()[name = tensor("x_15_pad_type_0"), val = tensor("custom")]; + tensor x_15_pad_0 = const()[name = tensor("x_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_2_conv_shortcut_weight_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_conv_shortcut_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1401563136)))]; + tensor up_blocks_1_resnets_2_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1402792000)))]; + tensor x_15_cast = conv(bias = up_blocks_1_resnets_2_conv_shortcut_bias_to_fp16, dilations = var_13452, groups = var_12518, pad = x_15_pad_0, pad_type = x_15_pad_type_0, strides = var_13450, weight = up_blocks_1_resnets_2_conv_shortcut_weight_to_fp16, x = input_789_cast)[name = tensor("x_15_cast")]; + tensor hidden_states_557_cast = add(x = x_15_cast, y = hidden_states_555_cast)[name = tensor("hidden_states_557_cast")]; + tensor reshape_152_shape_0 = const()[name = tensor("reshape_152_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_152_cast = reshape(shape = reshape_152_shape_0, x = hidden_states_557_cast)[name = tensor("reshape_152_cast")]; + tensor reduce_mean_114_axes_0 = const()[name = tensor("reduce_mean_114_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_114_keep_dims_0 = const()[name = tensor("reduce_mean_114_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_114_cast = reduce_mean(axes = reduce_mean_114_axes_0, keep_dims = reduce_mean_114_keep_dims_0, x = reshape_152_cast)[name = tensor("reduce_mean_114_cast")]; + tensor sub_76_cast = sub(x = reshape_152_cast, y = reduce_mean_114_cast)[name = tensor("sub_76_cast")]; + tensor square_38_cast = square(x = sub_76_cast)[name = tensor("square_38_cast")]; + tensor reduce_mean_116_axes_0 = const()[name = tensor("reduce_mean_116_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_116_keep_dims_0 = const()[name = tensor("reduce_mean_116_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_116_cast = reduce_mean(axes = reduce_mean_116_axes_0, keep_dims = reduce_mean_116_keep_dims_0, x = square_38_cast)[name = tensor("reduce_mean_116_cast")]; + tensor add_76_y_0_to_fp16 = const()[name = tensor("add_76_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_76_cast = add(x = reduce_mean_116_cast, y = add_76_y_0_to_fp16)[name = tensor("add_76_cast")]; + tensor sqrt_38_cast = sqrt(x = add_76_cast)[name = tensor("sqrt_38_cast")]; + tensor real_div_38_cast = real_div(x = sub_76_cast, y = sqrt_38_cast)[name = tensor("real_div_38_cast")]; + tensor reshape_153_shape_0 = const()[name = tensor("reshape_153_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor 
reshape_153_cast = reshape(shape = reshape_153_shape_0, x = real_div_38_cast)[name = tensor("reshape_153_cast")]; + tensor add_77_gamma_0_to_fp16 = const()[name = tensor("add_77_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1402793344)))]; + tensor add_77_beta_0_to_fp16 = const()[name = tensor("add_77_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1402794688)))]; + tensor add_77_epsilon_0_to_fp16 = const()[name = tensor("add_77_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_77_cast = batch_norm(beta = add_77_beta_0_to_fp16, epsilon = add_77_epsilon_0_to_fp16, gamma = add_77_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_153_cast)[name = tensor("add_77_cast")]; + tensor var_13474 = const()[name = tensor("op_13474"), val = tensor([1, 1])]; + tensor var_13476 = const()[name = tensor("op_13476"), val = tensor([1, 1])]; + tensor hidden_states_559_pad_type_0 = const()[name = tensor("hidden_states_559_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_559_pad_0 = const()[name = tensor("hidden_states_559_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1402796032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403205696))), name = tensor("up_blocks_1_attentions_2_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403206272)))]; + tensor hidden_states_559_cast = conv(bias = up_blocks_1_attentions_2_proj_in_bias_to_fp16, dilations = var_13476, groups = var_12518, pad = hidden_states_559_pad_0, pad_type = hidden_states_559_pad_type_0, strides = var_13474, weight = up_blocks_1_attentions_2_proj_in_weight_to_fp16_palettized, x = add_77_cast)[name = tensor("hidden_states_559_cast")]; + tensor var_13481 = const()[name = tensor("op_13481"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_409_cast = reshape(shape = var_13481, x = hidden_states_559_cast)[name = tensor("inputs_409_cast")]; + tensor var_13491 = const()[name = tensor("op_13491"), val = tensor([1])]; + tensor channels_mean_409_cast = reduce_mean(axes = var_13491, keep_dims = var_12513, x = inputs_409_cast)[name = tensor("channels_mean_409_cast")]; + tensor zero_mean_409_cast = sub(x = inputs_409_cast, y = channels_mean_409_cast)[name = tensor("zero_mean_409_cast")]; + tensor zero_mean_sq_409_cast = mul(x = zero_mean_409_cast, y = zero_mean_409_cast)[name = tensor("zero_mean_sq_409_cast")]; + tensor var_13495 = const()[name = tensor("op_13495"), val = tensor([1])]; + tensor var_13496_cast = reduce_mean(axes = var_13495, keep_dims = var_12513, x = zero_mean_sq_409_cast)[name = tensor("op_13496_cast")]; + tensor var_13497_to_fp16 = const()[name = tensor("op_13497_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13498_cast = add(x = var_13496_cast, y = var_13497_to_fp16)[name = tensor("op_13498_cast")]; + tensor denom_409_epsilon_0_to_fp16 = const()[name = tensor("denom_409_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_409_cast = rsqrt(epsilon = denom_409_epsilon_0_to_fp16, x = 
var_13498_cast)[name = tensor("denom_409_cast")]; + tensor out_409_cast = mul(x = zero_mean_409_cast, y = denom_409_cast)[name = tensor("out_409_cast")]; + tensor var_13502_to_fp16 = const()[name = tensor("op_13502_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403207616)))]; + tensor var_13503_cast = add(x = out_409_cast, y = var_13502_to_fp16)[name = tensor("op_13503_cast")]; + tensor var_13505_to_fp16 = const()[name = tensor("op_13505_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403208960)))]; + tensor hidden_states_561_cast = mul(x = var_13503_cast, y = var_13505_to_fp16)[name = tensor("hidden_states_561_cast")]; + tensor var_13512 = const()[name = tensor("op_13512"), val = tensor([1, 1])]; + tensor var_13514 = const()[name = tensor("op_13514"), val = tensor([1, 1])]; + tensor q_273_pad_type_0 = const()[name = tensor("q_273_pad_type_0"), val = tensor("custom")]; + tensor q_273_pad_0 = const()[name = tensor("q_273_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403210304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403517568))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_273_cast = conv(dilations = var_13514, groups = var_12518, pad = q_273_pad_0, pad_type = q_273_pad_type_0, strides = var_13512, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_561_cast)[name = tensor("q_273_cast")]; + tensor var_13518 = const()[name = tensor("op_13518"), val = tensor([1, 1])]; + tensor var_13520 = const()[name = tensor("op_13520"), val = tensor([1, 1])]; + tensor k_273_pad_type_0 = const()[name = tensor("k_273_pad_type_0"), val = tensor("custom")]; + tensor k_273_pad_0 = const()[name = tensor("k_273_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403517760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403825024))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_273_cast = conv(dilations = var_13520, groups = var_12518, pad = k_273_pad_0, pad_type = k_273_pad_type_0, strides = var_13518, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_561_cast)[name = tensor("k_273_cast")]; + tensor var_13524 = const()[name = tensor("op_13524"), val = tensor([1, 1])]; + tensor var_13526 = const()[name = tensor("op_13526"), val = tensor([1, 1])]; + tensor v_273_pad_type_0 = const()[name = tensor("v_273_pad_type_0"), val = tensor("custom")]; + tensor v_273_pad_0 = const()[name = tensor("v_273_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403825216))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1404234880))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_273_cast = conv(dilations = var_13526, groups = var_12518, pad = v_273_pad_0, pad_type = v_273_pad_type_0, strides = var_13524, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_561_cast)[name = tensor("v_273_cast")]; + tensor var_13530 = const()[name = tensor("op_13530"), val = tensor([2, 10, 64, -1])]; + tensor var_13531_cast = reshape(shape = var_13530, x = q_273_cast)[name = tensor("op_13531_cast")]; + tensor var_13532 = const()[name = tensor("op_13532"), val = tensor([2, 10, 64, -1])]; + tensor var_13533_cast = reshape(shape = var_13532, x = k_273_cast)[name = tensor("op_13533_cast")]; + tensor var_13534 = const()[name = tensor("op_13534"), val = tensor([2, 10, 64, -1])]; + tensor var_13535_cast = reshape(shape = var_13534, x = v_273_cast)[name = tensor("op_13535_cast")]; + tensor attn_weights_545_transpose_x_0 = const()[name = tensor("attn_weights_545_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_545_transpose_y_0 = const()[name = tensor("attn_weights_545_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_545_cast = matmul(transpose_x = attn_weights_545_transpose_x_0, transpose_y = attn_weights_545_transpose_y_0, x = var_13531_cast, y = var_13533_cast)[name = tensor("attn_weights_545_cast")]; + tensor attn_weights_547_cast = mul(x = attn_weights_545_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_547_cast")]; + tensor var_13539_cast = softmax(axis = var_12502, x = attn_weights_547_cast)[name = tensor("op_13539_cast")]; + tensor attn_273_transpose_x_0 = const()[name = tensor("attn_273_transpose_x_0"), val = tensor(false)]; + tensor attn_273_transpose_y_0 = const()[name = tensor("attn_273_transpose_y_0"), val = tensor(true)]; + tensor attn_273_cast = matmul(transpose_x = attn_273_transpose_x_0, transpose_y = attn_273_transpose_y_0, x = var_13535_cast, y = var_13539_cast)[name = tensor("attn_273_cast")]; + tensor var_13543 = const()[name = tensor("op_13543"), val = tensor([2, 640, 1, -1])]; + tensor input_805_cast = reshape(shape = var_13543, x = attn_273_cast)[name = tensor("input_805_cast")]; + tensor var_13548 = const()[name = tensor("op_13548"), val = tensor([1, 1])]; + tensor var_13550 = const()[name = tensor("op_13550"), val = tensor([1, 1])]; + tensor var_13552_pad_type_0 = const()[name = tensor("op_13552_pad_type_0"), val = tensor("custom")]; + tensor var_13552_pad_0 = const()[name = tensor("op_13552_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1404235456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1404645120))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1404645696)))]; + tensor var_13552_cast = conv(bias = 
up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_13550, groups = var_12518, pad = var_13552_pad_0, pad_type = var_13552_pad_type_0, strides = var_13548, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_805_cast)[name = tensor("op_13552_cast")]; + tensor inputs_411_cast = add(x = var_13552_cast, y = inputs_409_cast)[name = tensor("inputs_411_cast")]; + tensor var_13556 = const()[name = tensor("op_13556"), val = tensor([1])]; + tensor channels_mean_411_cast = reduce_mean(axes = var_13556, keep_dims = var_12513, x = inputs_411_cast)[name = tensor("channels_mean_411_cast")]; + tensor zero_mean_411_cast = sub(x = inputs_411_cast, y = channels_mean_411_cast)[name = tensor("zero_mean_411_cast")]; + tensor zero_mean_sq_411_cast = mul(x = zero_mean_411_cast, y = zero_mean_411_cast)[name = tensor("zero_mean_sq_411_cast")]; + tensor var_13560 = const()[name = tensor("op_13560"), val = tensor([1])]; + tensor var_13561_cast = reduce_mean(axes = var_13560, keep_dims = var_12513, x = zero_mean_sq_411_cast)[name = tensor("op_13561_cast")]; + tensor var_13562_to_fp16 = const()[name = tensor("op_13562_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13563_cast = add(x = var_13561_cast, y = var_13562_to_fp16)[name = tensor("op_13563_cast")]; + tensor denom_411_epsilon_0_to_fp16 = const()[name = tensor("denom_411_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_411_cast = rsqrt(epsilon = denom_411_epsilon_0_to_fp16, x = var_13563_cast)[name = tensor("denom_411_cast")]; + tensor out_411_cast = mul(x = zero_mean_411_cast, y = denom_411_cast)[name = tensor("out_411_cast")]; + tensor var_13567_to_fp16 = const()[name = tensor("op_13567_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1404647040)))]; + tensor var_13568_cast = add(x = out_411_cast, y = var_13567_to_fp16)[name = tensor("op_13568_cast")]; + tensor var_13570_to_fp16 = const()[name = tensor("op_13570_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1404648384)))]; + tensor hidden_states_563_cast = mul(x = var_13568_cast, y = var_13570_to_fp16)[name = tensor("hidden_states_563_cast")]; + tensor var_13577 = const()[name = tensor("op_13577"), val = tensor([1, 1])]; + tensor var_13579 = const()[name = tensor("op_13579"), val = tensor([1, 1])]; + tensor q_275_pad_type_0 = const()[name = tensor("q_275_pad_type_0"), val = tensor("custom")]; + tensor q_275_pad_0 = const()[name = tensor("q_275_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1404649728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1404956992))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_275_cast = conv(dilations = var_13579, groups = var_12518, pad = q_275_pad_0, pad_type = q_275_pad_type_0, strides = var_13577, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_563_cast)[name = tensor("q_275_cast")]; + tensor var_13583 = const()[name = tensor("op_13583"), val = tensor([1, 1])]; + tensor var_13585 = const()[name = tensor("op_13585"), val = tensor([1, 1])]; + tensor k_275_pad_type_0 = 
const()[name = tensor("k_275_pad_type_0"), val = tensor("custom")]; + tensor k_275_pad_0 = const()[name = tensor("k_275_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1404957184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1405940288))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_275_cast = conv(dilations = var_13585, groups = var_12518, pad = k_275_pad_0, pad_type = k_275_pad_type_0, strides = var_13583, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_275_cast")]; + tensor var_13589 = const()[name = tensor("op_13589"), val = tensor([1, 1])]; + tensor var_13591 = const()[name = tensor("op_13591"), val = tensor([1, 1])]; + tensor v_275_pad_type_0 = const()[name = tensor("v_275_pad_type_0"), val = tensor("custom")]; + tensor v_275_pad_0 = const()[name = tensor("v_275_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1405940480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1406923584))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_275_cast = conv(dilations = var_13591, groups = var_12518, pad = v_275_pad_0, pad_type = v_275_pad_type_0, strides = var_13589, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_275_cast")]; + tensor var_13595 = const()[name = tensor("op_13595"), val = tensor([2, 10, 64, -1])]; + tensor var_13596_cast = reshape(shape = var_13595, x = q_275_cast)[name = tensor("op_13596_cast")]; + tensor var_13597 = const()[name = tensor("op_13597"), val = tensor([2, 10, 64, -1])]; + tensor var_13598_cast = reshape(shape = var_13597, x = k_275_cast)[name = tensor("op_13598_cast")]; + tensor var_13599 = const()[name = tensor("op_13599"), val = tensor([2, 10, 64, -1])]; + tensor var_13600_cast = reshape(shape = var_13599, x = v_275_cast)[name = tensor("op_13600_cast")]; + tensor attn_weights_549_transpose_x_0 = const()[name = tensor("attn_weights_549_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_549_transpose_y_0 = const()[name = tensor("attn_weights_549_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_549_cast = matmul(transpose_x = attn_weights_549_transpose_x_0, transpose_y = attn_weights_549_transpose_y_0, x = var_13596_cast, y = var_13598_cast)[name = tensor("attn_weights_549_cast")]; + tensor attn_weights_551_cast = mul(x = attn_weights_549_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_551_cast")]; + tensor var_13604_cast = softmax(axis = var_12502, x = attn_weights_551_cast)[name = tensor("op_13604_cast")]; + tensor attn_275_transpose_x_0 = const()[name = tensor("attn_275_transpose_x_0"), val = tensor(false)]; + tensor attn_275_transpose_y_0 = const()[name = tensor("attn_275_transpose_y_0"), val = tensor(true)]; + tensor attn_275_cast = matmul(transpose_x = attn_275_transpose_x_0, 
transpose_y = attn_275_transpose_y_0, x = var_13600_cast, y = var_13604_cast)[name = tensor("attn_275_cast")]; + tensor var_13608 = const()[name = tensor("op_13608"), val = tensor([2, 640, 1, -1])]; + tensor input_807_cast = reshape(shape = var_13608, x = attn_275_cast)[name = tensor("input_807_cast")]; + tensor var_13613 = const()[name = tensor("op_13613"), val = tensor([1, 1])]; + tensor var_13615 = const()[name = tensor("op_13615"), val = tensor([1, 1])]; + tensor var_13617_pad_type_0 = const()[name = tensor("op_13617_pad_type_0"), val = tensor("custom")]; + tensor var_13617_pad_0 = const()[name = tensor("op_13617_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1406923776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1407231040))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1407231232)))]; + tensor var_13617_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_13615, groups = var_12518, pad = var_13617_pad_0, pad_type = var_13617_pad_type_0, strides = var_13613, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_807_cast)[name = tensor("op_13617_cast")]; + tensor inputs_413_cast = add(x = var_13617_cast, y = inputs_411_cast)[name = tensor("inputs_413_cast")]; + tensor var_13621 = const()[name = tensor("op_13621"), val = tensor([1])]; + tensor channels_mean_413_cast = reduce_mean(axes = var_13621, keep_dims = var_12513, x = inputs_413_cast)[name = tensor("channels_mean_413_cast")]; + tensor zero_mean_413_cast = sub(x = inputs_413_cast, y = channels_mean_413_cast)[name = tensor("zero_mean_413_cast")]; + tensor zero_mean_sq_413_cast = mul(x = zero_mean_413_cast, y = zero_mean_413_cast)[name = tensor("zero_mean_sq_413_cast")]; + tensor var_13625 = const()[name = tensor("op_13625"), val = tensor([1])]; + tensor var_13626_cast = reduce_mean(axes = var_13625, keep_dims = var_12513, x = zero_mean_sq_413_cast)[name = tensor("op_13626_cast")]; + tensor var_13627_to_fp16 = const()[name = tensor("op_13627_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13628_cast = add(x = var_13626_cast, y = var_13627_to_fp16)[name = tensor("op_13628_cast")]; + tensor denom_413_epsilon_0_to_fp16 = const()[name = tensor("denom_413_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_413_cast = rsqrt(epsilon = denom_413_epsilon_0_to_fp16, x = var_13628_cast)[name = tensor("denom_413_cast")]; + tensor out_413_cast = mul(x = zero_mean_413_cast, y = denom_413_cast)[name = tensor("out_413_cast")]; + tensor var_13632_to_fp16 = const()[name = tensor("op_13632_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1407232576)))]; + tensor var_13633_cast = add(x = out_413_cast, y = var_13632_to_fp16)[name = tensor("op_13633_cast")]; + tensor var_13635_to_fp16 = const()[name = tensor("op_13635_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1407233920)))]; + tensor input_809_cast = mul(x = var_13633_cast, y = var_13635_to_fp16)[name = tensor("input_809_cast")]; + tensor var_13643 = const()[name = tensor("op_13643"), val = tensor([1, 1])]; + tensor var_13645 = const()[name = tensor("op_13645"), val = tensor([1, 1])]; + tensor var_13647_pad_type_0 = const()[name = tensor("op_13647_pad_type_0"), val = tensor("custom")]; + tensor var_13647_pad_0 = const()[name = tensor("op_13647_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1407235264))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1410512128))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1410512704)))]; + tensor var_13647_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_13645, groups = var_12518, pad = var_13647_pad_0, pad_type = var_13647_pad_type_0, strides = var_13643, weight = up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_809_cast)[name = tensor("op_13647_cast")]; + tensor var_13648_split_sizes_0 = const()[name = tensor("op_13648_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_13648_axis_0 = const()[name = tensor("op_13648_axis_0"), val = tensor(1)]; + tensor var_13648_cast_0, tensor var_13648_cast_1 = split(axis = var_13648_axis_0, split_sizes = var_13648_split_sizes_0, x = var_13647_cast)[name = tensor("op_13648_cast")]; + tensor var_13650_mode_0 = const()[name = tensor("op_13650_mode_0"), val = tensor("EXACT")]; + tensor var_13650_cast = gelu(mode = var_13650_mode_0, x = var_13648_cast_1)[name = tensor("op_13650_cast")]; + tensor input_811_cast = mul(x = var_13648_cast_0, y = var_13650_cast)[name = tensor("input_811_cast")]; + tensor var_13654 = const()[name = tensor("op_13654"), val = tensor([1, 1])]; + tensor var_13656 = const()[name = tensor("op_13656"), val = tensor([1, 1])]; + tensor var_13658_pad_type_0 = const()[name = tensor("op_13658_pad_type_0"), val = tensor("custom")]; + tensor var_13658_pad_0 = const()[name = tensor("op_13658_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1410523008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412161472))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412162048)))]; + tensor var_13658_cast = conv(bias = 
up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_13656, groups = var_12518, pad = var_13658_pad_0, pad_type = var_13658_pad_type_0, strides = var_13654, weight = up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_811_cast)[name = tensor("op_13658_cast")]; + tensor inputs_415_cast = add(x = var_13658_cast, y = inputs_413_cast)[name = tensor("inputs_415_cast")]; + tensor var_13668 = const()[name = tensor("op_13668"), val = tensor([1])]; + tensor channels_mean_415_cast = reduce_mean(axes = var_13668, keep_dims = var_12513, x = inputs_415_cast)[name = tensor("channels_mean_415_cast")]; + tensor zero_mean_415_cast = sub(x = inputs_415_cast, y = channels_mean_415_cast)[name = tensor("zero_mean_415_cast")]; + tensor zero_mean_sq_415_cast = mul(x = zero_mean_415_cast, y = zero_mean_415_cast)[name = tensor("zero_mean_sq_415_cast")]; + tensor var_13672 = const()[name = tensor("op_13672"), val = tensor([1])]; + tensor var_13673_cast = reduce_mean(axes = var_13672, keep_dims = var_12513, x = zero_mean_sq_415_cast)[name = tensor("op_13673_cast")]; + tensor var_13674_to_fp16 = const()[name = tensor("op_13674_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13675_cast = add(x = var_13673_cast, y = var_13674_to_fp16)[name = tensor("op_13675_cast")]; + tensor denom_415_epsilon_0_to_fp16 = const()[name = tensor("denom_415_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_415_cast = rsqrt(epsilon = denom_415_epsilon_0_to_fp16, x = var_13675_cast)[name = tensor("denom_415_cast")]; + tensor out_415_cast = mul(x = zero_mean_415_cast, y = denom_415_cast)[name = tensor("out_415_cast")]; + tensor var_13679_to_fp16 = const()[name = tensor("op_13679_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412163392)))]; + tensor var_13680_cast = add(x = out_415_cast, y = var_13679_to_fp16)[name = tensor("op_13680_cast")]; + tensor var_13682_to_fp16 = const()[name = tensor("op_13682_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412164736)))]; + tensor hidden_states_567_cast = mul(x = var_13680_cast, y = var_13682_to_fp16)[name = tensor("hidden_states_567_cast")]; + tensor var_13689 = const()[name = tensor("op_13689"), val = tensor([1, 1])]; + tensor var_13691 = const()[name = tensor("op_13691"), val = tensor([1, 1])]; + tensor q_277_pad_type_0 = const()[name = tensor("q_277_pad_type_0"), val = tensor("custom")]; + tensor q_277_pad_0 = const()[name = tensor("q_277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412166080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412575744))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_277_cast = conv(dilations = var_13691, groups = var_12518, pad = q_277_pad_0, pad_type = q_277_pad_type_0, strides = var_13689, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_567_cast)[name = tensor("q_277_cast")]; + tensor var_13695 = const()[name = tensor("op_13695"), val = tensor([1, 1])]; + tensor var_13697 = const()[name = tensor("op_13697"), val = tensor([1, 1])]; + tensor k_277_pad_type_0 = const()[name = 
tensor("k_277_pad_type_0"), val = tensor("custom")]; + tensor k_277_pad_0 = const()[name = tensor("k_277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412576320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412985984))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_277_cast = conv(dilations = var_13697, groups = var_12518, pad = k_277_pad_0, pad_type = k_277_pad_type_0, strides = var_13695, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_567_cast)[name = tensor("k_277_cast")]; + tensor var_13701 = const()[name = tensor("op_13701"), val = tensor([1, 1])]; + tensor var_13703 = const()[name = tensor("op_13703"), val = tensor([1, 1])]; + tensor v_277_pad_type_0 = const()[name = tensor("v_277_pad_type_0"), val = tensor("custom")]; + tensor v_277_pad_0 = const()[name = tensor("v_277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412986560))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1413396224))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_277_cast = conv(dilations = var_13703, groups = var_12518, pad = v_277_pad_0, pad_type = v_277_pad_type_0, strides = var_13701, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_567_cast)[name = tensor("v_277_cast")]; + tensor var_13707 = const()[name = tensor("op_13707"), val = tensor([2, 10, 64, -1])]; + tensor var_13708_cast = reshape(shape = var_13707, x = q_277_cast)[name = tensor("op_13708_cast")]; + tensor var_13709 = const()[name = tensor("op_13709"), val = tensor([2, 10, 64, -1])]; + tensor var_13710_cast = reshape(shape = var_13709, x = k_277_cast)[name = tensor("op_13710_cast")]; + tensor var_13711 = const()[name = tensor("op_13711"), val = tensor([2, 10, 64, -1])]; + tensor var_13712_cast = reshape(shape = var_13711, x = v_277_cast)[name = tensor("op_13712_cast")]; + tensor attn_weights_553_transpose_x_0 = const()[name = tensor("attn_weights_553_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_553_transpose_y_0 = const()[name = tensor("attn_weights_553_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_553_cast = matmul(transpose_x = attn_weights_553_transpose_x_0, transpose_y = attn_weights_553_transpose_y_0, x = var_13708_cast, y = var_13710_cast)[name = tensor("attn_weights_553_cast")]; + tensor attn_weights_555_cast = mul(x = attn_weights_553_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_555_cast")]; + tensor var_13716_cast = softmax(axis = var_12502, x = attn_weights_555_cast)[name = tensor("op_13716_cast")]; + tensor attn_277_transpose_x_0 = const()[name = tensor("attn_277_transpose_x_0"), val = tensor(false)]; + tensor attn_277_transpose_y_0 = const()[name = tensor("attn_277_transpose_y_0"), val = tensor(true)]; + tensor attn_277_cast = matmul(transpose_x = attn_277_transpose_x_0, transpose_y = 
attn_277_transpose_y_0, x = var_13712_cast, y = var_13716_cast)[name = tensor("attn_277_cast")]; + tensor var_13720 = const()[name = tensor("op_13720"), val = tensor([2, 640, 1, -1])]; + tensor input_813_cast = reshape(shape = var_13720, x = attn_277_cast)[name = tensor("input_813_cast")]; + tensor var_13725 = const()[name = tensor("op_13725"), val = tensor([1, 1])]; + tensor var_13727 = const()[name = tensor("op_13727"), val = tensor([1, 1])]; + tensor var_13729_pad_type_0 = const()[name = tensor("op_13729_pad_type_0"), val = tensor("custom")]; + tensor var_13729_pad_0 = const()[name = tensor("op_13729_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1413396800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1413806464))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1413807040)))]; + tensor var_13729_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_13727, groups = var_12518, pad = var_13729_pad_0, pad_type = var_13729_pad_type_0, strides = var_13725, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_813_cast)[name = tensor("op_13729_cast")]; + tensor inputs_417_cast = add(x = var_13729_cast, y = inputs_415_cast)[name = tensor("inputs_417_cast")]; + tensor var_13733 = const()[name = tensor("op_13733"), val = tensor([1])]; + tensor channels_mean_417_cast = reduce_mean(axes = var_13733, keep_dims = var_12513, x = inputs_417_cast)[name = tensor("channels_mean_417_cast")]; + tensor zero_mean_417_cast = sub(x = inputs_417_cast, y = channels_mean_417_cast)[name = tensor("zero_mean_417_cast")]; + tensor zero_mean_sq_417_cast = mul(x = zero_mean_417_cast, y = zero_mean_417_cast)[name = tensor("zero_mean_sq_417_cast")]; + tensor var_13737 = const()[name = tensor("op_13737"), val = tensor([1])]; + tensor var_13738_cast = reduce_mean(axes = var_13737, keep_dims = var_12513, x = zero_mean_sq_417_cast)[name = tensor("op_13738_cast")]; + tensor var_13739_to_fp16 = const()[name = tensor("op_13739_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13740_cast = add(x = var_13738_cast, y = var_13739_to_fp16)[name = tensor("op_13740_cast")]; + tensor denom_417_epsilon_0_to_fp16 = const()[name = tensor("denom_417_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_417_cast = rsqrt(epsilon = denom_417_epsilon_0_to_fp16, x = var_13740_cast)[name = tensor("denom_417_cast")]; + tensor out_417_cast = mul(x = zero_mean_417_cast, y = denom_417_cast)[name = tensor("out_417_cast")]; + tensor var_13744_to_fp16 = const()[name = tensor("op_13744_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1413808384)))]; + tensor var_13745_cast = add(x = out_417_cast, y = var_13744_to_fp16)[name = tensor("op_13745_cast")]; + tensor var_13747_to_fp16 = const()[name = tensor("op_13747_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1413809728)))]; + tensor hidden_states_569_cast = mul(x = var_13745_cast, y = var_13747_to_fp16)[name = tensor("hidden_states_569_cast")]; + tensor var_13754 = const()[name = tensor("op_13754"), val = tensor([1, 1])]; + tensor var_13756 = const()[name = tensor("op_13756"), val = tensor([1, 1])]; + tensor q_pad_type_0 = const()[name = tensor("q_pad_type_0"), val = tensor("custom")]; + tensor q_pad_0 = const()[name = tensor("q_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1413811072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1414118336))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_cast = conv(dilations = var_13756, groups = var_12518, pad = q_pad_0, pad_type = q_pad_type_0, strides = var_13754, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_569_cast)[name = tensor("q_cast")]; + tensor var_13760 = const()[name = tensor("op_13760"), val = tensor([1, 1])]; + tensor var_13762 = const()[name = tensor("op_13762"), val = tensor([1, 1])]; + tensor k_pad_type_0 = const()[name = tensor("k_pad_type_0"), val = tensor("custom")]; + tensor k_pad_0 = const()[name = tensor("k_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1414118528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1415101632))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_cast = conv(dilations = var_13762, groups = var_12518, pad = k_pad_0, pad_type = k_pad_type_0, strides = var_13760, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_cast")]; + tensor var_13766 = const()[name = tensor("op_13766"), val = tensor([1, 1])]; + tensor var_13768 = const()[name = tensor("op_13768"), val = tensor([1, 1])]; + tensor v_pad_type_0 = const()[name = tensor("v_pad_type_0"), val = tensor("custom")]; + tensor v_pad_0 = const()[name = tensor("v_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1415101824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416084928))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_cast = conv(dilations = var_13768, groups = var_12518, pad = v_pad_0, pad_type = v_pad_type_0, strides = var_13766, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_cast")]; + tensor var_13772 = const()[name = tensor("op_13772"), val = tensor([2, 10, 64, -1])]; + tensor var_13773_cast = reshape(shape = var_13772, x = q_cast)[name = 
tensor("op_13773_cast")]; + tensor var_13774 = const()[name = tensor("op_13774"), val = tensor([2, 10, 64, -1])]; + tensor var_13775_cast = reshape(shape = var_13774, x = k_cast)[name = tensor("op_13775_cast")]; + tensor var_13776 = const()[name = tensor("op_13776"), val = tensor([2, 10, 64, -1])]; + tensor var_13777_cast = reshape(shape = var_13776, x = v_cast)[name = tensor("op_13777_cast")]; + tensor attn_weights_557_transpose_x_0 = const()[name = tensor("attn_weights_557_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_557_transpose_y_0 = const()[name = tensor("attn_weights_557_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_557_cast = matmul(transpose_x = attn_weights_557_transpose_x_0, transpose_y = attn_weights_557_transpose_y_0, x = var_13773_cast, y = var_13775_cast)[name = tensor("attn_weights_557_cast")]; + tensor attn_weights_cast = mul(x = attn_weights_557_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_cast")]; + tensor var_13781_cast = softmax(axis = var_12502, x = attn_weights_cast)[name = tensor("op_13781_cast")]; + tensor attn_transpose_x_0 = const()[name = tensor("attn_transpose_x_0"), val = tensor(false)]; + tensor attn_transpose_y_0 = const()[name = tensor("attn_transpose_y_0"), val = tensor(true)]; + tensor attn_cast = matmul(transpose_x = attn_transpose_x_0, transpose_y = attn_transpose_y_0, x = var_13777_cast, y = var_13781_cast)[name = tensor("attn_cast")]; + tensor var_13785 = const()[name = tensor("op_13785"), val = tensor([2, 640, 1, -1])]; + tensor input_815_cast = reshape(shape = var_13785, x = attn_cast)[name = tensor("input_815_cast")]; + tensor var_13790 = const()[name = tensor("op_13790"), val = tensor([1, 1])]; + tensor var_13792 = const()[name = tensor("op_13792"), val = tensor([1, 1])]; + tensor var_13794_pad_type_0 = const()[name = tensor("op_13794_pad_type_0"), val = tensor("custom")]; + tensor var_13794_pad_0 = const()[name = tensor("op_13794_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416085120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416392384))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416392576)))]; + tensor var_13794_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_13792, groups = var_12518, pad = var_13794_pad_0, pad_type = var_13794_pad_type_0, strides = var_13790, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_815_cast)[name = tensor("op_13794_cast")]; + tensor inputs_cast = add(x = var_13794_cast, y = inputs_417_cast)[name = tensor("inputs_cast")]; + tensor var_13798 = const()[name = tensor("op_13798"), val = tensor([1])]; + tensor channels_mean_cast = reduce_mean(axes = var_13798, keep_dims = var_12513, x = inputs_cast)[name = tensor("channels_mean_cast")]; + tensor zero_mean_cast = sub(x = inputs_cast, y = channels_mean_cast)[name = tensor("zero_mean_cast")]; + tensor 
zero_mean_sq_cast = mul(x = zero_mean_cast, y = zero_mean_cast)[name = tensor("zero_mean_sq_cast")]; + tensor var_13802 = const()[name = tensor("op_13802"), val = tensor([1])]; + tensor var_13803_cast = reduce_mean(axes = var_13802, keep_dims = var_12513, x = zero_mean_sq_cast)[name = tensor("op_13803_cast")]; + tensor var_13804_to_fp16 = const()[name = tensor("op_13804_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13805_cast = add(x = var_13803_cast, y = var_13804_to_fp16)[name = tensor("op_13805_cast")]; + tensor denom_epsilon_0_to_fp16 = const()[name = tensor("denom_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_cast = rsqrt(epsilon = denom_epsilon_0_to_fp16, x = var_13805_cast)[name = tensor("denom_cast")]; + tensor out_cast = mul(x = zero_mean_cast, y = denom_cast)[name = tensor("out_cast")]; + tensor var_13809_to_fp16 = const()[name = tensor("op_13809_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416393920)))]; + tensor var_13810_cast = add(x = out_cast, y = var_13809_to_fp16)[name = tensor("op_13810_cast")]; + tensor var_13812_to_fp16 = const()[name = tensor("op_13812_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416395264)))]; + tensor input_817_cast = mul(x = var_13810_cast, y = var_13812_to_fp16)[name = tensor("input_817_cast")]; + tensor var_13820 = const()[name = tensor("op_13820"), val = tensor([1, 1])]; + tensor var_13822 = const()[name = tensor("op_13822"), val = tensor([1, 1])]; + tensor var_13824_pad_type_0 = const()[name = tensor("op_13824_pad_type_0"), val = tensor("custom")]; + tensor var_13824_pad_0 = const()[name = tensor("op_13824_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416396608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1419673472))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1419674048)))]; + tensor var_13824_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_13822, groups = var_12518, pad = var_13824_pad_0, pad_type = var_13824_pad_type_0, strides = var_13820, weight = up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_817_cast)[name = tensor("op_13824_cast")]; + tensor var_13825_split_sizes_0 = const()[name = tensor("op_13825_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_13825_axis_0 = const()[name = tensor("op_13825_axis_0"), val = tensor(1)]; + tensor var_13825_cast_0, tensor var_13825_cast_1 = split(axis = var_13825_axis_0, split_sizes = var_13825_split_sizes_0, x = var_13824_cast)[name = tensor("op_13825_cast")]; + tensor var_13827_mode_0 = const()[name = tensor("op_13827_mode_0"), val = tensor("EXACT")]; + tensor var_13827_cast = gelu(mode = var_13827_mode_0, x = var_13825_cast_1)[name = tensor("op_13827_cast")]; + tensor input_819_cast = mul(x = var_13825_cast_0, y = var_13827_cast)[name = 
tensor("input_819_cast")]; + tensor var_13831 = const()[name = tensor("op_13831"), val = tensor([1, 1])]; + tensor var_13833 = const()[name = tensor("op_13833"), val = tensor([1, 1])]; + tensor var_13835_pad_type_0 = const()[name = tensor("op_13835_pad_type_0"), val = tensor("custom")]; + tensor var_13835_pad_0 = const()[name = tensor("op_13835_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1419684352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1421322816))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1421323392)))]; + tensor var_13835_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_13833, groups = var_12518, pad = var_13835_pad_0, pad_type = var_13835_pad_type_0, strides = var_13831, weight = up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_819_cast)[name = tensor("op_13835_cast")]; + tensor hidden_states_573_cast = add(x = var_13835_cast, y = inputs_cast)[name = tensor("hidden_states_573_cast")]; + tensor var_13837 = const()[name = tensor("op_13837"), val = tensor([2, 640, 64, 64])]; + tensor input_821_cast = reshape(shape = var_13837, x = hidden_states_573_cast)[name = tensor("input_821_cast")]; + tensor var_13841 = const()[name = tensor("op_13841"), val = tensor([1, 1])]; + tensor var_13843 = const()[name = tensor("op_13843"), val = tensor([1, 1])]; + tensor hidden_states_575_pad_type_0 = const()[name = tensor("hidden_states_575_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_575_pad_0 = const()[name = tensor("hidden_states_575_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1421324736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1421734400))), name = tensor("up_blocks_1_attentions_2_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1421734976)))]; + tensor hidden_states_575_cast = conv(bias = up_blocks_1_attentions_2_proj_out_bias_to_fp16, dilations = var_13843, groups = var_12518, pad = hidden_states_575_pad_0, pad_type = hidden_states_575_pad_type_0, strides = var_13841, weight = up_blocks_1_attentions_2_proj_out_weight_to_fp16_palettized, x = input_821_cast)[name = tensor("hidden_states_575_cast")]; + tensor input_823_cast = add(x = hidden_states_575_cast, y = hidden_states_557_cast)[name = tensor("input_823_cast")]; + tensor input_825_scale_factor_height_0 = const()[name = tensor("input_825_scale_factor_height_0"), val = tensor(0x1p+1)]; + tensor input_825_scale_factor_width_0 = const()[name = 
tensor("input_825_scale_factor_width_0"), val = tensor(0x1p+1)]; + tensor input_825_cast = upsample_nearest_neighbor(scale_factor_height = input_825_scale_factor_height_0, scale_factor_width = input_825_scale_factor_width_0, x = input_823_cast)[name = tensor("input_825_cast")]; + tensor var_13852 = const()[name = tensor("op_13852"), val = tensor([1, 1])]; + tensor var_13854 = const()[name = tensor("op_13854"), val = tensor([1, 1])]; + tensor hidden_states_577_pad_type_0 = const()[name = tensor("hidden_states_577_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_577_pad_0 = const()[name = tensor("hidden_states_577_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_upsamplers_0_conv_weight_to_fp16 = const()[name = tensor("up_blocks_1_upsamplers_0_conv_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1421736320)))]; + tensor up_blocks_1_upsamplers_0_conv_bias_to_fp16 = const()[name = tensor("up_blocks_1_upsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1429109184)))]; + tensor hidden_states_577_cast = conv(bias = up_blocks_1_upsamplers_0_conv_bias_to_fp16, dilations = var_13854, groups = var_12518, pad = hidden_states_577_pad_0, pad_type = hidden_states_577_pad_type_0, strides = var_13852, weight = up_blocks_1_upsamplers_0_conv_weight_to_fp16, x = input_825_cast)[name = tensor("hidden_states_577_cast")]; + tensor var_13862 = const()[name = tensor("op_13862"), val = tensor(1)]; + tensor input_827_interleave_0 = const()[name = tensor("input_827_interleave_0"), val = tensor(false)]; + tensor input_827_cast = concat(axis = var_13862, interleave = input_827_interleave_0, values = (hidden_states_577_cast, input_43_cast))[name = tensor("input_827_cast")]; + tensor reshape_156_shape_0 = const()[name = tensor("reshape_156_shape_0"), val = tensor([2, 32, 30, 128, 128])]; + tensor reshape_156_cast = reshape(shape = reshape_156_shape_0, x = input_827_cast)[name = tensor("reshape_156_cast")]; + tensor reduce_mean_117_axes_0 = const()[name = tensor("reduce_mean_117_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_117_keep_dims_0 = const()[name = tensor("reduce_mean_117_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_117_cast = reduce_mean(axes = reduce_mean_117_axes_0, keep_dims = reduce_mean_117_keep_dims_0, x = reshape_156_cast)[name = tensor("reduce_mean_117_cast")]; + tensor sub_78_cast = sub(x = reshape_156_cast, y = reduce_mean_117_cast)[name = tensor("sub_78_cast")]; + tensor square_39_cast = square(x = sub_78_cast)[name = tensor("square_39_cast")]; + tensor reduce_mean_119_axes_0 = const()[name = tensor("reduce_mean_119_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_119_keep_dims_0 = const()[name = tensor("reduce_mean_119_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_119_cast = reduce_mean(axes = reduce_mean_119_axes_0, keep_dims = reduce_mean_119_keep_dims_0, x = square_39_cast)[name = tensor("reduce_mean_119_cast")]; + tensor add_78_y_0_to_fp16 = const()[name = tensor("add_78_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_78_cast = add(x = reduce_mean_119_cast, y = add_78_y_0_to_fp16)[name = tensor("add_78_cast")]; + tensor sqrt_39_cast = sqrt(x = add_78_cast)[name = tensor("sqrt_39_cast")]; + tensor real_div_39_cast = real_div(x = sub_78_cast, y = sqrt_39_cast)[name = tensor("real_div_39_cast")]; + tensor reshape_157_shape_0 = const()[name = tensor("reshape_157_shape_0"), val = tensor([2, 960, 128, 128])]; 
+ tensor reshape_157_cast = reshape(shape = reshape_157_shape_0, x = real_div_39_cast)[name = tensor("reshape_157_cast")]; + tensor add_79_gamma_0_to_fp16 = const()[name = tensor("add_79_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1429110528)))]; + tensor add_79_beta_0_to_fp16 = const()[name = tensor("add_79_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1429112512)))]; + tensor add_79_epsilon_0_to_fp16 = const()[name = tensor("add_79_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_79_cast = batch_norm(beta = add_79_beta_0_to_fp16, epsilon = add_79_epsilon_0_to_fp16, gamma = add_79_gamma_0_to_fp16, mean = add_73_mean_0_to_fp16, variance = add_73_variance_0_to_fp16, x = reshape_157_cast)[name = tensor("add_79_cast")]; + tensor input_831_cast = silu(x = add_79_cast)[name = tensor("input_831_cast")]; + tensor var_13883 = const()[name = tensor("op_13883"), val = tensor([1, 1])]; + tensor var_13885 = const()[name = tensor("op_13885"), val = tensor([1, 1])]; + tensor hidden_states_579_pad_type_0 = const()[name = tensor("hidden_states_579_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_579_pad_0 = const()[name = tensor("hidden_states_579_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_0_conv1_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1429114496)))]; + tensor up_blocks_2_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1434644160)))]; + tensor hidden_states_579_cast = conv(bias = up_blocks_2_resnets_0_conv1_bias_to_fp16, dilations = var_13885, groups = var_13862, pad = hidden_states_579_pad_0, pad_type = hidden_states_579_pad_type_0, strides = var_13883, weight = up_blocks_2_resnets_0_conv1_weight_to_fp16, x = input_831_cast)[name = tensor("hidden_states_579_cast")]; + tensor var_13891 = const()[name = tensor("op_13891"), val = tensor([1, 1])]; + tensor var_13893 = const()[name = tensor("op_13893"), val = tensor([1, 1])]; + tensor temb_29_pad_type_0 = const()[name = tensor("temb_29_pad_type_0"), val = tensor("custom")]; + tensor temb_29_pad_0 = const()[name = tensor("temb_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1434644864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1435054528))), name = tensor("up_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([320, 1280, 1, 1])]; + tensor up_blocks_2_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1435055104)))]; + tensor temb_29_cast = conv(bias = up_blocks_2_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_13893, groups = var_13862, pad = temb_29_pad_0, pad_type = temb_29_pad_type_0, strides = var_13891, weight = up_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_29_cast")]; + tensor input_835_cast = add(x = hidden_states_579_cast, y = temb_29_cast)[name = 
tensor("input_835_cast")]; + tensor reshape_160_shape_0 = const()[name = tensor("reshape_160_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_160_cast = reshape(shape = reshape_160_shape_0, x = input_835_cast)[name = tensor("reshape_160_cast")]; + tensor reduce_mean_120_axes_0 = const()[name = tensor("reduce_mean_120_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_120_keep_dims_0 = const()[name = tensor("reduce_mean_120_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_120_cast = reduce_mean(axes = reduce_mean_120_axes_0, keep_dims = reduce_mean_120_keep_dims_0, x = reshape_160_cast)[name = tensor("reduce_mean_120_cast")]; + tensor sub_80_cast = sub(x = reshape_160_cast, y = reduce_mean_120_cast)[name = tensor("sub_80_cast")]; + tensor square_40_cast = square(x = sub_80_cast)[name = tensor("square_40_cast")]; + tensor reduce_mean_122_axes_0 = const()[name = tensor("reduce_mean_122_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_122_keep_dims_0 = const()[name = tensor("reduce_mean_122_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_122_cast = reduce_mean(axes = reduce_mean_122_axes_0, keep_dims = reduce_mean_122_keep_dims_0, x = square_40_cast)[name = tensor("reduce_mean_122_cast")]; + tensor add_80_y_0_to_fp16 = const()[name = tensor("add_80_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_80_cast = add(x = reduce_mean_122_cast, y = add_80_y_0_to_fp16)[name = tensor("add_80_cast")]; + tensor sqrt_40_cast = sqrt(x = add_80_cast)[name = tensor("sqrt_40_cast")]; + tensor real_div_40_cast = real_div(x = sub_80_cast, y = sqrt_40_cast)[name = tensor("real_div_40_cast")]; + tensor reshape_161_shape_0 = const()[name = tensor("reshape_161_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_161_cast = reshape(shape = reshape_161_shape_0, x = real_div_40_cast)[name = tensor("reshape_161_cast")]; + tensor add_81_gamma_0_to_fp16 = const()[name = tensor("add_81_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1435055808)))]; + tensor add_81_beta_0_to_fp16 = const()[name = tensor("add_81_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1435056512)))]; + tensor add_81_epsilon_0_to_fp16 = const()[name = tensor("add_81_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_81_cast = batch_norm(beta = add_81_beta_0_to_fp16, epsilon = add_81_epsilon_0_to_fp16, gamma = add_81_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_161_cast)[name = tensor("add_81_cast")]; + tensor input_839_cast = silu(x = add_81_cast)[name = tensor("input_839_cast")]; + tensor var_13903 = const()[name = tensor("op_13903"), val = tensor([1, 1])]; + tensor var_13905 = const()[name = tensor("op_13905"), val = tensor([1, 1])]; + tensor hidden_states_581_pad_type_0 = const()[name = tensor("hidden_states_581_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_581_pad_0 = const()[name = tensor("hidden_states_581_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_0_conv2_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1435057216)))]; + tensor up_blocks_2_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1436900480)))]; + tensor 
hidden_states_581_cast = conv(bias = up_blocks_2_resnets_0_conv2_bias_to_fp16, dilations = var_13905, groups = var_13862, pad = hidden_states_581_pad_0, pad_type = hidden_states_581_pad_type_0, strides = var_13903, weight = up_blocks_2_resnets_0_conv2_weight_to_fp16, x = input_839_cast)[name = tensor("hidden_states_581_cast")]; + tensor var_13910 = const()[name = tensor("op_13910"), val = tensor([1, 1])]; + tensor var_13912 = const()[name = tensor("op_13912"), val = tensor([1, 1])]; + tensor x_17_pad_type_0 = const()[name = tensor("x_17_pad_type_0"), val = tensor("custom")]; + tensor x_17_pad_0 = const()[name = tensor("x_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_0_conv_shortcut_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv_shortcut_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1436901184)))]; + tensor up_blocks_2_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1437515648)))]; + tensor x_17_cast = conv(bias = up_blocks_2_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_13912, groups = var_13862, pad = x_17_pad_0, pad_type = x_17_pad_type_0, strides = var_13910, weight = up_blocks_2_resnets_0_conv_shortcut_weight_to_fp16, x = input_827_cast)[name = tensor("x_17_cast")]; + tensor hidden_states_583_cast = add(x = x_17_cast, y = hidden_states_581_cast)[name = tensor("hidden_states_583_cast")]; + tensor input_841_interleave_0 = const()[name = tensor("input_841_interleave_0"), val = tensor(false)]; + tensor input_841_cast = concat(axis = var_13862, interleave = input_841_interleave_0, values = (hidden_states_583_cast, input_29_cast))[name = tensor("input_841_cast")]; + tensor reshape_164_shape_0 = const()[name = tensor("reshape_164_shape_0"), val = tensor([2, 32, 20, 128, 128])]; + tensor reshape_164_cast = reshape(shape = reshape_164_shape_0, x = input_841_cast)[name = tensor("reshape_164_cast")]; + tensor reduce_mean_123_axes_0 = const()[name = tensor("reduce_mean_123_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_123_keep_dims_0 = const()[name = tensor("reduce_mean_123_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_123_cast = reduce_mean(axes = reduce_mean_123_axes_0, keep_dims = reduce_mean_123_keep_dims_0, x = reshape_164_cast)[name = tensor("reduce_mean_123_cast")]; + tensor sub_82_cast = sub(x = reshape_164_cast, y = reduce_mean_123_cast)[name = tensor("sub_82_cast")]; + tensor square_41_cast = square(x = sub_82_cast)[name = tensor("square_41_cast")]; + tensor reduce_mean_125_axes_0 = const()[name = tensor("reduce_mean_125_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_125_keep_dims_0 = const()[name = tensor("reduce_mean_125_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_125_cast = reduce_mean(axes = reduce_mean_125_axes_0, keep_dims = reduce_mean_125_keep_dims_0, x = square_41_cast)[name = tensor("reduce_mean_125_cast")]; + tensor add_82_y_0_to_fp16 = const()[name = tensor("add_82_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_82_cast = add(x = reduce_mean_125_cast, y = add_82_y_0_to_fp16)[name = tensor("add_82_cast")]; + tensor sqrt_41_cast = sqrt(x = add_82_cast)[name = tensor("sqrt_41_cast")]; + tensor real_div_41_cast = real_div(x = sub_82_cast, y = sqrt_41_cast)[name = tensor("real_div_41_cast")]; + tensor reshape_165_shape_0 = const()[name = tensor("reshape_165_shape_0"), val 
= tensor([2, 640, 128, 128])]; + tensor reshape_165_cast = reshape(shape = reshape_165_shape_0, x = real_div_41_cast)[name = tensor("reshape_165_cast")]; + tensor add_83_gamma_0_to_fp16 = const()[name = tensor("add_83_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1437516352)))]; + tensor add_83_beta_0_to_fp16 = const()[name = tensor("add_83_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1437517696)))]; + tensor add_83_epsilon_0_to_fp16 = const()[name = tensor("add_83_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_83_cast = batch_norm(beta = add_83_beta_0_to_fp16, epsilon = add_83_epsilon_0_to_fp16, gamma = add_83_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_165_cast)[name = tensor("add_83_cast")]; + tensor input_845_cast = silu(x = add_83_cast)[name = tensor("input_845_cast")]; + tensor var_13930 = const()[name = tensor("op_13930"), val = tensor([1, 1])]; + tensor var_13932 = const()[name = tensor("op_13932"), val = tensor([1, 1])]; + tensor hidden_states_585_pad_type_0 = const()[name = tensor("hidden_states_585_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_585_pad_0 = const()[name = tensor("hidden_states_585_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_1_conv1_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1437519040)))]; + tensor up_blocks_2_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1441205504)))]; + tensor hidden_states_585_cast = conv(bias = up_blocks_2_resnets_1_conv1_bias_to_fp16, dilations = var_13932, groups = var_13862, pad = hidden_states_585_pad_0, pad_type = hidden_states_585_pad_type_0, strides = var_13930, weight = up_blocks_2_resnets_1_conv1_weight_to_fp16, x = input_845_cast)[name = tensor("hidden_states_585_cast")]; + tensor var_13938 = const()[name = tensor("op_13938"), val = tensor([1, 1])]; + tensor var_13940 = const()[name = tensor("op_13940"), val = tensor([1, 1])]; + tensor temb_31_pad_type_0 = const()[name = tensor("temb_31_pad_type_0"), val = tensor("custom")]; + tensor temb_31_pad_0 = const()[name = tensor("temb_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1441206208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1441615872))), name = tensor("up_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([320, 1280, 1, 1])]; + tensor up_blocks_2_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1441616448)))]; + tensor temb_31_cast = conv(bias = up_blocks_2_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_13940, groups = var_13862, pad = temb_31_pad_0, pad_type = temb_31_pad_type_0, strides = var_13938, weight = up_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_31_cast")]; + tensor input_849_cast = add(x = hidden_states_585_cast, y = 
temb_31_cast)[name = tensor("input_849_cast")]; + tensor reshape_168_shape_0 = const()[name = tensor("reshape_168_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_168_cast = reshape(shape = reshape_168_shape_0, x = input_849_cast)[name = tensor("reshape_168_cast")]; + tensor reduce_mean_126_axes_0 = const()[name = tensor("reduce_mean_126_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_126_keep_dims_0 = const()[name = tensor("reduce_mean_126_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_126_cast = reduce_mean(axes = reduce_mean_126_axes_0, keep_dims = reduce_mean_126_keep_dims_0, x = reshape_168_cast)[name = tensor("reduce_mean_126_cast")]; + tensor sub_84_cast = sub(x = reshape_168_cast, y = reduce_mean_126_cast)[name = tensor("sub_84_cast")]; + tensor square_42_cast = square(x = sub_84_cast)[name = tensor("square_42_cast")]; + tensor reduce_mean_128_axes_0 = const()[name = tensor("reduce_mean_128_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_128_keep_dims_0 = const()[name = tensor("reduce_mean_128_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_128_cast = reduce_mean(axes = reduce_mean_128_axes_0, keep_dims = reduce_mean_128_keep_dims_0, x = square_42_cast)[name = tensor("reduce_mean_128_cast")]; + tensor add_84_y_0_to_fp16 = const()[name = tensor("add_84_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_84_cast = add(x = reduce_mean_128_cast, y = add_84_y_0_to_fp16)[name = tensor("add_84_cast")]; + tensor sqrt_42_cast = sqrt(x = add_84_cast)[name = tensor("sqrt_42_cast")]; + tensor real_div_42_cast = real_div(x = sub_84_cast, y = sqrt_42_cast)[name = tensor("real_div_42_cast")]; + tensor reshape_169_shape_0 = const()[name = tensor("reshape_169_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_169_cast = reshape(shape = reshape_169_shape_0, x = real_div_42_cast)[name = tensor("reshape_169_cast")]; + tensor add_85_gamma_0_to_fp16 = const()[name = tensor("add_85_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1441617152)))]; + tensor add_85_beta_0_to_fp16 = const()[name = tensor("add_85_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1441617856)))]; + tensor add_85_epsilon_0_to_fp16 = const()[name = tensor("add_85_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_85_cast = batch_norm(beta = add_85_beta_0_to_fp16, epsilon = add_85_epsilon_0_to_fp16, gamma = add_85_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_169_cast)[name = tensor("add_85_cast")]; + tensor input_853_cast = silu(x = add_85_cast)[name = tensor("input_853_cast")]; + tensor var_13950 = const()[name = tensor("op_13950"), val = tensor([1, 1])]; + tensor var_13952 = const()[name = tensor("op_13952"), val = tensor([1, 1])]; + tensor hidden_states_587_pad_type_0 = const()[name = tensor("hidden_states_587_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_587_pad_0 = const()[name = tensor("hidden_states_587_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_1_conv2_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1441618560)))]; + tensor up_blocks_2_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1443461824)))]; + tensor hidden_states_587_cast = conv(bias = up_blocks_2_resnets_1_conv2_bias_to_fp16, dilations = var_13952, groups = var_13862, pad = hidden_states_587_pad_0, pad_type = hidden_states_587_pad_type_0, strides = var_13950, weight = up_blocks_2_resnets_1_conv2_weight_to_fp16, x = input_853_cast)[name = tensor("hidden_states_587_cast")]; + tensor var_13957 = const()[name = tensor("op_13957"), val = tensor([1, 1])]; + tensor var_13959 = const()[name = tensor("op_13959"), val = tensor([1, 1])]; + tensor x_19_pad_type_0 = const()[name = tensor("x_19_pad_type_0"), val = tensor("custom")]; + tensor x_19_pad_0 = const()[name = tensor("x_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_1_conv_shortcut_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_conv_shortcut_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1443462528)))]; + tensor up_blocks_2_resnets_1_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1443872192)))]; + tensor x_19_cast = conv(bias = up_blocks_2_resnets_1_conv_shortcut_bias_to_fp16, dilations = var_13959, groups = var_13862, pad = x_19_pad_0, pad_type = x_19_pad_type_0, strides = var_13957, weight = up_blocks_2_resnets_1_conv_shortcut_weight_to_fp16, x = input_841_cast)[name = tensor("x_19_cast")]; + tensor hidden_states_589_cast = add(x = x_19_cast, y = hidden_states_587_cast)[name = tensor("hidden_states_589_cast")]; + tensor input_855_interleave_0 = const()[name = tensor("input_855_interleave_0"), val = tensor(false)]; + tensor input_855_cast = concat(axis = var_13862, interleave = input_855_interleave_0, values = (hidden_states_589_cast, input_13_cast))[name = tensor("input_855_cast")]; + tensor reshape_172_shape_0 = const()[name = tensor("reshape_172_shape_0"), val = tensor([2, 32, 20, 128, 128])]; + tensor reshape_172_cast = reshape(shape = reshape_172_shape_0, x = input_855_cast)[name = tensor("reshape_172_cast")]; + tensor reduce_mean_129_axes_0 = const()[name = tensor("reduce_mean_129_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_129_keep_dims_0 = const()[name = tensor("reduce_mean_129_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_129_cast = reduce_mean(axes = reduce_mean_129_axes_0, keep_dims = reduce_mean_129_keep_dims_0, x = reshape_172_cast)[name = tensor("reduce_mean_129_cast")]; + tensor sub_86_cast = sub(x = reshape_172_cast, y = reduce_mean_129_cast)[name = tensor("sub_86_cast")]; + tensor square_43_cast = square(x = sub_86_cast)[name = tensor("square_43_cast")]; + tensor reduce_mean_131_axes_0 = const()[name = tensor("reduce_mean_131_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_131_keep_dims_0 = const()[name = tensor("reduce_mean_131_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_131_cast = reduce_mean(axes = reduce_mean_131_axes_0, keep_dims = reduce_mean_131_keep_dims_0, x = square_43_cast)[name = tensor("reduce_mean_131_cast")]; + tensor add_86_y_0_to_fp16 = const()[name = tensor("add_86_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_86_cast = add(x = reduce_mean_131_cast, y = add_86_y_0_to_fp16)[name = tensor("add_86_cast")]; + tensor sqrt_43_cast = sqrt(x = add_86_cast)[name = tensor("sqrt_43_cast")]; + tensor real_div_43_cast = real_div(x = sub_86_cast, y = sqrt_43_cast)[name = tensor("real_div_43_cast")]; + tensor reshape_173_shape_0 = const()[name = 
tensor("reshape_173_shape_0"), val = tensor([2, 640, 128, 128])]; + tensor reshape_173_cast = reshape(shape = reshape_173_shape_0, x = real_div_43_cast)[name = tensor("reshape_173_cast")]; + tensor add_87_gamma_0_to_fp16 = const()[name = tensor("add_87_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1443872896)))]; + tensor add_87_beta_0_to_fp16 = const()[name = tensor("add_87_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1443874240)))]; + tensor add_87_epsilon_0_to_fp16 = const()[name = tensor("add_87_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_87_cast = batch_norm(beta = add_87_beta_0_to_fp16, epsilon = add_87_epsilon_0_to_fp16, gamma = add_87_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_173_cast)[name = tensor("add_87_cast")]; + tensor input_859_cast = silu(x = add_87_cast)[name = tensor("input_859_cast")]; + tensor var_13977 = const()[name = tensor("op_13977"), val = tensor([1, 1])]; + tensor var_13979 = const()[name = tensor("op_13979"), val = tensor([1, 1])]; + tensor hidden_states_591_pad_type_0 = const()[name = tensor("hidden_states_591_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_591_pad_0 = const()[name = tensor("hidden_states_591_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_2_conv1_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1443875584)))]; + tensor up_blocks_2_resnets_2_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1447562048)))]; + tensor hidden_states_591_cast = conv(bias = up_blocks_2_resnets_2_conv1_bias_to_fp16, dilations = var_13979, groups = var_13862, pad = hidden_states_591_pad_0, pad_type = hidden_states_591_pad_type_0, strides = var_13977, weight = up_blocks_2_resnets_2_conv1_weight_to_fp16, x = input_859_cast)[name = tensor("hidden_states_591_cast")]; + tensor var_13985 = const()[name = tensor("op_13985"), val = tensor([1, 1])]; + tensor var_13987 = const()[name = tensor("op_13987"), val = tensor([1, 1])]; + tensor temb_pad_type_0 = const()[name = tensor("temb_pad_type_0"), val = tensor("custom")]; + tensor temb_pad_0 = const()[name = tensor("temb_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_2_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1447562752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1447972416))), name = tensor("up_blocks_2_resnets_2_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([320, 1280, 1, 1])]; + tensor up_blocks_2_resnets_2_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1447972992)))]; + tensor temb_cast = conv(bias = up_blocks_2_resnets_2_time_emb_proj_bias_to_fp16, dilations = var_13987, groups = var_13862, pad = temb_pad_0, pad_type = temb_pad_type_0, strides = var_13985, weight = up_blocks_2_resnets_2_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_cast")]; + tensor input_863_cast = add(x = 
hidden_states_591_cast, y = temb_cast)[name = tensor("input_863_cast")]; + tensor reshape_176_shape_0 = const()[name = tensor("reshape_176_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_176_cast = reshape(shape = reshape_176_shape_0, x = input_863_cast)[name = tensor("reshape_176_cast")]; + tensor reduce_mean_132_axes_0 = const()[name = tensor("reduce_mean_132_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_132_keep_dims_0 = const()[name = tensor("reduce_mean_132_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_132_cast = reduce_mean(axes = reduce_mean_132_axes_0, keep_dims = reduce_mean_132_keep_dims_0, x = reshape_176_cast)[name = tensor("reduce_mean_132_cast")]; + tensor sub_88_cast = sub(x = reshape_176_cast, y = reduce_mean_132_cast)[name = tensor("sub_88_cast")]; + tensor square_44_cast = square(x = sub_88_cast)[name = tensor("square_44_cast")]; + tensor reduce_mean_134_axes_0 = const()[name = tensor("reduce_mean_134_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_134_keep_dims_0 = const()[name = tensor("reduce_mean_134_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_134_cast = reduce_mean(axes = reduce_mean_134_axes_0, keep_dims = reduce_mean_134_keep_dims_0, x = square_44_cast)[name = tensor("reduce_mean_134_cast")]; + tensor add_88_y_0_to_fp16 = const()[name = tensor("add_88_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_88_cast = add(x = reduce_mean_134_cast, y = add_88_y_0_to_fp16)[name = tensor("add_88_cast")]; + tensor sqrt_44_cast = sqrt(x = add_88_cast)[name = tensor("sqrt_44_cast")]; + tensor real_div_44_cast = real_div(x = sub_88_cast, y = sqrt_44_cast)[name = tensor("real_div_44_cast")]; + tensor reshape_177_shape_0 = const()[name = tensor("reshape_177_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_177_cast = reshape(shape = reshape_177_shape_0, x = real_div_44_cast)[name = tensor("reshape_177_cast")]; + tensor add_89_gamma_0_to_fp16 = const()[name = tensor("add_89_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1447973696)))]; + tensor add_89_beta_0_to_fp16 = const()[name = tensor("add_89_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1447974400)))]; + tensor add_89_epsilon_0_to_fp16 = const()[name = tensor("add_89_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_89_cast = batch_norm(beta = add_89_beta_0_to_fp16, epsilon = add_89_epsilon_0_to_fp16, gamma = add_89_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_177_cast)[name = tensor("add_89_cast")]; + tensor input_867_cast = silu(x = add_89_cast)[name = tensor("input_867_cast")]; + tensor var_13997 = const()[name = tensor("op_13997"), val = tensor([1, 1])]; + tensor var_13999 = const()[name = tensor("op_13999"), val = tensor([1, 1])]; + tensor hidden_states_pad_type_0 = const()[name = tensor("hidden_states_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_pad_0 = const()[name = tensor("hidden_states_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_2_conv2_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1447975104)))]; + tensor up_blocks_2_resnets_2_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1449818368)))]; + tensor hidden_states_cast = conv(bias = up_blocks_2_resnets_2_conv2_bias_to_fp16, dilations = var_13999, groups = var_13862, pad = hidden_states_pad_0, pad_type = hidden_states_pad_type_0, strides = var_13997, weight = up_blocks_2_resnets_2_conv2_weight_to_fp16, x = input_867_cast)[name = tensor("hidden_states_cast")]; + tensor var_14004 = const()[name = tensor("op_14004"), val = tensor([1, 1])]; + tensor var_14006 = const()[name = tensor("op_14006"), val = tensor([1, 1])]; + tensor x_pad_type_0 = const()[name = tensor("x_pad_type_0"), val = tensor("custom")]; + tensor x_pad_0 = const()[name = tensor("x_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_2_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1449819072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1450023936))), name = tensor("up_blocks_2_resnets_2_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([320, 640, 1, 1])]; + tensor up_blocks_2_resnets_2_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1450024512)))]; + tensor x_cast = conv(bias = up_blocks_2_resnets_2_conv_shortcut_bias_to_fp16, dilations = var_14006, groups = var_13862, pad = x_pad_0, pad_type = x_pad_type_0, strides = var_14004, weight = up_blocks_2_resnets_2_conv_shortcut_weight_to_fp16_palettized, x = input_855_cast)[name = tensor("x_cast")]; + tensor input_869_cast = add(x = x_cast, y = hidden_states_cast)[name = tensor("input_869_cast")]; + tensor reshape_180_shape_0 = const()[name = tensor("reshape_180_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_180_cast = reshape(shape = reshape_180_shape_0, x = input_869_cast)[name = tensor("reshape_180_cast")]; + tensor reduce_mean_135_axes_0 = const()[name = tensor("reduce_mean_135_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_135_keep_dims_0 = const()[name = tensor("reduce_mean_135_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_135_cast = reduce_mean(axes = reduce_mean_135_axes_0, keep_dims = reduce_mean_135_keep_dims_0, x = reshape_180_cast)[name = tensor("reduce_mean_135_cast")]; + tensor sub_90_cast = sub(x = reshape_180_cast, y = reduce_mean_135_cast)[name = tensor("sub_90_cast")]; + tensor square_45_cast = square(x = sub_90_cast)[name = tensor("square_45_cast")]; + tensor reduce_mean_137_axes_0 = const()[name = tensor("reduce_mean_137_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_137_keep_dims_0 = const()[name = tensor("reduce_mean_137_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_137_cast = reduce_mean(axes = reduce_mean_137_axes_0, keep_dims = reduce_mean_137_keep_dims_0, x = square_45_cast)[name = tensor("reduce_mean_137_cast")]; + tensor add_90_y_0_to_fp16 = const()[name = tensor("add_90_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_90_cast = add(x = reduce_mean_137_cast, y = add_90_y_0_to_fp16)[name = tensor("add_90_cast")]; + tensor sqrt_45_cast = sqrt(x = add_90_cast)[name = tensor("sqrt_45_cast")]; + tensor real_div_45_cast = real_div(x = sub_90_cast, y = sqrt_45_cast)[name = tensor("real_div_45_cast")]; + tensor reshape_181_shape_0 = const()[name = tensor("reshape_181_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_181_cast = reshape(shape = reshape_181_shape_0, x = 
real_div_45_cast)[name = tensor("reshape_181_cast")]; + tensor add_91_gamma_0_to_fp16 = const()[name = tensor("add_91_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1450025216)))]; + tensor add_91_beta_0_to_fp16 = const()[name = tensor("add_91_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1450025920)))]; + tensor add_91_epsilon_0_to_fp16 = const()[name = tensor("add_91_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_91_cast = batch_norm(beta = add_91_beta_0_to_fp16, epsilon = add_91_epsilon_0_to_fp16, gamma = add_91_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_181_cast)[name = tensor("add_91_cast")]; + tensor input_cast = silu(x = add_91_cast)[name = tensor("input_cast")]; + tensor var_14020 = const()[name = tensor("op_14020"), val = tensor(1)]; + tensor var_14023 = const()[name = tensor("op_14023"), val = tensor([1, 1])]; + tensor var_14025 = const()[name = tensor("op_14025"), val = tensor([1, 1])]; + tensor var_14027_pad_type_0 = const()[name = tensor("op_14027_pad_type_0"), val = tensor("custom")]; + tensor var_14027_pad_0 = const()[name = tensor("op_14027_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor conv_out_weight_to_fp16 = const()[name = tensor("conv_out_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1450026624)))]; + tensor conv_out_bias_to_fp16 = const()[name = tensor("conv_out_bias_to_fp16"), val = tensor([0x1.664p-9, -0x1.72p-10, 0x1.06p-9, -0x1.9b8p-9])]; + tensor var_14027_cast = conv(bias = conv_out_bias_to_fp16, dilations = var_14025, groups = var_14020, pad = var_14027_pad_0, pad_type = var_14027_pad_type_0, strides = var_14023, weight = conv_out_weight_to_fp16, x = input_cast)[name = tensor("op_14027_cast")]; + tensor var_14027_cast_to_fp32_dtype_0 = const()[name = tensor("op_14027_cast_to_fp32_dtype_0"), val = tensor("fp32")]; + tensor noise_pred = cast(dtype = var_14027_cast_to_fp32_dtype_0, x = var_14027_cast)[name = tensor("cast_0")]; + } -> (noise_pred); +} \ No newline at end of file diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/Unet.mlmodelc/weights/weight.bin b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/Unet.mlmodelc/weights/weight.bin new file mode 100644 index 0000000000000000000000000000000000000000..11a02126a20d91b2bfba01311502816d2bdfae06 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/Unet.mlmodelc/weights/weight.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9219e9fcaa60ff76a31ea84658b485264b50f66147c058e18a7cfd456bb0f3a +size 1450049728 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEDecoder.mlmodelc/analytics/coremldata.bin b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEDecoder.mlmodelc/analytics/coremldata.bin new file mode 100644 index 0000000000000000000000000000000000000000..5e5dc49b64e54b805e99f3ddf883fa95d3b9391f --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEDecoder.mlmodelc/analytics/coremldata.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bac2854504d7f6bfdc5645982dd965cca9cc8c12b9fdd2493cf50cd583684cc2 +size 207 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEDecoder.mlmodelc/coremldata.bin 
b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEDecoder.mlmodelc/coremldata.bin new file mode 100644 index 0000000000000000000000000000000000000000..191ad6c2c088a6da76dd03ecfef4ff2d24ed1f6f --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEDecoder.mlmodelc/coremldata.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:766fce2585587fe93a1d32e09bc4d63ad45335ea62239f11b193e481b5888258 +size 773 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEDecoder.mlmodelc/metadata.json b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEDecoder.mlmodelc/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..74234c580fbd56e743ae1ade72208e39a2d1197b --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEDecoder.mlmodelc/metadata.json @@ -0,0 +1,74 @@ +[ + { + "shortDescription" : "Stable Diffusion generates images conditioned on text and\/or other images as input through the diffusion process. Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.", + "metadataOutputVersion" : "3.0", + "outputSchema" : [ + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float32", + "formattedType" : "MultiArray (Float32)", + "shortDescription" : "Generated image normalized to range [-1, 1]", + "shape" : "[]", + "name" : "image", + "type" : "MultiArray" + } + ], + "version" : "diffusers\/stable-diffusion-xl-base-1.0", + "modelParameters" : [ + + ], + "author" : "Please refer to the Model Card available at huggingface.co\/diffusers\/stable-diffusion-xl-base-1.0", + "specificationVersion" : 7, + "storagePrecision" : "Float32", + "license" : "OpenRAIL (https:\/\/huggingface.co\/spaces\/CompVis\/stable-diffusion-license)", + "mlProgramOperationTypeHistogram" : { + "Ios16.mul" : 2, + "Ios16.sqrt" : 30, + "Ios16.sub" : 30, + "Transpose" : 6, + "UpsampleNearestNeighbor" : 3, + "Ios16.conv" : 36, + "Ios16.add" : 46, + "Ios16.linear" : 4, + "Ios16.matmul" : 2, + "Ios16.realDiv" : 30, + "Ios16.reduceMean" : 60, + "Ios16.softmax" : 1, + "Ios16.batchNorm" : 29, + "Ios16.square" : 30, + "Ios16.reshape" : 65, + "Ios16.silu" : 29 + }, + "computePrecision" : "Mixed (Float32, Int32)", + "isUpdatable" : "0", + "availability" : { + "macOS" : "13.0", + "tvOS" : "16.0", + "watchOS" : "9.0", + "iOS" : "16.0", + "macCatalyst" : "16.0" + }, + "modelType" : { + "name" : "MLModelType_mlProgram" + }, + "inputSchema" : [ + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float32", + "formattedType" : "MultiArray (Float32 1 × 4 × 128 × 128)", + "shortDescription" : "The denoised latent embeddings from the unet model after the last step of reverse diffusion", + "shape" : "[1, 4, 128, 128]", + "name" : "z", + "type" : "MultiArray" + } + ], + "userDefinedMetadata" : { + "com.github.apple.coremltools.version" : "7.0b1", + "com.github.apple.coremltools.source" : "torch==2.0.1" + }, + "generatedClassName" : "Stable_Diffusion_version_diffusers_stable_diffusion_xl_base_1_0_vae_decoder", + "method" : "predict" + } +] \ No newline at end of file diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEDecoder.mlmodelc/model.mil b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEDecoder.mlmodelc/model.mil new file mode 100644 index 0000000000000000000000000000000000000000..1934b801c86ffe3e71435c98defa3d7058d8498a --- /dev/null +++ 
b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEDecoder.mlmodelc/model.mil @@ -0,0 +1,963 @@ +program(1.0) +[buildInfo = dict, tensor>({{"coremlc-component-MIL", "5.33.4"}, {"coremlc-version", "1839.0.0"}, {"coremltools-component-torch", "2.0.1"}, {"coremltools-version", "7.0b1"}})] +{ + func main(tensor z) { + tensor post_quant_conv_bias = const()[name = tensor("post_quant_conv_bias"), val = tensor([-0x1.dbcp-5, 0x1.cf4p-3, -0x1.c7cp-4, 0x1.adp-3])]; + tensor post_quant_conv_weight = const()[name = tensor("post_quant_conv_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(64)))]; + tensor decoder_conv_in_bias = const()[name = tensor("decoder_conv_in_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(192)))]; + tensor decoder_conv_in_weight = const()[name = tensor("decoder_conv_in_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2304)))]; + tensor decoder_mid_block_resnets_0_conv1_bias = const()[name = tensor("decoder_mid_block_resnets_0_conv1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(76096)))]; + tensor decoder_mid_block_resnets_0_conv1_weight = const()[name = tensor("decoder_mid_block_resnets_0_conv1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(78208)))]; + tensor decoder_mid_block_resnets_0_conv2_bias = const()[name = tensor("decoder_mid_block_resnets_0_conv2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(9515456)))]; + tensor decoder_mid_block_resnets_0_conv2_weight = const()[name = tensor("decoder_mid_block_resnets_0_conv2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(9517568)))]; + tensor decoder_mid_block_attentions_0_to_q_bias = const()[name = tensor("decoder_mid_block_attentions_0_to_q_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18954816)))]; + tensor decoder_mid_block_attentions_0_to_q_weight = const()[name = tensor("decoder_mid_block_attentions_0_to_q_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18956928)))]; + tensor decoder_mid_block_attentions_0_to_k_bias = const()[name = tensor("decoder_mid_block_attentions_0_to_k_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20005568)))]; + tensor decoder_mid_block_attentions_0_to_k_weight = const()[name = tensor("decoder_mid_block_attentions_0_to_k_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20007680)))]; + tensor decoder_mid_block_attentions_0_to_v_bias = const()[name = tensor("decoder_mid_block_attentions_0_to_v_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(21056320)))]; + tensor decoder_mid_block_attentions_0_to_v_weight = const()[name = tensor("decoder_mid_block_attentions_0_to_v_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(21058432)))]; + tensor decoder_mid_block_attentions_0_to_out_0_bias = const()[name = tensor("decoder_mid_block_attentions_0_to_out_0_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(22107072)))]; + tensor decoder_mid_block_attentions_0_to_out_0_weight = const()[name = 
tensor("decoder_mid_block_attentions_0_to_out_0_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(22109184)))]; + tensor decoder_mid_block_resnets_1_conv1_bias = const()[name = tensor("decoder_mid_block_resnets_1_conv1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(23157824)))]; + tensor decoder_mid_block_resnets_1_conv1_weight = const()[name = tensor("decoder_mid_block_resnets_1_conv1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(23159936)))]; + tensor decoder_mid_block_resnets_1_conv2_bias = const()[name = tensor("decoder_mid_block_resnets_1_conv2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(32597184)))]; + tensor decoder_mid_block_resnets_1_conv2_weight = const()[name = tensor("decoder_mid_block_resnets_1_conv2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(32599296)))]; + tensor decoder_up_blocks_0_resnets_0_conv1_bias = const()[name = tensor("decoder_up_blocks_0_resnets_0_conv1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42036544)))]; + tensor decoder_up_blocks_0_resnets_0_conv1_weight = const()[name = tensor("decoder_up_blocks_0_resnets_0_conv1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42038656)))]; + tensor decoder_up_blocks_0_resnets_0_conv2_bias = const()[name = tensor("decoder_up_blocks_0_resnets_0_conv2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51475904)))]; + tensor decoder_up_blocks_0_resnets_0_conv2_weight = const()[name = tensor("decoder_up_blocks_0_resnets_0_conv2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51478016)))]; + tensor decoder_up_blocks_0_resnets_1_conv1_bias = const()[name = tensor("decoder_up_blocks_0_resnets_1_conv1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60915264)))]; + tensor decoder_up_blocks_0_resnets_1_conv1_weight = const()[name = tensor("decoder_up_blocks_0_resnets_1_conv1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60917376)))]; + tensor decoder_up_blocks_0_resnets_1_conv2_bias = const()[name = tensor("decoder_up_blocks_0_resnets_1_conv2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(70354624)))]; + tensor decoder_up_blocks_0_resnets_1_conv2_weight = const()[name = tensor("decoder_up_blocks_0_resnets_1_conv2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(70356736)))]; + tensor decoder_up_blocks_0_resnets_2_conv1_bias = const()[name = tensor("decoder_up_blocks_0_resnets_2_conv1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79793984)))]; + tensor decoder_up_blocks_0_resnets_2_conv1_weight = const()[name = tensor("decoder_up_blocks_0_resnets_2_conv1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79796096)))]; + tensor decoder_up_blocks_0_resnets_2_conv2_bias = const()[name = tensor("decoder_up_blocks_0_resnets_2_conv2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(89233344)))]; + tensor decoder_up_blocks_0_resnets_2_conv2_weight = const()[name = 
tensor("decoder_up_blocks_0_resnets_2_conv2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(89235456)))]; + tensor decoder_up_blocks_0_upsamplers_0_conv_bias = const()[name = tensor("decoder_up_blocks_0_upsamplers_0_conv_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(98672704)))]; + tensor decoder_up_blocks_0_upsamplers_0_conv_weight = const()[name = tensor("decoder_up_blocks_0_upsamplers_0_conv_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(98674816)))]; + tensor decoder_up_blocks_1_resnets_0_conv1_bias = const()[name = tensor("decoder_up_blocks_1_resnets_0_conv1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(108112064)))]; + tensor decoder_up_blocks_1_resnets_0_conv1_weight = const()[name = tensor("decoder_up_blocks_1_resnets_0_conv1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(108114176)))]; + tensor decoder_up_blocks_1_resnets_0_conv2_bias = const()[name = tensor("decoder_up_blocks_1_resnets_0_conv2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(117551424)))]; + tensor decoder_up_blocks_1_resnets_0_conv2_weight = const()[name = tensor("decoder_up_blocks_1_resnets_0_conv2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(117553536)))]; + tensor decoder_up_blocks_1_resnets_1_conv1_bias = const()[name = tensor("decoder_up_blocks_1_resnets_1_conv1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126990784)))]; + tensor decoder_up_blocks_1_resnets_1_conv1_weight = const()[name = tensor("decoder_up_blocks_1_resnets_1_conv1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126992896)))]; + tensor decoder_up_blocks_1_resnets_1_conv2_bias = const()[name = tensor("decoder_up_blocks_1_resnets_1_conv2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(136430144)))]; + tensor decoder_up_blocks_1_resnets_1_conv2_weight = const()[name = tensor("decoder_up_blocks_1_resnets_1_conv2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(136432256)))]; + tensor decoder_up_blocks_1_resnets_2_conv1_bias = const()[name = tensor("decoder_up_blocks_1_resnets_2_conv1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(145869504)))]; + tensor decoder_up_blocks_1_resnets_2_conv1_weight = const()[name = tensor("decoder_up_blocks_1_resnets_2_conv1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(145871616)))]; + tensor decoder_up_blocks_1_resnets_2_conv2_bias = const()[name = tensor("decoder_up_blocks_1_resnets_2_conv2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(155308864)))]; + tensor decoder_up_blocks_1_resnets_2_conv2_weight = const()[name = tensor("decoder_up_blocks_1_resnets_2_conv2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(155310976)))]; + tensor decoder_up_blocks_1_upsamplers_0_conv_bias = const()[name = tensor("decoder_up_blocks_1_upsamplers_0_conv_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(164748224)))]; + tensor 
decoder_up_blocks_1_upsamplers_0_conv_weight = const()[name = tensor("decoder_up_blocks_1_upsamplers_0_conv_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(164750336)))]; + tensor decoder_up_blocks_2_resnets_0_conv1_bias = const()[name = tensor("decoder_up_blocks_2_resnets_0_conv1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(174187584)))]; + tensor decoder_up_blocks_2_resnets_0_conv1_weight = const()[name = tensor("decoder_up_blocks_2_resnets_0_conv1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(174188672)))]; + tensor decoder_up_blocks_2_resnets_0_conv2_bias = const()[name = tensor("decoder_up_blocks_2_resnets_0_conv2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(178907328)))]; + tensor decoder_up_blocks_2_resnets_0_conv2_weight = const()[name = tensor("decoder_up_blocks_2_resnets_0_conv2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(178908416)))]; + tensor decoder_up_blocks_2_resnets_0_conv_shortcut_bias = const()[name = tensor("decoder_up_blocks_2_resnets_0_conv_shortcut_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181267776)))]; + tensor decoder_up_blocks_2_resnets_0_conv_shortcut_weight = const()[name = tensor("decoder_up_blocks_2_resnets_0_conv_shortcut_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181268864)))]; + tensor decoder_up_blocks_2_resnets_1_conv1_bias = const()[name = tensor("decoder_up_blocks_2_resnets_1_conv1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181793216)))]; + tensor decoder_up_blocks_2_resnets_1_conv1_weight = const()[name = tensor("decoder_up_blocks_2_resnets_1_conv1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181794304)))]; + tensor decoder_up_blocks_2_resnets_1_conv2_bias = const()[name = tensor("decoder_up_blocks_2_resnets_1_conv2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184153664)))]; + tensor decoder_up_blocks_2_resnets_1_conv2_weight = const()[name = tensor("decoder_up_blocks_2_resnets_1_conv2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184154752)))]; + tensor decoder_up_blocks_2_resnets_2_conv1_bias = const()[name = tensor("decoder_up_blocks_2_resnets_2_conv1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(186514112)))]; + tensor decoder_up_blocks_2_resnets_2_conv1_weight = const()[name = tensor("decoder_up_blocks_2_resnets_2_conv1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(186515200)))]; + tensor decoder_up_blocks_2_resnets_2_conv2_bias = const()[name = tensor("decoder_up_blocks_2_resnets_2_conv2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(188874560)))]; + tensor decoder_up_blocks_2_resnets_2_conv2_weight = const()[name = tensor("decoder_up_blocks_2_resnets_2_conv2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(188875648)))]; + tensor decoder_up_blocks_2_upsamplers_0_conv_bias = const()[name = tensor("decoder_up_blocks_2_upsamplers_0_conv_bias"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(191235008)))]; + tensor decoder_up_blocks_2_upsamplers_0_conv_weight = const()[name = tensor("decoder_up_blocks_2_upsamplers_0_conv_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(191236096)))]; + tensor decoder_up_blocks_3_resnets_0_conv1_bias = const()[name = tensor("decoder_up_blocks_3_resnets_0_conv1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(193595456)))]; + tensor decoder_up_blocks_3_resnets_0_conv1_weight = const()[name = tensor("decoder_up_blocks_3_resnets_0_conv1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(193596032)))]; + tensor decoder_up_blocks_3_resnets_0_conv2_bias = const()[name = tensor("decoder_up_blocks_3_resnets_0_conv2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(194775744)))]; + tensor decoder_up_blocks_3_resnets_0_conv2_weight = const()[name = tensor("decoder_up_blocks_3_resnets_0_conv2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(194776320)))]; + tensor decoder_up_blocks_3_resnets_0_conv_shortcut_bias = const()[name = tensor("decoder_up_blocks_3_resnets_0_conv_shortcut_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(195366208)))]; + tensor decoder_up_blocks_3_resnets_0_conv_shortcut_weight = const()[name = tensor("decoder_up_blocks_3_resnets_0_conv_shortcut_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(195366784)))]; + tensor decoder_up_blocks_3_resnets_1_conv1_bias = const()[name = tensor("decoder_up_blocks_3_resnets_1_conv1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(195497920)))]; + tensor decoder_up_blocks_3_resnets_1_conv1_weight = const()[name = tensor("decoder_up_blocks_3_resnets_1_conv1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(195498496)))]; + tensor decoder_up_blocks_3_resnets_1_conv2_bias = const()[name = tensor("decoder_up_blocks_3_resnets_1_conv2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(196088384)))]; + tensor decoder_up_blocks_3_resnets_1_conv2_weight = const()[name = tensor("decoder_up_blocks_3_resnets_1_conv2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(196088960)))]; + tensor decoder_up_blocks_3_resnets_2_conv1_bias = const()[name = tensor("decoder_up_blocks_3_resnets_2_conv1_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(196678848)))]; + tensor decoder_up_blocks_3_resnets_2_conv1_weight = const()[name = tensor("decoder_up_blocks_3_resnets_2_conv1_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(196679424)))]; + tensor decoder_up_blocks_3_resnets_2_conv2_bias = const()[name = tensor("decoder_up_blocks_3_resnets_2_conv2_bias"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197269312)))]; + tensor decoder_up_blocks_3_resnets_2_conv2_weight = const()[name = tensor("decoder_up_blocks_3_resnets_2_conv2_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197269888)))]; + tensor decoder_conv_out_bias = const()[name = tensor("decoder_conv_out_bias"), val = 
tensor([0x1.fd4p-4, 0x1.4c8p-4, 0x1.93p-5])]; + tensor decoder_conv_out_weight = const()[name = tensor("decoder_conv_out_weight"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197859776)))]; + tensor var_7 = const()[name = tensor("op_7"), val = tensor(1)]; + tensor var_10 = const()[name = tensor("op_10"), val = tensor([1, 1])]; + tensor var_12 = const()[name = tensor("op_12"), val = tensor([1, 1])]; + tensor input_1_pad_type_0 = const()[name = tensor("input_1_pad_type_0"), val = tensor("custom")]; + tensor input_1_pad_0 = const()[name = tensor("input_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor input_1 = conv(bias = post_quant_conv_bias, dilations = var_12, groups = var_7, pad = input_1_pad_0, pad_type = input_1_pad_type_0, strides = var_10, weight = post_quant_conv_weight, x = z)[name = tensor("input_1")]; + tensor var_26 = const()[name = tensor("op_26"), val = tensor(1)]; + tensor var_44 = const()[name = tensor("op_44"), val = tensor([1, 1])]; + tensor var_46 = const()[name = tensor("op_46"), val = tensor([1, 1])]; + tensor input_3_pad_type_0 = const()[name = tensor("input_3_pad_type_0"), val = tensor("custom")]; + tensor input_3_pad_0 = const()[name = tensor("input_3_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor input_3 = conv(bias = decoder_conv_in_bias, dilations = var_46, groups = var_26, pad = input_3_pad_0, pad_type = input_3_pad_type_0, strides = var_44, weight = decoder_conv_in_weight, x = input_1)[name = tensor("input_3")]; + tensor reshape_0_shape_0 = const()[name = tensor("reshape_0_shape_0"), val = tensor([1, 32, 16, 128, 128])]; + tensor reshape_0 = reshape(shape = reshape_0_shape_0, x = input_3)[name = tensor("reshape_0")]; + tensor reduce_mean_0_axes_0 = const()[name = tensor("reduce_mean_0_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_0_keep_dims_0 = const()[name = tensor("reduce_mean_0_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_0 = reduce_mean(axes = reduce_mean_0_axes_0, keep_dims = reduce_mean_0_keep_dims_0, x = reshape_0)[name = tensor("reduce_mean_0")]; + tensor sub_0 = sub(x = reshape_0, y = reduce_mean_0)[name = tensor("sub_0")]; + tensor square_0 = square(x = sub_0)[name = tensor("square_0")]; + tensor reduce_mean_2_axes_0 = const()[name = tensor("reduce_mean_2_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_2_keep_dims_0 = const()[name = tensor("reduce_mean_2_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_2 = reduce_mean(axes = reduce_mean_2_axes_0, keep_dims = reduce_mean_2_keep_dims_0, x = square_0)[name = tensor("reduce_mean_2")]; + tensor add_0_y_0 = const()[name = tensor("add_0_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_0 = add(x = reduce_mean_2, y = add_0_y_0)[name = tensor("add_0")]; + tensor sqrt_0 = sqrt(x = add_0)[name = tensor("sqrt_0")]; + tensor real_div_0 = real_div(x = sub_0, y = sqrt_0)[name = tensor("real_div_0")]; + tensor reshape_1_shape_0 = const()[name = tensor("reshape_1_shape_0"), val = tensor([1, 512, 128, 128])]; + tensor reshape_1 = reshape(shape = reshape_1_shape_0, x = real_div_0)[name = tensor("reshape_1")]; + tensor add_1_mean_0 = const()[name = tensor("add_1_mean_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197873664)))]; + tensor add_1_variance_0 = const()[name = tensor("add_1_variance_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197875776)))]; + tensor add_1_gamma_0 = const()[name = tensor("add_1_gamma_0"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(197877888)))]; + tensor add_1_beta_0 = const()[name = tensor("add_1_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197880000)))]; + tensor add_1_epsilon_0 = const()[name = tensor("add_1_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_1 = batch_norm(beta = add_1_beta_0, epsilon = add_1_epsilon_0, gamma = add_1_gamma_0, mean = add_1_mean_0, variance = add_1_variance_0, x = reshape_1)[name = tensor("add_1")]; + tensor input_7 = silu(x = add_1)[name = tensor("input_7")]; + tensor var_65 = const()[name = tensor("op_65"), val = tensor([1, 1])]; + tensor var_67 = const()[name = tensor("op_67"), val = tensor([1, 1])]; + tensor input_9_pad_type_0 = const()[name = tensor("input_9_pad_type_0"), val = tensor("custom")]; + tensor input_9_pad_0 = const()[name = tensor("input_9_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor input_9 = conv(bias = decoder_mid_block_resnets_0_conv1_bias, dilations = var_67, groups = var_26, pad = input_9_pad_0, pad_type = input_9_pad_type_0, strides = var_65, weight = decoder_mid_block_resnets_0_conv1_weight, x = input_7)[name = tensor("input_9")]; + tensor reshape_4_shape_0 = const()[name = tensor("reshape_4_shape_0"), val = tensor([1, 32, 16, 128, 128])]; + tensor reshape_4 = reshape(shape = reshape_4_shape_0, x = input_9)[name = tensor("reshape_4")]; + tensor reduce_mean_3_axes_0 = const()[name = tensor("reduce_mean_3_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_3_keep_dims_0 = const()[name = tensor("reduce_mean_3_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_3 = reduce_mean(axes = reduce_mean_3_axes_0, keep_dims = reduce_mean_3_keep_dims_0, x = reshape_4)[name = tensor("reduce_mean_3")]; + tensor sub_2 = sub(x = reshape_4, y = reduce_mean_3)[name = tensor("sub_2")]; + tensor square_1 = square(x = sub_2)[name = tensor("square_1")]; + tensor reduce_mean_5_axes_0 = const()[name = tensor("reduce_mean_5_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_5_keep_dims_0 = const()[name = tensor("reduce_mean_5_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_5 = reduce_mean(axes = reduce_mean_5_axes_0, keep_dims = reduce_mean_5_keep_dims_0, x = square_1)[name = tensor("reduce_mean_5")]; + tensor add_2_y_0 = const()[name = tensor("add_2_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_2 = add(x = reduce_mean_5, y = add_2_y_0)[name = tensor("add_2")]; + tensor sqrt_1 = sqrt(x = add_2)[name = tensor("sqrt_1")]; + tensor real_div_1 = real_div(x = sub_2, y = sqrt_1)[name = tensor("real_div_1")]; + tensor reshape_5_shape_0 = const()[name = tensor("reshape_5_shape_0"), val = tensor([1, 512, 128, 128])]; + tensor reshape_5 = reshape(shape = reshape_5_shape_0, x = real_div_1)[name = tensor("reshape_5")]; + tensor add_3_gamma_0 = const()[name = tensor("add_3_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197882112)))]; + tensor add_3_beta_0 = const()[name = tensor("add_3_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197884224)))]; + tensor add_3_epsilon_0 = const()[name = tensor("add_3_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_3 = batch_norm(beta = add_3_beta_0, epsilon = add_3_epsilon_0, gamma = add_3_gamma_0, mean = add_1_mean_0, variance = add_1_variance_0, x = reshape_5)[name = tensor("add_3")]; + tensor input_13 = silu(x = add_3)[name = tensor("input_13")]; + tensor var_77 = const()[name = tensor("op_77"), val = 
tensor([1, 1])]; + tensor var_79 = const()[name = tensor("op_79"), val = tensor([1, 1])]; + tensor hidden_states_1_pad_type_0 = const()[name = tensor("hidden_states_1_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_1_pad_0 = const()[name = tensor("hidden_states_1_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor hidden_states_1 = conv(bias = decoder_mid_block_resnets_0_conv2_bias, dilations = var_79, groups = var_26, pad = hidden_states_1_pad_0, pad_type = hidden_states_1_pad_type_0, strides = var_77, weight = decoder_mid_block_resnets_0_conv2_weight, x = input_13)[name = tensor("hidden_states_1")]; + tensor var_82 = add(x = input_3, y = hidden_states_1)[name = tensor("op_82")]; + tensor reshape_8_shape_0 = const()[name = tensor("reshape_8_shape_0"), val = tensor([1, 32, 16, 16384])]; + tensor reshape_8 = reshape(shape = reshape_8_shape_0, x = var_82)[name = tensor("reshape_8")]; + tensor reduce_mean_6_axes_0 = const()[name = tensor("reduce_mean_6_axes_0"), val = tensor([2, 3])]; + tensor reduce_mean_6_keep_dims_0 = const()[name = tensor("reduce_mean_6_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_6 = reduce_mean(axes = reduce_mean_6_axes_0, keep_dims = reduce_mean_6_keep_dims_0, x = reshape_8)[name = tensor("reduce_mean_6")]; + tensor sub_4 = sub(x = reshape_8, y = reduce_mean_6)[name = tensor("sub_4")]; + tensor square_2 = square(x = sub_4)[name = tensor("square_2")]; + tensor reduce_mean_8_axes_0 = const()[name = tensor("reduce_mean_8_axes_0"), val = tensor([2, 3])]; + tensor reduce_mean_8_keep_dims_0 = const()[name = tensor("reduce_mean_8_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_8 = reduce_mean(axes = reduce_mean_8_axes_0, keep_dims = reduce_mean_8_keep_dims_0, x = square_2)[name = tensor("reduce_mean_8")]; + tensor add_4_y_0 = const()[name = tensor("add_4_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_4 = add(x = reduce_mean_8, y = add_4_y_0)[name = tensor("add_4")]; + tensor sqrt_2 = sqrt(x = add_4)[name = tensor("sqrt_2")]; + tensor real_div_2 = real_div(x = sub_4, y = sqrt_2)[name = tensor("real_div_2")]; + tensor reshape_9_shape_0 = const()[name = tensor("reshape_9_shape_0"), val = tensor([1, 512, 16384])]; + tensor reshape_9 = reshape(shape = reshape_9_shape_0, x = real_div_2)[name = tensor("reshape_9")]; + tensor reshape_10 = const()[name = tensor("reshape_10"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197886336)))]; + tensor mul_2 = mul(x = reshape_9, y = reshape_10)[name = tensor("mul_2")]; + tensor reshape_11 = const()[name = tensor("reshape_11"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197888448)))]; + tensor add_5 = add(x = mul_2, y = reshape_11)[name = tensor("add_5")]; + tensor input_19_perm_0 = const()[name = tensor("input_19_perm_0"), val = tensor([0, 2, 1])]; + tensor transpose_11 = transpose(perm = input_19_perm_0, x = add_5)[name = tensor("transpose_11")]; + tensor query_1 = linear(bias = decoder_mid_block_attentions_0_to_q_bias, weight = decoder_mid_block_attentions_0_to_q_weight, x = transpose_11)[name = tensor("query_1")]; + tensor key_1 = linear(bias = decoder_mid_block_attentions_0_to_k_bias, weight = decoder_mid_block_attentions_0_to_k_weight, x = transpose_11)[name = tensor("key_1")]; + tensor value_1 = linear(bias = decoder_mid_block_attentions_0_to_v_bias, weight = decoder_mid_block_attentions_0_to_v_weight, x = transpose_11)[name = tensor("value_1")]; + tensor var_123 = const()[name = tensor("op_123"), val = tensor([1, -1, 
1, 512])]; + tensor var_124 = reshape(shape = var_123, x = query_1)[name = tensor("op_124")]; + tensor var_126 = const()[name = tensor("op_126"), val = tensor([1, -1, 1, 512])]; + tensor var_127 = reshape(shape = var_126, x = key_1)[name = tensor("op_127")]; + tensor var_129 = const()[name = tensor("op_129"), val = tensor([1, -1, 1, 512])]; + tensor var_130 = reshape(shape = var_129, x = value_1)[name = tensor("op_130")]; + tensor value_perm_0 = const()[name = tensor("value_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor mul_3_y_0 = const()[name = tensor("mul_3_y_0"), val = tensor(0x1.6a09e6p-5)]; + tensor mul_3 = mul(x = var_124, y = mul_3_y_0)[name = tensor("mul_3")]; + tensor matmul_0_transpose_y_0 = const()[name = tensor("matmul_0_transpose_y_0"), val = tensor(true)]; + tensor matmul_0_transpose_x_0 = const()[name = tensor("matmul_0_transpose_x_0"), val = tensor(false)]; + tensor transpose_4_perm_0 = const()[name = tensor("transpose_4_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor transpose_5_perm_0 = const()[name = tensor("transpose_5_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor transpose_8 = transpose(perm = transpose_5_perm_0, x = var_127)[name = tensor("transpose_8")]; + tensor transpose_9 = transpose(perm = transpose_4_perm_0, x = mul_3)[name = tensor("transpose_9")]; + tensor matmul_0 = matmul(transpose_x = matmul_0_transpose_x_0, transpose_y = matmul_0_transpose_y_0, x = transpose_9, y = transpose_8)[name = tensor("matmul_0")]; + tensor softmax_0_axis_0 = const()[name = tensor("softmax_0_axis_0"), val = tensor(-1)]; + tensor softmax_0 = softmax(axis = softmax_0_axis_0, x = matmul_0)[name = tensor("softmax_0")]; + tensor hidden_states_7_transpose_x_0 = const()[name = tensor("hidden_states_7_transpose_x_0"), val = tensor(false)]; + tensor hidden_states_7_transpose_y_0 = const()[name = tensor("hidden_states_7_transpose_y_0"), val = tensor(false)]; + tensor transpose_10 = transpose(perm = value_perm_0, x = var_130)[name = tensor("transpose_10")]; + tensor hidden_states_7 = matmul(transpose_x = hidden_states_7_transpose_x_0, transpose_y = hidden_states_7_transpose_y_0, x = softmax_0, y = transpose_10)[name = tensor("hidden_states_7")]; + tensor var_133_perm_0 = const()[name = tensor("op_133_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_137 = const()[name = tensor("op_137"), val = tensor([1, -1, 512])]; + tensor transpose_7 = transpose(perm = var_133_perm_0, x = hidden_states_7)[name = tensor("transpose_7")]; + tensor hidden_states_9 = reshape(shape = var_137, x = transpose_7)[name = tensor("hidden_states_9")]; + tensor input_23 = linear(bias = decoder_mid_block_attentions_0_to_out_0_bias, weight = decoder_mid_block_attentions_0_to_out_0_weight, x = hidden_states_9)[name = tensor("input_23")]; + tensor var_144_perm_0 = const()[name = tensor("op_144_perm_0"), val = tensor([0, -1, -2])]; + tensor var_145 = const()[name = tensor("op_145"), val = tensor([1, 512, 128, 128])]; + tensor transpose_6 = transpose(perm = var_144_perm_0, x = input_23)[name = tensor("transpose_6")]; + tensor hidden_states_13 = reshape(shape = var_145, x = transpose_6)[name = tensor("hidden_states_13")]; + tensor hidden_states_15 = add(x = hidden_states_13, y = var_82)[name = tensor("hidden_states_15")]; + tensor reshape_12_shape_0 = const()[name = tensor("reshape_12_shape_0"), val = tensor([1, 32, 16, 128, 128])]; + tensor reshape_12 = reshape(shape = reshape_12_shape_0, x = hidden_states_15)[name = tensor("reshape_12")]; + tensor reduce_mean_9_axes_0 = const()[name = tensor("reduce_mean_9_axes_0"), val 
= tensor([2, 3, 4])]; + tensor reduce_mean_9_keep_dims_0 = const()[name = tensor("reduce_mean_9_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_9 = reduce_mean(axes = reduce_mean_9_axes_0, keep_dims = reduce_mean_9_keep_dims_0, x = reshape_12)[name = tensor("reduce_mean_9")]; + tensor sub_6 = sub(x = reshape_12, y = reduce_mean_9)[name = tensor("sub_6")]; + tensor square_3 = square(x = sub_6)[name = tensor("square_3")]; + tensor reduce_mean_11_axes_0 = const()[name = tensor("reduce_mean_11_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_11_keep_dims_0 = const()[name = tensor("reduce_mean_11_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_11 = reduce_mean(axes = reduce_mean_11_axes_0, keep_dims = reduce_mean_11_keep_dims_0, x = square_3)[name = tensor("reduce_mean_11")]; + tensor add_6_y_0 = const()[name = tensor("add_6_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_6 = add(x = reduce_mean_11, y = add_6_y_0)[name = tensor("add_6")]; + tensor sqrt_3 = sqrt(x = add_6)[name = tensor("sqrt_3")]; + tensor real_div_3 = real_div(x = sub_6, y = sqrt_3)[name = tensor("real_div_3")]; + tensor reshape_13_shape_0 = const()[name = tensor("reshape_13_shape_0"), val = tensor([1, 512, 128, 128])]; + tensor reshape_13 = reshape(shape = reshape_13_shape_0, x = real_div_3)[name = tensor("reshape_13")]; + tensor add_7_gamma_0 = const()[name = tensor("add_7_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197890560)))]; + tensor add_7_beta_0 = const()[name = tensor("add_7_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197892672)))]; + tensor add_7_epsilon_0 = const()[name = tensor("add_7_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_7 = batch_norm(beta = add_7_beta_0, epsilon = add_7_epsilon_0, gamma = add_7_gamma_0, mean = add_1_mean_0, variance = add_1_variance_0, x = reshape_13)[name = tensor("add_7")]; + tensor input_29 = silu(x = add_7)[name = tensor("input_29")]; + tensor var_160 = const()[name = tensor("op_160"), val = tensor([1, 1])]; + tensor var_162 = const()[name = tensor("op_162"), val = tensor([1, 1])]; + tensor input_31_pad_type_0 = const()[name = tensor("input_31_pad_type_0"), val = tensor("custom")]; + tensor input_31_pad_0 = const()[name = tensor("input_31_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor input_31 = conv(bias = decoder_mid_block_resnets_1_conv1_bias, dilations = var_162, groups = var_26, pad = input_31_pad_0, pad_type = input_31_pad_type_0, strides = var_160, weight = decoder_mid_block_resnets_1_conv1_weight, x = input_29)[name = tensor("input_31")]; + tensor reshape_16_shape_0 = const()[name = tensor("reshape_16_shape_0"), val = tensor([1, 32, 16, 128, 128])]; + tensor reshape_16 = reshape(shape = reshape_16_shape_0, x = input_31)[name = tensor("reshape_16")]; + tensor reduce_mean_12_axes_0 = const()[name = tensor("reduce_mean_12_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_12_keep_dims_0 = const()[name = tensor("reduce_mean_12_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_12 = reduce_mean(axes = reduce_mean_12_axes_0, keep_dims = reduce_mean_12_keep_dims_0, x = reshape_16)[name = tensor("reduce_mean_12")]; + tensor sub_8 = sub(x = reshape_16, y = reduce_mean_12)[name = tensor("sub_8")]; + tensor square_4 = square(x = sub_8)[name = tensor("square_4")]; + tensor reduce_mean_14_axes_0 = const()[name = tensor("reduce_mean_14_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_14_keep_dims_0 = const()[name = 
tensor("reduce_mean_14_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_14 = reduce_mean(axes = reduce_mean_14_axes_0, keep_dims = reduce_mean_14_keep_dims_0, x = square_4)[name = tensor("reduce_mean_14")]; + tensor add_8_y_0 = const()[name = tensor("add_8_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_8 = add(x = reduce_mean_14, y = add_8_y_0)[name = tensor("add_8")]; + tensor sqrt_4 = sqrt(x = add_8)[name = tensor("sqrt_4")]; + tensor real_div_4 = real_div(x = sub_8, y = sqrt_4)[name = tensor("real_div_4")]; + tensor reshape_17_shape_0 = const()[name = tensor("reshape_17_shape_0"), val = tensor([1, 512, 128, 128])]; + tensor reshape_17 = reshape(shape = reshape_17_shape_0, x = real_div_4)[name = tensor("reshape_17")]; + tensor add_9_gamma_0 = const()[name = tensor("add_9_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197894784)))]; + tensor add_9_beta_0 = const()[name = tensor("add_9_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197896896)))]; + tensor add_9_epsilon_0 = const()[name = tensor("add_9_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_9 = batch_norm(beta = add_9_beta_0, epsilon = add_9_epsilon_0, gamma = add_9_gamma_0, mean = add_1_mean_0, variance = add_1_variance_0, x = reshape_17)[name = tensor("add_9")]; + tensor input_35 = silu(x = add_9)[name = tensor("input_35")]; + tensor var_172 = const()[name = tensor("op_172"), val = tensor([1, 1])]; + tensor var_174 = const()[name = tensor("op_174"), val = tensor([1, 1])]; + tensor hidden_states_17_pad_type_0 = const()[name = tensor("hidden_states_17_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_17_pad_0 = const()[name = tensor("hidden_states_17_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor hidden_states_17 = conv(bias = decoder_mid_block_resnets_1_conv2_bias, dilations = var_174, groups = var_26, pad = hidden_states_17_pad_0, pad_type = hidden_states_17_pad_type_0, strides = var_172, weight = decoder_mid_block_resnets_1_conv2_weight, x = input_35)[name = tensor("hidden_states_17")]; + tensor var_177 = add(x = hidden_states_15, y = hidden_states_17)[name = tensor("op_177")]; + tensor reshape_20_shape_0 = const()[name = tensor("reshape_20_shape_0"), val = tensor([1, 32, 16, 128, 128])]; + tensor reshape_20 = reshape(shape = reshape_20_shape_0, x = var_177)[name = tensor("reshape_20")]; + tensor reduce_mean_15_axes_0 = const()[name = tensor("reduce_mean_15_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_15_keep_dims_0 = const()[name = tensor("reduce_mean_15_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_15 = reduce_mean(axes = reduce_mean_15_axes_0, keep_dims = reduce_mean_15_keep_dims_0, x = reshape_20)[name = tensor("reduce_mean_15")]; + tensor sub_10 = sub(x = reshape_20, y = reduce_mean_15)[name = tensor("sub_10")]; + tensor square_5 = square(x = sub_10)[name = tensor("square_5")]; + tensor reduce_mean_17_axes_0 = const()[name = tensor("reduce_mean_17_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_17_keep_dims_0 = const()[name = tensor("reduce_mean_17_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_17 = reduce_mean(axes = reduce_mean_17_axes_0, keep_dims = reduce_mean_17_keep_dims_0, x = square_5)[name = tensor("reduce_mean_17")]; + tensor add_10_y_0 = const()[name = tensor("add_10_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_10 = add(x = reduce_mean_17, y = add_10_y_0)[name = tensor("add_10")]; + tensor sqrt_5 = sqrt(x = add_10)[name = 
tensor("sqrt_5")]; + tensor real_div_5 = real_div(x = sub_10, y = sqrt_5)[name = tensor("real_div_5")]; + tensor reshape_21_shape_0 = const()[name = tensor("reshape_21_shape_0"), val = tensor([1, 512, 128, 128])]; + tensor reshape_21 = reshape(shape = reshape_21_shape_0, x = real_div_5)[name = tensor("reshape_21")]; + tensor add_11_gamma_0 = const()[name = tensor("add_11_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197899008)))]; + tensor add_11_beta_0 = const()[name = tensor("add_11_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197901120)))]; + tensor add_11_epsilon_0 = const()[name = tensor("add_11_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_11 = batch_norm(beta = add_11_beta_0, epsilon = add_11_epsilon_0, gamma = add_11_gamma_0, mean = add_1_mean_0, variance = add_1_variance_0, x = reshape_21)[name = tensor("add_11")]; + tensor input_43 = silu(x = add_11)[name = tensor("input_43")]; + tensor var_199 = const()[name = tensor("op_199"), val = tensor([1, 1])]; + tensor var_201 = const()[name = tensor("op_201"), val = tensor([1, 1])]; + tensor input_45_pad_type_0 = const()[name = tensor("input_45_pad_type_0"), val = tensor("custom")]; + tensor input_45_pad_0 = const()[name = tensor("input_45_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor input_45 = conv(bias = decoder_up_blocks_0_resnets_0_conv1_bias, dilations = var_201, groups = var_26, pad = input_45_pad_0, pad_type = input_45_pad_type_0, strides = var_199, weight = decoder_up_blocks_0_resnets_0_conv1_weight, x = input_43)[name = tensor("input_45")]; + tensor reshape_24_shape_0 = const()[name = tensor("reshape_24_shape_0"), val = tensor([1, 32, 16, 128, 128])]; + tensor reshape_24 = reshape(shape = reshape_24_shape_0, x = input_45)[name = tensor("reshape_24")]; + tensor reduce_mean_18_axes_0 = const()[name = tensor("reduce_mean_18_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_18_keep_dims_0 = const()[name = tensor("reduce_mean_18_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_18 = reduce_mean(axes = reduce_mean_18_axes_0, keep_dims = reduce_mean_18_keep_dims_0, x = reshape_24)[name = tensor("reduce_mean_18")]; + tensor sub_12 = sub(x = reshape_24, y = reduce_mean_18)[name = tensor("sub_12")]; + tensor square_6 = square(x = sub_12)[name = tensor("square_6")]; + tensor reduce_mean_20_axes_0 = const()[name = tensor("reduce_mean_20_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_20_keep_dims_0 = const()[name = tensor("reduce_mean_20_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_20 = reduce_mean(axes = reduce_mean_20_axes_0, keep_dims = reduce_mean_20_keep_dims_0, x = square_6)[name = tensor("reduce_mean_20")]; + tensor add_12_y_0 = const()[name = tensor("add_12_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_12 = add(x = reduce_mean_20, y = add_12_y_0)[name = tensor("add_12")]; + tensor sqrt_6 = sqrt(x = add_12)[name = tensor("sqrt_6")]; + tensor real_div_6 = real_div(x = sub_12, y = sqrt_6)[name = tensor("real_div_6")]; + tensor reshape_25_shape_0 = const()[name = tensor("reshape_25_shape_0"), val = tensor([1, 512, 128, 128])]; + tensor reshape_25 = reshape(shape = reshape_25_shape_0, x = real_div_6)[name = tensor("reshape_25")]; + tensor add_13_gamma_0 = const()[name = tensor("add_13_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197903232)))]; + tensor add_13_beta_0 = const()[name = tensor("add_13_beta_0"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197905344)))]; + tensor add_13_epsilon_0 = const()[name = tensor("add_13_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_13 = batch_norm(beta = add_13_beta_0, epsilon = add_13_epsilon_0, gamma = add_13_gamma_0, mean = add_1_mean_0, variance = add_1_variance_0, x = reshape_25)[name = tensor("add_13")]; + tensor input_49 = silu(x = add_13)[name = tensor("input_49")]; + tensor var_211 = const()[name = tensor("op_211"), val = tensor([1, 1])]; + tensor var_213 = const()[name = tensor("op_213"), val = tensor([1, 1])]; + tensor hidden_states_19_pad_type_0 = const()[name = tensor("hidden_states_19_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_19_pad_0 = const()[name = tensor("hidden_states_19_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor hidden_states_19 = conv(bias = decoder_up_blocks_0_resnets_0_conv2_bias, dilations = var_213, groups = var_26, pad = hidden_states_19_pad_0, pad_type = hidden_states_19_pad_type_0, strides = var_211, weight = decoder_up_blocks_0_resnets_0_conv2_weight, x = input_49)[name = tensor("hidden_states_19")]; + tensor var_216 = add(x = var_177, y = hidden_states_19)[name = tensor("op_216")]; + tensor reshape_28_shape_0 = const()[name = tensor("reshape_28_shape_0"), val = tensor([1, 32, 16, 128, 128])]; + tensor reshape_28 = reshape(shape = reshape_28_shape_0, x = var_216)[name = tensor("reshape_28")]; + tensor reduce_mean_21_axes_0 = const()[name = tensor("reduce_mean_21_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_21_keep_dims_0 = const()[name = tensor("reduce_mean_21_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_21 = reduce_mean(axes = reduce_mean_21_axes_0, keep_dims = reduce_mean_21_keep_dims_0, x = reshape_28)[name = tensor("reduce_mean_21")]; + tensor sub_14 = sub(x = reshape_28, y = reduce_mean_21)[name = tensor("sub_14")]; + tensor square_7 = square(x = sub_14)[name = tensor("square_7")]; + tensor reduce_mean_23_axes_0 = const()[name = tensor("reduce_mean_23_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_23_keep_dims_0 = const()[name = tensor("reduce_mean_23_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_23 = reduce_mean(axes = reduce_mean_23_axes_0, keep_dims = reduce_mean_23_keep_dims_0, x = square_7)[name = tensor("reduce_mean_23")]; + tensor add_14_y_0 = const()[name = tensor("add_14_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_14 = add(x = reduce_mean_23, y = add_14_y_0)[name = tensor("add_14")]; + tensor sqrt_7 = sqrt(x = add_14)[name = tensor("sqrt_7")]; + tensor real_div_7 = real_div(x = sub_14, y = sqrt_7)[name = tensor("real_div_7")]; + tensor reshape_29_shape_0 = const()[name = tensor("reshape_29_shape_0"), val = tensor([1, 512, 128, 128])]; + tensor reshape_29 = reshape(shape = reshape_29_shape_0, x = real_div_7)[name = tensor("reshape_29")]; + tensor add_15_gamma_0 = const()[name = tensor("add_15_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197907456)))]; + tensor add_15_beta_0 = const()[name = tensor("add_15_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197909568)))]; + tensor add_15_epsilon_0 = const()[name = tensor("add_15_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_15 = batch_norm(beta = add_15_beta_0, epsilon = add_15_epsilon_0, gamma = add_15_gamma_0, mean = add_1_mean_0, variance = add_1_variance_0, x = reshape_29)[name = tensor("add_15")]; + tensor input_57 = silu(x = 
add_15)[name = tensor("input_57")]; + tensor var_229 = const()[name = tensor("op_229"), val = tensor([1, 1])]; + tensor var_231 = const()[name = tensor("op_231"), val = tensor([1, 1])]; + tensor input_59_pad_type_0 = const()[name = tensor("input_59_pad_type_0"), val = tensor("custom")]; + tensor input_59_pad_0 = const()[name = tensor("input_59_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor input_59 = conv(bias = decoder_up_blocks_0_resnets_1_conv1_bias, dilations = var_231, groups = var_26, pad = input_59_pad_0, pad_type = input_59_pad_type_0, strides = var_229, weight = decoder_up_blocks_0_resnets_1_conv1_weight, x = input_57)[name = tensor("input_59")]; + tensor reshape_32_shape_0 = const()[name = tensor("reshape_32_shape_0"), val = tensor([1, 32, 16, 128, 128])]; + tensor reshape_32 = reshape(shape = reshape_32_shape_0, x = input_59)[name = tensor("reshape_32")]; + tensor reduce_mean_24_axes_0 = const()[name = tensor("reduce_mean_24_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_24_keep_dims_0 = const()[name = tensor("reduce_mean_24_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_24 = reduce_mean(axes = reduce_mean_24_axes_0, keep_dims = reduce_mean_24_keep_dims_0, x = reshape_32)[name = tensor("reduce_mean_24")]; + tensor sub_16 = sub(x = reshape_32, y = reduce_mean_24)[name = tensor("sub_16")]; + tensor square_8 = square(x = sub_16)[name = tensor("square_8")]; + tensor reduce_mean_26_axes_0 = const()[name = tensor("reduce_mean_26_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_26_keep_dims_0 = const()[name = tensor("reduce_mean_26_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_26 = reduce_mean(axes = reduce_mean_26_axes_0, keep_dims = reduce_mean_26_keep_dims_0, x = square_8)[name = tensor("reduce_mean_26")]; + tensor add_16_y_0 = const()[name = tensor("add_16_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_16 = add(x = reduce_mean_26, y = add_16_y_0)[name = tensor("add_16")]; + tensor sqrt_8 = sqrt(x = add_16)[name = tensor("sqrt_8")]; + tensor real_div_8 = real_div(x = sub_16, y = sqrt_8)[name = tensor("real_div_8")]; + tensor reshape_33_shape_0 = const()[name = tensor("reshape_33_shape_0"), val = tensor([1, 512, 128, 128])]; + tensor reshape_33 = reshape(shape = reshape_33_shape_0, x = real_div_8)[name = tensor("reshape_33")]; + tensor add_17_gamma_0 = const()[name = tensor("add_17_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197911680)))]; + tensor add_17_beta_0 = const()[name = tensor("add_17_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197913792)))]; + tensor add_17_epsilon_0 = const()[name = tensor("add_17_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_17 = batch_norm(beta = add_17_beta_0, epsilon = add_17_epsilon_0, gamma = add_17_gamma_0, mean = add_1_mean_0, variance = add_1_variance_0, x = reshape_33)[name = tensor("add_17")]; + tensor input_63 = silu(x = add_17)[name = tensor("input_63")]; + tensor var_241 = const()[name = tensor("op_241"), val = tensor([1, 1])]; + tensor var_243 = const()[name = tensor("op_243"), val = tensor([1, 1])]; + tensor hidden_states_21_pad_type_0 = const()[name = tensor("hidden_states_21_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_21_pad_0 = const()[name = tensor("hidden_states_21_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor hidden_states_21 = conv(bias = decoder_up_blocks_0_resnets_1_conv2_bias, dilations = var_243, groups = var_26, pad = hidden_states_21_pad_0, pad_type = 
hidden_states_21_pad_type_0, strides = var_241, weight = decoder_up_blocks_0_resnets_1_conv2_weight, x = input_63)[name = tensor("hidden_states_21")]; + tensor var_246 = add(x = var_216, y = hidden_states_21)[name = tensor("op_246")]; + tensor reshape_36_shape_0 = const()[name = tensor("reshape_36_shape_0"), val = tensor([1, 32, 16, 128, 128])]; + tensor reshape_36 = reshape(shape = reshape_36_shape_0, x = var_246)[name = tensor("reshape_36")]; + tensor reduce_mean_27_axes_0 = const()[name = tensor("reduce_mean_27_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_27_keep_dims_0 = const()[name = tensor("reduce_mean_27_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_27 = reduce_mean(axes = reduce_mean_27_axes_0, keep_dims = reduce_mean_27_keep_dims_0, x = reshape_36)[name = tensor("reduce_mean_27")]; + tensor sub_18 = sub(x = reshape_36, y = reduce_mean_27)[name = tensor("sub_18")]; + tensor square_9 = square(x = sub_18)[name = tensor("square_9")]; + tensor reduce_mean_29_axes_0 = const()[name = tensor("reduce_mean_29_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_29_keep_dims_0 = const()[name = tensor("reduce_mean_29_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_29 = reduce_mean(axes = reduce_mean_29_axes_0, keep_dims = reduce_mean_29_keep_dims_0, x = square_9)[name = tensor("reduce_mean_29")]; + tensor add_18_y_0 = const()[name = tensor("add_18_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_18 = add(x = reduce_mean_29, y = add_18_y_0)[name = tensor("add_18")]; + tensor sqrt_9 = sqrt(x = add_18)[name = tensor("sqrt_9")]; + tensor real_div_9 = real_div(x = sub_18, y = sqrt_9)[name = tensor("real_div_9")]; + tensor reshape_37_shape_0 = const()[name = tensor("reshape_37_shape_0"), val = tensor([1, 512, 128, 128])]; + tensor reshape_37 = reshape(shape = reshape_37_shape_0, x = real_div_9)[name = tensor("reshape_37")]; + tensor add_19_gamma_0 = const()[name = tensor("add_19_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197915904)))]; + tensor add_19_beta_0 = const()[name = tensor("add_19_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197918016)))]; + tensor add_19_epsilon_0 = const()[name = tensor("add_19_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_19 = batch_norm(beta = add_19_beta_0, epsilon = add_19_epsilon_0, gamma = add_19_gamma_0, mean = add_1_mean_0, variance = add_1_variance_0, x = reshape_37)[name = tensor("add_19")]; + tensor input_71 = silu(x = add_19)[name = tensor("input_71")]; + tensor var_259 = const()[name = tensor("op_259"), val = tensor([1, 1])]; + tensor var_261 = const()[name = tensor("op_261"), val = tensor([1, 1])]; + tensor input_73_pad_type_0 = const()[name = tensor("input_73_pad_type_0"), val = tensor("custom")]; + tensor input_73_pad_0 = const()[name = tensor("input_73_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor input_73 = conv(bias = decoder_up_blocks_0_resnets_2_conv1_bias, dilations = var_261, groups = var_26, pad = input_73_pad_0, pad_type = input_73_pad_type_0, strides = var_259, weight = decoder_up_blocks_0_resnets_2_conv1_weight, x = input_71)[name = tensor("input_73")]; + tensor reshape_40_shape_0 = const()[name = tensor("reshape_40_shape_0"), val = tensor([1, 32, 16, 128, 128])]; + tensor reshape_40 = reshape(shape = reshape_40_shape_0, x = input_73)[name = tensor("reshape_40")]; + tensor reduce_mean_30_axes_0 = const()[name = tensor("reduce_mean_30_axes_0"), val = tensor([2, 3, 4])]; + tensor 
reduce_mean_30_keep_dims_0 = const()[name = tensor("reduce_mean_30_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_30 = reduce_mean(axes = reduce_mean_30_axes_0, keep_dims = reduce_mean_30_keep_dims_0, x = reshape_40)[name = tensor("reduce_mean_30")]; + tensor sub_20 = sub(x = reshape_40, y = reduce_mean_30)[name = tensor("sub_20")]; + tensor square_10 = square(x = sub_20)[name = tensor("square_10")]; + tensor reduce_mean_32_axes_0 = const()[name = tensor("reduce_mean_32_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_32_keep_dims_0 = const()[name = tensor("reduce_mean_32_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_32 = reduce_mean(axes = reduce_mean_32_axes_0, keep_dims = reduce_mean_32_keep_dims_0, x = square_10)[name = tensor("reduce_mean_32")]; + tensor add_20_y_0 = const()[name = tensor("add_20_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_20 = add(x = reduce_mean_32, y = add_20_y_0)[name = tensor("add_20")]; + tensor sqrt_10 = sqrt(x = add_20)[name = tensor("sqrt_10")]; + tensor real_div_10 = real_div(x = sub_20, y = sqrt_10)[name = tensor("real_div_10")]; + tensor reshape_41_shape_0 = const()[name = tensor("reshape_41_shape_0"), val = tensor([1, 512, 128, 128])]; + tensor reshape_41 = reshape(shape = reshape_41_shape_0, x = real_div_10)[name = tensor("reshape_41")]; + tensor add_21_gamma_0 = const()[name = tensor("add_21_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197920128)))]; + tensor add_21_beta_0 = const()[name = tensor("add_21_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197922240)))]; + tensor add_21_epsilon_0 = const()[name = tensor("add_21_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_21 = batch_norm(beta = add_21_beta_0, epsilon = add_21_epsilon_0, gamma = add_21_gamma_0, mean = add_1_mean_0, variance = add_1_variance_0, x = reshape_41)[name = tensor("add_21")]; + tensor input_77 = silu(x = add_21)[name = tensor("input_77")]; + tensor var_271 = const()[name = tensor("op_271"), val = tensor([1, 1])]; + tensor var_273 = const()[name = tensor("op_273"), val = tensor([1, 1])]; + tensor hidden_states_23_pad_type_0 = const()[name = tensor("hidden_states_23_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_23_pad_0 = const()[name = tensor("hidden_states_23_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor hidden_states_23 = conv(bias = decoder_up_blocks_0_resnets_2_conv2_bias, dilations = var_273, groups = var_26, pad = hidden_states_23_pad_0, pad_type = hidden_states_23_pad_type_0, strides = var_271, weight = decoder_up_blocks_0_resnets_2_conv2_weight, x = input_77)[name = tensor("hidden_states_23")]; + tensor var_276 = add(x = var_246, y = hidden_states_23)[name = tensor("op_276")]; + tensor input_81_scale_factor_height_0 = const()[name = tensor("input_81_scale_factor_height_0"), val = tensor(0x1p+1)]; + tensor input_81_scale_factor_width_0 = const()[name = tensor("input_81_scale_factor_width_0"), val = tensor(0x1p+1)]; + tensor input_81 = upsample_nearest_neighbor(scale_factor_height = input_81_scale_factor_height_0, scale_factor_width = input_81_scale_factor_width_0, x = var_276)[name = tensor("input_81")]; + tensor var_284 = const()[name = tensor("op_284"), val = tensor([1, 1])]; + tensor var_286 = const()[name = tensor("op_286"), val = tensor([1, 1])]; + tensor input_83_pad_type_0 = const()[name = tensor("input_83_pad_type_0"), val = tensor("custom")]; + tensor input_83_pad_0 = const()[name = 
tensor("input_83_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor input_83 = conv(bias = decoder_up_blocks_0_upsamplers_0_conv_bias, dilations = var_286, groups = var_26, pad = input_83_pad_0, pad_type = input_83_pad_type_0, strides = var_284, weight = decoder_up_blocks_0_upsamplers_0_conv_weight, x = input_81)[name = tensor("input_83")]; + tensor reshape_44_shape_0 = const()[name = tensor("reshape_44_shape_0"), val = tensor([1, 32, 16, 256, 256])]; + tensor reshape_44 = reshape(shape = reshape_44_shape_0, x = input_83)[name = tensor("reshape_44")]; + tensor reduce_mean_33_axes_0 = const()[name = tensor("reduce_mean_33_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_33_keep_dims_0 = const()[name = tensor("reduce_mean_33_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_33 = reduce_mean(axes = reduce_mean_33_axes_0, keep_dims = reduce_mean_33_keep_dims_0, x = reshape_44)[name = tensor("reduce_mean_33")]; + tensor sub_22 = sub(x = reshape_44, y = reduce_mean_33)[name = tensor("sub_22")]; + tensor square_11 = square(x = sub_22)[name = tensor("square_11")]; + tensor reduce_mean_35_axes_0 = const()[name = tensor("reduce_mean_35_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_35_keep_dims_0 = const()[name = tensor("reduce_mean_35_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_35 = reduce_mean(axes = reduce_mean_35_axes_0, keep_dims = reduce_mean_35_keep_dims_0, x = square_11)[name = tensor("reduce_mean_35")]; + tensor add_22_y_0 = const()[name = tensor("add_22_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_22 = add(x = reduce_mean_35, y = add_22_y_0)[name = tensor("add_22")]; + tensor sqrt_11 = sqrt(x = add_22)[name = tensor("sqrt_11")]; + tensor real_div_11 = real_div(x = sub_22, y = sqrt_11)[name = tensor("real_div_11")]; + tensor reshape_45_shape_0 = const()[name = tensor("reshape_45_shape_0"), val = tensor([1, 512, 256, 256])]; + tensor reshape_45 = reshape(shape = reshape_45_shape_0, x = real_div_11)[name = tensor("reshape_45")]; + tensor add_23_gamma_0 = const()[name = tensor("add_23_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197924352)))]; + tensor add_23_beta_0 = const()[name = tensor("add_23_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197926464)))]; + tensor add_23_epsilon_0 = const()[name = tensor("add_23_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_23 = batch_norm(beta = add_23_beta_0, epsilon = add_23_epsilon_0, gamma = add_23_gamma_0, mean = add_1_mean_0, variance = add_1_variance_0, x = reshape_45)[name = tensor("add_23")]; + tensor input_87 = silu(x = add_23)[name = tensor("input_87")]; + tensor var_307 = const()[name = tensor("op_307"), val = tensor([1, 1])]; + tensor var_309 = const()[name = tensor("op_309"), val = tensor([1, 1])]; + tensor input_89_pad_type_0 = const()[name = tensor("input_89_pad_type_0"), val = tensor("custom")]; + tensor input_89_pad_0 = const()[name = tensor("input_89_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor input_89 = conv(bias = decoder_up_blocks_1_resnets_0_conv1_bias, dilations = var_309, groups = var_26, pad = input_89_pad_0, pad_type = input_89_pad_type_0, strides = var_307, weight = decoder_up_blocks_1_resnets_0_conv1_weight, x = input_87)[name = tensor("input_89")]; + tensor reshape_48_shape_0 = const()[name = tensor("reshape_48_shape_0"), val = tensor([1, 32, 16, 256, 256])]; + tensor reshape_48 = reshape(shape = reshape_48_shape_0, x = input_89)[name = tensor("reshape_48")]; + tensor 
reduce_mean_36_axes_0 = const()[name = tensor("reduce_mean_36_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_36_keep_dims_0 = const()[name = tensor("reduce_mean_36_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_36 = reduce_mean(axes = reduce_mean_36_axes_0, keep_dims = reduce_mean_36_keep_dims_0, x = reshape_48)[name = tensor("reduce_mean_36")]; + tensor sub_24 = sub(x = reshape_48, y = reduce_mean_36)[name = tensor("sub_24")]; + tensor square_12 = square(x = sub_24)[name = tensor("square_12")]; + tensor reduce_mean_38_axes_0 = const()[name = tensor("reduce_mean_38_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_38_keep_dims_0 = const()[name = tensor("reduce_mean_38_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_38 = reduce_mean(axes = reduce_mean_38_axes_0, keep_dims = reduce_mean_38_keep_dims_0, x = square_12)[name = tensor("reduce_mean_38")]; + tensor add_24_y_0 = const()[name = tensor("add_24_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_24 = add(x = reduce_mean_38, y = add_24_y_0)[name = tensor("add_24")]; + tensor sqrt_12 = sqrt(x = add_24)[name = tensor("sqrt_12")]; + tensor real_div_12 = real_div(x = sub_24, y = sqrt_12)[name = tensor("real_div_12")]; + tensor reshape_49_shape_0 = const()[name = tensor("reshape_49_shape_0"), val = tensor([1, 512, 256, 256])]; + tensor reshape_49 = reshape(shape = reshape_49_shape_0, x = real_div_12)[name = tensor("reshape_49")]; + tensor add_25_gamma_0 = const()[name = tensor("add_25_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197928576)))]; + tensor add_25_beta_0 = const()[name = tensor("add_25_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197930688)))]; + tensor add_25_epsilon_0 = const()[name = tensor("add_25_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_25 = batch_norm(beta = add_25_beta_0, epsilon = add_25_epsilon_0, gamma = add_25_gamma_0, mean = add_1_mean_0, variance = add_1_variance_0, x = reshape_49)[name = tensor("add_25")]; + tensor input_93 = silu(x = add_25)[name = tensor("input_93")]; + tensor var_319 = const()[name = tensor("op_319"), val = tensor([1, 1])]; + tensor var_321 = const()[name = tensor("op_321"), val = tensor([1, 1])]; + tensor hidden_states_27_pad_type_0 = const()[name = tensor("hidden_states_27_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_27_pad_0 = const()[name = tensor("hidden_states_27_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor hidden_states_27 = conv(bias = decoder_up_blocks_1_resnets_0_conv2_bias, dilations = var_321, groups = var_26, pad = hidden_states_27_pad_0, pad_type = hidden_states_27_pad_type_0, strides = var_319, weight = decoder_up_blocks_1_resnets_0_conv2_weight, x = input_93)[name = tensor("hidden_states_27")]; + tensor var_324 = add(x = input_83, y = hidden_states_27)[name = tensor("op_324")]; + tensor reshape_52_shape_0 = const()[name = tensor("reshape_52_shape_0"), val = tensor([1, 32, 16, 256, 256])]; + tensor reshape_52 = reshape(shape = reshape_52_shape_0, x = var_324)[name = tensor("reshape_52")]; + tensor reduce_mean_39_axes_0 = const()[name = tensor("reduce_mean_39_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_39_keep_dims_0 = const()[name = tensor("reduce_mean_39_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_39 = reduce_mean(axes = reduce_mean_39_axes_0, keep_dims = reduce_mean_39_keep_dims_0, x = reshape_52)[name = tensor("reduce_mean_39")]; + tensor sub_26 = sub(x = reshape_52, y = 
reduce_mean_39)[name = tensor("sub_26")]; + tensor square_13 = square(x = sub_26)[name = tensor("square_13")]; + tensor reduce_mean_41_axes_0 = const()[name = tensor("reduce_mean_41_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_41_keep_dims_0 = const()[name = tensor("reduce_mean_41_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_41 = reduce_mean(axes = reduce_mean_41_axes_0, keep_dims = reduce_mean_41_keep_dims_0, x = square_13)[name = tensor("reduce_mean_41")]; + tensor add_26_y_0 = const()[name = tensor("add_26_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_26 = add(x = reduce_mean_41, y = add_26_y_0)[name = tensor("add_26")]; + tensor sqrt_13 = sqrt(x = add_26)[name = tensor("sqrt_13")]; + tensor real_div_13 = real_div(x = sub_26, y = sqrt_13)[name = tensor("real_div_13")]; + tensor reshape_53_shape_0 = const()[name = tensor("reshape_53_shape_0"), val = tensor([1, 512, 256, 256])]; + tensor reshape_53 = reshape(shape = reshape_53_shape_0, x = real_div_13)[name = tensor("reshape_53")]; + tensor add_27_gamma_0 = const()[name = tensor("add_27_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197932800)))]; + tensor add_27_beta_0 = const()[name = tensor("add_27_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197934912)))]; + tensor add_27_epsilon_0 = const()[name = tensor("add_27_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_27 = batch_norm(beta = add_27_beta_0, epsilon = add_27_epsilon_0, gamma = add_27_gamma_0, mean = add_1_mean_0, variance = add_1_variance_0, x = reshape_53)[name = tensor("add_27")]; + tensor input_101 = silu(x = add_27)[name = tensor("input_101")]; + tensor var_337 = const()[name = tensor("op_337"), val = tensor([1, 1])]; + tensor var_339 = const()[name = tensor("op_339"), val = tensor([1, 1])]; + tensor input_103_pad_type_0 = const()[name = tensor("input_103_pad_type_0"), val = tensor("custom")]; + tensor input_103_pad_0 = const()[name = tensor("input_103_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor input_103 = conv(bias = decoder_up_blocks_1_resnets_1_conv1_bias, dilations = var_339, groups = var_26, pad = input_103_pad_0, pad_type = input_103_pad_type_0, strides = var_337, weight = decoder_up_blocks_1_resnets_1_conv1_weight, x = input_101)[name = tensor("input_103")]; + tensor reshape_56_shape_0 = const()[name = tensor("reshape_56_shape_0"), val = tensor([1, 32, 16, 256, 256])]; + tensor reshape_56 = reshape(shape = reshape_56_shape_0, x = input_103)[name = tensor("reshape_56")]; + tensor reduce_mean_42_axes_0 = const()[name = tensor("reduce_mean_42_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_42_keep_dims_0 = const()[name = tensor("reduce_mean_42_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_42 = reduce_mean(axes = reduce_mean_42_axes_0, keep_dims = reduce_mean_42_keep_dims_0, x = reshape_56)[name = tensor("reduce_mean_42")]; + tensor sub_28 = sub(x = reshape_56, y = reduce_mean_42)[name = tensor("sub_28")]; + tensor square_14 = square(x = sub_28)[name = tensor("square_14")]; + tensor reduce_mean_44_axes_0 = const()[name = tensor("reduce_mean_44_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_44_keep_dims_0 = const()[name = tensor("reduce_mean_44_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_44 = reduce_mean(axes = reduce_mean_44_axes_0, keep_dims = reduce_mean_44_keep_dims_0, x = square_14)[name = tensor("reduce_mean_44")]; + tensor add_28_y_0 = const()[name = tensor("add_28_y_0"), val = 
tensor(0x1.0c6f7ap-20)]; + tensor add_28 = add(x = reduce_mean_44, y = add_28_y_0)[name = tensor("add_28")]; + tensor sqrt_14 = sqrt(x = add_28)[name = tensor("sqrt_14")]; + tensor real_div_14 = real_div(x = sub_28, y = sqrt_14)[name = tensor("real_div_14")]; + tensor reshape_57_shape_0 = const()[name = tensor("reshape_57_shape_0"), val = tensor([1, 512, 256, 256])]; + tensor reshape_57 = reshape(shape = reshape_57_shape_0, x = real_div_14)[name = tensor("reshape_57")]; + tensor add_29_gamma_0 = const()[name = tensor("add_29_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197937024)))]; + tensor add_29_beta_0 = const()[name = tensor("add_29_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197939136)))]; + tensor add_29_epsilon_0 = const()[name = tensor("add_29_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_29 = batch_norm(beta = add_29_beta_0, epsilon = add_29_epsilon_0, gamma = add_29_gamma_0, mean = add_1_mean_0, variance = add_1_variance_0, x = reshape_57)[name = tensor("add_29")]; + tensor input_107 = silu(x = add_29)[name = tensor("input_107")]; + tensor var_349 = const()[name = tensor("op_349"), val = tensor([1, 1])]; + tensor var_351 = const()[name = tensor("op_351"), val = tensor([1, 1])]; + tensor hidden_states_29_pad_type_0 = const()[name = tensor("hidden_states_29_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_29_pad_0 = const()[name = tensor("hidden_states_29_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor hidden_states_29 = conv(bias = decoder_up_blocks_1_resnets_1_conv2_bias, dilations = var_351, groups = var_26, pad = hidden_states_29_pad_0, pad_type = hidden_states_29_pad_type_0, strides = var_349, weight = decoder_up_blocks_1_resnets_1_conv2_weight, x = input_107)[name = tensor("hidden_states_29")]; + tensor var_354 = add(x = var_324, y = hidden_states_29)[name = tensor("op_354")]; + tensor reshape_60_shape_0 = const()[name = tensor("reshape_60_shape_0"), val = tensor([1, 32, 16, 256, 256])]; + tensor reshape_60 = reshape(shape = reshape_60_shape_0, x = var_354)[name = tensor("reshape_60")]; + tensor reduce_mean_45_axes_0 = const()[name = tensor("reduce_mean_45_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_45_keep_dims_0 = const()[name = tensor("reduce_mean_45_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_45 = reduce_mean(axes = reduce_mean_45_axes_0, keep_dims = reduce_mean_45_keep_dims_0, x = reshape_60)[name = tensor("reduce_mean_45")]; + tensor sub_30 = sub(x = reshape_60, y = reduce_mean_45)[name = tensor("sub_30")]; + tensor square_15 = square(x = sub_30)[name = tensor("square_15")]; + tensor reduce_mean_47_axes_0 = const()[name = tensor("reduce_mean_47_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_47_keep_dims_0 = const()[name = tensor("reduce_mean_47_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_47 = reduce_mean(axes = reduce_mean_47_axes_0, keep_dims = reduce_mean_47_keep_dims_0, x = square_15)[name = tensor("reduce_mean_47")]; + tensor add_30_y_0 = const()[name = tensor("add_30_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_30 = add(x = reduce_mean_47, y = add_30_y_0)[name = tensor("add_30")]; + tensor sqrt_15 = sqrt(x = add_30)[name = tensor("sqrt_15")]; + tensor real_div_15 = real_div(x = sub_30, y = sqrt_15)[name = tensor("real_div_15")]; + tensor reshape_61_shape_0 = const()[name = tensor("reshape_61_shape_0"), val = tensor([1, 512, 256, 256])]; + tensor reshape_61 = reshape(shape = 
reshape_61_shape_0, x = real_div_15)[name = tensor("reshape_61")]; + tensor add_31_gamma_0 = const()[name = tensor("add_31_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197941248)))]; + tensor add_31_beta_0 = const()[name = tensor("add_31_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197943360)))]; + tensor add_31_epsilon_0 = const()[name = tensor("add_31_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_31 = batch_norm(beta = add_31_beta_0, epsilon = add_31_epsilon_0, gamma = add_31_gamma_0, mean = add_1_mean_0, variance = add_1_variance_0, x = reshape_61)[name = tensor("add_31")]; + tensor input_115 = silu(x = add_31)[name = tensor("input_115")]; + tensor var_367 = const()[name = tensor("op_367"), val = tensor([1, 1])]; + tensor var_369 = const()[name = tensor("op_369"), val = tensor([1, 1])]; + tensor input_117_pad_type_0 = const()[name = tensor("input_117_pad_type_0"), val = tensor("custom")]; + tensor input_117_pad_0 = const()[name = tensor("input_117_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor input_117 = conv(bias = decoder_up_blocks_1_resnets_2_conv1_bias, dilations = var_369, groups = var_26, pad = input_117_pad_0, pad_type = input_117_pad_type_0, strides = var_367, weight = decoder_up_blocks_1_resnets_2_conv1_weight, x = input_115)[name = tensor("input_117")]; + tensor reshape_64_shape_0 = const()[name = tensor("reshape_64_shape_0"), val = tensor([1, 32, 16, 256, 256])]; + tensor reshape_64 = reshape(shape = reshape_64_shape_0, x = input_117)[name = tensor("reshape_64")]; + tensor reduce_mean_48_axes_0 = const()[name = tensor("reduce_mean_48_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_48_keep_dims_0 = const()[name = tensor("reduce_mean_48_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_48 = reduce_mean(axes = reduce_mean_48_axes_0, keep_dims = reduce_mean_48_keep_dims_0, x = reshape_64)[name = tensor("reduce_mean_48")]; + tensor sub_32 = sub(x = reshape_64, y = reduce_mean_48)[name = tensor("sub_32")]; + tensor square_16 = square(x = sub_32)[name = tensor("square_16")]; + tensor reduce_mean_50_axes_0 = const()[name = tensor("reduce_mean_50_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_50_keep_dims_0 = const()[name = tensor("reduce_mean_50_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_50 = reduce_mean(axes = reduce_mean_50_axes_0, keep_dims = reduce_mean_50_keep_dims_0, x = square_16)[name = tensor("reduce_mean_50")]; + tensor add_32_y_0 = const()[name = tensor("add_32_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_32 = add(x = reduce_mean_50, y = add_32_y_0)[name = tensor("add_32")]; + tensor sqrt_16 = sqrt(x = add_32)[name = tensor("sqrt_16")]; + tensor real_div_16 = real_div(x = sub_32, y = sqrt_16)[name = tensor("real_div_16")]; + tensor reshape_65_shape_0 = const()[name = tensor("reshape_65_shape_0"), val = tensor([1, 512, 256, 256])]; + tensor reshape_65 = reshape(shape = reshape_65_shape_0, x = real_div_16)[name = tensor("reshape_65")]; + tensor add_33_gamma_0 = const()[name = tensor("add_33_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197945472)))]; + tensor add_33_beta_0 = const()[name = tensor("add_33_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197947584)))]; + tensor add_33_epsilon_0 = const()[name = tensor("add_33_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_33 = batch_norm(beta = add_33_beta_0, 
epsilon = add_33_epsilon_0, gamma = add_33_gamma_0, mean = add_1_mean_0, variance = add_1_variance_0, x = reshape_65)[name = tensor("add_33")]; + tensor input_121 = silu(x = add_33)[name = tensor("input_121")]; + tensor var_379 = const()[name = tensor("op_379"), val = tensor([1, 1])]; + tensor var_381 = const()[name = tensor("op_381"), val = tensor([1, 1])]; + tensor hidden_states_31_pad_type_0 = const()[name = tensor("hidden_states_31_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_31_pad_0 = const()[name = tensor("hidden_states_31_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor hidden_states_31 = conv(bias = decoder_up_blocks_1_resnets_2_conv2_bias, dilations = var_381, groups = var_26, pad = hidden_states_31_pad_0, pad_type = hidden_states_31_pad_type_0, strides = var_379, weight = decoder_up_blocks_1_resnets_2_conv2_weight, x = input_121)[name = tensor("hidden_states_31")]; + tensor var_384 = add(x = var_354, y = hidden_states_31)[name = tensor("op_384")]; + tensor input_125_scale_factor_height_0 = const()[name = tensor("input_125_scale_factor_height_0"), val = tensor(0x1p+1)]; + tensor input_125_scale_factor_width_0 = const()[name = tensor("input_125_scale_factor_width_0"), val = tensor(0x1p+1)]; + tensor input_125 = upsample_nearest_neighbor(scale_factor_height = input_125_scale_factor_height_0, scale_factor_width = input_125_scale_factor_width_0, x = var_384)[name = tensor("input_125")]; + tensor var_392 = const()[name = tensor("op_392"), val = tensor([1, 1])]; + tensor var_394 = const()[name = tensor("op_394"), val = tensor([1, 1])]; + tensor input_127_pad_type_0 = const()[name = tensor("input_127_pad_type_0"), val = tensor("custom")]; + tensor input_127_pad_0 = const()[name = tensor("input_127_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor input_127 = conv(bias = decoder_up_blocks_1_upsamplers_0_conv_bias, dilations = var_394, groups = var_26, pad = input_127_pad_0, pad_type = input_127_pad_type_0, strides = var_392, weight = decoder_up_blocks_1_upsamplers_0_conv_weight, x = input_125)[name = tensor("input_127")]; + tensor reshape_68_shape_0 = const()[name = tensor("reshape_68_shape_0"), val = tensor([1, 32, 16, 512, 512])]; + tensor reshape_68 = reshape(shape = reshape_68_shape_0, x = input_127)[name = tensor("reshape_68")]; + tensor reduce_mean_51_axes_0 = const()[name = tensor("reduce_mean_51_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_51_keep_dims_0 = const()[name = tensor("reduce_mean_51_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_51 = reduce_mean(axes = reduce_mean_51_axes_0, keep_dims = reduce_mean_51_keep_dims_0, x = reshape_68)[name = tensor("reduce_mean_51")]; + tensor sub_34 = sub(x = reshape_68, y = reduce_mean_51)[name = tensor("sub_34")]; + tensor square_17 = square(x = sub_34)[name = tensor("square_17")]; + tensor reduce_mean_53_axes_0 = const()[name = tensor("reduce_mean_53_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_53_keep_dims_0 = const()[name = tensor("reduce_mean_53_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_53 = reduce_mean(axes = reduce_mean_53_axes_0, keep_dims = reduce_mean_53_keep_dims_0, x = square_17)[name = tensor("reduce_mean_53")]; + tensor add_34_y_0 = const()[name = tensor("add_34_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_34 = add(x = reduce_mean_53, y = add_34_y_0)[name = tensor("add_34")]; + tensor sqrt_17 = sqrt(x = add_34)[name = tensor("sqrt_17")]; + tensor real_div_17 = real_div(x = sub_34, y = sqrt_17)[name = tensor("real_div_17")]; + tensor reshape_69_shape_0 = 
const()[name = tensor("reshape_69_shape_0"), val = tensor([1, 512, 512, 512])]; + tensor reshape_69 = reshape(shape = reshape_69_shape_0, x = real_div_17)[name = tensor("reshape_69")]; + tensor add_35_gamma_0 = const()[name = tensor("add_35_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197949696)))]; + tensor add_35_beta_0 = const()[name = tensor("add_35_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197951808)))]; + tensor add_35_epsilon_0 = const()[name = tensor("add_35_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_35 = batch_norm(beta = add_35_beta_0, epsilon = add_35_epsilon_0, gamma = add_35_gamma_0, mean = add_1_mean_0, variance = add_1_variance_0, x = reshape_69)[name = tensor("add_35")]; + tensor input_131 = silu(x = add_35)[name = tensor("input_131")]; + tensor var_416 = const()[name = tensor("op_416"), val = tensor([1, 1])]; + tensor var_418 = const()[name = tensor("op_418"), val = tensor([1, 1])]; + tensor input_133_pad_type_0 = const()[name = tensor("input_133_pad_type_0"), val = tensor("custom")]; + tensor input_133_pad_0 = const()[name = tensor("input_133_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor input_133 = conv(bias = decoder_up_blocks_2_resnets_0_conv1_bias, dilations = var_418, groups = var_26, pad = input_133_pad_0, pad_type = input_133_pad_type_0, strides = var_416, weight = decoder_up_blocks_2_resnets_0_conv1_weight, x = input_131)[name = tensor("input_133")]; + tensor reshape_72_shape_0 = const()[name = tensor("reshape_72_shape_0"), val = tensor([1, 32, 8, 512, 512])]; + tensor reshape_72 = reshape(shape = reshape_72_shape_0, x = input_133)[name = tensor("reshape_72")]; + tensor reduce_mean_54_axes_0 = const()[name = tensor("reduce_mean_54_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_54_keep_dims_0 = const()[name = tensor("reduce_mean_54_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_54 = reduce_mean(axes = reduce_mean_54_axes_0, keep_dims = reduce_mean_54_keep_dims_0, x = reshape_72)[name = tensor("reduce_mean_54")]; + tensor sub_36 = sub(x = reshape_72, y = reduce_mean_54)[name = tensor("sub_36")]; + tensor square_18 = square(x = sub_36)[name = tensor("square_18")]; + tensor reduce_mean_56_axes_0 = const()[name = tensor("reduce_mean_56_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_56_keep_dims_0 = const()[name = tensor("reduce_mean_56_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_56 = reduce_mean(axes = reduce_mean_56_axes_0, keep_dims = reduce_mean_56_keep_dims_0, x = square_18)[name = tensor("reduce_mean_56")]; + tensor add_36_y_0 = const()[name = tensor("add_36_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_36 = add(x = reduce_mean_56, y = add_36_y_0)[name = tensor("add_36")]; + tensor sqrt_18 = sqrt(x = add_36)[name = tensor("sqrt_18")]; + tensor real_div_18 = real_div(x = sub_36, y = sqrt_18)[name = tensor("real_div_18")]; + tensor reshape_73_shape_0 = const()[name = tensor("reshape_73_shape_0"), val = tensor([1, 256, 512, 512])]; + tensor reshape_73 = reshape(shape = reshape_73_shape_0, x = real_div_18)[name = tensor("reshape_73")]; + tensor add_37_mean_0 = const()[name = tensor("add_37_mean_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197953920)))]; + tensor add_37_variance_0 = const()[name = tensor("add_37_variance_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197955008)))]; + tensor add_37_gamma_0 = 
const()[name = tensor("add_37_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197956096)))]; + tensor add_37_beta_0 = const()[name = tensor("add_37_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197957184)))]; + tensor add_37_epsilon_0 = const()[name = tensor("add_37_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_37 = batch_norm(beta = add_37_beta_0, epsilon = add_37_epsilon_0, gamma = add_37_gamma_0, mean = add_37_mean_0, variance = add_37_variance_0, x = reshape_73)[name = tensor("add_37")]; + tensor input_137 = silu(x = add_37)[name = tensor("input_137")]; + tensor var_428 = const()[name = tensor("op_428"), val = tensor([1, 1])]; + tensor var_430 = const()[name = tensor("op_430"), val = tensor([1, 1])]; + tensor hidden_states_35_pad_type_0 = const()[name = tensor("hidden_states_35_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_35_pad_0 = const()[name = tensor("hidden_states_35_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor hidden_states_35 = conv(bias = decoder_up_blocks_2_resnets_0_conv2_bias, dilations = var_430, groups = var_26, pad = hidden_states_35_pad_0, pad_type = hidden_states_35_pad_type_0, strides = var_428, weight = decoder_up_blocks_2_resnets_0_conv2_weight, x = input_137)[name = tensor("hidden_states_35")]; + tensor var_435 = const()[name = tensor("op_435"), val = tensor([1, 1])]; + tensor var_437 = const()[name = tensor("op_437"), val = tensor([1, 1])]; + tensor input_tensor_1_pad_type_0 = const()[name = tensor("input_tensor_1_pad_type_0"), val = tensor("custom")]; + tensor input_tensor_1_pad_0 = const()[name = tensor("input_tensor_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor input_tensor_1 = conv(bias = decoder_up_blocks_2_resnets_0_conv_shortcut_bias, dilations = var_437, groups = var_26, pad = input_tensor_1_pad_0, pad_type = input_tensor_1_pad_type_0, strides = var_435, weight = decoder_up_blocks_2_resnets_0_conv_shortcut_weight, x = input_127)[name = tensor("input_tensor_1")]; + tensor var_440 = add(x = input_tensor_1, y = hidden_states_35)[name = tensor("op_440")]; + tensor reshape_76_shape_0 = const()[name = tensor("reshape_76_shape_0"), val = tensor([1, 32, 8, 512, 512])]; + tensor reshape_76 = reshape(shape = reshape_76_shape_0, x = var_440)[name = tensor("reshape_76")]; + tensor reduce_mean_57_axes_0 = const()[name = tensor("reduce_mean_57_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_57_keep_dims_0 = const()[name = tensor("reduce_mean_57_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_57 = reduce_mean(axes = reduce_mean_57_axes_0, keep_dims = reduce_mean_57_keep_dims_0, x = reshape_76)[name = tensor("reduce_mean_57")]; + tensor sub_38 = sub(x = reshape_76, y = reduce_mean_57)[name = tensor("sub_38")]; + tensor square_19 = square(x = sub_38)[name = tensor("square_19")]; + tensor reduce_mean_59_axes_0 = const()[name = tensor("reduce_mean_59_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_59_keep_dims_0 = const()[name = tensor("reduce_mean_59_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_59 = reduce_mean(axes = reduce_mean_59_axes_0, keep_dims = reduce_mean_59_keep_dims_0, x = square_19)[name = tensor("reduce_mean_59")]; + tensor add_38_y_0 = const()[name = tensor("add_38_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_38 = add(x = reduce_mean_59, y = add_38_y_0)[name = tensor("add_38")]; + tensor sqrt_19 = sqrt(x = add_38)[name = tensor("sqrt_19")]; + tensor real_div_19 = real_div(x = sub_38, y 
= sqrt_19)[name = tensor("real_div_19")]; + tensor reshape_77_shape_0 = const()[name = tensor("reshape_77_shape_0"), val = tensor([1, 256, 512, 512])]; + tensor reshape_77 = reshape(shape = reshape_77_shape_0, x = real_div_19)[name = tensor("reshape_77")]; + tensor add_39_gamma_0 = const()[name = tensor("add_39_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197958272)))]; + tensor add_39_beta_0 = const()[name = tensor("add_39_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197959360)))]; + tensor add_39_epsilon_0 = const()[name = tensor("add_39_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_39 = batch_norm(beta = add_39_beta_0, epsilon = add_39_epsilon_0, gamma = add_39_gamma_0, mean = add_37_mean_0, variance = add_37_variance_0, x = reshape_77)[name = tensor("add_39")]; + tensor input_145 = silu(x = add_39)[name = tensor("input_145")]; + tensor var_453 = const()[name = tensor("op_453"), val = tensor([1, 1])]; + tensor var_455 = const()[name = tensor("op_455"), val = tensor([1, 1])]; + tensor input_147_pad_type_0 = const()[name = tensor("input_147_pad_type_0"), val = tensor("custom")]; + tensor input_147_pad_0 = const()[name = tensor("input_147_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor input_147 = conv(bias = decoder_up_blocks_2_resnets_1_conv1_bias, dilations = var_455, groups = var_26, pad = input_147_pad_0, pad_type = input_147_pad_type_0, strides = var_453, weight = decoder_up_blocks_2_resnets_1_conv1_weight, x = input_145)[name = tensor("input_147")]; + tensor reshape_80_shape_0 = const()[name = tensor("reshape_80_shape_0"), val = tensor([1, 32, 8, 512, 512])]; + tensor reshape_80 = reshape(shape = reshape_80_shape_0, x = input_147)[name = tensor("reshape_80")]; + tensor reduce_mean_60_axes_0 = const()[name = tensor("reduce_mean_60_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_60_keep_dims_0 = const()[name = tensor("reduce_mean_60_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_60 = reduce_mean(axes = reduce_mean_60_axes_0, keep_dims = reduce_mean_60_keep_dims_0, x = reshape_80)[name = tensor("reduce_mean_60")]; + tensor sub_40 = sub(x = reshape_80, y = reduce_mean_60)[name = tensor("sub_40")]; + tensor square_20 = square(x = sub_40)[name = tensor("square_20")]; + tensor reduce_mean_62_axes_0 = const()[name = tensor("reduce_mean_62_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_62_keep_dims_0 = const()[name = tensor("reduce_mean_62_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_62 = reduce_mean(axes = reduce_mean_62_axes_0, keep_dims = reduce_mean_62_keep_dims_0, x = square_20)[name = tensor("reduce_mean_62")]; + tensor add_40_y_0 = const()[name = tensor("add_40_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_40 = add(x = reduce_mean_62, y = add_40_y_0)[name = tensor("add_40")]; + tensor sqrt_20 = sqrt(x = add_40)[name = tensor("sqrt_20")]; + tensor real_div_20 = real_div(x = sub_40, y = sqrt_20)[name = tensor("real_div_20")]; + tensor reshape_81_shape_0 = const()[name = tensor("reshape_81_shape_0"), val = tensor([1, 256, 512, 512])]; + tensor reshape_81 = reshape(shape = reshape_81_shape_0, x = real_div_20)[name = tensor("reshape_81")]; + tensor add_41_gamma_0 = const()[name = tensor("add_41_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197960448)))]; + tensor add_41_beta_0 = const()[name = tensor("add_41_beta_0"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(197961536)))]; + tensor add_41_epsilon_0 = const()[name = tensor("add_41_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_41 = batch_norm(beta = add_41_beta_0, epsilon = add_41_epsilon_0, gamma = add_41_gamma_0, mean = add_37_mean_0, variance = add_37_variance_0, x = reshape_81)[name = tensor("add_41")]; + tensor input_151 = silu(x = add_41)[name = tensor("input_151")]; + tensor var_465 = const()[name = tensor("op_465"), val = tensor([1, 1])]; + tensor var_467 = const()[name = tensor("op_467"), val = tensor([1, 1])]; + tensor hidden_states_37_pad_type_0 = const()[name = tensor("hidden_states_37_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_37_pad_0 = const()[name = tensor("hidden_states_37_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor hidden_states_37 = conv(bias = decoder_up_blocks_2_resnets_1_conv2_bias, dilations = var_467, groups = var_26, pad = hidden_states_37_pad_0, pad_type = hidden_states_37_pad_type_0, strides = var_465, weight = decoder_up_blocks_2_resnets_1_conv2_weight, x = input_151)[name = tensor("hidden_states_37")]; + tensor var_470 = add(x = var_440, y = hidden_states_37)[name = tensor("op_470")]; + tensor reshape_84_shape_0 = const()[name = tensor("reshape_84_shape_0"), val = tensor([1, 32, 8, 512, 512])]; + tensor reshape_84 = reshape(shape = reshape_84_shape_0, x = var_470)[name = tensor("reshape_84")]; + tensor reduce_mean_63_axes_0 = const()[name = tensor("reduce_mean_63_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_63_keep_dims_0 = const()[name = tensor("reduce_mean_63_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_63 = reduce_mean(axes = reduce_mean_63_axes_0, keep_dims = reduce_mean_63_keep_dims_0, x = reshape_84)[name = tensor("reduce_mean_63")]; + tensor sub_42 = sub(x = reshape_84, y = reduce_mean_63)[name = tensor("sub_42")]; + tensor square_21 = square(x = sub_42)[name = tensor("square_21")]; + tensor reduce_mean_65_axes_0 = const()[name = tensor("reduce_mean_65_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_65_keep_dims_0 = const()[name = tensor("reduce_mean_65_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_65 = reduce_mean(axes = reduce_mean_65_axes_0, keep_dims = reduce_mean_65_keep_dims_0, x = square_21)[name = tensor("reduce_mean_65")]; + tensor add_42_y_0 = const()[name = tensor("add_42_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_42 = add(x = reduce_mean_65, y = add_42_y_0)[name = tensor("add_42")]; + tensor sqrt_21 = sqrt(x = add_42)[name = tensor("sqrt_21")]; + tensor real_div_21 = real_div(x = sub_42, y = sqrt_21)[name = tensor("real_div_21")]; + tensor reshape_85_shape_0 = const()[name = tensor("reshape_85_shape_0"), val = tensor([1, 256, 512, 512])]; + tensor reshape_85 = reshape(shape = reshape_85_shape_0, x = real_div_21)[name = tensor("reshape_85")]; + tensor add_43_gamma_0 = const()[name = tensor("add_43_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197962624)))]; + tensor add_43_beta_0 = const()[name = tensor("add_43_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197963712)))]; + tensor add_43_epsilon_0 = const()[name = tensor("add_43_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_43 = batch_norm(beta = add_43_beta_0, epsilon = add_43_epsilon_0, gamma = add_43_gamma_0, mean = add_37_mean_0, variance = add_37_variance_0, x = reshape_85)[name = tensor("add_43")]; + tensor input_159 = silu(x = add_43)[name = 
tensor("input_159")]; + tensor var_483 = const()[name = tensor("op_483"), val = tensor([1, 1])]; + tensor var_485 = const()[name = tensor("op_485"), val = tensor([1, 1])]; + tensor input_161_pad_type_0 = const()[name = tensor("input_161_pad_type_0"), val = tensor("custom")]; + tensor input_161_pad_0 = const()[name = tensor("input_161_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor input_161 = conv(bias = decoder_up_blocks_2_resnets_2_conv1_bias, dilations = var_485, groups = var_26, pad = input_161_pad_0, pad_type = input_161_pad_type_0, strides = var_483, weight = decoder_up_blocks_2_resnets_2_conv1_weight, x = input_159)[name = tensor("input_161")]; + tensor reshape_88_shape_0 = const()[name = tensor("reshape_88_shape_0"), val = tensor([1, 32, 8, 512, 512])]; + tensor reshape_88 = reshape(shape = reshape_88_shape_0, x = input_161)[name = tensor("reshape_88")]; + tensor reduce_mean_66_axes_0 = const()[name = tensor("reduce_mean_66_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_66_keep_dims_0 = const()[name = tensor("reduce_mean_66_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_66 = reduce_mean(axes = reduce_mean_66_axes_0, keep_dims = reduce_mean_66_keep_dims_0, x = reshape_88)[name = tensor("reduce_mean_66")]; + tensor sub_44 = sub(x = reshape_88, y = reduce_mean_66)[name = tensor("sub_44")]; + tensor square_22 = square(x = sub_44)[name = tensor("square_22")]; + tensor reduce_mean_68_axes_0 = const()[name = tensor("reduce_mean_68_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_68_keep_dims_0 = const()[name = tensor("reduce_mean_68_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_68 = reduce_mean(axes = reduce_mean_68_axes_0, keep_dims = reduce_mean_68_keep_dims_0, x = square_22)[name = tensor("reduce_mean_68")]; + tensor add_44_y_0 = const()[name = tensor("add_44_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_44 = add(x = reduce_mean_68, y = add_44_y_0)[name = tensor("add_44")]; + tensor sqrt_22 = sqrt(x = add_44)[name = tensor("sqrt_22")]; + tensor real_div_22 = real_div(x = sub_44, y = sqrt_22)[name = tensor("real_div_22")]; + tensor reshape_89_shape_0 = const()[name = tensor("reshape_89_shape_0"), val = tensor([1, 256, 512, 512])]; + tensor reshape_89 = reshape(shape = reshape_89_shape_0, x = real_div_22)[name = tensor("reshape_89")]; + tensor add_45_gamma_0 = const()[name = tensor("add_45_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197964800)))]; + tensor add_45_beta_0 = const()[name = tensor("add_45_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197965888)))]; + tensor add_45_epsilon_0 = const()[name = tensor("add_45_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_45 = batch_norm(beta = add_45_beta_0, epsilon = add_45_epsilon_0, gamma = add_45_gamma_0, mean = add_37_mean_0, variance = add_37_variance_0, x = reshape_89)[name = tensor("add_45")]; + tensor input_165 = silu(x = add_45)[name = tensor("input_165")]; + tensor var_495 = const()[name = tensor("op_495"), val = tensor([1, 1])]; + tensor var_497 = const()[name = tensor("op_497"), val = tensor([1, 1])]; + tensor hidden_states_39_pad_type_0 = const()[name = tensor("hidden_states_39_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_39_pad_0 = const()[name = tensor("hidden_states_39_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor hidden_states_39 = conv(bias = decoder_up_blocks_2_resnets_2_conv2_bias, dilations = var_497, groups = var_26, pad = hidden_states_39_pad_0, 
pad_type = hidden_states_39_pad_type_0, strides = var_495, weight = decoder_up_blocks_2_resnets_2_conv2_weight, x = input_165)[name = tensor("hidden_states_39")]; + tensor var_500 = add(x = var_470, y = hidden_states_39)[name = tensor("op_500")]; + tensor input_169_scale_factor_height_0 = const()[name = tensor("input_169_scale_factor_height_0"), val = tensor(0x1p+1)]; + tensor input_169_scale_factor_width_0 = const()[name = tensor("input_169_scale_factor_width_0"), val = tensor(0x1p+1)]; + tensor input_169 = upsample_nearest_neighbor(scale_factor_height = input_169_scale_factor_height_0, scale_factor_width = input_169_scale_factor_width_0, x = var_500)[name = tensor("input_169")]; + tensor var_508 = const()[name = tensor("op_508"), val = tensor([1, 1])]; + tensor var_510 = const()[name = tensor("op_510"), val = tensor([1, 1])]; + tensor input_171_pad_type_0 = const()[name = tensor("input_171_pad_type_0"), val = tensor("custom")]; + tensor input_171_pad_0 = const()[name = tensor("input_171_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor input_171 = conv(bias = decoder_up_blocks_2_upsamplers_0_conv_bias, dilations = var_510, groups = var_26, pad = input_171_pad_0, pad_type = input_171_pad_type_0, strides = var_508, weight = decoder_up_blocks_2_upsamplers_0_conv_weight, x = input_169)[name = tensor("input_171")]; + tensor reshape_92_shape_0 = const()[name = tensor("reshape_92_shape_0"), val = tensor([1, 32, 8, 1024, 1024])]; + tensor reshape_92 = reshape(shape = reshape_92_shape_0, x = input_171)[name = tensor("reshape_92")]; + tensor reduce_mean_69_axes_0 = const()[name = tensor("reduce_mean_69_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_69_keep_dims_0 = const()[name = tensor("reduce_mean_69_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_69 = reduce_mean(axes = reduce_mean_69_axes_0, keep_dims = reduce_mean_69_keep_dims_0, x = reshape_92)[name = tensor("reduce_mean_69")]; + tensor sub_46 = sub(x = reshape_92, y = reduce_mean_69)[name = tensor("sub_46")]; + tensor square_23 = square(x = sub_46)[name = tensor("square_23")]; + tensor reduce_mean_71_axes_0 = const()[name = tensor("reduce_mean_71_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_71_keep_dims_0 = const()[name = tensor("reduce_mean_71_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_71 = reduce_mean(axes = reduce_mean_71_axes_0, keep_dims = reduce_mean_71_keep_dims_0, x = square_23)[name = tensor("reduce_mean_71")]; + tensor add_46_y_0 = const()[name = tensor("add_46_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_46 = add(x = reduce_mean_71, y = add_46_y_0)[name = tensor("add_46")]; + tensor sqrt_23 = sqrt(x = add_46)[name = tensor("sqrt_23")]; + tensor real_div_23 = real_div(x = sub_46, y = sqrt_23)[name = tensor("real_div_23")]; + tensor reshape_93_shape_0 = const()[name = tensor("reshape_93_shape_0"), val = tensor([1, 256, 1024, 1024])]; + tensor reshape_93 = reshape(shape = reshape_93_shape_0, x = real_div_23)[name = tensor("reshape_93")]; + tensor add_47_gamma_0 = const()[name = tensor("add_47_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197966976)))]; + tensor add_47_beta_0 = const()[name = tensor("add_47_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197968064)))]; + tensor add_47_epsilon_0 = const()[name = tensor("add_47_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_47 = batch_norm(beta = add_47_beta_0, epsilon = add_47_epsilon_0, gamma = add_47_gamma_0, mean = 
add_37_mean_0, variance = add_37_variance_0, x = reshape_93)[name = tensor("add_47")]; + tensor input_175 = silu(x = add_47)[name = tensor("input_175")]; + tensor var_530 = const()[name = tensor("op_530"), val = tensor([1, 1])]; + tensor var_532 = const()[name = tensor("op_532"), val = tensor([1, 1])]; + tensor input_177_pad_type_0 = const()[name = tensor("input_177_pad_type_0"), val = tensor("custom")]; + tensor input_177_pad_0 = const()[name = tensor("input_177_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor input_177 = conv(bias = decoder_up_blocks_3_resnets_0_conv1_bias, dilations = var_532, groups = var_26, pad = input_177_pad_0, pad_type = input_177_pad_type_0, strides = var_530, weight = decoder_up_blocks_3_resnets_0_conv1_weight, x = input_175)[name = tensor("input_177")]; + tensor reshape_96_shape_0 = const()[name = tensor("reshape_96_shape_0"), val = tensor([1, 32, 4, 1024, 1024])]; + tensor reshape_96 = reshape(shape = reshape_96_shape_0, x = input_177)[name = tensor("reshape_96")]; + tensor reduce_mean_72_axes_0 = const()[name = tensor("reduce_mean_72_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_72_keep_dims_0 = const()[name = tensor("reduce_mean_72_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_72 = reduce_mean(axes = reduce_mean_72_axes_0, keep_dims = reduce_mean_72_keep_dims_0, x = reshape_96)[name = tensor("reduce_mean_72")]; + tensor sub_48 = sub(x = reshape_96, y = reduce_mean_72)[name = tensor("sub_48")]; + tensor square_24 = square(x = sub_48)[name = tensor("square_24")]; + tensor reduce_mean_74_axes_0 = const()[name = tensor("reduce_mean_74_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_74_keep_dims_0 = const()[name = tensor("reduce_mean_74_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_74 = reduce_mean(axes = reduce_mean_74_axes_0, keep_dims = reduce_mean_74_keep_dims_0, x = square_24)[name = tensor("reduce_mean_74")]; + tensor add_48_y_0 = const()[name = tensor("add_48_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_48 = add(x = reduce_mean_74, y = add_48_y_0)[name = tensor("add_48")]; + tensor sqrt_24 = sqrt(x = add_48)[name = tensor("sqrt_24")]; + tensor real_div_24 = real_div(x = sub_48, y = sqrt_24)[name = tensor("real_div_24")]; + tensor reshape_97_shape_0 = const()[name = tensor("reshape_97_shape_0"), val = tensor([1, 128, 1024, 1024])]; + tensor reshape_97 = reshape(shape = reshape_97_shape_0, x = real_div_24)[name = tensor("reshape_97")]; + tensor add_49_mean_0 = const()[name = tensor("add_49_mean_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197969152)))]; + tensor add_49_variance_0 = const()[name = tensor("add_49_variance_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197969728)))]; + tensor add_49_gamma_0 = const()[name = tensor("add_49_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197970304)))]; + tensor add_49_beta_0 = const()[name = tensor("add_49_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197970880)))]; + tensor add_49_epsilon_0 = const()[name = tensor("add_49_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_49 = batch_norm(beta = add_49_beta_0, epsilon = add_49_epsilon_0, gamma = add_49_gamma_0, mean = add_49_mean_0, variance = add_49_variance_0, x = reshape_97)[name = tensor("add_49")]; + tensor input_181 = silu(x = add_49)[name = tensor("input_181")]; + tensor var_542 = const()[name = tensor("op_542"), 
val = tensor([1, 1])]; + tensor var_544 = const()[name = tensor("op_544"), val = tensor([1, 1])]; + tensor hidden_states_43_pad_type_0 = const()[name = tensor("hidden_states_43_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_43_pad_0 = const()[name = tensor("hidden_states_43_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor hidden_states_43 = conv(bias = decoder_up_blocks_3_resnets_0_conv2_bias, dilations = var_544, groups = var_26, pad = hidden_states_43_pad_0, pad_type = hidden_states_43_pad_type_0, strides = var_542, weight = decoder_up_blocks_3_resnets_0_conv2_weight, x = input_181)[name = tensor("hidden_states_43")]; + tensor var_549 = const()[name = tensor("op_549"), val = tensor([1, 1])]; + tensor var_551 = const()[name = tensor("op_551"), val = tensor([1, 1])]; + tensor input_tensor_pad_type_0 = const()[name = tensor("input_tensor_pad_type_0"), val = tensor("custom")]; + tensor input_tensor_pad_0 = const()[name = tensor("input_tensor_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor input_tensor = conv(bias = decoder_up_blocks_3_resnets_0_conv_shortcut_bias, dilations = var_551, groups = var_26, pad = input_tensor_pad_0, pad_type = input_tensor_pad_type_0, strides = var_549, weight = decoder_up_blocks_3_resnets_0_conv_shortcut_weight, x = input_171)[name = tensor("input_tensor")]; + tensor var_554 = add(x = input_tensor, y = hidden_states_43)[name = tensor("op_554")]; + tensor reshape_100_shape_0 = const()[name = tensor("reshape_100_shape_0"), val = tensor([1, 32, 4, 1024, 1024])]; + tensor reshape_100 = reshape(shape = reshape_100_shape_0, x = var_554)[name = tensor("reshape_100")]; + tensor reduce_mean_75_axes_0 = const()[name = tensor("reduce_mean_75_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_75_keep_dims_0 = const()[name = tensor("reduce_mean_75_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_75 = reduce_mean(axes = reduce_mean_75_axes_0, keep_dims = reduce_mean_75_keep_dims_0, x = reshape_100)[name = tensor("reduce_mean_75")]; + tensor sub_50 = sub(x = reshape_100, y = reduce_mean_75)[name = tensor("sub_50")]; + tensor square_25 = square(x = sub_50)[name = tensor("square_25")]; + tensor reduce_mean_77_axes_0 = const()[name = tensor("reduce_mean_77_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_77_keep_dims_0 = const()[name = tensor("reduce_mean_77_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_77 = reduce_mean(axes = reduce_mean_77_axes_0, keep_dims = reduce_mean_77_keep_dims_0, x = square_25)[name = tensor("reduce_mean_77")]; + tensor add_50_y_0 = const()[name = tensor("add_50_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_50 = add(x = reduce_mean_77, y = add_50_y_0)[name = tensor("add_50")]; + tensor sqrt_25 = sqrt(x = add_50)[name = tensor("sqrt_25")]; + tensor real_div_25 = real_div(x = sub_50, y = sqrt_25)[name = tensor("real_div_25")]; + tensor reshape_101_shape_0 = const()[name = tensor("reshape_101_shape_0"), val = tensor([1, 128, 1024, 1024])]; + tensor reshape_101 = reshape(shape = reshape_101_shape_0, x = real_div_25)[name = tensor("reshape_101")]; + tensor add_51_gamma_0 = const()[name = tensor("add_51_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197971456)))]; + tensor add_51_beta_0 = const()[name = tensor("add_51_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197972032)))]; + tensor add_51_epsilon_0 = const()[name = tensor("add_51_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_51 = batch_norm(beta 
= add_51_beta_0, epsilon = add_51_epsilon_0, gamma = add_51_gamma_0, mean = add_49_mean_0, variance = add_49_variance_0, x = reshape_101)[name = tensor("add_51")]; + tensor input_189 = silu(x = add_51)[name = tensor("input_189")]; + tensor var_567 = const()[name = tensor("op_567"), val = tensor([1, 1])]; + tensor var_569 = const()[name = tensor("op_569"), val = tensor([1, 1])]; + tensor input_191_pad_type_0 = const()[name = tensor("input_191_pad_type_0"), val = tensor("custom")]; + tensor input_191_pad_0 = const()[name = tensor("input_191_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor input_191 = conv(bias = decoder_up_blocks_3_resnets_1_conv1_bias, dilations = var_569, groups = var_26, pad = input_191_pad_0, pad_type = input_191_pad_type_0, strides = var_567, weight = decoder_up_blocks_3_resnets_1_conv1_weight, x = input_189)[name = tensor("input_191")]; + tensor reshape_104_shape_0 = const()[name = tensor("reshape_104_shape_0"), val = tensor([1, 32, 4, 1024, 1024])]; + tensor reshape_104 = reshape(shape = reshape_104_shape_0, x = input_191)[name = tensor("reshape_104")]; + tensor reduce_mean_78_axes_0 = const()[name = tensor("reduce_mean_78_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_78_keep_dims_0 = const()[name = tensor("reduce_mean_78_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_78 = reduce_mean(axes = reduce_mean_78_axes_0, keep_dims = reduce_mean_78_keep_dims_0, x = reshape_104)[name = tensor("reduce_mean_78")]; + tensor sub_52 = sub(x = reshape_104, y = reduce_mean_78)[name = tensor("sub_52")]; + tensor square_26 = square(x = sub_52)[name = tensor("square_26")]; + tensor reduce_mean_80_axes_0 = const()[name = tensor("reduce_mean_80_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_80_keep_dims_0 = const()[name = tensor("reduce_mean_80_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_80 = reduce_mean(axes = reduce_mean_80_axes_0, keep_dims = reduce_mean_80_keep_dims_0, x = square_26)[name = tensor("reduce_mean_80")]; + tensor add_52_y_0 = const()[name = tensor("add_52_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_52 = add(x = reduce_mean_80, y = add_52_y_0)[name = tensor("add_52")]; + tensor sqrt_26 = sqrt(x = add_52)[name = tensor("sqrt_26")]; + tensor real_div_26 = real_div(x = sub_52, y = sqrt_26)[name = tensor("real_div_26")]; + tensor reshape_105_shape_0 = const()[name = tensor("reshape_105_shape_0"), val = tensor([1, 128, 1024, 1024])]; + tensor reshape_105 = reshape(shape = reshape_105_shape_0, x = real_div_26)[name = tensor("reshape_105")]; + tensor add_53_gamma_0 = const()[name = tensor("add_53_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197972608)))]; + tensor add_53_beta_0 = const()[name = tensor("add_53_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197973184)))]; + tensor add_53_epsilon_0 = const()[name = tensor("add_53_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_53 = batch_norm(beta = add_53_beta_0, epsilon = add_53_epsilon_0, gamma = add_53_gamma_0, mean = add_49_mean_0, variance = add_49_variance_0, x = reshape_105)[name = tensor("add_53")]; + tensor input_195 = silu(x = add_53)[name = tensor("input_195")]; + tensor var_579 = const()[name = tensor("op_579"), val = tensor([1, 1])]; + tensor var_581 = const()[name = tensor("op_581"), val = tensor([1, 1])]; + tensor hidden_states_45_pad_type_0 = const()[name = tensor("hidden_states_45_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_45_pad_0 = 
const()[name = tensor("hidden_states_45_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor hidden_states_45 = conv(bias = decoder_up_blocks_3_resnets_1_conv2_bias, dilations = var_581, groups = var_26, pad = hidden_states_45_pad_0, pad_type = hidden_states_45_pad_type_0, strides = var_579, weight = decoder_up_blocks_3_resnets_1_conv2_weight, x = input_195)[name = tensor("hidden_states_45")]; + tensor var_584 = add(x = var_554, y = hidden_states_45)[name = tensor("op_584")]; + tensor reshape_108_shape_0 = const()[name = tensor("reshape_108_shape_0"), val = tensor([1, 32, 4, 1024, 1024])]; + tensor reshape_108 = reshape(shape = reshape_108_shape_0, x = var_584)[name = tensor("reshape_108")]; + tensor reduce_mean_81_axes_0 = const()[name = tensor("reduce_mean_81_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_81_keep_dims_0 = const()[name = tensor("reduce_mean_81_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_81 = reduce_mean(axes = reduce_mean_81_axes_0, keep_dims = reduce_mean_81_keep_dims_0, x = reshape_108)[name = tensor("reduce_mean_81")]; + tensor sub_54 = sub(x = reshape_108, y = reduce_mean_81)[name = tensor("sub_54")]; + tensor square_27 = square(x = sub_54)[name = tensor("square_27")]; + tensor reduce_mean_83_axes_0 = const()[name = tensor("reduce_mean_83_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_83_keep_dims_0 = const()[name = tensor("reduce_mean_83_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_83 = reduce_mean(axes = reduce_mean_83_axes_0, keep_dims = reduce_mean_83_keep_dims_0, x = square_27)[name = tensor("reduce_mean_83")]; + tensor add_54_y_0 = const()[name = tensor("add_54_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_54 = add(x = reduce_mean_83, y = add_54_y_0)[name = tensor("add_54")]; + tensor sqrt_27 = sqrt(x = add_54)[name = tensor("sqrt_27")]; + tensor real_div_27 = real_div(x = sub_54, y = sqrt_27)[name = tensor("real_div_27")]; + tensor reshape_109_shape_0 = const()[name = tensor("reshape_109_shape_0"), val = tensor([1, 128, 1024, 1024])]; + tensor reshape_109 = reshape(shape = reshape_109_shape_0, x = real_div_27)[name = tensor("reshape_109")]; + tensor add_55_gamma_0 = const()[name = tensor("add_55_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197973760)))]; + tensor add_55_beta_0 = const()[name = tensor("add_55_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197974336)))]; + tensor add_55_epsilon_0 = const()[name = tensor("add_55_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_55 = batch_norm(beta = add_55_beta_0, epsilon = add_55_epsilon_0, gamma = add_55_gamma_0, mean = add_49_mean_0, variance = add_49_variance_0, x = reshape_109)[name = tensor("add_55")]; + tensor input_203 = silu(x = add_55)[name = tensor("input_203")]; + tensor var_597 = const()[name = tensor("op_597"), val = tensor([1, 1])]; + tensor var_599 = const()[name = tensor("op_599"), val = tensor([1, 1])]; + tensor input_205_pad_type_0 = const()[name = tensor("input_205_pad_type_0"), val = tensor("custom")]; + tensor input_205_pad_0 = const()[name = tensor("input_205_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor input_205 = conv(bias = decoder_up_blocks_3_resnets_2_conv1_bias, dilations = var_599, groups = var_26, pad = input_205_pad_0, pad_type = input_205_pad_type_0, strides = var_597, weight = decoder_up_blocks_3_resnets_2_conv1_weight, x = input_203)[name = tensor("input_205")]; + tensor reshape_112_shape_0 = const()[name = 
tensor("reshape_112_shape_0"), val = tensor([1, 32, 4, 1024, 1024])]; + tensor reshape_112 = reshape(shape = reshape_112_shape_0, x = input_205)[name = tensor("reshape_112")]; + tensor reduce_mean_84_axes_0 = const()[name = tensor("reduce_mean_84_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_84_keep_dims_0 = const()[name = tensor("reduce_mean_84_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_84 = reduce_mean(axes = reduce_mean_84_axes_0, keep_dims = reduce_mean_84_keep_dims_0, x = reshape_112)[name = tensor("reduce_mean_84")]; + tensor sub_56 = sub(x = reshape_112, y = reduce_mean_84)[name = tensor("sub_56")]; + tensor square_28 = square(x = sub_56)[name = tensor("square_28")]; + tensor reduce_mean_86_axes_0 = const()[name = tensor("reduce_mean_86_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_86_keep_dims_0 = const()[name = tensor("reduce_mean_86_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_86 = reduce_mean(axes = reduce_mean_86_axes_0, keep_dims = reduce_mean_86_keep_dims_0, x = square_28)[name = tensor("reduce_mean_86")]; + tensor add_56_y_0 = const()[name = tensor("add_56_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_56 = add(x = reduce_mean_86, y = add_56_y_0)[name = tensor("add_56")]; + tensor sqrt_28 = sqrt(x = add_56)[name = tensor("sqrt_28")]; + tensor real_div_28 = real_div(x = sub_56, y = sqrt_28)[name = tensor("real_div_28")]; + tensor reshape_113_shape_0 = const()[name = tensor("reshape_113_shape_0"), val = tensor([1, 128, 1024, 1024])]; + tensor reshape_113 = reshape(shape = reshape_113_shape_0, x = real_div_28)[name = tensor("reshape_113")]; + tensor add_57_gamma_0 = const()[name = tensor("add_57_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197974912)))]; + tensor add_57_beta_0 = const()[name = tensor("add_57_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197975488)))]; + tensor add_57_epsilon_0 = const()[name = tensor("add_57_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_57 = batch_norm(beta = add_57_beta_0, epsilon = add_57_epsilon_0, gamma = add_57_gamma_0, mean = add_49_mean_0, variance = add_49_variance_0, x = reshape_113)[name = tensor("add_57")]; + tensor input_209 = silu(x = add_57)[name = tensor("input_209")]; + tensor var_609 = const()[name = tensor("op_609"), val = tensor([1, 1])]; + tensor var_611 = const()[name = tensor("op_611"), val = tensor([1, 1])]; + tensor hidden_states_pad_type_0 = const()[name = tensor("hidden_states_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_pad_0 = const()[name = tensor("hidden_states_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor hidden_states = conv(bias = decoder_up_blocks_3_resnets_2_conv2_bias, dilations = var_611, groups = var_26, pad = hidden_states_pad_0, pad_type = hidden_states_pad_type_0, strides = var_609, weight = decoder_up_blocks_3_resnets_2_conv2_weight, x = input_209)[name = tensor("hidden_states")]; + tensor var_614 = add(x = var_584, y = hidden_states)[name = tensor("op_614")]; + tensor reshape_116_shape_0 = const()[name = tensor("reshape_116_shape_0"), val = tensor([1, 32, 4, 1024, 1024])]; + tensor reshape_116 = reshape(shape = reshape_116_shape_0, x = var_614)[name = tensor("reshape_116")]; + tensor reduce_mean_87_axes_0 = const()[name = tensor("reduce_mean_87_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_87_keep_dims_0 = const()[name = tensor("reduce_mean_87_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_87 = 
reduce_mean(axes = reduce_mean_87_axes_0, keep_dims = reduce_mean_87_keep_dims_0, x = reshape_116)[name = tensor("reduce_mean_87")]; + tensor sub_58 = sub(x = reshape_116, y = reduce_mean_87)[name = tensor("sub_58")]; + tensor square_29 = square(x = sub_58)[name = tensor("square_29")]; + tensor reduce_mean_89_axes_0 = const()[name = tensor("reduce_mean_89_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_89_keep_dims_0 = const()[name = tensor("reduce_mean_89_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_89 = reduce_mean(axes = reduce_mean_89_axes_0, keep_dims = reduce_mean_89_keep_dims_0, x = square_29)[name = tensor("reduce_mean_89")]; + tensor add_58_y_0 = const()[name = tensor("add_58_y_0"), val = tensor(0x1.0c6f7ap-20)]; + tensor add_58 = add(x = reduce_mean_89, y = add_58_y_0)[name = tensor("add_58")]; + tensor sqrt_29 = sqrt(x = add_58)[name = tensor("sqrt_29")]; + tensor real_div_29 = real_div(x = sub_58, y = sqrt_29)[name = tensor("real_div_29")]; + tensor reshape_117_shape_0 = const()[name = tensor("reshape_117_shape_0"), val = tensor([1, 128, 1024, 1024])]; + tensor reshape_117 = reshape(shape = reshape_117_shape_0, x = real_div_29)[name = tensor("reshape_117")]; + tensor add_59_gamma_0 = const()[name = tensor("add_59_gamma_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197976064)))]; + tensor add_59_beta_0 = const()[name = tensor("add_59_beta_0"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197976640)))]; + tensor add_59_epsilon_0 = const()[name = tensor("add_59_epsilon_0"), val = tensor(0x1.4f8b58p-17)]; + tensor add_59 = batch_norm(beta = add_59_beta_0, epsilon = add_59_epsilon_0, gamma = add_59_gamma_0, mean = add_49_mean_0, variance = add_49_variance_0, x = reshape_117)[name = tensor("add_59")]; + tensor input = silu(x = add_59)[name = tensor("input")]; + tensor var_623 = const()[name = tensor("op_623"), val = tensor([1, 1])]; + tensor var_625 = const()[name = tensor("op_625"), val = tensor([1, 1])]; + tensor var_627_pad_type_0 = const()[name = tensor("op_627_pad_type_0"), val = tensor("custom")]; + tensor var_627_pad_0 = const()[name = tensor("op_627_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor image = conv(bias = decoder_conv_out_bias, dilations = var_625, groups = var_26, pad = var_627_pad_0, pad_type = var_627_pad_type_0, strides = var_623, weight = decoder_conv_out_weight, x = input)[name = tensor("op_627")]; + } -> (image); +} \ No newline at end of file diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEDecoder.mlmodelc/weights/weight.bin b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEDecoder.mlmodelc/weights/weight.bin new file mode 100644 index 0000000000000000000000000000000000000000..10a9bb7a56bab8bda6b91e312c1a30847f14d99e --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEDecoder.mlmodelc/weights/weight.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ade814d6037fb5ba892963be1596c8e37852f96c399101401831f8c07e64bd2 +size 197977216 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEEncoder.mlmodelc/analytics/coremldata.bin b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEEncoder.mlmodelc/analytics/coremldata.bin new file mode 100644 index 0000000000000000000000000000000000000000..722163a423cdb80707f6eb5ad91d80f1438bd128 --- /dev/null +++ 
b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEEncoder.mlmodelc/analytics/coremldata.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e44297c358d64101602d9abfe4d6c9fb96ddb3b120f84fbb74001aa4312cf93 +size 207 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEEncoder.mlmodelc/coremldata.bin b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEEncoder.mlmodelc/coremldata.bin new file mode 100644 index 0000000000000000000000000000000000000000..c8241a4d50e775e295089590466b9a8f9e8f0bf2 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEEncoder.mlmodelc/coremldata.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7269e365034d061c3ad035d9a5b7c38864d7db71bee6fb7545c97a4942a865f1 +size 783 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEEncoder.mlmodelc/metadata.json b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEEncoder.mlmodelc/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..216c974d24d18e3116bc89b4321848fb20963592 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEEncoder.mlmodelc/metadata.json @@ -0,0 +1,75 @@ +[ + { + "shortDescription" : "Stable Diffusion generates images conditioned on text and\/or other images as input through the diffusion process. Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.", + "metadataOutputVersion" : "3.0", + "outputSchema" : [ + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float32", + "formattedType" : "MultiArray (Float32)", + "shortDescription" : "The latent embeddings from the unet model from the input image.", + "shape" : "[]", + "name" : "latent", + "type" : "MultiArray" + } + ], + "version" : "diffusers\/stable-diffusion-xl-base-1.0", + "modelParameters" : [ + + ], + "author" : "Please refer to the Model Card available at huggingface.co\/diffusers\/stable-diffusion-xl-base-1.0", + "specificationVersion" : 7, + "storagePrecision" : "Float16", + "license" : "OpenRAIL (https:\/\/huggingface.co\/spaces\/CompVis\/stable-diffusion-license)", + "mlProgramOperationTypeHistogram" : { + "Pad" : 3, + "Ios16.cast" : 1, + "Ios16.mul" : 2, + "Ios16.sqrt" : 22, + "Ios16.sub" : 22, + "Transpose" : 6, + "Ios16.conv" : 28, + "Ios16.add" : 34, + "Ios16.linear" : 4, + "Ios16.matmul" : 2, + "Ios16.realDiv" : 22, + "Ios16.reduceMean" : 44, + "Ios16.softmax" : 1, + "Ios16.batchNorm" : 21, + "Ios16.square" : 22, + "Ios16.reshape" : 49, + "Ios16.silu" : 21 + }, + "computePrecision" : "Mixed (Float16, Float32, Int32)", + "isUpdatable" : "0", + "availability" : { + "macOS" : "13.0", + "tvOS" : "16.0", + "watchOS" : "9.0", + "iOS" : "16.0", + "macCatalyst" : "16.0" + }, + "modelType" : { + "name" : "MLModelType_mlProgram" + }, + "inputSchema" : [ + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 1 × 3 × 1024 × 1024)", + "shortDescription" : "The input image to base the initial latents on normalized to range [-1, 1]", + "shape" : "[1, 3, 1024, 1024]", + "name" : "z", + "type" : "MultiArray" + } + ], + "userDefinedMetadata" : { + "com.github.apple.coremltools.version" : "7.0b1", + "com.github.apple.coremltools.source" : "torch==2.0.1+cu117" + }, + "generatedClassName" : "Stable_Diffusion_version_diffusers_stable_diffusion_xl_base_1_0_vae_encoder", + "method" : "predict" + } +] \ No newline at end of file diff --git 
a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEEncoder.mlmodelc/model.mil b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEEncoder.mlmodelc/model.mil new file mode 100644 index 0000000000000000000000000000000000000000..f7ba6ef5d577f3010bf818be77b5a26f77063e7c --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEEncoder.mlmodelc/model.mil @@ -0,0 +1,740 @@ +program(1.0) +[buildInfo = dict, tensor>({{"coremlc-component-MIL", "5.33.4"}, {"coremlc-version", "1839.0.0"}, {"coremltools-component-torch", "2.0.1+cu117"}, {"coremltools-version", "7.0b1"}})] +{ + func main(tensor z) { + tensor var_15 = const()[name = tensor("op_15"), val = tensor(1)]; + tensor var_33 = const()[name = tensor("op_33"), val = tensor([1, 1])]; + tensor var_35 = const()[name = tensor("op_35"), val = tensor([1, 1])]; + tensor input_1_pad_type_0 = const()[name = tensor("input_1_pad_type_0"), val = tensor("custom")]; + tensor input_1_pad_0 = const()[name = tensor("input_1_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_conv_in_weight_to_fp16 = const()[name = tensor("encoder_conv_in_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(64)))]; + tensor encoder_conv_in_bias_to_fp16 = const()[name = tensor("encoder_conv_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7040)))]; + tensor input_1_cast = conv(bias = encoder_conv_in_bias_to_fp16, dilations = var_35, groups = var_15, pad = input_1_pad_0, pad_type = input_1_pad_type_0, strides = var_33, weight = encoder_conv_in_weight_to_fp16, x = z)[name = tensor("input_1_cast")]; + tensor reshape_0_shape_0 = const()[name = tensor("reshape_0_shape_0"), val = tensor([1, 32, 4, 1024, 1024])]; + tensor reshape_0_cast = reshape(shape = reshape_0_shape_0, x = input_1_cast)[name = tensor("reshape_0_cast")]; + tensor reduce_mean_0_axes_0 = const()[name = tensor("reduce_mean_0_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_0_keep_dims_0 = const()[name = tensor("reduce_mean_0_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_0_cast = reduce_mean(axes = reduce_mean_0_axes_0, keep_dims = reduce_mean_0_keep_dims_0, x = reshape_0_cast)[name = tensor("reduce_mean_0_cast")]; + tensor sub_0_cast = sub(x = reshape_0_cast, y = reduce_mean_0_cast)[name = tensor("sub_0_cast")]; + tensor square_0_cast = square(x = sub_0_cast)[name = tensor("square_0_cast")]; + tensor reduce_mean_2_axes_0 = const()[name = tensor("reduce_mean_2_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_2_keep_dims_0 = const()[name = tensor("reduce_mean_2_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_2_cast = reduce_mean(axes = reduce_mean_2_axes_0, keep_dims = reduce_mean_2_keep_dims_0, x = square_0_cast)[name = tensor("reduce_mean_2_cast")]; + tensor add_0_y_0_to_fp16 = const()[name = tensor("add_0_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_0_cast = add(x = reduce_mean_2_cast, y = add_0_y_0_to_fp16)[name = tensor("add_0_cast")]; + tensor sqrt_0_cast = sqrt(x = add_0_cast)[name = tensor("sqrt_0_cast")]; + tensor real_div_0_cast = real_div(x = sub_0_cast, y = sqrt_0_cast)[name = tensor("real_div_0_cast")]; + tensor reshape_1_shape_0 = const()[name = tensor("reshape_1_shape_0"), val = tensor([1, 128, 1024, 1024])]; + tensor reshape_1_cast = reshape(shape = reshape_1_shape_0, x = real_div_0_cast)[name = tensor("reshape_1_cast")]; + tensor add_1_mean_0_to_fp16 = const()[name = tensor("add_1_mean_0_to_fp16"), 
val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7360)))]; + tensor add_1_variance_0_to_fp16 = const()[name = tensor("add_1_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7680)))]; + tensor add_1_gamma_0_to_fp16 = const()[name = tensor("add_1_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8000)))]; + tensor add_1_beta_0_to_fp16 = const()[name = tensor("add_1_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8320)))]; + tensor add_1_epsilon_0_to_fp16 = const()[name = tensor("add_1_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_1_cast = batch_norm(beta = add_1_beta_0_to_fp16, epsilon = add_1_epsilon_0_to_fp16, gamma = add_1_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_1_cast)[name = tensor("add_1_cast")]; + tensor input_5_cast = silu(x = add_1_cast)[name = tensor("input_5_cast")]; + tensor var_54 = const()[name = tensor("op_54"), val = tensor([1, 1])]; + tensor var_56 = const()[name = tensor("op_56"), val = tensor([1, 1])]; + tensor input_7_pad_type_0 = const()[name = tensor("input_7_pad_type_0"), val = tensor("custom")]; + tensor input_7_pad_0 = const()[name = tensor("input_7_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_down_blocks_0_resnets_0_conv1_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_0_resnets_0_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8640)))]; + tensor encoder_down_blocks_0_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_0_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(303616)))]; + tensor input_7_cast = conv(bias = encoder_down_blocks_0_resnets_0_conv1_bias_to_fp16, dilations = var_56, groups = var_15, pad = input_7_pad_0, pad_type = input_7_pad_type_0, strides = var_54, weight = encoder_down_blocks_0_resnets_0_conv1_weight_to_fp16, x = input_5_cast)[name = tensor("input_7_cast")]; + tensor reshape_4_shape_0 = const()[name = tensor("reshape_4_shape_0"), val = tensor([1, 32, 4, 1024, 1024])]; + tensor reshape_4_cast = reshape(shape = reshape_4_shape_0, x = input_7_cast)[name = tensor("reshape_4_cast")]; + tensor reduce_mean_3_axes_0 = const()[name = tensor("reduce_mean_3_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_3_keep_dims_0 = const()[name = tensor("reduce_mean_3_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_3_cast = reduce_mean(axes = reduce_mean_3_axes_0, keep_dims = reduce_mean_3_keep_dims_0, x = reshape_4_cast)[name = tensor("reduce_mean_3_cast")]; + tensor sub_2_cast = sub(x = reshape_4_cast, y = reduce_mean_3_cast)[name = tensor("sub_2_cast")]; + tensor square_1_cast = square(x = sub_2_cast)[name = tensor("square_1_cast")]; + tensor reduce_mean_5_axes_0 = const()[name = tensor("reduce_mean_5_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_5_keep_dims_0 = const()[name = tensor("reduce_mean_5_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_5_cast = reduce_mean(axes = reduce_mean_5_axes_0, keep_dims = reduce_mean_5_keep_dims_0, x = square_1_cast)[name = tensor("reduce_mean_5_cast")]; + tensor add_2_y_0_to_fp16 = const()[name = tensor("add_2_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_2_cast = add(x = reduce_mean_5_cast, y = add_2_y_0_to_fp16)[name = 
tensor("add_2_cast")]; + tensor sqrt_1_cast = sqrt(x = add_2_cast)[name = tensor("sqrt_1_cast")]; + tensor real_div_1_cast = real_div(x = sub_2_cast, y = sqrt_1_cast)[name = tensor("real_div_1_cast")]; + tensor reshape_5_shape_0 = const()[name = tensor("reshape_5_shape_0"), val = tensor([1, 128, 1024, 1024])]; + tensor reshape_5_cast = reshape(shape = reshape_5_shape_0, x = real_div_1_cast)[name = tensor("reshape_5_cast")]; + tensor add_3_gamma_0_to_fp16 = const()[name = tensor("add_3_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(303936)))]; + tensor add_3_beta_0_to_fp16 = const()[name = tensor("add_3_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(304256)))]; + tensor add_3_epsilon_0_to_fp16 = const()[name = tensor("add_3_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_3_cast = batch_norm(beta = add_3_beta_0_to_fp16, epsilon = add_3_epsilon_0_to_fp16, gamma = add_3_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_5_cast)[name = tensor("add_3_cast")]; + tensor input_11_cast = silu(x = add_3_cast)[name = tensor("input_11_cast")]; + tensor var_66 = const()[name = tensor("op_66"), val = tensor([1, 1])]; + tensor var_68 = const()[name = tensor("op_68"), val = tensor([1, 1])]; + tensor hidden_states_1_pad_type_0 = const()[name = tensor("hidden_states_1_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_1_pad_0 = const()[name = tensor("hidden_states_1_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_down_blocks_0_resnets_0_conv2_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_0_resnets_0_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(304576)))]; + tensor encoder_down_blocks_0_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_0_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(599552)))]; + tensor hidden_states_1_cast = conv(bias = encoder_down_blocks_0_resnets_0_conv2_bias_to_fp16, dilations = var_68, groups = var_15, pad = hidden_states_1_pad_0, pad_type = hidden_states_1_pad_type_0, strides = var_66, weight = encoder_down_blocks_0_resnets_0_conv2_weight_to_fp16, x = input_11_cast)[name = tensor("hidden_states_1_cast")]; + tensor var_71_cast = add(x = input_1_cast, y = hidden_states_1_cast)[name = tensor("op_71_cast")]; + tensor reshape_8_shape_0 = const()[name = tensor("reshape_8_shape_0"), val = tensor([1, 32, 4, 1024, 1024])]; + tensor reshape_8_cast = reshape(shape = reshape_8_shape_0, x = var_71_cast)[name = tensor("reshape_8_cast")]; + tensor reduce_mean_6_axes_0 = const()[name = tensor("reduce_mean_6_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_6_keep_dims_0 = const()[name = tensor("reduce_mean_6_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_6_cast = reduce_mean(axes = reduce_mean_6_axes_0, keep_dims = reduce_mean_6_keep_dims_0, x = reshape_8_cast)[name = tensor("reduce_mean_6_cast")]; + tensor sub_4_cast = sub(x = reshape_8_cast, y = reduce_mean_6_cast)[name = tensor("sub_4_cast")]; + tensor square_2_cast = square(x = sub_4_cast)[name = tensor("square_2_cast")]; + tensor reduce_mean_8_axes_0 = const()[name = tensor("reduce_mean_8_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_8_keep_dims_0 = const()[name = tensor("reduce_mean_8_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_8_cast = 
reduce_mean(axes = reduce_mean_8_axes_0, keep_dims = reduce_mean_8_keep_dims_0, x = square_2_cast)[name = tensor("reduce_mean_8_cast")]; + tensor add_4_y_0_to_fp16 = const()[name = tensor("add_4_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_4_cast = add(x = reduce_mean_8_cast, y = add_4_y_0_to_fp16)[name = tensor("add_4_cast")]; + tensor sqrt_2_cast = sqrt(x = add_4_cast)[name = tensor("sqrt_2_cast")]; + tensor real_div_2_cast = real_div(x = sub_4_cast, y = sqrt_2_cast)[name = tensor("real_div_2_cast")]; + tensor reshape_9_shape_0 = const()[name = tensor("reshape_9_shape_0"), val = tensor([1, 128, 1024, 1024])]; + tensor reshape_9_cast = reshape(shape = reshape_9_shape_0, x = real_div_2_cast)[name = tensor("reshape_9_cast")]; + tensor add_5_gamma_0_to_fp16 = const()[name = tensor("add_5_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(599872)))]; + tensor add_5_beta_0_to_fp16 = const()[name = tensor("add_5_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(600192)))]; + tensor add_5_epsilon_0_to_fp16 = const()[name = tensor("add_5_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_5_cast = batch_norm(beta = add_5_beta_0_to_fp16, epsilon = add_5_epsilon_0_to_fp16, gamma = add_5_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_9_cast)[name = tensor("add_5_cast")]; + tensor input_19_cast = silu(x = add_5_cast)[name = tensor("input_19_cast")]; + tensor var_84 = const()[name = tensor("op_84"), val = tensor([1, 1])]; + tensor var_86 = const()[name = tensor("op_86"), val = tensor([1, 1])]; + tensor input_21_pad_type_0 = const()[name = tensor("input_21_pad_type_0"), val = tensor("custom")]; + tensor input_21_pad_0 = const()[name = tensor("input_21_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_down_blocks_0_resnets_1_conv1_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_0_resnets_1_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(600512)))]; + tensor encoder_down_blocks_0_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_0_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(895488)))]; + tensor input_21_cast = conv(bias = encoder_down_blocks_0_resnets_1_conv1_bias_to_fp16, dilations = var_86, groups = var_15, pad = input_21_pad_0, pad_type = input_21_pad_type_0, strides = var_84, weight = encoder_down_blocks_0_resnets_1_conv1_weight_to_fp16, x = input_19_cast)[name = tensor("input_21_cast")]; + tensor reshape_12_shape_0 = const()[name = tensor("reshape_12_shape_0"), val = tensor([1, 32, 4, 1024, 1024])]; + tensor reshape_12_cast = reshape(shape = reshape_12_shape_0, x = input_21_cast)[name = tensor("reshape_12_cast")]; + tensor reduce_mean_9_axes_0 = const()[name = tensor("reduce_mean_9_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_9_keep_dims_0 = const()[name = tensor("reduce_mean_9_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_9_cast = reduce_mean(axes = reduce_mean_9_axes_0, keep_dims = reduce_mean_9_keep_dims_0, x = reshape_12_cast)[name = tensor("reduce_mean_9_cast")]; + tensor sub_6_cast = sub(x = reshape_12_cast, y = reduce_mean_9_cast)[name = tensor("sub_6_cast")]; + tensor square_3_cast = square(x = sub_6_cast)[name = tensor("square_3_cast")]; + tensor reduce_mean_11_axes_0 = const()[name = tensor("reduce_mean_11_axes_0"), val 
= tensor([2, 3, 4])]; + tensor reduce_mean_11_keep_dims_0 = const()[name = tensor("reduce_mean_11_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_11_cast = reduce_mean(axes = reduce_mean_11_axes_0, keep_dims = reduce_mean_11_keep_dims_0, x = square_3_cast)[name = tensor("reduce_mean_11_cast")]; + tensor add_6_y_0_to_fp16 = const()[name = tensor("add_6_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_6_cast = add(x = reduce_mean_11_cast, y = add_6_y_0_to_fp16)[name = tensor("add_6_cast")]; + tensor sqrt_3_cast = sqrt(x = add_6_cast)[name = tensor("sqrt_3_cast")]; + tensor real_div_3_cast = real_div(x = sub_6_cast, y = sqrt_3_cast)[name = tensor("real_div_3_cast")]; + tensor reshape_13_shape_0 = const()[name = tensor("reshape_13_shape_0"), val = tensor([1, 128, 1024, 1024])]; + tensor reshape_13_cast = reshape(shape = reshape_13_shape_0, x = real_div_3_cast)[name = tensor("reshape_13_cast")]; + tensor add_7_gamma_0_to_fp16 = const()[name = tensor("add_7_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(895808)))]; + tensor add_7_beta_0_to_fp16 = const()[name = tensor("add_7_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(896128)))]; + tensor add_7_epsilon_0_to_fp16 = const()[name = tensor("add_7_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_7_cast = batch_norm(beta = add_7_beta_0_to_fp16, epsilon = add_7_epsilon_0_to_fp16, gamma = add_7_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_13_cast)[name = tensor("add_7_cast")]; + tensor input_25_cast = silu(x = add_7_cast)[name = tensor("input_25_cast")]; + tensor var_96 = const()[name = tensor("op_96"), val = tensor([1, 1])]; + tensor var_98 = const()[name = tensor("op_98"), val = tensor([1, 1])]; + tensor hidden_states_3_pad_type_0 = const()[name = tensor("hidden_states_3_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_3_pad_0 = const()[name = tensor("hidden_states_3_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_down_blocks_0_resnets_1_conv2_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_0_resnets_1_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(896448)))]; + tensor encoder_down_blocks_0_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_0_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1191424)))]; + tensor hidden_states_3_cast = conv(bias = encoder_down_blocks_0_resnets_1_conv2_bias_to_fp16, dilations = var_98, groups = var_15, pad = hidden_states_3_pad_0, pad_type = hidden_states_3_pad_type_0, strides = var_96, weight = encoder_down_blocks_0_resnets_1_conv2_weight_to_fp16, x = input_25_cast)[name = tensor("hidden_states_3_cast")]; + tensor var_101_cast = add(x = var_71_cast, y = hidden_states_3_cast)[name = tensor("op_101_cast")]; + tensor hidden_states_7_pad_0 = const()[name = tensor("hidden_states_7_pad_0"), val = tensor([0, 0, 0, 0, 0, 1, 0, 1])]; + tensor hidden_states_7_mode_0 = const()[name = tensor("hidden_states_7_mode_0"), val = tensor("constant")]; + tensor hidden_states_7_constant_val_0_to_fp16 = const()[name = tensor("hidden_states_7_constant_val_0_to_fp16"), val = tensor(0x0p+0)]; + tensor hidden_states_7_cast = pad(constant_val = hidden_states_7_constant_val_0_to_fp16, mode = hidden_states_7_mode_0, pad = hidden_states_7_pad_0, x = var_101_cast)[name = 
tensor("hidden_states_7_cast")]; + tensor var_109 = const()[name = tensor("op_109"), val = tensor([2, 2])]; + tensor var_111 = const()[name = tensor("op_111"), val = tensor([1, 1])]; + tensor input_29_pad_type_0 = const()[name = tensor("input_29_pad_type_0"), val = tensor("custom")]; + tensor input_29_pad_0 = const()[name = tensor("input_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor encoder_down_blocks_0_downsamplers_0_conv_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_0_downsamplers_0_conv_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1191744)))]; + tensor encoder_down_blocks_0_downsamplers_0_conv_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_0_downsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1486720)))]; + tensor input_29_cast = conv(bias = encoder_down_blocks_0_downsamplers_0_conv_bias_to_fp16, dilations = var_111, groups = var_15, pad = input_29_pad_0, pad_type = input_29_pad_type_0, strides = var_109, weight = encoder_down_blocks_0_downsamplers_0_conv_weight_to_fp16, x = hidden_states_7_cast)[name = tensor("input_29_cast")]; + tensor reshape_16_shape_0 = const()[name = tensor("reshape_16_shape_0"), val = tensor([1, 32, 4, 512, 512])]; + tensor reshape_16_cast = reshape(shape = reshape_16_shape_0, x = input_29_cast)[name = tensor("reshape_16_cast")]; + tensor reduce_mean_12_axes_0 = const()[name = tensor("reduce_mean_12_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_12_keep_dims_0 = const()[name = tensor("reduce_mean_12_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_12_cast = reduce_mean(axes = reduce_mean_12_axes_0, keep_dims = reduce_mean_12_keep_dims_0, x = reshape_16_cast)[name = tensor("reduce_mean_12_cast")]; + tensor sub_8_cast = sub(x = reshape_16_cast, y = reduce_mean_12_cast)[name = tensor("sub_8_cast")]; + tensor square_4_cast = square(x = sub_8_cast)[name = tensor("square_4_cast")]; + tensor reduce_mean_14_axes_0 = const()[name = tensor("reduce_mean_14_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_14_keep_dims_0 = const()[name = tensor("reduce_mean_14_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_14_cast = reduce_mean(axes = reduce_mean_14_axes_0, keep_dims = reduce_mean_14_keep_dims_0, x = square_4_cast)[name = tensor("reduce_mean_14_cast")]; + tensor add_8_y_0_to_fp16 = const()[name = tensor("add_8_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_8_cast = add(x = reduce_mean_14_cast, y = add_8_y_0_to_fp16)[name = tensor("add_8_cast")]; + tensor sqrt_4_cast = sqrt(x = add_8_cast)[name = tensor("sqrt_4_cast")]; + tensor real_div_4_cast = real_div(x = sub_8_cast, y = sqrt_4_cast)[name = tensor("real_div_4_cast")]; + tensor reshape_17_shape_0 = const()[name = tensor("reshape_17_shape_0"), val = tensor([1, 128, 512, 512])]; + tensor reshape_17_cast = reshape(shape = reshape_17_shape_0, x = real_div_4_cast)[name = tensor("reshape_17_cast")]; + tensor add_9_gamma_0_to_fp16 = const()[name = tensor("add_9_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1487040)))]; + tensor add_9_beta_0_to_fp16 = const()[name = tensor("add_9_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1487360)))]; + tensor add_9_epsilon_0_to_fp16 = const()[name = tensor("add_9_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_9_cast = batch_norm(beta = add_9_beta_0_to_fp16, epsilon 
= add_9_epsilon_0_to_fp16, gamma = add_9_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_17_cast)[name = tensor("add_9_cast")]; + tensor input_33_cast = silu(x = add_9_cast)[name = tensor("input_33_cast")]; + tensor var_131 = const()[name = tensor("op_131"), val = tensor([1, 1])]; + tensor var_133 = const()[name = tensor("op_133"), val = tensor([1, 1])]; + tensor input_35_pad_type_0 = const()[name = tensor("input_35_pad_type_0"), val = tensor("custom")]; + tensor input_35_pad_0 = const()[name = tensor("input_35_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_down_blocks_1_resnets_0_conv1_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_1_resnets_0_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1487680)))]; + tensor encoder_down_blocks_1_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_1_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2077568)))]; + tensor input_35_cast = conv(bias = encoder_down_blocks_1_resnets_0_conv1_bias_to_fp16, dilations = var_133, groups = var_15, pad = input_35_pad_0, pad_type = input_35_pad_type_0, strides = var_131, weight = encoder_down_blocks_1_resnets_0_conv1_weight_to_fp16, x = input_33_cast)[name = tensor("input_35_cast")]; + tensor reshape_20_shape_0 = const()[name = tensor("reshape_20_shape_0"), val = tensor([1, 32, 8, 512, 512])]; + tensor reshape_20_cast = reshape(shape = reshape_20_shape_0, x = input_35_cast)[name = tensor("reshape_20_cast")]; + tensor reduce_mean_15_axes_0 = const()[name = tensor("reduce_mean_15_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_15_keep_dims_0 = const()[name = tensor("reduce_mean_15_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_15_cast = reduce_mean(axes = reduce_mean_15_axes_0, keep_dims = reduce_mean_15_keep_dims_0, x = reshape_20_cast)[name = tensor("reduce_mean_15_cast")]; + tensor sub_10_cast = sub(x = reshape_20_cast, y = reduce_mean_15_cast)[name = tensor("sub_10_cast")]; + tensor square_5_cast = square(x = sub_10_cast)[name = tensor("square_5_cast")]; + tensor reduce_mean_17_axes_0 = const()[name = tensor("reduce_mean_17_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_17_keep_dims_0 = const()[name = tensor("reduce_mean_17_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_17_cast = reduce_mean(axes = reduce_mean_17_axes_0, keep_dims = reduce_mean_17_keep_dims_0, x = square_5_cast)[name = tensor("reduce_mean_17_cast")]; + tensor add_10_y_0_to_fp16 = const()[name = tensor("add_10_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_10_cast = add(x = reduce_mean_17_cast, y = add_10_y_0_to_fp16)[name = tensor("add_10_cast")]; + tensor sqrt_5_cast = sqrt(x = add_10_cast)[name = tensor("sqrt_5_cast")]; + tensor real_div_5_cast = real_div(x = sub_10_cast, y = sqrt_5_cast)[name = tensor("real_div_5_cast")]; + tensor reshape_21_shape_0 = const()[name = tensor("reshape_21_shape_0"), val = tensor([1, 256, 512, 512])]; + tensor reshape_21_cast = reshape(shape = reshape_21_shape_0, x = real_div_5_cast)[name = tensor("reshape_21_cast")]; + tensor add_11_mean_0_to_fp16 = const()[name = tensor("add_11_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2078144)))]; + tensor add_11_variance_0_to_fp16 = const()[name = tensor("add_11_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(2078720)))]; + tensor add_11_gamma_0_to_fp16 = const()[name = tensor("add_11_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2079296)))]; + tensor add_11_beta_0_to_fp16 = const()[name = tensor("add_11_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2079872)))]; + tensor add_11_epsilon_0_to_fp16 = const()[name = tensor("add_11_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_11_cast = batch_norm(beta = add_11_beta_0_to_fp16, epsilon = add_11_epsilon_0_to_fp16, gamma = add_11_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_21_cast)[name = tensor("add_11_cast")]; + tensor input_39_cast = silu(x = add_11_cast)[name = tensor("input_39_cast")]; + tensor var_143 = const()[name = tensor("op_143"), val = tensor([1, 1])]; + tensor var_145 = const()[name = tensor("op_145"), val = tensor([1, 1])]; + tensor hidden_states_9_pad_type_0 = const()[name = tensor("hidden_states_9_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_9_pad_0 = const()[name = tensor("hidden_states_9_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_down_blocks_1_resnets_0_conv2_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_1_resnets_0_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2080448)))]; + tensor encoder_down_blocks_1_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_1_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(3260160)))]; + tensor hidden_states_9_cast = conv(bias = encoder_down_blocks_1_resnets_0_conv2_bias_to_fp16, dilations = var_145, groups = var_15, pad = hidden_states_9_pad_0, pad_type = hidden_states_9_pad_type_0, strides = var_143, weight = encoder_down_blocks_1_resnets_0_conv2_weight_to_fp16, x = input_39_cast)[name = tensor("hidden_states_9_cast")]; + tensor var_150 = const()[name = tensor("op_150"), val = tensor([1, 1])]; + tensor var_152 = const()[name = tensor("op_152"), val = tensor([1, 1])]; + tensor input_tensor_1_pad_type_0 = const()[name = tensor("input_tensor_1_pad_type_0"), val = tensor("custom")]; + tensor input_tensor_1_pad_0 = const()[name = tensor("input_tensor_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor encoder_down_blocks_1_resnets_0_conv_shortcut_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_1_resnets_0_conv_shortcut_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(3260736)))]; + tensor encoder_down_blocks_1_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_1_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(3326336)))]; + tensor input_tensor_1_cast = conv(bias = encoder_down_blocks_1_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_152, groups = var_15, pad = input_tensor_1_pad_0, pad_type = input_tensor_1_pad_type_0, strides = var_150, weight = encoder_down_blocks_1_resnets_0_conv_shortcut_weight_to_fp16, x = input_29_cast)[name = tensor("input_tensor_1_cast")]; + tensor var_155_cast = add(x = input_tensor_1_cast, y = hidden_states_9_cast)[name = tensor("op_155_cast")]; + tensor reshape_24_shape_0 = const()[name = tensor("reshape_24_shape_0"), val = tensor([1, 32, 8, 512, 512])]; + tensor reshape_24_cast = reshape(shape = 
reshape_24_shape_0, x = var_155_cast)[name = tensor("reshape_24_cast")]; + tensor reduce_mean_18_axes_0 = const()[name = tensor("reduce_mean_18_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_18_keep_dims_0 = const()[name = tensor("reduce_mean_18_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_18_cast = reduce_mean(axes = reduce_mean_18_axes_0, keep_dims = reduce_mean_18_keep_dims_0, x = reshape_24_cast)[name = tensor("reduce_mean_18_cast")]; + tensor sub_12_cast = sub(x = reshape_24_cast, y = reduce_mean_18_cast)[name = tensor("sub_12_cast")]; + tensor square_6_cast = square(x = sub_12_cast)[name = tensor("square_6_cast")]; + tensor reduce_mean_20_axes_0 = const()[name = tensor("reduce_mean_20_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_20_keep_dims_0 = const()[name = tensor("reduce_mean_20_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_20_cast = reduce_mean(axes = reduce_mean_20_axes_0, keep_dims = reduce_mean_20_keep_dims_0, x = square_6_cast)[name = tensor("reduce_mean_20_cast")]; + tensor add_12_y_0_to_fp16 = const()[name = tensor("add_12_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_12_cast = add(x = reduce_mean_20_cast, y = add_12_y_0_to_fp16)[name = tensor("add_12_cast")]; + tensor sqrt_6_cast = sqrt(x = add_12_cast)[name = tensor("sqrt_6_cast")]; + tensor real_div_6_cast = real_div(x = sub_12_cast, y = sqrt_6_cast)[name = tensor("real_div_6_cast")]; + tensor reshape_25_shape_0 = const()[name = tensor("reshape_25_shape_0"), val = tensor([1, 256, 512, 512])]; + tensor reshape_25_cast = reshape(shape = reshape_25_shape_0, x = real_div_6_cast)[name = tensor("reshape_25_cast")]; + tensor add_13_gamma_0_to_fp16 = const()[name = tensor("add_13_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(3326912)))]; + tensor add_13_beta_0_to_fp16 = const()[name = tensor("add_13_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(3327488)))]; + tensor add_13_epsilon_0_to_fp16 = const()[name = tensor("add_13_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_13_cast = batch_norm(beta = add_13_beta_0_to_fp16, epsilon = add_13_epsilon_0_to_fp16, gamma = add_13_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_25_cast)[name = tensor("add_13_cast")]; + tensor input_47_cast = silu(x = add_13_cast)[name = tensor("input_47_cast")]; + tensor var_168 = const()[name = tensor("op_168"), val = tensor([1, 1])]; + tensor var_170 = const()[name = tensor("op_170"), val = tensor([1, 1])]; + tensor input_49_pad_type_0 = const()[name = tensor("input_49_pad_type_0"), val = tensor("custom")]; + tensor input_49_pad_0 = const()[name = tensor("input_49_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_down_blocks_1_resnets_1_conv1_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_1_resnets_1_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(3328064)))]; + tensor encoder_down_blocks_1_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_1_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(4507776)))]; + tensor input_49_cast = conv(bias = encoder_down_blocks_1_resnets_1_conv1_bias_to_fp16, dilations = var_170, groups = var_15, pad = input_49_pad_0, pad_type = input_49_pad_type_0, strides = var_168, weight = 
encoder_down_blocks_1_resnets_1_conv1_weight_to_fp16, x = input_47_cast)[name = tensor("input_49_cast")]; + tensor reshape_28_shape_0 = const()[name = tensor("reshape_28_shape_0"), val = tensor([1, 32, 8, 512, 512])]; + tensor reshape_28_cast = reshape(shape = reshape_28_shape_0, x = input_49_cast)[name = tensor("reshape_28_cast")]; + tensor reduce_mean_21_axes_0 = const()[name = tensor("reduce_mean_21_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_21_keep_dims_0 = const()[name = tensor("reduce_mean_21_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_21_cast = reduce_mean(axes = reduce_mean_21_axes_0, keep_dims = reduce_mean_21_keep_dims_0, x = reshape_28_cast)[name = tensor("reduce_mean_21_cast")]; + tensor sub_14_cast = sub(x = reshape_28_cast, y = reduce_mean_21_cast)[name = tensor("sub_14_cast")]; + tensor square_7_cast = square(x = sub_14_cast)[name = tensor("square_7_cast")]; + tensor reduce_mean_23_axes_0 = const()[name = tensor("reduce_mean_23_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_23_keep_dims_0 = const()[name = tensor("reduce_mean_23_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_23_cast = reduce_mean(axes = reduce_mean_23_axes_0, keep_dims = reduce_mean_23_keep_dims_0, x = square_7_cast)[name = tensor("reduce_mean_23_cast")]; + tensor add_14_y_0_to_fp16 = const()[name = tensor("add_14_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_14_cast = add(x = reduce_mean_23_cast, y = add_14_y_0_to_fp16)[name = tensor("add_14_cast")]; + tensor sqrt_7_cast = sqrt(x = add_14_cast)[name = tensor("sqrt_7_cast")]; + tensor real_div_7_cast = real_div(x = sub_14_cast, y = sqrt_7_cast)[name = tensor("real_div_7_cast")]; + tensor reshape_29_shape_0 = const()[name = tensor("reshape_29_shape_0"), val = tensor([1, 256, 512, 512])]; + tensor reshape_29_cast = reshape(shape = reshape_29_shape_0, x = real_div_7_cast)[name = tensor("reshape_29_cast")]; + tensor add_15_gamma_0_to_fp16 = const()[name = tensor("add_15_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(4508352)))]; + tensor add_15_beta_0_to_fp16 = const()[name = tensor("add_15_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(4508928)))]; + tensor add_15_epsilon_0_to_fp16 = const()[name = tensor("add_15_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_15_cast = batch_norm(beta = add_15_beta_0_to_fp16, epsilon = add_15_epsilon_0_to_fp16, gamma = add_15_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_29_cast)[name = tensor("add_15_cast")]; + tensor input_53_cast = silu(x = add_15_cast)[name = tensor("input_53_cast")]; + tensor var_180 = const()[name = tensor("op_180"), val = tensor([1, 1])]; + tensor var_182 = const()[name = tensor("op_182"), val = tensor([1, 1])]; + tensor hidden_states_11_pad_type_0 = const()[name = tensor("hidden_states_11_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_11_pad_0 = const()[name = tensor("hidden_states_11_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_down_blocks_1_resnets_1_conv2_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_1_resnets_1_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(4509504)))]; + tensor encoder_down_blocks_1_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_1_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(5689216)))]; + tensor hidden_states_11_cast = conv(bias = encoder_down_blocks_1_resnets_1_conv2_bias_to_fp16, dilations = var_182, groups = var_15, pad = hidden_states_11_pad_0, pad_type = hidden_states_11_pad_type_0, strides = var_180, weight = encoder_down_blocks_1_resnets_1_conv2_weight_to_fp16, x = input_53_cast)[name = tensor("hidden_states_11_cast")]; + tensor var_185_cast = add(x = var_155_cast, y = hidden_states_11_cast)[name = tensor("op_185_cast")]; + tensor hidden_states_15_pad_0 = const()[name = tensor("hidden_states_15_pad_0"), val = tensor([0, 0, 0, 0, 0, 1, 0, 1])]; + tensor hidden_states_15_mode_0 = const()[name = tensor("hidden_states_15_mode_0"), val = tensor("constant")]; + tensor hidden_states_15_constant_val_0_to_fp16 = const()[name = tensor("hidden_states_15_constant_val_0_to_fp16"), val = tensor(0x0p+0)]; + tensor hidden_states_15_cast = pad(constant_val = hidden_states_15_constant_val_0_to_fp16, mode = hidden_states_15_mode_0, pad = hidden_states_15_pad_0, x = var_185_cast)[name = tensor("hidden_states_15_cast")]; + tensor var_193 = const()[name = tensor("op_193"), val = tensor([2, 2])]; + tensor var_195 = const()[name = tensor("op_195"), val = tensor([1, 1])]; + tensor input_57_pad_type_0 = const()[name = tensor("input_57_pad_type_0"), val = tensor("custom")]; + tensor input_57_pad_0 = const()[name = tensor("input_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor encoder_down_blocks_1_downsamplers_0_conv_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_1_downsamplers_0_conv_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(5689792)))]; + tensor encoder_down_blocks_1_downsamplers_0_conv_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_1_downsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(6869504)))]; + tensor input_57_cast = conv(bias = encoder_down_blocks_1_downsamplers_0_conv_bias_to_fp16, dilations = var_195, groups = var_15, pad = input_57_pad_0, pad_type = input_57_pad_type_0, strides = var_193, weight = encoder_down_blocks_1_downsamplers_0_conv_weight_to_fp16, x = hidden_states_15_cast)[name = tensor("input_57_cast")]; + tensor reshape_32_shape_0 = const()[name = tensor("reshape_32_shape_0"), val = tensor([1, 32, 8, 256, 256])]; + tensor reshape_32_cast = reshape(shape = reshape_32_shape_0, x = input_57_cast)[name = tensor("reshape_32_cast")]; + tensor reduce_mean_24_axes_0 = const()[name = tensor("reduce_mean_24_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_24_keep_dims_0 = const()[name = tensor("reduce_mean_24_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_24_cast = reduce_mean(axes = reduce_mean_24_axes_0, keep_dims = reduce_mean_24_keep_dims_0, x = reshape_32_cast)[name = tensor("reduce_mean_24_cast")]; + tensor sub_16_cast = sub(x = reshape_32_cast, y = reduce_mean_24_cast)[name = tensor("sub_16_cast")]; + tensor square_8_cast = square(x = sub_16_cast)[name = tensor("square_8_cast")]; + tensor reduce_mean_26_axes_0 = const()[name = tensor("reduce_mean_26_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_26_keep_dims_0 = const()[name = tensor("reduce_mean_26_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_26_cast = reduce_mean(axes = reduce_mean_26_axes_0, keep_dims = reduce_mean_26_keep_dims_0, x = square_8_cast)[name = tensor("reduce_mean_26_cast")]; + tensor add_16_y_0_to_fp16 = const()[name = 
tensor("add_16_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_16_cast = add(x = reduce_mean_26_cast, y = add_16_y_0_to_fp16)[name = tensor("add_16_cast")]; + tensor sqrt_8_cast = sqrt(x = add_16_cast)[name = tensor("sqrt_8_cast")]; + tensor real_div_8_cast = real_div(x = sub_16_cast, y = sqrt_8_cast)[name = tensor("real_div_8_cast")]; + tensor reshape_33_shape_0 = const()[name = tensor("reshape_33_shape_0"), val = tensor([1, 256, 256, 256])]; + tensor reshape_33_cast = reshape(shape = reshape_33_shape_0, x = real_div_8_cast)[name = tensor("reshape_33_cast")]; + tensor add_17_gamma_0_to_fp16 = const()[name = tensor("add_17_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(6870080)))]; + tensor add_17_beta_0_to_fp16 = const()[name = tensor("add_17_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(6870656)))]; + tensor add_17_epsilon_0_to_fp16 = const()[name = tensor("add_17_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_17_cast = batch_norm(beta = add_17_beta_0_to_fp16, epsilon = add_17_epsilon_0_to_fp16, gamma = add_17_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_33_cast)[name = tensor("add_17_cast")]; + tensor input_61_cast = silu(x = add_17_cast)[name = tensor("input_61_cast")]; + tensor var_215 = const()[name = tensor("op_215"), val = tensor([1, 1])]; + tensor var_217 = const()[name = tensor("op_217"), val = tensor([1, 1])]; + tensor input_63_pad_type_0 = const()[name = tensor("input_63_pad_type_0"), val = tensor("custom")]; + tensor input_63_pad_0 = const()[name = tensor("input_63_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_down_blocks_2_resnets_0_conv1_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_2_resnets_0_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(6871232)))]; + tensor encoder_down_blocks_2_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_2_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(9230592)))]; + tensor input_63_cast = conv(bias = encoder_down_blocks_2_resnets_0_conv1_bias_to_fp16, dilations = var_217, groups = var_15, pad = input_63_pad_0, pad_type = input_63_pad_type_0, strides = var_215, weight = encoder_down_blocks_2_resnets_0_conv1_weight_to_fp16, x = input_61_cast)[name = tensor("input_63_cast")]; + tensor reshape_36_shape_0 = const()[name = tensor("reshape_36_shape_0"), val = tensor([1, 32, 16, 256, 256])]; + tensor reshape_36_cast = reshape(shape = reshape_36_shape_0, x = input_63_cast)[name = tensor("reshape_36_cast")]; + tensor reduce_mean_27_axes_0 = const()[name = tensor("reduce_mean_27_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_27_keep_dims_0 = const()[name = tensor("reduce_mean_27_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_27_cast = reduce_mean(axes = reduce_mean_27_axes_0, keep_dims = reduce_mean_27_keep_dims_0, x = reshape_36_cast)[name = tensor("reduce_mean_27_cast")]; + tensor sub_18_cast = sub(x = reshape_36_cast, y = reduce_mean_27_cast)[name = tensor("sub_18_cast")]; + tensor square_9_cast = square(x = sub_18_cast)[name = tensor("square_9_cast")]; + tensor reduce_mean_29_axes_0 = const()[name = tensor("reduce_mean_29_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_29_keep_dims_0 = const()[name = tensor("reduce_mean_29_keep_dims_0"), val = tensor(true)]; 
+ tensor reduce_mean_29_cast = reduce_mean(axes = reduce_mean_29_axes_0, keep_dims = reduce_mean_29_keep_dims_0, x = square_9_cast)[name = tensor("reduce_mean_29_cast")]; + tensor add_18_y_0_to_fp16 = const()[name = tensor("add_18_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_18_cast = add(x = reduce_mean_29_cast, y = add_18_y_0_to_fp16)[name = tensor("add_18_cast")]; + tensor sqrt_9_cast = sqrt(x = add_18_cast)[name = tensor("sqrt_9_cast")]; + tensor real_div_9_cast = real_div(x = sub_18_cast, y = sqrt_9_cast)[name = tensor("real_div_9_cast")]; + tensor reshape_37_shape_0 = const()[name = tensor("reshape_37_shape_0"), val = tensor([1, 512, 256, 256])]; + tensor reshape_37_cast = reshape(shape = reshape_37_shape_0, x = real_div_9_cast)[name = tensor("reshape_37_cast")]; + tensor add_19_mean_0_to_fp16 = const()[name = tensor("add_19_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(9231680)))]; + tensor add_19_variance_0_to_fp16 = const()[name = tensor("add_19_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(9232768)))]; + tensor add_19_gamma_0_to_fp16 = const()[name = tensor("add_19_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(9233856)))]; + tensor add_19_beta_0_to_fp16 = const()[name = tensor("add_19_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(9234944)))]; + tensor add_19_epsilon_0_to_fp16 = const()[name = tensor("add_19_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_19_cast = batch_norm(beta = add_19_beta_0_to_fp16, epsilon = add_19_epsilon_0_to_fp16, gamma = add_19_gamma_0_to_fp16, mean = add_19_mean_0_to_fp16, variance = add_19_variance_0_to_fp16, x = reshape_37_cast)[name = tensor("add_19_cast")]; + tensor input_67_cast = silu(x = add_19_cast)[name = tensor("input_67_cast")]; + tensor var_227 = const()[name = tensor("op_227"), val = tensor([1, 1])]; + tensor var_229 = const()[name = tensor("op_229"), val = tensor([1, 1])]; + tensor hidden_states_17_pad_type_0 = const()[name = tensor("hidden_states_17_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_17_pad_0 = const()[name = tensor("hidden_states_17_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_down_blocks_2_resnets_0_conv2_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_2_resnets_0_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(9236032)))]; + tensor encoder_down_blocks_2_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_2_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(13954688)))]; + tensor hidden_states_17_cast = conv(bias = encoder_down_blocks_2_resnets_0_conv2_bias_to_fp16, dilations = var_229, groups = var_15, pad = hidden_states_17_pad_0, pad_type = hidden_states_17_pad_type_0, strides = var_227, weight = encoder_down_blocks_2_resnets_0_conv2_weight_to_fp16, x = input_67_cast)[name = tensor("hidden_states_17_cast")]; + tensor var_234 = const()[name = tensor("op_234"), val = tensor([1, 1])]; + tensor var_236 = const()[name = tensor("op_236"), val = tensor([1, 1])]; + tensor input_tensor_pad_type_0 = const()[name = tensor("input_tensor_pad_type_0"), val = tensor("custom")]; + tensor input_tensor_pad_0 = const()[name = tensor("input_tensor_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
encoder_down_blocks_2_resnets_0_conv_shortcut_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_2_resnets_0_conv_shortcut_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(13955776)))]; + tensor encoder_down_blocks_2_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_2_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14217984)))]; + tensor input_tensor_cast = conv(bias = encoder_down_blocks_2_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_236, groups = var_15, pad = input_tensor_pad_0, pad_type = input_tensor_pad_type_0, strides = var_234, weight = encoder_down_blocks_2_resnets_0_conv_shortcut_weight_to_fp16, x = input_57_cast)[name = tensor("input_tensor_cast")]; + tensor var_239_cast = add(x = input_tensor_cast, y = hidden_states_17_cast)[name = tensor("op_239_cast")]; + tensor reshape_40_shape_0 = const()[name = tensor("reshape_40_shape_0"), val = tensor([1, 32, 16, 256, 256])]; + tensor reshape_40_cast = reshape(shape = reshape_40_shape_0, x = var_239_cast)[name = tensor("reshape_40_cast")]; + tensor reduce_mean_30_axes_0 = const()[name = tensor("reduce_mean_30_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_30_keep_dims_0 = const()[name = tensor("reduce_mean_30_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_30_cast = reduce_mean(axes = reduce_mean_30_axes_0, keep_dims = reduce_mean_30_keep_dims_0, x = reshape_40_cast)[name = tensor("reduce_mean_30_cast")]; + tensor sub_20_cast = sub(x = reshape_40_cast, y = reduce_mean_30_cast)[name = tensor("sub_20_cast")]; + tensor square_10_cast = square(x = sub_20_cast)[name = tensor("square_10_cast")]; + tensor reduce_mean_32_axes_0 = const()[name = tensor("reduce_mean_32_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_32_keep_dims_0 = const()[name = tensor("reduce_mean_32_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_32_cast = reduce_mean(axes = reduce_mean_32_axes_0, keep_dims = reduce_mean_32_keep_dims_0, x = square_10_cast)[name = tensor("reduce_mean_32_cast")]; + tensor add_20_y_0_to_fp16 = const()[name = tensor("add_20_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_20_cast = add(x = reduce_mean_32_cast, y = add_20_y_0_to_fp16)[name = tensor("add_20_cast")]; + tensor sqrt_10_cast = sqrt(x = add_20_cast)[name = tensor("sqrt_10_cast")]; + tensor real_div_10_cast = real_div(x = sub_20_cast, y = sqrt_10_cast)[name = tensor("real_div_10_cast")]; + tensor reshape_41_shape_0 = const()[name = tensor("reshape_41_shape_0"), val = tensor([1, 512, 256, 256])]; + tensor reshape_41_cast = reshape(shape = reshape_41_shape_0, x = real_div_10_cast)[name = tensor("reshape_41_cast")]; + tensor add_21_gamma_0_to_fp16 = const()[name = tensor("add_21_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14219072)))]; + tensor add_21_beta_0_to_fp16 = const()[name = tensor("add_21_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14220160)))]; + tensor add_21_epsilon_0_to_fp16 = const()[name = tensor("add_21_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_21_cast = batch_norm(beta = add_21_beta_0_to_fp16, epsilon = add_21_epsilon_0_to_fp16, gamma = add_21_gamma_0_to_fp16, mean = add_19_mean_0_to_fp16, variance = add_19_variance_0_to_fp16, x = reshape_41_cast)[name = tensor("add_21_cast")]; + tensor input_75_cast = silu(x = 
add_21_cast)[name = tensor("input_75_cast")]; + tensor var_252 = const()[name = tensor("op_252"), val = tensor([1, 1])]; + tensor var_254 = const()[name = tensor("op_254"), val = tensor([1, 1])]; + tensor input_77_pad_type_0 = const()[name = tensor("input_77_pad_type_0"), val = tensor("custom")]; + tensor input_77_pad_0 = const()[name = tensor("input_77_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_down_blocks_2_resnets_1_conv1_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_2_resnets_1_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14221248)))]; + tensor encoder_down_blocks_2_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_2_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18939904)))]; + tensor input_77_cast = conv(bias = encoder_down_blocks_2_resnets_1_conv1_bias_to_fp16, dilations = var_254, groups = var_15, pad = input_77_pad_0, pad_type = input_77_pad_type_0, strides = var_252, weight = encoder_down_blocks_2_resnets_1_conv1_weight_to_fp16, x = input_75_cast)[name = tensor("input_77_cast")]; + tensor reshape_44_shape_0 = const()[name = tensor("reshape_44_shape_0"), val = tensor([1, 32, 16, 256, 256])]; + tensor reshape_44_cast = reshape(shape = reshape_44_shape_0, x = input_77_cast)[name = tensor("reshape_44_cast")]; + tensor reduce_mean_33_axes_0 = const()[name = tensor("reduce_mean_33_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_33_keep_dims_0 = const()[name = tensor("reduce_mean_33_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_33_cast = reduce_mean(axes = reduce_mean_33_axes_0, keep_dims = reduce_mean_33_keep_dims_0, x = reshape_44_cast)[name = tensor("reduce_mean_33_cast")]; + tensor sub_22_cast = sub(x = reshape_44_cast, y = reduce_mean_33_cast)[name = tensor("sub_22_cast")]; + tensor square_11_cast = square(x = sub_22_cast)[name = tensor("square_11_cast")]; + tensor reduce_mean_35_axes_0 = const()[name = tensor("reduce_mean_35_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_35_keep_dims_0 = const()[name = tensor("reduce_mean_35_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_35_cast = reduce_mean(axes = reduce_mean_35_axes_0, keep_dims = reduce_mean_35_keep_dims_0, x = square_11_cast)[name = tensor("reduce_mean_35_cast")]; + tensor add_22_y_0_to_fp16 = const()[name = tensor("add_22_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_22_cast = add(x = reduce_mean_35_cast, y = add_22_y_0_to_fp16)[name = tensor("add_22_cast")]; + tensor sqrt_11_cast = sqrt(x = add_22_cast)[name = tensor("sqrt_11_cast")]; + tensor real_div_11_cast = real_div(x = sub_22_cast, y = sqrt_11_cast)[name = tensor("real_div_11_cast")]; + tensor reshape_45_shape_0 = const()[name = tensor("reshape_45_shape_0"), val = tensor([1, 512, 256, 256])]; + tensor reshape_45_cast = reshape(shape = reshape_45_shape_0, x = real_div_11_cast)[name = tensor("reshape_45_cast")]; + tensor add_23_gamma_0_to_fp16 = const()[name = tensor("add_23_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18940992)))]; + tensor add_23_beta_0_to_fp16 = const()[name = tensor("add_23_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18942080)))]; + tensor add_23_epsilon_0_to_fp16 = const()[name = tensor("add_23_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_23_cast = batch_norm(beta = 
add_23_beta_0_to_fp16, epsilon = add_23_epsilon_0_to_fp16, gamma = add_23_gamma_0_to_fp16, mean = add_19_mean_0_to_fp16, variance = add_19_variance_0_to_fp16, x = reshape_45_cast)[name = tensor("add_23_cast")]; + tensor input_81_cast = silu(x = add_23_cast)[name = tensor("input_81_cast")]; + tensor var_264 = const()[name = tensor("op_264"), val = tensor([1, 1])]; + tensor var_266 = const()[name = tensor("op_266"), val = tensor([1, 1])]; + tensor hidden_states_19_pad_type_0 = const()[name = tensor("hidden_states_19_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_19_pad_0 = const()[name = tensor("hidden_states_19_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_down_blocks_2_resnets_1_conv2_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_2_resnets_1_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18943168)))]; + tensor encoder_down_blocks_2_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_2_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(23661824)))]; + tensor hidden_states_19_cast = conv(bias = encoder_down_blocks_2_resnets_1_conv2_bias_to_fp16, dilations = var_266, groups = var_15, pad = hidden_states_19_pad_0, pad_type = hidden_states_19_pad_type_0, strides = var_264, weight = encoder_down_blocks_2_resnets_1_conv2_weight_to_fp16, x = input_81_cast)[name = tensor("hidden_states_19_cast")]; + tensor var_269_cast = add(x = var_239_cast, y = hidden_states_19_cast)[name = tensor("op_269_cast")]; + tensor hidden_states_23_pad_0 = const()[name = tensor("hidden_states_23_pad_0"), val = tensor([0, 0, 0, 0, 0, 1, 0, 1])]; + tensor hidden_states_23_mode_0 = const()[name = tensor("hidden_states_23_mode_0"), val = tensor("constant")]; + tensor hidden_states_23_constant_val_0_to_fp16 = const()[name = tensor("hidden_states_23_constant_val_0_to_fp16"), val = tensor(0x0p+0)]; + tensor hidden_states_23_cast = pad(constant_val = hidden_states_23_constant_val_0_to_fp16, mode = hidden_states_23_mode_0, pad = hidden_states_23_pad_0, x = var_269_cast)[name = tensor("hidden_states_23_cast")]; + tensor var_277 = const()[name = tensor("op_277"), val = tensor([2, 2])]; + tensor var_279 = const()[name = tensor("op_279"), val = tensor([1, 1])]; + tensor input_85_pad_type_0 = const()[name = tensor("input_85_pad_type_0"), val = tensor("custom")]; + tensor input_85_pad_0 = const()[name = tensor("input_85_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor encoder_down_blocks_2_downsamplers_0_conv_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_2_downsamplers_0_conv_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(23662912)))]; + tensor encoder_down_blocks_2_downsamplers_0_conv_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_2_downsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28381568)))]; + tensor input_85_cast = conv(bias = encoder_down_blocks_2_downsamplers_0_conv_bias_to_fp16, dilations = var_279, groups = var_15, pad = input_85_pad_0, pad_type = input_85_pad_type_0, strides = var_277, weight = encoder_down_blocks_2_downsamplers_0_conv_weight_to_fp16, x = hidden_states_23_cast)[name = tensor("input_85_cast")]; + tensor reshape_48_shape_0 = const()[name = tensor("reshape_48_shape_0"), val = tensor([1, 32, 16, 128, 128])]; + tensor reshape_48_cast = reshape(shape = reshape_48_shape_0, x = 
input_85_cast)[name = tensor("reshape_48_cast")]; + tensor reduce_mean_36_axes_0 = const()[name = tensor("reduce_mean_36_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_36_keep_dims_0 = const()[name = tensor("reduce_mean_36_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_36_cast = reduce_mean(axes = reduce_mean_36_axes_0, keep_dims = reduce_mean_36_keep_dims_0, x = reshape_48_cast)[name = tensor("reduce_mean_36_cast")]; + tensor sub_24_cast = sub(x = reshape_48_cast, y = reduce_mean_36_cast)[name = tensor("sub_24_cast")]; + tensor square_12_cast = square(x = sub_24_cast)[name = tensor("square_12_cast")]; + tensor reduce_mean_38_axes_0 = const()[name = tensor("reduce_mean_38_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_38_keep_dims_0 = const()[name = tensor("reduce_mean_38_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_38_cast = reduce_mean(axes = reduce_mean_38_axes_0, keep_dims = reduce_mean_38_keep_dims_0, x = square_12_cast)[name = tensor("reduce_mean_38_cast")]; + tensor add_24_y_0_to_fp16 = const()[name = tensor("add_24_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_24_cast = add(x = reduce_mean_38_cast, y = add_24_y_0_to_fp16)[name = tensor("add_24_cast")]; + tensor sqrt_12_cast = sqrt(x = add_24_cast)[name = tensor("sqrt_12_cast")]; + tensor real_div_12_cast = real_div(x = sub_24_cast, y = sqrt_12_cast)[name = tensor("real_div_12_cast")]; + tensor reshape_49_shape_0 = const()[name = tensor("reshape_49_shape_0"), val = tensor([1, 512, 128, 128])]; + tensor reshape_49_cast = reshape(shape = reshape_49_shape_0, x = real_div_12_cast)[name = tensor("reshape_49_cast")]; + tensor add_25_gamma_0_to_fp16 = const()[name = tensor("add_25_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28382656)))]; + tensor add_25_beta_0_to_fp16 = const()[name = tensor("add_25_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28383744)))]; + tensor add_25_epsilon_0_to_fp16 = const()[name = tensor("add_25_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_25_cast = batch_norm(beta = add_25_beta_0_to_fp16, epsilon = add_25_epsilon_0_to_fp16, gamma = add_25_gamma_0_to_fp16, mean = add_19_mean_0_to_fp16, variance = add_19_variance_0_to_fp16, x = reshape_49_cast)[name = tensor("add_25_cast")]; + tensor input_89_cast = silu(x = add_25_cast)[name = tensor("input_89_cast")]; + tensor var_296 = const()[name = tensor("op_296"), val = tensor([1, 1])]; + tensor var_298 = const()[name = tensor("op_298"), val = tensor([1, 1])]; + tensor input_91_pad_type_0 = const()[name = tensor("input_91_pad_type_0"), val = tensor("custom")]; + tensor input_91_pad_0 = const()[name = tensor("input_91_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_down_blocks_3_resnets_0_conv1_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_3_resnets_0_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28384832)))]; + tensor encoder_down_blocks_3_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_3_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(33103488)))]; + tensor input_91_cast = conv(bias = encoder_down_blocks_3_resnets_0_conv1_bias_to_fp16, dilations = var_298, groups = var_15, pad = input_91_pad_0, pad_type = input_91_pad_type_0, strides = var_296, weight = encoder_down_blocks_3_resnets_0_conv1_weight_to_fp16, x = 
input_89_cast)[name = tensor("input_91_cast")]; + tensor reshape_52_shape_0 = const()[name = tensor("reshape_52_shape_0"), val = tensor([1, 32, 16, 128, 128])]; + tensor reshape_52_cast = reshape(shape = reshape_52_shape_0, x = input_91_cast)[name = tensor("reshape_52_cast")]; + tensor reduce_mean_39_axes_0 = const()[name = tensor("reduce_mean_39_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_39_keep_dims_0 = const()[name = tensor("reduce_mean_39_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_39_cast = reduce_mean(axes = reduce_mean_39_axes_0, keep_dims = reduce_mean_39_keep_dims_0, x = reshape_52_cast)[name = tensor("reduce_mean_39_cast")]; + tensor sub_26_cast = sub(x = reshape_52_cast, y = reduce_mean_39_cast)[name = tensor("sub_26_cast")]; + tensor square_13_cast = square(x = sub_26_cast)[name = tensor("square_13_cast")]; + tensor reduce_mean_41_axes_0 = const()[name = tensor("reduce_mean_41_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_41_keep_dims_0 = const()[name = tensor("reduce_mean_41_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_41_cast = reduce_mean(axes = reduce_mean_41_axes_0, keep_dims = reduce_mean_41_keep_dims_0, x = square_13_cast)[name = tensor("reduce_mean_41_cast")]; + tensor add_26_y_0_to_fp16 = const()[name = tensor("add_26_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_26_cast = add(x = reduce_mean_41_cast, y = add_26_y_0_to_fp16)[name = tensor("add_26_cast")]; + tensor sqrt_13_cast = sqrt(x = add_26_cast)[name = tensor("sqrt_13_cast")]; + tensor real_div_13_cast = real_div(x = sub_26_cast, y = sqrt_13_cast)[name = tensor("real_div_13_cast")]; + tensor reshape_53_shape_0 = const()[name = tensor("reshape_53_shape_0"), val = tensor([1, 512, 128, 128])]; + tensor reshape_53_cast = reshape(shape = reshape_53_shape_0, x = real_div_13_cast)[name = tensor("reshape_53_cast")]; + tensor add_27_gamma_0_to_fp16 = const()[name = tensor("add_27_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(33104576)))]; + tensor add_27_beta_0_to_fp16 = const()[name = tensor("add_27_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(33105664)))]; + tensor add_27_epsilon_0_to_fp16 = const()[name = tensor("add_27_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_27_cast = batch_norm(beta = add_27_beta_0_to_fp16, epsilon = add_27_epsilon_0_to_fp16, gamma = add_27_gamma_0_to_fp16, mean = add_19_mean_0_to_fp16, variance = add_19_variance_0_to_fp16, x = reshape_53_cast)[name = tensor("add_27_cast")]; + tensor input_95_cast = silu(x = add_27_cast)[name = tensor("input_95_cast")]; + tensor var_308 = const()[name = tensor("op_308"), val = tensor([1, 1])]; + tensor var_310 = const()[name = tensor("op_310"), val = tensor([1, 1])]; + tensor hidden_states_25_pad_type_0 = const()[name = tensor("hidden_states_25_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_25_pad_0 = const()[name = tensor("hidden_states_25_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_down_blocks_3_resnets_0_conv2_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_3_resnets_0_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(33106752)))]; + tensor encoder_down_blocks_3_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_3_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(37825408)))]; + 
tensor hidden_states_25_cast = conv(bias = encoder_down_blocks_3_resnets_0_conv2_bias_to_fp16, dilations = var_310, groups = var_15, pad = hidden_states_25_pad_0, pad_type = hidden_states_25_pad_type_0, strides = var_308, weight = encoder_down_blocks_3_resnets_0_conv2_weight_to_fp16, x = input_95_cast)[name = tensor("hidden_states_25_cast")]; + tensor var_313_cast = add(x = input_85_cast, y = hidden_states_25_cast)[name = tensor("op_313_cast")]; + tensor reshape_56_shape_0 = const()[name = tensor("reshape_56_shape_0"), val = tensor([1, 32, 16, 128, 128])]; + tensor reshape_56_cast = reshape(shape = reshape_56_shape_0, x = var_313_cast)[name = tensor("reshape_56_cast")]; + tensor reduce_mean_42_axes_0 = const()[name = tensor("reduce_mean_42_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_42_keep_dims_0 = const()[name = tensor("reduce_mean_42_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_42_cast = reduce_mean(axes = reduce_mean_42_axes_0, keep_dims = reduce_mean_42_keep_dims_0, x = reshape_56_cast)[name = tensor("reduce_mean_42_cast")]; + tensor sub_28_cast = sub(x = reshape_56_cast, y = reduce_mean_42_cast)[name = tensor("sub_28_cast")]; + tensor square_14_cast = square(x = sub_28_cast)[name = tensor("square_14_cast")]; + tensor reduce_mean_44_axes_0 = const()[name = tensor("reduce_mean_44_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_44_keep_dims_0 = const()[name = tensor("reduce_mean_44_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_44_cast = reduce_mean(axes = reduce_mean_44_axes_0, keep_dims = reduce_mean_44_keep_dims_0, x = square_14_cast)[name = tensor("reduce_mean_44_cast")]; + tensor add_28_y_0_to_fp16 = const()[name = tensor("add_28_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_28_cast = add(x = reduce_mean_44_cast, y = add_28_y_0_to_fp16)[name = tensor("add_28_cast")]; + tensor sqrt_14_cast = sqrt(x = add_28_cast)[name = tensor("sqrt_14_cast")]; + tensor real_div_14_cast = real_div(x = sub_28_cast, y = sqrt_14_cast)[name = tensor("real_div_14_cast")]; + tensor reshape_57_shape_0 = const()[name = tensor("reshape_57_shape_0"), val = tensor([1, 512, 128, 128])]; + tensor reshape_57_cast = reshape(shape = reshape_57_shape_0, x = real_div_14_cast)[name = tensor("reshape_57_cast")]; + tensor add_29_gamma_0_to_fp16 = const()[name = tensor("add_29_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(37826496)))]; + tensor add_29_beta_0_to_fp16 = const()[name = tensor("add_29_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(37827584)))]; + tensor add_29_epsilon_0_to_fp16 = const()[name = tensor("add_29_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_29_cast = batch_norm(beta = add_29_beta_0_to_fp16, epsilon = add_29_epsilon_0_to_fp16, gamma = add_29_gamma_0_to_fp16, mean = add_19_mean_0_to_fp16, variance = add_19_variance_0_to_fp16, x = reshape_57_cast)[name = tensor("add_29_cast")]; + tensor input_103_cast = silu(x = add_29_cast)[name = tensor("input_103_cast")]; + tensor var_326 = const()[name = tensor("op_326"), val = tensor([1, 1])]; + tensor var_328 = const()[name = tensor("op_328"), val = tensor([1, 1])]; + tensor input_105_pad_type_0 = const()[name = tensor("input_105_pad_type_0"), val = tensor("custom")]; + tensor input_105_pad_0 = const()[name = tensor("input_105_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_down_blocks_3_resnets_1_conv1_weight_to_fp16 = const()[name = 
tensor("encoder_down_blocks_3_resnets_1_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(37828672)))]; + tensor encoder_down_blocks_3_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_3_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42547328)))]; + tensor input_105_cast = conv(bias = encoder_down_blocks_3_resnets_1_conv1_bias_to_fp16, dilations = var_328, groups = var_15, pad = input_105_pad_0, pad_type = input_105_pad_type_0, strides = var_326, weight = encoder_down_blocks_3_resnets_1_conv1_weight_to_fp16, x = input_103_cast)[name = tensor("input_105_cast")]; + tensor reshape_60_shape_0 = const()[name = tensor("reshape_60_shape_0"), val = tensor([1, 32, 16, 128, 128])]; + tensor reshape_60_cast = reshape(shape = reshape_60_shape_0, x = input_105_cast)[name = tensor("reshape_60_cast")]; + tensor reduce_mean_45_axes_0 = const()[name = tensor("reduce_mean_45_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_45_keep_dims_0 = const()[name = tensor("reduce_mean_45_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_45_cast = reduce_mean(axes = reduce_mean_45_axes_0, keep_dims = reduce_mean_45_keep_dims_0, x = reshape_60_cast)[name = tensor("reduce_mean_45_cast")]; + tensor sub_30_cast = sub(x = reshape_60_cast, y = reduce_mean_45_cast)[name = tensor("sub_30_cast")]; + tensor square_15_cast = square(x = sub_30_cast)[name = tensor("square_15_cast")]; + tensor reduce_mean_47_axes_0 = const()[name = tensor("reduce_mean_47_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_47_keep_dims_0 = const()[name = tensor("reduce_mean_47_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_47_cast = reduce_mean(axes = reduce_mean_47_axes_0, keep_dims = reduce_mean_47_keep_dims_0, x = square_15_cast)[name = tensor("reduce_mean_47_cast")]; + tensor add_30_y_0_to_fp16 = const()[name = tensor("add_30_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_30_cast = add(x = reduce_mean_47_cast, y = add_30_y_0_to_fp16)[name = tensor("add_30_cast")]; + tensor sqrt_15_cast = sqrt(x = add_30_cast)[name = tensor("sqrt_15_cast")]; + tensor real_div_15_cast = real_div(x = sub_30_cast, y = sqrt_15_cast)[name = tensor("real_div_15_cast")]; + tensor reshape_61_shape_0 = const()[name = tensor("reshape_61_shape_0"), val = tensor([1, 512, 128, 128])]; + tensor reshape_61_cast = reshape(shape = reshape_61_shape_0, x = real_div_15_cast)[name = tensor("reshape_61_cast")]; + tensor add_31_gamma_0_to_fp16 = const()[name = tensor("add_31_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42548416)))]; + tensor add_31_beta_0_to_fp16 = const()[name = tensor("add_31_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42549504)))]; + tensor add_31_epsilon_0_to_fp16 = const()[name = tensor("add_31_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_31_cast = batch_norm(beta = add_31_beta_0_to_fp16, epsilon = add_31_epsilon_0_to_fp16, gamma = add_31_gamma_0_to_fp16, mean = add_19_mean_0_to_fp16, variance = add_19_variance_0_to_fp16, x = reshape_61_cast)[name = tensor("add_31_cast")]; + tensor input_109_cast = silu(x = add_31_cast)[name = tensor("input_109_cast")]; + tensor var_338 = const()[name = tensor("op_338"), val = tensor([1, 1])]; + tensor var_340 = const()[name = tensor("op_340"), val = tensor([1, 1])]; + tensor hidden_states_27_pad_type_0 = 
const()[name = tensor("hidden_states_27_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_27_pad_0 = const()[name = tensor("hidden_states_27_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_down_blocks_3_resnets_1_conv2_weight_to_fp16 = const()[name = tensor("encoder_down_blocks_3_resnets_1_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42550592)))]; + tensor encoder_down_blocks_3_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("encoder_down_blocks_3_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47269248)))]; + tensor hidden_states_27_cast = conv(bias = encoder_down_blocks_3_resnets_1_conv2_bias_to_fp16, dilations = var_340, groups = var_15, pad = hidden_states_27_pad_0, pad_type = hidden_states_27_pad_type_0, strides = var_338, weight = encoder_down_blocks_3_resnets_1_conv2_weight_to_fp16, x = input_109_cast)[name = tensor("hidden_states_27_cast")]; + tensor var_343_cast = add(x = var_313_cast, y = hidden_states_27_cast)[name = tensor("op_343_cast")]; + tensor reshape_64_shape_0 = const()[name = tensor("reshape_64_shape_0"), val = tensor([1, 32, 16, 128, 128])]; + tensor reshape_64_cast = reshape(shape = reshape_64_shape_0, x = var_343_cast)[name = tensor("reshape_64_cast")]; + tensor reduce_mean_48_axes_0 = const()[name = tensor("reduce_mean_48_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_48_keep_dims_0 = const()[name = tensor("reduce_mean_48_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_48_cast = reduce_mean(axes = reduce_mean_48_axes_0, keep_dims = reduce_mean_48_keep_dims_0, x = reshape_64_cast)[name = tensor("reduce_mean_48_cast")]; + tensor sub_32_cast = sub(x = reshape_64_cast, y = reduce_mean_48_cast)[name = tensor("sub_32_cast")]; + tensor square_16_cast = square(x = sub_32_cast)[name = tensor("square_16_cast")]; + tensor reduce_mean_50_axes_0 = const()[name = tensor("reduce_mean_50_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_50_keep_dims_0 = const()[name = tensor("reduce_mean_50_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_50_cast = reduce_mean(axes = reduce_mean_50_axes_0, keep_dims = reduce_mean_50_keep_dims_0, x = square_16_cast)[name = tensor("reduce_mean_50_cast")]; + tensor add_32_y_0_to_fp16 = const()[name = tensor("add_32_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_32_cast = add(x = reduce_mean_50_cast, y = add_32_y_0_to_fp16)[name = tensor("add_32_cast")]; + tensor sqrt_16_cast = sqrt(x = add_32_cast)[name = tensor("sqrt_16_cast")]; + tensor real_div_16_cast = real_div(x = sub_32_cast, y = sqrt_16_cast)[name = tensor("real_div_16_cast")]; + tensor reshape_65_shape_0 = const()[name = tensor("reshape_65_shape_0"), val = tensor([1, 512, 128, 128])]; + tensor reshape_65_cast = reshape(shape = reshape_65_shape_0, x = real_div_16_cast)[name = tensor("reshape_65_cast")]; + tensor add_33_gamma_0_to_fp16 = const()[name = tensor("add_33_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47270336)))]; + tensor add_33_beta_0_to_fp16 = const()[name = tensor("add_33_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47271424)))]; + tensor add_33_epsilon_0_to_fp16 = const()[name = tensor("add_33_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_33_cast = batch_norm(beta = add_33_beta_0_to_fp16, epsilon = add_33_epsilon_0_to_fp16, gamma = 
add_33_gamma_0_to_fp16, mean = add_19_mean_0_to_fp16, variance = add_19_variance_0_to_fp16, x = reshape_65_cast)[name = tensor("add_33_cast")]; + tensor input_117_cast = silu(x = add_33_cast)[name = tensor("input_117_cast")]; + tensor var_362 = const()[name = tensor("op_362"), val = tensor([1, 1])]; + tensor var_364 = const()[name = tensor("op_364"), val = tensor([1, 1])]; + tensor input_119_pad_type_0 = const()[name = tensor("input_119_pad_type_0"), val = tensor("custom")]; + tensor input_119_pad_0 = const()[name = tensor("input_119_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_mid_block_resnets_0_conv1_weight_to_fp16 = const()[name = tensor("encoder_mid_block_resnets_0_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47272512)))]; + tensor encoder_mid_block_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("encoder_mid_block_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51991168)))]; + tensor input_119_cast = conv(bias = encoder_mid_block_resnets_0_conv1_bias_to_fp16, dilations = var_364, groups = var_15, pad = input_119_pad_0, pad_type = input_119_pad_type_0, strides = var_362, weight = encoder_mid_block_resnets_0_conv1_weight_to_fp16, x = input_117_cast)[name = tensor("input_119_cast")]; + tensor reshape_68_shape_0 = const()[name = tensor("reshape_68_shape_0"), val = tensor([1, 32, 16, 128, 128])]; + tensor reshape_68_cast = reshape(shape = reshape_68_shape_0, x = input_119_cast)[name = tensor("reshape_68_cast")]; + tensor reduce_mean_51_axes_0 = const()[name = tensor("reduce_mean_51_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_51_keep_dims_0 = const()[name = tensor("reduce_mean_51_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_51_cast = reduce_mean(axes = reduce_mean_51_axes_0, keep_dims = reduce_mean_51_keep_dims_0, x = reshape_68_cast)[name = tensor("reduce_mean_51_cast")]; + tensor sub_34_cast = sub(x = reshape_68_cast, y = reduce_mean_51_cast)[name = tensor("sub_34_cast")]; + tensor square_17_cast = square(x = sub_34_cast)[name = tensor("square_17_cast")]; + tensor reduce_mean_53_axes_0 = const()[name = tensor("reduce_mean_53_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_53_keep_dims_0 = const()[name = tensor("reduce_mean_53_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_53_cast = reduce_mean(axes = reduce_mean_53_axes_0, keep_dims = reduce_mean_53_keep_dims_0, x = square_17_cast)[name = tensor("reduce_mean_53_cast")]; + tensor add_34_y_0_to_fp16 = const()[name = tensor("add_34_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_34_cast = add(x = reduce_mean_53_cast, y = add_34_y_0_to_fp16)[name = tensor("add_34_cast")]; + tensor sqrt_17_cast = sqrt(x = add_34_cast)[name = tensor("sqrt_17_cast")]; + tensor real_div_17_cast = real_div(x = sub_34_cast, y = sqrt_17_cast)[name = tensor("real_div_17_cast")]; + tensor reshape_69_shape_0 = const()[name = tensor("reshape_69_shape_0"), val = tensor([1, 512, 128, 128])]; + tensor reshape_69_cast = reshape(shape = reshape_69_shape_0, x = real_div_17_cast)[name = tensor("reshape_69_cast")]; + tensor add_35_gamma_0_to_fp16 = const()[name = tensor("add_35_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51992256)))]; + tensor add_35_beta_0_to_fp16 = const()[name = tensor("add_35_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51993344)))]; + 
tensor add_35_epsilon_0_to_fp16 = const()[name = tensor("add_35_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_35_cast = batch_norm(beta = add_35_beta_0_to_fp16, epsilon = add_35_epsilon_0_to_fp16, gamma = add_35_gamma_0_to_fp16, mean = add_19_mean_0_to_fp16, variance = add_19_variance_0_to_fp16, x = reshape_69_cast)[name = tensor("add_35_cast")]; + tensor input_123_cast = silu(x = add_35_cast)[name = tensor("input_123_cast")]; + tensor var_374 = const()[name = tensor("op_374"), val = tensor([1, 1])]; + tensor var_376 = const()[name = tensor("op_376"), val = tensor([1, 1])]; + tensor hidden_states_29_pad_type_0 = const()[name = tensor("hidden_states_29_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_29_pad_0 = const()[name = tensor("hidden_states_29_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_mid_block_resnets_0_conv2_weight_to_fp16 = const()[name = tensor("encoder_mid_block_resnets_0_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51994432)))]; + tensor encoder_mid_block_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("encoder_mid_block_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(56713088)))]; + tensor hidden_states_29_cast = conv(bias = encoder_mid_block_resnets_0_conv2_bias_to_fp16, dilations = var_376, groups = var_15, pad = hidden_states_29_pad_0, pad_type = hidden_states_29_pad_type_0, strides = var_374, weight = encoder_mid_block_resnets_0_conv2_weight_to_fp16, x = input_123_cast)[name = tensor("hidden_states_29_cast")]; + tensor var_379_cast = add(x = var_343_cast, y = hidden_states_29_cast)[name = tensor("op_379_cast")]; + tensor reshape_72_shape_0 = const()[name = tensor("reshape_72_shape_0"), val = tensor([1, 32, 16, 16384])]; + tensor reshape_72_cast = reshape(shape = reshape_72_shape_0, x = var_379_cast)[name = tensor("reshape_72_cast")]; + tensor reduce_mean_54_axes_0 = const()[name = tensor("reduce_mean_54_axes_0"), val = tensor([2, 3])]; + tensor reduce_mean_54_keep_dims_0 = const()[name = tensor("reduce_mean_54_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_54_cast = reduce_mean(axes = reduce_mean_54_axes_0, keep_dims = reduce_mean_54_keep_dims_0, x = reshape_72_cast)[name = tensor("reduce_mean_54_cast")]; + tensor sub_36_cast = sub(x = reshape_72_cast, y = reduce_mean_54_cast)[name = tensor("sub_36_cast")]; + tensor square_18_cast = square(x = sub_36_cast)[name = tensor("square_18_cast")]; + tensor reduce_mean_56_axes_0 = const()[name = tensor("reduce_mean_56_axes_0"), val = tensor([2, 3])]; + tensor reduce_mean_56_keep_dims_0 = const()[name = tensor("reduce_mean_56_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_56_cast = reduce_mean(axes = reduce_mean_56_axes_0, keep_dims = reduce_mean_56_keep_dims_0, x = square_18_cast)[name = tensor("reduce_mean_56_cast")]; + tensor add_36_y_0_to_fp16 = const()[name = tensor("add_36_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_36_cast = add(x = reduce_mean_56_cast, y = add_36_y_0_to_fp16)[name = tensor("add_36_cast")]; + tensor sqrt_18_cast = sqrt(x = add_36_cast)[name = tensor("sqrt_18_cast")]; + tensor real_div_18_cast = real_div(x = sub_36_cast, y = sqrt_18_cast)[name = tensor("real_div_18_cast")]; + tensor reshape_73_shape_0 = const()[name = tensor("reshape_73_shape_0"), val = tensor([1, 512, 16384])]; + tensor reshape_73_cast = reshape(shape = reshape_73_shape_0, x = real_div_18_cast)[name = tensor("reshape_73_cast")]; + 
tensor reshape_74_to_fp16 = const()[name = tensor("reshape_74_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(56714176)))]; + tensor mul_18_cast = mul(x = reshape_73_cast, y = reshape_74_to_fp16)[name = tensor("mul_18_cast")]; + tensor reshape_75_to_fp16 = const()[name = tensor("reshape_75_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(56715264)))]; + tensor add_37_cast = add(x = mul_18_cast, y = reshape_75_to_fp16)[name = tensor("add_37_cast")]; + tensor input_129_perm_0 = const()[name = tensor("input_129_perm_0"), val = tensor([0, 2, 1])]; + tensor encoder_mid_block_attentions_0_to_q_weight_to_fp16 = const()[name = tensor("encoder_mid_block_attentions_0_to_q_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(56716352)))]; + tensor encoder_mid_block_attentions_0_to_q_bias_to_fp16 = const()[name = tensor("encoder_mid_block_attentions_0_to_q_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(57240704)))]; + tensor transpose_9 = transpose(perm = input_129_perm_0, x = add_37_cast)[name = tensor("transpose_9")]; + tensor query_1_cast = linear(bias = encoder_mid_block_attentions_0_to_q_bias_to_fp16, weight = encoder_mid_block_attentions_0_to_q_weight_to_fp16, x = transpose_9)[name = tensor("query_1_cast")]; + tensor encoder_mid_block_attentions_0_to_k_weight_to_fp16 = const()[name = tensor("encoder_mid_block_attentions_0_to_k_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(57241792)))]; + tensor encoder_mid_block_attentions_0_to_k_bias_to_fp16 = const()[name = tensor("encoder_mid_block_attentions_0_to_k_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(57766144)))]; + tensor key_1_cast = linear(bias = encoder_mid_block_attentions_0_to_k_bias_to_fp16, weight = encoder_mid_block_attentions_0_to_k_weight_to_fp16, x = transpose_9)[name = tensor("key_1_cast")]; + tensor encoder_mid_block_attentions_0_to_v_weight_to_fp16 = const()[name = tensor("encoder_mid_block_attentions_0_to_v_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(57767232)))]; + tensor encoder_mid_block_attentions_0_to_v_bias_to_fp16 = const()[name = tensor("encoder_mid_block_attentions_0_to_v_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(58291584)))]; + tensor value_1_cast = linear(bias = encoder_mid_block_attentions_0_to_v_bias_to_fp16, weight = encoder_mid_block_attentions_0_to_v_weight_to_fp16, x = transpose_9)[name = tensor("value_1_cast")]; + tensor var_420 = const()[name = tensor("op_420"), val = tensor([1, -1, 1, 512])]; + tensor var_421_cast = reshape(shape = var_420, x = query_1_cast)[name = tensor("op_421_cast")]; + tensor var_423 = const()[name = tensor("op_423"), val = tensor([1, -1, 1, 512])]; + tensor var_424_cast = reshape(shape = var_423, x = key_1_cast)[name = tensor("op_424_cast")]; + tensor var_426 = const()[name = tensor("op_426"), val = tensor([1, -1, 1, 512])]; + tensor var_427_cast = reshape(shape = var_426, x = value_1_cast)[name = tensor("op_427_cast")]; + tensor value_perm_0 = const()[name = tensor("value_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor mul_19_y_0_to_fp16 = const()[name = tensor("mul_19_y_0_to_fp16"), val = tensor(0x1.6ap-5)]; + tensor mul_19_cast = mul(x = var_421_cast, 
y = mul_19_y_0_to_fp16)[name = tensor("mul_19_cast")]; + tensor matmul_0_transpose_y_0 = const()[name = tensor("matmul_0_transpose_y_0"), val = tensor(true)]; + tensor matmul_0_transpose_x_0 = const()[name = tensor("matmul_0_transpose_x_0"), val = tensor(false)]; + tensor transpose_2_perm_0 = const()[name = tensor("transpose_2_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor transpose_3_perm_0 = const()[name = tensor("transpose_3_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor transpose_6 = transpose(perm = transpose_3_perm_0, x = var_424_cast)[name = tensor("transpose_6")]; + tensor transpose_7 = transpose(perm = transpose_2_perm_0, x = mul_19_cast)[name = tensor("transpose_7")]; + tensor matmul_0_cast = matmul(transpose_x = matmul_0_transpose_x_0, transpose_y = matmul_0_transpose_y_0, x = transpose_7, y = transpose_6)[name = tensor("matmul_0_cast")]; + tensor softmax_0_axis_0 = const()[name = tensor("softmax_0_axis_0"), val = tensor(-1)]; + tensor softmax_0_cast = softmax(axis = softmax_0_axis_0, x = matmul_0_cast)[name = tensor("softmax_0_cast")]; + tensor hidden_states_35_transpose_x_0 = const()[name = tensor("hidden_states_35_transpose_x_0"), val = tensor(false)]; + tensor hidden_states_35_transpose_y_0 = const()[name = tensor("hidden_states_35_transpose_y_0"), val = tensor(false)]; + tensor transpose_8 = transpose(perm = value_perm_0, x = var_427_cast)[name = tensor("transpose_8")]; + tensor hidden_states_35_cast = matmul(transpose_x = hidden_states_35_transpose_x_0, transpose_y = hidden_states_35_transpose_y_0, x = softmax_0_cast, y = transpose_8)[name = tensor("hidden_states_35_cast")]; + tensor var_430_perm_0 = const()[name = tensor("op_430_perm_0"), val = tensor([0, 2, 1, 3])]; + tensor var_434 = const()[name = tensor("op_434"), val = tensor([1, -1, 512])]; + tensor transpose_5 = transpose(perm = var_430_perm_0, x = hidden_states_35_cast)[name = tensor("transpose_5")]; + tensor hidden_states_37_cast = reshape(shape = var_434, x = transpose_5)[name = tensor("hidden_states_37_cast")]; + tensor encoder_mid_block_attentions_0_to_out_0_weight_to_fp16 = const()[name = tensor("encoder_mid_block_attentions_0_to_out_0_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(58292672)))]; + tensor encoder_mid_block_attentions_0_to_out_0_bias_to_fp16 = const()[name = tensor("encoder_mid_block_attentions_0_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(58817024)))]; + tensor input_133_cast = linear(bias = encoder_mid_block_attentions_0_to_out_0_bias_to_fp16, weight = encoder_mid_block_attentions_0_to_out_0_weight_to_fp16, x = hidden_states_37_cast)[name = tensor("input_133_cast")]; + tensor var_441_perm_0 = const()[name = tensor("op_441_perm_0"), val = tensor([0, -1, -2])]; + tensor var_442 = const()[name = tensor("op_442"), val = tensor([1, 512, 128, 128])]; + tensor transpose_4 = transpose(perm = var_441_perm_0, x = input_133_cast)[name = tensor("transpose_4")]; + tensor hidden_states_41_cast = reshape(shape = var_442, x = transpose_4)[name = tensor("hidden_states_41_cast")]; + tensor hidden_states_43_cast = add(x = hidden_states_41_cast, y = var_379_cast)[name = tensor("hidden_states_43_cast")]; + tensor reshape_76_shape_0 = const()[name = tensor("reshape_76_shape_0"), val = tensor([1, 32, 16, 128, 128])]; + tensor reshape_76_cast = reshape(shape = reshape_76_shape_0, x = hidden_states_43_cast)[name = tensor("reshape_76_cast")]; + tensor reduce_mean_57_axes_0 = const()[name = 
tensor("reduce_mean_57_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_57_keep_dims_0 = const()[name = tensor("reduce_mean_57_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_57_cast = reduce_mean(axes = reduce_mean_57_axes_0, keep_dims = reduce_mean_57_keep_dims_0, x = reshape_76_cast)[name = tensor("reduce_mean_57_cast")]; + tensor sub_38_cast = sub(x = reshape_76_cast, y = reduce_mean_57_cast)[name = tensor("sub_38_cast")]; + tensor square_19_cast = square(x = sub_38_cast)[name = tensor("square_19_cast")]; + tensor reduce_mean_59_axes_0 = const()[name = tensor("reduce_mean_59_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_59_keep_dims_0 = const()[name = tensor("reduce_mean_59_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_59_cast = reduce_mean(axes = reduce_mean_59_axes_0, keep_dims = reduce_mean_59_keep_dims_0, x = square_19_cast)[name = tensor("reduce_mean_59_cast")]; + tensor add_38_y_0_to_fp16 = const()[name = tensor("add_38_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_38_cast = add(x = reduce_mean_59_cast, y = add_38_y_0_to_fp16)[name = tensor("add_38_cast")]; + tensor sqrt_19_cast = sqrt(x = add_38_cast)[name = tensor("sqrt_19_cast")]; + tensor real_div_19_cast = real_div(x = sub_38_cast, y = sqrt_19_cast)[name = tensor("real_div_19_cast")]; + tensor reshape_77_shape_0 = const()[name = tensor("reshape_77_shape_0"), val = tensor([1, 512, 128, 128])]; + tensor reshape_77_cast = reshape(shape = reshape_77_shape_0, x = real_div_19_cast)[name = tensor("reshape_77_cast")]; + tensor add_39_gamma_0_to_fp16 = const()[name = tensor("add_39_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(58818112)))]; + tensor add_39_beta_0_to_fp16 = const()[name = tensor("add_39_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(58819200)))]; + tensor add_39_epsilon_0_to_fp16 = const()[name = tensor("add_39_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_39_cast = batch_norm(beta = add_39_beta_0_to_fp16, epsilon = add_39_epsilon_0_to_fp16, gamma = add_39_gamma_0_to_fp16, mean = add_19_mean_0_to_fp16, variance = add_19_variance_0_to_fp16, x = reshape_77_cast)[name = tensor("add_39_cast")]; + tensor input_139_cast = silu(x = add_39_cast)[name = tensor("input_139_cast")]; + tensor var_457 = const()[name = tensor("op_457"), val = tensor([1, 1])]; + tensor var_459 = const()[name = tensor("op_459"), val = tensor([1, 1])]; + tensor input_141_pad_type_0 = const()[name = tensor("input_141_pad_type_0"), val = tensor("custom")]; + tensor input_141_pad_0 = const()[name = tensor("input_141_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_mid_block_resnets_1_conv1_weight_to_fp16 = const()[name = tensor("encoder_mid_block_resnets_1_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(58820288)))]; + tensor encoder_mid_block_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("encoder_mid_block_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63538944)))]; + tensor input_141_cast = conv(bias = encoder_mid_block_resnets_1_conv1_bias_to_fp16, dilations = var_459, groups = var_15, pad = input_141_pad_0, pad_type = input_141_pad_type_0, strides = var_457, weight = encoder_mid_block_resnets_1_conv1_weight_to_fp16, x = input_139_cast)[name = tensor("input_141_cast")]; + tensor reshape_80_shape_0 = const()[name = 
tensor("reshape_80_shape_0"), val = tensor([1, 32, 16, 128, 128])]; + tensor reshape_80_cast = reshape(shape = reshape_80_shape_0, x = input_141_cast)[name = tensor("reshape_80_cast")]; + tensor reduce_mean_60_axes_0 = const()[name = tensor("reduce_mean_60_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_60_keep_dims_0 = const()[name = tensor("reduce_mean_60_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_60_cast = reduce_mean(axes = reduce_mean_60_axes_0, keep_dims = reduce_mean_60_keep_dims_0, x = reshape_80_cast)[name = tensor("reduce_mean_60_cast")]; + tensor sub_40_cast = sub(x = reshape_80_cast, y = reduce_mean_60_cast)[name = tensor("sub_40_cast")]; + tensor square_20_cast = square(x = sub_40_cast)[name = tensor("square_20_cast")]; + tensor reduce_mean_62_axes_0 = const()[name = tensor("reduce_mean_62_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_62_keep_dims_0 = const()[name = tensor("reduce_mean_62_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_62_cast = reduce_mean(axes = reduce_mean_62_axes_0, keep_dims = reduce_mean_62_keep_dims_0, x = square_20_cast)[name = tensor("reduce_mean_62_cast")]; + tensor add_40_y_0_to_fp16 = const()[name = tensor("add_40_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_40_cast = add(x = reduce_mean_62_cast, y = add_40_y_0_to_fp16)[name = tensor("add_40_cast")]; + tensor sqrt_20_cast = sqrt(x = add_40_cast)[name = tensor("sqrt_20_cast")]; + tensor real_div_20_cast = real_div(x = sub_40_cast, y = sqrt_20_cast)[name = tensor("real_div_20_cast")]; + tensor reshape_81_shape_0 = const()[name = tensor("reshape_81_shape_0"), val = tensor([1, 512, 128, 128])]; + tensor reshape_81_cast = reshape(shape = reshape_81_shape_0, x = real_div_20_cast)[name = tensor("reshape_81_cast")]; + tensor add_41_gamma_0_to_fp16 = const()[name = tensor("add_41_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63540032)))]; + tensor add_41_beta_0_to_fp16 = const()[name = tensor("add_41_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63541120)))]; + tensor add_41_epsilon_0_to_fp16 = const()[name = tensor("add_41_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_41_cast = batch_norm(beta = add_41_beta_0_to_fp16, epsilon = add_41_epsilon_0_to_fp16, gamma = add_41_gamma_0_to_fp16, mean = add_19_mean_0_to_fp16, variance = add_19_variance_0_to_fp16, x = reshape_81_cast)[name = tensor("add_41_cast")]; + tensor input_145_cast = silu(x = add_41_cast)[name = tensor("input_145_cast")]; + tensor var_469 = const()[name = tensor("op_469"), val = tensor([1, 1])]; + tensor var_471 = const()[name = tensor("op_471"), val = tensor([1, 1])]; + tensor hidden_states_pad_type_0 = const()[name = tensor("hidden_states_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_pad_0 = const()[name = tensor("hidden_states_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_mid_block_resnets_1_conv2_weight_to_fp16 = const()[name = tensor("encoder_mid_block_resnets_1_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63542208)))]; + tensor encoder_mid_block_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("encoder_mid_block_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(68260864)))]; + tensor hidden_states_cast = conv(bias = encoder_mid_block_resnets_1_conv2_bias_to_fp16, dilations = var_471, groups = 
var_15, pad = hidden_states_pad_0, pad_type = hidden_states_pad_type_0, strides = var_469, weight = encoder_mid_block_resnets_1_conv2_weight_to_fp16, x = input_145_cast)[name = tensor("hidden_states_cast")]; + tensor var_474_cast = add(x = hidden_states_43_cast, y = hidden_states_cast)[name = tensor("op_474_cast")]; + tensor reshape_84_shape_0 = const()[name = tensor("reshape_84_shape_0"), val = tensor([1, 32, 16, 128, 128])]; + tensor reshape_84_cast = reshape(shape = reshape_84_shape_0, x = var_474_cast)[name = tensor("reshape_84_cast")]; + tensor reduce_mean_63_axes_0 = const()[name = tensor("reduce_mean_63_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_63_keep_dims_0 = const()[name = tensor("reduce_mean_63_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_63_cast = reduce_mean(axes = reduce_mean_63_axes_0, keep_dims = reduce_mean_63_keep_dims_0, x = reshape_84_cast)[name = tensor("reduce_mean_63_cast")]; + tensor sub_42_cast = sub(x = reshape_84_cast, y = reduce_mean_63_cast)[name = tensor("sub_42_cast")]; + tensor square_21_cast = square(x = sub_42_cast)[name = tensor("square_21_cast")]; + tensor reduce_mean_65_axes_0 = const()[name = tensor("reduce_mean_65_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_65_keep_dims_0 = const()[name = tensor("reduce_mean_65_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_65_cast = reduce_mean(axes = reduce_mean_65_axes_0, keep_dims = reduce_mean_65_keep_dims_0, x = square_21_cast)[name = tensor("reduce_mean_65_cast")]; + tensor add_42_y_0_to_fp16 = const()[name = tensor("add_42_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_42_cast = add(x = reduce_mean_65_cast, y = add_42_y_0_to_fp16)[name = tensor("add_42_cast")]; + tensor sqrt_21_cast = sqrt(x = add_42_cast)[name = tensor("sqrt_21_cast")]; + tensor real_div_21_cast = real_div(x = sub_42_cast, y = sqrt_21_cast)[name = tensor("real_div_21_cast")]; + tensor reshape_85_shape_0 = const()[name = tensor("reshape_85_shape_0"), val = tensor([1, 512, 128, 128])]; + tensor reshape_85_cast = reshape(shape = reshape_85_shape_0, x = real_div_21_cast)[name = tensor("reshape_85_cast")]; + tensor add_43_gamma_0_to_fp16 = const()[name = tensor("add_43_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(68261952)))]; + tensor add_43_beta_0_to_fp16 = const()[name = tensor("add_43_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(68263040)))]; + tensor add_43_epsilon_0_to_fp16 = const()[name = tensor("add_43_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_43_cast = batch_norm(beta = add_43_beta_0_to_fp16, epsilon = add_43_epsilon_0_to_fp16, gamma = add_43_gamma_0_to_fp16, mean = add_19_mean_0_to_fp16, variance = add_19_variance_0_to_fp16, x = reshape_85_cast)[name = tensor("add_43_cast")]; + tensor input_153_cast = silu(x = add_43_cast)[name = tensor("input_153_cast")]; + tensor var_483 = const()[name = tensor("op_483"), val = tensor([1, 1])]; + tensor var_485 = const()[name = tensor("op_485"), val = tensor([1, 1])]; + tensor input_pad_type_0 = const()[name = tensor("input_pad_type_0"), val = tensor("custom")]; + tensor input_pad_0 = const()[name = tensor("input_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor encoder_conv_out_weight_to_fp16 = const()[name = tensor("encoder_conv_out_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(68264128)))]; + tensor encoder_conv_out_bias_to_fp16 = const()[name = 
tensor("encoder_conv_out_bias_to_fp16"), val = tensor([0x1.c88p-7, -0x1.04cp-4, 0x1.944p-3, 0x1.d9cp-3, 0x1.e78p-3, 0x1.78cp-5, 0x1.bb8p-5, -0x1.824p-3])]; + tensor input_cast = conv(bias = encoder_conv_out_bias_to_fp16, dilations = var_485, groups = var_15, pad = input_pad_0, pad_type = input_pad_type_0, strides = var_483, weight = encoder_conv_out_weight_to_fp16, x = input_153_cast)[name = tensor("input_cast")]; + tensor var_491 = const()[name = tensor("op_491"), val = tensor(1)]; + tensor var_494 = const()[name = tensor("op_494"), val = tensor([1, 1])]; + tensor var_496 = const()[name = tensor("op_496"), val = tensor([1, 1])]; + tensor var_498_pad_type_0 = const()[name = tensor("op_498_pad_type_0"), val = tensor("custom")]; + tensor var_498_pad_0 = const()[name = tensor("op_498_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor quant_conv_weight_to_fp16 = const()[name = tensor("quant_conv_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(68337920)))]; + tensor quant_conv_bias_to_fp16 = const()[name = tensor("quant_conv_bias_to_fp16"), val = tensor([0x1.f48p-4, 0x1.088p-4, -0x1.e48p-3, -0x1.bf8p-2, -0x1.56cp+4, -0x1.598p+4, -0x1.62p+4, -0x1.664p+4])]; + tensor var_498_cast = conv(bias = quant_conv_bias_to_fp16, dilations = var_496, groups = var_491, pad = var_498_pad_0, pad_type = var_498_pad_type_0, strides = var_494, weight = quant_conv_weight_to_fp16, x = input_cast)[name = tensor("op_498_cast")]; + tensor var_498_cast_to_fp32_dtype_0 = const()[name = tensor("op_498_cast_to_fp32_dtype_0"), val = tensor("fp32")]; + tensor latent = cast(dtype = var_498_cast_to_fp32_dtype_0, x = var_498_cast)[name = tensor("cast_29")]; + } -> (latent); +} \ No newline at end of file diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEEncoder.mlmodelc/weights/weight.bin b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEEncoder.mlmodelc/weights/weight.bin new file mode 100644 index 0000000000000000000000000000000000000000..b65da487e8950ca2002120fefa9387de7c571e32 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/VAEEncoder.mlmodelc/weights/weight.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:329f708df0bae1990a1886007b5ae56cfd9a44e7091e8f822db907a9fc411858 +size 68338112 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/merges.txt b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/merges.txt new file mode 100644 index 0000000000000000000000000000000000000000..bbfec752c9a675946c6dce106def6f35c882dcc2 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/merges.txt @@ -0,0 +1,48895 @@ +#version: 0.2 - Trained by `huggingface/tokenizers` +i n +t h +a n +r e +a r +e r +th e +in g +o u +o n +s t +o r +e n +o n +a l +a t +e r +i t +i n +t o +r o +i s +l e +i c +a t +an d +e d +o f +c h +o r +e s +i l +e l +s t +a c +o m +a m +l o +a n +a y +s h +r i +l i +t i +f or +n e +ð Ł +r a +h a +d e +o l +v e +s i +u r +a l +s e +' s +u n +d i +b e +l a +w h +o o +d ay +e n +m a +n o +l e +t o +ou r +i r +g h +w it +i t +y o +a s +s p +th is +t s +at i +yo u +wit h +a d +i s +a b +l y +w e +th e +t e +a s +a g +v i +p p +s u +h o +m y +. . 
+b u +c om +s e +er s +m e +m e +al l +c on +m o +k e +g e +ou t +en t +c o +f e +v er +a r +f ro +a u +p o +c e +gh t +ar e +s s +fro m +c h +t r +ou n +on e +b y +d o +t h +w or +er e +k e +p ro +f or +d s +b o +t a +w e +g o +h e +t er +in g +d e +b e +ati on +m or +a y +e x +il l +p e +k s +s c +l u +f u +q u +v er +ðŁ ĺ +j u +m u +at e +an d +v e +k ing +m ar +o p +h i +.. . +p re +a d +r u +th at +j o +o f +c e +ne w +a m +a p +g re +s s +d u +no w +y e +t ing +y our +it y +n i +c i +p ar +g u +f i +a f +p er +t er +u p +s o +g i +on s +g r +g e +b r +p l +' t +m i +in e +we e +b i +u s +sh o +ha ve +to day +a v +m an +en t +ac k +ur e +ou r +â Ģ +c u +l d +lo o +i m +ic e +s om +f in +re d +re n +oo d +w as +ti on +p i +i r +th er +t y +p h +ar d +e c +! ! +m on +mor e +w ill +t ra +c an +c ol +p u +t e +w n +m b +s o +it i +ju st +n ing +h ere +t u +p a +p r +bu t +wh at +al ly +f ir +m in +c a +an t +s a +t ed +e v +m ent +f a +ge t +am e +ab out +g ra +no t +ha pp +ay s +m an +h is +ti me +li ke +g h +ha s +th an +lo ve +ar t +st e +d ing +h e +c re +w s +w at +d er +it e +s er +ac e +ag e +en d +st r +a w +st or +r e +c ar +el l +al l +p s +f ri +p ho +p or +d o +a k +w i +f re +wh o +sh i +b oo +s on +el l +wh en +il l +ho w +gre at +w in +e l +b l +s si +al i +som e +ðŁ Ĵ +t on +d er +le s +p la +ï ¸ +e d +s ch +h u +on g +d on +k i +s h +an n +c or +. . +oun d +a z +in e +ar y +fu l +st u +ou ld +st i +g o +se e +ab le +ar s +l l +m is +b er +c k +w a +en ts +n o +si g +f e +fir st +e t +sp e +ac k +i f +ou s +' m +st er +a pp +an g +an ce +an s +g ood +b re +e ver +the y +t ic +com e +of f +b ack +as e +ing s +ol d +i ght +f o +h er +happ y +p ic +it s +v ing +u s +m at +h om +d y +e m +s k +y ing +the ir +le d +r y +u l +h ar +c k +t on +on al +h el +r ic +b ir +vi e +w ay +t ri +d a +p le +b ro +st o +oo l +ni ght +tr u +b a +re ad +re s +ye ar +f r +t or +al s +c oun +c la +t ure +v el +at ed +le c +en d +th ing +v o +ic i +be st +c an +wor k +la st +af ter +en ce +p ri +p e +e s +i l +âĢ ¦ +d re +y s +o ver +i es +ðŁ ij +com m +t w +in k +s un +c l +li fe +t t +a ch +l and +s y +t re +t al +p ol +s m +du c +s al +f t +' re +ch e +w ar +t ur +ati ons +ac h +m s +il e +p m +ou gh +at e +st ar +wee k +! !! +c lu +th ere +n er +t om +s el +ï¸ ı +wor ld +v es +c am +go t +in ter +of f +u m +ton ight +o ther +h ou +loo k +j e +i d +si on +be au +at t +el i +or t +re c +f f +st er +su pp +g en +be en +il y +te am +m m +i c +pe op +it t +at s +on ly +mb er +en g +b ri +m p +k now +b ur +b ar +in s +lo w +sh e +ro w +â Ŀ +t ro +peop le +vi a +lo w +ag a +be t +x t +f ac +ch ar +e ar +w al +s en +f am +b le +n ati +is h +n or +g ame +li ve +s co +le y +d on +ic k +b all +ver y +the se +p an +i a +at ing +c r +a re +g ir +ma ke +st re +sho w +. 
" +f l +u p +d r +than ks +il li +w om +st s +i g +s ur +ever y +c ur +vie w +le t +in to +mo st +n a +in di +g ar +ha d +s ou +v ed +an t +iti on +ma de +f ol +un i +it ed +ðŁ ı +ic al +th r +read y +ch ec +d ra +k es +boo k +e p +si c +mor ning +ne ws +c au +c t +w ell +an c +pho to +th an +or s +bir th +g g +ou t +ne xt +som e +en ing +stor y +ch ri +do wn +hom e +f fe +fre e +d a +b or +f il +ci al +than k +si de +le ar +qu e +l ine +t en +at es +ye ars +m y +pho to +beau ti +ri ght +n u +for m +shi p +b an +th er +d ays +g am +as on +g y +ðŁ İ +birth day +se t +ic k +e t +st ill +com ing +ta ke +ðŁ ĩ +b b +s ol +s on +d en +e p +mu sic +the m +de n +wh y +f oo +c ra +am az +w n +h ol +t ting +w r +u e +ma g +c ro +l an +c lo +b ra +a k +s ing +c al +re ad +' ve +jo h +b ab +d ri +b lo +bi g +er ic +in t +t or +tr y +l a +le g +hou se +m ic +v al +beauti ful +l itt +chec k +ne w +ver s +s w +ar i +pla y +h er +âĢ ĵ +w in +m a +con gr +sch ool +f un +. @ +he al +ic h +d el +wh ere +l on +ke t +tw o +mu ch +wat ch +v en +d ed +a st +k ed +b as +go ing +m p +e ver +w ays +ro o +de sig +l y +s ed +to p +l in +ch an +to o +it ing +d ent +gh ts +t y +sp o +ne ed +b lu +in st +be ing +âĿ ¤ +w el +l s +hi m +m ay +st ing +n a +el y +litt le +g a +n at +tom or +m c +h on +w ant +a ir +pi c +am eric +p er +le ss +wee k +ve l +a h +c ap +ch am +g er +ti m +tomor row +ne ss +st ate +h al +ser v +z e +o s +p at +v is +ex c +s in +f f +c ity +c en +an y +b el +su mm +t in +w ould +loo king +k o +ce le +fam ily +m er +po w +hel p +bu s +c o +c le +sel f +en s +ic s +th o +an i +ch o +le ad +b s +t wee +th ink +for e +ch il +vi de +di d +al e +ch i +v il +en ds +w ing +p as +' ll +v ol +s a +g s +man y +j ec +be fore +gra ph +n y +ur ing +w il +d d +bu il +f av +st ed +tr an +l ing +ou d +d ge +fi el +nati onal +st a +c er +w ere +in a +se ason +c ou +n ed +amaz ing +ti ons +cele br +n s +a th +he ad +s day +d ar +lo c +v in +an other +g oo +s at +n y +jo in +pre s +s es +s ing +an a +in ing +.. .. +c our +ï¸ ı +ac t +cau se +li ght +am s +t a +b al +f c +hi gh +off ici +t t +chri st +d ic +d ay +ra l +h or +: ) +vi si +n am +o b +ma s +gh t +re ally +t un +fin d +thr ough +por t +u t +ti ve +st y +n e +or e +ðŁĺ Ĥ +supp ort +ne ver +ev en +ðŁ Ķ +h a +y a +l d +u k +r an +j am +wi th +me di +d es +ne y +ch ing +al e +h y +k in +! ! 
+d y +pl ace +al so +b le +wh ich +bl ack +b li +s ay +par k +pl ay +ir e +vide o +week end +a il +ke y +p t +w ard +fri day +d in +ine ss +g ro +b en +al ways +t ball +ag o +m il +c y +pro duc +di sc +un der +ple ase +sp or +fu ll +e y +ðŁ Ļ +is e +iti es +c at +k no +u se +fo re +k er +ar t +hi gh +op en +s an +e f +our s +sh ed +st ri +d ro +aga in +i m +ðŁ ĵ +en jo +fu n +ge tting +p en +g er +c li +an y +ever y +e u +wom en +â ľ +e st +c ould +r y +" @ +th ou +sh a +comm un +b er +d ents +di s +wh ile +aw ay +di o +h am +g la +d ate +k a +mis s +un ch +w on +in f +roo m +g a +re al +ex per +di rec +sh ould +sp r +g ol +l ong +bet ter +or i +e y +i ence +il s +z z +h an +f ound +v s +â Ļ +po st +ti c +par t +m en +ren ce +ce ss +v ic +s il +sho p +ðŁĺ Ĥ +f ood +v al +sti c +y ou +s ays +e lec +st ar +o c +l and +i d +c tion +fiel d +s of +st art +wat er +fri ends +on es +ðŁ Į +f la +f ar +wh ite +par ty +in st +gr ou +t v +every one +m ent +j a +ch a +pr in +an ts +d uring +l at +l ar +we st +th en +k a +y oun +in sp +in te +we en +visi t +aga inst +re le +he ad +c es +to wn +loo ks +th re +re gi +ren t +pro jec +gir l +se ar +w o +m om +c ar +h un +pu bli +d i +p le +c all +c ri +u m +for d +per fe +fri end +h ard +ssi on +te st +pla ying +ar ound +be cause +ke ts +me et +sat ur +ar ti +wor k +j un +v en +r un +me mber +por t +su per +t wit +s am +el s +t ly +ad v +ati ve +at h +s ure +av ail +la r +s qu +ar ds +ev ent +m en +l l +o ver +lo gy +it al +tim es +m al +b ack +c oo +ma king +st ru +â ģ +it u +sh ar +g an +c as +s n +summ er +pic ture +f an +h in +christ mas +c y +pr oud +cham pi +desig n +pp ing +ho pe +c a +avail able +ma y +we d +photo graph +spe cial +sal e +sto p +er y +a we +al ity +hi story +am a +pre si +b ru +wor king +d one +d r +k en +fe at +w ood +ate st +sun day +mo vi +vel y +s le +f ace +sp ec +stu dents +b y +ha m +sp on +bus iness +d at +i e +i p +so ci +g lo +h and +re cor +r s +me e +ke ep +p ur +heal th +sh e +com ple +go d +da vi +col lec +li st +r a +clu b +t ers +in clu +th ings +pl an +â ĺ +joh n +sh ing +at ul +so on +blu e +g or +satur day +w on +congr atul +se e +âĿ¤ ï¸ı +tho se +ðŁĺ į +fin al +d ou +it h +o wn +ro ad +t our +a st +indi a +ti l +n d +f er +fav or +su l +lear n +fir e +ju st +grou p +a h +r ac +bo dy +u r +c are +à ¸ +p lo +o h +po s +gi ve +te ch +su b +c ent +er ing +y m +il ity +f ic +lon don +v ir +gu ys +b a +ðŁ ¤ +bab y +sc re +ðŁĺ į +tru mp +un der +chan ge +i an +col le +ss es +l er +ss ed +n ice +ann oun +pow er +s ar +a king +min i +s li +s wee +k ar +fu l +c ru +ac tion +a ther +) . 
+st and +de vel +a a +g an +le ft +lo l +re l +tran s +m ents +in t +e f +man ag +di g +gen er +do wn +p au +ti v +k u +th ur +k en +st on +f ans +tal k +twee t +t oo +sty le +pro te +se con +fr on +awe some +g l +p al +ne t +s or +la u +g on +sin ce +t ty +ser ies +me mor +b eli +fil m +di d +di es +o t +congratul ations +p ra +e ve +w oo +offici al +su c +in cre +b on +par t +pp ed +cla ss +si ve +bo y +cu l +perfe ct +t ou +d am +wel come +foo tball +h i +p ap +wa it +ad a +congr ats +youn g +exc ited +re ce +j an +v a +re d +st ra +medi a +' d +do es +le t +mu l +ill s +gre en +m el +to ge +fu ture +ye ster +vers ity +for m +ta in +i de +ch es +ki ds +qu i +ha ha +de ta +bi g +favor ite +gir ls +con tin +do m +sear ch +u al +a ir +d ers +mon th +c er +yester day +commun ity +ad e +do g +vil le +ic es +d eli +sy ste +ru n +is m +he art +c up +en ti +fe w +presi dent +e ds +un til +fe sti +o k +f lo +sa id +ol e +me d +tra vel + £ +ph one +toge ther +fa st +lo t +gam es +sh ir +bet ween +y es +th ers +do ing +m ac +at or +b and +fol low +projec t +devel op +di ffe +con fe +spe ci +ca st +y s +bo ard +r d +i al +sh oo +r am +ha ving +sh are +fol low +on e +n ame +m r +pu t +disc u +or y +c ame +ou s +s ite +twit ter +t b +t it +fin ally +z ed +su per +com pan +us ing +all s +li st +r is +sho t +g al +t ar +de l +joh n +âĢ Ķ +some thing +ra m +inte re +wh e +b it +ðŁ į +stre et +oun d +a i +tic kets +movi e +re al +k y +ta king +o pp +c c +l am +m oun +in ve +bl ack +us ed +on line +y or +loc al +gu e +c ks +o w +ge st +bo ys +illi on +con t +re ci +in ed +eu ro +no w +se en +p h +te ach +de f +sou th +su ch +aw ard +mu st +is su +ca re +fe el +p lu +l atest +spor ts +we b +te x +e ment +s k +fi c +w an +te ch +o t +bo x +n er +fre e +t al +a sh +c ase +ho t +won der +mee ting +er a +ch all +ðŁ IJ +jo b +il i +c ool +j our +th s +m o +f el +di e +mic ha +e le +te am +serv ice +st and +ma kes +p ing +ear ly +com es +e k +ho li +v ers +ag ue +s au +thre e +mon day +fa shi +some one +th ro +se a +b ad +supp or +tur n +ur y +m ing +photograph y +n ic +mar k +pre tty +ss ing +wat ching +me mb +ar ri +coun ty +be ach +fr an +cen ter +pol ice +b at +publi c +t an +pre ss +s af +s y +ge ts +ro y +n ers +y our +bu y +st ers +sho w +as ed +chil dre +af ric +in es +sp ace +sc ri +h all +pa in +ar ing +hom e +m ur +heal th +ch ed +s and +rece i +gu y +e a +americ an +re si +childre n +- - +i ri +ing ton +coun try +ro ss +le n +ann a +boo ks +b c +e ce +d om +lo vely +k h +pe t +g y +g ri +st age +off ice +ro ck +m on +b ay +t able +su n +m ed +th in +l or +f low +( @ +uni versity +stor e +fron t +goo d +z a +vo te +nor th +he y +an im +or der +mi d +with out +a de +re member +mar ket +? ? 
+mu s +tra ining +e duc +bu t +co ver +st an +sc en +b la +bre ak +l ou +s ame +g old +a in +o s +bo th +l it +ver n +a i +al bu +p a +enjo y +be g +ell ing +thur sday +inf o +s an +americ a +ha ir +te l +mar ch +con cer +colle ge +confe rence +ap p +h our +ch ang +â ļ +s our +ol s +we ather +w ar +p hi +festi val +secon d +cu te +pr ac +en er +str y +le a +pol it +s av +se n +o w +m i +ne ar +ou ght +z e +co ffe +w illi +d an +se y +davi d +e se +f an +de ci +the at +no v +ati on +tr ac +sc i +re view +c el +e m +u n +ju ly +or ig +ti on +d ru +form er +st ay +af ter +in v +too k +dat a +b al +tu es +d an +ev ening +ðŁĺĤ ðŁĺĤ +d ol +u res +pro vi +t s +e st +sig n +j ac +u k +s ong +ye t +bo w +in du +j ap +h oo +po int +any one +z y +i st +h ur +it al +buil ding +wom an +ch ur +j er +per for +co ach +le ague +ce ss +ne t +i mag +nati on +br it +qu e +aw ards +ag es +wor ks +c ed +man ce +l ate +ig n +mon ey +tru e +i i +t ell +pl ac +p ac +as y +wor ld +be hin +im port +read ing +gra m +gi ving +me t +h it +for ward +st om +pres ent +jun e +so cial +no on +mar t +hal f +s we +go vern +k er +deta ils +li sh +_ _ +ac y +si a +ber t +f all +! !!! +) , +th i +d iti +sp ort +k ing +f it +st af +c at +mu se +cen tr +y er +con tro +b loo +wal k +ac tu +did n +li m +lear ning +re search +wed ne +au th +h ours +k y +f ar +h en +.. .. +it ch +ri l +str ong +sk y +que sti +jam es +r on +d g +f ur +c in +do es +app ro +mar ke +tu res +ful ly +ch at +behin d +te m +fin i +mis sion +b att +fe el +he av +every thing +b ar +w ish +pre mi +i ma +exper ience +e ach +re port +swee t +tic s +spr ing +re spon +syste m +vic tor +l in +sa w +al ready +gh ter +f le +ã ĥ +br ing +albu m +- - +ell s +st an +to m +inter national +w ent +an ni +mat ch +pp er +st one +sm all +ra in +fashi on +are a +v an +ag ram +k o +thou ght +wor th +v an +m er +coffe e +it es +g n +arti st +c on +ar ch +c ir +se cre +gr ound +is o +h and +co m +bri dge +h s +x i +l ink +pu l +sp l +r ace +f li +ri ver +g as +di sco +d al +play er +f it +photo s +it y +o k +j or +tr a +ap ril +ad s +a di +sol u +beau ty +do or +me ss +up date +ali a +sch o +en ed +mom ent +sco t +sc ience +i or +ti es +ac ross +ous ly +sh es +does n +p age +wat er +m illion +cla ssi +l ic +ca st +form ation +micha el +ell o +s mo +in ts +vi sion +op ening +ld n +au str +tues day +win ner +po ssi +r ound +shir t +di t +b o +u es +il led +al ong +tri p +star ting +im pro +k an +per son +no t +re co +ne eds +c le +li e +re st +r ing +win ter +si mp +mo m +be er +fac e +tor s +us a +collec tion +ge or +se ssion +tr ying +la s +la ke +j en +orig in +stu dent +se cur +v in +pic s +ex pe +com p +gon na +e qu +b ad +le y +a u +memb ers +bre ak +w all +gi c +din ner +bu l +insp ir +r i +min d +ic a +win ning +tal king +t ren +s is +t en +wonder ful +s now +he ar +th om +no thing +gu i +st in +blo g +fe st +b un +le e +war ds +ch ance +dre ss +re n +pau l +p es +tech no +ru ssi +c ard +e ast +mar i +w ine +t i +la w +str ic +k i +ap e +au gu +pro fe +as h +cour se +ma il +ren tly +d un +m un +lo ve +is land +dri ve +s l +end ed +ma in +lo st +nat ure +âĿ¤ ï¸ı +ch ic +re por +p in +pr o +st ation +ce p +ta kes +compan y +go es +on d +ma ch +ra dio +d ad +ro ck +j a +p ay +champi on +e e +in de +tt a +ati c +t ab +beli eve +ener gy +z i +t at +wor d +on ce +re sul +y l +and re +an o +inst agram +clo se +t am +cu stom +w a +con om +sho ws +li fe +k in +ro b +t age +n ation +al most +list en +sa ve +re li +ac e +mar y +tre e +for get +j ack +wa iting +direc tor +h ill +bor n 
+te mp +f l +st e +on a +sing le +wedne sday +un ited +in o +@ _ +ne l +celebr ate +en ding +de al +j i +can ada +hu ge +tr ack +âĢ ¢ +f y +fan ta +an g +yor k +rele ase +p un +ep iso +wor ds +t our +p ack +i gh +classi c +perfor mance +ke t +after noon +recor d +win s +pro ble +âĿ ¤ +f our +b ed +ban k +d ance +s la +cal led +mi ght +a p +pa st +ðŁ ļ +diffe rent +it e +gi ft +ssi ve +chur ch +c us +pro gram +ho tel +ic e +ma d +secur ity +en ge +d c +en ough +st a +e ty +de ad +g un +he ar +m ir +hu man +gre ss +oun ds +pi ece +bre aking +gar den +fi ght +vie ws +f ish +star ted +run ning +gre en +ser i +s m +as k +d or +de ath +e conom +er i +ir d +s er +l unch +âģ ¦ +bo x +nat u +ba se +b an +f al +glo bal +wil d +wo w +out side +mo ve +le ad +an al +muse um +on g +ha w +pow er +than k +b ac +char ac +cam pa +dig ital +r o +op er +de v +w ol +p ati +f a +m ale +pap er +ill ing +c s +â ĥ +educ ation +ta ken +e ffe +m ou +s ad +" . +bas ed +staf f +inclu ding +li ving +a c +ch ina +mo b +stor m +lu ck +ph il +o o +y n +tra vel +k el +ti al +pr ice +boo k +import ant +bi o +p ool +ny c +f ab +lo ad +? ! +chall enge +cr y +ser ve +we ar +bu s +ta in +nu mber +ro r +k at +i z +th ough +ho sp +m m +fa ir +ut es +ho t +po p +fi ed +cam p +develop ment +li br +c ali +em s +âģ¦ @ +b ol +is ed +stand ing +mo del +it a +g le +bro wn +ima ge +ve red +for ce +o il +par tic +sh u +da ily +la w +se c +cla ss +cam p +holi day +cl in +k ers +pres ent +gam e +incre di +er ship +inter view +b ill +du e +and y +ab o +in nov +ke y +ac ade +p il +mo der +st ars +br and +f er +wee ks +con si +pr e +sa fe +wr it +di um +la unch +marke ting +ann ual +as si +cour t +la dy +c ted +and a +in side +chil d +opp or +sm ith +centr e +gu e +âģ © +f ren +st y +for t +ent ly +is n +ke ep +to ber +on y +bo y +al d +col la +de mo +le vel +com pet +ad o +b our +fanta stic +m ate +s u +sou th +oppor tun +vers ary +lat er +bu d +face book +la un +ster n +p it +! " +ma j +gr am +tb t +fi re +happ y +a ks +wh ole +actu ally +ill er +ell a +lo ts +al ex +an ge +lan ds +ðŁĺ Ń +en ter +r ou +episo de +p ed +in ten +sh ire +wh o +pl an +h o +ca ke +we st +mag az +fre sh +c c +n ar +ch ris +wr iting +w er +n om +l o +mi dd +dre am +o l +ti onal +de b +> > +be come +s i +gr and +all ing +hi stor +ri de +i red +saf e +que en +ci l +in tro +vi l +d ani +.. . +ar tic +st at +sh ort +or ing +sel fi +mis si +do c +b it +g all +b om +i re +se lec +d ition +ðŁĶ ¥ +fri end +be at +gh ting +ðŁĺ Ĭ +pe ace +ex hi +ant a +ab ility +il lu +j on +qu ality +tri bu +m es +play ers +fa ir +cu t +c ab +suc cess +b i +su s +pro mo +sch e +an ge +ic o +comm it +cat ch +ill a +kin d +feel ing +qu o +s ay +anni versary +spo t +mo ther +an e +p end +your self +op s +app le +min utes +p o +gr and +ri es +ha ha +care er +ed ition +de c +ric k +am i +concer t +iti ve +ge ous +d ly +t te +adv ent +i g +li ghts +ak er +sk y +âĥ £ +r ay +fini shed +w ay +s d +ac coun +ðŁĴ ķ +ck y +ch el +lit er +pain ting +lo s +st un +techno logy +n as +ma r +b il +afric a +ki e +ey es +gol f +plu s +ni a +it ec +serv ices +wed ding +kno wn +te le +.. ... 
+star ts +pa ren +w ants +ati onal +mon ths +win do +fav our +er t +magaz ine +ex clu +re ve +b c +origin al +e ss +n al +an ti +st ro +t ice +stu dy +à ¤ +v ac +nation al +fi ve +ra in +ve ment +u te +ver se +em er +ar my +possi ble +gue ss +val ley +ther n +cro w +m r +col or +on to +pic k +cle ar +dar k +t ac +wan ted +it ting +can cer +govern ment +di e +ri se +z ing +col d +f oun +stu dio +str ation +bro ther +a head +sh el +mic ro +ic ally +d au +sig ned +vi ol +a x +as se +i o +w re +spl ay +ch ick +augu st +pl at +ti ps +sp i +hu man +e asy +lo gi +mi ke +gro w +ag re +w w +sh ad +mo tiv +wi de +tur ns +om g +v ar +de fin +su g +j im +ðŁĶ ¥ +t d +campa ign +nam ed +re tweet +co p +t v +le av +k is +dou ble +s mar +issu e +vil la +in formation +li es +sto ck +n t +di stric +sh or +mi x +er o +se p +me x +see ing +li ve +re min +co de +g ur +s c +wil d +l un +h ood +spo t +fa ther +fore ver +up d +tra f +f ly +ne ed +gra du +tra in +ma ke +s ab +be y +si ze +lead er +tal ks +e u +lo g +fo x +gor geous +le ss +le ts +sur pri +my self +no te +li ves +f ru +lo ved +se ver +de m +j i +so c +h old +do gs +n i +â ŀ +lea ve +air port +ben ef +ex pl +shi ps +comple te +ach i +gre at +vin tage +j ack +ro c +woo d +pri v +off er +ey e +ver sion +te a +co ach +off ic +w ell +g en +s at +h h +you th +o x +? " +m t +mi x +g g +d le +natu ral +buil d +break fast +thin king +theat re +mo on +ber g +go als +geor ge +en e +exc ell +il ing +tun e +y ed +g ate +m it +net work +jo e +h ello +f b +tu be +we aring +ath le +stru c +har d +gla ss +g ers +thro w +g es +b t +indu stry +manag ement +ali st +go al +stre am +y el +a vi +ici ous +o thers +s ki +chri sti +bir d +e sc +m in +tr o +l t +j an +im p +ri ghts +sh a +or gan +cent ral +ar a +ro ll +favour ite +che ster +el se +p ay +car s +m ine +ste p +prac tice +maj or +h ang +ðŁĺ ĺ +n on +v ari +eng ine +vol un +di a +i led +arch itec +p ink +d s +th y +wa sh +web site +ba g +contro l +el li +f ra +an sw +d ence +y u +r on +ol a +g in +dr in +li c +cou ple +sp ar +g on +cre ate +c t +celebr ating +de ep +e at +te e +vo ice +dro p +vis it +at ors +sta dium +f t +w is +ro l +gra de +fam il +po ints +re pre +w as +traf fic +jap an +or g +hon or +tex as +man u +âĻ ¥ +safe ty +re r +b ag +em plo +rele ased +re gu +ak a +n av +ro le +sen ior +spec t +cro ss +lin es +be st +p ack +s in +ti e +mis sing +sun set +li ber +is ing +j ay +sk i +champion ship +ac tiv +la dies +play ed +y y +pu bl +al o +pri de +s r +pa ki +lu x +sur vi +ck ed +e ts +cho col +austr alia +par is +mi les +h at +ment al +al a +me an +mob ile +en a +in si +f ound +chi ef +t ag +incredi ble +re turn +à © +goo gle +fren ch +cre w +hal lo +ali an +j az +ch er +sil ver +nor th +eng lish +base ball +c af +lim ited +follow ing +app reci +ear th +k ir +ve mber +w ed +p tion +g ed +oc tober +fl ori +c r +en cy +ga ve +lor d +stu ff +ber ry +po st +sm ile +bro ad +st ate +gg er +me ans +ic y +gu n +y o +ma ster +bur g +han ds +ni e +/ / +uni on +brit ish +big gest +distric t +am ing +h il +o ce +per son +pas s +en vir +scho ols +arri ved +anc es +insp ired +ex pla +be n +libr ary +bo tt +am p +ste ph +cont act +b ang +m s +cali for +t old +batt le +b b +chic ago +âľ ¨ +str ate +sh i +de ce +- ) +ad d +la b +j ones +leg end +cast le +ing er +st ance +be l +ur a +re fu +lead ers +po t +se x +h ic +artic le +ki d +fr ance +x x +ex e +gui de +volun te +pr int +al i +ce o +twee ts +w x +scen e +vol u +ant i +h an +as soci +shar ing +ro se +mini ster +sh er +in ste +cle an +demo cr +po ster +sk in +p 
sy +pro per +cra zy +i am +o re +in i +any thing +po d +mo ving +cl ick +ex plo +com b +cra ft +f i +bloo d +is ra +publ ic +d ent +ol ym +eng land +a si +ch er +fac t +envir on +har ry +g one +me dic +enjo ying +just ice +j r +indi an +wi fe +s ound +t es +dra wing +p al +ide a +cr it +ju li +il er +war m +cl ar +thou ghts +def en +coun cil +intro duc +di ed +jan u +an i +s end +li er +m l +intere sting +tra de +win d +b ay +s ac +anc y +sour ce +b es +org ani +ar ly +lar ge +ff ici +ta g +u t +de sp +o es +tit le +sy m +pic tures +op en +wom en +sho wing +ri a +le ast +lead ership +cur rent +elec tr +val ent +list ening +c key +gener al +de ser +du ce +; ) +c ent +ðŁĺį ðŁĺį +sco tt +po or +selfi e +ev ents +i on +wr ong +de v +h ill +sep te +cul ture +l ine +sor ry +s ent +si ster +ce pt +k ri +no vember +ar i +announ ce +z ation +br an +g ent +d u +l en +per s +f m +mart in +o p +e mb +om e +midd le +suc cess +pe ter +janu ary +f lu +rac ing +d av +bi ke +ðŁı » +pe t +shoo t +profe ssi +feat uring +septe mber +now playing +sta ur +z a +on ic +qu ick +bas ke +spe aking +mil it +z er +chick en +b ell +s ad +co ast +lo ving +y ers +d j +pan el +ver age +s wit +ic ks +b ou +califor nia +s am +paren ts +er o +k illed +ph ys +jo bs +mi gr +an th +e mo +hallo ween +and er +c m +compet ition +e ag +s ket +sp ir +may be +exclu sive +app e +jour ney +scre en +for d +i o +h ate +u g +sou l +her o +soci ety +sy n +gu it +n h +d j +as es +im pre +ti me +sal es +d d +f ts +summ it +stun ning +om s +tur ned +cle an +sof t +be at +re staur +de red +en ces +ma gic +di o +sh ine +gu est +health y +exhi b +stor ies +po pu +n is +el a +bel ow +fun ny +resul ts +s ne +cur rently +ar d +down load +f light +m al +f ine +p ad +ch u +ent ed +h at +ðŁij ı +ste ve +j o +mar k +r at +b all +p c +p on +b by +o li +ar ts +as ure +bow l +att ack +mi c +de ar +ran ge +en ter +chocol ate +br illi +ac cess +, " +? ?? +ch ap +con st +t n +mat ter +blu e +gall ery +em p +work shop +lead ing +y ours +baske tball +w anna +th u +_ _ +mar ri +sle ep +bi a +ch e +ma d +imp act +o wn +si r +chan nel +euro pe +e sp +k itch +hosp ital +w ra +roy al +f s +ne u +qu ar +ne y +ac ks +ch ase +pp y +st al +at ely +ti m +dece mber +r are +per form +cre am +we ight +ch oo +ni ght +ha ven +fr anc +kh an +buil t +hel ping +tru st +ty pe +gol den +ta x +s now +s wi +di sa +questi ons +ve y +li ght +c n +cl oud +thom as +ag ed +sh ou +te ams +gr an +re ason +a a +you tube +v p +pi zz +manag er +bur y +cre dit +tre at +ma x +i k +ma in +g ing +de ad +pro bab +ye ah +ã Ĥ +br and +so li +pl ant +ta yl +gir l +ðŁĺ Ń +nam ent +au to +mess age +ko re +n ur +ter r +ag u +ma p +sen ting +lo ves +gi ves +g ab +z en +ro bert +con fir +w ars +o m +sta in +cam era +and er +won der +a b +ca p +s old +su it +wal king +contin ue +effe c +dau ghter +d anc +cha in +mul ti +ki d +y an +champi on +v o +ta ins +ho st +min i +mis sed +re sc +ly n +fin ish +del icious +s as +tayl or +i b +pro mis +produc ts +moun tain +flori da +regi ster +tre at +rec ent +fe male +boo th +mat t +ve hic +s op +mo tor +suppor ting +phi c +ex tre +dr ink +lan e +th ird +p s +con stru +ce re +far m +ðŁİ ī +tu red +ðŁij ī +c ats +a j +gi e +shoo ting +as ked +paki stan +am e +m b +g il +leg al +squ are +in vol +dra w +oo oo +!! !! 
+opportun ity +p y +e i +b ts +teach er +charac ter +john son +br on +ly wood +ch ine +c ing +c ine +d ge +gam ing +russi a +ci a +quo te +ric h +go v +flow ers +sp iri +st in +grow th +ðŁı ¼ +comm er +j uni +mu m +r an +s na +a ren +c b +ac tor +col or +si t +pa ir +ch i +bo w +acade my +hel d +r ang +me tal +y l +ac tive +probab ly +t ch +need ed +spe e +cho ice +ital y +ry an +ðŁĩ º +flow er +v it +m n +found ation +b ak +si ons +ne igh +f loo +he ard +re mo +fre sh +ing ing +re f +to wn +cl ou +je sus +spiri t +cou ldn +z es +ðŁĴ Ļ +willi ams +pro ce +moder n +pro cess +sho es +cre ated +tri c +issu es +ann e +att en +de but +h r +n it +sti g +a po +e ps +z u +ã Ģ +si x +car ds +lan gu +fam ous +tour nament +se l +e bay +y n +st on +k ick +announ ced +k am +vo c +brilli ant +hou se +che ese +war ri +mus ic +ho ckey +ðŁĺĤ ðŁĺĤ +sk ills +au tom +smar t +med ical +mon y +e x +gu ar +gi ve +pers onal +ven tion +al li +pre ss +flo or +m c +victor y +hi m +simp le +th or +ðŁĩº ðŁĩ +ta il +lu cky +ale x +qu ite +bo t +ssi ons +chall eng +c ann +amaz on +h ell +b ought +) : +ed y +secre t +produc tion +inde pend +de fe +ad ded +p r +p ag +be d +gre atest +with in +j ay +ðŁ ¥ +ire land +re ly +s d +te xt +dri ving +pro gram +spe ed +col um +str on +à © +fore st +â ĸ +mach ine +co in +sc ar +oun t +bi e +¡ ï¸ı +por tra +comm on +wre st +recei ved +kno w +inve st +pl ans +ac cor +ad op +ter y +re ali +p p +k al +art work +me an +go d +inste ad +an ci +motiv ation +as ing +inspir ation +up coming +polit ical +euro pe +m ers +heav y +ðŁij į +fe bru +scot land +ou gh +b t +bo ss +sche du +spe ak +n ick +u red +in o +e k +ri sk +tor y +pres ents +b on +ru g +st ates +exhib ition +il o +m ill +br ought +: -) +tou ri +com e +offici ally +champi ons +do ors +re p +po se +ex tra +k ings +soc cer +squ ad +app lic +at a +some times +t ari +excell ent +ðŁĺ ĺ +stra ight +car ol +ri p +âĢ į +gra phic +m ol +elec tion +febru ary +as ons +l i +di r +m t +n ick +u su +m rs +com ics +inst itu +cor por +v i +ðŁĻ ı +tu ral +di se +ac ci +we are +am ong +sho pping +t ill +wh at +cha ir +sp an +chine se +innov ation +jo y +k it +cent ury +ob ama +ph ili +f c +re ach +c iti +ul ous +n on +d ang +happ ening +bur n +p el +or ange +d v +k ick +cla im +ing ham +ph y +no v +pod cast +wh i +ni ghts +ear lier +be ar +la h +exc iting +or a +gi ven +s lo +memor ies +contin ues +produc t +gh o +c d +kno ws +ðŁİ ī +publi shed +discu ss +y ard +i phone +tri es +w all +fe b +are n +tru th +win ners +tu re +diti onal +milit ary +proble m +m and +do g +lo ss +c ric +can adi +ve ter +villa ge +" , +y r +un g +don ald +ag ing +bir ds +sci enti +le s +th is +regi on +tic al +itt en +il a +ðŁĺ İ +d ad +di am +abo ve +st ren +li t +p ir +la b +fo cus +bus y +d ur +app ly +s ma +auth or +ac i +exe cu +dom in +re la +jack son +at o +wash ington +ðŁĻ Į +k ill +popu lar +ce ment +ro ad +e ating +loc ation +v ent +ar re +n an +cu sto +advent ure +or din +spor t +ul t +lo ck +questi on +dri ver +land sc +on i +k ins +p d +jor dan +te red +k k +a f +chil d +s p +just in +en i +s elling +z o +wh it +bo ston +partic ip +sig ning +happ ened +he at +m am +dre ams +lo ws +gra ph +the day +head ing +br o +ble ssed +vi c +ve gas +h d +in ning +ro man +and ro +den ti +u se +c it +pro gress +writ er +bo b +ff s +gro wing +b ly +aw are +ex am +sp ent +be t +sc ore +bey ond +do cu +ad el +s f +cou ra +colla bor +in c +priv ate +bo at +* * +z one +p ha +b ill +to tal +plan ning +to wards +plac es +pre view +cre ative +dam n +ide as +se ems +po ten +say ing 
+di splay +s w +a qu +lou is +by e +li l +e mail +we stern +ger many +ell er +re s +f ant +ment ary +de als +ric hard +jer sey +stren g +ra d +pizz a +mon d +w are +l ac +g i +ar chi +c d +yel low +rec ently +re ach +à ¹ +kitch en +desig ned +tr y +g al +restaur ant +at ure +w w +j as +l ma +ðŁij Į +pa in +av o +min ute +sch ol +ther ap +tic ket +d ry +jap an +diti ons +ter ri +sel ves +happ en +t up +ma g +cop y +sh er +free dom +f ile +speci ally +tor onto +lo ad +g ary +re y +answ er +lo y +cau ght +pri ze +u ne +fic ation +ni ger +sy d +tou ch +feat ure +jaz z +recor ds +him self +di sh +ro ber +spot ted +ma ster +wa ve +fin als +bu ll +for um +al d +re comm +ch a +a e +d oo +inst ru +tru ly +l g +in k +bro thers +de st +j im +m it +clo sed +is on +tri ed +s anta +af fe +w an +hor se +g row +camp us +rel ation +nati ve +jour n +go v +o ct +k it +b ound +part ner +re ma +crow d +! ) +c alls +ra il +qu ali +solu tion +con test +con vers +sn ap +b ase +in iti +ta x +y e +ent repre +it or +constru ction +foo d +present ed +n ings +cli mate +k m +mo del +b j +blo ck +present ation +dre am +fi x +c alling +bus ine +con gress +under stand +we b +val ue +ï¸ı âĥ£ +mex ico +it ely +ki m +char ity +ref lec +bl an +fl ying +anal y +famil ies +b and +reci pe +celebr ation +ac cep +ar y +to t +g b +intere sted +cap tain +âĻ ¥ +ti p +ab sol +bra z +inve stig +o logy +de c +tru ck +ver ing +c lear +don t +go tta +ad vis +beg ins +ma ss +de scri +blo ck +k im +davi d +son gs +memor ial +feat ures +su stain +' . +gra b +jo se +v a +con serv +se ts +man chester +fi ghting +de gre +ag a +in d +sle ep +pos ition +ha ir +sig ns +pol icy +it o +al ert +st am +sp end +w y +absol ut +d m +anim al +my ster +success ful +proble ms +ro bo +k ay +gar den +p d +may or +d ale +t ol +off ers +vis iting +friend ly +tre es +offic er +accoun t +ke vin +ðŁij į +gi ant +contin u +con su +tr act +n fl +ðŁĺ Ĭ +h q +b ility +a ar +dis ney +te en +on ed +wh ite +tra iler +de dic +al one +absolut ely +dig ital +willi am +in ation +s wa +e e +enti re +ger man +ro ll +h its +co st +st ay +th a +ali ve +accor ding +co t +liter ally +her it +re ti +haha ha +exper i +li kes +g t +ste el +__ __ +ch air +christi an +to wer +diffe rence +m d +tre ss +mi d +prin ce +afric an +fe der +foo t +car ri +ser ved +r ice +sh all +feat ured +ck er +rec ru +po e +sen se +ni fic +com edy +cont ent +f at +po sted +con tribu +tim ate +li ver +mb le +inter net +ag e +europe an +cl ing +gla d +ff ic +sc o +ak es +el le +ter min +ton y +p ale +col our +seri ous +pat ri +movi es +b m +professi onal +ad o +al u +br inging +f alls +isra el +ter m +langu age +bro ok +man n +commun ic +can not +ac ti +p he +y an +entrepre ne +tur key +log ical +lon g +ar m +ur s +work ers +ing ly +gg s +ri c +tu al +recei ve +op ens +ge ar +soci al +fe et +c king +ad ver +fin an +fe els +sp la +h r +ea ster +bra in +ã ģ +fi g +le dge +ne arly +prote ct +ma ssive +e th +aw a +ðŁĺ ģ +y rs +aware ness +defin itely +k n +imag ine +k u +syste ms +ðŁij ı +f as +li k +provi de +am o +disco ver +inf lu +ma ker +g az +fit ness +stre et +er s +te d +w c +ys is +pos itive +hel ped +que st +andre w +bra d +b in +hang ing +l ing +bri ght +se ction +ma ss +ðŁĻ Į +follow ers +ho sting +tem por +fla g +a ve +let ter +k ur +re qui +of ten +cry p +su ff +âļ ½ +russi an +treat ment +al le +ha y +l an +keep ing +hol y +power ful +pre dic +fun d +e specially +windo w +je wel +il y +ðŁĴ ľ +gener ation +app a +seri ously +o d +ðŁĺĤðŁĺĤ ðŁĺĤ +cer ti +iri sh +ðŁij Į +mi ami +be th +v ity +se cu 
+che f +cri me +graph y +ma x +arti sts +re volu +gu ard +spee ch +u c +upd ates +fac es +st ant +chang ed +repor ts +low er +pe ar +n c +k il +loo ked +spe aker +s f +re spect +ok ay +oce an +s itting +architec ture +tra il +se at +i ra +le g +japan ese +d am +u lar +sw im +polit ics +finan cial +ol d +mou th +at temp +de stin +fi shing +atten tion +me m +chang es +deci ded +reli gi +g in +c av +z z +ad am +ma c +wr ite +beg in +sc ul +al ter +is s +ath on +imag es +m oo +jo ined +ðŁĺ ī +âŀ ¡ï¸ı +pas sed +mu sli +h ir +lar gest +cam er +com ic +gh ted +rug by +bur gh +gg ing +te sting +pre par +lau gh +al ed +impro ve +beli ev +adv ice +sha res +he art +tur ning +s b +t el +caf e +n es +dani el +pat ter +t z +se tt +par k +c and +st ick +happ ens +bri an +ne west +e pic +ad or +ki es +war ning +anim als +custo m +ar c +di an +gol d +cor e +t f +c ity +pan ts +re ality +con fi +in ju +fo x +gu il +k new +âĺ º +cor rec +itu de +d den +. # +re duc +pas s +f on +y a +ow ner +re turns +n c +e ast +ap ol +in sur +th o +si m +juni or +be e +ang el +att le +elec tric +hor ror +cra sh +e ye +pat h +sou thern +emplo ye +ge o +t an +ha z +r ally +ðŁı » +proper ty +was n +enjo yed +gre y +g as +bre w +nor thern +hol ding +g p +ta ke +ch art +ly n +dr ama +z o +pa id +throw back +cu p +discu ssion +down town +w ill +le w +b is +t ary +bre ad +up on +r ate +teach ers +it ation +anc ed +cy cle +choo se +d c +ir an +co w +da ve +ra ise +prin cess +fa ith +- > +indu stri +sp ain +guit ar +fac ts +m n +sp en +cour te +go tt +projec ts +au di +o sc +pe ter +s and +intere st +happ iness +ven ue +sol di +surpri se +poten tial +per io +custom er +i i +g ni +manu fac +e co +bro ken +sing er +vel s +wal es +hu s +in j +f our +tal ent +d ying +mat the +fil m +jo ining +s ell +j ar +lma o +sur ger +bb c +sour ces +au stin +ni k +char les +f am +prin ci +ange l +cas h +lo t +o red +pla ys +pl ate +don e +memor y +br ings +n ba +solu tions +teach ing +gr ace +cir cu +hel ps +foun der +mar y +expl ore +de cor +par ts +ch o +inte gr +ha u +is es +pu tting +in er +r it +v y +mic hel +blu es +every day +for ms +bi o +ye ar +p in +t ter +spr ing +) ) +po t +al ing +perform ing +sh an +plan et +mus ical +head s +it alian +stru gg +âĢį âĻ +w ings +pu mp +h h +tr ou +a id +pri me +ear th +pa int +mon t +am y +bb c +fab ulous +fru it +andro id +bour ne +cere mony +enti al +? ? +deb ate +on ing +dra ft +sol ar +t x +j am +cor n +!! !!! +bro o +mil k +po sed +o hi +mo vement +b ren +part ner +p g +et te +ar ies +sh out +n g +leav ing +t ells +sen s +ta ste +kel ly +wor l +gy m +ric h +e gy +pi d +ma s +â Ĥ +courte sy +fran k +incre ase +wr itten +pp ers +re l +ha i +s as +s ound +tt i +w ich +ri ver +.. ." +a g +fel low +ro me +sm all +gen cy +ic an +lux ury +pro of +me t +wild life +mom ents +ra ther +cor ner +com pe +canadi an +lik ely +therap y +li am +econom ic +indi e +rou te +fi ght +ho pe +se tting +ant ly +cro ss +fant asy +de e +sket ch +comp li +ym i +ru les +engine ering +fig ure +ro w +. 
, +f w +syd ney +w ou +t ation +dre w +us es +the re +sp read +struc ture +pat rick +appa rently +ro s +h ills +w we +ann y +com mission +di v +f ying +con sul +anal ysis +ex i +ten nis +vehic le +ðŁĺŃ ðŁĺŃ +as s +high ly +op ened +b ann +ðŁĴ Ļ +mp h +wi shing +v or +fi f +give away +r r +ra y +je ss +g at +ic ymi +x it +high est +yor k +pi e +invol ved +high er +ri e +mal ay +int elli +desp ite +che e +sar ah +be an +reco gni +ar sen +tal ented +pas sion +ic h +ab c +lead s +dise ase +v is +se c +pre senting +m illi +hol e +sho ts +de part +surger y +gov t +b in +du al +e vi +lon ger +ev ol +scre en +portra it +et c +lo se +ch at +p en +p i +om a +s ick +er c +compan ies +en try +plan e +gr y +ven e +liver pool +premi ere +sha red +a red +fil ms +ir a +holi days +cric ket +ici an +v ing +. ) +ul timate +di vision +con duc +se pt +for ces +mon t +s mart +disa pp +sun shine +in d +b less +ma de +col ors +fran k +ir on +bott le +s go +m ood +j ason +er ic +bir th +te en +respon se +tar get +state ment +fe ar +th el +al um +ar ab +bl in +direc tion +ste ps +er ial +wor ked +at l +ðŁĴ ķ +fel t +pol i +scen es +hom es +b ell +e at +ate ful +t in +l ace +fol ks +p se +an n +wis dom +fa v +but ter +s r +are as +sm oo +bi z +dg es +app o +mo re +the m +effe ct +windo ws +sun ny +cap ital +tot ally +c ities +gr ant +mb ers +s low +au tu +il ities +w ro +ri sing +st ics +viol ence +i gh +qu ot +h it +t c +herit age +bu ff +ne s +z ar +den tial +ex ac +ed ge +de ep +aren a +be came +benef its +mar ks +mb er +a z +am es +pre ci +dra gon +re g +d ings +do s +ðŁĴ ª +n el +s ity +me al +di st +leg end +pur chase +pic al +st ick +f at +du ba +profe ss +car to +pro f +coun tries +respon si +se qu +fa b +tribu te +hon ored +prac tic +pur ple +an ton +pa red +t ough +summ er +environ ment +s ons +ðŁĻ ı +m ps +gi es +her oes +t elling +hen ry +f en +know ledge +Ģ ï¸ı +f r +ne g +u re +ac king +hear ts +s oo +hol lywood +ju mp +sau ce +schedu le +tur n +yo ga +cre ating +c ket +cre ek +â Ń +custom ers +ma dri +gu l +asse mb +moun t +c ell +to p +st al +dav is +t wi +sig n +premi er +iti ons +he aring +un k +pati ents +app ear +heav en +al ty +doc tor +a e +plat form +je ff +ðŁĵ · +regi onal +bi d +box ing +ex ten +or ity +a w +w ise +il le +sever al +bi e +s itu +sy ria +âľ ħ +remin der +enter tain +li on +part ners +in n +ph ar +f au +pl s +expe cted +sug ar +deci sion +s b +ch ron +associ ation +leav es +vis ited +sh ap +ðŁĴ ĸ +fur ther +h ann +w i +run s +l er +fun ding +fil led +.. .... 
+tin y +han g +or g +co ol +se min +ðŁı Ĩ +spon s +nav y +sa int +dru g +d al +r oun +co vered +tra ditional +invest ment +de te +al ism +f low +n is +sun rise +fe at +f ted +we ird +je re +ve gan +medic ine +an o +ac cu +deli very +temp le +chang ing +wil son +phili pp +re fe +n d +is er +g ay +r and +ati ves +t ely +p and +intelli g +g are +am bas +de mon +commit tee +strate gy +refu ge +bud get +prote c +pi er +ex press +nom in +econom y +al low +ic on +gal ax +o h +indi vi +dem and +vir gin +lu ke +ali sts +man i +s mi +ju dge +ent y +mic hi +resul t +am ed +spe aks +' , +hou ston +sh in +b ing +fl y +ch em +au to +v as +ge t +ar m +thank s +d in +gan g +x x +si on +loc ated +p l +jo sh +in fo +jo ins +adver ti +ot d +el d +si e +re asons +v ent +ðŁĩºðŁĩ ¸ +â ł +convers ation +stu di +ðŁĶ¥ ðŁĶ¥ +go s +s ounds +un it +mu sc +ge l +ack ed +pac i +co s +de re +u u +a o +la m +inspir ing +ar ms +tw are +mat ters +ad dic +du de +ex t +cri sis +b ath +me et +sing h +expe ct +del hi +resc ue +wor st +au g +shi pping +ser ving +st o +dar k +ac es +histor ic +landsc ape +desig ner +b illion +gr ateful +wa ke +e ve +m iller +hou sing +dy nam +is co +be ha +sh op +pr ou +e as +a sia +e ding +k on +depart ment +aw ar +mar ine +in ci +photograph er +ta pe +lo go +r ings +d it +-- -- +vin yl +w c +vo ting +se ven +ambas sad +dal las +t u +com ment +k ra +b les +w ag +u d +au dio +stri ke +offici al +o ts +me tho +to ols +ra di +al an +hun t +wat ched +a ke +fa ke +drin king +mer ry +m l +b day +ri o +ni ke +c ant +re pe +co stu +mur der +ak ers +ch ers +ou ts +beg inning +so s +ad es +n in +not es +wro te +sol o +c i +li ghting +ur ban +bre xit +att end +shir ts +pla yo +ac tress +pl ic +stand ard +quot es +par ade +anci ent + © +tur ing +re e +pri mary +fla sh +citi z +mat es +ste in +z i +clin ton +sk in +gen e +hu m +g ar +t le +y i +fo cu +de an +pl ants +cy ber +b u +om e +ho p +ad dress +ti x +gi fts +relation ship +sub scri +fe ed +exac tly +haw ks +ex o +stre ss +s n +arre sted +an e +sof tware +z ero +the me +mu mb +im migr +mi a +make up +ple asure +uni vers +har b +eng ine +ap er +r in +br a +institu te +le ather +al th +sing ing +co s +gh ty +me as +st ic +si de +insur ance +co t +pit ch +moun tains +cri min +su pre +valent ine +at er +wou ldn +sc ale +rel ated +re gar +star tup +pack ed +mi ke +week ly +p ts +coun t +ha r +gott en +min d +ber lin +con ditions +swit ch +cor n +sa ve +g li +emer gency +tun ed +sto ck +discu ssing +every body +s day +whe ther +wrest ling +ec es +gen der +ch en +ðŁij Ģ +madri d +mar athon +e gg +i er +th x +as king +kore a +wol f +ay a +g m +g au +at ory +v r +gra ss +k illing +b ble +ur o +un i +e th +sh ore +th en +re ale +bot tom +ex erc +k ar +or ies +ad ri +san ds +se x +. ' +volunte ers +per form +par liam +inclu de +deli ghted +execu tive +fu el +kis s +ã ħ +char ge +h u +ca kes +ve t +g lu +agre e +pr ices +n au +h l +g ru +ra j +streng th +b ic +sp ending +al es +av en +b last +: ( +yo f +nor mal +si x +qu ick +se a +d aw +mee ts +lo vers +upd ated +po tat +comple ted +coo k +opportun ities +p ure +organ ic +tem per +c am +avo id +par king +duba i +and o +di stri +to y +comple tely +don ald +tri al +bas s +b oun +back ground +v as +mar vel +lu m +ru s +t ool +com missi +throw back +fin ding +is lam +! ? 
+st op +e vil +or al +resi dents +i denti +o ak +ðŁİ ¶ +l il +span ish +chap ter +sto pped +direc t +ho sted +pic ked +lab our +lew is +defen se +à ® +health care +wh is +mat h +pe ak +ra ised +fi x +bu ll +th ir +chel sea +fol k +tr e +can di +pau l +ei ther +ad am +poe try +jewel ry +ðŁ ¦ +pr ay +Ø § +g c +o z +wi shes +fore ign +sun g +lear ned +en e +n ing +micha el +illu stration +legend ary +w av +b au +ðŁļ ¨ +cal end +stre ets +â Ĩ +mon ster +bu ck +g r +scho ol +ba th +wa ste +ne ck +ha wa +be ach +re plac +jec t +on er +fac tory +coun t +ðŁĵ ¸ +mor gan +der ing +se an +steph en +de p +no vel +vide os +ic al +press ure +arsen al +ex pre +ir s +tren ding +ss a +fla sh +re sear +thr ough +profess or +scul p +to s +gg ed +mm a +be e +a pe +hun ter +am i +he i +pla stic +bu cks +uni verse +le gen +niger ia +ple ased +ri s +thin ks +autu mn +i ds +d is +anth ony +ðŁı ½ +ak ed +gla sses +fin ance +z er +k as +con tract +nu mbers +sh aw +partner ship +t il +laun ched +s al +victor ia +theat er +usu al +nam es +perio d +eli za +i th +bar cel +ro cks +bag s +mat e +distri bu +j on +di ffic +ali zed +cur ren +sco red +b ha +du blin +ro se +in ted +soli d +beha vi +wal ker +simp ly +garden s +head ed +in i +ohi o +we ap +f o +gl en +e state +ran dom +th under +thr u +k ill +jac ket +it i +entertain ment +thanks giving +ent al +en coura +el o +a ther +tan k +high lights +f ting +ru le +model s +bor der +bj p +hus band +in done +ken ya +be ars +al o +n inten +pi x +str o +or ders +sal ad +ro ads +n or +l ation +sop hi +ðŁı ¼ +pi eces +b one +min s +inclu des +nu tr +phi l +s ent +fun dra +ga in +bor ough +n ad +mon day +activ ity +it ems +be coming +ken ne +de tro +car di +gue sts +u x +world wide +sever e +new s +thank ful +fic tion +ve ge +m all +si an +er al +inj ury +le e +men u +danc ing +scot ti +exam ple +( # +na i +studi os +ba i +ðŁĴ Ľ +j av +diam ond +vin ce +ric k +prote ction +lin col +cham ps +appro ach +d ar +m ile +clou ds +je ff +in fin +l ers +p les +pe ace +go p +âĻ ¡ +tech n +str a +a verage +ef fort +introduc ing +di versity +austr alian +am p +boo st +s ke +pati ent +appreci ate +ici ans +pu r +f ell +woo ds +illu str +ðŁ ĸ +ag ency +ac tions +brit ain +under way +se attle +el and +ag o +f ill +stre aming +pro test +challeng es +ky o +et sy +coo king +exper t +ru ss +rain bow +commer cial +sp in +be ats +c ry +val u +el i +th row +gr ams +le vels +michi gan +c ad +ador able +const itu +w s +pu b +mid night +th at +net fli +braz il +die go +regu lar +jo y +âĤ ¬ +li qu +ea stern +k ni +fl at +n p +bro wn +w er +se y +tt ers +ac ting +v anc +cy cling +program me +ra w +comple x +tat too +throwback thursday +se ssions +ro oms +si ght +speci es +bom b +lau gh +ke eps +mo on +offic ers +con ver +t r +ha sh +t ack +ri ous +ad ap +a j +reco gn +ex po +sug ge +confir med +rol ling +dre ssing +ic t +fri day +ph ones +ri dge +con cept +ro y +ke ys +ef for +c ate +k ne +ev en +l ay +commun ities +mo d +n az +every where +al ab +bit coin +ban ks +out door +feder al +sto res +h p +c al +m ely +sig nific +be ar +re public +clo ser +al lah +pic k +x d +pal ace +ch ill +b am +er ous +un a +al len +out standing +olym pic +supp ly +fi gu +v au +l p +char lie +un es +> >> +legen ds +ici al +co ast +benef it +mul ti +f its +far mers +am ount +si sters +har ve +hon ey +que en +b ers +pl ann +âŃ IJ +m u +barcel ona +al ber +stat us +re main +ex tra +c andy +vi ous +âľ Į +o v +warri ors +-- > +ju mp +am ar +x mas +stu dies +i ors +k or +don ate +pre p +fi sh +im a +pain ted +ad mini +co splay 
+spor ts +dro ps +fi ghter +evi dence +ðŁĴ ª +la ke +ro b +cine ma +pro file +à ± +stan ds +leg acy +sh ape +ro of +ci vil +i ans +sy l +sh am +vo ted +re tail +ph illi +li sted +du ty +n b +th es +f are +au ction +ffici al +stor ms +d p +l oun +sh ops +al y +ani me +multi ple +ðŁĺį ðŁĺį +psy cho +je an +ap art +candi date +gg y +con f +jose ph +w ick +me at +fr ame +c l +for got +ph y +f ing +li ed +re p +se ed +f all +u fc +nu t +lin d +mo de +fiel ds +en ce +s ley +ðŁ¤ Ķ +ch ill +follow ed +announ ces +cor ru +tro phy +them selves +ac le +al du +k ong +l on +s v +bro ke +ander son +ta i +stor y +tempor ary +activ ities +k ati +ari z +cry stal +spo ke +extre mely +tra ding +ðŁĴ ļ +à ¼ +in ch +ed in +out fit +equ ip +ma di +form ed +be ef +po p +ti ger +this day +ti red +neigh b +re tro +is a +un t +t as +kan sas +de st +secon ds +ta y +hur ric +o u +galax y +dad dy +bro w +bur ger +en ced +de sk +ac cur +secre tary +el ite +k ab +ch in +touri sm +bud dy +ici de +dre ssed +u d +vac ation +che ers +com for +charac ters +j et +bu ying +l ins +n ap +reale state +li e +af c +i ii +f ame +n r +b at +ag ent +ma kers +âĢ ¼ +sec tor +op ti +le on +di et +pra yer +hi p +mi r +le x +br y +an a +pas sing +w en +reco very +ak i +po pul +res ort +mar ia +stu ck +read s +ti er +perfe c +netfli x +p oo +cham p +o c +re duce +we red +comm ents +cla im +acci dent +s ag +h ack +sal t +kin da +k iller +i os +z y +ex change +lec ture +eng er +ic king +t au +reve als +pri son +z om +gh an +u l +jour nal +i ot +tr in +jon a +govern or +cap e +quar ter +spec tive +impre ssive +bab ies +t x +m ill +o y +har ri +jo int +su e +collabor ation +tren d +revolu tion +re new +alum ni +ge tt +sh ell +sun day +ent u +ni c +donald trump +block chain +paci fic +expla ins +sp y +ad voc +par adi +to f +star ring +p av +fe ed +br ac +smo ke +ham p +y am +to kyo +si mon +d h +e ffici +phys ical +n j +ell i +s low +gradu ate +americ ans +ti fy +f red +ap ore +fin ds +rob in +we t +not ice +se mi +un ve +k om +pil ot +scre ening +da ily +ðŁĴ Ĺ +roy al +sp a +vo tes +n ag +wh ate +att ending +exper im +ad dition +k ate +sto l +m ali +foo t +chri st +ch an +de e +lic en +glo bal +mo ore +ti a +bri gh +myster y +y ay +âĿ¤ï¸ı âĿ¤ï¸ı +cre ati +me chan +clo ck +di c +âĢ Ķ +pp er +al ph +through out +al low +re sources +selec tion +ham il +bb q +aa aa +virgin ia +dis ney +en g +so red +drin ks +f ancy +consi der +end a +jan e +hand made +du l +on tari +i us +s ville +color ado +whate ver +whe el +promis e +ne ver +desig ns +ab ly +sex ual +vanc ou +at i +con vention +cul tural +sing apore +pro mo +load ed +gla sgo +pp l +n oo +ke e +ste m +men tion +i do +cru ise +ri ding +be comes +be y +âļ½ ï¸ı +tw in +dedic ated +na sh +de si +work out +jen ni +i v +grou ps +rela x +pho eni +li ft +mix ed +m ck +p c +mu st +me tro +ci es +y ar +a im +ang er +i e +rec y +marri ed +dro pped +eng ag +le st +ambassad or +op h +de s +w ick +assi stant +nat ur +fa il +l td +shor t +k ap +sha w +bi gger +rema ins +crit ical +sur vey +co verage +er son +win d +n b +bil ly +let es +ac ts +jim my +at lan +al and +t c +import ance +dam age +f g +stor age +tw t +bon d +bal ance +cr ying +pu ppy +vo te +pu sh +ðŁĴ ľ +pol y +me l +lon don +terr ori +effec tive +corpor ate +atl anta +jac o +nas a +gre ek +sen ate +i sh +ev a +intellig ence +effor ts +al co +k un +h all +di ag +claim s +fir st +h b +ba e +v ul +pu ll + ° +se par +spe ed +vic ti +on thisday +audi ence +r ates +te ach +fil ming +bu sh +son g +y um +br un +ra ine +aw a +par ks +ð Ŀ +ra bb +ra ch +ra 
+[... several thousand lines elided: this hunk continues a byte-level BPE merge list (one "left right" token pair per line, ordered from highest to lowest merge priority), evidently the merge table shipped with the pipeline's text tokenizer. Sequences such as "ðŁĺ ı" are not encoding errors; GPT-2/CLIP-style tokenizers represent raw UTF-8 bytes, including emoji, as printable Unicode characters, and those pairs appear verbatim in the merge table. ...]
@ +he ed +si d +b w +docu ment +pl un +gr ants +fant asy +predic tions +vali d +car ved +gradu ated +ðŁijį ðŁı» +nation ally +ch y +af l +re sso +blan k +ri vals +j ig +e ties +om ics +une mp +b ound +sk o +inspec tion +par al +high s +cri sp +b ans +ob a +[ @ +co spla +costu mes +rec all +mou th +ni gel +b ts +ter a +ko v +do cs +west minster +dic t +gra vity +kar i +ro gue +t ted +war k +ida ho +w end +aw i +queen sland +proce sses +cli ffe +m ick +com pens +op ol +the y +cl ari +wiki pedia +salman khan +haz ard +pre ston +swee test +pd f +che es +tr ilo +south africa +bur nt +( $ +con tain +t p +sub mitted +sound cloud +at u +re z +word press +corru pt +n f +ma ker +í ķ +par as +adv ent +ri al +ca fe +fo ssil +!!!! !!! +co ws +c j +sp ur +institu tions +land mark +ent it +re ut +h is +alz heim +we mb +regg ae +mo squ +st at +identi fied +deal er +re am +re land +ten sion +ðŁĩ © +wra pping +deep er +fr at +red dit +ar is +moroc co +.. " +b low +ma pping +pri orities +ing a +swa p +re wards +conspir acy +creati ve +c j +congre ssional +vau lt +ple x +sophom ore +shad ow +ele ss +ðŁĺ ħ +dar ts +aldu b +anno ying +pro ps +n as +alumin um +h bo +offen se +j ill +oni ons +la ur +ta e +har dest +sh ro +ga ining +meas ure +ed tech +cyp rus +tar a +ang eli +car lo +go on +all i +im plic +ju pit +resil ience +ha il +bal anced +) ... +joy ce +gr a +th eli +defin ed +shi pped +main ly +min a +l m +sac ri +o ber +p im +claim ing +ent ers +co rey +bo k +cri ed +cool ing +dani elle +pharmac y +thor ough +ca ke +k lo +outre ach +z ens +digital marketing +val ent +sn p +her b +mr w +caf é +cap tures +no tre +triu mph +pan cakes +cu mber +spi ke +d ation +bi gg +sp er +crit ical +am al +too th +foun ding +a stro +' # +quan tum +th ames +un c +pri de +air bus +kno cked +un defeated +mediterran ean +cal cu +clo wn +sens or +ham mer +for give +cu shi +ber ry +maje stic +elec t +polit an +g ta +k ari +bur ke +sea hawks +volkswag en +re i +landsc apes +cas u +grand father +list ened +/ / +star trek +rainf all +fur ry +vi er +star k +rif le +ff a +leg es +hillary clinton +min us +correc tly +architec tural +pre ce +up side +box er +ðŁĻĮ ðŁı¼ +is ai +de t +pro vo +tis sue +spoo ky +ve led +re con +prospec ts +que bec +âļ « +ig no +anat omy +shap es +w p +p interest +hor e +an es +pick up +ti p +pra desh +hu gh +co e +po k +gram my +well ington +sti gate +ri gh +lea p +king ston +scen ic +go sh +v ani +au g +s ary +zi er +bure au +lin son +con te +fra gr +all an +g aw +lan a +colli sion +surve ill +ren ais +ar range +s ali +do in +br ance +bren dan +our se +in coming +suspen sion +à ´ +l la +educ ators +in tri +da e +bio graphy +bul gar +villa in +go thic +rw anda +e w +may or +meet up +democr at +mor gan +su dden +te sco +car rot +bom ber +mck in +re ne +fun day +agricul tural +haha h +show time +form ing +col a +scor pi +quo te +po ppy +s life +d az +tu b +ne n +mo t +ðŁĺ » +s ore +elder ly +o ve +skin ny +um i +anc o +man ship +we re +g v +k ah +fol ding +ne at +samanth a +dan ish +uk rain +humid ity +nu tri +jak arta +cand les +oooo oooo +at ile +streng th +i bra +bap ti +charle ston +fr ames +girl s +clear ing +glu ten +# # +super natural +ju bi +ph one +he in +dr un +le ak +invest or +y er +dom ain +ball room +mi sh +app li +off shore +bla ze +dor o +âĺķ ï¸ı +win ery +shar if +ad ore +n ir +saf er +si gh +as cri +strong ly +trac y +ck er +ol l +faith ful +ey ed +deli ghtful +vis m +karnat aka +tit an +wh ar +jer seys +re fur +heav en +gri p +pan ama +pre li +glu ten +o dd +cont ent +pon ti +tion ing +e 
commerce +feder ation +flaw less +ge ar +ti res +by r +pol ice +cu ban +tri butes +tic ul +chur ches +nur sery +di aries +muse ums +snapp ed +i van +wi ght +touri sts +ramad an +t rent +prophe t +won dered +focu sing +hi d +ic ons +i q +ambul ance +pi st +fun niest +time less +sr ilan +bu ys +ki ds +colour ful +a shi +ch ir +mu m +ðŁĵ ļ +let ter +x en +reut ers +pre serve +in ting +ste p +fu ji +uni ver +i u +show down +po ems +surveill ance +suspec ted +ta e +sol ving +tom b +mother sday +car pen +recru it +pil ots +bro c +mix ing +fri days +ty r +represent atives +tra pped +abdu l +free style +clu ster +âļ łï¸ı +k d +sk ill +pit t +ex o +commer ci +muse um +loc ally +g ina +no bel +immun e +fr ac +cap su +main ed +attemp ts +bull dog +be spoke +sing ers +sp elling +seg ment +nat ures +tic k +lip stick +clean er +gett able +preci sion +âĢ¼ ï¸ı +th ood +re ef +no pe +bill y +di gi +mu si +ri val +figu red +tal ity +sun ny +ber k +aw ww +awa its +un real +co pen +asy lum +ex otic +bu en +mo ck +en able +arch y +fr a +pla stic +al mond +amp li +displa ys +abbo tt +s me +x p +ðŁĻ ĥ +graph ic +i ved +mar a +cau tion +lea ks +en berg +ul u +unic orn +cann on +appren tic +ðŁĺĺ ðŁĺĺ +b ball +wil low +at ics +am as +manufac turer +campaig ns +port ers +flo ors +l su +ty pe +ke j +honor ary +it im +to le +min ecraft +d x +ma sh +ri o +consequ ences +ron ald +go ssi +suffol k +mu se +r bi +live music +i van +ðŁİ ¤ +le u +patri ot +man it +lan ca +home decor +de ar +sig ma +ti de +str ings +v ita +sequ el +try na +inve stigate +bor is +ve gan +barri er +mind fulness +web b +hu stle +in da +tan zania +str ay +tex as +c ag +diagno sis +wom an +g w +ob session +l ative +nu fc +fl ynn +moment um +sof a +wal d +vege table +tu cker +supp er +se ab +ar ro +se ag +ven ting +counc ill +sp lat +cal cul +.. # +com fy +odi sha +sto pp +war fare +ca es +à ¨ +co y +price less +in sec +ðŁĺ Ľ +contro ls +empower ment +datasci ence +per pe +gen ic +e res +tru deau +man o +sla very +expand ing +ma he +fa iling +s aga +photograph s +cre st +re on +surf ing +hi e +ðŁį Ģ +ja e +fel lows +south ampton +sol om +ce ster +tab ility +hor n +se ct +he e +cole man +at las +explo rer +consul tation +copy right +organi zing +den ied +mon keys +noo dles +br is +fl or +dou gh +bon ds +sho cked +eco system +care fully +w m +apart ments +cur ve +san diego +must ard +comm en +cere mon +e ch +ru th +ðŁĻĮ ðŁı» +hawa i +fil med +te ar +as ingly +ca ir +wat t +instru ment +ou tta +ye ol +river side +ë ° +. 
: +nor wich +alo g +migr ants +new man +ri de +spr ink +targe ting +beli eve +tor ch +reflec ts +per mission +ff man +ene mies +bas ics +se ized +sun days +le i +hass an +en do +h c +st ad +le ments +kk kk +nan o +shar k +man a +on ic +treat ments +ear ly +collabor ative +shu ttle +bran ches +mis ses +mained cm +ap ers +ky le +carri e +leis ure +sh et +bir ding +adv ances +ðŁĵ Ŀ +popu lar +di ane +a be +re war +neigh bour +k pop +remem brance +play ground +ru b +krish na +e bola +inqu iry +ep a +lu min +organ isation +abra ham +norm ally +pre ten +jan et +w t +ðŁĴ İ +encoura ging +a stic +bu mp +syd ney +s z +ss ss +gar rett +ðŁĵ » +consul ting +roman ia +spo tting +chanc ellor +ar ma +presti gious +ðĿ IJ +t ad +cry st +compe tit +rati o +cat aly +bro w +j ur +vi king +commu te +y day +la yers +du mb +esc al +genoci de +f ill +gu pta +ste pping +se i +fo to +wild cats +col i +projec t +ear nings +st r +ge ons +comple tion +b m +decor ated +craw ford +af ghan +sc are +visi bility +hi b +direc tion +stro ll +christ ina +alter nate +cl are +sty list +be hold +s ance +leop ard +acqui red +narr ative +ash i +the a +?? ?? +pe as +at ch +sli des +le en +renew able +eng lish +qu ir +co aster +r x +fo ols +match day +mis m +amaz ing +z ig +ke ting +won t +to wel +di ab +sta ke +n m +mel t +e than +gra pe +polit ician +sm en +í ĺ +re o +wedd ings +cat cher +or acle +me mo +ðŁĮ ´ +ec k +rob bie +norwe gian +oper ator +am or +se wing +ju l +x ie +u v +fif ty +me ga +tatt oo +liber als +u pri +traffic king +richard son +su v +ki p +mess y +tremend ous +gl ou +cour tney +la d +stere o +my ers +i dio +^_ ^ +man ning +dy e +w d +thr one +jun k +as u +provin cial +k ook +wr c +fine art +hamp shire +renais sance +b red +fall out +s j +sn l +al am +tor ture +fy i +sh ines +pa w +ch ar +hen ry +c row +aci ous +di an +pa ige +ba re +stock holm +scen ery +ðŁĩ · +jef frey +pu sh +decor ation +ne d +cu te +brig ade +laven der +inv ites +e sports +vo ir +dri ed +tran spl +sur geon +no vels +pul ls +son y +lun ar +man e +i vy +fru str +dor set +sa i +tor res +ssi on +shut down +suggesti ons +writ ing +e o +battle field +u ga +ðŁIJ ¾ +vac u +spl ac +g it +u g +high land +% ) +mer maid +sacram ento +ta ils +p w +ka h +t ell +enh anced +ì ķ +auck land +cru el +ðŁ¤ © +au dre +sail or +gram mar +g love +de on +infl am +fresh ly +k ell +zi p +christi e +mil d +di xon +instru ctor +g ence +ãħ ł +sub jec +constitu tional +crow ds +in visible +ru ins +da k +si p +pla que +p ouring +comple x +z ine +ste ad +f let +trans mission +lo way +ar un +incre asingly +au d +transp aren +cro wned +sc oun +blizz ard +lux u +fi ers +achieve ments +hun ters +rock ed +bas in +vio let +pro ves +achiev ing +pro sper +se ga +flo at +vi an +xi v +pol ic +tur a +approxim ately +wander lust +keep ers +geta way +co d +pol is +br yan +col ts +tal ents +yo gur +gluten free +wri st +gr y +cze ch +ðŁİ Ī +ev ille +ðŁı Ī +to x +dani els +am er +bi ds +weare one +me tab +g t +boy z +pd x +pos session +pu shed +shr ine +reali stic +tri gger +na vi +ru mors +n af +jen kins +tr un +comm uni +Ã Ĺ +gam ers +arm or +moham med +bal cony +y ah +stron gest +rhy thm +unfor gettable +k p +ho bb +custo dy +greg or +r ita +aes thetic +il ation +sponsor ing +n ay +kid napp +sh s +ra jas +me g +signific antly +butt ons +la c +ver sions +essenti als +opini ons +k ro +d printing +wi dely +d k +ur an +y al +reque sted +c n +cur ric +plu m +gr un +v m +dev on +m yo +rel ation +juvent us +rou ge +min ority +min es +jupit er +n ine +oxy gen +fran kie +une sco +fab ric 
+disgu sting +sal man +dete ction +lan ka +d ac +ðŁĩ« ðŁĩ· +argu ment +shel ves +cel tics +rober to +pi gs +he dge +fau l +pow ering +butter flies +fi r +re make +att i +com o +emp ha +kend all +poke mon +se ating +d ans +bald win +ðŁij » +lesli e +one direction +ti mber +im an +fon t +e der +di on +ste ph +for mat +gre gory +pro p +he x +ru in +sor y +inf er +n aw +bar ak +sd gs +kar ao +lu sh +v ander +end ent +g is +a fro +soc cer +ay an +t uni +lun g +da yof +alex a +mar ath +addic ted +ag ile +hy gi +light weight +ì § +mand ela +jo ey +anc y +hu m +bi r +memor ial +jim in +ging er +v ak +jav ascri +cro ps +orig ins +d ari +pi per +im port +aggre ssive +predic tion +re pairs +cr acker +voy age +ni ke +mu mmy +linke din +country side +bor der +gla ss +per t +s als +sho e +autograph ed +wal nut +colle gi +sal ary +pa iring +ðŁĮ ¸ +cath ol +swee the +defe ats +streng then +roof top +impro vements +barri ers +ur u +t ally +ru led +ðŁĨ ļ +nai ja +emo ji +per cent +gi o +pro bs +on ce +adm its +pa ths +li ar +day tona +pe ters +cal i +cal li +mu g +o sa +ap h +ab y +hy de +eth nic +pla ins +ol f +haha hahaha +holi c +?! ?! +su bli +bl acks +mo t +gh ton +lo vin +b rent +bar u +l ati +de w +ate au +q a +pain ful +bu sters +st atic +ðŁĩ¨ðŁĩ ¦ +note book +out fits +si es +r f +floo ds +Ñ Ģ +thro at +su ici +ro vers +beng al +pre pares +blo g +mini ature +Ø ¨ +am phi +com b +r sp +in timate +green e +Ì ĩ +al tar +surg ical +ves sel +... ? +gav in +g ator +threat ened +z ar +rob bery +di er +promo ted +y g +x s +su bs +inter viewing +threat ening +do zen +me ado +water fall +nintendo switch +cal um +mini sters +dro p +univers ities +war ned +tac tics +ðŁĩ ² +refu se +ad ju +v ast +ðŁĺ ´ +mc fc +lib ya +no filter +distribu ted +re ser +ron nie +de co +javascri pt +mon k +intere sts +fle x +mar tha +sti es +oo d +ðŁ¤£ ðŁ¤£ +e un +b ali +g omez +sti mul +moder ate +d ity +ir is +stra w +consist ent +direc tions +adop t +sal sa +cro o +reco vered +black friday +lan caster +accep t +weareone exo +buil ds +free man +air plane +diti on +bel ong +jam ie +pit ching +li f +om in +cri spy +pre pping +ve g +chan g +accompli shed +graci as +dolph in +elec tor +culin ary +super bowl +wal a +pur suit +black berry +be an +cardin al +pro ved +immigr ant +stric tly +holocau st +pass age +ha us +cou p +pur se +har ass +< < +le ed +ado be +st ad +legis lat +par ked +pri yan +sil va +kri st +s the +fun ky +ig a +sett lement +ph s +t mrw +stre ssed +hun t +ho ckey +treas ures +cham bers +ol u +hu t +mar ley +tex ture +wilder ness +mm ing +poten tially +om aha +ju dy +to es +spo iler +distingui shed +feli x +ah u +recommend ations +zom bies +hit ler +tri ple +colla pse +motiv ated +ulti mat +gg ling +so y +ci gar +fo ren +vine yard +gl itter +fin dings +colon ial +hun ter +eri k +den s +beet le +lot te +sub tle +s matter +tru sted +experim ental +nam ents +ðŁĺ Ĩ +regi on +acquis ition +bre eding +quarter back +am reading +oo td +ru de +initi atives +st out +hy ung +out come +al fred +mic s +exper tise +bacter ia +pengu ins +jump er +valen cia +bar k +ing day +sell ers +contrac ts +hou ston +commissi oned +adap tation +swan sea +santi ago +common wealth +ju dging +sub mission +sco rer +tom my +ñ o +ex quis +fil ing +explan ation +alli son +wemb ley +ri dge +chev y +san tos +own ership +cogn itive +favour ites +sh ed +phil anthro +dele ted +go dd +s nor +gui delines +ff ing +je ep +cli ps +sw amp +an or +guil d +bol ton +spring field +munici pal +goal keeper +ye on +ðŁĺįðŁĺį ðŁĺįðŁĺį +ãħĭ ãħĭ +water front +gra ve 
+contempor ary +ar ity +ÃŃ a +sle eps +sy rup +al am +pi re +co yo +moto gp +ty son +kej ri +cir cul +sing ly +cr unch +complic ated +nostal gia +k op +mo ve +k ale +mac ro +mid west +h ans +tri bal +nu de +௠į +bey once +congratul ate +cat er +leagu e +ðŁĻ Ĭ +la dder +cra shed +tech nic +karao ke +harass ment +ro ts +experi encing +kri sten +ðŁĩ ³ +ðŁ¤ Ĺ +reflec tions +guin ness +illustr ator +ðŁĻı ðŁı» +cen ter +nar row +comm ons +regul ations +Ù Ĩ +har m +cro ft +cu ssion +hong kong +st ical +intern ship +zo e +cho p +hoo ds +estim ated +batter ies +berke ley +smooth ie +shau n +cro s +~ ~ +cam pe +hu mp +b g +proto type +cl ick +shaw n +re viewed +tem pl +p f +jed i +blo gs +ray mond +as th +ba h +av ail +scot ch +leaf s +nik ki +to k +hol low +ur ges +of t +un like +lat in +u e +cat ering +mil i +alter nati +ma ver +Ð ¸ +ag le +pre order +lu x +cu cu +ðŁijı ðŁijı +t art +âĿ¤âĿ¤ âĿ¤ +arab ic +rapi dly +ar rang +all en +travel tuesday +pa ws +flo ws +st ability +flu id +ca pp +can berra +uu uu +sp ani +demon stration +m la +plac ement +m w +presi dents +awe som +bever ly +ani st +ne al +father sday +referen dum +la hore +o aks +deb bie +half way +gho sts +de bor +matthe ws +fi at +t fw +pre sen +rob i +de d +bro ck +laugh ed +am ounts +bam boo +kinder garten +eat en +mtv hottest +break out +u sic +fra ser +legis lative +p ang +modu le +sam my +go ver +ear ns +expe dition +gar h +concep ts +char lie +la va +bachel or +veg gies +deter mine +el lie +un locked +fru it +dal la +cou pe +wash ington +depo sit +iv ory +pau la +chic ag +gu cci +ðŁİ ĥ +cul tiv +pier ce +li fted +stu mb +re cover +musc les +conduc ting +cb s +mcla ren +sophi a +cel lu +oce ans +up loaded +game play +mal dives +kim ber +avo i +rac er +ca ine +cav s +h ana +li ga +ra ven +inter vention +inaugur ation +oo h +at traction +merchandi se +tune in +li king +juni ors +int ended +att acking +aqu arium +i wd +comp onents +sur ing +cent u +yogur t +ðŁı ĥ +show room +op tical +ty our +ju dge +yi eld +an to +pl c +transparen cy +recy cled +chi ef +ar om +ambassad ors +plan et +âĿĦ ï¸ı +om ed +vaness a +cour t +mar gar +hal ey +v r +reg ina +pd ates +hi span +live stream +âģ £ +ya hoo +gal la +secu red +w ir +bene ath +off l +n il +am b +ye g +out let +u te +pe ep +lind say +bent ley +... ! 
+he el +trilo gy +vo s +ty re +there fore +tor onto +ab i +simp li +ja e +exten sive +eleph ants +s or +orient ation +im peach +re play +constru cted +peter son +pa is +por ted +custom s +colla p +ad u +high lands +sal em +shel by +ko vic +stra in +ro sie +sen ators +snap s +bo bb +suz uki +bla des +k p +lo lo +gener ate +si ght +ma e +struc tural +predic t +jump ed +ah mad +sun g +just ice +gla m +vol vo +jubi lee +de tention +lo sses +pu ri +every time +Ð ° +ra o +ed ge +li mer +rese mb +har old +re tri +sacri fic +surpri ses +am c +srilan ka +bar bie +men s +fin n +ag s +ukrain ian +em brac +î IJ +flav ors +hom er +lau re +ou th +pr iced +ver de +fir m +ah s +cu b +tre y +par anor +pro fit +in dv +who a +har sh +al ot +crit ics +hu bby +fi gur +gi ra +ca stro +chan el +in put +origin als +ten ant +yy yy +ture rs +lincol n +co on +lear n +ch ou +ac are +o les +din er +hy p +bizar re +mc r +let sgo +decor ating +ðŁĮ İ +al ison +ar vin +f d +reha b +mccar thy +lot tery +da h +minne apolis +eli gible +diagno sed +emer ald +destin ations +s ans +or y +bla zers +n v +ba il +digital art +no c +mal ta +sol ar +pi pes +alleg ations +no ck +po pe +bri d +premi er +n x +present ations +ef a +bo ws +val ve +opp onent +Į ë +visu al +ing le +cate gor +e ter +po is +dan i +at tract +neu tral +th ene +cra shes +fred die +ut ili +c st +awak ening +slo ven +quali fy +pro of +fair y +le v +fre ight +enjo ys +cup cake +flav our +â ķ +protec tive +ðŁijı ðŁı» +is u +ad mir +h mmm +continu ous +ai res +rap tors +showcas ing +y uk +pa ste +follow er +instru ctions +sp ru +@ __ +the o +debu ts +ve tte +sto w +es of +ach ed +sul tan +sand wich +som alia +franc o +car ne +flu ffy +al pine +jas mine +he ated +viol in +ple ss +divor ce +per former +phi es +port sm +dar a +kir by +lo p +chill i +for th +sky pe +ðŁĩ®ðŁĩ ¹ +celebr ities +ed y +ve e +po ison +ey el +gra bs +ssi c +un o +wester n +rail road +am er +numer ous +s v +fo w +fi st +âĢ ĭ +reque sts +mar tial +em my +accept ance +lau ra +ภ´ +er up +hyun dai +out lander +u tt +wrest le +esp resso +demand ing +g dp +geo graphy +sas kat +tro ll +confe der +su es +se m +be ts +t ful +to sh +teach es +col oured +gal way +mac y +dis orders +bb cra +at em +fen der +lit ter +e sh +provi ders +renov ation +nomin ate +ps g +nomin ations +jen na +shar p +some day +z ur +bra ins +che shire +pre y +hu go + ¿ +to ken +r v +car r +tac tical +zel da +kay la +fern ando +photograph ers +j our +umb rella +woo dy +congress man +du mp +le vy +ju an +d azz +sign als +la in +an u +mic hel +por ch +al den +sibl ings +y ale +pe el +sw ick +gg in +ll c +k ale +s con +il d +pat reon +re el +qu in +wit t +mar ty +moo dy +ton i +der y +g ators +speci fically +dd in +ly on +tr ick +meado ws +p j +bor gh +vi k +tu r +bron x +pu ff +lan tern +ðŁ¤ ¦ +g ently +be stie +fac t +refu sed +fas ci +mp y +ðŁĶ µ +cross over +mead ow +indian apolis +duc ation +sle y +loo m +mix er +new music +film maker +prosper ity +li m +week end +cre amy +neu tr +lu ther +h v +nor thern +tw o +h ra +cat ches +appear ances +ha bit +kitt ens +n v +illa c +inf an +regar dless +liz ard +dun k +cur tain +ac om +in tu +ve z +e min +fl ats +calend ars +em power +ru ined +hun gary +vi d +we x +u lum +aber deen +o sa +k t +ma ssi +se emed +s den +' ? 
+tele phone +de fi +insp ires +me ow +z ones +bl ind +pl y +tuc son +advent ure +ge d +oy ster +ðŁijıðŁijı ðŁijı +out put +tt t +metal lic +sma sh +ucl a +sco ts +perfe ct +lu cy +regular ly +sp ic +rel ative +ath ers +mis e +batt ling +deci des +mat a +occu pied +random ly +cat softwitter +gi an +ball y +al ties +al lies +im men +sy rac +ðŁĴľ ðŁĴľ +l lan +au r +k ut +lam ar +affe cts +n ra +star war +ðŁ¤ ĺ +sc ram +en chan +pro cess +luxu rious +ar ray +sher lock +comp ati +dor f +stre ss +m su +s with +sal a +sof instagram +fo il +under stood +qu ay +r p +c ade +ja w +en ab +en coun +ðŁİī : +do ck +satur n +mu ll +lay out +ra rely +happ ily +fix ture +or ph +over looking +her bs +m itt +pil lar +nol an +pe tty +str y +u i +mu k +o res +o vers +á µ +re creation +we sley +ri t +kejri wal +sto cking +g v +subscri bers +moo se +ma e +ber t +opp re +assign ment +u ro +high lighting +cal vin +we igh +cambo dia +av on +ke m +dis abilities +read y +char gers +p ads +iz ing +illi an +tru ste +col leges +associ ates +alban y +mil ton +cr on +bu r +har dly +si ghts +anti ques +e cho +surpri singly +ha iti +cap t +ph p +op io +ine quality +equ al +ken y +sch mid +autograph s +ren t +qu er +cit rus +challeng ed +te c +epi de +fe st +z hou +li me +citizen ship +cry stal +convin ced +mess enger +copen hagen +âĿĹ ï¸ı +war ran +develop ments +ï¸ı âĥ£ +fore x +hi ro +sne akers +xi de +vi va +stere o +bat ting +ss el +ho st +beng al +critic ism +q c +cr un +attemp ted +ry e +determin ation +cre ations +d read +label s +pos se +anc er +joh an +si ster +partner ships +les bian +k st +guaran tee +bar o +fix ing +ma son +m ous +chem icals +t less +bio diversity +par o +bhar at +ac ol +refu ge +en te +t iti +dys sey +respon ds +lef to +in er +se vel +rahu l +ol ine +frank fur +cho reo +enjoy able +c to +strugg les +wood land +heavy weight +gen s +rece p +ac cred +ðŁĺ ¡ +trans formed +list en +at op +n k +sur ge +be re +gover nor +prison ers +clau de +t ill +mu lator +emo tion +water loo +star t +ðŁĩ º +clean ed +grand mother +fear less +afric an +astron omy +ðŁı ģ +ภĻ +the world +su itable +anth ony +k and +tt en +meaning ful +disc lo +jaco bs +à ¸ +tom linson +ghe tti +ty pho +sub stan +as co +te k +nag ar +mu d +am on +vacc ine +f ty +fle sh +no el +infl ation +portu gue +glam our +tra m +v re +te qu +roun dup +w yn +rejec ted +mosa ic +si ghting +cal f +o ta +com position +go pro +gonz ale +e ed +b ard +tu e +effec tively +we en +al to +ri bs +rel ate +thir sty +fu rious +di m +ch ard +perfu me +s ny +chur chill +k of +master class +wa ve +ðŁĶ µ +er in +own s +to be +sk illed +te m +go f +en i +tor i +cra zy +l ick +resi stant +ici al +ag ar +! 
: +g ali +del aware +bl itz +koh li +pu ck +avail ability +hi malay +influ ential +cro chet +victor i +read ing +ho bby +vie t +j as +en gra +sk ul +ðŁĩ² ðŁĩ +educ ate +tech no +distric ts +blu es +se tt +seven th +lear ns +ee ee +apocaly pse +hang out +cru el +mu tu +bru h +hel en +she er +c tion +kle in +tex ans +ce real +sh ine +ne red +gra s +am bro +f ella +hin du +matthe w +li ma +mir anda +je wel +so ho +euro vision +neighb ours +chand ler +be sides +ðŁ¥ ° +ast ros +thu mbs +ren ault +ra ve +hi red +ðŁĸ ¤ +it ary +z or +bla zer +k ine +ea u +kat y +dc comics +pe c +ro dgers +water proof +kill ers +super int +pre serv +as so +brew ers +promo tional +sc am +villa ges +sket ches +ju icy +for life +au dit +so lo +fundam ental +len e +philipp ine +t end +conserv atives +sponsor ship +dd le +a ine +h tc +os i +hul k +w af +ภĻ +evalu ation +ant ine +sle e +robert son +roo sevel +ag i +sophi stic +emplo yers +bubb les +ko wski +inter action +sh u +bou le +ic an +j are +han k +leg itim +k nicks +kar ma +recei ver +per ks +u h +sta ir +sun i +labor atory +gra ves +voc als +oo t +c ture +thri ve +tic o +ãĥ ³ +b w +carto ons +mcdon alds +dra w +y ung +pl er +li d +eth ical +groo ve +ent a +international womensday +pat ron +wor ries +ðŁİ ħ +ðŁij ĭ +ka therine +di az +tor i +bach chan +tru st +min eral +ic om +buil ders +bor n +col oring +lat te +ca se +revolu tion +tra der +ox id +chi pot +inst antly +sou thern +se hun +pro b +her nandez +lis bon +hu awe +p ong +me a +ro oney +wheel chair +ke en +be tt +cor in +regulat ory +di splac +ka ren +sch em +sun sets +wh ales +remin is +he p +hi de +mar cel +pand ora +do yle +th fc +ot to +no kia +trans gender +ko v +hawai ian +sha ve +so vere +exc er +nick i +pu g +st or +ro th +wee t +leg al +dig nity +po w +hom age +ðŁĩ³ ðŁĩ +s re +can on +la x +wo ah +quart z +ñ a +gree ting +flick r +nai robi +advoc ates +an c +vi i +eu gene +th ra +c re +el an +pen sion +th letics +ton i +re agan +x v +sto re +ben ch +har lem +todd ler +sent enced +âĻ¥ ï¸ı +glob ally +che aper +u f +ma m +nic o +ik u +tho u +ni st +dam i +th ala +rho des +sal e +bow ls +â Ī +las vegas +sanc tions +adm ire +mat ched +un able +travel er +ele ven +straw berries +âĢĶâĢĶ âĢĶâĢĶ +stu dio +jac ques +im s +valu ed +s no +cheese cake +n xt +e os +s x +f x +ton ic +hat ch +chic ks +gra ds +hand ic +r ory +as p +ri pped +denti st +n en +lu fc +âľ Ĭ +di ge +hop kins +sher man +f da +for all +ash ley +str and +h y +liqu or +buffe t +ess ence +phar ma +suri ya +ðŁĴĻ ðŁĴĻ +festi vals +z an +re fresh +pur ple +uni forms +kenne th += ) +as an +hel sin +transform ers +k ali +person alized +chal k +bo bby +â Į +the mes +depar ture +prin t +illustr ations +qui et +agre es +gri ff +Ø ³ +m iti +toge ther +conven ience +ab ar +car lo +turt les +info sec +some what +ar lington +scholar ships +emir ates +mu ms +st ella +auton om +fe ather +g ore +nom inees +fragr ance +Ñ Ĥ +w ong +thea stern +gr e +z illa +is i +bump er +go o +do zens +ab duc +âļª ï¸ı +o ils +don ors +sil icon +i pod +fortn ite +ðŁĴ ¨ +tor o +spark ling +consci ousness +pal a +nu m +moun ted +ffin s +thi eves +team mate +pra b +om er +ta pes +bo d +mit su +ste w +e re +p bs +tu sc +lo we +ra de +parliam entary +h m +ed gar +ðŁijĩ ðŁijĩ +to a +a gh +hon i +s late +ge ek +ap t +hard t +ta p +horiz on +grow th +make over +hi l +paper back +id an +reha bil +gi u +possi bilities +let tu +fran co +bo ss +ach er +does nt +mo e +ta ker +huss ain +ml k +di l +th ia +ham a +real ised +raven s +curric ulum +m ith +k night +ted x +r v +isai ah +cumb 
ria +birth days +f ing +pre z +mu barak +exquis ite +clear ance +y en +par i +ev o +à º +modi fied +app lying +imple ment +disco vering +chap man +indie game +dis k +crowd funding +mach in +li vel +sty led +âĿ Į +ma king +rehear sals +nutr iti +subscri ption +and ro +cre ators +car ries +ky lie +cam den +appren tice +tax pay +c ca +tuesday thoughts +pis sed +er man +dete c +freed om +mer i +.. ! +psal m +sun light +per spec +be ings +book store +rock star +fun ctions +p ence +fav es +z n +obam acare +sp ill +coven try +pi geon +pi vo +ba it +kol kata +av al +don or +wa h +privi leg +tra ditions +rajas than +ten ess +portugue se +yn es +tack les +de fic +tor n +pol ling +thor ne +in a +bened ict +bar ry +cal ories +ver dict +save the +nor ton +off ice +main stream +impro ves +fr on +respon ding +real tor +scotti sh +de clar +r l +shi v +supp lier +re sting +swee ts +qu i +. âĢ¦ +whit ney +startu p +thank you +teach er +h alls +ha ve +hand made +pro ving +quar tet +ro chester +li an +virtu al +mend es +of icial +mid lands +x box +meas uring +o vo +accommod ation +bri des +collegi ate +intellec tual +in car +ni ag +ðŁį · +sf w +coco a +co ats +civil ians +presi dency +mat rix +sweethe art +tri athlon +wag ner +ra dic +plann er +the o +execu tion +k um +the walkingdead +sc ar +ro tation +blo gging +bom b +re son +bb les +st are +assi sted +e do +brand ed +war nings +thor pe +acknow le +satis fied +sho res +ri d +dor a +phys ically +bi gh +appro ves +ha h +ric al +vers atile +pret end +lu m +ab hi +ye e +sp it +ãĢ Į +dj s +ash tra +j t +ven ues +gram mys +cy clo +tr acker +over watch +repl ica +el yn +nr l +lind sey +hom o +ballo ons +kitch en +si s +am os +ende av +ðŁĴ » +a rec +thu g +hoo ked +hr c +new york +bur gh +americ as +patric ia +ug u +ap athy +ha st +psy chi +cor k +petro l +ðŁİ ¬ +ak u +po pping +psycho logical +au x +g ma +cad illac +wa ste +auth ent +bri stol +nam e +que er +to ber +jer ry +com in +ch ant +privileg ed +op ar +lo ser +tex t +mar ker +stri es +equ ally +ak i +christ mas +gare th +ble w +em ma +imag in +se als +che at +conditi oning +j ana +ren s +dar ies +o asis +disc ounts +coun cil +i ka +shir ley +vou cher +al ps +w x +q r +dri ft +attemp ting +ut c +Ø ª +gonzale z +m f +jo ker +paralle l +pa re +aspe cts +proce du +n p +am a +rale igh +bright en +gu ire +radi ation +cre scent +ho b +il le +str and +v ore +n ard +che st +di wali +av atar +al der +d ling +pa thetic +ðŁĴ ĺ +spir it +jor ge +film making +ðŁĻı ðŁĻı +challeng er +b j +down town +ht ml +ade qu +twi sted +in ely +( ' +wra ps +oper ational +y ne +n us +mag net +market place +health ier +snap shot +dam on +inter ven +fe derer +ow ls +biscu its +j p +ro deo +blue berry +lec tion +fron tier +summ ers +re yes +pede strian +go l +caf fe +refur bi +bou lder +me ghan +speci alty +la ss +e i +suspec ts +appro x +rr r +ra th +st im +cru shed +he d +wh un +lo af +cr ore +river a +gene tics +so ck +wa sted +ny pd +answ ering +do ve +bel la +ol in +du n +fi ji +pre tty +spar kle +y un +j d +euro pa +li fts +am ber +mu r +te k +boy d +roy alty +in do +ri b +go tham +ti est +inst alling +ke mp +the photo +cos mic +) )) +whole sale +loy ment +eas y +su ing +sett led +af p +pro ver +suppor tive +re es +ne ath +deli ber +c é +wel come +pic oftheday +new born +pat ty +sun s +si est +fl int +diffe rently +spo ilers +troop er +g ins +cor y +look out +equi pped +ta pe +to by +resear cher +u sh +ke yes +al ma +induc tion +k w +k har +sl ick +bri de +e ur +cra ving +book ings +ch es +tr unk +vern on +sp her +cryst als +rel 
atively +pom pe +uni ons +val ley +par a +w ant +ok c +de af +ser gio +len non +sh ay +cr a +v at +he e +t we +liqu id +pol y +ðŁİ ģ +b ent +be aring +motor sport +bar be +te sti +han i +fin ancing +astron aut +water colour +ri sh +comic con +gar t +wr ong +ber n +it an +ste pped +fil ters +c low +me x +dem ons +all o +expand ed +comm and +et ers +go ats +si ri +y r +pot tery +mari on +i le +el an +san to +person a +du ke +hom eless +li ghted +wheel er +chang er +cab bage +sur real +ham burg +sma shed +str an +k not +i art +ob i +be dro +di al +th ick +b ingo +fu s +vacu um +con ve +ati ve +accur acy +accoun t +re fer +ri z +spider man +ban a +r ite +u b +ab s +medic al +lin k +si em +> >>> +be tra +g lowing +re actions +pupp et +spa ghetti +ang s +re medi +pray for +roy ce +char lotte +£ ï¸ı +gh et +affe cting +ro de +soci alist +mo ses +az i +o it +re porters +cd t +ap ing +s nat +minim al +wa ist +sie ge +>> >> +ri g +schmid t +h are +ec a +thor n +he mp +es the +cly de +th a +don ut +moham ed +ling erie +le gg +carpen ter +perform ers +de a +imag ined +cur se +la sh +ct r +agu a +ro ar +gr i +ro le +j fk +resur rec +roosevel t +maril yn +sm alle +will is +wa ited +char ities +the res +li k +origin al +car i +c ough +cru ci +la gun +contra st +k ou +arm our +re moving +t ent +maz da +bri ghter +thi ef +cor ner +tequ ila +buzz ing +al bi +p am +az ure +disc oun +pixel art +possi bility +ham ont +tra des +bu da +hi ve +vers y +fin ch +tran spa +em i +terri fying +in qui +g ba +sub stitu +collec ti +plac ing +cin dy +k ann +pa tho +diamon d +mour inho +guine a +anthro po +air s +pu mps +ì ļ +pas o +cur ling +an ita +resi dency +ne wh +jo on +cigare tte +que ue +ex trac +gam es +spl en +ex press +public ly +bon nie +tribun e +ba ek +reason able +c or +timo thy +she eran +Ä ± +f dn +su tton +concentr ation +carav an +x avier +al ger +cy lin +freder ick +ner ve +pe ak +lettu ce +j ail +pre game +kav an +up graded +eco logy +squad ron +gra pes +goo g +pa stry +ðŁĹ £ +ãĥ¼ ãĥ +mil ano +awa z +presen ter +ðŁĮ ¿ +her d +king s +tem plate +fl our +h v +k ley +i ya +spe c +at er +frankfur t +co ch +tex ting +del i +communi st +regi ment +ele anor +anticip ated +ðŁijĮ ðŁı» +thephoto hour +ran o +survi ving +simul ation +daw son +ar in +aqu a +m or +âĢ¦ . +cin o +ira qi +sh az +dun dee +we s +dra u +hann ah +s news +occup ation +ste en +x m +ang les +sett ings +gur u +kno x +or ca +shap ing +w ent +dr illing +zz ie +br i +kis sing +fin d +ma ine +âŃIJï¸ı âŃIJï¸ı +ðŁĮ į +lar ry +bu sted +ta vern +acti vely +- " +replac ing +no d +un lock +. 
" +âŀ ¤ +affili ate +to w +l n +happy newyear +di f +j m +green wich +contro versy +daw g +con dol +sav annah +compens ation +touch down +te o +amb itious +embro i +convic ted +iart g +bar ack +tr ance +testim ony +au dition +thum b +my ths +be x +que z +orch id +den y +entit led +hoo d +gr ant +in box +blue jays +r illa +smalle st +bur den +in famous +divi ded +boun daries +t ter +el t +wy oming +be verage +me sm +one ws +budd hist +y ana +as sad +is ms +bar rett +predic ted +back to +tw it +e there +cap tains +escap ed +ay o +lam borgh +gard ner +la ps +k al +adverti sement +insec ts +na po +am en +ac y +r and +g k +te h +k athle +tri dge +pan cake +at ro +pyram id +bu la +paral ym +gau ge +en cies +tom y +biscu it +but cher +quali fier +coun ty +ke i +po ols +dar ker +should ers +ðŁĩºðŁĩ¸ ðŁĩºðŁĩ¸ +sp re +( " +writ ers +g m +ðŁİ ĵ +k nit +hu ff +mt b +philli es +o st +den is +g art +licen sed +inter face +ex cel +d well +from the +co fficial +az zi +appear ing +fore st +n ana +ke ith +manufac turers +beck ham +) ? +e se +col ony +delic ate +ut ter +mc in +transpl ant +pre ferred +par d +ari e +hu b +po ds +perspec tives +pic t +del u +app er +be than +p mo +crimin als +femin ism +sh ack +circum stances +fel las +prote sting +wa x +sugge sted +t ator +dre w +om ni +fa ke +kath y +re b +del ine +ber ni +mi sty +ðŁij © +er able +break through +men swear +millenni als +chan yeol +la z +inser t +rep lies +phra se +n x +ihear tawards +audre y +gran ite +rac ec +ori e +ter ra +innov ations +britt any +at eral +pe ar +bio logical +sh ments +institu tion +m sn +frequ ency +d man +neg lec +t f +ste fan +fox news +ty po +comm s +sequ ence +car men +wh ites +econom ist +exe ter +se um +re sorts +cas ually +bun de +divi de +Ø ¹ +ga g +cre ed +reti re +cau cus +rapi ds +wrestle mania +tul sa +sunder land +fundam ent +o di +yam aha +v ary +intri gu +el se +be acon +an gie +tra ded +tran sm +g ents +kn itting +gal ac +ðĿ Ĺ +u to +sea side +hol t +re rs +far go +train ers +mon soon +b ale +sou ght +mad die +h w +co li +fr an +fav s +ðŁĴ Ķ +int ent +r ally +s bs +lemon ade +barack obama +bre ad +stick y +explo sive +chel ten +t j +as soc +ram en +hom ies +v log +mi ster +lor d +âĢįâĻ Ģï¸ı +aly ssa +sketch book +ru mble +cat ch +migr ant +discipl ine +un likely +chronic les +fl ora +sl ams +am id +s boro +coo p +ju mps +tran qu +mel is +sof ia +en ri +gab e +sy ri +nicol as +cha i +w v +be cky +foo ty +ta o +suppo se +ðŁĺįðŁĺį ðŁĺįðŁĺį +plu sh +ri sh +ðŁ¤ ĵ +k ha +satur days +ac cent +he c +lim it +carl ton +wi red +taylor swift +ðŁĺ ij +sq l +har ro +recipi ents +g at +go p +th of +amaz ed +gh an +ðŁıĨ ðŁıĨ +por to +cla re +di stant +na c +ohi o +ðŁĻı ðŁı¼ +mt n +anti bio +dino sa +me sa +par tial +b v +lear nt +lov ato +questi on +ex tract +gossi p +gi bb +niag ara +ðŁij ¨ +displa yed +so oner +ste vie +nug gets +ml n +bro m +tur b +give aways +stu pi +bl ink +c ili +conven ient +mo h +vi ve +f ric +cau se +cham ber +cu les +ne arest +is se +small biz +t j +canadi ans +smar ter +bra sil +ra re +que tte +w ha +cand le +at omic +ðŁijį ðŁijį +warri or +relax ed +stri ps +ne ur +k ka +r fc +jen sen +reco vering +respon ses +sal am +ortho dox +acti ve +ell ers +n it +âŃ IJ +metro politan +centu ries +vi da +gra ding +transpa rent +sim ple +do ts +superint endent +elev ator +autom ated +red skins +ima m +summer time +jona than +ge aring +michel le +confl ic +m ice +to te +publi sh +pa x +) - +na iled +á ´ +tele scope +ser bia +ba b +ape u +st ically +sen ti +r ats +isol ated +grou p +hat red +paranor mal 
+stan ley +ali on +safe ty +l s +ठ° +nex us +alexand ra +mas ks ++ + +tr on +au k +brother hood +brow se +mix es +sim one +mu sk +appro ve +lo la +ex p +per th +fu turi +un seen +d m +chel se +sc outing +o we +portsm outh +k ram +mi ze +di spen +su p +d lc +adver t +tere sa +is le +cy cle +met all +shi elds +marin ers +ra z +ing en +fun d +an go +jon es +o ka +mad den +broc coli +domin ic +situ ations +mer o +cric ke +puni shment +d b +sha king +ðŁĺ ļ +m q +ari ans +le h +cla w +we ds +d ure +ni el +j elly +gour met +tra ders +le vi +w ages +kne es +wi se +heaven ly +avi d +melo dy +z ack +ban anas +apprentic e +pro p +fun ny +o de +respec ted +me gan +fe wer +dra fted +med it +gra pe +us army +cru sad +vo cali +prepar ations +non sense +us age +th r +ro th +wiz ards +insi de +promo tions +mon a +red sox +si g +eleg ance +ch ia +univer sal +ãĢ į +ra ja +un ga +pol lin +filip ino +ak a +t sun +ik on +bi king +decor ations +z ac +cade ts +hum our +ag m +re ppin +vac cin +elo ve +u w +dia be +galla gher +az er +do l +a while +pro minent +wel sh +t ann +' ) +bi en +wa g +in al +c wc +wic ket +ur st +q anon +x e +out door +dun n +star r +co logy +ric ky +u efa +reb ounds +s music +inf ant +ðŁĻ ĭ +so p +u mber +hand ing +beg in +sor ting +ha sh +sp ati +re k +buda pest +black hawks +dele te +ro m +can did +auth ori +de bris +spe cul +inter section +marri ott +im ran +ðŁĺģ ðŁĺģ +cru ises +ram sey +rafa el +aware ness +vas cular +beyon cé +ru g +ðŁĺ Į +festi v +ar am +s able +bas il +p ill +flo oring +un beaten +implic ations +u f +w ound +for ge +poin ting +po ts +popular ity +ðŁijı ðŁı» +mani pul +s lots +deb ates +abs ence +ver mont +never forget +wri st +gl oria +ren ce +hu sk +mel ting +ðŁİ Ł +br aces +tim ely +transform ing +am ps +ma k +po e +ah an +gener ally +nd p +ale ppo +unic ef +pro fs +nor d +ma sk +jackson ville +v v +sh ells +bloom ing +oper ators +char coal +ne ville +ma gi +chi p +sam a +ir an +re forms +accu mul +ru e +æ ľ +web sites +ga on +devast ating +sto s +glaci er +ra pp +chipot le +pr a +or ous +rom ney +seas on +decor ative +c isco +dit ch +compla in +ll o +assu me +ðŁĺĤðŁĺĤ ðŁĺĤðŁĺĤðŁĺĤ +n els +cent ric +ft w +car rots +tat a +can ter +per ience +li ers +demo s +bl unt +oper ate +reserv ations +le ah +sub stance +di son +an te +elec tion +v ue +squ are +non profit +ca a +f su +y am +ãĤ ¤ +v ladi +comple tes +mar i +philli p +ne ill +er as +ka it +men do +mahar ashtra +g p +dan e +provi dence +ther apeu +juven ile +me mo +in corpor +aa aa +seven teen +teen ager +à £ +or ns +wi de +cu teness +tw d +ff les +bar a +com edy +over time +y az +bar on +unemp loyment +ðŁij ĭ +exter ior +den se +cent res +match up +history month +artif icial +qu it +e sk +war n +cr itic +j af +ðŁĵ ² +inform ative +fu els +recy cle +nam ing +stri pe +sol ic +mole cular +dee pi +con vo +s sel +na e +de scent +ti z +accoun tability +ter ry +r ito +sl ay +em o +dem ol +sens ation +co v +tor e +round table +y ol +excu ses +ॠį +tur quo +hh hh +pod casts +cele b +me ssi +li o +man n +contribu ted +u z +gener ator +ele ts +veg gie +indu l +en suring +detro it +pun jab +tran spor +instru ction +ad d +por cel +pan eli +cir cles +persi st +clay ton +sp n +dog softwitter +is nt +sp r +retail ers +p w +hun gar +el ena +mon aster +gu atem +je ssie +an z +ra shi +fle e +car ving +fau x +l al +hen ri +d jo +du ll +s ana +lar a +glo be +cri mson +com pass +pau se +na b +lion el +ba ths +u fo +invent ory +sin gh +sat an +ðŁĩ ¸ +ce ments +in form +gener ated +bi den +av g +tas ks +de er +sa u +ja iled +pa stel +sc 
c +na il +steel e +per is +lamborgh ini +pur sue +mar gin +u ch +bo sch +dra in +cl ara +bo m +lat ino +web ster +rose mary +r ha +s oun +billion aire +not ch +percent age +con or +' " +hom es +earth day +h ort +big gest +di sin +wal ton +edit ors +im ma +om ar +equi valent +pharmac eu +ah med +cam eo +han ni +under rated +ge ment +micro bi +v oo +honor able +obe sity +âļ ¡ï¸ı +limer ick +invol vement +st agram +boule vard +bur g +blackand white +liber ation +fi ve +inter im +sm m +rival ry +cap abilities +stat ements +thu mb +ve d +sw ans +bar ber +e que +seren a +hel m +noo dle +sam pling +n awaz +sing le +thunder storms +sh on +in ev +ë ¯ +to pp +orch ard +bi an +ðŁĺ Ķ +door step +salv ation +marke ting +r ons +cle mson +ra vi +in take +stand with +sin a +ha iku +ple y +elector al +ph illy +la ys +electr ic +cap turing +u pp +er gy +believ ing +cul tures +es day +inva sive +ed ed +spee ch +end ur +viet nam +boy cott +pe de +deli ver +ðŁĴĸ ðŁĴĸ +mer chant +st ir +den ies +poc kets +o ti +cu ddle +ro land +mm ed +den ed +lear ners +hoo p +sour cing +h acked +di m +environ ments +ben son +jud icial +wor cester +pear ls +govern ments +arri vals +cor ners +tun ing +la bour +y m +or dering +le wi +i fe +hygi ene +thou ghtful +indone sian +campaig ning +princi ple +assau l +ru bb +at v +wil ly +en tre +il i +ph on +du ties +âĻ¥ âĻ¥ +sn akes +lo op +am ar +conver tible +bon ding +ment oring +max well +ethere um +destro ying +ax is +ca iro +fin nish +sho ck +ðŁĺ IJ +cal eb +com a +pe dal +co re +contin ent +el son +temp o +helsin ki +ac p +tack ling +st ated +bl a +dou b +sma shing +a ja +camer on +disru ption +warm th +being salmankhan +bullet in +o de +syrac use +ar an +mc gregor +bul k +an ton +confir mation +sp ine +im ran +instru c +jac ks +chi o +pal m +str e +embarra ssing +un t +elimin ate +to ss +c ise +a ws +oni sts +sh inee +jo s +ho se +li vely +opp onents +mo vements +recogni zing +sandwich es +sh akes +exerc ises +se at +profe ssion +merry christmas +lu gg +adopt dont +mar vin +byr ne +un le +he t +ku wait +rah man +aspe ct +humb led +gen es +f and +long time +) ; +cam pu +an gus +ðŁijį ðŁı¼ +q uran +sle eves +s lic +¸ ë +twel ve +your e +i ke +go gh +b st +dic tionary +reflec ting +to on +yar n +em bed +ðŁı ´ +re serves +floo ded +ver iz +du sk +estab lish +pro li +au d +ritu al +or bit +declar ation +recor dings +cam o +cas sette +good luck +cu tter +bo p +b ho +che ating +paci fic +ma res +tim er +col t +tr ous +tomor row +han sen +ci e +w ang +ban i +circu lar +ac ute +far mer +co ys +p se +ir ving +w j +haw kins +b ison +ur day +cru ising +o te +k ath +whi stle +your selves +ant is +sla sh +thorough ly +ke sh +ser ie +ex em +en ig +guil d +sh red +ho gan +ap o +ä ¸ +pu zz +ne tball +au ssi +panor ama +ws j +av is +ar ming +hum ph +brow ser +cri es +fo ggy +mat te +ðŁĮ » +it er +tal lest +by ron +cap tiv +je su +any ways +flag ship +p ton +we y +fay ette +financi al +f oul +solom on +jenni fer +cucu mber +ar gue +tex tile +wrest ler +john ston +pa stor +ðŁĺŃðŁĺŃ ðŁĺŃðŁĺŃ +cac tus +edi ble +re served +ric hie +met res +ingredi ent +h ella +un to +ch ol +cele bs +po ets +gra ham +hay den +coinci dence +b aw +communic ate +flet cher +/ - +tole do +ecu ador +coun sel +s laughter +line ar +at p +os u +jo el +ev ed +conqu er +ru stic +plic ity +recogn ise +room mate +cr acked +jas per +ph er +ðŁĮ º +wo ven +mo ist +ff c +ste ering +ni sh +stand ings +frequ ent +ar di +haz el +as msg +bau m +d art +si dd +nat h +ch ero +card board +c ss +n sfw +pa ir +ðŁĺį ðŁĺĺ +occur red +homeless 
ness +mal one +ph e +xi a +pad dy +decl are +theat re +b f +per sian +ta d +ax e +susp icious +lam b +mu cho +sen ior +st as +k ite +st ing +gra d +k af +wat ering +Ø ¯ +spi ral +th ms +educ ator +jer ome +of c +clo ck +su l +pe mb +.... ..... +park way +de aux +restric tions +m ons +need le +e j +le agues +water melon +am an +pl enary +max im +w ab +coming soon +bry ce +vi gil +super market +fortun ate +turquo ise +presi dent +li v +inter ns +feel in +fix tures +stun t +st aged +premi eres +lo k +prac titi +shor tage +log ne +ve c +con cor +roc ke +li g +com posed +syn thetic +di p +cam ila +ch is +j ou +su san +eye brows +supp lement +satis faction +moham mad +ti bet +house of +pu n +as sam +shado whun +psy ched +se duc +mand atory +her bert +sc allo +stream ers +proto col +block buster +produc es +sch nei +lau rel +tri be +time hop +pl a +mod elling +tv time +mtv stars +wi dow +me tric +ch am +con do +flow ering +ale c +d ms +inten sity + ¨ +mccar tney +islam abad +k b +f fi +ph al +anal og +f ond +h acks +positi vity +treat y +sub marine +conne ct +sel en +categor ies +cu b +organi ze +si k +quote oftheday +remin ding +am or +loc king +ðŁijı ðŁı¼ +comp ound +et te +b out +rec ur +fe rence +mi zz +tren d +hip ster +for tress +forth coming +preli min +o dyssey +ang p +del ici +even ings +ðŁĶ ¹ +i q +d w +da ir +kathr yn +christian ity +moon light +ha b +wh oo +f bf +se th +genu inely +pa x +char ity +deplo yed +b nb +bu cs +ju dg +con ge +plant ation +im press +car a +sc lub +sco py +land ers +compla ints +b ama +re build +x y +real ism +sh our +le in +brac elets +mer a +assas sin +an chor +ðŁijĮ ðŁı¼ +lin en +con fron +chronic le +comm ent +cat alog +il les +gor ge +me try +jung kook +love my +sent in +se em +fit ness +alli ed +ts man +digital transformation +pr an +lo ft +min ton +alden richards +en vel +cher ish +certain ty +zz z +rhin o +per kins +en rich +cape town +ome ter +sec tions +ske leton +def enders +ðŁĺ Ŀ +pen c +bri t +ja h +capital ism +ðŁ¥ ĩ +baz aar +re me +ex t +kk k +conver t +stor my +b ye +kar an +chry sler +ad os +pre ssed +syn c +ation day +dang er +bad ges +refu ses +em powering +ly m +ex ports +adoptdont shop +ðŁĩ ¯ +th c +awa ited +focu ses +fin ed +o at +haha hah +âģ © +n family +fi ona +luck ily +thr illing +ty ping +out break +di es +he u +craw l +ne sses +o ath +scri pts +gee ks +ðŁIJ Ŀ +p b +mathemat ics +al is +________ ________ +gymna stics +acti vism +recommend ation +gre n +wa in +cour ty +n apol +cau li +hor nets +g als +jo ckey +dir ty +at ar +enor mous +pe st +greg ation +an os +ii ii +def ends +black historymonth +at x +mb c +lugg age +wit ch +co b +la sts +cu m +gg g +ba thing +n ar +ce bu +ðŁį ĥ +navig ation +min e +re jo +ðŁİ Ģ +gif tide +re ta +use less +pu ll +defic it +al lu +ati me +it v +tr illion +pu e +ac ies +proce dure +l ori +jen ny +c ad +ul ously +dr ac +promo tes +ing the +can u +woo hoo +na omi +zar dari +ts u +be ir +sd g +le ver +we ber +ab ud +lun d +crow ded +deplo yment +ter rain +ken ny +ho f +witne ssed +lo ch +j k +bul ly +w ren +poe try +do ff +ww i +mo red +din i +cul ture +promp t + ¥ +maur ice +to pps +r m +cor respon +ab out +jewel s +gi br +eag le +ðŁĺĺ ðŁĺĺðŁĺĺ +l ending +sou ven +ç Ķ +contemporary art +establi shment +j ong +âĢ¦ " +gat or +patri otic +mc coy +v ape +human e +feli z +coach ella +re posting +ste als +fu ller +n ering +at ra +( - +bla ke +he ather +wor ms +discipl inary +rede mption +y ard +am in +" @_ +d nc +t ds +k appa +ne wark +comm its +spe ars +j ams +t and +msn bc +inter medi +aim ed +at ic 
+teen th +observ ation +kash mir +kavan augh +ou l +san francisco +re u +bel ated +cho w +pass word +st ills +deta ined +sar i +day ton +dar ren +itali an +ar th +amu sic +ar bit +w m +v m +he m +dou g +my r +a sho +pre v +vin d +bra h +sta g +ภµ +pre views +gu k +con taining +leon ardo +sad dle +ru shing +st av +lon gh +gam bling +ve gas +reserv ation +end ale +bal a +fl a +vari ant +he dge +bulgar ia +nat ali +we aver +sol st +encoura ged +ap c +as parag +ne st +cycli sts +fe l +ìĬ ¤ +overwhel ming +pey ton +j it +a post +mb le +ble eding +neighbour hood +a very +expre ssions +mac donald +gi gs +mon ds +illu sion +n ct +cam ero +over head +my th +ol y +vi o +et v +lau rie +unve iling +pri or +con n +iron man +di ff +day in +crit ici +con go +re vision +wal e +direc tor +p ines +black pink +gar ner +cur ated +manit oba +h ac +common ly +bar ton +.... # +mor tality +live smatter +philos op +shor ter +con vince +fre ak +vend ors +insi ghtful +el ly +sens ors +e led +s berg +weight loss +u kip +sp ur +priv ate +qu a +ss c +, ... +supervis or +advis er +amaz ingly +less er +at es +mah on +oooo oo +sar as +pmo india +waff le +un ders +toler ance +sculp tures +her sh +kno cking +smo ke +cathol ic +gri m +tra veled +fli p +ge off +dinosa urs +sle pt +scar let +ok i +compla int +ob sc +nam i +la g +cross fit +u fc +mc cain +refe ree +sad ness +pen ny +li eu +mo de +ki er +vol s +w is +el on +she a +ba o +son ia +cla ire +em manuel +moist ure +di gest +vi ii +t eller +ch on +access ory +night club +foss il +aw an +hu sky +ab original +brand on +ffici ent +cou gars +ste d +ad mitted +igno red +content marketing +ag as +v ase +execu ted +negoti ations +she ad +n and +tab lets +go th +ts al +d fw +on ep +protec tor +sp ho +gaz ette +andre as +ss er +comp ilation +ha v +contain ers +bro ker +soc al +porcel ain +hy uk +air ing +ðŁĴ ° +publi sher +scen ario +spart ans +re viewing +itu des +ed el +pear son +ba sh +mau i +a ad +ðŁĮ Ĭ +li u +ul ate +program mes +fav our +web design +real ty +motiv ational +cro sses +' ... +bus ch +adjust able +ar jun +mist ak +dimen sion +pi stol +weigh s +en y +unve il +indy car +gor don +f ade +fran ken +qual ities +bet t +loc ate +ker r +sp c +confu sion +ne e +luck y +bas es +dep ends +fire fighter +ol a +re t +mar oon +ðŁĶ Ĭ +w am +defin ing +whe at +bi l +é s +b hai +psy ch +ta u +ic ans +thi k +ob ile +inspec tor +ìĨ Įë +ill on +go s +ev angel +fa i +si st +voc ation +bur ge +chi stan +renew ed +enthusi asm +en ting +ag ri +ike a +m sc +aero space +sens iti +memo ir +hosp ice +co caine +der ry +mechan ics +Ħ ภ+tin o +reduc es +collec tors +in justice +supp re +v ana +ab un +nap a +su sa +os lo +e ff +en core +lic ence +ched dar +z al +moun t +ðŁĴ IJ +threat ens +!! 
" +archi e +fu tsal +scu ba +jo s +gn on +se xi +s official +compar ing +domin ant +tof theday +fa it +propos als +gi ft +y as +cn c +l r +ha b +reser voir +beli efs +gener al +mar ti +t d +est e +ì ł +wi l +ðŁij ¯ +ðŁĶ « +sp x +et work +excer pt +e instein +hir o +sil hou +team ed +per ception +corri dor +mental health +hin ts +ben ny +induc ted +sw x +wi desp +spe ak +cher yl +dru g +ðŁĺ ķ +h f +asparag us +myster ies +fitz gerald +off er +therap ist +care er +dam aging +ts d +per u +wei bo +y ay +phoeni x +disc re +mac book +bar ker +stig ma +sp read +roc kies +kang ar +bri dg +pa i +bi shop +ta iled +capsu le +ðŁĴ ĵ +ge of +roy ale +short listed +o ste +ash amed +ch app +key e +cl a +screen shot +austri an +nati ve +en ight +juli et +michel e +ðŁĮ ´ +travel ers +pi l +football er +win chester +ðŁĻ Ħ +azer bai +gold eng +organis ations +interpre tation +predat or +ofthe week +lo gan +pok é +mari e +cal la +t nt +cin de +ge tic +fit fam +gra v +ow ens +ðŁĮ ± +shoot out +sal is +commissi ons +co he +p tic +ni xon +hi a +amb ition +mar ine +cruel ty +t k +cru de +sal ty +jim a +mon go +ir ony +on wards +arre sts +strang ers +ig er +cycli st +ra g +exten ds +tra dio +bour g +mo i +el la +e able +lex us +au l +der a +histor ian +mor ton +ti ff +man ner +ko t +d k +po inted +mar qu +a an +en ey +du blin +on poli +em ili +secre t +fl o +âļ ¡ +ba j +ste ep +accompan ied +rum ours +dev i +purch asing +fi g +pu b +sch oo +autonom ous +go alie +x ia +autom atically +re vers +ter o +fu ku +titan ic +shoo k +sand als +see kers +exc av +nor dic +bigo live +ba ke +r att +z ak +ne p +ðŁĺ ¤ +cand y +billi ons +book worm +pp et +à ³ +sur faces +sc ars +phil ip +do gg +ci gars +co te +transl ated +cur ator +sin dh +han gover +bre wer +on es +el ton +ðŁĴª ðŁı¼ +mar cu +elli ot +righ te +di oce +ru ss +rail ways +grand son +as cen +apo logy +awa it +mob ili +re spir +parti san +oli vi +stri ke +yo o +white house +expre ssed +pu ps +bed ford +cul tur +fro gs +fly ing +cav ali +c ds +fri ger +street photography +re solve +tali ban +kan g +cru shing +ju m +ðŁĺ Ĵ +william son +tan g +cur ly +t man +veter an +fa ire +artificial intelligence +un anim +pre n +back drop +fr ances +oc cer +doro thy +work ing +ar thr +conver ted +day light +serv ant +pad dle +compla ining +thir ty +nad al +ak u +ibra him +ad dressed +p iss +green house +batt alion +si mulator +out lets +embroi dery +ðŁĵ ± +fis cal +ger ard +sas sy +ðŁİī ðŁİīðŁİī +vent ures +mer it +public ity +ðŁij Ī +sophistic ated +c tu +conven tional +condol ences +isra el +tra dition +ar an +te ss +gla d +ðŁĺĬ ðŁĺĬ +correc tion +ge on +am d +or ship +be ast +ch ment +ì ŀ +nic o +wk nd +wel s +cushi on +beli e +vo c +idio ts +under neath +pu ma +corn ell +en ation +lu l +swa ch +ab ig +u rer +mi e +form erly +ca f +er nal +chor us +juli us +sen ator +âľ į +wh ir +salv ador +ph d +uni fied +boo ster +graph ical +w rec +son ny +mi z +dere rs +s all +ven s +tusc any +wi d +y ong +kur ds +w az +trol ls +mac ro +cat urday +pre ssing +sa sha +cent ennial +gu sts +em c +be fore +den ise +cu st +ðŁĵ ¢ +lo oo +base l +eng land +y olo +ar du +manife sto +do ha +ì ľ +kni ves +bourne mouth +bi bl +bar b +al icia +Ø © +com er +cycl one +g it +ane ws +character i +vent ura +in tra +sf giants +hu t +be a +dar win +ell er +al v +re ese +bl y +kar an +conclu sion +man ny +fla kes +unite blue +nad u +co pp +ed ges +lanca shire +i als +o tta +philipp e +l ent +che e +ment ors +festi val +an ism +compli mentary +r j +pu g +d ine +we i +cli ffs +sar my +ti veness +treas ury +il and 
+after math +rabb i +ou n +bou quet +herit age +zi on +sur render +shen an +in ks +kar l +gh ty +pol icing +exam ination +ce y +per su +measure ment +hydro gen +lu han +âłĢâłĢ âłĢâłĢ +war i +о Ð +j y +fow ler +mis h +al fre +âĺ ij +bb naija +cat alogue +recogn ised +sa ver +hu skies +col in +mun do +si va +p ng +discoun ted +man utd +fre sno +de vin +prelimin ary +tro phies +pla stics +du g +pro cu +indi go +g ard +dy lan +pit ches +ground breaking +in son +bl ac +an thology +f h +expl ic +r ard +admi ral +so chi +la shes +splen did +en vy +ad v +sex y +festiv ities +stic king +bi b +thr ill +op p +ari el +botan ical +endur ance +fe males +br icks +vat ican +black pool +ber mu +br ough +roll er +bi d +sue de +sloven ia +mm ing +ml b +med alist +di ans +rehabil itation +ne on +s go +li thu +ram os +z ed +pi anist +inten sive +broad band +stu dy +peter sburg +lu ca +ah hhh +phys ician +dill on +tele com +gri ef +mu n +ac ro +si ded +s ly +blo ws +classic cars +tri um +ar gy +? : +h ri +marsh mal +âĢ ĵ +to pping +war saw +tran sc +preserv ation +b av +re friger +experim ents +ä º +gl it +sli ga +g age +fac tor +flav ours +br ony +sp o +cook book +carri age +aw ay +ny fw +on ian +w g +simp sons +ro lex +ðŁı ¿ +cro sby +ãħ ¤ +cre di +syn dic +pu bs +ali fe +poor ly +mac ed +ðŁĺ ŀ +behin dthe +w enger +n ats +ðŁİ Ł +rubb ish +procedu res +typho on +opho bia +er do +fu el +vi era +bu mps +millenni um +new zealand +lec tures +it on +mil ky +respon ded +ê ° +landsc ape +.. @ +bo ther +âĸ ¶ +z hang +huawe i +tu ition +s worn +in u +y or +pa olo +au ditions +ab il +malay sian +ho ps +fe athers +mp le +au ts +ã o +boun ty +ic he +ì ĺ +sh q +pin ot +ge ars +disapp ear +video games +t na +alzheim er +ðŁĮ ŀ +a ji +under wear +swit ching +sign age +o scar +ec on +dro w +cl int +pl ated +gun dy +emb lem +ho es +ici st +nel ly +juni or +road show +miner als +at le +alexand ria +ac claimed +v ell +shi va +ad he +en ne +amne sty +h ounds +councill or +ðŁĴ ¦ +aes the +part nering +influ enced +mag no +fl are +extin ction +civil ian +maje sty +va il +law makers +rac ks +mc c +ori an +sp ices +er rors +may er +co ca +pa i +s ooooo +reti ring +ba thro +ðŁĻĮ ðŁĻĮ +âĸ ª +su f +endor sement +buil ding +broo ch +pal la +arvin d +ag ent +kar ate +r hi +c tv +ta ine +um m +ba x +reig ns +uni of +enterpri ses +adel e +fla ke +at tire +bru ce +ba hamas +gra vy +sa in +che ek +tri vi +lo v +e en +bb lo +lady gaga +itt a +. 
"- +du stin +observ atory +eigh th +bloom berg +kh s +f cc +gi st +commemor ate +ve er +sexu ality +ed c +nic ole +vac ancy +u ser +son a +:' ( +dipl oma +t end +up grades +Å Ł +jura ssic +cardi ac +dr s +widesp read +à ł +dail ies +vend or +sim plicity +wi der +len ses +supp lements +de pos +ob served +vin es +parti ally +renew al +collabor ate +ali g +fin ity +ph u +zz y +pe tit +ðŁĵ ħ +z in +i gu +sm ack +fall on +ðŁĵ £ +back wards +comp onent +o so +compati ble +bin ding +zur ich +thom e +w ounds +ly ric +fresh men +sne aky +fi bro +di et +emplo yer +in sect +h ated +sch er +raz or +n sw +boo ker +califor ni +av fc + ° +preten ding +pep si +al is +un titled +k art +grand parents +e the +o ck +lux emb +visu als +small business +abdul lah +min ho +su baru +h ra +reve aling +heart breaking +clar ity +am g +sl r +** ** +âŀ ĸ +recor d +ici ary +min ded +ye h +exce ssive +knu ck +icec ream +tru th +ev ic +ta stic +ant arc +ren dering +, , +mit t +loren zo +st patrick +bound ary +zi g +vo cab +osa ka +fur n +tu n +gu l +s ounding +blo gger +utter ly +g af +adv ancing +l cd +mar gin +lifel ong +solst ice +sh ra +wa its +ple ar +bre ach +en ligh +ad er +itt le +c ation +ho on +stu died +?? ??? +k ash +ev angeli +ps l +wei ghts +met als +ty res +tur no +wi e +car b +g ale +se al +sun ite +am ic +patter son +á n +eu ph +up stairs +quali fiers +khali fa +apple music +ìĨĮë ħ +vau ghan +al ter +cru iser +mu a +t ana +kat rina +id ols +spo iled +secre tly +fi bre +part nered +um es +gi ov +com et +screenshot saturday +k eller +fil tr +fe t +con way +pe u +bad minton +gi d +m ound +don key +bu ff +lea ther +lar gely +bro ch +int ments +am use +r k +sto ve +impac ted +con t +cr acks +prison er +bar i +contrac tor +ori oles +domin ate +pol ar +am elia +dr c +ðŁijĮ ðŁijĮ +vi st +su arez +injec tion +blo oms +ðŁļ¨ ðŁļ¨ +sti ff +pay pal +sno wing +thur sdays +goo se +we dge +educ ated +weak ness +de cker +abud ha +bree zy +Û Į +hope ful +o bi +rai der +gh am +de u +se ve +par tly +fu t +infu sed +mer ri +than e +some time +hu e +me in +cre dit +sli ding +ran de +cher ry +dead pool +sh ol +ar am +under wood +sky e +distur bing +m nt +poli shed +guardi ans +ha dn +pic asso +ari us +ak shay +ir ri +j h +happ en +la kh +dal ton +at the +s well +mar sha +re h +cour s +j kt +top us +serv ice +r ink +hack ers +dono van +hor o +tc m +may hem +cha se +dev ops +ken sing +sc up +sh ere +quali fication +c live +ton g +n ancy +mar is +der dale +ber man +cinde rella +jol ly +ci c +loo t +collecti bles +hom icide +g ge +epide mic +su ites +mu ddy +gi mme +e rec +- * +tal la +lis le +embro ide +ðŁĩ© ðŁĩª +veriz on +ve ctor +be anie +arti san +ga in +flo res +vi gil +u so +ðŁĻı ðŁı½ +grin ding +gh er +air ports +respon sive +shaf t +can cel +ceremon ies +e me +at ari +bru shes +eag er +bo hemi +children s +yan kee +ma a +suspen se +mor an +mac ar +sun flower +cre w +vo id +ke ar +fashi oned +jen nings +sunday funday +sub missions +me ad +her man +wa i +crit ically +le um +baek hyun +for cing +co bra +ãģ ® +acqu ire +al k +ge ology +pri mar +import antly +ire z +bunde sliga +curi osity +sen a +stric t +con soli +win ters +ven om +chelten ham +ðŁį º +cen a +t at +ba in +glo ver +under cover +as ses +car n +memorial day +am eli +i rene +ch on +syn thesis +spe edy +mitsu bi +sla yer +compos ite +under stands +pe w +inter rup +hen ri +mor row +an om +thof july +g lee +thre e +ðŁĺ ® +and hi +ch att +renew ables +ye s +trans fers +!!!! !!!! 
+bab u +du ter +lo ops +pe ers +o ilers +pau lo +ic ation +h mu +war a +mer cer +hom eland +fu ji +ale y +year book +re m +re en +ab sur +bo is +] : +caes ar +shot gun +kur dish +o ren +ra e +anci es +ty pic +f h +def ault +re plic +lu k +trans actions +r ys +infan try +ðŁį ¾ +cho w +chick ens +ba gh +wy att +ay e +gg i +bre ws +ed itions +mi ra +commen cement +pre su +peris cope +ic hi +guatem ala +zam bia +pain ts +wit ches +wan i +un dere +cro y +vo ws +us mc +hear ted +theat res +shu ffle +le vel +mul tic +squee ze +fer n +app et +post al +mal t +on board +ld nt +co o +s sc +k ac +ðŁĺ ĩ +sc rap +mar cos +deal ers +ann u +mill er +co ve +ul ary +vladi mir +be ef +th ur +pick led +se same +bengal uru +mo tt +kathle en +hi st +no tor +dr ank +du chess +snow fall +e ff +tin y +j n +sy our +speci alists +scot us +bay lor +eve rest +mali bu +pre m +harm ful +l ali +b ates +g ye +differen ti +and ra +geome try +el over +black out +== == +ko ta +inter act +asi an +la yo +samu rai +fi del +exhau sted +gla di +pd t +spher ic +anti qu +guit ar +stu ri +ho pper +ang le +f ills +sla p +mi th +rod ney +ong i +in som +pre venting +cassi dy +ap ho +ore gon +lo in +ham mond +contribu ting +f n +gar ri +ori on +comp elling +escap ing +aim ing +plu mb +bi stro +be asts +concer ning +bo e +do pp +shop local +stumb led +âĤ ¹ +naz is +âĢįâĻĤ ï¸ı +gest ure +war ts +us open +hi ggins +char li +hang s +bom bers +° : +fe eds +c ch +st il +nic ola +ðŁĵ º +clam ation +tro pic +af ro +ou k +expen ses +der rick +al ine +fa w +reg ard +im er +sat in +thi um +ry der +pear l +te ss +mm mmm +sen ses +ðŁĩ ¹ +positi ve +exhau st +occu r +nor ris +lil ly +is les +direc ting +yo fficial +count less +sam ar +on stage +flo ck +mir rors +arch er +mo i +k d +vi v +in os +si kh +le i +sen sory +br its +kno x +chest nut +op y +coli seum +z af +di vin +adap ter +:) )) +tem ple +ku n +hel mets +t df +gu ide +m old +o ids +lu ther +he is +monaster y +sp ree +k lu +brit ney +jagu ars +gre ats +c cc +ky rie +machin ery +cric ket +re ro +ab o +aspir ing +semi finals +ale ss +sig natures +var d +me th +her bal +hol den +king dom +ap or +reg gie +ore o +palestin ians +em mys +sec tional +ro i +ney mar +qu el +cu ll +l ka +haz el +estim ate +ul ties +go w +be a +purch ases +bel ts +protec ts +m é +gue ssing +bb o +clau dia +fr acking +jon ny +el k +cel tic +al mighty +ra je +courty ard +ig i +can es +ðŁĴª ðŁı» +bank rup +le thal +âľĮ ï¸ı +graphic design +vad er +penc ils +rough ly +dan te +m fg +const ell +cam el +j b +bloss oms +en to +balo chistan +cine mato +ill ard +jer sey +con sent +dent ed +con templ +sch er +hol i +lou gh +st our +a yo +begin ners +cur b +v hs +a jax +du ff +av eng +dom est +commit ting +ai red +cha p +hedge hog +disappo inting +freel ance +in land +char ms +ðŁĺį âĿ¤ï¸ı +ai sh +m x +buck le +ti dal +per mit +bo ating +ra cha +kend rick +b ello +b hi +ple a +estim ates +l b +apo logies +jay a +bb l +ast oni +inter state +main taining +el bow +mu p +ep it +ðŁĺ ¡ +viol ations +def end +be h +sl c +am ir +pur i +ti um +fi fa +blur ry +scri m +ðŁĻı ðŁı¾ +ma ple +rel atives +âĺ Ŀ +cho c +con nor +⾨ ⾨ +whi sp +list ings +ma ze +than king +ri dd +grass roots +shi fting +desper ately +gor illa +den i +ju les +stra th +g ley +ja in +bu ick +t anner +ðŁĴ Ŀ +ga e +pri m +it ors +n ano +separ ation +armen ia +bor deaux +ðŁ ħ +pj net +bu rial +e bon +glo ss +re new +gri er +spe eds +comic books +sym boli +pur poses +ãħł ãħł +spati al +no table +ci on +n ps +ho ffman +nor man +rt g +du sty +situ ated +tr an +k fc +em en +nic 
kel +hast ings +sett ling +gr it +l ena +w aw +art s +gu m +ca regi +le wis +sapp hire +rememb er +embed ded +t lc +bl at +serge ant +el sa +boot camp +bow man +photo graphic +pill ars +direction ers +classi fied +no is +ve er +barre ls +wh oop +ðŁĺ± ðŁĺ± +fe male +petro leum +medi a +e fc +poké mon +ठķ +enthusi astic +var un +pro files +pedi atric +acci dents +con rad +jan g +jo jo +ac or +ob server +l f +live stock +for gi +fo s +el m +an and +go e +c ere +avoi ding +gri t +om an +thank fully +scat tered +nick y +cylin der +chees y +di ver +mahe sh +cav es +ear liest +qu inte +subjec ts +b end +gul f +vocali st +glu e +pat ches +un stopp +sny der +demonstr ating +pi o +hor ns +wic kets +and the +r ama +yo on +stra ight +bed time +or ang +bul lets +sa urus +min ers +inci dents +! ... +ðŁİ ¸ +ag ers +hand les +stat es +in ity +d ons +incredi ble +emin em +avi v +ru dy +moz art +folk lore +appli ances +mt l +fre y +di as +hu a +page ant +stri ve +im prison +bul lish +r ana +al erts +bb mas +hy per +derby shire +re cre +re dd +debor ah +cosmo s +law son +mel anie +psy cho +ho or +doo dles +sni per +shad y +man tle +canadi an +new year +inter actions +separ ated +cor ds +spiritu ality +ap u +it o +p ct +pel osi +rebel lion +se iz +wor cester +sec tors +ul i +san ta +Ð µ +ðŁĩªðŁĩ ¸ +bi ased +class ical +gam ma +dee plear +emer ge +back er +sur ance +hand crafted +ðŁİ ¥ +franc is +mill an +ic i +cro wn +wo w +stri ped +un fair +relax ation +³ ï¸ı +embrac ing +she alth +pale o +martin i +dist illery +wr ink +or k +na th +hay ley +cour thouse +si ber +sa di +quiet ly +mel t +m sm +me h +smart phones +rel ent +pp ing +war wick +co logne +gli a +cot ton +pro g +lon e +ip sw +star ters +expan ds +u mp +su ed +ski pper +infe ctions +ing le +à ¡ +cler k +demonstr ate +ac ar +ðŁĺĤðŁĺĤ ðŁĺĤ +ti bet +bun s +alo m +demol ition +ssi a +g st +[ ] +so ar +âĺ Ģ +ðŁĺ ª +ðŁĵ Ĭ +dee pest +beyon d +are t +att ends +activ ated +di mit +âļª ï¸ı +high lighted +magaz ines +rum or +az za +steph ens +dol ph +sho ckey +mat s +we av +mel an +serv ers +tra um +ku sh +æ Ĺ +bab ys +pa z +a al +la use +break ers +canter bury +ul ture +mi ri +euro s +tane ous +impre ssions +du tch +il d +gh i +pur due +adequ ate +l p +sy ner +ang ler +du rable +gal ore +ro wn +mg mt +ðŁĵ Į +lu cia +âĺij ï¸ı +zay n +bor row +. 
( +north umber +cru sh +eng a +su sh +extra vag +t out +ma hal +ali stic +ther mo +gall eries +es se +chi bi +attrac tions +lex ington +legislat ure +docu mented +resi den +brow nies +w f +st ool +plan ets +sho ppers +conduc tor +ms p +tr icky +fru ity +end ra +feel the +whi pped +hair style +re fer +oo k +oc topus +audi ences +ku mar +after no +op tim +c fl +ni p +gen i +alpha bet +ann ab +lam in +accep ts +l ng +ðŁĺ « +t ine +ac om +cheer leaders +t k +gr on +v g +k ung +ja x +dha bi +r ss +mack enzie +beir ut +clean up +gy psy +st ell +bur ger +hurric anes +educ ation +st ina +âĻ¡ âĻ¡ +unfortun ate +jere mi +bad ger +at ers +: âĢ¦ +ter ra +subli me +stu d +y mca +mr u +duter te +bren nan +bul b +mel o +yl on +hack er +c red +gu d +as an +pad illa +embroide red +vietnam ese +pione ers +projec tion +re boot +id c +an ey +pri mer +suff ers +win ding +p on +sto day +mor n +u ch +all in +adid as +eliza beth +tu ck +o graphy +ðŁļ Ģ +be g +os borne +ghet to +r h +cn n +ir ma +ma kin +cab les +mur ders +oc ks +inst a +al as +si k +cu ff +la re +foo dies +o vic +at om +geome tric +em pathy +ภµ +cent enary +newsp apers +administr ative +ðŁİ Ĭ +sti ve +contrac tors +le tt +tas mania +awesom eness +den sity +ve en +prince ton +frequ ently +re ject +gh i +modu lar +ceram ics +sh ag +ki wi +can vas +sweat shirt +an j +ti mm +napol i +il er +appe als +hamil ton +ma yo +we ave +arrang ed +whar f +occu py +b vb +as aki +ot ter +nor m +vi es +de tox +tion al +dere k +id ad +ad missions +constitu ency +u pper +woo t +allo y +se ve +lu b +un comfortable +ed win +ab re +d wight +ar che +virtu ally +sp ol +pri e +ai i +er r +swit ch +bar ack +se ok +cou l +wn t +pou l +o live +caffe ine +cardi ff +notor ious +de mp +ex cess +bar r +t ford +a jay +bump ed +my thology +shel ley +fal con +shakespe are +must angs +no ted +bon e +civil ization +sy d +par sons +un official +hy ped +sp ends +oppo sed +v ings +space x +noti fication +deci ding +bio tech +out si +sal ah +! . +fe d +ss y +c ms +bad gers +cr o +ela ine +n ba +dy our +n ant +honey moon +climb ed +conom y +ath a +m ell +ne bula +nature photography +juli e +bm x +inve sted +mon o +lieu tenant +wat kins +techn ician +o se +ka e +ì Ľ +mc queen +pre ach +trav eller +flexi bility +ze bra +reta iler +p ant +ben der +brand t +squ id +war rant +veri fied +cas s +pier cing +hon ours +t ying +mor ris +kis sed +op rah +panor amic +me i +splat oon +wich ita +ari as +gal li +indy ref +good times +athe ist +confe ssion +ow ski +re pping +ad ditions +mechan ism +z im +j ans +su f +cho pped +beg innings +vitam ins +ãħ¤ ãħ¤ +or th +po les +ru b +antarc tica +indie film +web cam +ket ch +bre tt +cle ment +her on +defe ating +hydr o +buc ket +wand ering +sid ney +future of +b inge +on ies +knock out +administr ator +syn the +l ent +jan i +bar ley +premier league +ner ds +cr m +bra s +bot any +evol ved +rot ter +ro wed +tum or +weal thy +Â Ń +mon arch +li shed +da hl +ðŁİ ĥ +bu ch +ken yan +Ø § +red ness +assemb led +se mit +hud der +shro p +ran i +lear ning +mor y +iti a +geo graphic +worl dof +f b +pho sp +boo gie +am ped +? ... 
+che w +dwar f +ar us +s sen +ru sty +recru its +h k +gar de +app lause +vol umes +invol ves +ta c +hand bag +trans late +ffe l +se ym +aqu atic +trans fer +zo di +and r +acade mia +cr ater +te z +ar se +adap t +col oni +snow man +mal i +hang in +di schar +oy sters +pho e +colon el +w ba +hispan ic +thri ving +sh y +ag les +sales force +cre me +so les +la fayette +â ī +ter ia +ach a +sp erson +go go +car ly +the ore +am ore +vo x +af t +ãĤ ¹ +stap le +mu ffin +di agram +ino x +su stained +av ent +me ta +arbit r +dec ay +ado le +Ð ½ +ec ol +ph o +n k +o cu +gr anny +ç a +luxemb our +stad t +alber to +le vit +am as +d x +or phan +co bb +as c +lo gy +immen se +chan ts +off line +p ent +bre x +w inger +plan e +i el +nichol s +ca thy +nar uto +low ed +/ // +ignor ance +cat astro +you ts +sch en +buil d +haz i +s ine +critical role +du g +dete ct +lo gs +en amel +stpatrick sday +ed die +co pa +cigare ttes +ho ff +kay a +la goon +ra pha +air borne +choo se +puer tor +ke v +gui ding +fro sty +bor ough +mir a +ðŁİ Ĭ +cade t +anu sh +yo gi +e ger +fl ing +slo pe +nin th +we ston +foot wear +f n +may weather +a am +pla in +stair case +witne sses +work outs +ro bust +dex ter +co hort +ðŁļ Ĺ +sp ell +ha ze +o om +organ ising +wild fire +cont acts +av on +min o +upd ating +ðŁį » +li thium +ing ual +k is +au ga +lo com +de duc +u da +th ak +boy le +mp er +hot tie +eri k +re vised +is la +travel photography +oo za +en qui +confe rences +clo ver +g room +cur ves +live on +per f +displac ed +bo log +xx xx +ðŁĺ© ðŁĺ© +te al +ve ssels +rain forest +cal ci +pan ther +gira ffe +ta sted +imag ery +pad res +day time +bas s +ri pe +opio id +nu e +vin yl +invent or +sen s +process or +mu t +gad gets +bibl ical +shann on +jacqu eline +car y +the resistance +ali en +n vi +co sy +bi har +fo ley +ren d +mu gs +fa ken +cl one +ni allo +gra bbed +chi hu +power house +n tt +chero kee +spon ge +imple menting +rh ine +le one +ðŁį Ģ +pret tiest +infra red +impro v +swit ched +tu bes +con tr +bl k +projec ted +be aver +yo t +bbcra dio +thi gh +per secu +apologi ze +w ack +po ster +oli ver +az a +lou d +( ?) 
+f the +women shi +spar row +blu sh +us able +sc ales +it ative +peu ge +ne eding +legg ings +glam orous +mat ur +c z +wat t +da b +tam ar +et sym +bau er +heart felt +h n +else where +bir ch +alu mini +hu ck +e me +j l +traf ford +d z +por tions +ana sta +arthr itis +esp n +ber gen +viol ation +yo shi +c z +northumber land +clo sures +ðŁĩ¯ ðŁĩ +smi ley +r w +tel ugu +inten si +gre gg +ve ga +dun geon +south bound +ba il +domin ican +semi final +chap ters +h itch +van ity +trans iti +recomm ends +sati sf +bar ca +queen s +( ( +de struc +stra it +ra vi +dess erts +in tru +har am +k os +fo e +fat ty +pais ley +magn itude +dri dge +com ey +schem es +vision ary +our t +down loaded +ðŁĻĮ ðŁı½ +gd pr +lan i +p wc +gu ad +nic est +stake holders +re ferred +george town +arvind kejriwal +schnei der +in doors +all star +strand ed +gen der +ze pp +ma sses +ðŁIJ ± +pati ently +bl dg +z ab +we arab +vi vid +he ck +d ella +sy mb +je opar +la ger +à ª +comb ines +ne c +br ay +flo p +tx wx +jo ys +pon t +pro found +sur round +mad hu +ma ble +ay r +te as +n sa +open ly +er nest +ãĥ © +to po +g na +anti oxid +ti an +e tr +c ello +ma thi +gener osity +b iting +man ic +kel sey +chee ks +ten der +w th +pron oun +ultimat ely +gu sta +ari anag +ger ry +ble ed +red dy +mic h +mitsubi shi +oper ated +sex ually +ma u +cl lr +vi ds +co c +mel ted +ðŁĮ Ī +q ld +ite ch +instru mental +end game +ðŁĵ ĸ +ener gi +brow nie +tam il +at in +domin ated +pra ises +fire place +sens ational +men a +k arti +un prece +ru pt +ori ental +mc cor +tour naments +scen ter +re eves +prescri ption +sam e +fra u +tru ffle +em bo +roman s +bla sts +techno logical +pr at +b sb +y ar +tren dy +ac l +al ad +ðŁį ģ +o hh +bankrup t +tho ven +regar ds +is er +war wick +vine yards +real m +niallo fficial +do ta +ge mini +to do +v able +¨ ¨ +la u +wre ath +ju ve +nat asha +le ver +lor i +hor ser +cc tv +air bnb +es anders +sin clair +ema biggest +high school +con test +optimi stic +t te +ðŁĴķ ðŁĴķ +ss d +ye e +hel ena +con sen +ric ks +jes se +an ic +ðŁİ ¯ +re acts +ro be +independ ence +vol tage +m ington +s ant +à¸Ļ ภ+-------- -------- +sentin el +ke tt +rehear sing +aaaa aaaa +sof the +stir ling +sear ch +wi gan +stand out +sna il +pent agon +Ä ģ +ch lor +cru st +net any +chemi st +disapp eared +ric ardo +sp iders +bo se +war ren +me ssing +bann ers +gu el +par ach +ma id +coun ted +epi le +bon fire +speech less +se tter +meas ured +rejec ts +nik ki +le ster +foren sic +fab rics +alo ha +pre served +wat ford +deta iling +dar th +bo u +car ly +... 
' +tail gate +noti fications +å ¤ +pas sive +trous ers +balo ch +ro ther +typic ally +à ¥ +sp it +wi z +sic ily +technic ally +ex pose +st age +hu bb +cre am +cap s +po ke +sle ek +ju ne +tempor arily +de z +awak ens +l ame +_ - +ji ha +tues days +advis ed +advis ors +exi sted +dis agree +news room +lo sers +world tour +dr ying +al di +har ness +foot print +hobb it +p mln +i ro +que red +asse ss +gaz e +sa b +th ian +í Ĭ +ti f +ob serve +ev il +dra wer +swee p +cor y +co dy +kyo to +cal lum +n inj +lau rent +be i +sket ching +custom ized +du r +regre ts +knox ville +ìķ Ħ +mess aging +grac ie +abun dance +bi dding +bre wed +fl ouri +therapeu tic +alt itude +ho gs +bur ner +elec tro +wonder fully +he ater +post pon +li very +r all +ad as +a ac +sau l +brook lyn +play house +âĻ¥âĻ¥ âĻ¥ +char itable +in y +z ah +compet itions +be av +plu gged +o is +do om +astron om +speci alized +max i +ta ps +cellu lar +depre ssed +folklore thursday +cri b +e mul +ë° © +fi gh +ru z +car lisle +spe ar +side walk +de i +depend ent +lac es +nh s +ðŁĮ Ļ +reali zing +net work +ric he +re gin +re fresh +st ral +pa thology +pla id +psyched elic +hin d +u ka +algori thm +lin king +progre ssi +fe y +d ade +hydr ated +b ant +fam ed +cot sw +bo ise +as c +rac ing +ja vier +ww en +mar lins +poo p +swe pt +toni ghts +we f +ani me +slo vak +âŀĸ âŀĸ +cla us +lem me +cli ppers +re ls +arianag rande +r te +ko t +thal apathy +hungar ian +zu ma +y von +is u +jour neys +clin ics +be be +ww f +n ws +super heroes +er it +sle ague +identi fication +mo tto +ba i +sour ced +ill er +ap i +pri se +unprece dented +dam as +tuni sia +dra in +undere stim +e ther +quarter ly +rewar ding +al ham +wolver ine +cab ine +hyp no +nad ine +hav ana +da e +ðŁĵ Ī +dr on +read ings +b ati +pic o +mer ci +iti an +wal kers +el ope +mi key +god zilla +bur lington +abu ja +social ism +at ility +sh ell +harry potter +g no +ab ur +re leg +fel ici +ro gen +neuro science +inst in +ath am +vou chers +j arre +fu se +def ici +monte rey +de port +mid day +pp ard +fre ed +ame ter +wil t +n ingham +pr att +liber ty +slo gan +o to +pr i +co ated +c pd +ne tt +il las +mal awi +evol ve +accessi bility +ðŁĶ¥ðŁĶ¥ ðŁĶ¥ðŁĶ¥ +or nament +b p +el is +son line +chi ro +fl ick +ib m +ar ak +en ables +gar land +san e +cu ties +tri p +rotter dam +n ys +lam ps +lu cas +bo g +ra ils +travel led +hic ks +en u +sab ha +scru b +hi er +hart ford +fo o +fer nandez +tre vor +mat tress +appo intments +ale j +fe i +o logist +saf ar +oc ta +sr c +sha un +ambi ent +dri c +bi ker +she e +must ache +h ta +bo one +her ty +car dio +bra kes +rec ital +consi sts +overwhel med +cau l +robb ins +im it +al th +ur l +bi bli +on ne +black livesmatter +diffic ulties +tel ang +tall er +ðŁĵ Ĩ +deb ating +bur rito +mo vember +strength ening +bo e +te stam +mirac les +base ball +re nee +ðŁijī ðŁı» +al fa +âĺ ĺ +unstopp able +ec s +g mo +giftide as +path way +fen cing +ðŁİ ¤ +b ham +ra s +sk o +d led +thel ast +magn um +bin ary +wil de +wil der +wh ati +barbe cue +h ism +can oe +kur di +eli ve +advant ages +mad ame +bi er +mis sing +enter tain +air force +y ama +c is +hash tags +j is +ve il +dream y +ten se +may ward +ch ateau +hunt ington +âļ ĵ +v all +up on +bl ouse +dun es +ðŁĺ ´ +fert ility +m ole +curren cies +st u +ber lin +toa sted +div as +wal t +lar k +por a +hit ter +um er +chil led +bal ancing +fa is +y in +or tiz +east enders +h ate +ur al +ap ril +tim el +à ± +per o +sto cked +respec ts +th t +best friends +giving tuesday +be ad +inv ent +im i +nap les +comb ining +tok ens +thir st +ma sc +par 
rot +sp u +dent on +* -* +t res +subur ban +wid th +si ve +con tender +siri us +lo k +troop ers +outra ge +tur bo +frag ile +me ssed +do h +disc ord +netany ahu +re sign +forgi veness +mo han +mun ch +cam ou +identi fying +enab ling +hot ter +thorn ton +jai pur +ar ya +ðŁı» âĢįâĻĢï¸ı +mu staf +maj ors +o ke +du ffy +roh ing +til t +ðŁĩ®ðŁĩ ³ +rock star +she ep +hend rix +ra v +in vention +do u +lagun a +gru mpy +sw is +im pe +) ' +you ths +bun ker +st ache +oppo se +indi es +acceler ate +ml p +ed en +w ann +k ail +akshay kumar +su pt +pol ym +midd leton +extra ordin +wil son +australi an +alumini um +way ne +alum nus +mat ics +gri m +er nie +opp a +competit ors +rand all +h ence +decla res +pre aching +sha he +can e +sustain able +stap les +le dge +ad ena +doctor al +bur gundy +decor ate +ren dered +ri sen +pr ank +di or +bee thoven +flo or +ac com +to t +ho dg +touri sm +say in +objec tive +mar kers +premi ership +en abled +camou fla +gi ant +Ñ ģ +smo key +ric ket +pan g +de pending +s ation +evol ving +inter cep +cen sus +tof the +re en +mendo za +trum pet +marke ters +an it +ðŁĻ Ĭ +north western +v la +foto gra +blackand white +che wan +wi g +tro om +ginger bread +k n +ro mero +n fc +or chi +fun ko +sour ce +f s +ra ped +o st +tar ot +ann ually +ðŁĺ ¬ +r ill +del av +.. !! +se s +can n +medic are +ph el +ape x +guardi an +rema ined +r pm +a ñ +story month +instag ood +neighb our +p ing +sem ite +my stic +as cot +mat er +hand ful +dang ers +ti d +ana heim +opol y +sh allow +nami bia +tor ia +procu rement +big bang +announ cements +prosecu tor +beng als +sal le +en roll +ga stro +sugge stion +ba k +ha ul +budd hism +berni esanders +flu te +fati gue +cyn thia +cho i +ir win +gu a +str ous +h p +ba p +satisf ying +play a +ðŁİ ¼ +inst ap +al ice +t p +irri gation +ðŁĩ¬ðŁĩ § +in tric +clu es +ple x +sa x +he pat +dump ed +signific ance +by u +medic ation +pro v +tough est +corn ish +âŀ ľ +kel ley +u v +si zz +si bling +me st +di stor +diplom atic +aun tie +b hat +son ic +bren da +pump kins +ro ch +black burn +ur ged +shi a +arrange ments +floo d +sa unders +lec turer +nou ri +popul ations +diplom acy +consist ently +ðŁ¤ Ļ +t mund +cauli flower +l ily +vocab ulary +vari eties +coo ker +up town +qu ent +mo sa +re inde +velo city +spru ce +social medi +i ber +volun tary +proce ssed +bal tic +y ang +leban ese +d p +dol ly +arrange ment +y uri +cran berry +kal yan +elev ation +cli ff +pu shes +ìĬ ¤ +sil ic +co wx +eter nity +sla ves +vine gar +glou cester +con tained +breaking news +aga inst +renov ated +norm andy +hero in +ys m +mo ds +gre ek +un di +tren ch +v h +encoura ges +head ache +gr ange +: ' +ever green +Ù Ĭ +reck on +ab used +th ru +cho ice +ti dy +col der +scho ice +ha in +bru m +li ars +bre it +yor ker +sh ack +he idi +micha els +sco pic +fasci st +play ful +ca c +yas ss +sh ad +.. ? 
+qu en +ram irez +clif ton +pr s +best fan +âģ ł +gener ating +head set +disappo intment +abstr act +bo iled +paren thood +azerbai jan +exhib iting +bom bay +oli vier +ko so +un lea +mat ernity +iz er +si ves +r hu +col l +saskat chewan +fre akin +de k +na g +stab ili +ðŁį ķ +organi zer +bo sses +ar u +u va +at able +ta un +after wards +fert ili +ver ge +az i +mor ph +๠ģภ+jer k +cosme tic +ko w +stru st +ap ache +post cards +for mul +ì ĭ +spin al +jack pot +elec tri +Ã Ń +lo y +gra der +diab lo +ar di +he sit +f w +arch ery +pa sh +the ories +repe al +re live +per cy +âĺ Ĩ +im in +syn chron +sham poo +coup ons +o to +la i +thou ght +luxembour g +mo v +ðŁĺ ¥ +ge mma +se ated +m ga +strat ford +un certainty +shi fts +est o +fo ol +fire arms +cor rie +ki ki +appa rent +p ills +olym pia +fi d +elev ated +de cks +ignor ing +av alan +ro v +whist le +p tsd +milit ants +robo tic +pac ers +quil t +bankrupt cy +lic h +per cussion +celebr ity +al s +( ; +su t +pokemon go +h g +off s +gibr altar +scre ams +billi e +gen ome +mar in +be ams +arch bishop +em in +bedro oms +g ated +ol ly +warran ty +at own +cudd les +gun na +k ic +vi ve +cy mru +nar row +pro b +le o +refe rences +manufac tured +cho pper +brun swick +sem is +don ia +r ye +man o +hur ting +? # +hol li +investig ations +c els +ðŁĵ ŀ +le ster +temp les +sto rey +mc mahon +toi lets +wo of +ï¸ İ +le verage +at om +night mares +victor ious +haun ting +custom er +ag i +yo ongi +mon ty +ver onica +w ur +inti mid +blan kets +volu tion +j m +âĺ İ +am on +jud ith +ðŁĺİ ðŁĺİ +distr acted +dri p +hurric ane +and es +revel ation +tro op +ab leg +col lin +tibet an +wor rying +inter nationally +eat er +camero on +brad or +y uk +ðŁĴĹ ðŁĴĹ +tra k +slo pes +ci er +ne a +ol er +ta ka +albi on +volcan ic +am n +a fi +ob stac +face time +ger ing +n pr +metall ica +organ ic +ðŁĴ ¡ +ki dd +d ances +pemb ro +wash er +m its +om er +emo tionally +tan go +ip o +do cks +scan ning +spec s +tho m +the ology +emer gen +om i +g pa +selec tions +un necessary +ima ge +ter s +induc ed +gi gan +rent als +supp lied +m fa +shan kar +lat er +pa jam +cla ve +Ù ģ +ma hin +carl son +avi an +ano va +kati e +aj ith +design ated +chocol ates +investig ators +gla zed +prin cess +er ry +ra gn +ou rable +hr u +sun dance +peuge ot +steam punk +gh lin +gre ase +hi res +z ap +per ce +j ill +tom e +he hehe +joy ful +mae stro +ni shed +gene alo +v ich +p its +fox es +good man +emer son +lo bes +con verse +o ats +thom son +ra him +mal ware +ah i +man kind +re sin +im g +sw ood +kin der +sc roll +ar a +sak ura +ro bbed +xi on +ny a +c ism +ce dar +be in +mour ning +tor to +heath row +done gal +bar b +hydr ation +k or +elim ination +su pdates +hill s +appe ti +star red +ko m +gw en +dd d +cra y +sc anner +personal ised +seren ity +re design +meta ph +box ed +judg ment +no se +ë ¹ +er ad +ac ne +supp liers +ener getic +v om +as ap +ðŁĶ ¸ +ir vine +hat ch +la ss +ad ren +waff les +accur ately +ici o +itt le +se un +occup y +web cam +thene w +ent es +ga i +j w +accoun table +vis or +ir rit +licen sing +hudder sfield +gen ie +ðŁİ ¾ +atmo spheric +ten sions +spart an +clif ford +ol an +north bound +ame en +cen sor +u el +ster y +$ $ +far rell +hy ster +cl t +se dan +rep lied +descri bing +micro wave +sla b +pro sp +assi sting +ru bio +e than +hh hhh +gu ay +z man +ra ise +roll ing +o e +n ile +ambro se +scar borough +hero ic +coo ks +mor t +chop ra +ðŁĮ · +to b +shav ing +stac ey +dor m +motor sports +wi ki +fol ds +sp iced +stress ful +liter al +fu dge +pe ggy +wa ite +tre sses +se sh +pr ic +ðŁİ ħ 
+fri ght +r va +mumb ai +po m +tt v +cel lar +tom e +andro id +dor is +tsun ami +tin der +o ec +m wc +dor tmund +no thin +l iti +so u +believe in +at u +kno cks +mag ni +ss sss +ro hit +ine ws +ang i +m andy +ke ttle +intermedi ate +av ant +cur l +endor sed +ori o +ur t +consider ation +wi res +shel ters +b ino +vik ram +imple mented +ly dia +bu k +paro dy +c news +under graduate +canu cks +sam i +polit ically +ro tten +gh z +tex tiles +over load +moder ni +recre ational +fli r +bat on +typo graphy +ov ation +intrigu ing +pilgri mage +al ge +ad ays +tcm party +sp elled +cur ls +boo ze +ste m +ann es +ir ls +spon ge +sho pper +sig nation +bra ss +mi stress +le ah +beg inner +lau derdale +augu st +pre school +ta ping +tai pei +execu tives +b d +rhe tor +esc or +immun o +deeplear ning +stat ues +it us +manu script +ly ric +cor vette +mol ly +la ge +de p +cn bc +le st +je ssi +fi fe +griff ith +oppo sing +ran g +dr ills +respec tful +p ity +d ell +har ding +play boy +blo ke +shut out +k ili +o sp +se attle +bc poli +mis es +journ als +team ing +es ther +fre ddy +Ķ ï¸ı +metr ics +no tre +gar ry +for ty +navi gate +perio ds +bened ic +j id +da w +ance stors +restor ing +con g +aller gy +tit anium +c ence +lean ing +ab bas +v ast +uc f +roof ing +e man +seve rely +vo gue +ve au +in bound +d z +tane ously +stret ching +man chester +dr yer +dav is +kan th +the game +it ted +re tain +el les +conge stion +frat ernity +ol lie +lo ki +fre ely +cho o +pon y +sc ep +tab ly +bal t +rock n +di me +lo gging +ðŁį · +ad u +ha voc +water ford +char is +swee tie +run ning +ner d +erdo gan +z ara +weigh ing +fif ty +pre cise +low ell +kurdi stan +r yo +or th +syn th +lin ers +phenomen on +art illery +il legally +constru ct +nostal gic +gar th +al ta +shel ton +a sean +w ander +dur ban +di versi +bon o +cl on +le man +sh un +obstac les +appet ite +fe eder +respir atory +di xie +formu la +an to +so ber +extin ct +au c +ing les +legitim ate +; ; +min nie +ipsw ich +dram atically +ðŁijı ðŁı¼ +ingh am +milit ary +mon et +us navy +for k +dun no +play er +q otd +st oo +ex or +ethiop ian +film fest +pe red +c ate +sau di +in ner +sin cere +tion ality +ale e +de eds +cooper ative +ir onic +cro cod +br ary +post season +cam per +can ary +e in +exten sions +nb d +sher wood +spo kane +hu mp +jit su +ê ¹ +dar yl +p si +stab bed +offer ings +expe cts +cav al +body building +fr aming +f ca +ye arly +bom bed +sk il +resear ching +jud iciary +gree ted +tu dor +mil o +innov ate +ðŁĺ Ľ +r hs +ru by +contribu tor +fam er +soci ally +m lin +fi ery +ut ter +beau t +it os +de voted +rain bow +bar ney +pe ren +ar jun +r na +gab by +ut i +hann ity +pick le +ser v +qu akes +pp e +fe m +wh itec +j n +victor ies +ðŁ§ ¡ +gol fer +congratul ates +resul ting +mechan ic +ur ve +cen tered +kie v +an s +in cub +< < +c mo +bestfan army +dap h +en ham +on cology +ku sh +t xt +ori ented +fashion able +c sr +sa hara +r ack +pd p +han son +ภĩ +ti ers +ra r +pan am +in sky +sa hi +testam ent +asth ma +in her +fisher ies +or der +ho we +gall on +ep is +suz anne +drow ning +paneli sts +ðŁĺ ² +ë ¦ +al ach +commemor ative +at tribu +ðŁij » +mo o +visi onal +week sary +gu st +ak in +poin te +ee e +di spar +ni pp +dent al +st all +pi an +bor e +ul ster +tic k +ir r +tae hyung +micro phone +bermu da +ga ard +el er +plumb ing +hu gely +âļ« ï¸ı +race way +cam bridge +mar cel +burn ley +to ast +holly wood +fa sting +me red +hib ition +ca pped +benef icial +ow ning +cont amin +arab ian +to on +cap ac +hul u +sm ir +nutri ents +se in +graph s +con ditional +ðŁij 
ħ +or ac +play in +nor the +tor nad +mar ian +ju mbo +lex i +incredible india +road to +uk one +confu sing +sp h +shan k +pi ed +mq m +positi vely +sher ry +path ways +consi ders +tof u +argu ments +resil ient +che tt +with dra +ter o +ated ly +sw ana +he b +fli ght +har ley +decre ase +kind le +book shop +³ ï¸ı +marty rs +sm ur +mc cl +concer to +sti me +rejo ice +app lau +cle ment +mer kel +jai me +im mortal +isle of +mar co +youtu ber +stal king +me too +st ack +sp ouse +u st +lu v +âļ¾ ï¸ı +eque strian +ev ing +fl in +nick name +the big +as ar +st acks +wal ker +bor a +kidnapp ed +hur ling +humb old +rec alls +co pper +ann is +se o +mer ger +mu ir +ad dy +ðŁĴª ðŁĴª +be x +cr acy +con an +congratul ation +mid st +âĻ ¬ +for bi +op tic +cr ate +crocod ile +mad agas +secur ing +ast on +o gue +savi or +salis bury +love it +fuji film +cast les +as st +ar rows +sp acious +tr s +poly vore +progre ssion +m ri +nel son +bi m +indic ator +o da +pe pe +re signation +gu t +sne aker +log ically +az y +are lla +te aring +jo shi +ssion ism +q pr +mari ah +p x +ble ed +mi an +med ley +we iss +ker ry +gat ory +at al +madi son +av enger +nab y +pl and +gi les +fresh water +d ington +ta j +demonstr ates +n tv +bul bs +sunday morning +pe ake +souven ir +wa h +ton nes +m kt +complex ity +con den +ross i +b ing +y ds +su k +n go +mid land +ol y +life is +ri pple +mo reno +dd ers +tu s +á ĥ +bou l +x a +hol dings +wn y +shadowhun ters +ke i +asp ire +m ous +ow en +so ak +skir ts +moun taine +stor ming +ch rome +ri ots +sar ato +amaz e +less ness +nav ar +crit eria +ra fa +indul ge +ay er +por to +nam o +........ ........ +yi elds +val le +j h +mac ron +sa ins +dur ant +tra ilers +wo t +confeder ate +sh rin +id ol +form ally +ten e +motor cycles +than g +no de +bang er +dal y +p ats +enroll ment +au ctions +at al +ar bor +lo gos +de arest +trans action +dom ingo +fle a +ser mon +de ck +sin cere +questi oning +juli o +was p +pre tz +armen ian +k ham +inflam mation +picture sque +acci dental +film makers +ðŁĺ ļ +ðŁĴ į +ca sey +so b +yee zy +good will +parag ra +ss ly +fe ather +dy ed +assassin ation +na de +b cs +app lies +femin ine +fe u +ext ent +depu ties +l ack +psy chic +go i +kill ings +pse u +ðŁ¤ ª +un c +mar l +tan e +mck enna +sur fer +influ ences +free way +hack ney +mal aria +el and +te au +rema stered +Ø ± +raz or +gg y +cor ro +lak sh +fla ir +honest y +hoor ay +de pp +am c +wedne sdays +q a +ed its +- $ +se villa +dou bled +human ities +c cot +som os +r ine +af a +si oux +re construction +wel ding +th reads +am ish +encoura gement +po der +bo ck +bal m +p tions +stand up +accompli shments +guar ding +convic tion +ac ion +napo leon +depic ting +att ack +su i +wear able +âĸª ï¸ı +pot ter +esc ort +vis e +to ts +bo on +event profs +angu lar +womenshi storymonth +bar row +sch i +ac comp +ti k +l end +kensing ton +wol fe +st acked +cra shing +exhi bit +wing ed +sab rina +ma sa +k ms +alway s +et t +pla sma +counsel ing +pick les +nfl draft +mr s +inev itable +coura geous +staf ford +writers life +ho s +e j +gh yun +trade mark +adri an +influen cer +coron ation +ra ging +explo red +usa f +excep tion +eu x +tan ker +sw ami +pac ket +ðŁij¨ âĢį +f en +she en +a ero +j l +re gal +nw t +au ster +meh ta +char ge +a ste +b ate +inf eld +racec ourse +collap sed +fle ece +z il +al lie +alternati ves +geor ges +ðŁĵ į +quir ky +fc b +nat geo +philanthro py +bra i +every day +ðŁIJ ° +ach ers +ja an +fin es +q i +fisher man +distin ct +gri mes +nation alist +comm ence +ro wn +âĢ ³ +z ing +f ter +hr w +baro que +bl 
ender +kitt y +hoo ks +c ited +w anda +consen sus +reinde er +an and +supp ly +me ds +v n +ol ph +rat chet +shel don +secur ities +ë°© íĥ +cro m +mosqu ito +j eric +im mac +dimen sions +â ¤ +di ssi +sponge bob +dami en +steven son +jo anne +del ish +yi kes +than x +surve ys +postpon ed +alco holic +al ised +ðŁĻı ðŁı» +do ch +sen tim +mered ith +com pares +b ago +happy days +mo ss +ãħ ĭ +ne c +gn ment +frustr ated +comb in +ri v +ec lec +col lo +compli ment +actor slife +ct to +nic ar +op hon +apar the +man t +ja de +trol ley +optimi zation +eye on +eco logical +qui st +ep he +ॠĩ +cin co +appo ints +old school +c pr +behavi oral +min aj +:- ( +tag ging +ev al +jo aqu +ðŁĺ « +ha k +de me +jama ican +so s +hy att +hand book +libr arian +hanni bal +pump ing +ch om +f man +ga i +hu ll +respon ders +green ville +n us +vau gh +ðŁİī ðŁİī +ta xi +gold berg +man tra +te ase +forbi dden +metho dist +ati vity +* *** +ec t +mc gr +Ħ ëĭ +se b +amid st +disapp ear +thy ro +phili ps +er ina +v icious +stream er +million aire +ma p +str ick +hack athon +gh a +ed ic +mi ka +pe ck +ill i +anto ine +ar ca +op tic +ma ure +ðŁĩ¦ ðŁĩº +cla shes +man ly +âĺ ģ +al var +and res +me i +el m +ww ww +al tered +l te +ê¹ Ģ +mo jo +for rest +thal ai +non t +spee ches +acknow ledge +ign ite +x factor +ðŁ¥ Ĥ +mead ow +disru pt +debu ted +scrim mage +pharmaceu tical +fi dd +found ations +philosop her +et al +publi shers +bo ys +c ke +ru gged +opti mism +re be +phil harmon +nar cis +ral lies +lu is +go blue +fol ded +un acceptable +optim al +li sa +pol aro ++ . +en za +âĿ £ï¸ı +mon opoly +grace ful +dair y +du a +diffic ulty +judge ment +o si +mer sey +flu x +new found +ter ns +dimen sional +in vic +al ba +am it +abudha bi +alger ia +autom obile +the ad +lo tion +acceler ator +vac ant +iti on +lu f +al ic +pl l +bla zing +ba z +sen e +ðŁij ¼ +villa ins +direc tory +eis en +to ck +broch ure +ri pp +hb d +zayn malik +nic he +lo lol +certific ates +mor se +fac up +x ham +un wanted +im ports +carne gie +fan sign +mo u +r alph +destroy er +sw ing +trek king +cili ation +pit bull +g aps +ho well +defin itive +mc le +f ps +et z +bol ly +lyn n +gan o +at ure +fur suit +co il +na v +but ts +tro jans +eu re +en ko +sch umer +horri fic +install ment +br b +subur bs +a bel +vi r +de sh +cun ningham +ðŁIJ » +span n +sch we +ke mp +tr u +ste alth +qu es +le w +deli ghts +ko ch +hu mili +cr iti +il t +sp ells +mi ley +car ic +ðŁį ´ +lc fc +substitu te +oun g +? !! +af fir +predic table +class of +er r +cy press +chand ra +age ing +__ __ +ther land +don caster +el in +yo shi +sail ors +har ris +jo anna +niger ians +h ers +pla gue +pro cra +k no +can ton +busine s +un h +pra kash +c in +bow en +co ating +m als +be gging +smith son +ponti ac +sp ies +dam ian +pl ine +und ant +al ta +one ss +shame less +da q +bb m +wal es +stam pede +ser um +Ù Ĩ +cataly st +x n +ab sc +free zer +ch un +ari os +mc cre +fore head +he ars +damas cus +tac oma +ardu ino +encoun ters +stan ton +lg b +ab as +" .. 
+ke te +drac ula +ele m +g ne +zepp elin +la brador +pul p +op tional +or n +russi ans +san itation +hil ary +etsym ntt +pen alties +au st +ig ans +olympi an +medic aid +vers ace +va pe +re stra +pe ep +sexi est +st alls +di le +the a +punjab i +pupp y +tuesday motivation +ðŁĵ ļ +the flash +roc ket +mo dest +chihu ahu +on na +k sa +hur dles +ca ve +fail ures +sp lit +bo ho +gur l +disappo int +ho ward +nug get +fran z +stal ert +kaz akh +for getting +sch ri +ag ate +am at +eve rett +du et +veter inary +juli an +ch ills +bra ve +ghost busters +lan do +gre ets +profit able +d é +ti r +ze e +om en +pd x +gray son +har i +fix es +stab bing +swim mer +symb ols +compli ments +po se +func tioning +th nx +gi r +corpor ations +bar low +lo e +off season +distin ctive +marvel ous +nik on +enri que +ky u +ja ws +amo to +lom bar +travel blogger +fa h +ouri sm +tri stan +so e +ce ase +ðŁı ħ +z ac +mck enzie +taxpay ers +swim suit +bl o +les ley +kan sas +w ks +ki el +provo king +my les +str ing +kangar oo +galac tic +fif th +s ke +we ir +ll is +mat ory +ðŁĩ ¿ +un ci +re productive +roo ting +ti des +gad get +.... ...... +alex ander +bow ler +scre w +apo log +eri ka +wal ters +shet ty +lan e +ban ter +as ant +me so +v ain +" "" +us i +fer din +accomp lish +man sfield +bom bar +collabor ating +cla p +it ure +s da +smo ky +na k +im person +car la +com ra +bur gl +lo co +ti es +in hi +trac ey +se is +diss er +rr rr +dra y +prote ct +cor ona +hun ger +ck en +c eli +trou bled +predat ors +fic tional +shav ed +riche st +metab oli +ful ham +gro oming +mono chrome +wa sting +as co +ast e +ti sta +remedi es +ung soo +south end +perman ently +bu mble +procra stin +ident ical +practic ally +ma scul +su ke +assu red +val erie +devi ant +grizz lies +thi er +pur a +ne pal +not ts +bil ateral +spo il +car mel +cine matic +ph l +ni fty +ma o +hypo cri +la ser +pan try +mathemat ical +el isa +coordin ation +bel mont +a it +radi ant +bo iler +man g +f ag +cr c +h ams +br in +â¬ĩ ï¸ı +famil ia +âĿ £ +sab er +ru pert +gg an +rit z +mic h +sal ford +le vi +gra l +ðŁĴ ¤ +n ino +ce d +business man +ul tr +sim ply +compre ssion +pa ins +hal t +ë°©íĥ Ħ +landsc aping +n f +croo ked +er d +itt in +ddle ston +sur passed +ino a +da g +bl en +exten ding +at ing +al gae +ball er +u mar +snoo ker +col lu +flo wn +thu b +ridic ulously +ki sh +op le +di re +as ser +ari sto +sc iss +h ating +trou ble +syl via +suc cul +plo ts +sincere ly +al er +laure ate +br ack +att n +rif les +me to +collec tible +cu omo +conte stant +consist ency +ant z +rang es +abig ail +de b +mini ster +grow ers +an oo +hoo ver +dream er +nu cle +resear ch +mi y +sha hid +ma v +d honi +cin i +do j +hin dus +part ying +dal i +alon so +inform al +clark son +it ton +ki an +cit yo +mor i +la sted +as pen +libr ary +susp ici +qu at +den ial +fol der +ch ori +swee ping +eni x +ðŁį Ĥ +Ø Ń +nas car +handmade hour +mou l +heat wave +em er +exam ine +ib n +gr ind +po v +tion ist +m bo +she ila +integr ate +om es +take away +cer v +con nie +tic ket +ce led +bi en +visu ally +madagas car +sor ry +gu i +park run +tra its +la be +pois oning +ॠĢ +vi able +bohemi an +denti stry +bad os +spr outs +mask ed +te ddy +ðŁĺ · +sa f +sa as +ji ang +ti ght +spe aker +withdra wal +bc n +as signed +class rooms +fle ming +ðŁĴ « +super girl +tot als +table top +e books +horizon tal +cra z +flu sh +j ard +c dc +er son +ãħ ł +green wood +ni h +co x +ad a +lit re +go ing +v icky +cur ved +lou ie +gra ins +hy e +lon ge +reme dy +tra inee +san jay +super stars +ma ser +man u +s age +wh l +ðŁĺĤ ðŁĺŃ 
+ðŁijį ðŁı» +m sd +en z +rab hu +j oo +gh u +ac er +e po +resurrec tion +justice for +bl ended +mo da +avalan che +france sco +re spective +g s +ye ast +wel ch +devo tion +ge tin +athe ism +am ic +carol yn +lo c +ld nont +ave c +us da +le gged +bra very +b lower +cow boy +he h +sti ble +buff al +chann el +run chat +âĺķ ï¸ı +ide ology +best seller +y oo +pe anu +bon ne +fel ic +edi son +fr actu +naren dra +pp ets +seym our +ri viera +he ctor +necess arily +bi anca +soci eties +the best +w g +sent ences +win k +vacc ines +pal ooza +jam ming +as f +mp us +agre ements +ec k +ba c +hon ore +com pul +wild cat +im posed +yo ga +hud son +can celed +l ich +fu zzy +es que +ch uk +w vu +se k +fli pping +r hon +wi shed +wh a +cap ability +len ovo +ìĨĮëħ Ħëĭ +vi vo +tv d +nor a +sil k +pas adena +yo semite +valu ation +clo cks +u ber +mr c +dar kest +au bre +ss o +bell y +wrest lers +kill in +lou der +buck ley +ge el +ad on +un s +appe aling +ðŁij ¯ +semit ism +list ens +fit z +ãĥ³ ãĥ +ny lon +ar ty +seem ingly +hal a +su ited +et y +she ds +mu ffins +ap ric +um ents +u ta +jam mu +chelse afc +star z +yo ko +roo t +clean sing +di ar +pione ering +ihear tradio +dig iti +fin dyour +can o +ðŁĴ İ +z ol +spac ecraft +six ers +moi sturi +b ile +ti sts +hor ton +rang ing +colum bi +mete oro +senti ment +ep l +foo th +text book +drain age +r ly +sc ue +imran khan +ðŁĴ ¸ +margar ita +ed dy +predic ts +gamer gate +advis e +growth hacking +love you +ug and +v f +beng hazi +s later +ne wor +ch el +independence day +p np +cul len +hoo dies +num bered +brit t +t sa +kl tu +s ages +mom o +onep lus +col l +gu ts +w ta +mesm eri +enh ancing +chiro prac +j is +teen agers +m one +constell ation +sweep stakes +e ze +slovak ia +la ye +pear ce +wa ver +po gba +k ron +sur geons +mar x +ti d +gg a +desc end +p ours +upri sing +wal la +sab bath +bachel ore +mack in +k am +peter borough +hor a +ðŁĮŁ ðŁĮŁ +think big +r j +hy drau +sp al +univers it +ðŁı ī +mail online +league of +ten ants +w ally +lan ce +heav ens +dd r +bol ts +am ir +i phone +ci gar +en du +re i +el abor +r inging +john son +characteri stics +sal oon +algori thms +tal kin +m tn +di ve +region als +ff ice +hat i +deviant art +so tto +shir o +l ama +k we +f aded +por ting +tu mmy +est ates +buen os +ðŁ¦ ģ +beli ever +pen etr +dar n +sp ite +can opy +fashi oni +t illa +pet als +eli jah +bra wl +marty r +ë°©íĥĦ ìĨĮëħĦëĭ +mid town +eric h +d apper +sm town +me gam +ww w +le le +on s +cat fish +fir th +fossil friday +ball park +th aw +pot ent +illi e +cre ep +car p +so ap +gun dam +infe c +yy yyy +ठ¨ +z ag +rit t +calcu lator +bo ca +ok o +to ad +threat en +refin ed +olym pic +accompli shment +bacter ial +a ji +tat um +feli z +she ed +j at +th ic +jam al +ðĿ ĺ +lin a +ðŁIJ ¯ +jo king +yot po +pin ch +ak ron +her b +motiv ation +li a +ho stage +cre ek +gam ble +russ ell +patt i +fo tos +c pc +bro ken +back the +cla ys +u mm +stock ton +mat ernal +ü r +la kel +cent ury +be k +infe cted +ภ¡ +smack down +man ned +ta hoe +sm es +bas a +su la +augu sta +. 
* +rohing ya +gre ed +counsel or +silhou ette +gra vit +cla use +' - +bo bc +occa sions +now adays +dic tat +be ard +n ally +brigh test +kab ul +inc india +dhan ush +archae ological +che ape +mizz ou +d hi +ov ski +bax ter +asse mble +à ¢ +gi gi +ac am +wis ely +haz ard +north ampton +âľĪ ï¸ı +me th +bla sting +re unite +mu lus +ali zes +t read +mil a +ed ward +ko va +pe sto +ðŁij ¶ +vit z +hydrau lic +refurbi shed +mo tel +isab ella +hom me +sever ance +uph ol +mis erable +f ari +lat ter +ef er +crack ers +es l +ac io +yy j +in an +ec b +z ind +pan as +tru cking +re ed +sh aker +burge ss +em pire +ag nes +n ington +art works +fr s +ti le +bi ome +eu n +ch ong +americ ana +god father +go blin +i shi +! ). +temp ted +gen omics +mand ate +ck y +ðŁĴĻ ðŁĴĽ +som ali +br andy +in ven +spoke sperson +pc b +yu an +h g +fa z +starwar s +ro wan +blue grass +don g +d day +trin idad +er ton +ban ning +re tention +cu red +tober fest +re set +we is +deta ched +behindthe scenes +immun ity +ph a +bra y +ðŁij ½ +ran cho +ram say +est onia +nd tv +] . +cab aret +tar o +d v +show cases +plu m +ðŁij ¸ +son oma +pre pa +memor ab +e stu +drive way +u les +magn us +x r +nn n +much as +en ge +stre amed +fore stry +audio book +tro y +reck less +kil om +ru ler +ra k +proce ssion +i ons +po ole +noc tur +wh s +farm house +per a +par me +hypocri sy +s ics +v ant +cas k +holi stic +au st +Ð ¿ +in do +ðŁij© âĢį +di so +disp atch +ol sen +make it +en nis +cent re +ar range +ðŁĮ ¼ +sal ted +ea siest +f ate +reg atta +mo zz +ac an +sin i +g ically +ch ops +chick en +work in +ha gg +invol ve +wee ds +book day +wake up +ky r +michel in +fu ss +re juven +vac ancies +incar cer +m st +sc ents +sovere ign +kick er +à § +bo d +âĢĶ > +sa h +mob il +shrop shire +oph one +dress er +mis suni +hep burn +i mo +foli age +diagno stic +as san +cycl ing +guil t +c sa +puertor ico +win elover +wake field +do ggy +k he +pa pp +co g +al lot +cu ck +poe tic +mi o +re vit +mag ician +ç ¥ +ant enna +west wood +mber g +lux e +oat meal +Ø ¬ +te at +ffe e +sear ches +l ly +plu to +el on +let tering +inno cence +fa i +ann on +telang ana +ma it +neu ral +can ni +ar oma +a stor +fe x +co cac +mon etary +f ent +un sure +' @ +indi rec +teh ran +isol ation +li bs +make up +merce des +ff y +he tero +de o +sco m +cur sed +veteran sday +franken stein +shre ws +de co +ge ese +lefto ver +ha did +vari able +acade mics +carol in +under going +vari ation +na h +ssi er +gamer sunite +pur suing +emer ged +ll ers +control ling +ro aring +mete or +vol t +daw gs +be aver +is life +bathro oms +aci onal +pre vent +lake district +in als +y ani +gra bbing +sac ks +le z +sw ay +k ool +time s +klo pp +la de +con cord +resul ted +revi ve +recon ciliation +ol and +az z +gir o +mand arin +de en +nutriti onal +is coming +van i +aw www +der ived +love your +stop the +shou ting +nov ak +ðŁĻĮ ðŁı¾ +lo af +displa ying +sunday with +ma guire +ch eri +ðŁı Ł +re match +qu ic +Ú © +y in +ðŁĺ ¹ +ili ve +z ip +our ke +down loads +sw at +missi ss +care rs +t ment +proper ty +hahahaha haha +gi bbs +sur rey +ar ise +tic ism +sti a +ir ling +fro g +co se +bas sist +fore ig +lea u +pil lows +hol la +eli e +disclo sure +peanu ts +inte ch +ww c +plun ge +trium ph +cor i +sli ppers +ðŁĻı ðŁĻı +neutr ality +ma re +hair y +gang ster +hu mming +cust ard +mer lin +ale a +s by +dam p +mo han +ver bal +j st +gu tted +b jor +un finished +ðŁĩ¯ðŁĩ µ +un happy +âļ« ï¸ı +by pass +at su +fis cher +sa v +afric ans +re use +mid way +demo lished +ger rard +her cules +Ä Ł +medic ines +cl icking +sur round +jo 
ong +wav ing +tri bes +wet lands +offici el +argu ing +l le +do va +su zy +club house +ne gro +ob tain +ga o +gl ance +assi st +ch os +ãĤ ¢ +âĺ ķ +adri d +occur s +st ans +par don +livel i +emplo yed +re visit +ff xiv +bb le +ne aring +min er +ðŁĺ ¹ +giov anni +up to +mar vell +mar se +to wels +cb n +engine ered +y elling +spart an +si ans +ðŁĻĮ ðŁı¼ +se v +coyo te +sta di +t cm +app en +shenan igans +open access +so aked +ma squ +le vine +stro kes +l k +aparthe id +hipho p +char don +may may +ha asan +stri pped +fr o +scri ption +f ton +h f +pri sons +marsh al +ķ ãĤ +an cho +com promise +classi fication +buzz feed +bblo ggers +deser ving +) / +s way +ob o +camp ers +poder nfamily +p oured +bri e +squir rels +se ize +: # +le k +ti mb +st acy +nas daq +repe atedly +br at +mi ghty +competit or +mah one +de si +o ke +bm w +shi e +f cb +cheape st +minim alist +par amount +n ate +har as +insan ity +lat eral +ment ality +mo zam +ta pped +yad av +u sp +b way +the od +bil t +ra ids +em press +adap ted +pat ron +nut shell +ag ra +be aded +sundaywith marsha +vi king +proce ed +main tained +thinkbig sundaywithmarsha +sn es +mus ica +to wer +ch ab +bo k +sm t +insul t +harve sting +windo w +ru ther +be ige +dec al +indic ate +ma iling +ri ft +po le +ander son +ch oral +sp ride +l ili +ev elyn +imrankhan pti +.... " +ke red +un dp +water falls +se ars +le mans +world series +ri el +ani e +app ar +score rs +lam p +a than +phys icians +qu inoa +refu sing +vu itton +unle ash +s la +pat i +shou ts +inten tions +fo amed +europe an +neighbor hoods +me er +man son +du h +br at +con es +bow l +kazakh stan +ठ¿ +in appropriate +del hi +ketch up +ful ton +s ys +consul t +gar field +to go +f ml +f led +b ds +facilit ate +ree bok +selfi e +elev ate +activ ate +bi ble +ca wx +b ys +cam ille +sy ou +sk ool +her t +w bc +ple dges +recor der +po sh +ac re +so aking +mat il +v sco +shoot ings +pla r +e con +ðŁĻĮ ðŁı» +rashi d +u bi +ðŁ¤ ¤ +sw inging +wi pe +rap tor +m su +music video +dur ham +at tic +apar ty +fe tus +activ ation +aa z +motiv ate +ðŁĴķ ðŁĴķðŁĴķ +j al +ठ® +ag on +sche er +stal ker +fo ster +az zo +tele gram +vi gor +s laugh +screen shots +entrepre neu +kri stin +inten tion +ch illi +fr action +don a +ge a +tc u +s ite +la k +em il +d nt +bor o +wil kinson +re cu +ato day +t anya +bl anco +cd n +brilli antly +g cc +ac c +evacu ated +ther ine +den ny +cait lin +she pard +pou ch +hand held +sou theastern +ha a +à ´ +re solutions +led ger +sr in +r ar +shat tered +chim ney +im with +mete or +hand led +ra ke +town send +en han +shi py +duc t +tw x +inflam matory +war hammer +theat rical +gro s +sk ar +sco tty +ni el +tit o +tin i +conne ction +_ . 
+goldeng lobes +sha q +ðŁı ³ï¸ı +hall way +fron ts +effec tiveness +gla ston +d hs +ex pi +to h +c pl +sc s +re o +ha g +resemb lance +hor an +abu sive +qu er +virtu e +cho lester +a q +shan e +m ce +carri ers +di stress +re wind + ¡ +voo doo +int act +ann o +ðŁĺ ¤ +pi led +adi a +ãĥ ³ +en ow +di gs +light ly +goo fy +turb ine +governor s +con te +re open +pa h +i ve +cra fting +swee ps +jo di +an de +zu cker +kaw aii +o ko +v ai +out line +kri sti +ts n +insp o +qu int +fil thy +lyn ne +listen ers +depar ting +or d +t weed +, & +ale k +sel fish +nor ther +recogni zes +i ps +be s +a ed +w ills +pe at +surround ings +mon uments +ais le +be cker +la v +quant ity +v ah +helicop ters +tu cked +alv arez +sha pe +o bey +ad diti +road side +m ite +bl ers +ep age +j au +ignor ant +b ins +lu lu +x o +c fo +ee eee +apprentice ship +shef fiel +to i +ho k +faken ews +deplo y +aid an +husk ers +ãĢ İ +west brook +mi ster +confi gur +car r +fic a +proceed ings +ha w +ste ak +mur derer +pay day +a jo +p vc +don ates +bi af +nom nom +be it +k ali +x rp +ahmed abad +se mic +che y +x tra +an twer +head lining +squ ares +roun ded +flu ore +bol d +disa sters +am oo +gener ic +cran es +brief ly +gi g +auster ity +anticip ation +for ti +treas urer +cann y +ce cil +dete cted +check list +ภ§ +pam ela +bar bados +an field +hear ty +tx lege +peren ni +arro g +ing ram +âĹ ı +ty ne +spo on +r ation +am ba +m be +cam el +h hs +york shire +reflec tive +fre aks +to k +ju do +partic les +du bs +ban jo +accred itation +prover bs +over dose +inte gral +gu ang +mc s +super car +af b +al vin +ail s +x tre +st aging +tw ent +rabb its +mar o +inste m +dol l +cr ay +sant ana +ble ach +mini ons +che ap +man t +di vers +catal onia +lo is +mat ri +cou gar +kay ak +e gre +p so +a ia +å ® +char lton +tr acked +sc ari +pe tt +f wd +x in +gra vel +br ic +bigg boss +ar den +hu gging +pal ms +st v +li mb +the movie +handic ap +ri me +z ai +stu b +indi a +lithu ania +rhy th +p ita +maced onia +high ered +brid get +schwar z +ske let +hi kes +ant arctic +c ps +mash up +Ð ° +n ell +chand ra +he ir +an us +sher idan +mi mi +muse u +bec ca +an ir +bar rie +dioce se +compar able +ðŁı³ï¸ı âĢį +yuk on +me p +hor mon +mer ic +al f +con quered +christ church +ðŁĴĻ ðŁĴĻ +hazard ous +poo h +cont ing +retro spective +par ame +na ir +con sor +ho tra +astoni shing +cater pillar +u man +ti sm +t vs +serv ic +croy don +mor ales +c g +cu m +te ur +scan ada +s all +magno lia +el ise +th our +à® ¿ +ag omez +phel ps +ë°©íĥĦìĨĮëħĦëĭ ¨ +wh os +weav ing +si sd +pro poses +cro ws +pre sale +econom ies +bernar do +sha hid +air show +mc cann +hor ticul +nr l +du el +mongo lia +tou lou +requi rement +struc tured +ed i +o lives +he a +cu ter +Ð º +enthusi ast +harri et +domin ion +sub mer +ðŁį ĥ +sa ab +nes burg +mo ff +def ended +bur t +rewar ded +gold man +op tics +khali d +house holds +buc kets +ce cil +che ss +substan tial +ef l +oper ation +evalu ate +st n +rece ssion +l ll +tom as +tru ths +ak bar +s words +p act +embarra ss +ha o +ay urve +scrip ture +ny cc +op t +di ameter +sc ented +organi zers +re lat +ha e +dream ers +de se +ðŁĮ » +restric ted +n ale +r hp +dol an +mun ster +ha ired +consult ants +jo ints +hu mil +d ill +relent less +t é +af il +ut ilities +japan ese +condem n +pet ite +colli de +q f +peach es +cou rier +l ore +âĺİ ï¸ı +reli ability +ch uk +ðŁĻ ĥ +stu res +ge ther +ho stel +bi er +- _- +â ĩ +e ze +ta ilo +di ent +blu ff +chu ffed +pil ip +mon arch +e em +bu chan +b ick +op au +ku ps +ภ¢ +pist ons +sp ins +m and +ce st +bur ne +v ile +cher 
ries +bec kett +need les +pan ch +ë Ĥ +haha h +trou bles +insi sts +do you +g mc +mor tar +deleg ate +in n +g anda +sin atra +ठ¤ +spee ding +pu pil +pre mises +ali gnment +pi kach +as us +j alan +Ø µ +lime stone +fol kl +parme san +ce il +mo y +shawn mendes +ac up +hu st +ot es +med ina +ma di +gta v +censor ship +ar g +swe eney +sy kes +col o +foot steps +cann ed +adv ance +gta online +healthy living +ðŁį ¾ +a ig +p ality +oc s +he brew +im minent +berk shire +jeremi ah +out going +bak er +entr ata +ma ids +gro ves +bo c +a del +m fw +con science +arm ys +nut ella +conte stalert +novel ist +la h +ban ker +marque z +ðŁı ¡ +to ff +out age +gr p +ðŁĺŃðŁĺŃ ðŁĺŃðŁĺŃ +musc le +du dley +nvi dia +mi di +m uni +ess ays +dat ac +car ter +ภ£ +t ans +i ves +public ations +al er +ok wx +il u +cu tt +har p +out law +luther an +br ill +bo lic +do well +green land +be sties +path i +pay ton +gue st +har den +ðŁ¤ © +ann ed +evacu ation +po ised +mc der +b han +o i +envel ope +ci d +ca vi +ta pas +book review +grey hound +âĻ ª +fe ud +lun gs +for te +rai der +ff er +oni x +dep end +yn wa +rel ating +de vs +ðŁĴ IJ +acqui res +d ha +j yo +priv ati +can ine +k b +cra b +sar din +imag ining +k j +em por +down hill +ne z +ta eyeon +nick imin +gb p +à µ +w ap +sec co +ma shed +ðŁĴ¥ ðŁĴ¥ +augu stine +diss ol +dic tator +â ĵ +vi per +ed fringe +vau x +hard work +book let +no x +chi ff +ðŁĴ ¨ +observ ations +xbox one +u sher +ke er +lu p +dal las +cal gary +ma dra +di ous +k bs +wood ward +hero ine +lu mber +sea world +o ws +mc ke +maver ick +gu la +cross roads +fan g +s ade +nik ol +chee tah +me c +pp g +er ick +ðŁİ µ +tox ic +bj j +viol a +sp ire +ch ino +tra vis +institu tional +ha as +low ry +w ac +ea e +hu mid +mp ton +ru ck +je w +c ine +zim mer +se f +bhar at +fre es +aam ir +ðŁĴ ħ +z inc +wan e +multi player +royal wedding +e el +preci pit +qu ery +kimber ly +isa bel +ful fill +ig an +vau l +pan e +sc y +dig it +gun n +u tah +dog day +fi on +xia omi +da c +el ast +cha vez +ro blo +g ine +ten th +ab h +ke to +hur dle +na dia +memorab ilia +ha bs +qu an +h w +hv ac +pix ar +ec cle +kram er +accu ses +ðŁĴļ ðŁĴļ +per se +mean time +wa hl +atle tico +âĢ¢âĢ¢ âĢ¢âĢ¢ +ott oman +no vo +k us +conne cted +tru sts +d mv +spen cer +rahu lg +do ve +sto kes +bolog na +enthusi asts +à ª +rockstar games +ted cruz +du ras +s acked +late x +immer sive +cer t +lu cin +princi pals +fa res +sa ils +far n +am ent +saf fron +quent in +check point +fer ris +ex cur +ðŁijī ðŁı¼ +bai ley +se h +ter re +mad am +s band +wan derers +cumber batch +yy c +digit ally +blackandwhite photography +roll in +moroc can +ðŁĮ ħ +din ner +d well +to om +m ye +ez ra +cp fc +war hol +me er +jon ah +no aa +s gate +so on +secu lar +g ating +ti o +dri ver +si ssy +assan ge +ta th +ed mund +bobc ats +ra ji +po stage +stu ds +m gm +kat o +edin burgh +meet the +shir t +fa a +mens fashion +sp reads +wi m +car ts +phoe be +j ars +bot swana +Ù Ĥ +ed war +sk ar +ri ve +gu sty +c tv +ferdin and +su therland +nickimin aj +k v +si us +bee ch +re z +desi res +on ial +camp o +quar ry +lor raine +gil more +ig gy +µ ï¸ı +ho pping +avi z +ðŁĮ º +uni sex +dedic ate +att itudes +ste er +jun kie +rail way +y b +whi sper +key an +k us +ju g +di x +a ins +sum mon +ov ich +sy ed +her ald +ma ison +me ded +wild flower +main land +ri sky +ru kh +over looked +ki c +destro ys +nam an +ki p +z ano +champion sleague +ban dit +quin cy +smi le +cal vin +open ings +ta pp +ol ulu +spec tro +accred ited +ap k +pra ised +bar nett +pol len +premi ered +selen agomez +tou red +screen ings 
+uu u +mis o +en se +adam lambert +guel ph +har yana +hu tto +le ar +l tc +po ached +brex it +æ Ŀ +tt c +pa vement +mon gers +ro e +ad ers +ling ton +particip ant +ca red +ga il +y ates +lan tic +dash board +jo o +feli pe +ssi onist +bu m +s end +a eri +thu gs +luci fer +a he +dete ctor +fil ly +gas oline +ham per +hump day +the ta +the band +fore casts +o hhh +lo bb +hol l +cp u +az u +ad ar +hai ley +bu b +car t +quo ted +an archy +pan cre +twit art +al den +st ash +the less +or ni +belie bers +mor mon +partic le +avi ation +⬠Ĩ +webcam toy +sad dened +cru is +ham let +n ct +roll ins +marque e +saw yer +reli ance +a ura +di ec +soo thing +sig nings +ak is +à ³ +at kins +aer op +ðŁĮ ¿ +y ab +sh ari +con nol +du bbed +manufac ture +convin cing +feelthe bern +ra u +pu lit +on ec +gem stone +ur ging +bag u +ga h +aci ds +fi anc +zodi ac +sn oop +her rera +initi ated +ven ge +profess ors +pro di +stron ger +e mission +bb a +hal le +ta pp +haw an +wh im +compe ted +myr tle +ir port +cold play +ach e +ske p +m son +ss ic +calli graphy +swim mers +me y +pp c +thri ft +po c +re places +commu ter +âģ¦ âģ¦@ +go ers +lo gue +para dig +bas kets +sensiti vity +joh an +atl antis +& & +suit case +anxi ous +l h +str i +gal loway +stre ad +war den +gr ounded +ffici ency +li feat +reli c +disgu ise +island ers +f cofficial +classical music +b mc +en field +bi que +oak ley +bat man +sla ying +ner ves +mul tit +calci um +projec tor +scott sdale +ant ino +gri ps +kim mel +des mond +prote stors +hi atus +metaboli sm +conclu ded +press er +ti pping +sli de +e to +hun ting +aus open +ri k +pp ery +innov ators +pitch ers +ag ger +fun gi +z ad +proli fic +rockn roll +bl ames +ct ar +stam ford +q ad +mozz arella +insan ely +den ver +ph ouse +nom ad +ï ¿ +s ris +pro du +hen ley +pag an +am trak +ru bi +in cl +tu tor +sco tia +wo es +sing apo +fun nel +turn bull +know ledge +gri mm +real madrid +we are +missi les +con sol +emo jis +sne ak +smi ths +ru iz +br ou +i el +ha ver +ðŁĮ ļ +kin gof +basil ica +circul ation +prin ters +ta pping +ri dley +dra gged +ha j +writ er +fundament als +personal ities +me tre +stereo types +bur le +best of +n ffc +ha th +mini stries +a ali +trac ing +pav ed +ł ï¸ı +g ic +insp ire +tu g +ha re +repe ated +ex pon +lol li +rho de +pre cin +install ations +instag ram +az ar +i es +sole ly +du kes +mission ary +van guard +fursuit friday +on d +pol ari +ma st +har an +jos é +jack ed +ec oun +al ities +ne ph +ra vel +moder ated +sco w +s fb +uru guay +as o +ni g +au du +p ints +lat ina +ben z +m itting +char ted +mat ology +cit ro +biop ic +ðŁij Ń +djo kovic +fox y +agu il +so to +an ada +sin king +sc rap +hair s +bethan y +fact friday +ðŁIJ IJ +unlea shed +) ( +contra dic +ram on +coast line +y ong +sn sd +li gan +p ome +mit age +ge tt +wat i +ri sk +so aring +bru sh +f pl +av an +å Ĩ +lar son +sh ear +mul til +blu r +multi media +chun ky +par i +n ani +weir d +cholester ol +char les +dream ed +tan ning +puzz les +fr am +hand ball +ch ag +beli ze +al u +bang s +Ñ Ħ +detec tives +mc g +ish q +bo thered +saf c +mp ing +ten eri +g ays +sail or +an gi +mul ticul +gue ssed +ros é +high ways +bro om +chatt anoo +- ' +see ker +on ed +at f +lu c +> < +bar i +per cep +jewel ry +as ph +sor row +sl ing +mam moth +jac kie +ë § +wilt shire +sa o +can cell +im paired +tor ial +bre ed +guy en +jud ice +tit le +pro spective +applic ants +ðŁį Ĭ +epis cop +e id +b yo +stock ings +ðŁĴĥ ðŁĴĥ +ll p +sna g +keep it +l ough +ol son +matur ity +!! !" 
+cop ter +i sha +bl i +wil mington +tr youts +th ai +ðŁ¥ ³ +pe bble +kra ft +f p + º +ssi vely +li vin +contest ants +tex tures +jo an +h dr +film festival +prov ence +wi do +op end +c si +sto wn +cro ati +ad just +host ile +analy sts +il an +cu ppa +bru m +newfound land +good win +me tt +mall orca +plu gs +bu k +bb hutto +wrest le +sa ire +sho pped +for za +le head +vi vo +ba st +ro xy +reg is +hard working +hon olulu +desp air +young sters +ni g +impro mp +roll tide +de emed +tre ason +ru shed +for ged +ff f +pikach u +bri ggs +do it +ac cent +la us +gla ze +compet ent +a ho +photo g +mid field +le go +har vard +min orities +re illy +slic ed +once upon +initi ally +financi ally +landscape photography +har dro +qu o +mm ers +par kinson +smu gg +read iness +bru tally +glou cester +mp ed +bbhutto zardari +mur der +ye d +dat aviz +sr t +dow ning +bi ans +m ü +fle ck +fli pped +s ly +brilli ance +ri m +k um +bubb a +ko i +knit ted +sor g +ma is +ðŁĮ ² +ti ss +su stain +sen su +ak han +zi est +exam ines +chardon nay +user name +short list +re bs +on o +dar ing +hard wood +che que +righte ous +light ening +dir k +shra dd +du ra +down stairs +sh al +ami gos +ru ff +s law +ri es +red nation +man us +ðŁĩ§ ðŁĩ· +distin ction +u bun +dur an +mi gra +thi ans +la ver +domest ic +k x +jaz zy +justi fy +belong ing +insul ation +color stv +drun ken +chann eling +qu and +xi ii +enligh ten +kan o +fati ma +teen choice +terri fied +p ba +as ley +met museum +dun e +pack er +ki o +ðŁĴľ ðŁĴľ +bo iler +fas cism +ar mored +back grounds +in mates +embarra ssed +defin es +th d +we go +silic one +lo on +el ding +bor rowed +he mp +ak sh +kaw asaki +br y +de af +kill er +dispo sal +ðŁĩ ° +glaston bury +un covered +o xide +po ff +d ant +k j +ku ro +dri zzle +peop les +fe e +pro pri +dd lovato +pi ggy +ot is +aller gies +u bis +pengu in +ser a +vi z +prosp erous +ici des +tornad oes +sene gal +web cast +sto red +enchan ted +bb cone +bay area +entrepreneu rial +rednation rising +experim enting +ang an +lot to +they re +por e +er p +seren e +east wood +bro kers +bar ge +stal lion +timber lake +tailo red +dy stop +b ate +lat ors +di xit +bran son +dynam o +ky lie +shame ful +bt wn +spring time +mix ture +s ounded +lu ton +dad es +mal a +op ra +en ic +rahulg andhi +se wer +~~ ~~ +ky u +nor theastern +ca er +bc u +nir vana +kitch ens +ous y +al m +river dale +hid den +fl int +sp d +pat rons +katy perry +au gh +exhib itions +sm c +shu ts +at ore +da in +some thing +ber th +bo g +por ter +gen to +con cussion +ang lic +ro we +gr illing +scar lett +master ing +mor nin +comm ented +si me +si zing +christ y +ce os +st m +at ry +tari ffs +vac ation +pre judice +p su +paren tal +far age +can a +cap com +koso vo +you re +men stru +stal in +grape fruit +br an +che sa +dav en +exc el +!! 
) +๠Į +distribu tor +ce a +bride sma +millenni al +wa in +ob serving +mis ery +plan etary +expo sing +bra ised +comp ton +don gha +q l +spring steen +th ul +syl ve +cab o +pal ad +niel sen +gaz ing +ba ja +r oud +orchi ds +johan nesburg +se man +d ji +oper ative +affe ction +eclec tic +at c +mut ant +aw x +nic e +mel bourne +indu lg +tu lip +dias pora +wel p +big gie +mississ auga +retri ever +or an +tam my +c ta +hipp o +seas oned +ger mans +eng v +marvell ous +im f +rela ys +mon tan +maur iti +me ister +as surance +reig ning +su fficient +han e +no thing +pos se +nav y +in love +brigh ton +en qu +ch ung +sweat y +es c +cal ed +man s +nicar agua +sl ices +mo cha +washington post +bb n +dam ned +grow ing +en burg +lo an +me s +wh oops +believ ers +spi el +vo daf +l at +s led +cricke ter +brown e +golf ers +bar ra +wat chers +lu igi +sw amy +mom s +pit ched +san tor +cr s +si re +sc amp +bo de +ste war +jon ny +ent ity +pac qui +mind ful +min india +bear ded +temp t +scorpi on +eat on +authori zed +ar to +s vp +op athy +cch ini +house music +disney world +âĢĶ @ +pro pose +di y +expen se +ten g +pupp ets +sm el +d aca +per ry +fin n +boo sting +lefto vers +cou gs +satell ites +man y +az e +g ong +fi e +metho do +fer ries +ðŁ¤Ķ ðŁ¤Ķ +explore rs +load er +attrac ted +il ton +godd amn +pi azza +doc tr +sav ing +paragra ph +visu alization +may ors +work flow +ack les +ðŁĺĤðŁĺĤðŁĺĤðŁĺĤ ðŁĺĤðŁĺĤðŁĺĤðŁĺĤ +ठ¸ +twer k +clu t +lo ver +te ases +si an +o te +deter ior +accor d +l fw +swar ovski +nat al +tra ps +k ina +analy ze +laye red +bever ages +un it +ran som +pe shaw +dest ined +astro logy +si pping +miley cyrus +cam ino +marshmal low +bli ss +out back +fa q +int oler +humil ity +po ppin +hallo ween +mon tene +op hy +nu n +tattoo ed +a as +ðŁĮ ³ +dale y +qual ity +du sa +fisher men +swi f +ter rac +st au +le in +trol ling +ship ment +garden er +march madness +head band +gr t +bur nett +w and +!!!! !!!!! 
+gh e +du x +hu d +war ner +ðŁĩ ¦ +ex ile +rescu e +rat a +d han +duc ati +dro wn +bl ends +spi e +alli gator +simul taneously +broo ke +u ke +k har +comm union +ri ka +ford fc +chin atown +you rown +me y +can al +syste matic +de pri +ox ford +an il +w ut +equ ation +be z +fle ur +the good +lang ley +ad ity +ed ith +al fie +о ÑĤ +en cry +br ill +ex emp +ce sar +mb ling +ab ri +sc icom +j ing +school ing +mi ka +mechan isms +impromp tu +rhe a +moo re +crime a +be sto +wri ght +el ders +ro ds +kam al +folkl ore +be et +mini on +reli eve +thr o +team usa +pas cal +made with +boli via +itt i +free bies +desi red +best selling +l iness +la den +ke ane +mi sts +hipp ie +atta chment +@ / +se w +flan agan +âĿĹ ï¸ı +supre mac +stl cards +si as +q u +rh ys +ste ep +val leys +v w +pav ing +disp at +al ison +por te +id u +new sc +soc ket +mo s +co star +re vo +prote ins +stanley cup +m cal +ear ring +se cs +mc lean +cap ric +nick elo +ad en +v c +shou se +adap tive +maxi mize +entertain er +pro se +gri ffi +six teen +lam ar +mi rage +saudi arabia +awe ather +ru st +in filtr +fashion week +ðŁĺĬðŁĺĬ ðŁĺĬ +selec tive +bubb le +a den +fen nel +deci sive +m ta +mock ing +mb les +st amp +mu le +bernar do +gr in +po tt +j ingle +vet tel +colom bian +cam o +motivation monday +ba han +p ly +dh ary +k ami +x men +sleep er +gar a +my sti +confi dential +conflic ts +p neu +ce s +insur tech +clean se +me rely +va is +tu x +the great +shar on +ma j +hol a +eco systems +aj ay +aa j +hu sh +har mon +backto school +wiki leaks +reflec ted +ðŁĺ ĵ +commemor ating +ac et +buck ingham +messi ah +tu ous +hor net +to be +d q +he ine +mi g +pl ate +nichol son +sp ie +cumber land +nor mal +pho bia +happy halloween +city fc +mc el +gilli an +ke to +lu de +de mise +su ga +str ate +mcgr ath +visit scotland +foo led +cb r +gc se +col ori +po td +missuni verse +fin ances +ma poli +for ks +Ø ´ +cann on +medic inal +ðŁĹ ĵ +kh o +wre ck +pan to +bag el +gu ll +syndic ate +ic y +pr c +ki en +zi ka +ti sh +pe ta +c co +li za +ch ut +ex traction +el g +gl i +fu eled +pos it +respec tively +leice ster +br ink +vulner ability +im ported +e sha +ðŁ¦ ħ +r ural +re ll +gam ing +atlan tic +aband on +no ah +re solved +pro state +aller gic +ps d +âĺ ¹ +dun geon +fang irl +illumin ated +m hs +white sox +d ently +ck o +endor se +over ly +dazz ling +prior iti +night life +ut il +be have +flam en +east bound +ðŁĴ Ł +ilove you +gov uk +mozam bique +alle gi +dr i +testim onial +ath s +ì§ Ģ +mm y +shab by +pro secco +friend ships +cal am +dam ages +off set +jura ssic +jun o +arre ll +ðŁĴ © +interven tions +dare devil +car ver +run away +ran e +truste es +ha ute +dep ths +ðŁİ Ń +me in +sacrific es +con cier +ne sting +i zzy +me tam +ilove my +ur ine +du lu +mal hotra +ve ins +night ly +co at +an di +he witt +lon el +ci ble +wr ite +jen nie +sant ac +ĸ ï¸ı +str ato +singapo re +sop rano +kri sten +cheer ful +flee twood +fa iri +m eli +wa st +tur nt +sfor sale +sc rolling +angel ina +ren dition +jeric ho +nick y +or b +fla vo +patri ot +ash eville +sick ness +re fund +aggre ssion +b pl +ãĥ ĥ +elu sive +thi story +hang er +bu ffs +vil las +at kinson +sp h +ja it +decl ined +wo k +supre macy +oo tball +ey ang +ðŁİ ĵ +s ford +ath i +consu me +road ster +e so +u pro +reci pe +au f +uc i +ar on +oo oh +cs go +re ich +mc d +min ute +ladi es +pun k +rut gers +mee k +ariz on +ta j +land lord +de gra +autu mn +lyn x +us f +b hi +fairy tale +dongha e +bet sy +explo ded +chen nai +op a +pro tag +br ant +ðŁĵ °: +g f +pal li +ðŁı¼ âĢįâĻĢï¸ı +su t +ill ini +colum nist 
+shir tless +de centr +sear ched +ec or +bu ggy +s ack +ðŁĺĤ ðŁĺŃ +de t +ther i +or naments +bring back +to v +quarter finals +ic he +con stra +gi er +buchan an +vi x +kay aking +mu stread +swal low +mel b +sc af +op al +may oral +har at +ðŁ¦ ĭ +schedu les +id f +ha gue +ro z +a ah +d mc +du plic +ca che +orph an +frac ture +rec on +ch av +bun nies +al ain +mustaf a +ðŁİ Ļ +vac ations +dynam ite +tex ted +broad caster +ðŁĴ £ +ste amed +rock er +di etary +luxury travel +inaugur ated +sa wards +vaugh n +lincoln shire +click ed +kra ja +f anc +remo ves +layo ffs +mc far +bre eds +win nie +jon ghyun +incen tive +vari ations +pat ton +atur day +persist ent +pr un +pi ers +dal es +æ ĸ +breast feeding +r ance +ta wa +Ĥ âĸ +mur doch +cap tive +thi stle +nic a +commod ity +cou ldnt +board walk +graci ous +practiti oners +n gc +scru m +ner o +camoufla ge +col on +he i +phys icist +saturday morning +ten er +si won +colum ns +bru ne +y vr +ba ir +reti res +hal am +cab er +shaz am +min u +cas cade +milk shake +gri d +d ren +vin cent +so dium +plat ter +cheer leader +chen ko +y ak +elimin ated +ty po +y man +re think +âĿ Ĺ +ts ville +bernardo kath +ex tr +ðŁĺģ ðŁĺģðŁĺģ +ta o +re per +mo ths +em powered +c iting +transpor ted +mon ks +san at +cle ars +bachelore tte +camp bell +racha el +har le +hand ler +climb s +inter ference +rele ase +sh and +r bs +hr h +ãģ ª +val le +r é +sli me +w akes +chu bby +slo an +el ves +ath en +attor neys +micro scope +ston er +sc aling +o be +c out +se man +mid week +bal sam +ðŁĺį âĿ¤ +ti ful +v ish +lo tta +ri pping +re mn +ti re +le ap +ha vent +la by +hi mach +whisp ers +we in +ðŁİ ¸ +wild flowers +se le +u cc +li ability +az ine +sw ings +k ya +ta ir +re main +e do +flo ps +poc ket +grand ad +exam iner +gr is +ffe ct +ðŁijĬ ðŁı» +stud ded +heart beat +de acon +firm ly +infec tious +ste f +out lines +le asing +cla ws +sen se +tab s +hoo t +mo sul +spa wn +co a +hog warts +ve in +alban ia +manu el +b ino +vaux hall +scot land +go bucks +mat ty +phy sio +tor ino +const able +investig ated +s lower +mistak en +bay er +wild fires +vo ic +x on +time to +chas sis +bar ric +pi on +bald head +woo k +regi str +dra fts +b hs +li gue +l ick +staf fordshire +baf ta +dar ry +je anne +ven ding +cor p +⼠³ï¸ı +kid dos +fen way +ca o +west bound +ðŁĺ Ļ +dv r +quick er +bla h +goo die +ðŁĴĭ ðŁĴĭ +vo x +esp er +fac ade +cor relation +red bull +rou p +decl ining +chi ve +mc gee +tur o +in der +f eller +fu g +il ysm +mar di +peshaw ar +ki eran +ine ma +meat balls +pe ck +depre ssing +sen sing +gi z +dd ington +spring watch +ro aming +yellow stone +horse shoe +am man +week day +ol or +ðŁ¥ ° +boo sts +spr int +scar ves +je e +bee tro +cl an +all the +ìĦ ¸ë +enlighten ment +ado be +re generation +? @ +cont ag +yach ts +to u +mor a +en voy +r ani +go li +dhanush kraja +wood working +streng ths +se di +disc s +ar ina +sc on +lit e +ano ther +ðŁ¥ Ĭ +ye men +gu ern +sav vy +lo yed +biom ed +heart break +comra des +milli e +pat ch +un f +jar vis +bl aming +commemor ation +ge y +å ¥ +cardio vascular +alig ned +docu ment +. ? +aesthe tics +em u +the irs +le h +ps ic +si f +pl ateau +ex pend +domin ating +rob es +mauriti us +excep tionally +hom er +discover ies +bra un +ten nant +insul in +ðŁİ ® +car bs +te as +? !" +zi e +franco is +brow sing +th ol +cla rence +hel per +ob tained +cas sie +le es +! 
, +pome gran +hu bs +presti ge +] [ +mach er +bott led +pun ch +pi pe +o ch +gall ons +deliver ies +u ra +un day +mon de +depic ts +re gency +outra geous +khal ed +car o +he arti +za g +develop mental +over coming +stati stical +flavo red +for ds +cre atives +lau rence +di as +sun screen +in ked +pre acher +n ul +impac ting +auti stic +âļ Ķï¸ı +o ss +pel icans +cele ste +v b +ru mp +mc gra +fair fax +hu mor +bbc news +row ling +cal der +seam less +ag ne +p ti +mix ed +t shirts +mer ci +b tob +women instem +genealo gy +pre ven +l our +cra dle +gi use +Ð ¾ +chron o +fair ness +chocol ate +tor y +as da +pre scott +stret ched +al man +u il +re charge +in tre +ob st +hosp ital +hay ward +teneri fe +fried man +vap ing +confe ssions +ye ah +bal li +luck now +cor pse +sculp tor +amp ton +t pp +indic ates +sur plus +tru man +ðĿ Ļ +sin ha +in vo +sovere ign +ke v +establi shing +engra ved +assu ming +ðŁı ģ +sou za +fab i +ton ed +oun ge +del oit +dow ney +no ble +om or +car tridge +ðŁı IJ +u hur +hol loway +succe sses +r sa +âĦ ¢ +ma zz +tw d +disc ourse +. < +y at +satis fy +com pri +ठ¹ +graph ite +disser tation +ar ter +í Ķ +b ally +zom bi +ly ons +a ic +u bc +pra da +e il +da x +cla i +grand daughter +extravag anza +chall enge +ðŁ¤ ŀ +po ver +primar ily +dad dy +man a +bi kers +inqui ries +da un +fel ine +gener ative +he f +benef iting +lind sey +pol ka +demonstr ated +al le +rand y +o su +low key +weir dest +red bull +our y +n ous +wood stock +cre denti +nic er +g ado +aly ss +ap h +prepa redness +station ary +incorpor ated +dy er +sarato ga +cele sti +: " +antibio tics +or gs +inde fin +ap ron +и Ð +fif teen +no f +ðŁĶ Ŀ +ph x +te ga +m z +organiz ational +on air +band ung +pleas ures +mor i +secre tari +rac coon +ca shi +pil ates +k on +geof frey +la o +kam p +depart ments +back packing +an am +à « +crack down +aun ty +on do +li zzie +ph ers +cu n +ðŁĩ ± +k pop +pu t +inten tional +connol ly +bar clays +hs fb +swin don +u ku +s ally +a int +âľ ħ +pen ang +up lifting +epile psy +inter ro +bun gal +go ku +blue berries +ठ¦ +u ssia +sil ky +mou red +i stic +bri efs +me ats +go b +ch aser +state wide +pra sad +gl itch +ar in +ban ff +memb er +ðŁĺŃ âĿ¤ï¸ı +lo ving +hall a +ภ¡ +smo kers +yak u +scicom m +physi o +sw ol +lem ons +gel ato +ch ool +capit als +ki stan +ti ghts +spi kes +trav ellers +ik lan +commissi oning +ar ine +emabiggest fans +empha sis +front line +pad dock +destruc tive +ba ha +l inger +je wish +shet land +mc gin +mon key +ko z +s one +raj ini +te h +y en +c vs +masqu er +gir ly +we sle +was nt +bro dy +termin ator +gil le +mag gi +bir die +jeopar dy +cu bic +vm ware +intric ate +an up +to pia +east on +sab res +investig ates +bu sting +bil ingual +valent ino +in format +fer re +advent ur +hydr ate +for sy +az iz +san to +e de +whist ler +continu ously +d ham +un used +ji had +addic tive +vi dy +do b +i do +fi ed +ni versary +n one +fu er +ðŁĺį ðŁĺĺ +coven ant +prin table +immac ulate +o em +cl t +serv ants +consu med +un released +sc um +pack aged +me re +ìĦ¸ë ¸ +to by +ta f +spo ons +me al +f ball +fair field +jan et +silver stone +dart mouth +follow me +voy ager +kom bat +anni ver +ene w +mag dal +ho ve +sa th +grizz ly +car di +gart ner +sand y +kan ye +post ure +po ign +im pulse +radio logy +horiz ons +si am +aish war += => +no che +tr is +el yn +com me +du i +ce c +councill ors +cudd ling +creep ing +loc ke +manag es +trans ferred +ne cks +di er +dan o +v ick +lun ches +d he +en sures +cri ss +ul ster +bann on +cont enders +sp am +sweet ness +med al +hon duras +arc tic +ultra 
sound +in fr +disco vers +ei ffel +ca sters +ru ben +du st +awe ed +atri um +lest we +se ared +ðŁĵº : +ty ne +ex changes +little mix +l le +astron auts +hersh ey +work day +kno b +so v +re signs +today show +der man +an th +af c +ta ster +sw oo +sa eed +per ing +narrow ly +rn li +best buy +panas onic +obst acle +farmer s +ðŁİ Ļ +pa wan +ki est +ang ers +absur d +oh my +sin o +pist achi +sp ice +giu li +prime time +ko w +k ens +ex agger +! ?! +u ba +midd les +ju dd +e jec +slam med +pen sions +of a +re create +b hp +xx l +liver pool +thre sh +pur ity +ni eu +hol ics +wr ath +ra do +gli o +am ma +dile mma +cr u +lets go +.... @ +âĿ ĵ +sugge sting +tru mps +hor us +f v +ic om +refer ring +predic tive +tar ts +ge tte +so ck +glo ssy +pin ky +al ec +thy me +ou ra +thero ad +pe tr +cr am +p fi +dv n +me ier +incen tives +tun nels +mobi l +rec ap +extra s +upri ght +rev amp +per severance +, - +ot p +mir ror +ar wx +ger ry +ma her +g or +hom epage +am is +ag ra +made le +best friend +sirius xm +bun dles +admir ing +t dsb +ðŁį ģ +ch as +slow ing +ro h +wall papers +âĢ¦ / +tek ken +gang s +tal a +lind say +shou l +line backer +tool kit +ur anium +caly p +ab rams +mat thi +ðŁı ¿ +hon ourable +da yo +ver sail +tan k +st c +fr itz +spl end +pat ag +anno yed +on day +devast ated +chattanoo ga +national ism +mas sey +jen n +tail or +dev gn +org ans +zu cchini +on fox +sat ire +wex ford +dis grace +no to +vol ta +âĿ¤ï¸ıâĿ¤ï¸ı âĿ¤ï¸ıâĿ¤ï¸ı +à ¶ +home owners +poin ter +m cr +au sten +day sto +mo ons +pal ma +gra zing +e so +influen cers +shahid kapoor +compli ant +measure ments +develop s +y d +par l +p vt +rand olph +tor tured +ger ald +eli as +deepi kap +war mup +hick ory +g ap +co ffin +am our +re neg +moun ting +seven s +ig le +hi er +dec ad +tri ght +esc apes +wer ner +t fl +ful filled +ni ger +sour dough +re aper +choo ses +spin ner +week nd +fil tered +sh uk +kat i +old ham +open source +kh anna +at elier +conne c +opho bic +gla s +complic ations +ar son +counc ils +sm ol +as sy +lur king +ling ui +han ks +e in +Ù ħ +ru gs +n guyen +nou veau +men ace +le v +alad din +ru ining +round about +k m +con or +shoo ps +may day +traum atic +prab has +ka iser +k ita +rou ter +pe dro +re tar +stun ner +spani sh +distur bed +acade my +e learning +wit ty +sen g +fer al +av y +sta b +ke aton +ur du +ko to +hu i +coo ke +ari an +the personal +u ma +se ap +a sting +rhetor ic +hand writing +munici pality +consor tium +ðŁIJ Ł +glasgo w +ra ya +eli za +polym er +bro th +prac ti +correspon dent +addic ts +gay le +ail ing +o fe +p li +hear tw +st itch +sight ings +prie sts +sam o +slo th +good wood +roc co +sab c +summ it +l ace +pres ley +itt en +cin cy +thepersonal network +s week +pe gas +af con +regi stry +ci m +le th +dic ap +cand ice +flu ent +sm ack +pede stri +al oud +car ac +priyan kach +p gh +ir ons +dol ce +lat via +dece ased +thero ck +cla p +cen e +fo am +morris sey +gre t +essenti ally +com cast +be agle +argu es +ing ed +- âĢ¦ +sa g +ha san +ðŁĻ Ĩ +ðŁį ° +nh ra +kann ada +indic ators +on er +bri xton +at as +screen play +sor ority +sha heed +he em +class mates +tain ment +es i +breast cancer +zucker berg +aur or +en cia +ref ers +kae per +vor tex +com part +lym ph +photograph ing +ste ff +rest ling +par sley +mom ento +th man +lac king +du tt +ocu lus +fin o +fren zy +ra sc +der n +dis missed +noo k +met gala +sh ill +rapha el +maver icks +exhib its +eag erly +c pa +amen ities +. 
âłĢ +exo dus +ern st +lit a +deal t +womens march +i ain +score board +campe ones +c en +ti ki +garri son +fidel ity +bra g +road map +psy chop +lo e +ble u +ðŁijĬ ðŁı¼ +sau vi +spr inger +temp tation +ru dolph +ac ura +wic z +parach ute +stro l +len ny +zi k +dom s +nb af +al pac +vivi an +ro ve +pre et +perpe tu +sna ke +air soft +infl atable +prin ces +ati e +ffe y +pati ent +m ire +chel le +sl ack +groo vy +# : +up loading +!!!!!!!! !!!!!!!! +siem ens +provi sion +v fx +need y +f ats +to poli +bhu tto +sa thletics +alu ms +t winning +south western +adop ting +last night +man ne +la ga +tw ell +ac ia +-- -- +eye wear +hur ley +fle e +sa ch +pe cker +cost ly +is k +cr ates +polic y +ero sion +in go +wer k +ðŁIJ į +torto ise +therap ies +inter net +chihuahu a +ri ps +fre i +ed or +tai ji +t fc +do d +demp sey +christ in +chen g +hi ps +gra eme +com passionate +cavali ers +histor ic +soul ful +crimin al +ja c +vin ci +expi red +sur at +turi smo +k ona +se aweed +ber ts +le ica +expre ssing +a al +wor t +break fast +her ring +am used +rhu barb +mar tian +cospla yer +y ash +stri al +ra ul +refer ral +dw ts +j w +ad ler +cur tains +gu r +val ence +tyr one +sw fc +coach ed +re born +diabe tic +cho ke +nor folk +investig ative +ðŁĴ¯ ðŁĴ¯ +z id +v mas +phi e +objec tives +âľ ĭ +over due +di vers +mat su +ðŁİŁ ï¸ı +casu alties +ภ§ +al k +stand ardi +re alist +arti facts +pand or +ke x +in vin +( !) +ine y +par aly +mr t +fay e +the voice +on ga +de ed +skin ner +az wx +speci men +priyankach opra +nu evo +bar kley +toulou se +resu mes +football ers +cit i +fe tch +è re +lestwe forget +ðŁĻ ĭ +ch unk +dri fting +manipul ation +equ als +pu tt +ky ungsoo +âĿ¤ï¸ı # +ela stic +par ano +fo y +do ping +cin cy +ss ler +interrup ted +al ay +ado res +ame thy +con voy +ãĢ ı +Ĭ ãģ +black list +gener als +sa chin +bru shed +oun ces +non stop +illi ams +bt sarmy +u av +ru ff +bur ma +bi k +defen ce +schul tz +bo asts +lonel iness +go re +trans forms +alum na +@ @ +ra ppers +ne hru +car o +himalay an +wearab les +ge h +pepper mint +re development +flam ingo +cos by +big baldhead +ag ri +bare foot +sco pes +re gram +gh ana +ðŁİ « +i heart +sa die +carri e +microbi al +ku ala +sk ater +quer que +âĻ © +gen res +reas oning +ch ased +as o +sli pped +en can +vam os +ker s +ad verse +mo il +commod ities +with you +sil ent +hy pe +an de +am ination +whi spe +lit z +âļ½ï¸ı âļ½ï¸ı +ri ff +pp y +lam bs +gan esh +ab sent +regu lator +marse ille +en roll +par cel +wa p +by rd +ðŁĩ Ń +tu ber +country music +par l +contro llers +responsi bilities +we y +ch ate +montene gro +chic o +mil an +l ms +tra inees +appropri ately +un certain +popp ies +ed sheeran +nutr itious +gar o +deut sch +awe some +ãĥ ¼ +comfor tably +land marks +et i +re usable +daniel le +ro sal +co les +just ic +c cs +f anny +ni m +mc u +clin ch +at ene +mer ge +im db +ang lo +uc cino +pan ini +an not +bur berry +feat ure +predic ting +fashioni sta +s ask +imag inary +mm o +south sudan +spe ar +hu bble +jo inthe +coyo tes +sli go +ko dak +sit com +polaro id +roo ted +corru p +ðŁĻĮ ðŁĻĮ +bris ban +at z +ah l +re my +tal ent +aval on +ra da +pau line +locom otive +go ons +ne mo +maser ati +ic u +stu tt +histor ically +sm b +pres by +avo id +so oners +rhine stone +w ad +ri sing +tro t +mo des +reg ent +optimi ze +re ece +sm u +ver ti +newyork city +cor tez +ra c +in case +sin c +fiel ding +e tta +tiff any +al monds +sad dle +k rat +mat ter +g low +star ving +gl o +cra ppy +sl ur +st d +monit ors +recei pt +maymay entrata +mc il +un is +rain bows +cal dwell +pacqui 
ao +j op +a fe +hoo k +es sen +wiz ard +medi an +fla ws +com s +âĿ Ħ +ing h +ha ynes +anton io +tem plates +ou ter +na w +cardi gan +bel grade +ðŁĴ ī +hom o +a ise +ro pes +no ve +what you +tri gge +concep tion +ad ukone +na di +fri ars +sw er +adju sted +hot line +san ity +kau r +down loading +c gi +ten or +eth nic +app alach +ภ¸ +pa g +gol ds +on set +investig ator +car tel +peace fully +jarre tt +cat alan +poli o +n um +fru stration +dhar ma +my life +âľĮ ðŁı» +aber deen +mu sa +bin der +spark ly +fle eing +instin ct +co ping +domin ance +ill ers +er a +u conn +lo oms +living ston +gal i +he s +c ma +bel a +se ley +mon k +la ch +mar x + ´ +m erica +woman in +es sex +ra ina +jim i +nep tune +z ack +chine se +mart ins +chand elier +her n +with us +ear l +asph alt +modu les +st p +ul la +psychi atric +mile age +captiv ating +si der +men to +mor t +tran ce +tal bot +ab by +ì ĥ +âľĮ ðŁı¼ +j ak +daw n +turn up +scre wed +fe ds +blue print +ðŁĴĸ ðŁĴĸ +har sh +er os +insom nia +ban kers +ta emin +mis conduct +hu mber +gi di +edu ardo +con a +musc ular +consu ming +ra sh +don nie +di pped +col lie +samu el +melt down +ðŁĺįðŁĺį ðŁĺį +me z +exam ining +schwar tz +pri stine +ðŁIJ Ŀ +ve it +ful filling +an esthe +gue sses +dra ft +som me +soli d +pati onal +ho ped +evolu tionary +all er +enter tained +sli ps +lud wig +conclu des +sen sible +bon net +cra ze +tra s +haz ards +const antine +ed ics +star trek +to c +occu pational +in cheon +deepikap adukone +pizz as +new comer +de part +oppre ssion +ebon y +foss ils +tro jan +el en +ste aks +k hou +positi oning +ug by +red cross +ak h +dol ce +us mnt +pp en +dil ig +ma vs +call er +cost ello +⼠Ħ +dy n +thing s +rhin os +a xi +sar kar +con vocation +att ers +ss ss +fun gus +eu gen +russ o +squ at +w sb +eli on +william sburg +s off +defici ency +be arer +o kin +key stone +t wain +cal ming +break able +wa res +horser acing +com bs +bun ting +u it +t land +ðŁĴĻðŁĴĻ ðŁĴĻ +ga stron +sab ot +ick ers +commissi oners +sen ate +ii ot +ath ena +nit rogen +an tony +ero tic +di alo +mis sou +hypo cr +âľ Ī +kaeper nick +can v +d roo +clevel and +o sh +mon sta +stefan o +^ ) +sh ul +po ison +ha e +commerci als +ma ul +nit ro +co worker +alo e +vap or +t ents +russi an +qu id +question able +mid get +po ker +girl friends +sin the +erit rea +ten ure +depos its +buc keyes +spot ter +theod ore +trin ity +joaqu in +u cci +follow the +caf c +mp a +ðŁIJ » +plo tting +dom ino +ta ek +sion ally +dicap rio +pa p +car mel +ig er +bt cc +beth le +www bigbaldhead +foo die +bagh dad +mason ry +off ended +à · +ภģ +sc ro +vers es +ori ent +ar ches +pi yu +know your +gre e +ta kers +gu ard +dish on +bucket list +bha fc +war dly +ðŁİīðŁİ Ĭ +leigh ton +pe w +stra y +assaul ted +in hal +ly fe +amar keting +l x +kat z +ubun tu +me o +carto onist +turno ver +mi z +dis like +mul len +mo f +bl and +hi des +emer ges +chori zo +truste e +ma hog +lan sing +paralym pic +fa int +fa una +ch al +sn ar +cat h +bent on +cast illo +sli ppery +apric ot +oec d +bar o +l z +he ming +clow ns +co workers +peru vian +commu ters +y ell +ðŁļ ´ +under ing +v j +tt p +fli pk +w ana +soc ent +Ĥâĸ Ĥâĸ +ठĤ +oo sa +jag ger +di sm +e less +d ham +cali f +a official +ec lip +harro gate +gra pp +com rade +n tr +concentr ate +thi ghs +bit coin +bel arus +ë ĵ +end uring +now watching +industri al +pi p +ar on +ar at + ® +whit by +oooo ooo +sa ree +tic als +mis leading +yo on +year s +sle igh +roman ian +sciss ors +vam pires +ac up +ab ba +th weeksary +cent ri +fl ye +u o +c bi +bu ena +sin d +mar ino +bur r +re 
building +ठ² +anniver saire +ac ca +ðŁĴĢ ðŁĴĢ +gett ing +tu lips +wolf pack +âľį ï¸ı +more than +ta kin +ðŁ¤ĺ ðŁı» +u be +mon ic +dou bts +mo wer +co balt +don ne +specul ation +argu ably +kak u +htt ps +prosecu tion +din ah +stam atic +disclo sed +bever ly +fl wx +cra bs +extraordin aire +war mest +imper i +o logists +trac es +par c +lake side +am r +ter i +hour ly +domin ation +ar row +shrews bury +ance stry +wr angler +trigge red +pen sac +roo ster +survi ves +a on +bo ko +val or +love is +la g +pe y +fo cal +out laws +bl anc +artic ho +wit s +marsh all +die go +support small +u ca +sa h +je et +syn ago +gover ning +ðŁĴ ¬ +sal ads +cre ate +miri am +cen sored +ami de +no u +z eta +allegi ance +* ) +bl m +ric an +pa stors +oly mpus +blo c +whir l +star ry +pr one +y k +p ne +congratul ating +be v +so ber +love island +sa ir +an ing +tutor ials +q e +lun d +in ist +cle ver +taxpay er +ali z +wren ch +dd ling +cap ri +h pa +ðŁı» âĢįâĻĤï¸ı +na j +o j +futuri stic +jelly fish +ðŁĶ¥ðŁĶ¥ ðŁĶ¥ðŁĶ¥ +cel ery +plan k +fil a +ne me +un healthy +lec tions +ðŁ§ ¡ +rit chie +n ws +mi kha +wonder woman +âĢ İ +hip stamatic +ka g +ðŁĴľðŁĴľ ðŁĴľ +poul try +mo w +wor ds +lo ff +ðŁ¤£ ðŁ¤£ +relat able +re mixes +keny atta +ke m +re signed +fo d +stra igh +j lo +hu tch +box ers +colle en +mag s +instruc tional +ko l +attrac ts +pra g +account ant +go ggles +br u +th ole +mar row +leu ke +oc to +pon ds +bubb ly +he ist +ìĹ ij +im p +a har +ha unt +hall mark +psy ch +kkkk kkkk +col umb +jump suit +cost co +si delines +ag gies +over turned +ni b +key chain +fu k +f af +mi am +assist ants +cy cled +ri der +dam mit +red wings +mag es +kin s +ì Ĥ +ho d +son t +carol ine +" ' +cu le +bra id +fel ony +ar ities +ruther ford +depic tion +isab elle +ro ach +k day +fifth harmony +em y +li gam +bari sta +albu querque +gro ss +ðŁį º +oo ks +ðŁij ¼ +dun can +try in +jag s +g ould +li tho +âģ £ +а Ð +sam my +tun g +cas ser +apo lo +aaaa a +man g +as ics +sh en +p ye +tur bul +ss p +saint sfc +on lin +n anny +he ster +do z +ภĶ +th read +ren ts +kh and +ðŁĴª ðŁı½ +un conditional +rob son +car re +ph on +sacrific ed + £ +auto s +par ker +oc a +log in +kee gan +hard cover +dough nuts +ðŁĮ İ +spit fire +refresh ments +saskat oon +commod ore +j f +rub ber +halam adrid +child care +stra da +io m +ri k +dak ar +ther mom +cro pped +gar u +ali k +ven i +i ft +si ka +ritu als +z ul +e ch + © +su dan +l land +i me +do cker +ì ¤ +fe ared +fa o +wal ter +no g +mutu als +l h +ali gn +mon ia +concep tart +ðŁĻı ðŁı¼ +sco e +compet ence +sw ine +ly me +laun ch +green er +abstract art +inqu is +gran ada +ga elic +flu ff +d backs +grave yard +ba be +acade mic +adventur ous +joh ann +~ ! 
+bi bi +| # +pl ings +gett y +as b +âĿ¤ï¸ı @ +staf f +religi ons +bang or +world bookday +me gh +de vin +ash ore +meri dian +gi thub +qui z +all stars +be stest +ir resi +ack er +do te +war rington +pol ly +newor leans +cr ou +wi gs +che y +smithson ian +la sag +de tour +bor is +stra ps +mari ah +inten tionally +ko h +ðŁį ¸ +ssi an +mar issa +cor al +episcop al +casu alty +tom o +supply chain +sam p +on go +ro o +cavi ar +p fw +clau dio +buff alo +s ations +mat ty +snap back +l ds +al arms +mat te +âĺ Ķï¸ı +conditi oner +d ors +he x +fi zz +a stri +sus sex +secur ity +qa eda +all star +cocac ola +as one +cl icks +sc ans +mu te +he avier +ðŁİ § +âĺ ŀ +lv l +book boost +youtu be +fla shes +f jor +c su +explo de +do dge +cair n +gonz ales +th ill +pel le +hart ley +renew able +re tin +e stre +costar ica +shipy ard +nc fc +pri ya +a ghan +an ath +plu gin +co rey +re bound +or u +kat rin +hor mone +gi m +mahin dra +s sus +park land +har per +fanta stic +infer no +ep ilo +wrest ling +fe ct +c it +ac oun +to ssed +monu mental +char tered +bu st +pe tra +âĮ ļ +wildflower hour +sweat ers +* . +bl er +ate ch +go wan +demo graphic +bra l +suici de +renov ations +vu el +sin ister +ar mani +miso gy +ph arrell +nap s +un iting +crusad ers +cor gi +insu red +than i +no or +g q +d ada +bicy cles +snu ggle +sch an +ten berg +ss al +fe mme +bo il +½ ï¸ı +re ap +occur ring +hus sein +divi d +sto ke +sh alom +na ia +o lic +frustr ating +Ù ĩ +ig s +gro ver +scen arios +n ds +bru tality +med alli +bu on +sas s +skate boarding +ony x +lor ry +ny u +gau tam +mm ings +gu g +end i +lo thian +comm ando +chal k +ph ora +asse ssing +ti gh +crun chy +ad ay +is l +ci ara +pilgri ms +kam al +p to +brit anni +t ani +sm c +l ure +app store +ab y +golf ing +cl c +fa u +an as +shu tting +regul ated +carn age +scow boys +all enge +c ma +humbold t +rel le +ku mb +her i +refin ery +sound check +d wayne +bos nia +i sp +the alth +anni v +relev ance +my a +bag gage +dre ad +s bc +th ed +bu h +hi jab +lo id +ke w +c te +respec t +lovel ies +cu bes +celebr ate +dir t +sav ers +_ , +gar ment +pulit zer +mas jid +beat port +al arts +encry ption +s ner +ple ads +found ry +sym metry +ru mi +birth place +scallo ps +supp le +pivo tal +t ati +no de +so d +pro xim +tr ics +col dest +bren t +mand u +cla ir +e ach +and alu +hi ddleston +ðŁIJ º +mel ts +v ance +pin n +se ments +scre ened +sa chs +o bl +ic ha +âĺĺ ï¸ı +school ers +heal ed +lo gged +ðŁ¤ĺ ðŁı¼ +ic us +bore dom +b ish +b ffs +tal king +sure sh +hoo kem +de on +de fl +ei leen +ðŁį ķ +women intech +ri sotto +rang er +adverti se +ภģภ+tel ly +la go +dart moor +d ong +sk ates +lo go +un ner +mail box +ma sala +lo oooo +amethy st +che wing +c bb +australi ans +rc mp +game art +# ... +kor n +extre mism +fruit ful +anci ent +pu bg +pol ite +wh it +mur als +m gr +line man +dav ao +ste ms +ten nis +av age +tu pac +gigan tic +hs bc +auto biography +up the +ี à¹Ī +re gal +fig uring +ku l +mis sy +hoo p +gra s +for ums +back lash +abduc ted +p nw +min ic +bu tt +bott oms +at on +ven g +ðŁĮ ı +del aney +prab hu +fan club +over haul +health ye +sy no +aa f +ren amed +kim i +un cle +man city +se u +qu anti +este em +um in +en zo +mel vin +under go +j har +far ah +coast ers +humph rey +mh z +children s +^ . 
+d hi +disrup tive +integr ating +r nb +over sized +a ide +ne au +docu mentation +ðŁijĢ ðŁijĢ +pal o +hear th +ri yad +pun ctu +abc news +secu res +boy band +bir ch +ju co +tra ff +legislat ors +bay a +ãĤ ¯ +no ises +collec ts +s warm +k ner +bi shops +stur geon +snapp ing +mo l +fre aky +chair person +tro p +lyn ch +car cin +art sy +e sto +cha i +fl ur +inv ali +sau sages +im el +j or +fun fact +wit ter +puni shed +ac ons +h ya +re versi +em c +dif fu +z x +sp aw +cla d +d mit +hol land +fre sco +pay roll +ab undant +stu ffing +mor o +c ny +boy cott +wend y +ele ven +pro voc +pil ot +tr x +be ad +climate action +ri on +assi e +ì ĸ +o sm +islam ic +ho ar +good reads +al ici +afterno ons +spoke sman +jo lie +it as +masc ara +âĻ© âĻ« +pre vail +beetro ot +lu jah +k li +dod ger + » +ru le +l n +scre am +ho bart +col bert +r tc +er m +pat ro +quo ting +s live +que st +non fiction +semin ary +prosecu tors +ve st +express way +g ge +nau tical +et f +ðŁİīðŁİ Ĭ +dur ation +cha ired +the film +fab io +she h +can o +ðŁĴª ðŁı» +with draw +! :) +cor pus +phen om +yel p +la wn +ent om +snapp er +but te +pin ball +pro xy +libr e +alle vi +n ada +gabri el +fo wl +eure ka +daph ne +tu nes +pun ched +wh ore +jo g +ren tial +man ners +o pe +wh ufc +gu th +revol t +sne aker +philharmon ic +ho ste +sovereign ty +ðŁĻıðŁĻı ðŁĻı +fish ing +sci art +fe ta +i pp +dump ing +kel own +gir i +dig its +sal u +san jay +twee ters +sp as +col chester +sc ab +ma dd +๠Ħภ+Ä ĩ +ged don +march for +do p +maure en +un plugged +di do +fashion blogger +up a +mex ic +tar y +pol ye +jame son +v t +grin der +mad dy +consult ancy +¬ ë +leagueof legends +ac cents +um ni +jane iro +tu ss +h ens +ampli fier +to shi +pret tier +pre vents +new town +red wood +vant age +ball ard +ar tof +a she +a sion +lac ey +ap at +gro ve +ภĦ +rw and +real tors +tra itor +bed ding +ö r +zi on +fla shing +cam pan +boom er +secretari at +ab ol +liti gation +cont amination +se dly +shred ded +in for +do herty +bench mark +ro che +skate board +sho vel +i zz +to pper +o ster +laby rin +autu m +k ong +hum mus +vi z +tech news +kla us +am using +socialmedi amarketing +i des +cast ell +ste e +underestim ate +cal ab +pa ign +b illing +unanim ously +g mb +fly fishing +hath away +commerci al +colour ing +skul ls +pivo t +te p +tb c +motor way +x press +construc tive +pu k +under lying +kir sten +mani ac +cha o +se ma +chiff on +ðŁijĮ ðŁı» +ver ona +kom o +stan doff +wi ped +c ated +bla ir +wor kin +m sc +bethle hem +swi pe +unexpe c +pe es +pe tri +orig ami +ðŁij ħ +mex ico +flav or +ru dd +cannab is +mar u +ri ddle +wor shi +sil on +sch at +ap se +tang er +bi ous +e er +questi oned +o zar +dan k +angle sey +char an +bak u +compe ten +re pri +bat ter +sa xon +cal ves +leng ths +$ $$ +âŀ ¡ï¸ı +immer sion +ga unt +car ry +cy to +b anda +shu tt +experi ence +el gin +mous se +ta z +ê µ +in correct +en z +b ham +mor on +so ver +ar un +ti pped +la ble +de arly +bau tista +í Ļ +mor tal +woo p +dt la +sho cks +dav os +ðŁĵ Ŀ +swim wear +her man +ðŁijĩ ðŁijĩ +z ir +neglec ted +grac ed +campu ses +av s +ar ora +swach hb +live pd +ac cra +enqui ries +shoo ters +kur t +vancou ver +brad ley +gar da +g ü +ol la +attrac ting +up ton +ne win +lu mia +furn ace +ev ers +e on +sw a +roo kies +a oc +v ss +bris ket +tor ch +yo da +heart land +tac o +ph ony +food bank +ab bey +bab ylon +u y +gre ate +expre sses +d andy +sc apes +survi vor +ron d +e ci +ha vin +ab el +chil dish +tor que +wav y +ur self +kanye west +year of +ale stine +o brien +al fon +sk ag +kore an +anchor age +val eri 
+de w +ðŁİ ¨ +land slide +car ole +christ en +go phers +af i +priyan ka +q q +power of +it te +pc so +tw ol +pr y +intellec tu +guer rero +pi les +wish list +w ren +time table +ë ı +prodi gy +gibb ons +. / +ne ur +anz ac +mur ray +vie st +pla ster +la ir +art gallery +inter continental +g br +bell ator +nam joon +mam mals +am el +y aw +saras ota +cam ar +bud ding +sum mari +aco sta +la sh +ey ou +post graduate +instruc tors +ti g +const ant +were wolf +ic os +cla s +glen n +bud ge +ðŁĻ Ĥ +er ta +sta ins +persecu tion +cumb ri +o ch +syner gy +hu ang +scand in +mid terms +comment ator +regar ded +perpe tual +bo iling +al p +lan ge +sch le +fac eli +twee ta +ri dden +ok toberfest +charlotte sville +ik lan +jo u +ch atham +b sc +ðŁį ¦ +stra uss +mel low +xx xx +happy hour +re actor +ww er +distr action +at orial +ðŁĴª ðŁı¼ +twin peaks +fay ette +a or +ko k +bro om +sy fy +ou se +am ag +Ø · +ubis oft +lu lu +hall mark +stu art +it ya +si deline +venge ance +re lu +sex ism +boun cing +un ites +gu stav +te ssa +stu mp +pro clamation +ima x +divid end +col by +ðŁį İ +play wright +un safe +co smo +ðŁĩ²ðŁĩ ½ +cup board +constitu ents +ang lia +ram page +ðŁĺįðŁĺį ðŁĺįðŁĺįðŁĺį +than ked +take aways +shro ff +de bat +kh ur +conduc ts +format s +à © +port age +graph ers +u ten +pre m +mo ines +condem ns +s ous +l ps +f cs +deal ership +leuke mia +bure au +ski d +guardi ola +ca ster +thir d +avoi ded +en cyclo +c sr +vi xx +analy zing +she ar +dulu th +shap iro +chan ting +stre sses +as be +mil itia +ãĥ ª +col lin +arsen e +sure sh +teach ings +yi xing +sh ill +nu des +sv u +clear water +war ped +pro life +artist son +it u +versail les +galax y +ax el +spring st +cal a +hu hu +sc u +commit ments +exe ter +poign ant +mo tion +conserv atory +row dy +rec alled +mu sk +emb elli +so the +âĺ Ģ +sto pper +sch ild +to pe +el mo +zi el +j om +barn sley +snow den +on tour +jour ney +hills borough +par ole +w ts +mo ving +ag ility +tiv o +ff ers +kindle unlimited +g wen +ann an +ah mad +tex tured +hepat itis +dra m +insi ders +tis sues +ãĥ Ħ +fc barcelona +cr atic +na acp +pe can +f gm +custom ize +concer t +g sm +pe g +p one +justin trudeau +super cars +happy holidays +bu lar +ado x +lap tops +digital health +destin ation +gradu ally +áĥ ¦ +popp y +ss l +inhi bit +star light +of fro +glo omy +x per +hal der +im plants +le to +hass el +a as +un told +en ci +liber ia +or an +con tests +il ah +sma g +sc out +mari anne +cr yo +schedu ling +lo s +kan e +stutt gart +ne se +law rence +da in +pho tom +car ou +ภ£ +g wy +national dogday +roa sting +band camp +kentu cky +stret ches +ke rel +ca she +ãĤ ¸ +sta x +tran si +dog gie +at ric +hal le +ci vic +brow ning +lein ster +cat day +high land +joy ous +in cumb +or lando +ro mo +col ton +del ta +car ab +ro tc +aster oid +goose bumps +mo logy +yo ko +an ds +tomor rows +red carpet +sm p +ca sio +ðŁ¤£ðŁ¤£ ðŁ¤£ +se au +rejec tion +rot ating +bi partisan +th un +mat i +bon i +ol l +ener gye +do it +l j +mother hood +lou ise +neck laces +el ite +ni x +l cs +en v +gl u +le sh +cran k +su sie +m clau +so tu +crow ley +rat ri +use d +bre ton +alfre do +ye o +travel pics +ti pp +elli son +sax ophone +me red +heu ghan +ta ine +f es +vi ro +suppo sedly +i as +dige stive +y le +li zzy +wildlife photography +bri anna +west field +ra ined +am her +ðŁĺĦ ðŁĺĦ +distribu te +bott om +pre serving +oil and +craf ty +de scen +col ling +shakespeare sunday +r wc +ang led +ci an +t ations +mon tage +me yers +france sca +ðŁĮ · +wi ggins +san ford +volunte er +car ra +bar k +vari ed +pl in +am u +kap 
il +rock ers +qu ind +br ane +in mate +ent al +impro vis +michi gan +re tweeting +progre ssing +mercedes benz +smo ker +physi ology +dor ado +watt pad +h wa +sr bachchan +w ga +vol atility +hi re +ac ap +wn ba +hein z +stit ches +kidnapp ing +bur ys +lim b +f itters +thumb nail +ton e +mir and +desi rable +ad dison +tar an +tamil nadu +spec tator +soci ology +amit shah +remo tely +âĻ ¦ +ham id +r ds +g lee +smooth ly +sch ro +er c +lali ga +he als +us f +ni shi +d hu +un il +h le +tro mb +bhu tan +pilip inas +se ung +whit man +te y +min ce +snow boarding +re au +k ker +av o +zach ary +ran veer +ti k +gover n +qu al +beck y +anthropo logy +att en +grocer ies +de bit +war p +sil icon +hawa ii +ðŁĴ ħ +pomegran ate +pe er +orang es +people schoice +end ure +ðŁĴĽ ðŁĴĽ +ãĤ¹ ãĥ +ac ial +a haha +stu k +imper ial +bl ond +pow der +kno ts +vin ce +wood lands +den a +watch in +mat cha +ma hat +galax ies +middles brough +k ö +stre e +resc ues +wal do +lero y +desp ic +real ities +tm nt +ha q +un o +pe c +bolly wood +blin ds +design thinking +he ms +and hra +ab sen +fan s +ste ch +shire hour +bla ine +shak ti +pu rely +ðŁı ı +tra fal +ke ynes +gr ate +to bias +spon taneous +satur ated +caval ry +pri sc +ðŁĺ ij +wh t +pas si +~~ ~ +vir at +patt inson +la o +weir do +sym pathy +ju da +occa sionally +cred ited +stat u +es co +hil ly +esc ape +dischar ge +se er +may nard +sud bury +z lat +or al +we er +encoun tered +sm elling +over sight +ê ¸ +that cher +mack ay +you can +fre ep +freed oms +prophe cy +ho e +ishq ba +dra ke +qu its +pel led +tur k +o vi +wesle yan +new music +leg g +ch eng +h illi +ay y +pan ties +ad versity +ad jac +vaccin ation +ju ke +ga c +exce ed +time sof +sta ining +ep cot +v ital +up ward +bethe sda +apar k +ma hi +camp fire +enchan ting +rha pso +h z +na ver +fa x +vali dation +ac ad +ny r +as ym +coordin ated +depar ted +all ery +var ies +spr ite +chap lin +ss occer +s wat +bre t +relu ct +tunes app +super star +reminis cing +o co +home grown +dough nut +un canny +la pd +thyro id +! 
âĿ¤ï¸ı +botan ic +bre s +sp ade +i ste +echo es +du lil +bur sting +qui ero +ðŁij İ +loy ola +amuse ment +ha ils +sleep y +burgl ary +âľ ı +ro gue +cot land +mo ors +low er +wic ked +ðŁĶ Ĭ +compet iti +argent ine +yvon ne +karti keyan +ili ary +gat sby +precin ct +six ty +na ji +cam s +practiti oner +ðŁĺ³ ðŁĺ³ +pu ne +neg li +juli en +inv aded +cali br +cla m +duba i +mu k +lan tic +produc t +fe dex +ï¸ı : +eu ra +dari us +s ling +virtual reality +home stead +ðŁı³ï¸ıâĢį ðŁĮĪ +pac ed +in ha +pul mon +la zy +premi ering +ma stered +in he +con gregation +ba jo +sport ing +new jersey +hor ny +lma oo +leng thy +du t +yo gh +swe aring +philosoph ical +pap ua +in ski +know les +dy ke +âĢ ² +to ken +mc guire +ri ot +probab ility +mc con +gro s +su mat +c ite +da a +on da +mad dow +che w +board games +spar ked +re claimed +ad hd +ny se +imwith her +equ inox +boo ths +balsam ic +ha zy +dor chester +ag os +se aw +moder ator +seri ea +ander sen +pilgri m +âŃIJ âŃIJ +itch en +hal li +x ton +nathan iel +mun ition +celesti al +ga f +zo om +mark le +pen thouse +cal e +s fa +bar king +tu cket +em ery +cal orie +li que +ad ar +mc nam +tor tilla +wood pecker +mo town +bad ger +ayr shire +scram ble +dd ay +cra ziest +per rie +cho co +cast e +i ot +wre cked +selec ting +uss r +gra ft +pun t +lab ou +ir st +ba ek +Û Į +su ki +que u +ach at +te ster +aug mented +wc vb +sin ks +ðŁĵ » +ra ke +inter ne +be cause +belle vue +une arth +light en +ðŁĺ £ +turn around +labe led +unemp loyed +twitter kurds +le ia +h ye +great er +ðŁIJ İ +tim ed +i red +e tt +limit ations +cab e +s out +bee ch +anni hil +re trac +yo ona +ang er +den nis +supp lying +di z +" ( +sc ur +gun man +su ho +sauvi gnon +ภ¥ +wi ley +land on +choreo graphy +pre historic +ðŁı ĥ +var gas +assess ments +pinn acle +di i +chamber lain +ì Ī +v p +present ers +deut sche +sun shine +sal utes +r one +bu siest +- .- +motor ists +hemi sphere +al wx +ps p +ow a +den ying +cho c +gu tier +han uk +mus kete +jait ley +se wage +t ame +thin kers +shi m +se quo +pap ar +middle east +k wa +ke g +patag onia +no y +bar ça +take off +he a +à ¬ +n sc +g dc +ðŁij Ī +mou stache +mel ania +thr a +â¬Ĩ ï¸ı +pier ced +ze us +fon ts +ber a +it iner +q atar +contr ary +ire land +i fy +ou los +commun al +fin s +un paid +pa a +ðŁijĩ ðŁı» +ri os +ou p +f iller +cafe teria +à¸ Ń +kas i +cali ber +z ulu +v sco +ts ford +dragon fly +smo kin +pi st +psycho logist +diplom at +we bs +buc cane +à® ¾ +motiv ational +du ne +ba e +c fs +with out +er on +i ac +ate e +pen sion +fra zier +en sis +sk is +par ting +ger y +territ ories +nach os +eni ght +ever lasting +msd honi +tel e +sp un +po di +sab ah +environ mentally +ce ase +beau mont +mar ta +kel vin +ho ff +sun il +n da +co b +sh ale +ree dus +un boxing +u bio +re opened +n all +capsu les +mar r +himalay as +swee ter +ja z +f mr +twee ter +dha ka +na u +de mi +d fs +ta urus +fad ing +it utes +ci p +over flow +jef frey +don ny +car tunesapp +ðŁį ij +prefe cture +danc ed +c pt +ple asing +ital k +earth quakes +ul ation +hi o +ãĢ ĭ +ant an +nutri ent +de ere +selec ts +enrich ment +r iti +tram pol +bl amed +j ia +contribu tors +chesa peake +pi geons +tribun al +mad uro +w su +ilo ve +effici ently +dar cy +war ms +ar ra +ec u +ho wer +strugg led +rajini kanth +ðŁĺ¢ ðŁĺ¢ +hou sing +str at +eli x +disp ro +raf fic +thi erry +na sty +c fb +staf fing +al ma +back ers +hen son +sky walker +reale state +roo s +ness y +chan ce +cair ns +c ci +pe dal +ly ft +cross word +wait er +only in +kru ger +k ir +alej andro +car tier +car rera +re paired +ou at 
+un clear +un breakable +today in +qu eries +jo dy +gen ital +win ner +to l +kelown a +fascin ated +ãĥ ¬ +sris ri +squ ared +spr ung +negoti ate +priv ately +av en +>> >>> +g ical +gav in +chester field +zu mba +or r +nat alia +impeach ment +mn l +car at +criti que +credi ble +trac y +tan i +musi k +jig saw +gam bia +tol kien +fe u +as per +sav ory +fo xx +f itt +mar lon +l rt +v ell +p br +imprison ed +i om +chu l +wind shield +kay e +ba a +chor d +s art +al gon +minister ial +nat geo +la zio +nor ms +ðŁijį ðŁijį +lic king +fut bol +un sung +dalla scowboys +sh red +distur b +dev ine +be ards +ch f +b day +ro sso +ig or +ay i +si ren +k air +sti les +ro f +mag nets +un cover +mou se +bang ing +si ghted +spe ople +impac t +row land +kir a +environ ment +love the +p sis +mish ra +gl endale +ca jun +o che +de ception +sex ist +stra ws +s ga +buff er +apost le +sp l +pop up +ðŁļ Ĺ +r g +up er +ball in +i dy +occa sional +national park +ðŁı Ĭ +u an +innov ation +ภ« +te aparty +re tte +counter fe +b ha +rec s +ig en +ðŁĮ IJ +humming bird +cu r +ha ven +la zar +pue blo +: : +zi onist +op ath +inver ness +promo ter +carto on +cabine ts +mahog any +surve ying +r ational +feel ing +testi fy +so w +oc on +ภ¢ +ne el +mar is +sol itary +che mo +rad cliffe +sim ons +ros ary +new er +jo die +re tali +pra wn +pad dy +hen ge +k ala +im plant +at y +bren twood +par adox +ene z +re designed +p our +wy d +al de +௠ģ +sol d +biomed ical +๠Ĥ +tt tt +mat teo +ys er +new ton +de bun +ner dy +loo l +wo on +elisa beth +ec c +wh i +ach o +salv age +sal aries +qu ity +navig ating +oph thal +con soles +re built +o pec +ast ers +sho red +set list +kathr yn +rhy mes +re visiting +ash ish +li ft +re post +sole il +âı ± +weal th +sa at +we c +king james +flipk art +field work +se gu +mo dal +bu b +are rs +ðŁį Ĵ +clo oney +pad dington +necess ity +guth rie +pen te +li mo +jo sie +ar tin +en c +l hs +betra yal +info graphics +i er +mo a +hear ings +bon jour +sym bolic +ag ro +wed ges +krist ina +wild flower +athle tic +photograph y +pe sh +ca hill +chi lean +gou l +fi oren +ðŁij ¶ +z il +sk im +bad oo +deli a +tre ble +n cc +ðŁĩ¦ ðŁĩ +a house +bul lock +sol itude +ا٠Ĩ +can cers +futureof work +hu tch +water shed +war mongers +sp illed +colom bo +mo th +associ ations +weigh ed +global goals +not just +christ i +tor g +swe ating +man eu +clu sters +âĢ¼ï¸ı âĢ¼ï¸ı +ta ped +ul y +tru sting +yu suf +te in +ra b +, ,,, +sin ai +audi ble +explic it +cro wns +sch iz +at least +ðŁĹ £ +de bra +je suit +ene gger +z hen +one sie +i it +ss f +gur gaon +chak ra +bear cats +k ran +k awa +reque sting +han over +g end +sor os +mer cy +lovel y +do omed +tim my +ku z +ul l +ab ram +sa ison +ãĥ « +clean ers +re mo +circu its +bar red +o th +mo ist +madele ine +gall o +u j +per mits +hea viest +car ols +az te +gior gio +flo ats +decl aring +us rc +min at +craf ts +pri ma +conven i +nickelo deon +danc ing +ceremon ial +blo gg +tw p +anglic an +she k +k nick +( (( +hubb ard +harve y +hit man +fen g +we some +for za +s word +op us +bro m +gi bility +z al +m unch +dance hall +gre edy +hd mi +re birth +ðŁĺĭ ðŁĺĭ +s world +figur ine +com post +k f +engra ving +gior no +st ana +k man +ham ster +compos ers +aj e +func tionality +pol k +is ons +air planes +te se +hor rors +musc at +gi ven +sp ence +ðŁĩ¸ ðŁĩ +eli ot +ach illes +fre ck +crypto currencies +sou ther +hal o +bor neo +polit ic +hahahaha h +up state +si ena +obsc ure +hau sen +lloy d +happy friday +motor bike +bon a +americ as +hol s +- ( +spor ty +un aware +reven ues +christop her +bank sy +av 
[Tokenizer data elided for readability: the remainder of this section is a long list of byte-level BPE merge pairs — one space-separated pair per line in the source file (e.g. `ev apor`, `com press`, `eyel iner`), listed in merge-priority order, which the extraction collapsed into a handful of very long lines. Character runs such as `ðŁĺĤ` are the byte-level spellings that BPE vocabularies use for multi-byte UTF-8 characters (emoji, accented letters), not encoding errors. Many thousands of entries are omitted here.]
ap +ÑĢ и +sab cnews +one team +oh l +sun e +ri j +cd c +star gate +run down +plat o +ph c +chat ter +ra viol +mn f +mand ala +li et +ภķ +mari a +hun gover +consoli dation +fer rell +tradition al +ilove art +gal ap +ðŁı Į +que zon +espa ña +ðŁĩ¨ðŁĩ Ń +ho bby +steam boat +mali gn +guil lau +pro hi +its me +íĥ Ģ +in scription +al z +mari an +k ade +mm on +adju sting +ne sts +intern ally +ci r +vik ram +mal ala +k ph +fel icia +the real +cap tivity +at is +marcor ubio +kale ido +che v +mano j +le more +gent ri +vi ps +tro pe +" âĢĶ +pair ings +mal nutrition +fr ay +desig nation +brun omars +az e +tor rential +pan zer +ga il +under the +the ological +schizoph re +dazz le +freder ic +mo par +ad illa +so ggy +ra un +medi ocre +colo rec +i fe +p inst +blu ef + ² +world water +gir oud +clar inet +ad olf +tar antino +receip ts +assu mp +ðŁij Ł +coffe es +âľĬ ðŁı¾ +du plex +s of +r x +lin o +timber wolves +pan dit +mo tm +e ga +ay ama +ach s +outsi der +ll en +co er +til ly +cheese burger +ma ds +ple dis +emp ty +national parks +az iz +p mi +jun kies +f ener +sq n +è s +gener ation +cleop atra +bhuban es +mosqu es +ty free +popp ins +tw c +or well +n age +ka whi +hol low +dal ai +¨¨ ¨¨ +ou ro +m health +gi on +az o +vis as +reneg ade +re ic +w sop +ðŁĴļ ðŁĴĽ +e chel +tox icity +mü n +bun k +stimul ating +asth our +\ ' +ep h +ende mic +cn bc +shrin king +peabo dy +michel angelo +can yon +wal e +su mi +si ders +inu it +? . +profession alism +dr acing +plat oon +p ons +out bound +maple leafs +de sol +cen cy +a than +ver ma +ru bbing +ok an +ðŁij ł +mull ins +authent ic +Å į +alman ac +ga ia +bb q +on imo +ke h +ty a +tou ts +y av +re posit +, . +wi ght +se eyou +cal lof +done sia +bar gaining +gr anth +sd su +amphi theater +p su +re watching +wine tasting +peak district +dete cting +thur man +phe e +èª ķ +u mich +re r +sculp ted +go le +name sake +ðŁĶ ģ +serv icing +bau gh +pu gh +pen cil +dar th +munch kin +at orium +ten ers +sun y +rolling stones +mag ing +star rer +i dris +fe instein +ag ron +âĺºï¸ı âĺºï¸ı +supervis ed +chamele on +aggre gate +succe ssive +mo gul +inst yle +pol dark +custom e +ohio state +ha ya +ci des +broker age +angel ou +fifa wwc +de forestation +al ton +pam ph +hu gged +ho bo +change able +ku ber +bur roughs +demon etisation +cape cod +vers atility +or ice +le ila +womenin science +tu a +he dges +embarrass ment +ali fe +so ars +ni ghter +hy mn +gi pp +chas u +tech s +ni all +k illa +hi ka +cam els +valu e + ¢ +sc oops +mah moud +clu sive +adri ana +pac o +oz il +un as +transl ations +whispe rer +s bi +bu xton +bio tics +indi ffe +ken ney +k lar +et ching +barra best +inst ability +se ine +vo tel +blo gged +whis key +my space +t ant +lan dia +give back +illu s +aw ak +ac ab +f bloggers +cloud computing +blat ant +syri ans +band ra +sty n +an em +ke ted +kar thik +barun sob +pin ot +gu bernat +gay e +arti ste +i fied +conven tions +hu an +geni uses +eeee ee +fol ly +somer ville +pride month +ðŁĩºðŁĩ¸ ðŁĩºðŁĩ¸ +chemo therapy +paul s +bak ar +ìĦ¸ë¸ IJ +taiwan ese +fol lo +c ss +re ign +nn nn +fla un +catastro phe +iti es +frag ments +extre mists +ym oun +car men +eze kiel +conne cting +se h +man ta +remodel ing +we ymouth +at oms +ce m +ne well +lu mi +the open +mo c +mili band +g land +z shq +mag gie +mani acs +m sp +ad y +cre ams +le anne +e sta +py g +af finity +pray er +dun bar +ligh troom +ac adi +wyn onna +roman tic +state dept +sick le +wh os +lam o +et our +fin ity +shru b +shar pen +pun dit +ed on +af ore +mar s +jeff ery +ter ps +medal list +kath arine +accu sing +ta z 
+roy d +from home +confron tation +alle gh +ðŁijī ðŁijī +refresh er +ran veer +never land +jo jo +lu crative +en am +ca ver +pa edi +man jaro +flu ids +the ssal +oppre ssed +mu ss +joh anna +Ø ® +cn g +buil dthe +sett les +s ith +fu ego +cl amp +ar ag +pay er +ted x +mand y +inter stellar +fr c +ch and +b cc +mo lo +len til +johan sson +grims by +nature lovers +ðŁļ¨ ðŁļ¨ðŁļ¨ +shin de +x in +international dayof +transiti onal +sat a +cad dy +wo d +if u +ha ys +holl yo +j ang +ir c +co im +grad able +" " +ðŁį ´ +ঠ¾ +a el +n yo +west lake +time out +sof i +phenom ena +cultiv ation +ag no +un armed +so t +con j +gen o +royal navy +nutriti on +fair mont +ti relessly +sn g +re ty +mic a +lu cent +slo ane +droo l +riz al +od ell +critici zed +. '" +la ze +deser ted +co der +pra s +l illian +itiner ary +dav y +an ap +whi pping +hobo ken +kare ena +çľ Ł +vi us +ter n +nan tucket +mis understood +bu laga +st ant +chin ook +z am +reli es +d ss +ed mond +sket chy +m ell +fe x +rec tor +dist ill +day dream +wine maker +ri pley +billion aires +hel ene +ati f +cul prit +bertr and +wou ldnt +ma pped +v ak +gla dly +parliam ent +kidlit art +ware ness +goli ath +âĨ ĵ +view point +tat ted +fu ls +dor sey +ang lers +li ds +ki ya +bow les +be h +b ite +compati bility +ance stral +pro x +beha ved +gubernat orial +ch field +sab an +z h +teen y +shibu ya +holli day +pan cy +âĿĦï¸ı âĿĦï¸ı +seun gri +? , +ðŁĩ¦ ðŁĩ· +im itation +impac tful +any i +gene vie +añ os +bate man +gli der +af ar +ra sheed +effor tless +sh war +dach sh +er un +at os +kin i +ch d +kha ki +k lin +felici dades +bel o +as l +to ppers +fin ley +stac ey +rigor ous +kar ting +le ppard +car michael +be ret +c se +ak hi +mer ingue +ab an +ha ke +ger i +er jee +re sto +comm anders +pr it +fl or +ad ven +ex termin +remain der +å IJ +es g +martin o +lulla by +| @ +mi gn +in store +big bang +cor di +cau ley +ante bellum +dg ate +cro ck +span dex +scaf folding +ore os +ê°ĵ ìĦ¸ë¸IJ +pom ona +ma uro +uni versi +re mi +af ootball +t ant +sm alls +ne h +worl do +tropic al +mor ph +jav elin +gla r +arqu itec +reminis cent +tu bs +spide y +make u +syl la +progressi ves +blo t +shor ten +keep in +ch ak +ang st +super food +decad ent +ston y +neuro logical +ar boretum +ann ak +fe ma +per cu +dis respectful +small biz +lo x +co om +c sc +bs bi +pre valence +him ss +esp an +mo ga +fr ampton +sky map +mas se +levi athan +( ). +noctur nal +car ameli +ang or +amne sia +outsi ders +she alth +rhin o +ant ag +ag io +ðŁĴ° ðŁĴ° +take me +kab addi +c si +m sh +coch rane +thessal oni +sil a +ha us +du sting +obe se +mack lemore +mani sh +len in +m dc +gro wn +shef field +s rs +ke le +car son +ch um +dah lia +can tore +opp o +how ling +cyber crime +sur realism +sc ran +fa iz +thre n +rac ists +r out +pk not +se mana +sin i +mc cull +ma chi +alfon so +y b +sar dar +kend rick +den g +reci pro +on f +doom sday +bri bery +custom iz +art is +c pi +ðŁĻĪ ðŁĻĪ +sla va +let te +en s +âĿ¤ï¸ı ðŁĺĺ +cra yon +ad an +tr c +migr ate +simp son +row ers +king sley +farmers market +shee han +ne phe +bor non +car ton +mic key +all ure +u lu +sli pknot +heb do +gui do +dog celebration +online marketing +acceler ating +) .. 
+origin ated +macar oni +ed tech +out field +mit z +disc us +adverti ser +man or +ha shi +descri p +cap ita +ful bright +recep tor +con n +con ey +spion age +r attle +pre st +u li +blog post +acker ay +) âĢ¦ +red velvet +mat th +inspir ing +b sd +ker ri +po con +mil lar +re pur +accent ure +ä ¹ +ram bo +ragnar ok +dele ting +british museum +pat ory +leip zig +flori an +sci fi +in ers +br ate +yo y +melis sa +ab er +ma sa +po te +mosquit oes +transpl ant +r pa +; )) +bast ille +yl an +joye ux +melo dic +cap tions +atri st +roch dale +gott i +pew die +cuties aturday +who is +aqu aculture +tiv a +sp el +he ss +ha ji +fred die +co per +brand o +v k +photo book +* , +my dayin +micha ela +brune i +sr ini +in te +Ä ± +de ol +d fc +separ ately +bun d +ve sts +to c +me ck +rein forced +constra ints +car roll +sq ft +re ver +cam per +bird man +in action +gener ators +triumph ant +pe sts +o vo +gy pt +al amo +sc aled +suresh pp +sd n +is mo +gi os +) @ +justic eleague +restaur ant +gab i +den gue +next gen +exemp li +ap ex +inspir ational +down side +kid z +u pl +et na +alvar o +fel dman +bar net +m ha +es ch +bloo ded +>>>> >>>> +kan i +ho fficial +casablanc a +bir ds +ty ga +sw amp +o day +new castle +nb ap +ci sion +cho ols +af lo +ne p +mon ton +ak b +super model +down time +th os +sc wx +snoo py +ag greg +yo ke +nor cal +we tt +prolon ged +me tast +beat er +f ta +t lap +disgu sted +y h +voice over +itch y +ip c +ðŁİ ¾ +phe asant +stra its +ram pant +j g +fer til +assu res +fortun es +sal inas +liz ards +kett le +i bs +cyn thi +he g +mc cr +soccer oos +happen ings +cor den +ðŁĺĤ ðŁijĮ +t ches +egre t +wolver ines +congratul ated +ho gg +bott ling +wr i +fer ri +bo sch +af ire +og den +s jo +j dm +sv t +con tex +tol lywood +min k +me se +super sonic +op oulos +å ¸ +âĶ ģ +knuck le +gu ise +gam i +chu cky +z inger +radi al +compla ined +bo da +fe tal +discipl ines +cor ro +ðŁĩ®ðŁĩ ¹ +op ted +filtr ation +ad nan +em cee +mi stre +insom ni +fer gus +tra jec +on don +med tech +tanger ine +madra s +gru e +cab s +z hu +sureshpp rabhu +insul ated +day swild +pp m +band ai +v day +s ff +squ id +lo thing +not dead +expre ssive +cu ll +ala stair +x u +up front +fish ers +en es +um d +dis missal +sti er +sel s +lu st +re active +prote ster +eyel ashes +al im +goo de +gre eng +da ir +com pen +anush ka +proto typing +ma pu +bear ings +ðŁIJ Ł +for me +bsbi botany +timo thy +out skirts +am bed +are tha +wend ell +stre aks +ni m +k pk +sne e +fit ter +quo ta +p ate +win ning +ðŁį Ń +sho pping +ma inst +cul ver +ste vie +mcfad den +counter parts +gren fell +fol som +dor set +tech crunch +⬠ħï¸ı +tip tuesday +us l +tre x +geor gie +ranveer official +lic ks +se wn +k f +' âĢ¦ +jap s +p ate +orth op +fe sta +stra s +mon tal +hammer smith +fore most +wido ws +mad re +ite z +mito chondri +lig ans +z ona +cari bou +m ss +andre i +weather channel +gh c +: ... 
+ta ft +awe ather +al isation +bru tal +bliss ful +nik ola +mal icious +q m +mpg vip +bro die +bl itz +applau d +dri bb +v ague +dog go +transl ating +interpre ted +hat ched +ge tyour +benefici aries +spar ring +caes ars +aw illiams +la hat +bro ke +ti mp +virtu es +rel ying +pie tro +k tn +ici sts +pab lo +lou i +a ag +pn pp +cha st +pul ses +fini sh +usair force +type writer +thomp son +dog s +ut to +ãģ į +sand al +new ly +do ge +z w +wan kers +ne gr +mu cha +determin es +black fish +sk unk +mu ps +instru ment +phy to +daysto go +skin ned +hai der +con ten +ðŁIJ¾ ðŁIJ¾ +we iler +undoub tedly +chair ing +wall is +sh ard +zind abad +adul t +absor ption +pre sto +deplo ying +drum mond +battle front +seag ulls +how dy +juda ism +des de +part ition +âľ Ŀ +no logy +national bestfriend +lesn ar +film fare +co asts +christen sen +ac an +mb u +co pped +ru bble +sw c +fun nier +far ther +where as +nano technology +with stand +pil low +bow ers +to pe +it ly +con fit +ma kar +comfor ts +bo sh +cli pper +bal la +sti k +mil b +safe guard +musi que +eas port +ya z +pad ded +bad er +fore ign +chop in +archi ve +o ka +tran sporting +tml talk +aj it +consequ ence +sc roo +ff o +collabor ated +pug chat +ye mi +jav ed +au burn +o of +ma w +sau cer +miti gate +i les +evangeli st +ter ie +re cl +indic tment +cat a +bright ness +may the +whim sical +un lv +key word +cu min +med way +west world +tra w +im posing +form ity +coul ter +ab z +ny pd +grass i +kel sey +qld pol +clock work +f dr +di anne +âĺ ij +ad h +p ann +bra vely +ae ge +un lawful +ver di +pocaly pse +phar o +kar la +reson ance +ma stiff +la dak +bu u +ma iled +hi i +craw ley +tor rent +mach ado +liby an +effort lessly +fal sely +q vist +ke ef +craf thour +cheri shed +val kyrie +s ari +kal amaz +be he +ðŁĮ Ļ +th im +ro ddy +col trane +but chers +ach im +wk end +awk ward +cab rera +:) ))) +fran c +decl an +con dos +a ja +pandor amusic +char ter +ph ill +mon trose +hatch back +handic app +gre aves +eucalyp tus +ut most +t son +bur ton +mid wives +in cur +ðŁĺį # +moo d +compre ssed +tom a +must ang +mo g +as ana +te stic +sho tel +in sol +cor sair +nh q +ben ny +sm ma +kap ur +in con +jon as +ener gies +don al +as ad +se z +n pa +archi ved +stimul ate +do p +hy d +gri eving +ãĥ Ī +ron a +why te +tree house +ss ell +sand ro +ko bo +ther most +se clu +hi ya +ge ez +mam as +prisc illa +flav oured +fas s +w old +maker space +cospla y +p tv +happy valentinesday +sequo ia +love craft +gu an +d tm +ci i +yoko hama +pos thum +re q +ðŁĶµ âļªï¸ı +galat asar +dol by +hamp tons +disturb ance +stone henge +ok c +disrup ting +month sary +jun gle +head lights +du stin +micro sof +happy mothersday +ko ko +gra zi +te sto +na idu +mal ay +ari al +ru mb +ab oo +har man +tra pe +spo ils +je ho +go dly +lock screen +z un +pi ous +ma gento +l enders +prob able +corpor al +m our +aw al +su a +call me +ton ne +go vin +devast ation +x j +gear box +war lock +per me +it ate +gaza underattack +du val +paras ite +clement e +le th +i va +fro zen +tho les +to bin +cair n +s ill +luc kiest +conver ts +st ale +pan cra +euro pale +wis dom +sch ur +ì ¶ +verti go +bi j +u bc +nu re +righte ousness +mt c +factor y +ver st +revers ed +hur i +hee chul +fab er +ar r +ul ous +ven om +ph at +green ery +bra dy +à ¦ +: (( +never giveup +di sha +mo ta +health care +dun ham +dex po +den zel +bb ins +f ics +wh am +mc g +eli an +wat a +str alia +tel lu +pe sky +spin off +ar moured +re acted +do fficial +te du +sag ar +mor ally +paralle led +fi os +dow ner +dau gh +re do +world cup +tari q +bar ne 
+glaci ers +oc cult +barbar ian +her mosa +!! !) +y ur +inter nation +p ss +sit u +p int +american air +sw am +dopp ler +ðŁĴĻ ðŁĴľ +cincode mayo +le van +hell enic +mc ne +ju di +yu h +st x +qu are +ðŁĺĤ . +sti g +g els +mot ley +hard work +euro zone +e ad +ç¥ Ń +seab ir +ci us +la id +alpac a +presu mably +pewdie pie +boo ted +am ari +tam ine +sol ace +bar row +acade mies +x ian +om ination +dun geons +b ma +de ity +ai k +stab il +hir a +affection ate +ving ne +new port +ãħĭ ãħĭ +thir ds +re tains +aroma therapy +ski er +ni ma +do pe +cr inge +con domin +to or +anim ator +sar aj +seas cape +minim alism +lake shore +calla way +berg man +à¤ Ĺ +whisp ering +stupi d +ri ghtful +requ is +ir n +se va +ut pol +tuber culo +squ ish +de but +govern mental +christ ine +all man +weap on +s ito +bur i +lo lita +leaf y +fu ch +tin ted +mck en +a hahaha +ðŁĩµðŁĩ ¹ +repe al +ne gan +ðŁķ Ĭ +tail gating +game insight +ðŁıŁ ï¸ı +yaku za +z t +ti ring +pro posing +bow lers +tra itors +ak shi +cler gy +cit o +up sets +tu scal +symph onic +sil ently +shu ff +black well +ðŁĺĤ ) +ko be +rober to +ri dg +dc u +mer ino +ft p +east side +. ~ +nb l +mn leg +ts for +frau dul +ca pping +in my +gymna st +ston es +ss in +twe aks +shag gy +oak land +dem sin +sang ria +mm va +hen nessy +down ton +ri ghtly +in it +aga ve +ob last +northe ast +friend ship +dal a +tro phy +ðŁij ½ +mag in +margar itas +ê · +ww fc +fa sh +di ke +cu d +char t +ðŁij ® +refuge es +jop lin +n cs +imp y +firm ware +pas cu +flam in +health tech +bell letstalk +w aka +ol ls +la go +co wan +bombar dier +sh ome +ðŁĻ ħ +mc master +na ve +well s +u ta +tell ers +mis fits +kap il +face off +af firm +a pro +whit epaper +super yacht +speci mens +al located +... , +- __ +ka w +dachsh und +djo ker +s work +qui ere +or um +ðŁIJ ł +som m +c mt +ingh our +skin ny +lgb ti +gi ggles +break away +resear ched +par ity +my al +ms l +re tained +si vity +make inindia +sol ves +defam ation +wal tham +sri racha +road way +concep tu +al in +iw ant +å Ī +del ft +tender loin +ga ins +faul ts +sw ire +st ellen +pol lo +dy ne +bornon thisday +asdf ghj +sq l +sali m +advis es +vo ip +ìĹij ìĨ +un touched +she il +ontari o +uph ill +so bre +de shi +nov ella +du tton +craw fish +ا٠Ĩ +ma a +tw ine +kal in +ðŁĩµðŁĩ Ń +ye ss +brook s +hoo siers +ton ka +umbrel las +ay ers +ate am +acqu iring +su ction +ä n +wi es +tari ans +soci o +mat tb +shepher ds +o so +charity tuesday +s logans +ninj as +al bat +by te +bash ir +trampol ine +mydayin la +i ja +bas el +ror y +gol die +fi rec +un noticed +pecu liar +sch a +ker son +mour ns +liquid ity +qu ipment +hi bs +ar s +aeron au +slide show +sla bs +delici ousness +sk itchen +hta fc +full erton +cre ighton +aer ob +procrastin ation +az ores +white hall +uss occer +medi ation +djoker nole +and me +um en +noxi ous +jo ss +ili fe +anni vers +sudan ese +et res +under mine +whole foods +diso be +kor i +ade le +eli z +can ti +al on +gymna sium +sarko die +meteoro logist +yl de +ste en +stamp collecting +nas al +lo tt +fran ks +ex ol +ack i +good year +animal rights +y les +vio lets +mm es +s thel +ra pping +tu scan +wai ver +tur ner +eat local +northe asthour +anim ations +tom morow +t sh +ff ame +bra e +pe tron +glam our +br yn +d cs +bal es +ðŁĶ ¶ +bro v +bre v +b ons +physi que +car ne +x e +elix ir +vol ved +l oma +ìľ ł +æ ĺ +van u +ri gs +bal ance +va res +bon ita +sprink le +perfec to +di on +le ak +calcu tta +o ba +d ma +c mon +tun er +pneu monia +bo gus +apolo ge +cl ough +bor ne +)) )) +revi ved +o varian +ner f +c legg +fan fest +cho u 
+reali zes +mc n +li gu +leg alize +just saying +for ster +bo sni +k hi +in dom +hei del +en cryp +si ss +ed di +mar bles +brisban e +y ing +pre paid +wal sall +cooper ate +orche str +mar isa +ho wie +che wy +bren ner +andro meda +e gan +sto cki +cav endish +ag an +ban o +de ir +go g +bl k +re thinking +ch ig +rhe u +sni p +p eng +semin ole +m swx +an nex +lyn da +lewisham ilton +cu mul +tb l +dolph in +agu ero +........ .... +pre lude +at our +gr anger +too ting +ro tun +dis ar +home items +da res +**** **** +ðŁij Ĩ +compre h +jin x +as well +iri e +circul ating +ðŁIJ ¥ +over board +cultiv ate +rhe tt +oriente ering +ca k +bal kans +s itt +jas min +britney spears +ro tor +se aling +g bc +oc ci +f as +eman cip +com er +war time +tic kle +son ny +pac es +log g +at rix +sr p +g win +do bbs +uz be +the wanted +dru sh +ex tru +m icky +honore es +dar win +re dux +mm j +ram i +jalape ño +io c +do ver +ju ju +whit ney +s eng +en ly +au ch +archipel ago +vigil ant +man gal +wil dest +parano id +hal i +bb ly +sanc tioned +real ms +con co +u ddin +c sk +play time +libr a +sav ag +oc tane +rec tan +re turn +par rish +mor rha +cc p +c mu +sa iled +se vent +ro sie +pil ing +he w +boar ded +seg ments +neph ro +( . +cr ats +bak es +ðŁį ¸ +back tothe +sibl ing +kirk land +ke o +gu wa +bre ads +ðŁĺľ ðŁĺľ +t q +haras sed +ga u +wil bur +j isoo +ep er +li sam +tri ppin +sh ino +ru kh +beast mode +cho a +inst aweather +rich land +gar i +fe z +cowboy snation +fur suit +k run +a en +sycam ore +se gun +ent ennial +di h +o ax +demsin philly +ðŁĻ Ģ +sn hl +pen nies +pass words +ma kin +ty e +d eng +kni gh +jeep life +hel pline +a for +zz zz +ste amy +pic ker +iter ate +happen ingnow +ki b +bloom berg +martyr dom +bul ly +assor tment +a hora +zo e +no i +illu stri +agar wal +p sc +electr onica +recruit er +gar diner +rad ha +naf ta +dot net +pi ero +geor g +bel s +ðŁĺĤ ðŁĺį +tuberculo sis +run nin +mor is +haul ing +ev oc +bre thren +sha ir +frame works +a stu +ri gid +ku ma +kre me +jin nah +insu rers +ny u +f ere +nol lywood +good vibes +- ... +toi le +sk ril +instaweather pro +cze ch +pa vel +one piece +nike plus +fi let +cav ity +ðŁı½ âĢįâĻĤï¸ı +ðŁİ £ +dra stic +dail ys +siam ese +re bu +oste o +lar k +f re +sh elling +p é +glad ys +ðŁıĢ ðŁıĢ +gusta ve +submer ged +grand stand +att u +won t +f pv +b ley +jon i +ang ames +weigh ted +al ou +ठ¶ +les bians +f j +anni es +am l +dor ia +dav in +be ta +can c +madewith unity +ha j +bad lands +mu l +blu ec +pa wn +cov ington +neuro logy +htt weets +dysle xia +thel ove +ne at +fork lift +autom ate +une ven +monte ss +he in +ha g +rel ics +competiti veness +can elo +mar tens +bullet proof +sk ittles +g ya +pri mo +americ afirst +woo o +abor tions +?? !! 
+ma che +ld ers +rl ly +preli ms +direc t +cour se +swa in +super cell +ec centric +sting ray +ple ts +wil cox +west in +okan agan +kir an +car bo +bomb ings +ra rest +bo h +gaw d +di gg +mo ana +enti rety +en closed +dodge ball +par ton +milky way +at r +thorough bred +re ally +qant as +epiph any +ine e +aero smith +spi eth +ar thro +ell ini +du bu +bra ving +âļ½ âļ½ +re structuring +illumin ate +equ ili +mp i +ash ton +pony tail +ma scots +flat tering +cru m +ast a +à® ° +stranger things +bar nab +ر ÙĬ +make shift +got cha +will am +cho irs +kilom etres +gho sh +eu than +dol ly +un ning +the ar +cre we +w sw +j ace +dis miss +ke an +ho ta +kh at +~ > +thir u +ren dez +hart man +tee ssi +cas ca +z ah +hydr ange +fo d +aw p +mzan si +thick er +nago ya +ne va +sti que +cast el +dam ian +there by +ji ang +ale k +music islife +ra q +calla han +gou ache +somal iland +sean hannity +ra heem +lo se +elo ve +whar ton +rectan gular +illustr ating +har ne +auti sma +scra pped +ell and +decre e +nag pur +ki pp +so re +n md +ma as +gun a +gart ner +bel li +then ight +je on +gendere quality +gi ver +a el +gar ments +ne u +mardi gras +mar sden +ro wer +pollu ted +camer aman +vin od +be asley +cro c +ji u +hollyo aks +anesthe sia +al les +ste ward +lati mes +ðŁĩºðŁĩ¸ðŁĩºðŁĩ¸ ðŁĩºðŁĩ¸ +tic ian +gor ia +come dic +ðŁ¤Ķ ðŁ¤ĶðŁ¤Ķ +nai ve +sli ons +ł Ī +bur glar +ðŁĺŃðŁĺŃ ðŁĺŃðŁĺŃðŁĺŃ +york shi +se ñ +fan boy +lau rel +inci dence +potom ac +rober ta +presi den +pr yor +os bourne +w ku +te me +pal ae +ðŁ¥ º +re boun +itu de +red dish +k hand +coloni alism +north carolina +ðĿ Ĵ +manne quin +lady bird +ta sty +knowledge able +g shore +ðŁĮ Į +à® © +qu aker +salz burg +med alists +chy na +bridesma id +ma ori +ro p +outra ged +in adequate +truck ers +al ana +ìĿ ¼ +ri x +oooo oooo +command ments +lam beth +aa j +eco friendly +bla z +morecam be +boun cy +rou x +rai ded +mi zed +sh c +gaw x +labor atories +ru bs +rest room +consult ations +ca jun +virgin i +so ir +rev ue +ple in +wag er +ç ¹ +we do +growing up +! ðŁĺĬ +face ted +sin ners +ho vering +ti ene +seas oning +an ja +leg go +il is +fla x +dev o +ash ram +mati sse +ker i +go wer +bo tox +mar shes +unh cr +ts m +opti mus +dun i +stu ffs +so k +order ly +n bad +islam ophobia +raviol i +fab er +cre ds +won ka +in fusion +over weight +daily news +assi mil +acol lege +medalli on +kili manjaro +sti ff +tham es +sun ken +th ard +my dubai +hilari ously +han nel +plu mber +fair view +separ ating +rasc al +qui en +necess ities +confeder ation +ll ll +: ] +weak nesses +bron co +ra ffles +el ot +ãĤ¸ ãĥ +advent calendar +ðŁİ ¹ +stra vel +tun ic +k su +im peach +e spionage +! 
- +di ment +cur rant +bio de +commu ting +by ron +ðŁĴĵ ðŁĴĵ +shad ed +tr uro +cray ons +ar ne +h sc +fre aked +dram ati +fle ek +u cd +marl borough +^ - +cross ings +mal o +black ops +bin ance +cho ked +chen ey +pl o +ge stures +val edic +ryan air +rem ington +v cs +mc kee +ec z +be gs +nail art +mayor of +happy fathersday +war t +pet itions +n ingly +clean energy +bro x +sl alom +exist ent +ab ay +ug liest +tom p +stom a +sel by +goal scorer +ben ji +overwhel mingly +lan s +semiconduc tor +south korea +re scheduled +sk yl +en listed +dow ski +si del +rosen berg +nas ser +white head +pri us +har are +en n +ry der +í Ĥ +mon g +clas ico +transpor ter +po tty +is me +** *** +vic e +sk it +ode ssa +l mp +her n +raci ally +pin oy +paragu ay +obitu ary +go es +bu cha +side walks +angu lar +un constitutional +transiti oning +i bu +gu ys +un packing +oooo oo +black girl +ber gs + ¯ +wordof theday +trump train +thunder bolt +m si +fasci sts +ठ¬ +t sk +collap ses +raje sh +loveis love +migr ating +set back +ðŁĺĬ âĿ¤ï¸ı +t els +safety first +nar rated +jae joong +un answered +lique ur +en nes +dal go +bill ings +salt water +mer maids +lon gs +clap ham +we arec +pic collage +n ach +h ace +pois oned +lo th +ag na +adel rey +guar dia +poli shing +peace keeping +d all +p isa +la pland +process ors +de andre +so bs +p once +dra ins +c be +ðŁİ¥ : +spla sh +meat ball +fon tana +worcester shirehour +ne v +bri sk +b int +ac r +po x +cay enne +skril lex +j fc +hahahaha hahaha +gla s +en gul +tempor al +oni zed +con cre +com pose +vibr ations +plant ers +fer t +criticalrole fanart +t bli +sch allenge +huck abee +munici pal +iam bic +radi os +ne vis +dura bility +mc cla +horse back +inst itutes +ful fill +atta ch +ate ur +ak an +resi sting +illumin ation +hand le +hair care +om ent +macle od +ka iser +g no +bear down +ly f +gl omer +distor tion +z m +san k +roo sters +is now +as ports +ag en +wo ken +st george +ro mper +my le +econom ists +ru to +t will +health and +d ito +ws l +tair p +pra kash +mic heal +h ts +w rights +kat su +fioren tina +defen seman +d itch +var sity +texan scheer +ba ham +sc anned +we il +seduc tive +ðŁijį ðŁı½ +fu e +er win +dav ison +ter ran +moo ds +wool f +re source +@ . +cu sh +ðŁį ° +regre ssion +cur led +la zer +jo anne +ab bott +mo z +down ers +mm mmmm +valent ina +k hair +dream t +cro ok +che k +ste aming +nephe ws +cl eric +as ober +indefin itely +w ye +us news +joy ce +flu shing +wynonna earp +ron do +kis s +hot dog +bar ns +sax ophon +far ley +gas p +decre asing +al way +pe x +l sd +shi ft +p outine +ra zz +rescu ing +ni ko +ho ch +cc l +u aap +n ts +m car +il wx +conqu ering +ket tering +stur dy +delay ing +sto k +vani shed +cath ar +bin gham +in v +ic hiro +he mo +budge ting +[... ] +be ss +sebasti an +slow ed +ðĿ ij +musli m +stun s +acton climate +ve a +se ton +rose tta +oun t +hard in +flu id +ca w +ðŁ¥ Ĥ +yach t +un l +sp hy +provoc ative +or ic +is back +__ _ +nicol as +gy an +loo se +fl in +reb ate +: :: +! 
"@ +com icon +she ff +down stream +chic hester +beach life +mom life +diabe te +ar ra +van e +ok u +ye o +man go +try out +app ell +he irs +arjun a +dd u +na veen +movi c +soci alists +s back +criteri on +soyu z +k her +da z +yol anda +wine oclock +re ina +one w +leon ard +en dez +u bs +support local +facilit ated +carameli zed +b pa +vuel ta +my tho +m ami +spe are +nbap layoffs +fe vre +nick jonas +im print +c so +craig slist +la salle +gi deon +ha doop +dis regard +w ud +tu c +ma gee +acou stics +ta a +qui e +pol a +cr t +dw yer +dis sec +capit ol +men tion +kn oll +he igh +fin ders +plac ements +l se +indi ra +gur i +madhuri dixit +kingdom s +iambic pent +geor gina +je ky +conflic ting +bay an +aga tha +uph old +dr on +vic ar +ex pat +periph eral +pe ssi +fa f +ance stor +? .. +wid get +pun c +comm enced +beav s +air waves +ad dis +po a +de sses +co den +vu e +ru pee +kar in +spo ck +m sy +ภ° +pr ick +fill more +ti fication +thing sto +sar de +em ile +pere ira +n ad +bright ening +arre sting +wo king +usc g +sp ill +raspberry pi +hu go +ite c +is ma +cuff links +optimi zed +oc c +mi wx +en ka +el ited +afford able +sa kh +coron ado +ho h +at ul +ai oli +jim cantore +accoun ted +vin ay +her mit +groo ves +ran ch +r illa +we tter +ou tof +veter in +ni kov +ki an +fair banks +ram apho +n iti +k ko +ru sty +ne stle +tv xq +shahe er +âĿ¤âĿ¤ âĿ¤âĿ¤ +penn ant +gem stones +dem debate +ðŁIJ Ĭ +auton ews +support indiefilm +mach o +ve x +new sat +ne ti +conce ssions +can died +yof the +mac au +den ds +cricke ters +san iti +mari ano +gh at +ar toftheday +¡ ľ +e gos +gen oa +chat bots +bri er +al labout +mon ty +spi ed +r tr +comfor t +sni ppets +real time +gra in +exam ined +en lightening +tt u +god bless +release the +sing ular +ki ans +ha ka +sor ren +defe ct +mar g +equ ities +d orian +su ka +per l +aishwar ya +pul lover +preci sion +fair way +ne ve +rive ting +vill anova +en com +ak o +passion ately +europale ague +siem pre +x vi +enligh tened +c fr +âĺħâĺħ âĺħâĺħ +wast eland +is f +new comers +emergen cy +amphi theatre +- . 
+text books +figur ative +tre mb +pe sc +ab hin +ab bot +ac acia +har ds +por sche +kau ai +el isa +car rick +abo u +elli er +be ch +neu tron +galap agos +ru ben +in nis +how to +nun s +sab ine +i ac +clin ched +no tori +fi ves +cairn gor +per i +gr c +ðŁĴ¯ ðŁĴ¯ +mal m +twelf th +di ff +rout ines +marty n +lin den +synthesi zer +nu mber +game cube +fal kirk +byz antine +queu ing +gr ill +scal able +char red +rou ting +her bali +gri zz +ðŁĺŃðŁĺŃ ðŁĺŃ +tol l +termin als +l pc +ab d +war mups +remo vable +¯ \ +vi go +pap aya +ne ve +lov ingly +jo kers +ib les +sse tt +poten ti +pel e +gi gi +sadi q +leg acy +son o +ru pees +retar ded +ele e +par r +fi ance +ey re +say ers +pend ants +mak nae +al bans +adap ting +p ff +pu berty +ji u +ing rad +hypocr ite +diplom ats +phys ical +rob by +bon sai +ãģ · +f att +catal unya +âľ ĸï¸ı +ro ma +more land +so e +conver sions +stl blues +shol m +gra ssy +pra do +on u +assaul ting +> _ +sett es +dis graceful +aph ra +âļ½ï¸ı âļ½ï¸ı +ठª +kil n +goal tender +s ru +philanthro pist +b als +th n +stu den +sando val +dogre scue +eli ons +asse ssed +lar go +hec tares +sh rm +sa if +cle avage +no ches +n ene +fat alities +cur ing +clean ser +al es +p vp +south bank +pizz eria +marsh als +kni fe +an dover +tbli ghtning +sr sly +ou te +digi mon +timesof india +prome the +le bo +f su +wit z +rever e +man as +mam ba +ch ica +gu an +exhibit or +csr racing +d ere +xx xxx +gu sta +story time +ston ey +organ ics +and u +se am +min ogue +anushka sharma +ab a +ðŁİĻ ï¸ı +ugand an +chro matic +as sn +document aries +sh t +ru paul +loy d +k ats +e us +ite ch +me dusa +pan ty +kel logg +et to +talla de +sha a +do st +p ms +mari ana +je ster +croo ks +ðŁĶ ¬ +min danao +ind hoven +ðŁ¤ ª +le xi +tv n +jan is +co te +ãģ Ĩ +ser rano +iw m +ðŁIJ ¬ +k ke +distribu tors +cap u +counterfe it +camp site +ag gie +ðŁĺ ¼ +chhat tisgarh +~ @ +state u +san di +prevent able +cl s +can ne +mm c +i ver +sa haran +pal is +night out +do s +ap ia +absc bn +manag erial +aro se +mo wx +aro sa +ðŁĮ ³ +under dog +remo ver +astronom ers +lent ils +su scep +smoo ther +pend leton +fau cet +e mory +dal mati +af cb +tic us +exem pt +en rol +d heim +ðŁIJ º +restric tion +star fish +sto w +snor kel +thunder birds +she ad +homo sexual +dy n +as li +andre tti +dou che +dom o +tar mac +slu mber +pr onto +first dayof +mini ature +mari achi +argu s +recomm ending +mobi les +in ce +illustri ous +or c +adver ts +gr its +wea sel +pag oda +over pass +gre ys +maxi mus +arma gh +wood land +sun ni +ðŁĴ ī +ë Ŀ +ti one +soci o +ho s +ðŁ¤Ĺ ðŁ¤Ĺ +wind sor +subsequ ent +munch ies +id h +exclu ding +e mi +cu th +z ai +week days +law suits +barn ard +Ø ª +pe tting +net es +mul ligan +pharmac ists +ra quel +e ton +cran ston +gil ded +cle ary +ce ph +ra a +pam per +lombar di +as in +sher ry +pro d +for te +ari anism +buffalob ills +æľ ¬ +ðŁĶ¥ # +uu u +just ices +car ina +nat in +mas low +dro oling +cog nac +cam ber +el ong +r dr +in en +convic tions +am use +tro ck +harm less +visit ation +gen omic +bl and +beno it +chim p +tuscal oosa +gre asy +x po +gil t +se q +per mitted +christma seve +book s +mu e +old school +human right +be ati +ðŁĶ Ŀ +sh at +sculp ting +h wan +fern andes +sci utto +fu entes +endeav ors +maid stone +un paralleled +shou ted +queen of +mer c +band ic +ve da +sel angor +pi le +ja han +intimid ating +disapp ears +cl ich +za ha +w urst +hi v +fod ils +cor dless +aaaa aa +hy dra +bel inda +e els +bu f +su staining +rugby league +no c +brig itte +( ðŁĵ¸: +tromb one +soo the +smo g +ad p +stab le +ing ley +diagno 
se +ms g +we ss +tic keting +one e +nsw pol +e up +auto psy +adity anath +sun down +river front +si ya +p is +hier archy +dur ango +di jk +ren shaw +he aps +epide mi +david bowie +interne tof +dd i +nation ality +mb ar +air y +win der +w alia +elli ott +c x +bav arian +pl att +an tw +wi wx +sof ter +ne ha +h eller +th and +dani ela +bo ast +degra dation +ðŁĴ¦ ðŁĴ¦ +transform ing +man e +av ut +ðŁĺĪ ðŁĺĪ +vo ter +the e +t ate +pu ff +in door +sop roud +boy ce +boris johnson +wait in +immun ology +ðŁıĨðŁıĨ ðŁıĨ +âĿ Į +street food +liz asober +cavali er +c elia +need le +motor ing +g ato +, ) +ra de +harve st +t ms +jar pad +on ey +air men +v re +impair ment +abhi shek +snoo p +l ant +fam ously +bl ou +s ze +g ander +un touch +tu f +dee jay +col lateral +b ind +ðŁļ © +pin ning +ic n +' ; +the economist +ul tram +worldwater day +ti poff +the i +feed ers +campa ign +sc umb +day weekend +yo m +pe dic +h ough +ps v +pl in +on de +boston marathon +az zy +* _* +con ley +thi ago +hoo o +gal erie +luci d +je tt +gl itz +final fantasy +achiev ers +y ung +peregr ine +op hi +dam es +biom ar +âĺĢï¸ı âĺĢï¸ı +sk c +l ics +fl ank +ar rahman +ho of +uphol stery +t ats +wo z + ¿ +snor ing +ra er +l ju +ap d +pl ating +kan u +im ation +fragr ances +m ra +mor ay +mo tt +im muni +hearti es +bho pal +tim ers +g ata +color way +car nation +win get +si ghs +s ville +optimi st +chate au +olympi ans +ci o +singer songwriter +ny o +fi bers +bur ch +ag ro +mil ne +ig bo +cr amer +ation als +dan ube +pad ma +nor mani +en forced +bre ck +boeh ner +ar den +sur rendered +pros thetic +om a +ha iled +calcul ations +w fa +bi b +fcb live +fon da +west coast +que sts +friend ly +to wie +fit ch +bal ot +star dom +scrat ching +ho sa +thi ka +o ven +stro ke +out post +pharmaceu ticals +hi kari +mu y +af d +fallon tonight +squ at +or u +dra ined +chocol at +ë¯ ¼ +wor ths +ri b +mu j +that s +residen te +it el +boo st +mi gos +mul led +la a +etsy shop +don keys +me k +p tc +flin ders +e hs +ro hit +mu ir +g ad +compos itions +åĨ Ļ +combu stion +i kh +yemen i +wav ed +gar ci +ak os +oo ds +fu sion +se que +s lan +pl ur +kic chasu +shenan do +s ams +worl den +horo witz +with me +mic robes +k ki +ðŁĴĶ ðŁĴĶ +w su +patch work +fre er +y aki +the art +symboli sm +mil er +bt n +ma bu +side kick +motiv ates +sag itt +natur als +serv iced +ps ori +pa ola +qu ig +i badan +gi ggs +ë ³ +sciento logy +si oux +salam at +d res +cad bury +d hawan +ci ón +_ ' +swa pping +maris ka +james bond +explo sives +ay les +af er +s agu +cen sor +tom a +jeff erson +ring ed +par tist +ir responsible +aguil ar +vac ay +equ itable +altrin cham +ac ur +man ish +ger min +schoo led +pu tter +ed ad +nav al +toast y +sol areclipse +dish u +coy ne +ac co +mu ck +mar an +el os +len der +cro ix +worth less +ha ber +gun men +ðŁį ĵ +zen ith +t enders +hur st +hol tz +itali ans +car low +u cd +characteri stic +bun g +av l +u th +sa sia +rs l +red man +neighbor ing +green peace +sti ps +follow party +y gk +en os +omni bus +na issance +chri ssy +secu re +call back +ji hoon +memor y +block er +l anta +daf fodils +bil t +ffer ty +fau st +ie c +nipp les +so g +m nd +jagu ar +bol dly +ab poli +pro position +gun sense +evan sville +cu tters +we go +dou n +do x +stal lions +ka j +shi ppers +j awa +vol o +le ven +pap rika +kov ich +jor di +induc tees +app alling +dial ysis +allevi ate +âĢĶ âĢĶ +pie ter +mid wi +q tr +juli ette +inter mission +haw ks +act ment +one ill +k lin +vam ps +fam ous +cou ld +autom obi +da an +west end +elli p +nh c +mel anch +web series +ton gue +snat ched 
+smy th +tan gible +sl i +e asing +bar stool +over lay +afford ability +ting ed +ter as +ay ush +wanna one +rh ine +dan a +sh ana +kend al +fer tile +w ir +repl eni +lar vae +is ro +con vos +ab brevi +u cc +hun gry +bur rows +ag er +nav i +mat in +du per +cer n +ma don +ķ ï¸ı +é ģ +tu ps +hy att +sh ep +friday night +wis er +hei di +hat ton +p gh +foun tain +wrist bands +ahmadi yya +aeri al +subscri bed +so los +m ace +sla yed +for fe +dul ce +christ mass +arun jaitley +viol ate +ob stru +ni eces +w vu +idy l +fa ze +pre serves +infr inge +premi ers +inter vals +agen cy +( © +stand alone +di mes +bo er +param eters +ge tit +ðŁĺĺðŁĺĺ ðŁĺĺðŁĺĺ +tu lane +for given +scol l +mb ps +smash bros +rob bi +prima vera +ali st +ghost ly +ay at +ye ats +impre ssionist +ear phones +caul field +wai kiki +sal ute +sc ou +mu ay +louis vuitton +bak hta +ado g +inven tions +hur d +forec lo +stream line +thalai var +ch snews +will ard +t sn +euro parl +cru sher +my sore +gro wer +ra ping +pat ti +g den +sm w +muf ti +kid man +ab r +soun ders +skep tical +ðŁĶ İ +sun dar +i me +fer g +feather weight +ar lington +pas qu +ag azine +wearab le +nati c +mccl ure +inter mitt +hor de +six ties +car te +bha v +ze al +experi ential +ador ned +som mer +eno te +hypo thesis +stin ky +pro to +dead lines +vo gel +mus ings +monc ton +gu ter +f le +aci on +voice of +ta sha +inhabit ants +type face +s ba +bts x +ðŁĶ Ĵ +wor x +u hc +jo ko +cell ars +gor o +continu um +... & +weather cee +ha p +sr k +ris ers +lonely planet +un named +co eur +ðŁį Į +the world +ili ke +fa sten +ami go +ri ba +ramapho sa +staf fers +had ley +? ?" +fi ore +sal ut +hu ff +bez os +Ñ ĭ +ra der +kam ala +in line +fill ers +um atic +all in +shat ter +re in +o ku +ch ases +fla gged +baby metal +water stones +ts b +cut out +op hel +aam a +rockab illy +sto lic +jet blue +ich ick +down ton +uzbe kistan +pat na +la q +gr ange +) _/ +subsi di +sc p +newsc ast +it sa +twee tyour +e mor +archae ologists +uni fication +por ta +q x +protec tors +pro hib +charis ma +car tag +ren fre +scul pt +guwa hati +de ma +boo p +unf pa +dex ter +lay la +alleg es +sou ps +never again +l ys +cal c +bar oness +visu alize +ger ber +absor bed +i ers +a han +fon tein +detec tors +verst appen +sv c +formul ated +ac dc +li x +in competent +bh k +lour des +water house +snow ed +appreci ative +sig ma +lizasober ano +pen ned +pay check +tall inn +fanc afe +par isi +av alley +vi g +ru fc +hard ship +so cute +po ise +ì ¹ +roth schild +k ly +???? ???? +l hp +il ay +f hs +am ad +ide als +brad bury +bal boa +nic ot +kid nap +wol ve +tas manian +op t +matthi as +ãĥ³ ãĤ +super markets +mylittle pony +me lee +li ster +gr oun +fe dora +kind ness +en en +bra hms +¯\ _( +ros well +mar lene +ic u +re formation +or ail +he brides +dispar ities +terrac otta +swal lows +re id +influ encing +flu or +den e +tum our +blon des +thunder bird +sh eva +moga dishu +ka b +cre eps +i ving +ene ed +anno y +âĶ Ģ +intri gue +enqu iry +ar aj +tur al +kuber netes +end lessly +divi dends +tor a +ti sh +commemor ates +un ra +tri b +pon ty +ne m +diss ent +brew ingco +ðŁĺ ½ +nor mali +bi of +( ... 
+chil len +ì£ ¼ +mell on +av is +mccor mack +ing ra +enrich ed +custome rexperience +testo sterone +snu g +sett i +ger onimo +inqui rer +bre aches +very thing +bloom ing +mu ra +dispo s +bi de +de va +shade sof +in trin +sh ev +s ven +nayanth ara +gan esha +c ws +ber ta +label led +use um +nick named +ma han +car uso +ap ur +ðŁij Ĩ +w q +orphan age +discar ded +mag nu +lu e +je on +bridge port +pac ing +mercur y +( ðŁĵ¸ +marx ist +amphi bious +transplant ation +stit ching +then burg +gradu al +ãĤ Į +ro ft +ma ils +ine c +guy ana +dopp elg +ver o +re write +head less +harb augh +gate way +car sforsale +sw i +st is +mach t +un de +sura baya +stap leton +nur turing +mil ner +ya o +lma oooo +ko sh +arsen al +k ame +er ry +ar royo +dis misses +ru bbed +rc b +lew d +dil u +and or +vi de +ur in +inter sec +ha ar +al b +year swith +app leton +é al +ul livan +suc cu +monter rey +d mx +artem is +ron nie +farm land +s football +gro tto +anth i +ãĢ ģ +à® Ł +vid ya +jimmy fallon +ൠį +t zer +gravit ational +w thr +u hhh +e hr +tin ker +ti juana +scran ton +ram charan +bar clay +re van +m si +ka p +wr s +we thenorth +tor al +sat u +gro m +fac ep +erick son +z yn +se dge +oo dle +spur sofficial +ds p +sic ilian +soli hull +recei vers +ladak h +hend rick +ther i +presi ding +mc guinness +litt ers +gun nar +gh oul +wi b +n tv +kar o +fro ck +b lau +ampli fy +all is +ul lah +memo irs +kh loe +intercep tions +pet day +lo oney +con fin +ch ay +piyush goyal +frequ encies +ut z +event ual +warm ly +obli vion +an ka +ta it +âĿ¤ï¸ı . +director ial +ru lers +prince s +mu ck +stur ridge +deu ce +abri dged +bagu ette +un cles +pen du +min ding +forre ster +av ila +wall er +wall street +ment or +hin o +high way +crom well +fanart friday +mb i +co yle +a hi +tro ve +spie gel +pay tm +mcin tosh +jan sen +nit i +nash ville +len o +leicester shire +le gos +dic t +ðŁĵ ½ +sp ad +beverly hills +sy rah +separ ates +z ain +un fit +dra gs +tan ia +over flowing +hri thik +haw thorn +z ani +mac far +fi de +to tem +pe ds +fundament ally +cal ico +sin ner +j ä +hil de +ds d +ten ay +ta hit +mil f +lie b +inform ing +up lift +ra el +mortg ages +lec t +ii ii +guillau me +compos ites +old smobile +l end +gar th +com mish +bapti zed +scorpi ons +ru cker +bringback our +alli ance +thalap athy +tal i +sp ans +eri dge +wither spoon +lin da +sky lar +kor n +hom s +Ä į +sil enced +caf fe +ar ty +dist inguish +to wed +pun g +jessic a +ear nest +beau fort +t ama +study abroad +si khs +new bie +nav ratri +mar ble +loun ging +lit ter +dal it +so sa +iz es +gra de +com promising +tr iton +de tta +v j +chau ffe +spec tral +powe red +montess ori +artic ulate +hal ton +al co +ye y +mn twins +acoun ty +ðŁijı ðŁı¾ +âī Ī +mad men +kal a +gru m +chi k +ati s +su me +akh tar +job search +high lighter +bo ath +âĦ ¹ +tar zan +lam bo +âĽĦ ï¸ı +ox fam +dump ster +pretz els +mac os +incl ined +fac tual +adverti sers +shu i +pu ree +ml pfi +anti dote +cap o +pa str +merc ado +but ton +ar min +ag g +lol la +horri bly +er rands +christop he +time snow +monday motiv +li ss +scand als +mc i +dispropor tion +âĺ İ +sur pass +samar itan +so tho +pu rest +fl att +trivi atuesday +delec table +leop old +hermi one +chou dhary +en rich +¡ ¡ +subsi diary +ine qualities +bachel or +auto immune +la kota +i hop +ad jec +the simpsons +sh es +se k +gret chen +up stream +hin akhan +coper nic +x tina +lu g +tough ness +e ad +cli pped +bi us +sl v +fah ren +dee pak +ca u +x an +im mature +dig ni +bo bs +shred ding +but tery +accommod ations +de ven +chun ks +super league +sky bet 
+kil dare +je et +ë į +ce k +wrec ks +pro pane +oh l +tb d +quo i +trum pp +mi mo +reluct ant +ver ne +o ic +ma gh +ar nau +se ver +li dge +stair way +kicchasu deep +ðŁĶ º +mach ining +aama admi +ot i +c da +al it +pan y +inst alls +ac ct +e shop +di em +hard well +fulfill ment +sc afe +qu ack +extrac ts +swee tened +fi ghton +f di +d inger +wal tham +us ur +refe rees +seok jin +gran n +af rin +th n +sch af +par cels +bet is +amar ine +nom an +kh tar +mor itz +cou pling +bar ons +ðŁIJ ¸ +à ¸ +sl p +sad ler +x ander +tri ad +mc millan +kh z +divi ding +ìĹijìĨ Į +dar yl +zed d +le ys +pla ques +flu ori +tipper ary +on nell +di dier +lang ford +im c +the sun +bir dies +ar cha +ye ssss +t di +dar ia +cand ace +al tam +pal aces +ch it +sant am +event ful +book of +ad b +mon stax +cre ole +co el +âĸ ½ +we aren +sten nis +she ath +ati sm +gron ingen +mlpfi m +le pre +wrong ly +rsp ca +rendez vous +acknowle dging +pel vic +solic itor +sla ys +nue stra +lo d +is lander +fer oci +fashion show +ra ss +dge on +adole scents +sma shes +negli gence +grate ful +ved ere +sw oop +ing l +apol ice +vand alism +gan n +jo ao +di supdates +zimbab we +under age +radi ance +w of +bour geo +pla s +cr ani +gh ue +wrec kem +warran ts +re form +jim mie +at wood +ys l +neil himself +l bj +i man +tan to +nois se +ver bs +equip o +al together +mam ent +l ice +dou glass +tier ney +pri med +j hal +furn itu +braz ili +v ill +past els +n ison +u ff +paral ysis +jay e +im po +ðŁij ģ +strate gically +pakistan is +was sup +super bike +thank u +tru elove +sha ikh +israel is +vi p +to g +li en +la ker +grey hounds +cul ars +bian chi +balot elli +ar ran +loo s +str ates +he bron +ar vo +sunder land +the al +tomb stone +sand man +c pac +thanks giving +love him +lat ino +an in +aka if +ĭ ãĤ +tor quay +di est +alli anz +ðŁĺ ķ +golf club +cl lr +wal cott +sch nau +promp ted +nomin ating +len nox +val et +mon ro +may ward +e ph +ðŁĶ Ķ +inter oper +r da +re flex +arm chair +ê° ķ +stri pper +por ti +ph arm +ham za +ni reland +ne ue +h pv +port foli +sun burn +fris bee +be al +bapti ste +x h +ty m +pr ati +o vers +haz rat +deser t +der ry +us ky +em mett +ach arya +)_/ ¯ +shu d +may a +ham ill +ra im +nr c +fitt ings +cur vy +ðŁı ĩ +ster ling +ॠĢ +wal kin +short cuts +mil ly +ast ur +alpha be +pl i +pe z +miss you +rad ford +ml g +ta eyang +notjust lakes +du mps +seren dip +le ur +ra ving +e ster +de priv +absc bn +ðŁijĩ ðŁı» +scar city +o cr +mean ings +cap t +da hl +fer mentation +bri oche +to win +out lander +massi mo +en cro +ðŁ¥ ³ +buil t +po tam +kir i +tm w +monit ored +k ites +peoples vote +gray son +íģ ¬ +afri ka +a dies +i vote +gy ne +g annon +di x +c mc +ou ral +fox andfriends +bel i +ig ne +gl an +katrin akaif +co politics +qual itative +p si +lu cci +disc oura +âĺ ® +kel li +gau tam +carac as +reale st +pu la +in us +hill top +make aw +atten borough +tw y +r arity +peck ham +ma hon +corn elius +clin icians +ton line +tb i +paradi se +ka si +inev it +fresh ness +colling wood +lun atic +defen se +cop d +in fra +wain wright +sains bury +alab am +te ma +lac o +chec ker +releg ated +tren t +stal ks +huff post +bhubanes war +ast ral +share your +prim rose +hi me +cat an +end ment +en dow +cle mens +mal oney +hil ary +game time +den ise +collabor ators +b wo +radic als +gue tta +ici on +au a +snap matic +sat chel +excav ation +base man +s ão +gn ation +fel d +surve y +shah zad +ma st +anirud hofficial +tru cker +ot ago +geo graph +ethe l +âļ¡ï¸ı âļ¡ï¸ı +s ver +mu tt +internetof things +ancho red +wh ouse +bang la +bal main +ç¹ ĭãģ 
+break fa +á Ģ +twi ster +te tris +ca v +stag s +g z +au b +stor med +hel ens +yar mouth +st asy +gustav o +co sc +vin son +up p +sc ricket +assump tions +app e +nu h +u er +pre mise +n aga +e amon +coron ary +na f +north side +el mer +ro tar +out lining +el f +re surg +kat elyn +in can +hyster ia +ce e +am bani +pro lly +Į ãĤĬãģ +ax es +san jose +rem brandt +mag pie +even ly +scor sese +qu aint +f g +b buk +indian football +weare all +spd wy +pis ces +ec g +âĺħâĺħâĺħâĺħ âĺħ +pre orders +: | +ni pple +sal azar +ju me +jail break +min n +bas sett +ze tta +jef free +ad jun +tic on +san diego +drink local +chol era +solic itors +o bo +com post +ni an +wr a +tre ach +ic ic +profession al +del ve +leg ate +histor ia +cro issant +con noisse +nam o +palli ative +chem trails +i ority +global warming +comic art +behavi oural +re sted +li as +cli mates +Ł ãģĦ +rut land +nou rish +menopau se +hot ties +demen ti +ve spa +mel ville +anal ogue +tz man +str ung +im perfect +gl are +cir cling +ros berg +rec o +oc ity +lo ire +em be +do ssier +ne el +nan do +me a +gal vani +fin esse +ag p +berke ley +asi m +âĺº âĺº +quil ted +ish ere +un matched +po tion +for z +at re +selfi es +juli ana +ðŁļ ¶ +âĸ º +mel ton +âłĢâłĢâłĢâłĢ âłĢâłĢâłĢâłĢ +spin rilla +pur cell +ed p +at leti +tony awards +ra ja +pro gno +mol ten +stu ff +p ally +nobel prize +âĻ» ï¸ı +spiritu al +spe ake +sa sha +bri um +tru ss +critici ze +assassinscre ed +yor uba +u lo +fire man +workin progress +ef cc +fla res +ro bot +hi kers +cl l +shado wing +pat sy +leh man +c ns +å ± +guad al +à± į +ra pe +r honda +paralle ls +son ja +langu age +land ings +z ola +cr amps +bur ning +apprais al +jol la +ham m +kas a +gul ly +f go +uly sses +ri be +ðŁĴ Ħ +ib u +eti enne +bri ar +fin ely +comb ating +y ql +go tham +we chat +to paz +primar ies +l se +iz z +hel e +dispon ible +cy stic +bel ichick +th rush +kansas city +ge om +soli di +red bubble +by stand +cambridge shire +par fait +ast le +ow o +ind ore +stom ping +sm elly +ðŁ¤ ĸ +locom o +adm itting +hol me +clock wise +min sk +mc co +for get +ev p +cam ra +ab ella +yo tes +universit yof +mé xico +silver ado +ric ket +crom bie +pu j +eradic ate +deli ght +y go +glam ping +vic a +du ggan +coun ters +cf d +sc our +react js +pu ram +paras ites +in ki +vill en +stel la +li mbo +ang as +k cr +ðŁĴļðŁĴļ ðŁĴļ +vap ori +mum ford +oli gar +à ¼ +al oo +boo ties +ad r +k elli +dru mmers +av ici +nature uk +ron al +in trac +un splash +le che +g oma +el ine +envir o +bi onic +bu eno +mi k +av in +star ling +em powers +cake day +boy cot +ðŁĴļ ðŁĴļ +ðŁĮ¸ ðŁĮ¸ +v ach +m ci +fractu res +ger i +sk ing +exclu ded +lu ce +ja ve +ig gy +evi den +aki stan +a wn +mor als +luci fer +ha ban +tumb ling +sunday motivation +mo sley +captain america +sch icago +the one +mo td +d ts +ðŁIJ ¼ +rep ell +ii i +locu st +geo spatial +mer sey +immer se +desc end +ber nade +j s +boat sales +win der +cran k +sing leton +candid acy +ben a +ðŁı» âĢį +high lander +ol t +k prs +healthy lifestyle +four teen +end the +ith aca +circul ated +r ans +pre valent +ha vas +splend or +roo ster +kalamaz oo +jewell ers +enne dy +rou sey +es y +cann ons +ornam ental +// // +ren don +win ne +mol ding +eid mubarak +coun tess +simon a +ha wa +fo es +du ster +sb u +por tray +mar ries +goo dday +cho co +achi ever +ðŁĺ¹ ðŁĺ¹ +pre neur +tr amp +tom i +n bat +garden chat +farra khan +ever glades +ab ru +sou sa +se ce +homes wee +terre strial +bar it +sri devi +ol u +mel inda +f rick +can dies +ðŁĺŃ ðŁĴķ +qu reshi +family fun +exor cist +cardin al +ny t +dies el +cu 
mulus +capric orn +si ology +lor na +dou gie +an die +super sport +c fl +п ÑĢи +say ang +pe ek +ภĬ +lo be +j em +ing lis +gg led +c sn +amne sty +chu ps +ba es +sau er +ðŁı IJ +mongo lian +en et +back street +dr illed +acce ssing +ce o +b se +ai ken +pur r +wor sen +whe res +war k +testi fying +bu ri +bla st +aw g +ðŁĵ ĭ +re defining +hear ing +u ci +c mp +bon i +tail oring +ta ji +noc chi +em t +stephen king +ne et +compla ins +campaig ner +luci ano +twili ght +ti esto +pas sports +flo yd +cathe dr +na ked +caregi ver +b coz +ade cides +ku ri +ly k +br aries +dren ched +disc lose +ðŁĴª ðŁı½ +le blanc +je tty +gar ty +chip mun +b su +rhyth mic +ic z +fri d +anne x +ame x +solo ist +lanc ers +arro whead +speci fication +simul ated +na is +inver te +bo wing +wor ship +f z +abo ss +sha q +ì¶ ķ +challeng ers +an arch +aamaadmi party +ãħĭãħĭ ãħĭ +suffol k +so corro +sn ell +cla dding +absor bing +shaw a +particip ates +ðŁį Ķ +book stores +bak u +seap ort +ko jima +gab y +pack ard +electr ician +let it +mo wing +fa wad +young jae +hot mail +men ing +u rie +intim acy +con ti +: ") +lifeis good +in ciner +i dri +craz iness +jour nos +fran chi +bott len +al da +ff es +k x +south we +air a +clay ton +sco ti +f j +bri ga +ðŁ¤ĺ ðŁı» +demonstr ators +y z +stor k +na q +casc ades +travel chat +plat a +pad ma +fran ci +at tain +bat girl +lom bard +hoo s +d dos +neon atal +discla imer +r ss +r ant +di sen +tex aste +so cal +frac tal +cam ry +stri fe +sn acking +mu h +sant ander +mor ons +gra f +par ades +hu ston +dru pal +mi ento +kir stel +hy de +vom it +forti fied +sphin x +da v +bir yani +win nings +s baseball +mer ged +lovel ondon +ling ering +dream big +car leton +liveli hood +djan go +astri d +gri ds +down e +bru ised +s ne +scarec row +hel ium +f nc +bi ggs +an ter +restor ative +em pires +ab del +life style +kiwan is +colloqui um +me en +pr ick +anti que +ze b +mi mic +edmon ds +ðŁijĬ ðŁijĬ +q ing +pp el +mc gill +interpre ting +âŀ ķ +rash ad +do ka +narr ator +electro magnetic +ash by +sau ra +iran deal +âģ īï¸ı +krish nan +in di +ff en +bre a +os man +multin ational +chi ppe +recruit ers +aus biz +p ounding +re gen +cur sor +refu sal +mac s +in ak +ax ial +wa ifu +up cycled +hindu stan +cas sini +carly le +scrat ches +re ef +man atee +eat ery +ðŁĵ ¢ +un condition +sen pai +on ther +comic book +pro sciutto +de mar +mi se +ma ge +fre ec +aye sha +al der +android games +ley ton +ho ck +door way +chicagof ire +aali yah +sw elling +bi x +. 
ðŁĺĤ +evan kirstel +torpe do +kon stant +genevie ve +ma ia +ha user +do torg +hide ous +fi k +sp raw +e ek +z appa +wan dered +' ' +ra jan +bam bi +( $) +wid ening +tool box +sa ir +illumin ating +pra ys +out patient +i w +day o +lo b +sw fl +sha des +gu ms +coo kin +ko di +gri ffin +traum ati +ste a +slaugh tered +god bless +air time +pseu do +b sa +hau led +ar if +à¸Ńภĩ +le l +wc po +mil iti +char ters +worl da +ru k +k gs +digital india +is able +idyl lic +esp ino +marie tta +e bo +team canada +ab our +wil ton +rock stars +fav ored +phys ic +wrink le +tb r +d print +ball arat +ad al +z ey +ðŁĺį ðŁĶ¥ +tom lin +mt r +pal sy +fener bah +tight en +phil ia +ir oning +ry u +b ant +enqu ire +ca ir +abur ger +tru n +green berg +chau han +ir ina +sh ani +trend setter +pre tt +zaf ar +alo ve +v ici +pan ic +no o +lu stre +disrup ted +bal lis +son sof +mon si +inst ac +ake st +ëĭ ¤ +kw ame +horror movies +distric t +sau cy +mb an +ar mies +with drawn +med ics +loft us +er oom +be kind +ar ns +all on +un ison +davi ds +cr at +nicot ine +so or +sm x +on co +cospla ying +zombi es +har ms +e ger +ro sy +moon shine +fe in +ce tt +du brov +reg ents +ben itez +ðŁijıðŁı¼ ðŁijıðŁı¼ +ste c +m alia +prioriti ze +ic eland +ft se +v amo +lam ont +homo sexuality +bre es +regu i +cb p +te j +sky sports +deter gent +sha sta +de rel +conserv ancy +colori zed +accol ades +vis o +show your +nan ow +bice ps +us ability +bi m +dailys ketch +pearl jam +stran gest +mega deth +broad casts +bar ren +ar ton +chri ss +confi gu +lu res +is the +e ul +railway ana +global health +gi anni +u aap +s lum +consci ously +ab re +n up +bud get +v ada +e sch +real ness +er ased +th unt +be z +armist ice +ðŁij ¹ +sh run +o led +driver less +ðŁ¤· ðŁı»âĢįâĻĢï¸ı +won dr +sk an +sal aam +mother land +h wang +gen o +gang nam +tw right +endor sing +en ic +ador ation +pau sed +patric ks +do cked +plat te +ff xv +ethnic ity +auto show +side show +after life +re located +orphan ed +food network +dare to +and ra +sla ps +v live +swim s +re imagined +mist le +re vise +real ity +bhar ti +ðŁĴĻ ðŁĴĽ +late st +prou dest +gra sses +lan yard +fresh est +carcin oma +anom aly +zieg ler +sum ner +ly rix +gor g +is d +av el +swild life +me squ +john cena +euro league +sab er +master ful +yar ra +cogn ition +jacob son +abo lic +sir loin +shuk la +moj ito +su pere +st weet +me z +e sa +rudol f +gur a +where you +tt m +win s +trust worthy +ny k +bra den +table top +good food +es on +be k +lingui stic +gra ys +ch ath +h cs +mon i +de ans +cu ssions +ch ell +slo ws +he mi +d app +shar pie +boo sters +a os +str ack +se dona +mu eller +hard wick +or nate +thor a +sal ud +o twol +ch um +mi ho +for age +thel ittle +tear ful +ones elf +min dy +sm g +gmb h +emer ald +ðŁĶ´ âļªï¸ı +tu tti +recep tions +re vising +i brox +tope ka +sal ami +expan se +i books +dob son +cli o +at s +ðŁļ Į +mo ha +is ance +shu tters +moo t +jan ine +marvel comics +jor dani +pos er +kenne th +hy ung +de ja +ase ball +speci ality +eu ston +classic car +had ith +ðŁIJ ī +chas ing +iz o +gros ven +ag lia +thisdayin history +t row +om ile +hu ar +by n +sal ine +div ine +demon ic +ty ran +han dover +revit alization +pa ella +cryp tic +se dg +m end +dun kirk +bre d +wal d +sport scar +a ard +whe aton +da ener +k lan +br t +bakhta war +spi res +schu bert +ro ti +poli sh +o se +ag ame +wonder con +prote stant +bo sa +ðŁĺ Ł +d ü +joy ride +ger trude +âĿ Ŀ +gil a +v h +tw a +tra v +swal lowed +star ve +la in +ent ren +rei ki +su kh +cra ic +az u +web page +kee fe +hypo the +hir sch +hel le +camp ground 
+w amy +tra vi +sha hi +san deep +ru i +han uman +dw p +reposit ory +no or +no ff +un real +p ell +black history +har vick +ma scar +pay ee +pa sha +gastron omy +d ÃŃ +ai g +rosen thal +open day +embelli shed +t tip +sun bathing +go pack +end ome +ï¸ı # +invali d +final four +st fu +squish y +ra sta +mo sch +jam esc +die trich +sel a +mel b +el vi +t dp +sun i +sli t +j ha +bi za +spi ked +l li +l illard +vam pi +syno psis +az har +kendrick lamar +ĮãĤĬãģ ŁãģĦ +heart less +country file +air play +arrog ance +pre e +virtu oso +ãħłãħł ãħłãħł +raj u +le bu +for ward +tu g +dro s +mondaymotiv aton +concep cion +thel o +pad i +looo ol +ÑĢ од +it ss +eth ical +end uro +__ : +expend iture +mon ste +mas king +terri ers +ib is +e mber +cu mple +punctu ation +pi per +ir vin +ade e +yy yyyy +flash backs +cel sius +don nie +bo gota +ben evol +the script +shil pa +pro se +fin dia +ze ke +ne ko +do ves +blues lyrix +fro sh +sowe to +mp lo +al ai +sab i +raq qa +wf tv +stro ller +ian somerhalder +ðŁĶ ª +an on +mo seley +! ?!? +sta king +mol y +car tri +c sg +ast or +transc end +ma er +de ux +cow girl +sas k +pun ter +ma ken +o ates +love tt +grow ler +sag in +v n +ssi ble +officeof rg +y mc +sab ar +faul ty +ap ha +ak on +ðŁij « +snow don +ae w +raise the +ðĿ ĵ +grue some +clement ine +sp ing +lat a +worlden viron +mi mic +can aria +bakhtawar bz +ao a +fal a +ãĤ Ń +avi va +you uuu +thi gh +la dders +gu mbo +tz ky +fu zz +plastic pollution +est ate +strength ened +k ant +dr in +cal vert +transform ational +frigh tened +mac lean +elited angerous +ear thy +t son +to da +j nu +.. , +mic hal +i ban +je ong +is real +sim coe +exclu sives +blue bells +ben e +te u +pil sner +pens ke +athe ists +m pu +cartag ena +ðŁĴĹ ðŁĴĹ +million aires +kk kk +it ar +subscri ptions +remo te +ma fi +hin ton +w cc +ho k +ds b +ab leton +sevent y +pun ks +e indhoven +sh one +mcfar lane +lim popo +empha si +à ¼ +sin fo +pe tre +man grove +ch ino +ber tie +play lists +push awards +p af +deb bie +c do +r ino +ðŁı¾ âĢįâĻĤï¸ı +fol ke +bon nar +th ine +sl an +hal ter +evi e +aw some +vul tures +spar ky +seiz ures +âľ Ķ +ram one +ine ffe +al n +pro ctor +ast ra +the voice +gro te +sci on +dead line +am aya +tain ted +patter ned +exce eding +cross fit +kay lee +drop box +ru shes +tack led +mo by +retro gamer +n cbd +benef itting +shay kh +guild hall +gen try +dream cast +dread ed +bun dled +th aw +revol ving +n pt +kylie jenner +imagin ative +ron i +over came +family time +ds burg +car naval +relation ship +recogni zable +cor oner +ho le +fan fic +emir ates +bur ritos +analy se +thin ner +ne es +galli poli +bl r +cat woman +-- >> +au lt +ada ily +nau ghty +ili o +solit aire +mtv br +jocel yn +arun ach +rep ent +south gate +hy acin +essenti al +fent on +and um +it or +go pal +sl inger +po sei +aw il +wi elding +ra ila +eli as +a sto +à ¤ +tend ency +str ata +ker t +< - +im acele +da es +sti mulus +han ley +fit nes +ec stasy +lim ous +ha iling +ðŁ¤ Ń +chis wick +tar ies +sla v +pul i +moderni zation +black mail +b ingham +h fx ++ + +ðŁĩ®ðŁĩ ³ +ni v +we a +profess or +k off +bol ster +su ave +sequ ences +pepper oni +not te +dre n +ãģ¨ ç¹ĭãģ +hs v +o ga +ap tly +z ad +excel si +rin ka +mol dova +min n +ma bel +conferen cing +bas ing +of er +ob si +hamill himself +care less +brief ed +inhe rent +par ish +dub nation +town sville +sar awak +gee ky +doncaster isgreat +was abi +gu p +phen o +dra inthe +carrie underwood +ble eds +bbc world +ane w +alta f +dul wich +ani ston +w ti +sumat ra +gra fton +bl n +me ster +bode ga +re go +es q +an jo +sump 
tuous +mai sie +ï¿ ½ +wil t +jak ob +el vis +se pul +mu ster +air pollution +president e +happy monday +exten sively +fl ondon +t ls +play ing +pe ed +din ho +var dy +pi ka +n iro +au cus +ðŁį ¦ +nu ll +el ondon +juvent us +imag ines +dis ab +lit o +d ura +work places +promo te +mc caf +wood work +waw x +à® ª +tt ino +shar i +sem per +better together +ðŁijĬ ðŁı» +ze bra +pon dering +en chil +ho m +cosm ic +tan z +mo cked +ec cc +ath ed +abo lish +prop eller +paris agreement +assemb lies +indu stry +fraudul ent +pe sa +chang min +ax x +ðŁĴ µ +irr ational +cu sa +ramad han +octa via +on elove +jac ki +bar ak +taxi der +seri ous +nathan fillion +mc en +ch k +po part +grav ity +copp ola +reading fc +illu sions +j ig +ww x +re sh +ex porting +buzz ard +âĻ ¤ +p cm +lan apar +ko s +arom as +antal ya +ww dc +ven a +phil a +ball in +ðŁij Ħ +quin ta +ma o +f ery +eigh ty +sentim ents +safe guarding +r wa +pu ffs +luc ille +de cath +sl u +nu gent +de ter +braz il +ze iss +super bowl +subsi dy +alter n +hi dalgo +enz ymes +ä ½ +tag ne +hair dresser +adri en +walk out +oppo ses +can tina +bed side +af an +ðŁĶ Ĺ +prophe tic +dan es +un successful +super charged +pk k +exem ption +hart le +secu lar +cli pping +br s +united way +c net +pat chy +ha gan +e en +âļ ľ +var a +sym pathi +never trump +affir mation +om f +ny cfc +ma ja +sur ro +keer th +up scale +sandal wood +mon archy +kno bs +å ĭ +po tholes +hunger games +ter races +na sir +coun sell +welcome to +wa q +se aman +m ita +stun ningly +on theroad +in ability +) !! +bon go +ant v +sp ut +worldenviron mentday +resu sc +y td +fi m +eun hyuk +sa chin +rose anne +cler mont +ape c +am ina +v ening +n antes +al most +sin us +ex as +ty l +ti en +ple ad +lanc s +bur naby +re k +jo om +observ ers +disco graphy +cl g +âĻ ¦ +sn ack +r ti +o ily +crystal li +bru te +web development +topp ings +la f +an is +ad der +reli ving +car lin +battle of +we g +syri an +pon t +n dc +lagh ate +yu ma +sp p +p iti +ro bbing +mart ing +rey kja +raj put +nc ds +kie wicz +âĢ¢ âĢ¢ +vam pire +substan tially +opio ids +nepal i +k line +ar oo +under stand +lit t +u it +thro mbo +sar ies +qu ot +b alling +t tr +s gh +philip p +br ant +ac l +m ello +whit taker +. 
; +defi ant +b gc +repl ying +mir ren +metamor pho +sch wab +bul ge +utili zed +pick ering +par don +d sa +ภĪ +doo ley +cumul ative +Ð » +ur gency +e mir ++ /- +¦ Ī +ot as +âı ³ +station ed +grape vine +ar ac +karan johar +f ancy +sau l +coo gs +lgbt q +ا٠ħ +jav i +u mmer +pl l +den is +dai pur +pu ffin +lewi sham +fand om +co pe +ves matter +s ve +hel pless +deo dor +ostr ich +kaz an +friday the +con dor +v x +sophom ores +rob les +cu tt +cli mbers +ë¦ ¬ +sle g +sn f +mac ys +hydr ating +grou pe +po yn +mou lin +hg tv +lmfa ooo +sulph ur +asdfghj kl +annab elle +hump back +bra ved +viswas am +multi purpose +hu midi +escor ted +barb ican +f ad +cor sa +ðŁ¤ « +pi ppa +here to +can y +ser gi +or cas +o vie +ed ou +s any +glob alization +man cini +food truck +f is +defi brill +sch re +sma fia +love wins +la ut +k aka +hol lande +game on +resurg ence +out side +olympi ad +int an +abstr action +rapi d +pal om +cal le +jas min +attack ers +swag g +mit ra +ky lo +à® ² +her mitage +gor do +e ira +so sfam +roll out +exc ite +sy nod +mer rill +c als +as sa +liveli hoods +ju ve +the black +gopack go +ant lers +alban ian +wool ly +qu iche +puri fication +are th +smar thome +ne k +all blacks +mex icans +is m +ger ms +comple xion +mar ck +u shi +ðŁIJ IJ +char l +ca stic +till erson +giuli ani +biode gradable +mal bec +bo is +ju bil +im es +r ame +gene tic +esp nu +ch ley +so ho +go pher +g sc +buu ren +cu be +bridesma ids +webin ars +to e +mani pur +viol ently +notic ias +ex changing +chi ev +replac eable +muay thai +bu ss +sp il +instal ment +div ya +cait lin +o lim +fil tering +whirl wind +sta red +prior it +pr am +pompe ii +mono logue +k ite +bu ka +âĢ¦ .. +vac cine +bre ro +woz ni +sol ent +re ferr +my rt +gridi ron +galatasar ay +fro ze +clare mont +ðŁ¥ ĥ +victori as +ssel dorf +pa stures +net neutrality +ch or +ðŁij ģ +ಠ¿ +we ho +symp tom +jo sel +in ous +dragon con +power ball +p te +four thofjuly +ec la +ear buds +where abouts +salt life +depriv ation +ch ter +wi ggle +syste m +ps st +ch az +d any +ri mo +oax aca +lanapar rilla +barcel on +melanch oly +way back +ho tro +n si +l illy +kur o +ja han +intellec t +board game +ðŁı Ĭ +sneak peek +k prc +jail s +cand el +zan zi +mor timer +star ch +ra gs +p fa +long live +k art +gir ona +cro cker +christop h +precau tions +war ship +per m +paren t +van gogh +gif ford +allegh eny +ra yn +ut m +sten cil +rec alling +pen ney +z azzle +ìĥ Ŀ +hin ds +aren as +nu ev +law ler +gu in +do this +ðŁij ķ +ì¶ķ íķĺ +we g +ti b +ri din +complex es +turbul ent +pe sos +de marcus +vall arta +sam sun +kis ses +hein rich +deport es +wil ms +ur d +then ext +inki gayo +ho wi +fir sts +carri age +clean liness +mas war +is ch +ax el +si zzle +road house +fr ans +ent ourage +co bble +boo th +benedic t +tal on +fc u +year ofthe +ray on +raider nation +fo yle +ko val +pi anos +l pg +bur mese +man ure +geo caching +cosc ino +b np +fer ra +stro phy +mar ais +ce es +legen dof +kat niss +eno ch +av ed +you know +d prk +ðŁĺ¢ ðŁĺ¢ +sp un +pro st +sor rows +cent red +ke a +gal icia +? 
ðŁ¤Ķ +ÑĢод а +bou chard +ðŁĴĻ ðŁĴľ +yu i +seed lings +jon ah +reco vers +ny rd +board room +su ma +my japs +tun g +sha i +ir gc +eli o +wag ons +ka shi +polic emen +john nie +ale coscino +shop ify +dot ted +de tri +va w +to fficial +in your +chal mers +trac ed +no vi +by es +ari el +nipp on +la pel +gri ez +b gs +fool ing +d ita +vijay sethu +nm wx +as ot +kr anti +hel m +ve di +sic kest +mo chi +k abo +shru bs +he red +b sp +sq m +ham r +dul kar +anth a +nr f +avoid ance +at en +publi x +be arers +nas i +ha p +h ells +ðŁĸ ¥ +ภ· +thelast jedi +oh wx +ðŁį « +wa hoo +there se +rec aps +ss nhq +bird photography +v ay +pet ti +pau lo +bel vedere +( * +gr l +du vet +c pec +sa it +por sch +meas urable +avi ators +fre mantle +bre en +on om +me and +life saving +eu ref +en don +embar as +aira sia +el is +dun kin +star magic +s ill +porto bello +ki efer +ex e +mu ted +ãģ ¦ +we thepeople +logi a +liber al +theforce awakens +min ed +haun ts +freck les +care taker +s india +âķ IJ +dev lin +list on +direction er +oh n +fi garo +em manuel +du bois +cl ones +bru ise +ðŁİĪ ðŁİī +disin fe +der matology +as r +s watch +dis comfort +tam anna +pi day +mack en +k atic +delu sional +shaw nee +gu d +al bino +p ali +din gh +cucu mbers +coffe y +anticip ating +treas ured +web summit +shel tered +sav or +pedago gy +m gs +sh ma +s bu +den ali +cam pos +bubble gum +o ir +le aps +y ler +r one +sansk rit +min t +meat less +futuri st +du de +a vel +prote sted +squ ire +z aki +sz n +har court +cycl one +bour dain +gather ings +d ant +advent urer +parag on +alt man +dd ing +ban erjee +snorkel ing +mother well +mis sy +en der +glo ws +ki wis +chick pea +por o +e fron +app t +u y +speci fied +gab by +e strada +com bos +bour bon +vin i +var un +steph ani +key words +car vings +amit abh +wr ought +tw al +re els +clu bbing +ubi quit +cri t +ambed kar +æ Ļ +prun ing +vaccin ated +boe ing +s ks +lo ona +hypno sis +edel man +pho l +he w +colo sse +mckin sey +u on +to te +sacrific ing +ox i +n ang +e mu +пÑĢи ÑĢода +m th +kers wednesday +argu ed +timel apse +ris king +regul ating +ni gh +likeli hood +cu bic +au ction +rein for +pi stor +no ses +ye l +snu ggles +pe i +jean ette +ta ku +ri th +guy z +ภŀ +y te +ver ted +pay soff +jau regui +hoo ligans +procedu ral +mi b +har dy +el eng +chec kers +all ine +the met +prou dof +keerth yofficial +collabor ator +ni u +infl icted +adv ani +re twee +memor iam +f icial +ti ghter +sal em +re viewers +br ics +ben digo +am ell +tur kish +sush maswar +paul son +pal awan +mol lie +stitch er +s burgh +ir u +hay dn +en ers +aro a +u zzi +saraj evo +hel a +apol lo +nine ty +vac a +sp on +vent u +jel ena +hei fer +avo ids +sp ine +pri ze +mar ist +re creating +me de +woo den +find lay +ro fl +n di +compreh end +yu go +y ü +to work +u fos +son ar +pi ston +recor ding +tent ative +art forsale +pel lets +fre do +ÙĪ ر +mu ses +custom ization +pro found +is ner +ide ally +si am +plan kton +cm dr +man ger +fran ken +customiz able +ठ® +walk away +swi vel +vast ly +no ton +lex a +ex moor +z as +tan te +reduc tions +lol ly +hip sters +benef ited +ë ² +ww www +mascul ine +fi ji +dre y +ph ill +ane ous +nic ol +men dez +disapp ro +ch ner +through s +shen mue +east man +ðŁIJ İ +yu ck +under tale +re ys +go beavs +eng en +c na +mer r +bir k +ãģ¨ç¹ĭãģ ĮãĤĬãģŁãģĦ +âĥ£ @ +yn na +ste ed +offen der +at um +vani shing +presi denti +love them +g nocchi +fri ggin +per il +mad hya +ag ne +dee jay +mar nock +m tb +fold able +@ ___ +stand re +bron x +bow ski +fin ite +cro ckett +b sf +ge tit +seren awilliams +mir o +ignati 
us +sla y +rin se +fon due +sel dom +s more +gan i +dy ce +dmit ry +cru mb +late post +pri mark +oh ana +flor als +do a +remembrance day +d ds +azi one +toon ami +air port +æĿ ± +th ad +fi st +dine sh +dr who +ad words +admi rer +pro je +kyrgy z +à « +manife station +le wan +j ic +thi bau +le ased +van ity +nouri shed +never theless +aug mente +fu elled +che ad +wil shere +ru di +p z +my co +mor ro +herbali fe +hardro ck +de man +dre ality +sp ades +ce vic +bha i +bar on +ultimat efan +hou news +to bi +stru t +ke el +affili ation +the masters +sm al +hu e +este ban +con v +om nic +datab ases +co v +ter ti +st g +snoop dogg +metab ol +leth bridge +ðŁı» âĢįâĻĢï¸ı +year ling +residente vil +nws l +iy aki +griez mann +c ous +ðŁĵĿ : +tor ian +sam i +ðŁĶ¥ðŁĶ¥ ðŁĶ¥ðŁĶ¥ðŁĶ¥ +g are +alli ances +whit field +we ther +refin ing +coy i +kra ken +ðŁĺĺ âĿ¤ +singul arity +lil i +h ns +bol dand +waw rinka +misogy ny +lo vers +c q +b dg +ad ona +gar ter +women of +sc d +recogn ising +mun a +str ou +sign alling +lare do +hell boy +alek sand +un available +pedi atric +as in +mer ia +ri shi +futuri sm +w ye +polari zed +e we +pro pel +in forms +cre ase +~ " +arti ston +like for +heidel berg +er ra +life in +len ny +inter rupt +cohe rent +ca z +vick ers +le veled +f bs +cab ins +bu mmed +apost les +we h +ten don +souven irs +infu ri +pier ce +asse t +m las +go th +di ggin +ann as +yl or +th waite +sw el +pan era +mur derers +croo ked +bs go +ac u +a on +re an +one of +ko hl +bloo dh +pest icide +lost dog +fle xing +ëĤ ĺ +su pra +eter nally +ðŁļ Ļ +pa olo +ol an +mom o +is elle +captain marvel +s lou +mistak enly +akhi lesh +mer t +il inan +bu on +bal kan +mir ro +mill en +der ail +dam on +tit i +bi os +re don +pic ard +par te +ðŁ¤ Ł +Ø º +son ics +fir sth +dd c +veg ans +tur ban +ni gan +lot tie +lyn don +star buck +pink floyd +life styles +am ara +a she +r sc +val a +sm er +cw gc +cli ent +buen as +jag an +coo ps +ðŁijij ðŁijij +speci alizes +snag ged +g lar +ben net +wildlife wednesday +bow den +pi k +art in +empor ium +ar l +re ba +pas ser +disappo ints +additi ve +âľĬ ðŁı½ +bay er +missou la +ha skell +comm ences +ni x +ne man +explo ited +plastic surgery +cc d +aso cial +vo t +sie gel +fro ome +kap am +far a +e ha +pro bes +mw f +meet ing +p bb +ak ins +mistle toe +kingdom hearts +for kids +ec r +bal e +escor ts +adidas originals +k wa +k ts +hallo ffame +ðŁĺį . 
+wag s +pot ted +o wing +honey comb +he fty +uro logy +mer le +b pd +stri pping +re ich +k state +gu ay +yon ge +shak ti +g loom +bat t +son om +n ery +el ba +blan ks +hel le +triple ts +bom bay +ak arta +ab ia +transm itted +rol f +ja is +angular js +fi erc +m ss +trac e +ॠĩ +tom bs +old man +kom bucha +fo l +e health +cere als +are lli +in ari +ðŁĴ © +wo l +liber ties +fa wn +af firm +nun avut +hyster ical +k drama +art es +âĢ¢âĢ¢âĢ¢âĢ¢ âĢ¢âĢ¢âĢ¢âĢ¢ +valent in +man slaughter +gal es +eo in +energi zed +del s +with draws +st les +sar castic +ram esh +incredi bles +lock hart +ya wn +ultimatefan live +oooooooo oooooooo +mu en +guru dev +te er +pe eling +new snow +lingui stics +direc tv +ag end +uni lever +ru ger +han dedly +ero se +li mel +the c +royal ties +fini shers +nr g +m gt +fid get +com ps +bac on +aggre ssively +ab it +ch â +tar de +slu gger +q anda +gre ening +d ats +ensla ved +spec tor +o ye +fre ef +b hand +stop brexit +mis conceptions +cav a +ðŁĺįðŁĺįðŁĺįðŁĺį ðŁĺįðŁĺįðŁĺįðŁĺį +multit asking +hou sel +ferre ira +cen time +ank les +jo dh +hel ly +fro me +out tuesday +nar nia +bal aji +l bloggers +jyo ti +ðŁį ĩ +lan cia +cap ri +y ap +nat ash +down fall +." âĢĶ +à ® +ligam ent +coat ings +ai ded +hi ko +fall ing +encryp ted +yeg food +infringe ment +cu di +ce p +ðŁĺį ðŁĺĤ +tra d +super rugby +ed win +wh iche +vi meo +lay ne +in vigor +he he +dubrov nik +bie ber +u tr +sham an +op ers +ham ill +en ig +di f +ar um +scrap book +min h +diver gence +mckin non +life time +guter res +wil le +ple as +patt y +mic ron +k z +dom aine +ru sher +m ds +ches ney +screw driver +âģ© , +sle dge +hau er +chan a +stam ina +sprink ler +pl n +he ff +bol ton +om on +car rington +accor dion +jor ge +inter ception +in puts +gu ll +tran scription +vanu atu +it ical +eth os +tic h +spac ey +pee king +u mi +ha ger +psycho tic +illi an +illi a +bonnar oo +an ese +pu c +laghate parth +en hall +econom ical +dre dge +% - +u we +tu bular +scoun cil +pe asants +fl er +tumb ler +he p +ford ham +row ley +initi als +ev asion +er nation +plu gins +coch ran +c attle +acid ity +ðŁİĬ ðŁİī +re grann +jump man +ef ace +x ma +patri archy +esco bar +cristi an +tip ton +nu eva +hack ney +back seat +kill arney +aid an +sta dion +simul taneous +ida ho +a je +u th +figu re +clo s +bur k +volun tar +rec ite +macfar lane +cur few +bou do +w gn +sti x +sla p +scrat ched +philli p +jour ne +ex pelled +wa z +u ke +tati ana +ou e +ho pp +dimit ri +ðŁĵ £ +mato logist +electri fying +blu ffs +bill smafia +az cardinals +y aa +x mas +shar a +r ith +g ills +dre s +bar ton +authori zation +imperi alism +home of +to do +foot path +band width +visit spain +moh sin +erup ted +mi ki +insig nia +mike l +ss h +ger a +bank holiday +aw an +t weak +star craft +e al +construc tion +skelet ons +le ep +ine m +bar clay +ship wreck +monsi eur +yo h +ron t +form ative +ser o +le p +horse man +hoo sier +haz mat +cylin ders +cen ti +ðŁĴ¥ðŁĴ¥ ðŁĴ¥ +re em +na ire +mus ically +gras shopper +est onian +termin ology +ro main +blogger rt +tox in +stan ce +cultiv ated +an ast +ðŁIJ į +shi mano +go pher +ene i +recycla ble +gam ification +fight for +c q +avoc ados +ke ys +eli ke +gly cer +shak ur +mobili zation +gal ley +expla in +ex changed +pe th +obe dience +illa ge +en nis +ãĥ ŀ +wi v +walla bies +ma ar +ig ers +fin tech +fin alized +wo j +meaning less +in field +onna ise +e et +bron te +pass ages +ðŁij § +strick land +northern lights +lom ond +h tc +wr ay +shi fter +di alog +ðŁį į +>> >>>> +te atime +ste ch +sic huan +qu ill +fran ca +comple mentary +bar 
rington +marcu s +mal am +goo oo +for sa +elec tra +af s +âĹ Ĩ +tri fe +sn azzy +fo lia +and olan +after dark +wood son +stra de +litt lest +o gun +con wy +co wards +ðŁĺĤðŁĺĤðŁĺĤðŁĺĤ ðŁĺĤðŁĺĤðŁĺĤ +íĬ ¸ +se ul +mur phy +dun ks +kapil shar +jo achim +wom ack +equal ity +aver ages +a ine +ðŁ¦ Ī +tac ular +dis ability +u ked +mid century +bar thol +teas ers +tab ern +nj caa +sp out +op i +ku bball +bl om +so ar +popu lism +meth yl +ðŁijĬ ðŁı¼ +o spre +alo ils +ðŁĵ ĸ +ðŁĮ ļ +x er +sp illing +publ ica +car dam +adi sh +sa cha +p kg +bu da +lyric ist +i bc +gru mp +ho ver +hal ep +anti body +anem one +âĻ¥âĻ¥ âĻ¥âĻ¥ +m cl +litho graph +cc u +s fest +path ic +calli ster +otta wa +gun sn +rut ger +hali but +en vision +differenti ate +ðŁļĢ ðŁļĢ +pir an +lat el +uc n +trou bad +ra ine +fierc ely +learn english +lea se +wex mondays +em it +dray ton +bur rell +scuba diving +hol ler +dr u +clo cked +w ral +ap ro +trans lucent +w bo +patri arch +mo ja +lan nister +fish ery +ne derland +mil dly +mi rai +ma ko +ja p +ðŁĺ©ðŁĺ© ðŁĺ© +pro statec +p anna +ar ama +under taking +tomp kins +ne op +soli ds +sav oury +e ames +cut lery +wood bridge +steam er +ri zzo +wild cat +rat na +lamin ated +kin eni +jal ap +ai des +acknowle dges +?! ?!?! +! ðŁİī +w afc +mag gio +ha ves +dar je +of i +gr il +v asi +bru x +mo hd +fake speare +arn old +r mb +for be +wal leye +ro di +therapeu tics +strate gi +ob ste +mu dder +download able +dd ings +d ca +asi angames +campe on +appropri ation +th century +ram atta +dra ped +bul lion +mu c +one x +se greg +ophel ia +bod ily +âĿ¤ ðŁĺį +wi zar +te ased +ade my +to id +sur a +lazar us +sn ickers +ma se +lo h +bow ed +bibli o +x change +har lan +gho shal +flavor ful +bha gat +alle z +whiche ver +ten stein +disc er +organ iser +mt g +dream liner +t se +hok kaido +mo k +indulg ent +hick man +blin ded +al yn +aaa ah +sp ool +lough borough +inter pret +et v +aristo tle +optimi zing +avici i +madu rai +ju li +naw az +mat chups +ab ide +paint ing +w elling +vel i +octag on +in scribed +po king +plac er +life cycle +kili g +g sp +eli ves +cle ments +na sheed +me sut +incarcer ated +dist illed +wal ang +delic acy +del gado +che z +ch ita +ad ero +tu x +pati l +o do +abh cosmetics +tv c +p bc +in accurate +hardwork paysoff +ball er +quot ation +merchandi sing +ga stri +defen ses +dro gba +bex hill +ban kno +win ona +si eg +p gs +hahah ha +agu chi +su bram +mirac le +de sch +li bre +ba cher +ent ine +bbcra di +lou dest +r ps +pi erc +fr yer +storm trooper +rafael nadal +pas co +exhau stion +epic onetsy +rc tid +kel lie +ga ines +d bz +sm riti +s bridge +lim ited +cla w +technic al +bio graphical +ado red +ภ° +exclu de +ac adia +key boards +fur man +so ca +sur u +ni ps +sw aps +server less +run e +pu ffy +north ampton +nish ings +hen der +cartri dges +gun shot +ðŁĵ ¹ +fil ament +respon dents +pey ton +mountaine er +mer ging +life span +intimid ation +p afc +nl wx +expan sive +pur r +f ck +ca e +at ti +tele thon +so hn +mend el +lo pes +dor i +un broken +te red +tast ings +in active +disin tegr +t assel +share the +pi ano +is lay +air space +z awa +ricci ardo +ming ton +fresh er +cur ry +re vs +pharo ah +h mv +exhilar ating +wh oo +lin kin +kri spy +competen cy +ste wards +ne bu +kat su +ad mins +baz ar +as ar +giving back +s summit +song z +lin us +raj kumar +farm ington +fanta sia +ðŁĺ´ ðŁĺ´ +so bri +lis se +barry more +pri sm +blo b +sen ew +mono xide +exp ire +eigh teen +di pper +xi ao +kil t +hin ch +bbc sport +bam boo +p ter +ex al +ðŁ¦ ĭ +ham lin +expe ditions +star gazing +food security +wy 
lie +ul f +st ingly +on storm +lo eb +bro ome +bn ha +pancre atic +eli ve +!!!!!!!! !!! +ther apper +ortho pedic +avengers endgame +antit rust +ìļ ° +go te +om d +off side +gy llen +win eries +white water +ad l +lu pita +exce eds +consi sted +chew bacca +ash leigh +nhl jets +is san +sh ld +hay at +cran berries +ðŁ¤ĺ ðŁı½ +rock the +spring training +fall out +dairy free +wa j +un decided +so wn +rc n +north wales +htt r +fu mble +d its +comp elled +popu list +min ted +blan chett +. '' +pro pulsion +m illa +au berg +her tz +h ta +u daipur +serendip ity +azte cs +als ace +ðŁIJ ij +lu n +sho es +char li +gar za +ðŁĴ Ł +pro biotics +fox tv +ol is +mi ff +loc alized +diffu ser +si gue +fun ko +rend ous +ðŁĴ ij +jeky ll diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/vocab.json b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/vocab.json new file mode 100644 index 0000000000000000000000000000000000000000..182766ce89b439768edadda342519f33802f5364 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/compiled/vocab.json @@ -0,0 +1 @@ +{"!":0,"\"":1,"#":2,"$":3,"%":4,"&":5,"'":6,"(":7,")":8,"*":9,"+":10,",":11,"-":12,".":13,"/":14,"0":15,"1":16,"2":17,"3":18,"4":19,"5":20,"6":21,"7":22,"8":23,"9":24,":":25,";":26,"<":27,"=":28,">":29,"?":30,"@":31,"A":32,"B":33,"C":34,"D":35,"E":36,"F":37,"G":38,"H":39,"I":40,"J":41,"K":42,"L":43,"M":44,"N":45,"O":46,"P":47,"Q":48,"R":49,"S":50,"T":51,"U":52,"V":53,"W":54,"X":55,"Y":56,"Z":57,"[":58,"\\":59,"]":60,"^":61,"_":62,"`":63,"a":64,"b":65,"c":66,"d":67,"e":68,"f":69,"g":70,"h":71,"i":72,"j":73,"k":74,"l":75,"m":76,"n":77,"o":78,"p":79,"q":80,"r":81,"s":82,"t":83,"u":84,"v":85,"w":86,"x":87,"y":88,"z":89,"{":90,"|":91,"}":92,"~":93,"¡":94,"¢":95,"£":96,"¤":97,"¥":98,"¦":99,"§":100,"¨":101,"©":102,"ª":103,"«":104,"¬":105,"®":106,"¯":107,"°":108,"±":109,"²":110,"³":111,"´":112,"µ":113,"¶":114,"·":115,"¸":116,"¹":117,"º":118,"»":119,"¼":120,"½":121,"¾":122,"¿":123,"À":124,"Á":125,"Â":126,"Ã":127,"Ä":128,"Å":129,"Æ":130,"Ç":131,"È":132,"É":133,"Ê":134,"Ë":135,"Ì":136,"Í":137,"Î":138,"Ï":139,"Ð":140,"Ñ":141,"Ò":142,"Ó":143,"Ô":144,"Õ":145,"Ö":146,"×":147,"Ø":148,"Ù":149,"Ú":150,"Û":151,"Ü":152,"Ý":153,"Þ":154,"ß":155,"à":156,"á":157,"â":158,"ã":159,"ä":160,"å":161,"æ":162,"ç":163,"è":164,"é":165,"ê":166,"ë":167,"ì":168,"í":169,"î":170,"ï":171,"ð":172,"ñ":173,"ò":174,"ó":175,"ô":176,"õ":177,"ö":178,"÷":179,"ø":180,"ù":181,"ú":182,"û":183,"ü":184,"ý":185,"þ":186,"ÿ":187,"Ā":188,"ā":189,"Ă":190,"ă":191,"Ą":192,"ą":193,"Ć":194,"ć":195,"Ĉ":196,"ĉ":197,"Ċ":198,"ċ":199,"Č":200,"č":201,"Ď":202,"ď":203,"Đ":204,"đ":205,"Ē":206,"ē":207,"Ĕ":208,"ĕ":209,"Ė":210,"ė":211,"Ę":212,"ę":213,"Ě":214,"ě":215,"Ĝ":216,"ĝ":217,"Ğ":218,"ğ":219,"Ġ":220,"ġ":221,"Ģ":222,"ģ":223,"Ĥ":224,"ĥ":225,"Ħ":226,"ħ":227,"Ĩ":228,"ĩ":229,"Ī":230,"ī":231,"Ĭ":232,"ĭ":233,"Į":234,"į":235,"İ":236,"ı":237,"IJ":238,"ij":239,"Ĵ":240,"ĵ":241,"Ķ":242,"ķ":243,"ĸ":244,"Ĺ":245,"ĺ":246,"Ļ":247,"ļ":248,"Ľ":249,"ľ":250,"Ŀ":251,"ŀ":252,"Ł":253,"ł":254,"Ń":255,"!":256,"\"":257,"#":258,"$":259,"%":260,"&":261,"'":262,"(":263,")":264,"*":265,"+":266,",":267,"-":268,".":269,"/":270,"0":271,"1":272,"2":273,"3":274,"4":275,"5":276,"6":277,"7":278,"8":279,"9":280,":":281,";":282,"<":283,"=":284,">":285,"?":286,"@":287,"A":288,"B":289,"C":290,"D":291,"E":292,"F":293,"G":294,"H":295,"I":296,"J":297,"K":298,"L":299,"M":300,"N":301,"O":302,"P":303,"Q":304,"R":305,"S":306,"T":307,"U":308,"V":309,"W":310,"X":311,"Y":312,"Z":313,"[":314,"\\":315,"]":316,"^":317,"_":318,"`":319
,"a":320,"b":321,"c":322,"d":323,"e":324,"f":325,"g":326,"h":327,"i":328,"j":329,"k":330,"l":331,"m":332,"n":333,"o":334,"p":335,"q":336,"r":337,"s":338,"t":339,"u":340,"v":341,"w":342,"x":343,"y":344,"z":345,"{":346,"|":347,"}":348,"~":349,"¡":350,"¢":351,"£":352,"¤":353,"¥":354,"¦":355,"§":356,"¨":357,"©":358,"ª":359,"«":360,"¬":361,"®":362,"¯":363,"°":364,"±":365,"²":366,"³":367,"´":368,"µ":369,"¶":370,"·":371,"¸":372,"¹":373,"º":374,"»":375,"¼":376,"½":377,"¾":378,"¿":379,"À":380,"Á":381,"Â":382,"Ã":383,"Ä":384,"Å":385,"Æ":386,"Ç":387,"È":388,"É":389,"Ê":390,"Ë":391,"Ì":392,"Í":393,"Î":394,"Ï":395,"Ð":396,"Ñ":397,"Ò":398,"Ó":399,"Ô":400,"Õ":401,"Ö":402,"×":403,"Ø":404,"Ù":405,"Ú":406,"Û":407,"Ü":408,"Ý":409,"Þ":410,"ß":411,"à":412,"á":413,"â":414,"ã":415,"ä":416,"å":417,"æ":418,"ç":419,"è":420,"é":421,"ê":422,"ë":423,"ì":424,"í":425,"î":426,"ï":427,"ð":428,"ñ":429,"ò":430,"ó":431,"ô":432,"õ":433,"ö":434,"÷":435,"ø":436,"ù":437,"ú":438,"û":439,"ü":440,"ý":441,"þ":442,"ÿ":443,"Ā":444,"ā":445,"Ă":446,"ă":447,"Ą":448,"ą":449,"Ć":450,"ć":451,"Ĉ":452,"ĉ":453,"Ċ":454,"ċ":455,"Č":456,"č":457,"Ď":458,"ď":459,"Đ":460,"đ":461,"Ē":462,"ē":463,"Ĕ":464,"ĕ":465,"Ė":466,"ė":467,"Ę":468,"ę":469,"Ě":470,"ě":471,"Ĝ":472,"ĝ":473,"Ğ":474,"ğ":475,"Ġ":476,"ġ":477,"Ģ":478,"ģ":479,"Ĥ":480,"ĥ":481,"Ħ":482,"ħ":483,"Ĩ":484,"ĩ":485,"Ī":486,"ī":487,"Ĭ":488,"ĭ":489,"Į":490,"į":491,"İ":492,"ı":493,"IJ":494,"ij":495,"Ĵ":496,"ĵ":497,"Ķ":498,"ķ":499,"ĸ":500,"Ĺ":501,"ĺ":502,"Ļ":503,"ļ":504,"Ľ":505,"ľ":506,"Ŀ":507,"ŀ":508,"Ł":509,"ł":510,"Ń":511,"in":512,"th":513,"an":514,"re":515,"ar":516,"er":517,"the":518,"ing":519,"ou":520,"on":521,"st":522,"or":523,"en":524,"on":525,"al":526,"at":527,"er":528,"it":529,"in":530,"to":531,"ro":532,"is":533,"le":534,"ic":535,"at":536,"and":537,"ed":538,"of":539,"ch":540,"or":541,"es":542,"il":543,"el":544,"st":545,"ac":546,"om":547,"am":548,"lo":549,"an":550,"ay":551,"sh":552,"ri":553,"li":554,"ti":555,"for":556,"ne":557,"ðŁ":558,"ra":559,"ha":560,"de":561,"ol":562,"ve":563,"si":564,"ur":565,"al":566,"se":567,"'s":568,"un":569,"di":570,"be":571,"la":572,"wh":573,"oo":574,"day":575,"en":576,"ma":577,"no":578,"le":579,"to":580,"our":581,"ir":582,"gh":583,"wit":584,"it":585,"yo":586,"as":587,"sp":588,"this":589,"ts":590,"ati":591,"you":592,"with":593,"ad":594,"is":595,"ab":596,"ly":597,"we":598,"the":599,"te":600,"as":601,"ag":602,"vi":603,"pp":604,"su":605,"ho":606,"my":607,"..":608,"bu":609,"com":610,"se":611,"ers":612,"me":613,"me":614,"all":615,"con":616,"mo":617,"ke":618,"ge":619,"out":620,"ent":621,"co":622,"fe":623,"ver":624,"ar":625,"fro":626,"au":627,"po":628,"ce":629,"ght":630,"are":631,"ss":632,"from":633,"ch":634,"tr":635,"oun":636,"one":637,"by":638,"do":639,"th":640,"wor":641,"ere":642,"ke":643,"pro":644,"for":645,"ds":646,"bo":647,"ta":648,"we":649,"go":650,"he":651,"ter":652,"ing":653,"de":654,"be":655,"ation":656,"mor":657,"ay":658,"ex":659,"ill":660,"pe":661,"ks":662,"sc":663,"lu":664,"fu":665,"qu":666,"ver":667,"ðŁĺ":668,"ju":669,"mu":670,"ate":671,"and":672,"ve":673,"king":674,"mar":675,"op":676,"hi":677,"...":678,"pre":679,"ad":680,"ru":681,"that":682,"jo":683,"of":684,"ce":685,"new":686,"am":687,"ap":688,"gre":689,"ss":690,"du":691,"now":692,"ye":693,"ting":694,"your":695,"ity":696,"ni":697,"ci":698,"par":699,"gu":700,"fi":701,"af":702,"per":703,"ter":704,"up":705,"so":706,"gi":707,"ons":708,"gr":709,"ge":710,"br":711,"pl":712,"'t":713,"mi":714,"ine":715,"wee":716,"bi":717,"us":718,"sho":719,"have":720,"today":721,"av":722,"man":723,"ent":724,"ack":725,"ure":726,"our":7
27,"âĢ":728,"cu":729,"ld":730,"loo":731,"im":732,"ice":733,"som":734,"fin":735,"red":736,"ren":737,"ood":738,"was":739,"tion":740,"pi":741,"ir":742,"ther":743,"ty":744,"ph":745,"ard":746,"ec":747,"!!":748,"mon":749,"more":750,"will":751,"tra":752,"can":753,"col":754,"pu":755,"te":756,"wn":757,"mb":758,"so":759,"iti":760,"just":761,"ning":762,"here":763,"tu":764,"pa":765,"pr":766,"but":767,"what":768,"ally":769,"fir":770,"min":771,"ca":772,"ant":773,"sa":774,"ted":775,"ev":776,"ment":777,"fa":778,"get":779,"ame":780,"about":781,"gra":782,"not":783,"happ":784,"ays":785,"man":786,"his":787,"time":788,"like":789,"gh":790,"has":791,"than":792,"love":793,"art":794,"ste":795,"ding":796,"he":797,"cre":798,"ws":799,"wat":800,"der":801,"ite":802,"ser":803,"ace":804,"age":805,"end":806,"str":807,"aw":808,"stor":809,"re":810,"car":811,"ell":812,"all":813,"ps":814,"fri":815,"pho":816,"por":817,"do":818,"ak":819,"wi":820,"fre":821,"who":822,"shi":823,"boo":824,"son":825,"ell":826,"when":827,"ill":828,"how":829,"great":830,"win":831,"el":832,"bl":833,"ssi":834,"ali":835,"some":836,"ðŁĴ":837,"ton":838,"der":839,"les":840,"pla":841,"ï¸":842,"ed":843,"sch":844,"hu":845,"ong":846,"don":847,"ki":848,"sh":849,"ann":850,"cor":851,"..":852,"ound":853,"az":854,"ine":855,"ary":856,"ful":857,"stu":858,"ould":859,"sti":860,"go":861,"see":862,"able":863,"ars":864,"ll":865,"mis":866,"ber":867,"ck":868,"wa":869,"ents":870,"no":871,"sig":872,"fe":873,"first":874,"et":875,"spe":876,"ack":877,"if":878,"ous":879,"'m":880,"ster":881,"app":882,"ang":883,"ance":884,"ans":885,"good":886,"bre":887,"ever":888,"they":889,"tic":890,"come":891,"off":892,"back":893,"ase":894,"ings":895,"old":896,"ight":897,"fo":898,"her":899,"happy":900,"pic":901,"its":902,"ving":903,"us":904,"mat":905,"hom":906,"dy":907,"em":908,"sk":909,"ying":910,"their":911,"led":912,"ry":913,"ul":914,"har":915,"ck":916,"ton":917,"onal":918,"hel":919,"ric":920,"bir":921,"vie":922,"way":923,"tri":924,"da":925,"ple":926,"bro":927,"sto":928,"ool":929,"night":930,"tru":931,"ba":932,"read":933,"res":934,"year":935,"fr":936,"tor":937,"als":938,"coun":939,"cla":940,"ture":941,"vel":942,"ated":943,"lec":944,"end":945,"thing":946,"vo":947,"ici":948,"best":949,"can":950,"work":951,"last":952,"after":953,"ence":954,"pri":955,"pe":956,"es":957,"il":958,"âĢ¦":959,"dre":960,"ys":961,"over":962,"ies":963,"ðŁij":964,"comm":965,"tw":966,"ink":967,"sun":968,"cl":969,"life":970,"tt":971,"ach":972,"land":973,"sy":974,"tre":975,"tal":976,"pol":977,"sm":978,"duc":979,"sal":980,"ft":981,"'re":982,"che":983,"war":984,"tur":985,"ations":986,"ach":987,"ms":988,"ile":989,"pm":990,"ough":991,"ate":992,"star":993,"week":994,"!!!":995,"clu":996,"there":997,"ner":998,"tom":999,"sel":1000,"ï¸ı":1001,"world":1002,"ves":1003,"cam":1004,"got":1005,"inter":1006,"off":1007,"um":1008,"tonight":1009,"other":1010,"hou":1011,"look":1012,"je":1013,"id":1014,"sion":1015,"beau":1016,"att":1017,"eli":1018,"ort":1019,"rec":1020,"ff":1021,"ster":1022,"supp":1023,"gen":1024,"been":1025,"ily":1026,"team":1027,"mm":1028,"ic":1029,"peop":1030,"itt":1031,"ats":1032,"only":1033,"mber":1034,"eng":1035,"bri":1036,"mp":1037,"know":1038,"bur":1039,"bar":1040,"ins":1041,"low":1042,"she":1043,"row":1044,"âĿ":1045,"tro":1046,"people":1047,"via":1048,"low":1049,"aga":1050,"bet":1051,"xt":1052,"fac":1053,"char":1054,"ear":1055,"wal":1056,"sen":1057,"fam":1058,"ble":1059,"nati":1060,"ish":1061,"nor":1062,"game":1063,"live":1064,"sco":1065,"ley":1066,"don":1067,"ick":1068,"ball":1069,"very":1070,"these":1071,"pan":1072,"ia":1
073,"ating":1074,"cr":1075,"are":1076,"gir":1077,"make":1078,"stre":1079,"show":1080,".\"":1081,"fl":1082,"up":1083,"dr":1084,"thanks":1085,"illi":1086,"wom":1087,"sts":1088,"ig":1089,"sur":1090,"every":1091,"cur":1092,"view":1093,"let":1094,"into":1095,"most":1096,"na":1097,"indi":1098,"gar":1099,"had":1100,"sou":1101,"ved":1102,"ant":1103,"ition":1104,"made":1105,"fol":1106,"uni":1107,"ited":1108,"ðŁı":1109,"ical":1110,"thr":1111,"ready":1112,"chec":1113,"dra":1114,"kes":1115,"book":1116,"ep":1117,"sic":1118,"morning":1119,"news":1120,"cau":1121,"ct":1122,"well":1123,"anc":1124,"photo":1125,"than":1126,"ors":1127,"birth":1128,"gg":1129,"out":1130,"next":1131,"some":1132,"ening":1133,"story":1134,"chri":1135,"down":1136,"home":1137,"ffe":1138,"free":1139,"da":1140,"bor":1141,"fil":1142,"cial":1143,"thank":1144,"side":1145,"lear":1146,"que":1147,"line":1148,"ten":1149,"ates":1150,"years":1151,"my":1152,"photo":1153,"beauti":1154,"right":1155,"nu":1156,"form":1157,"ship":1158,"ban":1159,"ther":1160,"days":1161,"gam":1162,"ason":1163,"gy":1164,"ðŁİ":1165,"birthday":1166,"set":1167,"ick":1168,"et":1169,"still":1170,"coming":1171,"take":1172,"ðŁĩ":1173,"bb":1174,"sol":1175,"son":1176,"den":1177,"ep":1178,"music":1179,"them":1180,"den":1181,"why":1182,"foo":1183,"cra":1184,"amaz":1185,"wn":1186,"hol":1187,"tting":1188,"wr":1189,"ue":1190,"mag":1191,"cro":1192,"lan":1193,"clo":1194,"bra":1195,"ak":1196,"sing":1197,"cal":1198,"read":1199,"'ve":1200,"joh":1201,"bab":1202,"dri":1203,"blo":1204,"big":1205,"eric":1206,"int":1207,"tor":1208,"try":1209,"la":1210,"leg":1211,"house":1212,"mic":1213,"val":1214,"beautiful":1215,"litt":1216,"check":1217,"new":1218,"vers":1219,"sw":1220,"ari":1221,"play":1222,"her":1223,"âĢĵ":1224,"win":1225,"ma":1226,"congr":1227,"school":1228,"fun":1229,".@":1230,"heal":1231,"ich":1232,"del":1233,"where":1234,"lon":1235,"ket":1236,"two":1237,"much":1238,"watch":1239,"ven":1240,"ded":1241,"ast":1242,"ked":1243,"bas":1244,"going":1245,"mp":1246,"ever":1247,"ways":1248,"roo":1249,"desig":1250,"ly":1251,"sed":1252,"top":1253,"lin":1254,"chan":1255,"too":1256,"iting":1257,"dent":1258,"ghts":1259,"ty":1260,"spo":1261,"need":1262,"blu":1263,"inst":1264,"being":1265,"âĿ¤":1266,"wel":1267,"ls":1268,"him":1269,"may":1270,"sting":1271,"na":1272,"ely":1273,"little":1274,"ga":1275,"nat":1276,"tomor":1277,"mc":1278,"hon":1279,"want":1280,"air":1281,"pic":1282,"americ":1283,"per":1284,"less":1285,"week":1286,"vel":1287,"ah":1288,"cap":1289,"cham":1290,"ger":1291,"tim":1292,"tomorrow":1293,"ness":1294,"state":1295,"hal":1296,"serv":1297,"ze":1298,"os":1299,"pat":1300,"vis":1301,"exc":1302,"sin":1303,"ff":1304,"city":1305,"cen":1306,"any":1307,"bel":1308,"summ":1309,"tin":1310,"would":1311,"looking":1312,"ko":1313,"cele":1314,"family":1315,"mer":1316,"pow":1317,"help":1318,"bus":1319,"co":1320,"cle":1321,"self":1322,"ens":1323,"ics":1324,"tho":1325,"ani":1326,"cho":1327,"lead":1328,"bs":1329,"twee":1330,"think":1331,"fore":1332,"chil":1333,"vide":1334,"did":1335,"ale":1336,"chi":1337,"vil":1338,"ends":1339,"wing":1340,"pas":1341,"'ll":1342,"vol":1343,"sa":1344,"gs":1345,"many":1346,"jec":1347,"before":1348,"graph":1349,"ny":1350,"uring":1351,"wil":1352,"dd":1353,"buil":1354,"fav":1355,"sted":1356,"tran":1357,"ling":1358,"oud":1359,"dge":1360,"fiel":1361,"national":1362,"sta":1363,"cer":1364,"were":1365,"ina":1366,"season":1367,"cou":1368,"ned":1369,"amazing":1370,"tions":1371,"celebr":1372,"ns":1373,"ath":1374,"head":1375,"sday":1376,"dar":1377,"loc":1378,"vin":1379,"another":1380,"goo":138
1,"sat":1382,"ny":1383,"join":1384,"pres":1385,"ses":1386,"sing":1387,"ana":1388,"ining":1389,"....":1390,"cour":1391,"ï¸ı":1392,"act":1393,"cause":1394,"light":1395,"ams":1396,"ta":1397,"bal":1398,"fc":1399,"high":1400,"offici":1401,"tt":1402,"christ":1403,"dic":1404,"day":1405,"ral":1406,"hor":1407,":)":1408,"visi":1409,"nam":1410,"ob":1411,"mas":1412,"ght":1413,"really":1414,"tun":1415,"find":1416,"through":1417,"port":1418,"ut":1419,"tive":1420,"sty":1421,"ne":1422,"ore":1423,"ðŁĺĤ":1424,"support":1425,"never":1426,"even":1427,"ðŁĶ":1428,"ha":1429,"ya":1430,"ld":1431,"uk":1432,"ran":1433,"jam":1434,"with":1435,"medi":1436,"des":1437,"ney":1438,"ching":1439,"ale":1440,"hy":1441,"kin":1442,"!!":1443,"dy":1444,"place":1445,"also":1446,"ble":1447,"which":1448,"black":1449,"bli":1450,"say":1451,"park":1452,"play":1453,"ire":1454,"video":1455,"weekend":1456,"ail":1457,"key":1458,"pt":1459,"ward":1460,"friday":1461,"din":1462,"iness":1463,"gro":1464,"ben":1465,"always":1466,"tball":1467,"ago":1468,"mil":1469,"cy":1470,"produc":1471,"disc":1472,"under":1473,"please":1474,"spor":1475,"full":1476,"ey":1477,"ðŁĻ":1478,"ise":1479,"ities":1480,"cat":1481,"kno":1482,"use":1483,"fore":1484,"ker":1485,"art":1486,"high":1487,"open":1488,"san":1489,"ef":1490,"ours":1491,"shed":1492,"stri":1493,"dro":1494,"again":1495,"im":1496,"ðŁĵ":1497,"enjo":1498,"fun":1499,"getting":1500,"pen":1501,"ger":1502,"cli":1503,"any":1504,"every":1505,"eu":1506,"women":1507,"âľ":1508,"est":1509,"could":1510,"ry":1511,"\"@":1512,"thou":1513,"sha":1514,"commun":1515,"ber":1516,"dents":1517,"dis":1518,"while":1519,"away":1520,"dio":1521,"ham":1522,"gla":1523,"date":1524,"ka":1525,"miss":1526,"unch":1527,"won":1528,"inf":1529,"room":1530,"ga":1531,"real":1532,"exper":1533,"direc":1534,"should":1535,"spr":1536,"gol":1537,"long":1538,"better":1539,"ori":1540,"ey":1541,"ience":1542,"ils":1543,"zz":1544,"han":1545,"found":1546,"vs":1547,"âĻ":1548,"post":1549,"tic":1550,"part":1551,"men":1552,"rence":1553,"cess":1554,"vic":1555,"sil":1556,"shop":1557,"ðŁĺĤ":1558,"food":1559,"val":1560,"stic":1561,"you":1562,"says":1563,"elec":1564,"star":1565,"oc":1566,"land":1567,"id":1568,"ction":1569,"field":1570,"sof":1571,"start":1572,"water":1573,"friends":1574,"ones":1575,"ðŁĮ":1576,"fla":1577,"far":1578,"white":1579,"party":1580,"inst":1581,"grou":1582,"tv":1583,"everyone":1584,"ment":1585,"ja":1586,"cha":1587,"prin":1588,"ants":1589,"during":1590,"lat":1591,"lar":1592,"west":1593,"then":1594,"ka":1595,"youn":1596,"insp":1597,"inte":1598,"ween":1599,"visit":1600,"against":1601,"rele":1602,"head":1603,"ces":1604,"town":1605,"looks":1606,"thre":1607,"regi":1608,"rent":1609,"projec":1610,"girl":1611,"sear":1612,"wo":1613,"mom":1614,"car":1615,"hun":1616,"publi":1617,"di":1618,"ple":1619,"call":1620,"cri":1621,"um":1622,"ford":1623,"perfe":1624,"friend":1625,"hard":1626,"ssion":1627,"test":1628,"playing":1629,"around":1630,"because":1631,"kets":1632,"meet":1633,"satur":1634,"arti":1635,"work":1636,"jun":1637,"ven":1638,"run":1639,"member":1640,"port":1641,"super":1642,"twit":1643,"sam":1644,"els":1645,"tly":1646,"adv":1647,"ative":1648,"ath":1649,"sure":1650,"avail":1651,"lar":1652,"squ":1653,"ards":1654,"event":1655,"men":1656,"ll":1657,"over":1658,"logy":1659,"ital":1660,"times":1661,"mal":1662,"back":1663,"coo":1664,"making":1665,"stru":1666,"âģ":1667,"itu":1668,"shar":1669,"gan":1670,"cas":1671,"sn":1672,"summer":1673,"picture":1674,"fan":1675,"hin":1676,"christmas":1677,"cy":1678,"proud":1679,"champi":1680,"design":1681,"pping":1682,"hope":1
683,"ca":1684,"available":1685,"may":1686,"wed":1687,"photograph":1688,"special":1689,"sale":1690,"stop":1691,"ery":1692,"awe":1693,"ality":1694,"history":1695,"ama":1696,"presi":1697,"bru":1698,"working":1699,"done":1700,"dr":1701,"ken":1702,"feat":1703,"wood":1704,"atest":1705,"sunday":1706,"movi":1707,"vely":1708,"sle":1709,"face":1710,"spec":1711,"students":1712,"by":1713,"ham":1714,"spon":1715,"business":1716,"dat":1717,"ie":1718,"ip":1719,"soci":1720,"glo":1721,"hand":1722,"recor":1723,"rs":1724,"mee":1725,"keep":1726,"pur":1727,"health":1728,"she":1729,"comple":1730,"god":1731,"davi":1732,"collec":1733,"list":1734,"ra":1735,"club":1736,"ters":1737,"inclu":1738,"things":1739,"plan":1740,"âĺ":1741,"john":1742,"shing":1743,"atul":1744,"soon":1745,"blue":1746,"gor":1747,"saturday":1748,"won":1749,"congratul":1750,"see":1751,"âĿ¤ï¸ı":1752,"those":1753,"ðŁĺį":1754,"final":1755,"dou":1756,"ith":1757,"own":1758,"road":1759,"tour":1760,"ast":1761,"india":1762,"til":1763,"nd":1764,"fer":1765,"favor":1766,"sul":1767,"learn":1768,"fire":1769,"just":1770,"group":1771,"ah":1772,"rac":1773,"body":1774,"ur":1775,"care":1776,"à¸":1777,"plo":1778,"oh":1779,"pos":1780,"give":1781,"tech":1782,"sub":1783,"cent":1784,"ering":1785,"ym":1786,"ility":1787,"fic":1788,"london":1789,"vir":1790,"guys":1791,"ba":1792,"ðŁ¤":1793,"baby":1794,"scre":1795,"ðŁĺį":1796,"trump":1797,"under":1798,"change":1799,"ian":1800,"colle":1801,"sses":1802,"ler":1803,"ssed":1804,"nice":1805,"announ":1806,"power":1807,"sar":1808,"aking":1809,"mini":1810,"sli":1811,"swee":1812,"kar":1813,"ful":1814,"cru":1815,"action":1816,"ather":1817,").":1818,"stand":1819,"devel":1820,"aa":1821,"gan":1822,"left":1823,"lol":1824,"rel":1825,"trans":1826,"ments":1827,"int":1828,"ef":1829,"manag":1830,"dig":1831,"gener":1832,"down":1833,"pau":1834,"tiv":1835,"ku":1836,"thur":1837,"ken":1838,"ston":1839,"fans":1840,"talk":1841,"tweet":1842,"too":1843,"style":1844,"prote":1845,"secon":1846,"fron":1847,"awesome":1848,"gl":1849,"pal":1850,"net":1851,"sor":1852,"lau":1853,"gon":1854,"since":1855,"tty":1856,"series":1857,"memor":1858,"beli":1859,"film":1860,"did":1861,"dies":1862,"ot":1863,"congratulations":1864,"pra":1865,"eve":1866,"woo":1867,"official":1868,"suc":1869,"incre":1870,"bon":1871,"part":1872,"pped":1873,"class":1874,"sive":1875,"boy":1876,"cul":1877,"perfect":1878,"tou":1879,"dam":1880,"welcome":1881,"football":1882,"hi":1883,"pap":1884,"wait":1885,"ada":1886,"congrats":1887,"young":1888,"excited":1889,"rece":1890,"jan":1891,"va":1892,"red":1893,"stra":1894,"media":1895,"'d":1896,"does":1897,"let":1898,"mul":1899,"ills":1900,"green":1901,"mel":1902,"toge":1903,"future":1904,"yester":1905,"versity":1906,"form":1907,"tain":1908,"ide":1909,"ches":1910,"kids":1911,"qui":1912,"haha":1913,"deta":1914,"big":1915,"favorite":1916,"girls":1917,"contin":1918,"dom":1919,"search":1920,"ual":1921,"air":1922,"ders":1923,"month":1924,"cer":1925,"yesterday":1926,"community":1927,"ade":1928,"dog":1929,"ville":1930,"ices":1931,"deli":1932,"syste":1933,"run":1934,"ism":1935,"heart":1936,"cup":1937,"enti":1938,"few":1939,"president":1940,"eds":1941,"until":1942,"festi":1943,"ok":1944,"flo":1945,"said":1946,"ole":1947,"med":1948,"travel":1949,"£":1950,"phone":1951,"together":1952,"fast":1953,"lot":1954,"games":1955,"shir":1956,"between":1957,"yes":1958,"thers":1959,"doing":1960,"mac":1961,"ator":1962,"band":1963,"follow":1964,"project":1965,"develop":1966,"diffe":1967,"confe":1968,"speci":1969,"cast":1970,"ys":1971,"board":1972,"rd":1973,"ial":1974,"shoo":1975,"ra
m":1976,"having":1977,"share":1978,"follow":1979,"one":1980,"name":1981,"mr":1982,"put":1983,"discu":1984,"ory":1985,"came":1986,"ous":1987,"site":1988,"twitter":1989,"tb":1990,"tit":1991,"finally":1992,"zed":1993,"super":1994,"compan":1995,"using":1996,"alls":1997,"list":1998,"ris":1999,"shot":2000,"gal":2001,"tar":2002,"del":2003,"john":2004,"âĢĶ":2005,"something":2006,"ram":2007,"intere":2008,"whe":2009,"bit":2010,"ðŁį":2011,"street":2012,"ound":2013,"ai":2014,"tickets":2015,"movie":2016,"real":2017,"ky":2018,"taking":2019,"opp":2020,"cc":2021,"lam":2022,"moun":2023,"inve":2024,"black":2025,"used":2026,"online":2027,"yor":2028,"local":2029,"gue":2030,"cks":2031,"ow":2032,"gest":2033,"boys":2034,"illion":2035,"cont":2036,"reci":2037,"ined":2038,"euro":2039,"now":2040,"seen":2041,"ph":2042,"teach":2043,"def":2044,"south":2045,"such":2046,"award":2047,"must":2048,"issu":2049,"care":2050,"feel":2051,"plu":2052,"latest":2053,"sports":2054,"web":2055,"tex":2056,"ement":2057,"sk":2058,"fic":2059,"wan":2060,"tech":2061,"ot":2062,"box":2063,"ner":2064,"free":2065,"tal":2066,"ash":2067,"case":2068,"hot":2069,"wonder":2070,"meeting":2071,"era":2072,"chall":2073,"ðŁIJ":2074,"job":2075,"ili":2076,"cool":2077,"jour":2078,"ths":2079,"mo":2080,"fel":2081,"die":2082,"micha":2083,"ele":2084,"team":2085,"service":2086,"stand":2087,"makes":2088,"ping":2089,"early":2090,"comes":2091,"ek":2092,"holi":2093,"vers":2094,"ague":2095,"sau":2096,"three":2097,"monday":2098,"fashi":2099,"someone":2100,"thro":2101,"sea":2102,"bad":2103,"suppor":2104,"turn":2105,"ury":2106,"ming":2107,"photography":2108,"nic":2109,"mark":2110,"pretty":2111,"ssing":2112,"watching":2113,"memb":2114,"arri":2115,"county":2116,"beach":2117,"fran":2118,"center":2119,"police":2120,"bat":2121,"public":2122,"tan":2123,"press":2124,"saf":2125,"sy":2126,"gets":2127,"roy":2128,"ners":2129,"your":2130,"buy":2131,"sters":2132,"show":2133,"ased":2134,"childre":2135,"afric":2136,"ines":2137,"space":2138,"scri":2139,"hall":2140,"pain":2141,"aring":2142,"home":2143,"mur":2144,"health":2145,"ched":2146,"sand":2147,"recei":2148,"guy":2149,"ea":2150,"american":2151,"resi":2152,"children":2153,"--":2154,"iri":2155,"ington":2156,"country":2157,"ross":2158,"len":2159,"anna":2160,"books":2161,"bc":2162,"ece":2163,"dom":2164,"lovely":2165,"kh":2166,"pet":2167,"gy":2168,"gri":2169,"stage":2170,"office":2171,"rock":2172,"mon":2173,"bay":2174,"table":2175,"sun":2176,"med":2177,"thin":2178,"lor":2179,"flow":2180,"(@":2181,"university":2182,"store":2183,"front":2184,"good":2185,"za":2186,"vote":2187,"north":2188,"hey":2189,"anim":2190,"order":2191,"mid":2192,"without":2193,"ade":2194,"remember":2195,"market":2196,"??":2197,"mus":2198,"training":2199,"educ":2200,"but":2201,"cover":2202,"stan":2203,"scen":2204,"bla":2205,"break":2206,"lou":2207,"same":2208,"gold":2209,"ain":2210,"os":2211,"both":2212,"lit":2213,"vern":2214,"ai":2215,"albu":2216,"pa":2217,"enjoy":2218,"beg":2219,"elling":2220,"thursday":2221,"info":2222,"san":2223,"america":2224,"hair":2225,"tel":2226,"march":2227,"concer":2228,"college":2229,"conference":2230,"app":2231,"hour":2232,"chang":2233,"âļ":2234,"sour":2235,"ols":2236,"weather":2237,"war":2238,"phi":2239,"festival":2240,"second":2241,"cute":2242,"prac":2243,"ener":2244,"stry":2245,"lea":2246,"polit":2247,"sav":2248,"sen":2249,"ow":2250,"mi":2251,"near":2252,"ought":2253,"ze":2254,"coffe":2255,"willi":2256,"dan":2257,"sey":2258,"david":2259,"ese":2260,"fan":2261,"deci":2262,"theat":2263,"nov":2264,"ation":2265,"trac":2266,"sci":2267,"review":2
268,"cel":2269,"em":2270,"un":2271,"july":2272,"orig":2273,"tion":2274,"dru":2275,"former":2276,"stay":2277,"after":2278,"inv":2279,"took":2280,"data":2281,"bal":2282,"tues":2283,"dan":2284,"evening":2285,"ðŁĺĤðŁĺĤ":2286,"dol":2287,"ures":2288,"provi":2289,"ts":2290,"est":2291,"sign":2292,"jac":2293,"uk":2294,"song":2295,"yet":2296,"bow":2297,"indu":2298,"jap":2299,"hoo":2300,"point":2301,"anyone":2302,"zy":2303,"ist":2304,"hur":2305,"ital":2306,"building":2307,"woman":2308,"chur":2309,"jer":2310,"perfor":2311,"coach":2312,"league":2313,"cess":2314,"net":2315,"imag":2316,"nation":2317,"brit":2318,"que":2319,"awards":2320,"ages":2321,"works":2322,"ced":2323,"mance":2324,"late":2325,"ign":2326,"money":2327,"true":2328,"ii":2329,"tell":2330,"plac":2331,"pac":2332,"asy":2333,"world":2334,"behin":2335,"import":2336,"reading":2337,"gram":2338,"giving":2339,"met":2340,"hit":2341,"forward":2342,"stom":2343,"present":2344,"june":2345,"social":2346,"noon":2347,"mart":2348,"half":2349,"swe":2350,"govern":2351,"ker":2352,"details":2353,"lish":2354,"__":2355,"acy":2356,"sia":2357,"bert":2358,"fall":2359,"!!!!":2360,"),":2361,"thi":2362,"diti":2363,"sport":2364,"king":2365,"fit":2366,"staf":2367,"cat":2368,"muse":2369,"centr":2370,"yer":2371,"contro":2372,"bloo":2373,"walk":2374,"actu":2375,"didn":2376,"lim":2377,"learning":2378,"research":2379,"wedne":2380,"auth":2381,"hours":2382,"ky":2383,"far":2384,"hen":2385,"....":2386,"itch":2387,"ril":2388,"strong":2389,"sky":2390,"questi":2391,"james":2392,"ron":2393,"dg":2394,"fur":2395,"cin":2396,"does":2397,"appro":2398,"marke":2399,"tures":2400,"fully":2401,"chat":2402,"behind":2403,"tem":2404,"fini":2405,"mission":2406,"batt":2407,"feel":2408,"heav":2409,"everything":2410,"bar":2411,"wish":2412,"premi":2413,"ima":2414,"experience":2415,"each":2416,"report":2417,"sweet":2418,"tics":2419,"spring":2420,"respon":2421,"system":2422,"victor":2423,"lin":2424,"saw":2425,"already":2426,"ghter":2427,"fle":2428,"ãĥ":2429,"bring":2430,"album":2431,"--":2432,"ells":2433,"stan":2434,"tom":2435,"international":2436,"went":2437,"anni":2438,"match":2439,"pper":2440,"stone":2441,"small":2442,"rain":2443,"fashion":2444,"area":2445,"van":2446,"agram":2447,"ko":2448,"thought":2449,"worth":2450,"van":2451,"mer":2452,"coffee":2453,"ites":2454,"gn":2455,"artist":2456,"con":2457,"arch":2458,"cir":2459,"secre":2460,"ground":2461,"iso":2462,"hand":2463,"com":2464,"bridge":2465,"hs":2466,"xi":2467,"link":2468,"pul":2469,"spl":2470,"race":2471,"fli":2472,"river":2473,"gas":2474,"disco":2475,"dal":2476,"player":2477,"fit":2478,"photos":2479,"ity":2480,"ok":2481,"jor":2482,"tra":2483,"april":2484,"ads":2485,"adi":2486,"solu":2487,"beauty":2488,"door":2489,"mess":2490,"update":2491,"alia":2492,"scho":2493,"ened":2494,"moment":2495,"scot":2496,"science":2497,"ior":2498,"ties":2499,"across":2500,"ously":2501,"shes":2502,"doesn":2503,"page":2504,"water":2505,"million":2506,"classi":2507,"lic":2508,"cast":2509,"formation":2510,"michael":2511,"ello":2512,"smo":2513,"ints":2514,"vision":2515,"opening":2516,"ldn":2517,"austr":2518,"tuesday":2519,"winner":2520,"possi":2521,"round":2522,"shirt":2523,"dit":2524,"bo":2525,"ues":2526,"illed":2527,"along":2528,"trip":2529,"starting":2530,"impro":2531,"kan":2532,"person":2533,"not":2534,"reco":2535,"needs":2536,"cle":2537,"lie":2538,"rest":2539,"ring":2540,"winter":2541,"simp":2542,"mom":2543,"beer":2544,"face":2545,"tors":2546,"usa":2547,"collection":2548,"geor":2549,"session":2550,"trying":2551,"las":2552,"lake":2553,"jen":2554,"origin":2555,"student
":2556,"secur":2557,"vin":2558,"pics":2559,"expe":2560,"comp":2561,"gonna":2562,"equ":2563,"bad":2564,"ley":2565,"au":2566,"members":2567,"break":2568,"wall":2569,"gic":2570,"dinner":2571,"bul":2572,"inspir":2573,"ri":2574,"mind":2575,"ica":2576,"winning":2577,"talking":2578,"tren":2579,"sis":2580,"ten":2581,"wonderful":2582,"snow":2583,"hear":2584,"thom":2585,"nothing":2586,"gui":2587,"stin":2588,"blog":2589,"fest":2590,"bun":2591,"lee":2592,"wards":2593,"chance":2594,"dress":2595,"ren":2596,"paul":2597,"pes":2598,"techno":2599,"russi":2600,"card":2601,"east":2602,"mari":2603,"wine":2604,"ti":2605,"law":2606,"stric":2607,"ki":2608,"ape":2609,"augu":2610,"profe":2611,"ash":2612,"course":2613,"mail":2614,"rently":2615,"dun":2616,"mun":2617,"love":2618,"island":2619,"drive":2620,"sl":2621,"ended":2622,"main":2623,"lost":2624,"nature":2625,"âĿ¤ï¸ı":2626,"chic":2627,"repor":2628,"pin":2629,"pro":2630,"station":2631,"cep":2632,"takes":2633,"company":2634,"goes":2635,"ond":2636,"mach":2637,"radio":2638,"dad":2639,"rock":2640,"ja":2641,"pay":2642,"champion":2643,"ee":2644,"inde":2645,"tta":2646,"atic":2647,"tab":2648,"believe":2649,"energy":2650,"zi":2651,"tat":2652,"word":2653,"once":2654,"resul":2655,"yl":2656,"andre":2657,"ano":2658,"instagram":2659,"close":2660,"tam":2661,"custom":2662,"wa":2663,"conom":2664,"shows":2665,"life":2666,"kin":2667,"rob":2668,"tage":2669,"nation":2670,"almost":2671,"listen":2672,"save":2673,"reli":2674,"ace":2675,"mary":2676,"tree":2677,"forget":2678,"jack":2679,"waiting":2680,"director":2681,"hill":2682,"born":2683,"temp":2684,"fl":2685,"ste":2686,"ona":2687,"single":2688,"wednesday":2689,"united":2690,"ino":2691,"@_":2692,"nel":2693,"celebrate":2694,"ending":2695,"deal":2696,"ji":2697,"canada":2698,"huge":2699,"track":2700,"âĢ¢":2701,"fy":2702,"fanta":2703,"ang":2704,"york":2705,"release":2706,"pun":2707,"episo":2708,"words":2709,"tour":2710,"pack":2711,"igh":2712,"classic":2713,"performance":2714,"ket":2715,"afternoon":2716,"record":2717,"wins":2718,"proble":2719,"âĿ¤":2720,"four":2721,"bed":2722,"bank":2723,"dance":2724,"sla":2725,"called":2726,"might":2727,"ap":2728,"past":2729,"ðŁļ":2730,"different":2731,"ite":2732,"gift":2733,"ssive":2734,"church":2735,"cus":2736,"program":2737,"hotel":2738,"ice":2739,"mad":2740,"security":2741,"enge":2742,"dc":2743,"enough":2744,"sta":2745,"ety":2746,"dead":2747,"gun":2748,"hear":2749,"mir":2750,"human":2751,"gress":2752,"ounds":2753,"piece":2754,"breaking":2755,"garden":2756,"fight":2757,"views":2758,"fish":2759,"started":2760,"running":2761,"green":2762,"seri":2763,"sm":2764,"ask":2765,"dor":2766,"death":2767,"econom":2768,"eri":2769,"ird":2770,"ser":2771,"lunch":2772,"âģ¦":2773,"box":2774,"natu":2775,"base":2776,"ban":2777,"fal":2778,"global":2779,"wild":2780,"wow":2781,"outside":2782,"move":2783,"lead":2784,"anal":2785,"museum":2786,"ong":2787,"haw":2788,"power":2789,"thank":2790,"bac":2791,"charac":2792,"campa":2793,"digital":2794,"ro":2795,"oper":2796,"dev":2797,"wol":2798,"pati":2799,"fa":2800,"male":2801,"paper":2802,"illing":2803,"cs":2804,"âĥ":2805,"education":2806,"taken":2807,"effe":2808,"mou":2809,"sad":2810,"\".":2811,"based":2812,"staff":2813,"including":2814,"living":2815,"ac":2816,"china":2817,"mob":2818,"storm":2819,"luck":2820,"phil":2821,"oo":2822,"yn":2823,"travel":2824,"kel":2825,"tial":2826,"price":2827,"book":2828,"important":2829,"bio":2830,"pool":2831,"nyc":2832,"fab":2833,"load":2834,"?!":2835,"challenge":2836,"cry":2837,"serve":2838,"wear":2839,"bus":2840,"tain":2841,"number":2842,"ror":2843,"kat
":2844,"iz":2845,"though":2846,"hosp":2847,"mm":2848,"fair":2849,"utes":2850,"hot":2851,"pop":2852,"fied":2853,"camp":2854,"development":2855,"libr":2856,"cali":2857,"ems":2858,"âģ¦@":2859,"bol":2860,"ised":2861,"standing":2862,"model":2863,"ita":2864,"gle":2865,"brown":2866,"image":2867,"vered":2868,"force":2869,"oil":2870,"partic":2871,"shu":2872,"daily":2873,"law":2874,"sec":2875,"class":2876,"camp":2877,"holiday":2878,"clin":2879,"kers":2880,"present":2881,"game":2882,"incredi":2883,"ership":2884,"interview":2885,"bill":2886,"due":2887,"andy":2888,"abo":2889,"innov":2890,"key":2891,"acade":2892,"pil":2893,"moder":2894,"stars":2895,"brand":2896,"fer":2897,"weeks":2898,"consi":2899,"pre":2900,"safe":2901,"writ":2902,"dium":2903,"launch":2904,"marketing":2905,"annual":2906,"assi":2907,"court":2908,"lady":2909,"cted":2910,"anda":2911,"inside":2912,"child":2913,"oppor":2914,"smith":2915,"centre":2916,"gue":2917,"âģ©":2918,"fren":2919,"sty":2920,"fort":2921,"ently":2922,"isn":2923,"keep":2924,"tober":2925,"ony":2926,"boy":2927,"ald":2928,"colla":2929,"demo":2930,"level":2931,"compet":2932,"ado":2933,"bour":2934,"fantastic":2935,"mate":2936,"su":2937,"south":2938,"opportun":2939,"versary":2940,"later":2941,"bud":2942,"facebook":2943,"laun":2944,"stern":2945,"pit":2946,"!\"":2947,"maj":2948,"gram":2949,"tbt":2950,"fire":2951,"happy":2952,"aks":2953,"whole":2954,"actually":2955,"iller":2956,"ella":2957,"lots":2958,"alex":2959,"ange":2960,"lands":2961,"ðŁĺŃ":2962,"enter":2963,"rou":2964,"episode":2965,"ped":2966,"inten":2967,"shire":2968,"who":2969,"plan":2970,"ho":2971,"cake":2972,"west":2973,"magaz":2974,"fresh":2975,"cc":2976,"nar":2977,"chris":2978,"writing":2979,"wer":2980,"nom":2981,"lo":2982,"midd":2983,"dream":2984,"ol":2985,"tional":2986,"deb":2987,">>":2988,"become":2989,"si":2990,"grand":2991,"alling":2992,"histor":2993,"ride":2994,"ired":2995,"safe":2996,"queen":2997,"cil":2998,"intro":2999,"vil":3000,"dani":3001,"...":3002,"artic":3003,"stat":3004,"short":3005,"oring":3006,"selfi":3007,"missi":3008,"doc":3009,"bit":3010,"gall":3011,"bom":3012,"ire":3013,"selec":3014,"dition":3015,"ðŁĶ¥":3016,"friend":3017,"beat":3018,"ghting":3019,"ðŁĺĬ":3020,"peace":3021,"exhi":3022,"anta":3023,"ability":3024,"illu":3025,"jon":3026,"quality":3027,"tribu":3028,"mes":3029,"players":3030,"fair":3031,"cut":3032,"cab":3033,"success":3034,"bi":3035,"sus":3036,"promo":3037,"sche":3038,"ange":3039,"ico":3040,"commit":3041,"catch":3042,"illa":3043,"kind":3044,"feeling":3045,"quo":3046,"say":3047,"anniversary":3048,"spot":3049,"mother":3050,"ane":3051,"pend":3052,"yourself":3053,"ops":3054,"apple":3055,"minutes":3056,"po":3057,"grand":3058,"ries":3059,"haha":3060,"career":3061,"edition":3062,"dec":3063,"rick":3064,"ami":3065,"concert":3066,"itive":3067,"geous":3068,"dly":3069,"tte":3070,"advent":3071,"ig":3072,"lights":3073,"aker":3074,"sky":3075,"âĥ£":3076,"ray":3077,"finished":3078,"way":3079,"sd":3080,"accoun":3081,"ðŁĴķ":3082,"cky":3083,"chel":3084,"liter":3085,"painting":3086,"los":3087,"stun":3088,"technology":3089,"nas":3090,"mar":3091,"bil":3092,"africa":3093,"kie":3094,"eyes":3095,"golf":3096,"plus":3097,"nia":3098,"itec":3099,"services":3100,"wedding":3101,"known":3102,"tele":3103,".....":3104,"starts":3105,"paren":3106,"wants":3107,"ational":3108,"months":3109,"windo":3110,"favour":3111,"ert":3112,"magazine":3113,"exclu":3114,"reve":3115,"bc":3116,"original":3117,"ess":3118,"nal":3119,"anti":3120,"stro":3121,"tice":3122,"study":3123,"à¤":3124,"vac":3125,"national":3126,"five":3127,"rain":3128,"veme
nt":3129,"ute":3130,"verse":3131,"emer":3132,"army":3133,"possible":3134,"guess":3135,"valley":3136,"thern":3137,"crow":3138,"mr":3139,"color":3140,"onto":3141,"pick":3142,"clear":3143,"dark":3144,"tac":3145,"wanted":3146,"itting":3147,"cancer":3148,"government":3149,"die":3150,"rise":3151,"zing":3152,"cold":3153,"foun":3154,"studio":3155,"stration":3156,"brother":3157,"ahead":3158,"shel":3159,"micro":3160,"ically":3161,"dau":3162,"signed":3163,"viol":3164,"ax":3165,"asse":3166,"io":3167,"wre":3168,"splay":3169,"chick":3170,"august":3171,"plat":3172,"tips":3173,"spi":3174,"human":3175,"easy":3176,"logi":3177,"mike":3178,"grow":3179,"agre":3180,"ww":3181,"shad":3182,"motiv":3183,"wide":3184,"turns":3185,"omg":3186,"var":3187,"defin":3188,"sug":3189,"jim":3190,"ðŁĶ¥":3191,"td":3192,"campaign":3193,"named":3194,"retweet":3195,"cop":3196,"tv":3197,"leav":3198,"kis":3199,"double":3200,"smar":3201,"issue":3202,"villa":3203,"information":3204,"lies":3205,"stock":3206,"nt":3207,"distric":3208,"shor":3209,"mix":3210,"ero":3211,"sep":3212,"mex":3213,"seeing":3214,"live":3215,"remin":3216,"code":3217,"gur":3218,"sc":3219,"wild":3220,"lun":3221,"hood":3222,"spot":3223,"father":3224,"forever":3225,"upd":3226,"traf":3227,"fly":3228,"need":3229,"gradu":3230,"train":3231,"make":3232,"sab":3233,"bey":3234,"size":3235,"leader":3236,"talks":3237,"eu":3238,"log":3239,"fox":3240,"gorgeous":3241,"less":3242,"lets":3243,"surpri":3244,"myself":3245,"note":3246,"lives":3247,"fru":3248,"loved":3249,"sever":3250,"dem":3251,"ji":3252,"soc":3253,"hold":3254,"dogs":3255,"ni":3256,"âŀ":3257,"leave":3258,"airport":3259,"benef":3260,"expl":3261,"ships":3262,"complete":3263,"achi":3264,"great":3265,"vintage":3266,"jack":3267,"roc":3268,"wood":3269,"priv":3270,"offer":3271,"eye":3272,"version":3273,"tea":3274,"coach":3275,"offic":3276,"well":3277,"gen":3278,"sat":3279,"hh":3280,"youth":3281,"ox":3282,"?\"":3283,"mt":3284,"mix":3285,"gg":3286,"dle":3287,"natural":3288,"build":3289,"breakfast":3290,"thinking":3291,"theatre":3292,"moon":3293,"berg":3294,"goals":3295,"george":3296,"ene":3297,"excell":3298,"iling":3299,"tune":3300,"yed":3301,"gate":3302,"mit":3303,"network":3304,"joe":3305,"hello":3306,"fb":3307,"tube":3308,"wearing":3309,"athle":3310,"struc":3311,"hard":3312,"glass":3313,"gers":3314,"throw":3315,"ges":3316,"bt":3317,"industry":3318,"management":3319,"alist":3320,"goal":3321,"stream":3322,"yel":3323,"avi":3324,"icious":3325,"others":3326,"ski":3327,"christi":3328,"bird":3329,"esc":3330,"min":3331,"tro":3332,"lt":3333,"jan":3334,"imp":3335,"rights":3336,"sha":3337,"organ":3338,"central":3339,"ara":3340,"roll":3341,"favourite":3342,"chester":3343,"else":3344,"pay":3345,"cars":3346,"mine":3347,"step":3348,"practice":3349,"major":3350,"hang":3351,"ðŁĺĺ":3352,"non":3353,"vari":3354,"engine":3355,"volun":3356,"dia":3357,"iled":3358,"architec":3359,"pink":3360,"ds":3361,"thy":3362,"wash":3363,"website":3364,"bag":3365,"control":3366,"elli":3367,"fra":3368,"answ":3369,"dence":3370,"yu":3371,"ron":3372,"ola":3373,"gin":3374,"drin":3375,"lic":3376,"couple":3377,"spar":3378,"gon":3379,"create":3380,"ct":3381,"celebrating":3382,"deep":3383,"eat":3384,"tee":3385,"voice":3386,"drop":3387,"visit":3388,"ators":3389,"stadium":3390,"ft":3391,"wis":3392,"rol":3393,"grade":3394,"famil":3395,"points":3396,"repre":3397,"was":3398,"traffic":3399,"japan":3400,"org":3401,"honor":3402,"texas":3403,"manu":3404,"âĻ¥":3405,"safety":3406,"rer":3407,"bag":3408,"emplo":3409,"released":3410,"regu":3411,"aka":3412,"nav":3413,"role":3414,"senior":
3415,"spect":3416,"cross":3417,"lines":3418,"best":3419,"pack":3420,"sin":3421,"tie":3422,"missing":3423,"sunset":3424,"liber":3425,"ising":3426,"jay":3427,"ski":3428,"championship":3429,"activ":3430,"ladies":3431,"played":3432,"yy":3433,"publ":3434,"alo":3435,"pride":3436,"sr":3437,"paki":3438,"lux":3439,"survi":3440,"cked":3441,"ets":3442,"chocol":3443,"australia":3444,"paris":3445,"miles":3446,"hat":3447,"mental":3448,"ala":3449,"mean":3450,"mobile":3451,"ena":3452,"insi":3453,"found":3454,"chief":3455,"tag":3456,"incredible":3457,"return":3458,"é":3459,"google":3460,"french":3461,"crew":3462,"hallo":3463,"alian":3464,"jaz":3465,"cher":3466,"silver":3467,"north":3468,"english":3469,"baseball":3470,"caf":3471,"limited":3472,"following":3473,"appreci":3474,"earth":3475,"kir":3476,"vember":3477,"wed":3478,"ption":3479,"ged":3480,"october":3481,"flori":3482,"cr":3483,"ency":3484,"gave":3485,"lord":3486,"stuff":3487,"berry":3488,"post":3489,"smile":3490,"broad":3491,"state":3492,"gger":3493,"means":3494,"icy":3495,"gun":3496,"yo":3497,"master":3498,"burg":3499,"hands":3500,"nie":3501,"//":3502,"union":3503,"british":3504,"biggest":3505,"district":3506,"aming":3507,"hil":3508,"oce":3509,"person":3510,"pass":3511,"envir":3512,"schools":3513,"arrived":3514,"ances":3515,"inspired":3516,"expla":3517,"ben":3518,"library":3519,"bott":3520,"amp":3521,"steph":3522,"contact":3523,"bang":3524,"ms":3525,"califor":3526,"told":3527,"battle":3528,"bb":3529,"chicago":3530,"⾨":3531,"strate":3532,"shi":3533,"dece":3534,"-)":3535,"add":3536,"lab":3537,"jones":3538,"legend":3539,"castle":3540,"inger":3541,"stance":3542,"bel":3543,"ura":3544,"refu":3545,"leaders":3546,"pot":3547,"sex":3548,"hic":3549,"article":3550,"kid":3551,"france":3552,"xx":3553,"exe":3554,"guide":3555,"volunte":3556,"print":3557,"ali":3558,"ceo":3559,"tweets":3560,"wx":3561,"scene":3562,"volu":3563,"anti":3564,"han":3565,"associ":3566,"sharing":3567,"rose":3568,"minister":3569,"sher":3570,"inste":3571,"clean":3572,"democr":3573,"poster":3574,"skin":3575,"psy":3576,"proper":3577,"crazy":3578,"iam":3579,"ore":3580,"ini":3581,"anything":3582,"pod":3583,"moving":3584,"click":3585,"explo":3586,"comb":3587,"craft":3588,"fi":3589,"blood":3590,"isra":3591,"public":3592,"dent":3593,"olym":3594,"england":3595,"asi":3596,"cher":3597,"fact":3598,"environ":3599,"harry":3600,"gone":3601,"medic":3602,"enjoying":3603,"justice":3604,"jr":3605,"indian":3606,"wife":3607,"sound":3608,"tes":3609,"drawing":3610,"pal":3611,"idea":3612,"crit":3613,"juli":3614,"iler":3615,"warm":3616,"clar":3617,"thoughts":3618,"defen":3619,"council":3620,"introduc":3621,"died":3622,"janu":3623,"ani":3624,"send":3625,"lier":3626,"ml":3627,"interesting":3628,"trade":3629,"wind":3630,"bay":3631,"sac":3632,"ancy":3633,"source":3634,"bes":3635,"organi":3636,"arly":3637,"large":3638,"ffici":3639,"tag":3640,"ut":3641,"desp":3642,"oes":3643,"title":3644,"sym":3645,"pictures":3646,"open":3647,"women":3648,"showing":3649,"ria":3650,"least":3651,"leadership":3652,"current":3653,"electr":3654,"valent":3655,"listening":3656,"ckey":3657,"general":3658,"deser":3659,"duce":3660,";)":3661,"cent":3662,"ðŁĺįðŁĺį":3663,"scott":3664,"poor":3665,"selfie":3666,"events":3667,"ion":3668,"wrong":3669,"dev":3670,"hill":3671,"septe":3672,"culture":3673,"line":3674,"sorry":3675,"sent":3676,"sister":3677,"cept":3678,"kri":3679,"november":3680,"ari":3681,"announce":3682,"zation":3683,"bran":3684,"gent":3685,"du":3686,"len":3687,"pers":3688,"fm":3689,"martin":3690,"op":3691,"emb":3692,"ome":3693,"middle":3694,"su
ccess":3695,"peter":3696,"january":3697,"flu":3698,"racing":3699,"dav":3700,"bike":3701,"ðŁı»":3702,"pet":3703,"shoot":3704,"professi":3705,"featuring":3706,"september":3707,"nowplaying":3708,"staur":3709,"za":3710,"onic":3711,"quick":3712,"baske":3713,"speaking":3714,"milit":3715,"zer":3716,"chicken":3717,"bell":3718,"sad":3719,"coast":3720,"loving":3721,"yers":3722,"dj":3723,"panel":3724,"verage":3725,"swit":3726,"icks":3727,"bou":3728,"california":3729,"sam":3730,"parents":3731,"ero":3732,"killed":3733,"phys":3734,"jobs":3735,"migr":3736,"anth":3737,"emo":3738,"halloween":3739,"ander":3740,"cm":3741,"competition":3742,"eag":3743,"sket":3744,"spir":3745,"maybe":3746,"exclusive":3747,"appe":3748,"journey":3749,"screen":3750,"ford":3751,"io":3752,"hate":3753,"ug":3754,"soul":3755,"hero":3756,"society":3757,"syn":3758,"guit":3759,"nh":3760,"dj":3761,"ases":3762,"impre":3763,"time":3764,"sales":3765,"dd":3766,"fts":3767,"summit":3768,"stunning":3769,"oms":3770,"turned":3771,"clean":3772,"soft":3773,"beat":3774,"restaur":3775,"dered":3776,"ences":3777,"magic":3778,"dio":3779,"shine":3780,"guest":3781,"healthy":3782,"exhib":3783,"stories":3784,"popu":3785,"nis":3786,"ela":3787,"below":3788,"funny":3789,"results":3790,"sne":3791,"currently":3792,"ard":3793,"download":3794,"flight":3795,"mal":3796,"fine":3797,"pad":3798,"chu":3799,"ented":3800,"hat":3801,"ðŁijı":3802,"steve":3803,"jo":3804,"mark":3805,"rat":3806,"ball":3807,"pc":3808,"pon":3809,"bby":3810,"oli":3811,"arts":3812,"asure":3813,"bowl":3814,"attack":3815,"mic":3816,"dear":3817,"range":3818,"enter":3819,"chocolate":3820,"brilli":3821,"access":3822,",\"":3823,"???":3824,"chap":3825,"const":3826,"tn":3827,"matter":3828,"blue":3829,"gallery":3830,"emp":3831,"workshop":3832,"leading":3833,"yours":3834,"basketball":3835,"wanna":3836,"thu":3837,"__":3838,"marri":3839,"sleep":3840,"bia":3841,"che":3842,"mad":3843,"impact":3844,"own":3845,"sir":3846,"channel":3847,"europe":3848,"esp":3849,"kitch":3850,"hospital":3851,"wra":3852,"royal":3853,"fs":3854,"neu":3855,"quar":3856,"ney":3857,"acks":3858,"chase":3859,"ppy":3860,"stal":3861,"ately":3862,"tim":3863,"december":3864,"rare":3865,"perform":3866,"cream":3867,"weight":3868,"choo":3869,"night":3870,"haven":3871,"franc":3872,"khan":3873,"built":3874,"helping":3875,"trust":3876,"type":3877,"golden":3878,"tax":3879,"snow":3880,"swi":3881,"disa":3882,"questions":3883,"vey":3884,"light":3885,"cn":3886,"cloud":3887,"thomas":3888,"aged":3889,"shou":3890,"teams":3891,"gran":3892,"reason":3893,"aa":3894,"youtube":3895,"vp":3896,"pizz":3897,"manager":3898,"bury":3899,"credit":3900,"treat":3901,"max":3902,"ik":3903,"main":3904,"ging":3905,"dead":3906,"probab":3907,"yeah":3908,"ãĤ":3909,"brand":3910,"soli":3911,"plant":3912,"tayl":3913,"girl":3914,"ðŁĺŃ":3915,"nament":3916,"auto":3917,"message":3918,"kore":3919,"nur":3920,"terr":3921,"agu":3922,"map":3923,"senting":3924,"loves":3925,"gives":3926,"gab":3927,"zen":3928,"robert":3929,"confir":3930,"wars":3931,"om":3932,"stain":3933,"camera":3934,"ander":3935,"wonder":3936,"ab":3937,"cap":3938,"sold":3939,"suit":3940,"walking":3941,"continue":3942,"effec":3943,"daughter":3944,"danc":3945,"chain":3946,"multi":3947,"kid":3948,"yan":3949,"champion":3950,"vo":3951,"tains":3952,"host":3953,"mini":3954,"missed":3955,"resc":3956,"lyn":3957,"finish":3958,"delicious":3959,"sas":3960,"taylor":3961,"ib":3962,"promis":3963,"products":3964,"mountain":3965,"florida":3966,"register":3967,"treat":3968,"recent":3969,"female":3970,"booth":3971,"matt":3972,"vehic":3973,"sop":397
4,"motor":3975,"supporting":3976,"phic":3977,"extre":3978,"drink":3979,"lane":3980,"third":3981,"ps":3982,"constru":3983,"cere":3984,"farm":3985,"ðŁİī":3986,"tured":3987,"ðŁijī":3988,"cats":3989,"aj":3990,"gie":3991,"shooting":3992,"asked":3993,"pakistan":3994,"ame":3995,"mb":3996,"gil":3997,"legal":3998,"square":3999,"invol":4000,"draw":4001,"oooo":4002,"!!!!":4003,"opportunity":4004,"py":4005,"ei":4006,"bts":4007,"teacher":4008,"character":4009,"johnson":4010,"bron":4011,"lywood":4012,"chine":4013,"cing":4014,"cine":4015,"dge":4016,"gaming":4017,"russia":4018,"cia":4019,"quote":4020,"rich":4021,"gov":4022,"flowers":4023,"spiri":4024,"stin":4025,"growth":4026,"ðŁı¼":4027,"commer":4028,"juni":4029,"mum":4030,"ran":4031,"sna":4032,"aren":4033,"cb":4034,"actor":4035,"color":4036,"sit":4037,"pair":4038,"chi":4039,"bow":4040,"academy":4041,"held":4042,"rang":4043,"metal":4044,"yl":4045,"active":4046,"probably":4047,"tch":4048,"needed":4049,"spee":4050,"choice":4051,"italy":4052,"ryan":4053,"ðŁĩº":4054,"flower":4055,"vit":4056,"mn":4057,"foundation":4058,"bak":4059,"sions":4060,"neigh":4061,"floo":4062,"heard":4063,"remo":4064,"fresh":4065,"inging":4066,"ref":4067,"town":4068,"clou":4069,"jesus":4070,"spirit":4071,"couldn":4072,"zes":4073,"ðŁĴĻ":4074,"williams":4075,"proce":4076,"modern":4077,"process":4078,"shoes":4079,"created":4080,"tric":4081,"issues":4082,"anne":4083,"atten":4084,"debut":4085,"hr":4086,"nit":4087,"stig":4088,"apo":4089,"eps":4090,"zu":4091,"ãĢ":4092,"six":4093,"cards":4094,"langu":4095,"famous":4096,"tournament":4097,"sel":4098,"ebay":4099,"yn":4100,"ston":4101,"kick":4102,"announced":4103,"kam":4104,"voc":4105,"brilliant":4106,"house":4107,"cheese":4108,"warri":4109,"music":4110,"hockey":4111,"ðŁĺĤðŁĺĤ":4112,"skills":4113,"autom":4114,"smart":4115,"medical":4116,"mony":4117,"ex":4118,"guar":4119,"give":4120,"personal":4121,"vention":4122,"alli":4123,"press":4124,"floor":4125,"mc":4126,"victory":4127,"him":4128,"simple":4129,"thor":4130,"ðŁĩºðŁĩ":4131,"tail":4132,"lucky":4133,"alex":4134,"quite":4135,"bot":4136,"ssions":4137,"challeng":4138,"cann":4139,"amazon":4140,"hell":4141,"bought":4142,"):":4143,"edy":4144,"secret":4145,"production":4146,"independ":4147,"defe":4148,"added":4149,"pr":4150,"pag":4151,"bed":4152,"greatest":4153,"within":4154,"jay":4155,"ðŁ¥":4156,"ireland":4157,"rely":4158,"sd":4159,"text":4160,"driving":4161,"program":4162,"speed":4163,"colum":4164,"stron":4165,"é":4166,"forest":4167,"âĸ":4168,"machine":4169,"coin":4170,"scar":4171,"ount":4172,"bie":4173,"¡ï¸ı":4174,"portra":4175,"common":4176,"wrest":4177,"received":4178,"know":4179,"invest":4180,"plans":4181,"accor":4182,"adop":4183,"tery":4184,"reali":4185,"pp":4186,"kal":4187,"artwork":4188,"mean":4189,"god":4190,"instead":4191,"anci":4192,"motivation":4193,"asing":4194,"inspiration":4195,"upcoming":4196,"political":4197,"europe":4198,"mers":4199,"heavy":4200,"ðŁijį":4201,"febru":4202,"scotland":4203,"ough":4204,"bt":4205,"boss":4206,"schedu":4207,"speak":4208,"nick":4209,"ured":4210,"ino":4211,"ek":4212,"risk":4213,"tory":4214,"presents":4215,"bon":4216,"rug":4217,"states":4218,"exhibition":4219,"ilo":4220,"mill":4221,"brought":4222,":-)":4223,"touri":4224,"come":4225,"officially":4226,"champions":4227,"doors":4228,"rep":4229,"pose":4230,"extra":4231,"kings":4232,"soccer":4233,"squad":4234,"applic":4235,"ata":4236,"sometimes":4237,"tari":4238,"excellent":4239,"ðŁĺĺ":4240,"straight":4241,"carol":4242,"rip":4243,"âĢį":4244,"graphic":4245,"mol":4246,"election":4247,"february":4248,"asons":4249,"li":42
50,"dir":4251,"mt":4252,"nick":4253,"usu":4254,"mrs":4255,"comics":4256,"institu":4257,"corpor":4258,"vi":4259,"ðŁĻı":4260,"tural":4261,"dise":4262,"acci":4263,"weare":4264,"among":4265,"shopping":4266,"till":4267,"what":4268,"chair":4269,"span":4270,"chinese":4271,"innovation":4272,"joy":4273,"kit":4274,"century":4275,"obama":4276,"phili":4277,"fc":4278,"reach":4279,"citi":4280,"ulous":4281,"non":4282,"dang":4283,"happening":4284,"burn":4285,"pel":4286,"orange":4287,"dv":4288,"kick":4289,"claim":4290,"ingham":4291,"phy":4292,"nov":4293,"podcast":4294,"whi":4295,"nights":4296,"earlier":4297,"bear":4298,"lah":4299,"exciting":4300,"ora":4301,"given":4302,"slo":4303,"memories":4304,"continues":4305,"product":4306,"gho":4307,"cd":4308,"knows":4309,"ðŁİī":4310,"published":4311,"discuss":4312,"yard":4313,"iphone":4314,"tries":4315,"wall":4316,"feb":4317,"aren":4318,"truth":4319,"winners":4320,"ture":4321,"ditional":4322,"military":4323,"problem":4324,"mand":4325,"dog":4326,"loss":4327,"cric":4328,"canadi":4329,"veter":4330,"village":4331,"\",":4332,"yr":4333,"ung":4334,"donald":4335,"aging":4336,"birds":4337,"scienti":4338,"les":4339,"this":4340,"region":4341,"tical":4342,"itten":4343,"ila":4344,"ðŁĺİ":4345,"dad":4346,"diam":4347,"above":4348,"stren":4349,"lit":4350,"pir":4351,"lab":4352,"focus":4353,"busy":4354,"dur":4355,"apply":4356,"sma":4357,"author":4358,"aci":4359,"execu":4360,"domin":4361,"rela":4362,"jackson":4363,"ato":4364,"washington":4365,"ðŁĻĮ":4366,"kill":4367,"popular":4368,"cement":4369,"road":4370,"eating":4371,"location":4372,"vent":4373,"arre":4374,"nan":4375,"custo":4376,"adventure":4377,"ordin":4378,"sport":4379,"ult":4380,"lock":4381,"question":4382,"driver":4383,"landsc":4384,"oni":4385,"kins":4386,"pd":4387,"jordan":4388,"tered":4389,"kk":4390,"af":4391,"child":4392,"sp":4393,"justin":4394,"eni":4395,"selling":4396,"zo":4397,"whit":4398,"boston":4399,"particip":4400,"signing":4401,"happened":4402,"heat":4403,"mam":4404,"dreams":4405,"lows":4406,"graph":4407,"theday":4408,"heading":4409,"bro":4410,"blessed":4411,"vic":4412,"vegas":4413,"hd":4414,"inning":4415,"roman":4416,"andro":4417,"denti":4418,"use":4419,"cit":4420,"progress":4421,"writer":4422,"bob":4423,"ffs":4424,"growing":4425,"bly":4426,"aware":4427,"exam":4428,"spent":4429,"bet":4430,"score":4431,"beyond":4432,"docu":4433,"adel":4434,"sf":4435,"coura":4436,"collabor":4437,"inc":4438,"private":4439,"boat":4440,"**":4441,"zone":4442,"pha":4443,"bill":4444,"total":4445,"planning":4446,"towards":4447,"places":4448,"preview":4449,"creative":4450,"damn":4451,"ideas":4452,"seems":4453,"poten":4454,"saying":4455,"display":4456,"sw":4457,"aqu":4458,"louis":4459,"bye":4460,"lil":4461,"email":4462,"western":4463,"germany":4464,"eller":4465,"res":4466,"fant":4467,"mentary":4468,"deals":4469,"richard":4470,"jersey":4471,"streng":4472,"rad":4473,"pizza":4474,"mond":4475,"ware":4476,"lac":4477,"gi":4478,"archi":4479,"cd":4480,"yellow":4481,"recently":4482,"reach":4483,"à¹":4484,"kitchen":4485,"designed":4486,"try":4487,"gal":4488,"restaurant":4489,"ature":4490,"ww":4491,"jas":4492,"lma":4493,"ðŁijĮ":4494,"pain":4495,"avo":4496,"minute":4497,"schol":4498,"therap":4499,"ticket":4500,"dry":4501,"japan":4502,"ditions":4503,"terri":4504,"selves":4505,"happen":4506,"tup":4507,"mag":4508,"copy":4509,"sher":4510,"freedom":4511,"file":4512,"specially":4513,"toronto":4514,"load":4515,"gary":4516,"rey":4517,"answer":4518,"loy":4519,"caught":4520,"prize":4521,"une":4522,"fication":4523,"niger":4524,"syd":4525,"touch":4526,"feature":4527,"j
azz":4528,"records":4529,"himself":4530,"dish":4531,"rober":4532,"spotted":4533,"master":4534,"wave":4535,"finals":4536,"bull":4537,"forum":4538,"ald":4539,"recomm":4540,"cha":4541,"ae":4542,"doo":4543,"instru":4544,"truly":4545,"lg":4546,"ink":4547,"brothers":4548,"dest":4549,"jim":4550,"mit":4551,"closed":4552,"ison":4553,"tried":4554,"santa":4555,"affe":4556,"wan":4557,"horse":4558,"grow":4559,"campus":4560,"relation":4561,"native":4562,"journ":4563,"gov":4564,"oct":4565,"kit":4566,"bound":4567,"partner":4568,"rema":4569,"crowd":4570,"!)":4571,"calls":4572,"rail":4573,"quali":4574,"solution":4575,"contest":4576,"convers":4577,"snap":4578,"base":4579,"initi":4580,"tax":4581,"ye":4582,"entrepre":4583,"itor":4584,"construction":4585,"food":4586,"presented":4587,"nings":4588,"climate":4589,"km":4590,"model":4591,"bj":4592,"block":4593,"presentation":4594,"dream":4595,"fix":4596,"calling":4597,"busine":4598,"congress":4599,"understand":4600,"web":4601,"value":4602,"ï¸ıâĥ£":4603,"mexico":4604,"itely":4605,"kim":4606,"charity":4607,"reflec":4608,"blan":4609,"flying":4610,"analy":4611,"families":4612,"band":4613,"recipe":4614,"celebration":4615,"accep":4616,"ary":4617,"tot":4618,"gb":4619,"interested":4620,"captain":4621,"âĻ¥":4622,"tip":4623,"absol":4624,"braz":4625,"investig":4626,"ology":4627,"dec":4628,"truck":4629,"vering":4630,"clear":4631,"dont":4632,"gotta":4633,"advis":4634,"begins":4635,"mass":4636,"descri":4637,"block":4638,"kim":4639,"david":4640,"songs":4641,"memorial":4642,"features":4643,"sustain":4644,"'.":4645,"grab":4646,"jose":4647,"va":4648,"conserv":4649,"sets":4650,"manchester":4651,"fighting":4652,"degre":4653,"aga":4654,"ind":4655,"sleep":4656,"position":4657,"hair":4658,"signs":4659,"policy":4660,"ito":4661,"alert":4662,"stam":4663,"spend":4664,"wy":4665,"absolut":4666,"dm":4667,"animal":4668,"myster":4669,"successful":4670,"problems":4671,"robo":4672,"kay":4673,"garden":4674,"pd":4675,"mayor":4676,"dale":4677,"tol":4678,"offers":4679,"visiting":4680,"friendly":4681,"trees":4682,"officer":4683,"account":4684,"kevin":4685,"ðŁijį":4686,"giant":4687,"continu":4688,"consu":4689,"tract":4690,"nfl":4691,"ðŁĺĬ":4692,"hq":4693,"bility":4694,"aar":4695,"disney":4696,"teen":4697,"oned":4698,"white":4699,"trailer":4700,"dedic":4701,"alone":4702,"absolutely":4703,"digital":4704,"william":4705,"ination":4706,"swa":4707,"ee":4708,"entire":4709,"german":4710,"roll":4711,"hits":4712,"cost":4713,"stay":4714,"tha":4715,"alive":4716,"according":4717,"cot":4718,"literally":4719,"herit":4720,"reti":4721,"hahaha":4722,"experi":4723,"likes":4724,"gt":4725,"steel":4726,"____":4727,"chair":4728,"christian":4729,"tower":4730,"difference":4731,"md":4732,"tress":4733,"mid":4734,"prince":4735,"african":4736,"feder":4737,"foot":4738,"carri":4739,"served":4740,"rice":4741,"shall":4742,"featured":4743,"cker":4744,"recru":4745,"poe":4746,"sense":4747,"nific":4748,"comedy":4749,"content":4750,"fat":4751,"posted":4752,"contribu":4753,"timate":4754,"liver":4755,"mble":4756,"internet":4757,"age":4758,"european":4759,"cling":4760,"glad":4761,"ffic":4762,"sco":4763,"akes":4764,"elle":4765,"termin":4766,"tony":4767,"pale":4768,"colour":4769,"serious":4770,"patri":4771,"movies":4772,"bm":4773,"professional":4774,"ado":4775,"alu":4776,"bringing":4777,"falls":4778,"israel":4779,"term":4780,"language":4781,"brook":4782,"mann":4783,"communic":4784,"cannot":4785,"acti":4786,"phe":4787,"yan":4788,"entreprene":4789,"turkey":4790,"logical":4791,"long":4792,"arm":4793,"urs":4794,"workers":4795,"ingly":4796,"ggs":4797,"r
ic":4798,"tual":4799,"receive":4800,"opens":4801,"gear":4802,"social":4803,"feet":4804,"cking":4805,"adver":4806,"finan":4807,"feels":4808,"spla":4809,"hr":4810,"easter":4811,"brain":4812,"ãģ":4813,"fig":4814,"ledge":4815,"nearly":4816,"protect":4817,"massive":4818,"eth":4819,"awa":4820,"ðŁĺģ":4821,"yrs":4822,"awareness":4823,"definitely":4824,"kn":4825,"imagine":4826,"ku":4827,"systems":4828,"ðŁijı":4829,"fas":4830,"lik":4831,"provide":4832,"amo":4833,"discover":4834,"influ":4835,"maker":4836,"gaz":4837,"fitness":4838,"street":4839,"ers":4840,"ted":4841,"wc":4842,"ysis":4843,"positive":4844,"helped":4845,"quest":4846,"andrew":4847,"brad":4848,"bin":4849,"hanging":4850,"ling":4851,"bright":4852,"section":4853,"mass":4854,"ðŁĻĮ":4855,"followers":4856,"hosting":4857,"tempor":4858,"flag":4859,"ave":4860,"letter":4861,"kur":4862,"requi":4863,"often":4864,"cryp":4865,"suff":4866,"âļ½":4867,"russian":4868,"treatment":4869,"alle":4870,"hay":4871,"lan":4872,"keeping":4873,"holy":4874,"powerful":4875,"predic":4876,"fund":4877,"especially":4878,"window":4879,"jewel":4880,"ily":4881,"ðŁĴľ":4882,"generation":4883,"appa":4884,"seriously":4885,"od":4886,"ðŁĺĤðŁĺĤðŁĺĤ":4887,"certi":4888,"irish":4889,"ðŁijĮ":4890,"miami":4891,"beth":4892,"vity":4893,"secu":4894,"chef":4895,"crime":4896,"graphy":4897,"max":4898,"artists":4899,"revolu":4900,"guard":4901,"speech":4902,"uc":4903,"updates":4904,"faces":4905,"stant":4906,"changed":4907,"reports":4908,"lower":4909,"pear":4910,"nc":4911,"kil":4912,"looked":4913,"speaker":4914,"sf":4915,"respect":4916,"okay":4917,"ocean":4918,"sitting":4919,"architecture":4920,"trail":4921,"seat":4922,"ira":4923,"leg":4924,"japanese":4925,"dam":4926,"ular":4927,"swim":4928,"politics":4929,"financial":4930,"old":4931,"mouth":4932,"attemp":4933,"destin":4934,"fishing":4935,"attention":4936,"mem":4937,"changes":4938,"decided":4939,"religi":4940,"gin":4941,"cav":4942,"zz":4943,"adam":4944,"mac":4945,"write":4946,"begin":4947,"scul":4948,"alter":4949,"iss":4950,"athon":4951,"images":4952,"moo":4953,"joined":4954,"ðŁĺī":4955,"âŀ¡ï¸ı":4956,"passed":4957,"musli":4958,"hir":4959,"largest":4960,"camer":4961,"comic":4962,"ghted":4963,"rugby":4964,"burgh":4965,"gging":4966,"testing":4967,"prepar":4968,"laugh":4969,"aled":4970,"improve":4971,"believ":4972,"advice":4973,"shares":4974,"heart":4975,"turning":4976,"sb":4977,"tel":4978,"cafe":4979,"nes":4980,"daniel":4981,"patter":4982,"tz":4983,"sett":4984,"park":4985,"cand":4986,"stick":4987,"happens":4988,"brian":4989,"newest":4990,"epic":4991,"ador":4992,"kies":4993,"warning":4994,"animals":4995,"custom":4996,"arc":4997,"dian":4998,"gold":4999,"core":5000,"tf":5001,"city":5002,"pants":5003,"reality":5004,"confi":5005,"inju":5006,"fox":5007,"guil":5008,"knew":5009,"âĺº":5010,"correc":5011,"itude":5012,"dden":5013,".#":5014,"reduc":5015,"pass":5016,"fon":5017,"ya":5018,"owner":5019,"returns":5020,"nc":5021,"east":5022,"apol":5023,"insur":5024,"tho":5025,"sim":5026,"junior":5027,"bee":5028,"angel":5029,"attle":5030,"electric":5031,"horror":5032,"crash":5033,"eye":5034,"path":5035,"southern":5036,"employe":5037,"geo":5038,"tan":5039,"haz":5040,"rally":5041,"ðŁı»":5042,"property":5043,"wasn":5044,"enjoyed":5045,"grey":5046,"gas":5047,"brew":5048,"northern":5049,"holding":5050,"gp":5051,"take":5052,"chart":5053,"lyn":5054,"drama":5055,"zo":5056,"paid":5057,"throwback":5058,"cup":5059,"discussion":5060,"downtown":5061,"will":5062,"lew":5063,"bis":5064,"tary":5065,"bread":5066,"upon":5067,"rate":5068,"teachers":5069,"itation":5070,"anced":5071,"cycle":5
072,"choose":5073,"dc":5074,"iran":5075,"cow":5076,"dave":5077,"raise":5078,"princess":5079,"faith":5080,"->":5081,"industri":5082,"spain":5083,"guitar":5084,"facts":5085,"mn":5086,"spen":5087,"courte":5088,"gott":5089,"projects":5090,"audi":5091,"osc":5092,"peter":5093,"sand":5094,"interest":5095,"happiness":5096,"venue":5097,"soldi":5098,"surprise":5099,"potential":5100,"perio":5101,"customer":5102,"ii":5103,"gni":5104,"manufac":5105,"eco":5106,"broken":5107,"singer":5108,"vels":5109,"wales":5110,"hus":5111,"inj":5112,"four":5113,"talent":5114,"dying":5115,"matthe":5116,"film":5117,"joining":5118,"sell":5119,"jar":5120,"lmao":5121,"surger":5122,"bbc":5123,"sources":5124,"austin":5125,"nik":5126,"charles":5127,"fam":5128,"princi":5129,"angel":5130,"cash":5131,"lot":5132,"ored":5133,"plays":5134,"plate":5135,"done":5136,"memory":5137,"brings":5138,"nba":5139,"solutions":5140,"teaching":5141,"grace":5142,"circu":5143,"helps":5144,"founder":5145,"mary":5146,"explore":5147,"decor":5148,"parts":5149,"cho":5150,"integr":5151,"hau":5152,"ises":5153,"putting":5154,"iner":5155,"rit":5156,"vy":5157,"michel":5158,"blues":5159,"everyday":5160,"forms":5161,"bio":5162,"year":5163,"pin":5164,"tter":5165,"spring":5166,"))":5167,"pot":5168,"aling":5169,"performing":5170,"shan":5171,"planet":5172,"musical":5173,"heads":5174,"italian":5175,"strugg":5176,"âĢįâĻ":5177,"wings":5178,"pump":5179,"hh":5180,"trou":5181,"aid":5182,"prime":5183,"earth":5184,"paint":5185,"mont":5186,"amy":5187,"bbc":5188,"fabulous":5189,"fruit":5190,"android":5191,"bourne":5192,"ceremony":5193,"ential":5194,"??":5195,"debate":5196,"oning":5197,"draft":5198,"solar":5199,"tx":5200,"jam":5201,"corn":5202,"!!!!!":5203,"broo":5204,"milk":5205,"posed":5206,"ohi":5207,"movement":5208,"bren":5209,"partner":5210,"pg":5211,"ette":5212,"aries":5213,"shout":5214,"ng":5215,"leaving":5216,"tells":5217,"sens":5218,"taste":5219,"kelly":5220,"worl":5221,"gym":5222,"rich":5223,"egy":5224,"pid":5225,"mas":5226,"âĤ":5227,"courtesy":5228,"frank":5229,"increase":5230,"written":5231,"ppers":5232,"rel":5233,"hai":5234,"sas":5235,"sound":5236,"tti":5237,"wich":5238,"river":5239,"...\"":5240,"ag":5241,"fellow":5242,"rome":5243,"small":5244,"gency":5245,"ican":5246,"luxury":5247,"proof":5248,"met":5249,"wildlife":5250,"moments":5251,"rather":5252,"corner":5253,"compe":5254,"canadian":5255,"likely":5256,"therapy":5257,"liam":5258,"economic":5259,"indie":5260,"route":5261,"fight":5262,"hope":5263,"setting":5264,"antly":5265,"cross":5266,"fantasy":5267,"dee":5268,"sketch":5269,"compli":5270,"ymi":5271,"rules":5272,"engineering":5273,"figure":5274,"row":5275,".,":5276,"fw":5277,"sydney":5278,"wou":5279,"tation":5280,"drew":5281,"uses":5282,"there":5283,"spread":5284,"structure":5285,"patrick":5286,"apparently":5287,"ros":5288,"hills":5289,"wwe":5290,"anny":5291,"commission":5292,"div":5293,"fying":5294,"consul":5295,"analysis":5296,"exi":5297,"tennis":5298,"vehicle":5299,"ðŁĺŃðŁĺŃ":5300,"ass":5301,"highly":5302,"opened":5303,"bann":5304,"ðŁĴĻ":5305,"mph":5306,"wishing":5307,"vor":5308,"fif":5309,"giveaway":5310,"rr":5311,"ray":5312,"jess":5313,"gat":5314,"icymi":5315,"xit":5316,"highest":5317,"york":5318,"pie":5319,"involved":5320,"higher":5321,"rie":5322,"malay":5323,"intelli":5324,"despite":5325,"chee":5326,"sarah":5327,"bean":5328,"recogni":5329,"arsen":5330,"talented":5331,"passion":5332,"ich":5333,"abc":5334,"leads":5335,"disease":5336,"vis":5337,"sec":5338,"presenting":5339,"milli":5340,"hole":5341,"shots":5342,"depart":5343,"surgery":5344,"govt":5345,"bin":5
346,"dual":5347,"evi":5348,"longer":5349,"evol":5350,"screen":5351,"portrait":5352,"etc":5353,"lose":5354,"chat":5355,"pen":5356,"pi":5357,"oma":5358,"sick":5359,"erc":5360,"companies":5361,"entry":5362,"plane":5363,"gry":5364,"vene":5365,"liverpool":5366,"premiere":5367,"shared":5368,"ared":5369,"films":5370,"ira":5371,"holidays":5372,"cricket":5373,"ician":5374,"ving":5375,".)":5376,"ultimate":5377,"division":5378,"conduc":5379,"sept":5380,"forces":5381,"mont":5382,"smart":5383,"disapp":5384,"sunshine":5385,"ind":5386,"bless":5387,"made":5388,"colors":5389,"frank":5390,"iron":5391,"bottle":5392,"sgo":5393,"mood":5394,"jason":5395,"eric":5396,"birth":5397,"teen":5398,"response":5399,"target":5400,"statement":5401,"fear":5402,"thel":5403,"alum":5404,"arab":5405,"blin":5406,"direction":5407,"steps":5408,"erial":5409,"worked":5410,"atl":5411,"ðŁĴķ":5412,"felt":5413,"poli":5414,"scenes":5415,"homes":5416,"bell":5417,"eat":5418,"ateful":5419,"tin":5420,"lace":5421,"folks":5422,"pse":5423,"ann":5424,"wisdom":5425,"fav":5426,"butter":5427,"sr":5428,"areas":5429,"smoo":5430,"biz":5431,"dges":5432,"appo":5433,"more":5434,"them":5435,"effect":5436,"windows":5437,"sunny":5438,"capital":5439,"totally":5440,"cities":5441,"grant":5442,"mbers":5443,"slow":5444,"autu":5445,"ilities":5446,"wro":5447,"rising":5448,"stics":5449,"violence":5450,"igh":5451,"quot":5452,"hit":5453,"tc":5454,"heritage":5455,"buff":5456,"nes":5457,"zar":5458,"dential":5459,"exac":5460,"edge":5461,"deep":5462,"arena":5463,"became":5464,"benefits":5465,"marks":5466,"mber":5467,"az":5468,"ames":5469,"preci":5470,"dragon":5471,"reg":5472,"dings":5473,"dos":5474,"ðŁĴª":5475,"nel":5476,"sity":5477,"meal":5478,"dist":5479,"legend":5480,"purchase":5481,"pical":5482,"stick":5483,"fat":5484,"duba":5485,"profess":5486,"carto":5487,"prof":5488,"countries":5489,"responsi":5490,"sequ":5491,"fab":5492,"tribute":5493,"honored":5494,"practic":5495,"purple":5496,"anton":5497,"pared":5498,"tough":5499,"summer":5500,"environment":5501,"sons":5502,"ðŁĻı":5503,"mps":5504,"gies":5505,"heroes":5506,"telling":5507,"henry":5508,"fen":5509,"knowledge":5510,"Ģï¸ı":5511,"fr":5512,"neg":5513,"ure":5514,"acking":5515,"hearts":5516,"soo":5517,"hollywood":5518,"jump":5519,"sauce":5520,"schedule":5521,"turn":5522,"yoga":5523,"creating":5524,"cket":5525,"creek":5526,"âŃ":5527,"customers":5528,"madri":5529,"gul":5530,"assemb":5531,"mount":5532,"cell":5533,"top":5534,"stal":5535,"davis":5536,"twi":5537,"sign":5538,"premier":5539,"itions":5540,"hearing":5541,"unk":5542,"patients":5543,"appear":5544,"heaven":5545,"alty":5546,"doctor":5547,"ae":5548,"platform":5549,"jeff":5550,"ðŁĵ·":5551,"regional":5552,"bid":5553,"boxing":5554,"exten":5555,"ority":5556,"aw":5557,"wise":5558,"ille":5559,"several":5560,"bie":5561,"situ":5562,"syria":5563,"âľħ":5564,"reminder":5565,"entertain":5566,"lion":5567,"partners":5568,"inn":5569,"phar":5570,"fau":5571,"pls":5572,"expected":5573,"sugar":5574,"decision":5575,"sb":5576,"chron":5577,"association":5578,"leaves":5579,"visited":5580,"shap":5581,"ðŁĴĸ":5582,"further":5583,"hann":5584,"wi":5585,"runs":5586,"ler":5587,"funding":5588,"filled":5589,"......":5590,"tiny":5591,"hang":5592,"org":5593,"cool":5594,"semin":5595,"ðŁıĨ":5596,"spons":5597,"navy":5598,"saint":5599,"drug":5600,"dal":5601,"roun":5602,"covered":5603,"traditional":5604,"investment":5605,"dete":5606,"alism":5607,"flow":5608,"nis":5609,"sunrise":5610,"feat":5611,"fted":5612,"weird":5613,"jere":5614,"vegan":5615,"medicine":5616,"ano":5617,"accu":5618,"delivery":5619,"temple":
5620,"changing":5621,"wilson":5622,"philipp":5623,"refe":5624,"nd":5625,"iser":5626,"gay":5627,"rand":5628,"atives":5629,"tely":5630,"pand":5631,"intellig":5632,"gare":5633,"ambas":5634,"demon":5635,"committee":5636,"strategy":5637,"refuge":5638,"budget":5639,"protec":5640,"pier":5641,"express":5642,"nomin":5643,"economy":5644,"allow":5645,"icon":5646,"galax":5647,"oh":5648,"indivi":5649,"demand":5650,"virgin":5651,"luke":5652,"alists":5653,"mani":5654,"smi":5655,"judge":5656,"enty":5657,"michi":5658,"result":5659,"amed":5660,"speaks":5661,"',":5662,"houston":5663,"shin":5664,"bing":5665,"fly":5666,"chem":5667,"auto":5668,"vas":5669,"get":5670,"arm":5671,"thanks":5672,"din":5673,"gang":5674,"xx":5675,"sion":5676,"located":5677,"pl":5678,"josh":5679,"info":5680,"joins":5681,"adverti":5682,"otd":5683,"eld":5684,"sie":5685,"reasons":5686,"vent":5687,"ðŁĩºðŁĩ¸":5688,"âł":5689,"conversation":5690,"studi":5691,"ðŁĶ¥ðŁĶ¥":5692,"gos":5693,"sounds":5694,"unit":5695,"musc":5696,"gel":5697,"acked":5698,"paci":5699,"cos":5700,"dere":5701,"uu":5702,"ao":5703,"lam":5704,"inspiring":5705,"arms":5706,"tware":5707,"matters":5708,"addic":5709,"dude":5710,"ext":5711,"crisis":5712,"bath":5713,"meet":5714,"singh":5715,"expect":5716,"delhi":5717,"rescue":5718,"worst":5719,"aug":5720,"shipping":5721,"serving":5722,"sto":5723,"dark":5724,"aces":5725,"historic":5726,"landscape":5727,"designer":5728,"billion":5729,"grateful":5730,"wake":5731,"eve":5732,"miller":5733,"housing":5734,"dynam":5735,"isco":5736,"beha":5737,"shop":5738,"prou":5739,"eas":5740,"asia":5741,"eding":5742,"kon":5743,"department":5744,"awar":5745,"marine":5746,"inci":5747,"photographer":5748,"tape":5749,"logo":5750,"rings":5751,"dit":5752,"----":5753,"vinyl":5754,"wc":5755,"voting":5756,"seven":5757,"ambassad":5758,"dallas":5759,"tu":5760,"comment":5761,"kra":5762,"bles":5763,"wag":5764,"ud":5765,"audio":5766,"strike":5767,"official":5768,"ots":5769,"metho":5770,"tools":5771,"radi":5772,"alan":5773,"hunt":5774,"watched":5775,"ake":5776,"fake":5777,"drinking":5778,"merry":5779,"ml":5780,"bday":5781,"rio":5782,"nike":5783,"cant":5784,"repe":5785,"costu":5786,"murder":5787,"akers":5788,"chers":5789,"outs":5790,"beginning":5791,"sos":5792,"ades":5793,"nin":5794,"notes":5795,"wrote":5796,"solo":5797,"ci":5798,"lighting":5799,"urban":5800,"brexit":5801,"attend":5802,"shirts":5803,"playo":5804,"actress":5805,"plic":5806,"standard":5807,"quotes":5808,"parade":5809,"ancient":5810,"©":5811,"turing":5812,"ree":5813,"primary":5814,"flash":5815,"citiz":5816,"mates":5817,"stein":5818,"zi":5819,"clinton":5820,"skin":5821,"gene":5822,"hum":5823,"gar":5824,"tle":5825,"yi":5826,"focu":5827,"dean":5828,"plants":5829,"cyber":5830,"bu":5831,"ome":5832,"hop":5833,"address":5834,"tix":5835,"gifts":5836,"relationship":5837,"subscri":5838,"feed":5839,"exactly":5840,"hawks":5841,"exo":5842,"stress":5843,"sn":5844,"arrested":5845,"ane":5846,"software":5847,"zero":5848,"theme":5849,"mumb":5850,"immigr":5851,"mia":5852,"makeup":5853,"pleasure":5854,"univers":5855,"harb":5856,"engine":5857,"aper":5858,"rin":5859,"bra":5860,"institute":5861,"leather":5862,"alth":5863,"singing":5864,"cos":5865,"ghty":5866,"meas":5867,"stic":5868,"side":5869,"insurance":5870,"cot":5871,"pitch":5872,"mountains":5873,"crimin":5874,"supre":5875,"valentine":5876,"ater":5877,"wouldn":5878,"scale":5879,"related":5880,"regar":5881,"startup":5882,"packed":5883,"mike":5884,"weekly":5885,"pts":5886,"count":5887,"har":5888,"gotten":5889,"mind":5890,"berlin":5891,"conditions":5892,"switch":5893,"corn":5894,
"save":5895,"gli":5896,"emergency":5897,"tuned":5898,"stock":5899,"discussing":5900,"everybody":5901,"sday":5902,"whether":5903,"wrestling":5904,"eces":5905,"gender":5906,"chen":5907,"ðŁijĢ":5908,"madrid":5909,"marathon":5910,"egg":5911,"ier":5912,"thx":5913,"asking":5914,"korea":5915,"wolf":5916,"aya":5917,"gm":5918,"gau":5919,"atory":5920,"vr":5921,"grass":5922,"killing":5923,"bble":5924,"uro":5925,"uni":5926,"eth":5927,"shore":5928,"then":5929,"reale":5930,"bottom":5931,"exerc":5932,"kar":5933,"ories":5934,"adri":5935,"sands":5936,"sex":5937,".'":5938,"volunteers":5939,"perform":5940,"parliam":5941,"include":5942,"delighted":5943,"executive":5944,"fuel":5945,"kiss":5946,"ãħ":5947,"charge":5948,"hu":5949,"cakes":5950,"vet":5951,"glu":5952,"agree":5953,"prices":5954,"nau":5955,"hl":5956,"gru":5957,"raj":5958,"strength":5959,"bic":5960,"spending":5961,"ales":5962,"aven":5963,"blast":5964,":(":5965,"yof":5966,"normal":5967,"six":5968,"quick":5969,"sea":5970,"daw":5971,"meets":5972,"lovers":5973,"updated":5974,"potat":5975,"completed":5976,"cook":5977,"opportunities":5978,"pure":5979,"organic":5980,"temper":5981,"cam":5982,"avoid":5983,"parking":5984,"dubai":5985,"ando":5986,"distri":5987,"toy":5988,"completely":5989,"donald":5990,"trial":5991,"bass":5992,"boun":5993,"background":5994,"vas":5995,"marvel":5996,"lum":5997,"rus":5998,"tool":5999,"commissi":6000,"throwback":6001,"finding":6002,"islam":6003,"!?":6004,"stop":6005,"evil":6006,"oral":6007,"residents":6008,"identi":6009,"oak":6010,"ðŁİ¶":6011,"lil":6012,"spanish":6013,"chapter":6014,"stopped":6015,"direct":6016,"hosted":6017,"picked":6018,"labour":6019,"lewis":6020,"defense":6021,"à®":6022,"healthcare":6023,"whis":6024,"math":6025,"peak":6026,"raised":6027,"fix":6028,"bull":6029,"thir":6030,"chelsea":6031,"folk":6032,"tre":6033,"candi":6034,"paul":6035,"either":6036,"adam":6037,"poetry":6038,"jewelry":6039,"ðŁ¦":6040,"pray":6041,"ا":6042,"gc":6043,"oz":6044,"wishes":6045,"foreign":6046,"sung":6047,"learned":6048,"ene":6049,"ning":6050,"michael":6051,"illustration":6052,"legendary":6053,"wav":6054,"bau":6055,"ðŁļ¨":6056,"calend":6057,"streets":6058,"âĨ":6059,"monster":6060,"buck":6061,"gr":6062,"school":6063,"bath":6064,"waste":6065,"neck":6066,"hawa":6067,"beach":6068,"replac":6069,"ject":6070,"oner":6071,"factory":6072,"count":6073,"ðŁĵ¸":6074,"morgan":6075,"dering":6076,"sean":6077,"stephen":6078,"dep":6079,"novel":6080,"videos":6081,"ical":6082,"pressure":6083,"arsenal":6084,"expre":6085,"irs":6086,"trending":6087,"ssa":6088,"flash":6089,"resear":6090,"through":6091,"professor":6092,"sculp":6093,"tos":6094,"gged":6095,"mma":6096,"bee":6097,"ape":6098,"hunter":6099,"ami":6100,"hei":6101,"plastic":6102,"bucks":6103,"universe":6104,"legen":6105,"nigeria":6106,"pleased":6107,"ris":6108,"thinks":6109,"autumn":6110,"ids":6111,"dis":6112,"anthony":6113,"ðŁı½":6114,"aked":6115,"glasses":6116,"finance":6117,"zer":6118,"kas":6119,"contract":6120,"numbers":6121,"shaw":6122,"partnership":6123,"til":6124,"launched":6125,"sal":6126,"victoria":6127,"theater":6128,"usual":6129,"names":6130,"period":6131,"eliza":6132,"ith":6133,"barcel":6134,"rocks":6135,"bags":6136,"mate":6137,"distribu":6138,"jon":6139,"diffic":6140,"alized":6141,"curren":6142,"scored":6143,"bha":6144,"dublin":6145,"rose":6146,"inted":6147,"solid":6148,"behavi":6149,"walker":6150,"simply":6151,"gardens":6152,"headed":6153,"ini":6154,"ohio":6155,"weap":6156,"fo":6157,"glen":6158,"estate":6159,"random":6160,"thunder":6161,"thru":6162,"kill":6163,"jacket":6164,"iti":6165,"entertainm
ent":6166,"thanksgiving":6167,"ental":6168,"encoura":6169,"elo":6170,"ather":6171,"tank":6172,"highlights":6173,"fting":6174,"rule":6175,"models":6176,"border":6177,"bjp":6178,"husband":6179,"indone":6180,"kenya":6181,"bears":6182,"alo":6183,"ninten":6184,"pix":6185,"stro":6186,"orders":6187,"salad":6188,"roads":6189,"nor":6190,"lation":6191,"sophi":6192,"ðŁı¼":6193,"pieces":6194,"bone":6195,"mins":6196,"includes":6197,"nutr":6198,"phil":6199,"sent":6200,"fundra":6201,"gain":6202,"borough":6203,"nad":6204,"monday":6205,"activity":6206,"items":6207,"becoming":6208,"kenne":6209,"detro":6210,"cardi":6211,"guests":6212,"ux":6213,"worldwide":6214,"severe":6215,"news":6216,"thankful":6217,"fiction":6218,"vege":6219,"mall":6220,"sian":6221,"eral":6222,"injury":6223,"lee":6224,"menu":6225,"dancing":6226,"scotti":6227,"example":6228,"(#":6229,"nai":6230,"studios":6231,"bai":6232,"ðŁĴĽ":6233,"jav":6234,"diamond":6235,"vince":6236,"rick":6237,"protection":6238,"lincol":6239,"champs":6240,"approach":6241,"dar":6242,"mile":6243,"clouds":6244,"jeff":6245,"infin":6246,"lers":6247,"ples":6248,"peace":6249,"gop":6250,"âĻ¡":6251,"techn":6252,"stra":6253,"average":6254,"effort":6255,"introducing":6256,"diversity":6257,"australian":6258,"amp":6259,"boost":6260,"ske":6261,"patient":6262,"appreciate":6263,"icians":6264,"pur":6265,"fell":6266,"woods":6267,"illustr":6268,"ðŁĸ":6269,"agency":6270,"actions":6271,"britain":6272,"underway":6273,"seattle":6274,"eland":6275,"ago":6276,"fill":6277,"streaming":6278,"protest":6279,"challenges":6280,"kyo":6281,"etsy":6282,"cooking":6283,"expert":6284,"russ":6285,"rainbow":6286,"commercial":6287,"spin":6288,"beats":6289,"cry":6290,"valu":6291,"eli":6292,"throw":6293,"grams":6294,"levels":6295,"michigan":6296,"cad":6297,"adorable":6298,"constitu":6299,"ws":6300,"pub":6301,"midnight":6302,"that":6303,"netfli":6304,"brazil":6305,"diego":6306,"regular":6307,"joy":6308,"âĤ¬":6309,"liqu":6310,"eastern":6311,"kni":6312,"flat":6313,"np":6314,"brown":6315,"wer":6316,"sey":6317,"tters":6318,"acting":6319,"vanc":6320,"cycling":6321,"programme":6322,"raw":6323,"complex":6324,"tattoo":6325,"throwbackthursday":6326,"sessions":6327,"rooms":6328,"sight":6329,"species":6330,"bomb":6331,"laugh":6332,"keeps":6333,"moon":6334,"officers":6335,"conver":6336,"tr":6337,"hash":6338,"tack":6339,"rious":6340,"adap":6341,"aj":6342,"recogn":6343,"expo":6344,"sugge":6345,"confirmed":6346,"rolling":6347,"dressing":6348,"ict":6349,"friday":6350,"phones":6351,"ridge":6352,"concept":6353,"roy":6354,"keys":6355,"effor":6356,"cate":6357,"kne":6358,"even":6359,"lay":6360,"communities":6361,"mod":6362,"naz":6363,"everywhere":6364,"alab":6365,"bitcoin":6366,"banks":6367,"outdoor":6368,"federal":6369,"stores":6370,"hp":6371,"cal":6372,"mely":6373,"signific":6374,"bear":6375,"republic":6376,"closer":6377,"allah":6378,"pick":6379,"xd":6380,"palace":6381,"chill":6382,"bam":6383,"erous":6384,"una":6385,"allen":6386,"outstanding":6387,"olympic":6388,"supply":6389,"figu":6390,"vau":6391,"lp":6392,"charlie":6393,"unes":6394,">>>":6395,"legends":6396,"icial":6397,"coast":6398,"benefit":6399,"multi":6400,"fits":6401,"farmers":6402,"amount":6403,"sisters":6404,"harve":6405,"honey":6406,"queen":6407,"bers":6408,"plann":6409,"âŃIJ":6410,"mu":6411,"barcelona":6412,"alber":6413,"status":6414,"remain":6415,"extra":6416,"candy":6417,"vious":6418,"âľĮ":6419,"ov":6420,"warriors":6421,"-->":6422,"jump":6423,"amar":6424,"xmas":6425,"studies":6426,"iors":6427,"kor":6428,"donate":6429,"prep":6430,"fish":6431,"ima":6432,"painted":6433,"
admini":6434,"cosplay":6435,"sports":6436,"drops":6437,"fighter":6438,"evidence":6439,"ðŁĴª":6440,"lake":6441,"rob":6442,"cinema":6443,"profile":6444,"ñ":6445,"stands":6446,"legacy":6447,"shape":6448,"roof":6449,"civil":6450,"ians":6451,"syl":6452,"sham":6453,"voted":6454,"retail":6455,"philli":6456,"listed":6457,"duty":6458,"nb":6459,"thes":6460,"fare":6461,"auction":6462,"fficial":6463,"storms":6464,"dp":6465,"loun":6466,"shops":6467,"aly":6468,"anime":6469,"multiple":6470,"ðŁĺįðŁĺį":6471,"psycho":6472,"jean":6473,"apart":6474,"candidate":6475,"ggy":6476,"conf":6477,"joseph":6478,"wick":6479,"meat":6480,"frame":6481,"cl":6482,"forgot":6483,"phy":6484,"fing":6485,"lied":6486,"rep":6487,"seed":6488,"fall":6489,"ufc":6490,"nut":6491,"lind":6492,"mode":6493,"fields":6494,"ence":6495,"sley":6496,"ðŁ¤Ķ":6497,"chill":6498,"followed":6499,"announces":6500,"corru":6501,"trophy":6502,"themselves":6503,"acle":6504,"aldu":6505,"kong":6506,"lon":6507,"sv":6508,"broke":6509,"anderson":6510,"tai":6511,"story":6512,"temporary":6513,"activities":6514,"kati":6515,"ariz":6516,"crystal":6517,"spoke":6518,"extremely":6519,"trading":6520,"ðŁĴļ":6521,"ü":6522,"inch":6523,"edin":6524,"outfit":6525,"equip":6526,"madi":6527,"formed":6528,"beef":6529,"pop":6530,"tiger":6531,"thisday":6532,"tired":6533,"neighb":6534,"retro":6535,"isa":6536,"unt":6537,"tas":6538,"kansas":6539,"dest":6540,"seconds":6541,"tay":6542,"hurric":6543,"ou":6544,"galaxy":6545,"daddy":6546,"brow":6547,"burger":6548,"enced":6549,"desk":6550,"accur":6551,"secretary":6552,"elite":6553,"kab":6554,"chin":6555,"tourism":6556,"buddy":6557,"icide":6558,"dressed":6559,"ud":6560,"vacation":6561,"cheers":6562,"comfor":6563,"characters":6564,"jet":6565,"buying":6566,"lins":6567,"nap":6568,"realestate":6569,"lie":6570,"afc":6571,"iii":6572,"fame":6573,"nr":6574,"bat":6575,"agent":6576,"makers":6577,"âĢ¼":6578,"sector":6579,"opti":6580,"leon":6581,"diet":6582,"prayer":6583,"hip":6584,"mir":6585,"lex":6586,"bry":6587,"ana":6588,"passing":6589,"wen":6590,"recovery":6591,"aki":6592,"popul":6593,"resort":6594,"maria":6595,"stuck":6596,"reads":6597,"tier":6598,"perfec":6599,"netflix":6600,"poo":6601,"champ":6602,"oc":6603,"reduce":6604,"wered":6605,"comments":6606,"claim":6607,"accident":6608,"sag":6609,"hack":6610,"salt":6611,"kinda":6612,"killer":6613,"ios":6614,"zy":6615,"exchange":6616,"lecture":6617,"enger":6618,"icking":6619,"tau":6620,"reveals":6621,"prison":6622,"zom":6623,"ghan":6624,"ul":6625,"journal":6626,"iot":6627,"trin":6628,"jona":6629,"governor":6630,"cape":6631,"quarter":6632,"spective":6633,"impressive":6634,"babies":6635,"tx":6636,"mill":6637,"oy":6638,"harri":6639,"joint":6640,"sue":6641,"collaboration":6642,"trend":6643,"revolution":6644,"renew":6645,"alumni":6646,"gett":6647,"shell":6648,"sunday":6649,"entu":6650,"nic":6651,"donaldtrump":6652,"blockchain":6653,"pacific":6654,"explains":6655,"spy":6656,"advoc":6657,"paradi":6658,"tof":6659,"starring":6660,"pav":6661,"feed":6662,"brac":6663,"smoke":6664,"hamp":6665,"yam":6666,"tokyo":6667,"simon":6668,"dh":6669,"effici":6670,"physical":6671,"nj":6672,"elli":6673,"slow":6674,"graduate":6675,"americans":6676,"tify":6677,"fred":6678,"apore":6679,"finds":6680,"robin":6681,"wet":6682,"notice":6683,"semi":6684,"unve":6685,"kom":6686,"pilot":6687,"screening":6688,"daily":6689,"ðŁĴĹ":6690,"royal":6691,"spa":6692,"votes":6693,"nag":6694,"whate":6695,"attending":6696,"experim":6697,"addition":6698,"kate":6699,"stol":6700,"mali":6701,"foot":6702,"christ":6703,"chan":6704,"dee":6705,"licen":6706,"global
":6707,"moore":6708,"tia":6709,"brigh":6710,"mystery":6711,"yay":6712,"âĿ¤ï¸ıâĿ¤ï¸ı":6713,"creati":6714,"mechan":6715,"clock":6716,"dic":6717,"âĢĶ":6718,"pper":6719,"alph":6720,"throughout":6721,"allow":6722,"resources":6723,"selection":6724,"hamil":6725,"bbq":6726,"aaaa":6727,"virginia":6728,"disney":6729,"eng":6730,"sored":6731,"drinks":6732,"fancy":6733,"consider":6734,"enda":6735,"jane":6736,"handmade":6737,"dul":6738,"ontari":6739,"ius":6740,"sville":6741,"colorado":6742,"whatever":6743,"wheel":6744,"promise":6745,"never":6746,"designs":6747,"ably":6748,"sexual":6749,"vancou":6750,"ati":6751,"convention":6752,"cultural":6753,"singapore":6754,"promo":6755,"loaded":6756,"glasgo":6757,"ppl":6758,"noo":6759,"kee":6760,"stem":6761,"mention":6762,"ido":6763,"cruise":6764,"riding":6765,"becomes":6766,"bey":6767,"âļ½ï¸ı":6768,"twin":6769,"dedicated":6770,"nash":6771,"desi":6772,"workout":6773,"jenni":6774,"iv":6775,"groups":6776,"relax":6777,"phoeni":6778,"lift":6779,"mixed":6780,"mck":6781,"pc":6782,"must":6783,"metro":6784,"cies":6785,"yar":6786,"aim":6787,"anger":6788,"ie":6789,"recy":6790,"married":6791,"dropped":6792,"engag":6793,"lest":6794,"ambassador":6795,"oph":6796,"des":6797,"wick":6798,"assistant":6799,"natur":6800,"fail":6801,"ltd":6802,"short":6803,"kap":6804,"shaw":6805,"bigger":6806,"remains":6807,"critical":6808,"survey":6809,"coverage":6810,"erson":6811,"wind":6812,"nb":6813,"billy":6814,"letes":6815,"acts":6816,"jimmy":6817,"atlan":6818,"aland":6819,"tc":6820,"importance":6821,"damage":6822,"fg":6823,"storage":6824,"twt":6825,"bond":6826,"balance":6827,"crying":6828,"puppy":6829,"vote":6830,"push":6831,"ðŁĴľ":6832,"poly":6833,"mel":6834,"london":6835,"terrori":6836,"effective":6837,"corporate":6838,"atlanta":6839,"jaco":6840,"nasa":6841,"greek":6842,"senate":6843,"ish":6844,"eva":6845,"intelligence":6846,"efforts":6847,"alco":6848,"kun":6849,"hall":6850,"diag":6851,"claims":6852,"first":6853,"hb":6854,"bae":6855,"vul":6856,"pull":6857,"°":6858,"separ":6859,"speed":6860,"victi":6861,"onthisday":6862,"audience":6863,"rates":6864,"teach":6865,"filming":6866,"bush":6867,"song":6868,"yum":6869,"brun":6870,"raine":6871,"awa":6872,"parks":6873,"ðĿ":6874,"rabb":6875,"rach":6876,"raid":6877,"reached":6878,"rail":6879,"moves":6880,"selected":6881,"fri":6882,"raising":6883,"omy":6884,"stones":6885,"suk":6886,"francisco":6887,"cases":6888,"capit":6889,"confu":6890,"wtf":6891,"poke":6892,"equipment":6893,"greg":6894,"essential":6895,"offering":6896,"nex":6897,"pies":6898,"bec":6899,"creation":6900,"chairman":6901,"crown":6902,"wal":6903,"johnny":6904,"shift":6905,"neck":6906,"bang":6907,"bird":6908,"ðŁĺı":6909,"duck":6910,"reserve":6911,"depu":6912,"masters":6913,"overall":6914,"notic":6915,"juice":6916,"sneak":6917,"cheer":6918,"classes":6919,"eagles":6920,"nca":6921,"carpet":6922,"civil":6923,"coaches":6924,"harris":6925,"ups":6926,"balls":6927,"decor":6928,"martin":6929,"ros":6930,"vice":6931,"announcement":6932,"whose":6933,"tigers":6934,"stered":6935,"cts":6936,"dram":6937,"steel":6938,"young":6939,"install":6940,"suppo":6941,"recording":6942,"deck":6943,"seats":6944,"lder":6945,"angle":6946,"bot":6947,"styles":6948,"elections":6949,"fortun":6950,"nab":6951,"butter":6952,"arian":6953,"kash":6954,"inner":6955,"oured":6956,"beast":6957,"wei":6958,"iconic":6959,"experts":6960,"necess":6961,"beng":6962,"james":6963,"lia":6964,"greece":6965,"ðŁĵ·":6966,"ðŁĺģ":6967,"goodbye":6968,"mitch":6969,"twice":6970,"mumbai":6971,"steam":6972,"rush":6973,"medal":6974,"nett":6975,"fashion":6976,"tar"
:6977,"rs":6978,"saving":6979,"ricul":6980,"lm":6981,"sleeping":6982,"brooklyn":6983,"miss":6984,"sending":6985,"discovered":6986,"sphere":6987,"oftheday":6988,"kicks":6989,"missions":6990,"wright":6991,"ern":6992,"ghtly":6993,"ious":6994,"melbourne":6995,"startu":6996,"moved":6997,"carry":6998,"dak":6999,"agues":7000,"belgi":7001,"ema":7002,"wayne":7003,"dot":7004,"erie":7005,"pel":7006,"itunes":7007,"matthew":7008,"nobody":7009,"estab":7010,"calm":7011,"winds":7012,"luc":7013,"prepare":7014,"trends":7015,"exercise":7016,"advant":7017,"ðŁĴ¯":7018,"athletics":7019,"apps":7020,"ctions":7021,"advance":7022,"launches":7023,"little":7024,"realdonaldtrump":7025,"elizabeth":7026,"carolina":7027,"hub":7028,"hidden":7029,"nw":7030,"user":7031,"poll":7032,"greater":7033,"most":7034,"fed":7035,"pat":7036,"lifestyle":7037,"sati":7038,"scores":7039,"marriage":7040,"lr":7041,"avenue":7042,"deserve":7043,"rif":7044,"ðŁĹ":7045,"watch":7046,"championships":7047,"gray":7048,"enni":7049,"cotton":7050,"gom":7051,"where":7052,"package":7053,"sum":7054,"absolu":7055,"newly":7056,"foods":7057,"tyler":7058,"assembly":7059,"muslim":7060,"bank":7061,"rememb":7062,"options":7063,"producer":7064,"lando":7065,"funds":7066,"upper":7067,"shadow":7068,"progre":7069,"cop":7070,"inge":7071,"legs":7072,"detroit":7073,"hillary":7074,"jose":7075,"giants":7076,"soup":7077,"sustainable":7078,"tus":7079,"clothes":7080,"rocking":7081,"nz":7082,"minne":7083,"materi":7084,"bruce":7085,"eart":7086,"casting":7087,"independent":7088,"thousands":7089,"tah":7090,"decl":7091,"veterans":7092,"lions":7093,"wrap":7094,"âĢ¦":7095,"dess":7096,"bling":7097,"stine":7098,"eggs":7099,"oon":7100,"closing":7101,"zay":7102,"att":7103,"bacon":7104,"fail":7105,"arizona":7106,"depre":7107,"ghost":7108,"newsp":7109,"wers":7110,"vip":7111,"liked":7112,"ident":7113,"volunteer":7114,"adult":7115,"pupp":7116,"circle":7117,"material":7118,"degree":7119,"grown":7120,"boom":7121,"calendar":7122,"sur":7123,"viewing":7124,"athletes":7125,"chand":7126,"rell":7127,"asian":7128,"entr":7129,"volley":7130,"victims":7131,"body":7132,"mama":7133,"transfer":7134,"geek":7135,"indic":7136,"saved":7137,"mai":7138,"gent":7139,"its":7140,"lounge":7141,"kol":7142,"theory":7143,"situation":7144,"islands":7145,"arth":7146,"zoo":7147,"flood":7148,"viously":7149,"showed":7150,"parliament":7151,"chev":7152,"eline":7153,"attrac":7154,"abad":7155,"tail":7156,"hrs":7157,"lus":7158,"portu":7159,"gory":7160,"provides":7161,"toys":7162,"death":7163,"infe":7164,"ance":7165,"gle":7166,"liam":7167,"lover":7168,"hud":7169,"dvd":7170,"revealed":7171,"gw":7172,"rement":7173,"cathe":7174,"lying":7175,"radio":7176,"derby":7177,"stors":7178,"chemi":7179,"hospit":7180,"⾨":7181,"':":7182,"ilove":7183,"lemon":7184,"republic":7185,"sni":7186,"ness":7187,"door":7188,"reaction":7189,"pregn":7190,"flav":7191,"scholar":7192,"spotify":7193,"isation":7194,"visual":7195,"aware":7196,"sponsored":7197,"joke":7198,"lessons":7199,"legis":7200,"lock":7201,"simil":7202,"ðŁĺĭ":7203,"kind":7204,"lay":7205,"mah":7206,"hoping":7207,"vancouver":7208,"aser":7209,"cleaning":7210,"gala":7211,"threat":7212,"lap":7213,"ache":7214,"romance":7215,"expen":7216,"repost":7217,"zam":7218,"epi":7219,"mirror":7220,"oak":7221,"adul":7222,"batman":7223,"slu":7224,"lc":7225,"viewed":7226,"reviews":7227,"dates":7228,"indonesia":7229,"activi":7230,"offen":7231,"leaf":7232,"isi":7233,"agricul":7234,"costume":7235,"sites":7236,"spiritu":7237,"appearance":7238,"iry":7239,"stair":7240,"application":7241,"spectac":7242,"icity":7243,"skies"
:7244,"handle":7245,"punk":7246,"paradise":7247,"tn":7248,"deal":7249,"providing":7250,"doc":7251,"receiving":7252,"brew":7253,"microsoft":7254,"ö":7255,"ferr":7256,"metro":7257,"thail":7258,"yum":7259,"carter":7260,"á":7261,"gentle":7262,"breaks":7263,"cooper":7264,"showcase":7265,"cutting":7266,"egypt":7267,"baby":7268,"seminar":7269,"glori":7270,"sson":7271,"fave":7272,"rehear":7273,"lotte":7274,"lady":7275,"alas":7276,"prep":7277,"delivered":7278,"nuclear":7279,"iro":7280,"engagement":7281,"atta":7282,"conven":7283,"zan":7284,"glory":7285,"holds":7286,"businesses":7287,"strange":7288,"sche":7289,"itself":7290,"grad":7291,"markets":7292,"falling":7293,"stats":7294,"geon":7295,"budd":7296,"lis":7297,"sheet":7298,"thisi":7299,"colo":7300,"desert":7301,"registration":7302,"ign":7303,"explain":7304,"interior":7305,"laws":7306,"writers":7307,"springs":7308,"kr":7309,"fried":7310,"bloom":7311,"infra":7312,"ao":7313,"cred":7314,"past":7315,"lineup":7316,"boo":7317,"brea":7318,"boots":7319,"celebrity":7320,"attacks":7321,"brook":7322,"eves":7323,"excu":7324,"cherry":7325,"oop":7326,"fascin":7327,"boyfriend":7328,"seas":7329,"nine":7330,"effects":7331,"powered":7332,"kha":7333,"ðŁĺĢ":7334,"shout":7335,"condition":7336,"ij":7337,"hero":7338,"enterpri":7339,"winter":7340,"applications":7341,"shoe":7342,"gel":7343,"battle":7344,"programs":7345,"wart":7346,"ðŁĴ¥":7347,"rap":7348,"hol":7349,"dangerous":7350,"dia":7351,"counter":7352,"rics":7353,"ior":7354,"knight":7355,"coat":7356,"emotional":7357,"atures":7358,"das":7359,"wheel":7360,"forecast":7361,"transport":7362,"glasgow":7363,"kingdom":7364,"preparing":7365,"immedi":7366,"ffin":7367,"awarded":7368,"printing":7369,"roman":7370,"fighters":7371,"anymore":7372,"belt":7373,"pine":7374,"wine":7375,"xi":7376,"employees":7377,"logies":7378,"alled":7379,"demo":7380,"birthday":7381,"angeles":7382,"log":7383,"drivers":7384,"necklace":7385,"kath":7386,"sit":7387,"athlete":7388,"efs":7389,"sburg":7390,"purpose":7391,"resistance":7392,"releases":7393,"tis":7394,"various":7395,"deliver":7396,"chal":7397,"sanc":7398,"oppo":7399,"craw":7400,"neuro":7401,"dra":7402,"supporters":7403,"snap":7404,"difficult":7405,"swear":7406,"logist":7407,"path":7408,"attempt":7409,"à¥":7410,"swimming":7411,"steve":7412,"hurt":7413,"included":7414,"bap":7415,"ware":7416,"ðŁĴĭ":7417,"enders":7418,"jake":7419,"leeds":7420,"climb":7421,"lb":7422,"imple":7423,"lisa":7424,"clothing":7425,"ðŁĺİ":7426,"dt":7427,"compla":7428,"swing":7429,"straw":7430,"vals":7431,"kle":7432,"users":7433,"storm":7434,"cuts":7435,"ontario":7436,"pan":7437,"handsome":7438,"iow":7439,"argu":7440,"checking":7441,"scottish":7442,"Ķï¸ı":7443,"sier":7444,"emma":7445,"pod":7446,"pattern":7447,"desh":7448,"enh":7449,"edward":7450,"ting":7451,"kh":7452,"half":7453,"lincoln":7454,"mother":7455,"alleg":7456,"rc":7457,"volleyball":7458,"dn":7459,"gay":7460,"ally":7461,"leton":7462,"grove":7463,"loud":7464,"advanced":7465,"respec":7466,"client":7467,"supreme":7468,"thailand":7469,"how":7470,"gig":7471,"toi":7472,"dot":7473,"dollar":7474,"ðŁijĩ":7475,"pit":7476,"rb":7477,"hn":7478,"produced":7479,"ggers":7480,"âĨĴ":7481,"mlb":7482,"canvas":7483,"fineart":7484,"usd":7485,"inthe":7486,"pson":7487,"actual":7488,"sl":7489,"tb":7490,"ipad":7491,"ensure":7492,"umb":7493,"wd":7494,"ska":7495,"mars":7496,"kend":7497,"feli":7498,"thing":7499,"countdown":7500,"absolute":7501,"rout":7502,"dral":7503,"py":7504,"injured":7505,"mint":7506,"hunting":7507,"mmer":7508,"sage":7509,"ligh":7510,"acity":7511,"expan":7512,"murray":7513,"aro":
7514,"secure":7515,"fourth":7516,"eagle":7517,"relief":7518,"stakes":7519,"industrial":7520,"clark":7521,"understanding":7522,"seem":7523,"plenty":7524,"silver":7525,"clau":7526,"threat":7527,"sail":7528,"produce":7529,"abstr":7530,"isis":7531,"br":7532,"engers":7533,"worry":7534,"bieber":7535,"sj":7536,"justin":7537,"realize":7538,"kyle":7539,"espn":7540,"filter":7541,"sch":7542,"types":7543,"gamedev":7544,"ding":7545,"twitter":7546,"soldiers":7547,"pom":7548,"carbon":7549,"yards":7550,"childhood":7551,"ried":7552,"kel":7553,"eleph":7554,"tons":7555,"keynote":7556,"quiet":7557,"wire":7558,"posting":7559,"issa":7560,"representing":7561,"backs":7562,"alexander":7563,"celebrates":7564,"taining":7565,"||":7566,"chor":7567,"escape":7568,"peek":7569,"tives":7570,"field":7571,"ssie":7572,"impac":7573,"sponsor":7574,"rc":7575,"wedd":7576,"cannab":7577,"sides":7578,"tracks":7579,"compar":7580,"contrac":7581,"technical":7582,"bible":7583,"exploring":7584,"share":7585,"trav":7586,"nate":7587,"illo":7588,"scru":7589,"mingham":7590,"guns":7591,"ofthe":7592,"shame":7593,"sees":7594,"catho":7595,"access":7596,"cel":7597,"reported":7598,"»":7599,"mario":7600,"pad":7601,"hopefully":7602,"ouse":7603,"yon":7604,"disappo":7605,"olo":7606,"pitt":7607,"pac":7608,"gap":7609,"crush":7610,"sg":7611,"kle":7612,"gem":7613,"empire":7614,"dirty":7615,"ais":7616,"aviation":7617,"zealand":7618,"facing":7619,"highway":7620,"danny":7621,"spider":7622,"otta":7623,"ðŁĺĦ":7624,"wy":7625,"colours":7626,"infl":7627,"costs":7628,"olympics":7629,"aus":7630,"hm":7631,"howard":7632,"passes":7633,"lauren":7634,"mush":7635,"opin":7636,"rho":7637,"discount":7638,"operation":7639,"emily":7640,"mmm":7641,"chamber":7642,"dil":7643,"toyo":7644,"ship":7645,"samu":7646,"pictured":7647,"unic":7648,"pol":7649,"keeper":7650,"cartoon":7651,"sten":7652,"ignor":7653,"nations":7654,"nl":7655,"tasting":7656,"detail":7657,"officials":7658,"motor":7659,"francis":7660,"editor":7661,"ðŁijĩ":7662,"pets":7663,"rangers":7664,"tg":7665,"rn":7666,"wri":7667,"nichol":7668,"ise":7669,"spots":7670,"anie":7671,"check":7672,"triple":7673,"kumar":7674,"speakers":7675,"icing":7676,"prepared":7677,"abuse":7678,"friendship":7679,"month":7680,"swim":7681,"aire":7682,"scent":7683,"hamilton":7684,"indian":7685,"jes":7686,"yummy":7687,"tears":7688,"dawn":7689,"ized":7690,"worlds":7691,"ðŁķ":7692,"billi":7693,"stone":7694,"nhs":7695,"basic":7696,"por":7697,"stle":7698,"iron":7699,"older":7700,"clevel":7701,"eing":7702,"ðŁĺįðŁĺįðŁĺį":7703,"prints":7704,"firm":7705,"aircraft":7706,"finest":7707,"develop":7708,"aaron":7709,"tz":7710,"graham":7711,"owners":7712,"foli":7713,"lesson":7714,"ques":7715,"babe":7716,"craft":7717,"phen":7718,"jun":7719,"birmingham":7720,"vine":7721,"ller":7722,"ian":7723,"fineartamerica":7724,"evolu":7725,"stab":7726,"imper":7727,"ward":7728,"comic":7729,"wiz":7730,"invited":7731,"duke":7732,"match":7733,"ports":7734,"roger":7735,"diagno":7736,"kept":7737,"test":7738,"visu":7739,"rhy":7740,"soc":7741,"tox":7742,"baker":7743,"surface":7744,"covers":7745,"mans":7746,"bits":7747,"xbox":7748,"ffle":7749,"nan":7750,"gard":7751,"hart":7752,"waters":7753,"villa":7754,"retro":7755,"lightning":7756,"catholic":7757,"democracy":7758,"neighbor":7759,"penn":7760,"cran":7761,"jonathan":7762,"laura":7763,"vibes":7764,"sub":7765,"coaching":7766,"clearly":7767,"ukraine":7768,"brave":7769,"commitment":7770,"tall":7771,"mart":7772,"rap":7773,"modi":7774,"scott":7775,"bros":7776,"shower":7777,"ðŁı¾":7778,"âĺºï¸ı":7779,"cousin":7780,"approach":7781,"bre":7782,"compos"
:7783,"hilari":7784,"philly":7785,"gad":7786,"quickly":7787,"rian":7788,"tm":7789,"virtual":7790,"houses":7791,"kt":7792,"phoenix":7793,"wire":7794,"ffy":7795,"bunch":7796,"ancing":7797,"tale":7798,"snapchat":7799,"starter":7800,"ht":7801,"kicking":7802,"apart":7803,"thy":7804,")!":7805,"blogger":7806,"itz":7807,"comfort":7808,"angels":7809,"wash":7810,"\":":7811,"argent":7812,"request":7813,"honest":7814,"mighty":7815,"bobby":7816,"kg":7817,"rol":7818,"thouse":7819,"expo":7820,"hc":7821,"tables":7822,"magical":7823,"posts":7824,"dem":7825,"nw":7826,"orlando":7827,"aber":7828,"***":7829,"ðŁĺľ":7830,"environmental":7831,"transformation":7832,"mile":7833,"wic":7834,"hiring":7835,"maine":7836,"boar":7837,"rying":7838,"tis":7839,"niture":7840,"tweeted":7841,"antonio":7842,"opinion":7843,"finale":7844,"diy":7845,"fis":7846,"thin":7847,"trouble":7848,"lego":7849,"files":7850,"quart":7851,"spa":7852,"currency":7853,"climate":7854,"fanart":7855,"railway":7856,"space":7857,"bands":7858,"daniel":7859,"motion":7860,"leng":7861,"holder":7862,"occu":7863,"marie":7864,"cathedral":7865,"buzz":7866,"bies":7867,"nascar":7868,"bmw":7869,"battery":7870,"charlotte":7871,"doctor":7872,"zzle":7873,"seven":7874,"insan":7875,"ddy":7876,"sten":7877,"labor":7878,"thrilled":7879,"seren":7880,"documentary":7881,"waves":7882,"certain":7883,"candid":7884,"allowed":7885,"nintendo":7886,"starwars":7887,"tap":7888,"homemade":7889,"dles":7890,"thering":7891,"bree":7892,"empty":7893,"piano":7894,"positi":7895,"country":7896,"pork":7897,"puts":7898,"perry":7899,"matic":7900,"spotlight":7901,"tist":7902,"orities":7903,"wealth":7904,"cp":7905,"barbar":7906,"committed":7907,"assau":7908,"profit":7909,"eight":7910,"hul":7911,"finishing":7912,"runner":7913,"sso":7914,"inspec":7915,"charged":7916,"christop":7917,"losing":7918,"coal":7919,"hoo":7920,"elev":7921,"dele":7922,"moham":7923,"donation":7924,"cable":7925,"clinic":7926,"jin":7927,"managed":7928,"tering":7929,"â¬":7930,"urban":7931,"deputy":7932,"bber":7933,"burn":7934,"academic":7935,"ott":7936,"stake":7937,"iter":7938,"stown":7939,"acker":7940,"adventures":7941,"adams":7942,"greg":7943,"prom":7944,"vol":7945,"acqu":7946,"congre":7947,"paint":7948,"citizens":7949,"call":7950,"afford":7951,"vc":7952,"asks":7953,"thetic":7954,"independence":7955,"âĽ":7956,"hitting":7957,"blon":7958,"future":7959,"âı":7960,"inno":7961,"gene":7962,"boards":7963,"distance":7964,"set":7965,"remem":7966,"thal":7967,"prevent":7968,"lang":7969,"objec":7970,"susp":7971,"matt":7972,"induc":7973,"boro":7974,"pione":7975,"redi":7976,"virtu":7977,"printed":7978,"scope":7979,"shark":7980,"succe":7981,"astron":7982,"illegal":7983,"jag":7984,"cting":7985,"inee":7986,"ato":7987,"robin":7988,"nutrition":7989,"bf":7990,"dutch":7991,"bn":7992,"furniture":7993,"forgotten":7994,"atar":7995,"rup":7996,"hyper":7997,"branch":7998,"communication":7999,"degrees":8000,"onia":8001,"uncle":8002,"promote":8003,"orche":8004,"wii":8005,"js":8006,"button":8007,"major":8008,"cbs":8009,"bristol":8010,"premium":8011,"ordinary":8012,"edit":8013,"mg":8014,"weed":8015,"steven":8016,":'":8017,"gus":8018,"tes":8019,"captured":8020,"drugs":8021,"dow":8022,"writes":8023,"bishop":8024,"wheels":8025,"alization":8026,"discovery":8027,"wr":8028,"rachel":8029,"neil":8030,"hydr":8031,"cutest":8032,"entrepreneur":8033,"korean":8034,"oregon":8035,"ulty":8036,"perfectly":8037,"supported":8038,"historical":8039,"twins":8040,"elly":8041,"wel":8042,"devil":8043,"income":8044,"scientists":8045,"deleg":8046,"hen":8047,"oni":8048,"iced":8049,"gio":8
050,"curry":8051,"reveal":8052,"eg":8053,"buffalo":8054,"nol":8055,"opera":8056,"cameron":8057,"hahahaha":8058,"jab":8059,"graduation":8060,"craig":8061,"ral":8062,"if":8063,"organization":8064,"lege":8065,"gang":8066,"sud":8067,"edinburgh":8068,"lack":8069,"flies":8070,"gate":8071,"thrones":8072,"qb":8073,"thereal":8074,"eleg":8075,"ppin":8076,"cles":8077,"jamie":8078,"tnam":8079,"crypto":8080,"oul":8081,"pages":8082,"ase":8083,"roots":8084,"stupid":8085,"adid":8086,"boot":8087,"protein":8088,"sap":8089,"sium":8090,"sus":8091,"endor":8092,"function":8093,"dont":8094,"enna":8095,"chy":8096,"sque":8097,"worker":8098,"mtv":8099,"ea":8100,"kan":8101,"ðŁĴļ":8102,"mus":8103,"profession":8104,"tto":8105,"operations":8106,"allo":8107,"ctor":8108,"invite":8109,"scand":8110,"outh":8111,"zim":8112,"links":8113,"clients":8114,"samsung":8115,"discusses":8116,"nell":8117,"ultra":8118,"somewhere":8119,"stewart":8120,"inet":8121,"dez":8122,"bout":8123,"factor":8124,"tian":8125,"trans":8126,"jeremy":8127,"db":8128,"ðŁĩ¬":8129,"orn":8130,"developing":8131,"spol":8132,"cooper":8133,"mau":8134,"remembering":8135,"trek":8136,"family":8137,"seniors":8138,"foster":8139,"attended":8140,"wing":8141,"transform":8142,"elementary":8143,"horiz":8144,"listing":8145,"malaysia":8146,"itch":8147,"warrior":8148,"philippines":8149,"russell":8150,"mend":8151,"initiative":8152,"creep":8153,"tops":8154,"briti":8155,"aur":8156,"sharp":8157,"advertising":8158,"ugly":8159,"achiev":8160,"materials":8161,"bug":8162,"device":8163,"bonus":8164,"facility":8165,"cole":8166,"nhl":8167,"yas":8168,"planned":8169,"pole":8170,"excellence":8171,"trick":8172,"confl":8173,"rp":8174,"achieve":8175,"loan":8176,"swag":8177,"jessica":8178,"howe":8179,"pour":8180,"scu":8181,"zoo":8182,"rated":8183,"dresses":8184,"rebel":8185,"mexican":8186,"coordin":8187,"mess":8188,"atlantic":8189,"tl":8190,"oscar":8191,"walks":8192,"pharmac":8193,"investigation":8194,"...#":8195,"cci":8196,"easily":8197,"mondaymotivation":8198,"yment":8199,"auti":8200,"forced":8201,"armed":8202,"colleagues":8203,"papers":8204,"proper":8205,"shake":8206,"buc":8207,"lean":8208,"exhibit":8209,"evement":8210,"cott":8211,"biz":8212,"sper":8213,"kent":8214,"swan":8215,"/@":8216,"girlfriend":8217,"hawk":8218,"âĺĢï¸ı":8219,"mono":8220,"ðŁĴĽ":8221,"statue":8222,"ðŁĺ³":8223,"ras":8224,"teeth":8225,"precious":8226,"tile":8227,"pam":8228,"swift":8229,"vali":8230,"nose":8231,"drunk":8232,"experiences":8233,"comeback":8234,"genius":8235,"worse":8236,"shef":8237,"rad":8238,"edit":8239,"honour":8240,"auspol":8241,"larry":8242,"hire":8243,"gordon":8244,"achievement":8245,"........":8246,"suicide":8247,"alternative":8248,"sup":8249,"surroun":8250,"shake":8251,"keith":8252,"pepper":8253,"turk":8254,"criminal":8255,"beck":8256,"sum":8257,"walls":8258,"cnn":8259,"antic":8260,"offe":8261,"colli":8262,"wines":8263,"highlight":8264,"hawaii":8265,"embar":8266,"lfc":8267,"ðŁĩ®":8268,"mv":8269,">>":8270,"atmo":8271,"word":8272,"carl":8273,"shoutout":8274,"brewing":8275,"ìĿ":8276,"dof":8277,"sic":8278,"hottest":8279,"colon":8280,"hhh":8281,"shut":8282,"lowing":8283,"volume":8284,"apartment":8285,"agreement":8286,"destro":8287,"wee":8288,"religious":8289,"iowa":8290,"rod":8291,"landing":8292,"represent":8293,"ðŁĵ·:":8294,"las":8295,"usually":8296,"hl":8297,"cac":8298,"salv":8299,"along":8300,"laughing":8301,"beans":8302,"reminds":8303,"phase":8304,"somebody":8305,"mask":8306,"ranked":8307,"destroy":8308,"sci":8309,"âĢ¼ï¸ı":8310,"gabri":8311,"leo":8312,"roa":8313,"failed":8314,"sil":8315,"refugees":8316,"revi
":8317,"ring":8318,"berries":8319,"cookies":8320,"yy":8321,"conservation":8322,"shab":8323,"humans":8324,"determin":8325,"ain":8326,"niall":8327,"assu":8328,"mba":8329,"from":8330,"extreme":8331,"vices":8332,"commerce":8333,"ghtful":8334,"ordered":8335,"supports":8336,"recap":8337,"vor":8338,"dropping":8339,"correct":8340,"paying":8341,"meaning":8342,"nj":8343,"quiz":8344,"\"#":8345,"business":8346,"ðŁĩ®ðŁĩ":8347,"indigen":8348,"dust":8349,"boxes":8350,"blind":8351,"xxx":8352,"zzy":8353,"ðŁĩ¬ðŁĩ":8354,"ssels":8355,"sant":8356,"ddle":8357,"hilarious":8358,"design":8359,"wondering":8360,"vehicles":8361,"kre":8362,"jud":8363,"reception":8364,"parker":8365,"ÃŃ":8366,"privi":8367,"hydro":8368,"softball":8369,"pollu":8370,"locked":8371,"bah":8372,"ear":8373,"script":8374,"divi":8375,"brace":8376,"george":8377,"theast":8378,"belo":8379,"jal":8380,"tionary":8381,"dental":8382,"rocket":8383,"purch":8384,"shak":8385,"manufacturing":8386,"ez":8387,"itis":8388,"concep":8389,"tball":8390,"chs":8391,"directed":8392,"prayers":8393,"ook":8394,"philos":8395,"variety":8396,"chess":8397,"server":8398,"gand":8399,"balti":8400,"ðŁĵ¸":8401,"sely":8402,"cruz":8403,"spectacular":8404,"burning":8405,"represent":8406,"iz":8407,"tone":8408,"merce":8409,"hell":8410,"bedroom":8411,"establi":8412,"bol":8413,"common":8414,"ãĥ»":8415,"abor":8416,"kitty":8417,"heights":8418,"repair":8419,"william":8420,"quake":8421,"alabama":8422,"population":8423,"rev":8424,"rett":8425,"ists":8426,"nite":8427,"lem":8428,"aha":8429,"cleveland":8430,"rm":8431,"pover":8432,"obse":8433,"montre":8434,"mania":8435,"®":8436,"conne":8437,"carni":8438,"shah":8439,"fy":8440,"ua":8441,"scor":8442,"struggle":8443,"bob":8444,"''":8445,"appropri":8446,"decide":8447,"ffed":8448,"caster":8449,"sort":8450,"hungry":8451,"drag":8452,"اÙ":8453,"grounds":8454,"dw":8455,"slightly":8456,"cardin":8457,"deadline":8458,"bronze":8459,"webin":8460,"barry":8461,"silence":8462,"euro":8463,"option":8464,"earn":8465,"ðŁĴĸ":8466,"however":8467,"naren":8468,"nails":8469,"bathroom":8470,"vine":8471,"phd":8472,"mining":8473,"garage":8474,"()":8475,"shoulder":8476,"defeat":8477,"dir":8478,"ov":8479,"liberty":8480,"pleas":8481,"xon":8482,"compre":8483,"av":8484,"jin":8485,"ables":8486,"silent":8487,"famili":8488,"visits":8489,"dipl":8490,"habit":8491,"millions":8492,"regarding":8493,"innovative":8494,"senator":8495,"rts":8496,"von":8497,"kl":8498,"whil":8499,"required":8500,"âĿĦ":8501,"luv":8502,"presidential":8503,"pocket":8504,"hundre":8505,"shown":8506,"frozen":8507,"toward":8508,"fast":8509,"confidence":8510,"rough":8511,"individual":8512,"quet":8513,"ðŁı½":8514,"dome":8515,"fifa":8516,"engineer":8517,"zen":8518,"remix":8519,"ðŁĺĥ":8520,"plant":8521,"minor":8522,"robinson":8523,"asy":8524,"pulled":8525,"certain":8526,"potato":8527,"(:":8528,"pres":8529,"occa":8530,"wit":8531,"item":8532,"sie":8533,"dating":8534,"thompson":8535,"owned":8536,"anu":8537,"vie":8538,"tedly":8539,"goodnight":8540,"except":8541,"ðŁĮŁ":8542,"iraq":8543,"kie":8544,"rences":8545,"lip":8546,"similar":8547,"saudi":8548,"vig":8549,"arthur":8550,"picks":8551,"milan":8552,"honda":8553,"maxi":8554,"og":8555,"stest":8556,"arch":8557,"analytics":8558,"basti":8559,"pearl":8560,"terry":8561,"horse":8562,"astro":8563,"acce":8564,"launching":8565,"international":8566,"sno":8567,"tasty":8568,"denver":8569,"irl":8570,"pete":8571,"torn":8572,"advantage":8573,"varsity":8574,"\"\"":8575,"sole":8576,"gc":8577,"lang":8578,"demonstr":8579,"olds":8580,"unity":8581,"nets":8582,"inspire":8583,"crete":8584,"nashville":858
5,"nelson":8586,"eter":8587,"walk":8588,"hyun":8589,"mack":8590,"treas":8591,"seeking":8592,"rage":8593,"brush":8594,"aband":8595,"whilst":8596,"cocon":8597,"hong":8598,"shelter":8599,"ip":8600,"possibly":8601,"soo":8602,"ited":8603,"âĦ":8604,"races":8605,"warming":8606,"quin":8607,"television":8608,"matches":8609,"rapi":8610,"mental":8611,"palm":8612,"jennifer":8613,"rolls":8614,"indiana":8615,"bars":8616,"catching":8617,"rescu":8618,"candidates":8619,"fare":8620,"âłĢ":8621,"seo":8622,"vietnam":8623,"alpha":8624,"michelle":8625,"visible":8626,"regre":8627,"wned":8628,"apple":8629,"lip":8630,"ffe":8631,"liz":8632,"yorkshire":8633,"hail":8634,"seasons":8635,"began":8636,"md":8637,"kc":8638,"lap":8639,"fascinating":8640,"help":8641,"ury":8642,"ums":8643,"nuts":8644,"sem":8645,"alongside":8646,"bridge":8647,"orial":8648,"ove":8649,"worldcup":8650,"british":8651,"comfortable":8652,"ive":8653,"hotels":8654,"fairs":8655,"horri":8656,"sox":8657,"dining":8658,"stream":8659,"barri":8660,"ssy":8661,"wim":8662,"terms":8663,"vu":8664,"pere":8665,"lens":8666,"walked":8667,"ror":8668,"lars":8669,"shield":8670,"doubt":8671,"proto":8672,"crossing":8673,"meant":8674,"medium":8675,"adding":8676,"eb":8677,"cheap":8678,"func":8679,"paper":8680,"brands":8681,"ryan":8682,"feedback":8683,"collins":8684,"unknown":8685,"tropical":8686,"sandwich":8687,"fallen":8688,"formu":8689,"select":8690,"loads":8691,"answers":8692,"ori":8693,"maga":8694,"dor":8695,"duo":8696,"alie":8697,"drum":8698,"uri":8699,"deer":8700,"soul":8701,"shut":8702,"âĺº":8703,"stolen":8704,"donated":8705,"buzz":8706,"patriots":8707,"hal":8708,"nasty":8709,"nominated":8710,"monte":8711,"kia":8712,"thri":8713,"ingu":8714,"tests":8715,"petro":8716,"ðŁijij":8717,"hosts":8718,"nest":8719,"topic":8720,"patch":8721,"mmy":8722,"hugh":8723,"abilities":8724,"mathe":8725,"smiles":8726,"gb":8727,"agenda":8728,"insights":8729,"chip":8730,"phan":8731,"failure":8732,"dgers":8733,"hai":8734,"significant":8735,"shock":8736,"rural":8737,"glam":8738,"figures":8739,"potus":8740,"ota":8741,"ministry":8742,"appears":8743,"fear":8744,"rh":8745,"american":8746,"hatt":8747,"sony":8748,"fires":8749,"edi":8750,"nou":8751,"equi":8752,"when":8753,"universal":8754,"madness":8755,"ix":8756,"sculpture":8757,"bach":8758,"tto":8759,"sweden":8760,"eta":8761,"ento":8762,"developed":8763,"monthly":8764,"maps":8765,"rah":8766,"led":8767,"delta":8768,"saints":8769,"islam":8770,"bench":8771,"fifth":8772,"vard":8773,"socks":8774,"welcoming":8775,"je":8776,"turner":8777,"vb":8778,"adi":8779,"norway":8780,"ady":8781,"hurricane":8782,"porsche":8783,"tradition":8784,"exam":8785,"newspaper":8786,"luci":8787,"aver":8788,"ideal":8789,"dna":8790,"madison":8791,"ðŁ§":8792,"witness":8793,"acou":8794,"insight":8795,"simon":8796,"robot":8797,"snake":8798,"nbc":8799,"aco":8800,"ross":8801,"shment":8802,"religion":8803,"chann":8804,"insu":8805,"campbell":8806,"installed":8807,"weather":8808,"horses":8809,"oli":8810,"robert":8811,"kaz":8812,"ðŁıĢ":8813,"veteran":8814,"thread":8815,"quarter":8816,"easier":8817,"capture":8818,"hipho":8819,"lawrence":8820,"romantic":8821,"passion":8822,"clay":8823,"oxford":8824,"thai":8825,"studying":8826,"fia":8827,"elected":8828,"mostly":8829,"cb":8830,"tumb":8831,"âĢįâĻĤ":8832,"xl":8833,"shan":8834,"faster":8835,"evans":8836,"slide":8837,"shri":8838,"seek":8839,"mies":8840,"chemistry":8841,"pumpkin":8842,"tum":8843,",,":8844,"room":8845,"fired":8846,"lips":8847,"presence":8848,"aff":8849,"brewery":8850,"arrive":8851,"swag":8852,"photograph":8853,"pengu":8854,"chips":885
5,"attor":8856,"values":8857,"accurate":8858,"contemporary":8859,"principal":8860,"cannabis":8861,"ario":8862,"anywhere":8863,"gia":8864,"democrats":8865,"buildings":8866,"lived":8867,"aps":8868,"negative":8869,"mare":8870,"ballo":8871,"lion":8872,"diamon":8873,"look":8874,"reform":8875,"tommy":8876,"illa":8877,"treats":8878,"hundreds":8879,"portland":8880,"worthy":8881,"excep":8882,"aria":8883,"idol":8884,"beer":8885,"cdn":8886,"yu":8887,"awk":8888,"ðŁĩ¨":8889,"cells":8890,"ó":8891,"identity":8892,"drawn":8893,"devil":8894,"finger":8895,"tham":8896,"ðŁijĬ":8897,"earned":8898,"fintech":8899,"dolph":8900,"tweeting":8901,"evolution":8902,"ðŁĵį":8903,"estim":8904,"mvp":8905,"none":8906,"ðŁĩºðŁĩ¸":8907,"toyota":8908,"aux":8909,"marin":8910,"bold":8911,"lbs":8912,"steak":8913,"murphy":8914,"itable":8915,"louis":8916,"solve":8917,"pia":8918,"skir":8919,"illino":8920,"webinar":8921,"banana":8922,"lov":8923,"thon":8924,"voters":8925,"affordable":8926,"defeated":8927,"lmfa":8928,"airlines":8929,"superb":8930,"anyway":8931,"debt":8932,"bored":8933,"versi":8934,"metal":8935,"responsible":8936,"mk":8937,"sse":8938,"fay":8939,"caused":8940,"fp":8941,"recommend":8942,"plaza":8943,"sporting":8944,"alliance":8945,"austri":8946,"nn":8947,"tours":8948,"surprised":8949,"artif":8950,"thunder":8951,"surve":8952,"wore":8953,"brief":8954,"necessary":8955,"zie":8956,"ashley":8957,"drake":8958,"rt":8959,"knife":8960,"immun":8961,"charges":8962,"athe":8963,"bride":8964,"reply":8965,"gav":8966,"broadcast":8967,"puer":8968,"bracelet":8969,"capacity":8970,"harvest":8971,"idk":8972,"performan":8973,"dding":8974,"ilers":8975,"para":8976,"jama":8977,"province":8978,"chin":8979,"iders":8980,"hari":8981,"teaser":8982,"chen":8983,"restor":8984,"rat":8985,"flat":8986,"colom":8987,"ðŁĴŀ":8988,"ðŁĩ¨ðŁĩ":8989,"smooth":8990,"rt":8991,"pitch":8992,"staying":8993,"israeli":8994,"tcot":8995,"perspective":8996,"dock":8997,"opener":8998,"lovel":8999,"xo":9000,"classroom":9001,"lington":9002,"goal":9003,"kennedy":9004,"sham":9005,"spaces":9006,"mitchell":9007,"homecoming":9008,"uki":9009,"claimed":9010,"recruit":9011,"ingo":9012,"mufc":9013,"monit":9014,"groo":9015,"resident":9016,"percent":9017,"perman":9018,"ottawa":9019,"intment":9020,"anxi":9021,"standards":9022,"worship":9023,"scheme":9024,"fx":9025,"potter":9026,"bian":9027,"athletic":9028,"afgh":9029,"sse":9030,"satell":9031,"parties":9032,"âĿ¤âĿ¤":9033,"infrastructure":9034,"relax":9035,"modu":9036,"worn":9037,"smoking":9038,"yach":9039,"practices":9040,"wcw":9041,"amb":9042,"domestic":9043,"taylor":9044,"kentu":9045,"provided":9046,"modi":9047,"veg":9048,"\"...":9049,"observ":9050,"ðŁĺ©":9051,"beard":9052,"mour":9053,"angry":9054,"ðŁĺ±":9055,"startups":9056,"wooden":9057,"dive":9058,"nail":9059,"antique":9060,"roses":9061,"tornado":9062,"mat":9063,"^^":9064,"suspect":9065,"farm":9066,"devices":9067,"mega":9068,"tul":9069,"scholarship":9070,"gee":9071,"disaster":9072,"arrival":9073,"poin":9074,"marc":9075,"katie":9076,"bbed":9077,"false":9078,"deserves":9079,"richard":9080,"juana":9081,"frey":9082,"tioned":9083,"hybri":9084,"rw":9085,"sarah":9086,"achi":9087,"cure":9088,"ole":9089,"morris":9090,"chic":9091,"broadway":9092,"label":9093,"pak":9094,"poverty":9095,"golf":9096,"ered":9097,"fu":9098,"eries":9099,"bees":9100,"alogue":9101,"stel":9102,"wireless":9103,"jewish":9104,"tide":9105,"blocked":9106,"lifetime":9107,"bhar":9108,"split":9109,"amster":9110,"thi":9111,"joshu":9112,"brunch":9113,"haps":9114,"sfor":9115,"oops":9116,"kapoor":9117,"hiking":9118,"supposed":9119,"roof":9
120,"reas":9121,"train":9122,"tight":9123,"trump":9124,"basically":9125,"rr":9126,"eared":9127,"seeds":9128,"entrance":9129,"cp":9130,"wie":9131,"sonic":9132,"victim":9133,"here":9134,"eh":9135,"earrings":9136,"salmon":9137,"arctic":9138,"anne":9139,"dougla":9140,"corruption":9141,"hannah":9142,"hasn":9143,"voices":9144,"conce":9145,"atta":9146,"fleet":9147,"clinical":9148,"democratic":9149,"tony":9150,"stood":9151,"lef":9152,"twitch":9153,"ail":9154,"honestly":9155,"increased":9156,"drome":9157,"donna":9158,"accepted":9159,"visitors":9160,"apar":9161,"ador":9162,"par":9163,"jerry":9164,"rai":9165,"brandon":9166,"abu":9167,"!!!!!!":9168,"meme":9169,"ingh":9170,"glorious":9171,"bhu":9172,"pump":9173,"jol":9174,"like":9175,"fisher":9176,"maz":9177,"agan":9178,"destination":9179,"playlist":9180,"letters":9181,"genu":9182,"brace":9183,"celebrated":9184,"banner":9185,"rhe":9186,"dragon":9187,"ðŁĺħ":9188,"signature":9189,"grey":9190,"âľĶï¸ı":9191,"alice":9192,"bered":9193,"pher":9194,"bern":9195,"cath":9196,"gathering":9197,"scoring":9198,"influence":9199,"smiling":9200,"dept":9201,"local":9202,"ax":9203,"acu":9204,"retirement":9205,"honor":9206,"herself":9207,"chemical":9208,"assess":9209,"yall":9210,"frequ":9211,"appreciation":9212,"aca":9213,"choir":9214,"cuz":9215,"soil":9216,"cil":9217,"reporting":9218,"uh":9219,"enterprise":9220,"grat":9221,"jacob":9222,"rum":9223,"fee":9224,"jak":9225,"spin":9226,"bikes":9227,"phia":9228,"stere":9229,"pis":9230,"blood":9231,"tatt":9232,"raft":9233,"warren":9234,"sheri":9235,"backstage":9236,"marsh":9237,"hashtag":9238,"therine":9239,"rein":9240,"gameday":9241,"guaran":9242,"recipes":9243,"minds":9244,"stronger":9245,"issued":9246,"bicy":9247,"nak":9248,"mented":9249,"scary":9250,"ux":9251,"previous":9252,"ttle":9253,"thats":9254,"actors":9255,"uma":9256,"tina":9257,"bunny":9258,"promotion":9259,"uss":9260,"oliver":9261,"montreal":9262,"whats":9263,"appreciated":9264,"lakes":9265,"excuse":9266,"knowing":9267,"prizes":9268,"muscle":9269,"shades":9270,"scot":9271,"ingredi":9272,"electronic":9273,"juan":9274,"combat":9275,"sri":9276,"eh":9277,"turkish":9278,"lom":9279,"strikes":9280,"prison":9281,"ree":9282,"pope":9283,"vid":9284,"oldest":9285,"doll":9286,"swiss":9287,"certified":9288,"clip":9289,"returning":9290,"lator":9291,"leigh":9292,"ttes":9293,"watson":9294,"healing":9295,"elim":9296,"perhaps":9297,"hass":9298,"kau":9299,"dder":9300,"mouse":9301,"newcastle":9302,"indigenous":9303,"welcomes":9304,"cole":9305,"taught":9306,"noise":9307,"appear":9308,"joe":9309,"canon":9310,"wednesday":9311,"utah":9312,"ctive":9313,"driven":9314,"iv":9315,"cell":9316,"strip":9317,"acc":9318,"focused":9319,"arrest":9320,"stocks":9321,"woo":9322,"âĹ":9323,"noticed":9324,"shado":9325,"displa":9326,"terror":9327,"borne":9328,"second":9329,"queens":9330,"woke":9331,"jail":9332,"nott":9333,"cambridge":9334,"hart":9335,"seaf":9336,"fax":9337,"accept":9338,"âĺħ":9339,"goods":9340,"kat":9341,"twin":9342,"hs":9343,"thousand":9344,"sins":9345,"suite":9346,"ampton":9347,"arn":9348,"relev":9349,"richar":9350,"hoops":9351,"nbc":9352,"classic":9353,"pab":9354,"soldier":9355,"deplo":9356,"leans":9357,"installation":9358,"clash":9359,"leban":9360,"eee":9361,"tire":9362,"beloved":9363,"fusion":9364,"traveling":9365,"nei":9366,"cookie":9367,"globe":9368,"physics":9369,"sq":9370,"col":9371,"wolves":9372,"dl":9373,"exit":9374,"\"-":9375,"football":9376,"leaf":9377,"sterling":9378,"hide":9379,"minneso":9380,"freshman":9381,"nature":9382,"indie":9383,"supplies":9384,"bris":9385,"irish":9386,"ink
tober":9387,"doodle":9388,"icop":9389,"messages":9390,"adults":9391,"recorded":9392,"fixed":9393,"ardo":9394,"offered":9395,"underground":9396,"drone":9397,"pine":9398,"mainten":9399,"andre":9400,"hammer":9401,"sx":9402,"round":9403,"hike":9404,"brad":9405,"rome":9406,"full":9407,"oney":9408,"rows":9409,"columbia":9410,"archives":9411,"approved":9412,"batch":9413,"illinois":9414,"recognition":9415,"shouldn":9416,"fog":9417,"ncaa":9418,"kevin":9419,"humanity":9420,"although":9421,"powers":9422,"pou":9423,"sar":9424,"pest":9425,"alcohol":9426,"consci":9427,"philadel":9428,"eno":9429,"tm":9430,"okla":9431,"category":9432,"participate":9433,"accused":9434,"brief":9435,"poem":9436,"clubs":9437,"consult":9438,"jab":9439,"bigdata":9440,"amsterdam":9441,"acing":9442,"certific":9443,"nu":9444,"dat":9445,"improved":9446,"andy":9447,"campaig":9448,"palestin":9449,"pace":9450,"mobi":9451,"feelings":9452,"wolf":9453,"brain":9454,"propos":9455,"interactive":9456,"prince":9457,"index":9458,"cis":9459,"chae":9460,"peaceful":9461,"covering":9462,"aco":9463,"courses":9464,"monkey":9465,"replace":9466,"bl":9467,"bloody":9468,"tales":9469,"brighton":9470,"neighborhood":9471,"gates":9472,"spiritual":9473,"afraid":9474,"breast":9475,"bones":9476,"ðŁijī":9477,"video":9478,"wau":9479,"touch":9480,"injuries":9481,"carl":9482,"rix":9483,"unex":9484,"âĢ¢":9485,"fred":9486,"considered":9487,"thusi":9488,"anch":9489,"ony":9490,"usa":9491,"graphics":9492,"acre":9493,"ðŁĺ©":9494,"commemor":9495,"commod":9496,"goti":9497,"guardian":9498,"starbucks":9499,"prevention":9500,"hahahaha":9501,"administration":9502,"portugal":9503,"faculty":9504,"beta":9505,"ula":9506,"albert":9507,"breath":9508,"eri":9509,"letting":9510,"tric":9511,"mentation":9512,"incredibly":9513,"tennes":9514,"vd":9515,"ðŁĻĪ":9516,"eddie":9517,"brick":9518,"grill":9519,"btw":9520,"watches":9521,"researchers":9522,"tney":9523,"nie":9524,"pas":9525,"aster":9526,"vibr":9527,"pokemon":9528,"chrome":9529,"goat":9530,"pitts":9531,"illy":9532,"festive":9533,"yd":9534,"canal":9535,"ðŁĨ":9536,"fies":9537,"carlos":9538,"reque":9539,"partici":9540,"trains":9541,"sample":9542,"temperature":9543,"symph":9544,"picking":9545,"indoor":9546,"zers":9547,"playoffs":9548,"________":9549,"apes":9550,"lyrics":9551,"islamic":9552,"performances":9553,"dick":9554,"spark":9555,"seas":9556,"homa":9557,"ground":9558,"disci":9559,"employee":9560,"commu":9561,"alaska":9562,"alan":9563,"feast":9564,"dging":9565,"banking":9566,"manuel":9567,"slowly":9568,"trucks":9569,"mccar":9570,"ooo":9571,"scrat":9572,"orchestra":9573,"individu":9574,"mx":9575,"breath":9576,"stairs":9577,"equality":9578,"blake":9579,"locations":9580,"coconut":9581,"baltimore":9582,"aaa":9583,"lc":9584,"ðŁıĨ":9585,"harvey":9586,"resist":9587,"immigration":9588,"adidas":9589,"fili":9590,"ref":9591,"lgbt":9592,"mos":9593,"ppi":9594,"kenny":9595,"terror":9596,"bane":9597,"apolis":9598,"sg":9599,"socialmedia":9600,"kai":9601,"honest":9602,"assas":9603,"bollywood":9604,"âĢįâĻĢï¸ı":9605,"ferrari":9606,"horn":9607,"crypto":9608,"boom":9609,"maintenance":9610,"idi":9611,"sman":9612,"wl":9613,"extended":9614,"insul":9615,"ves":9616,"gosp":9617,"tri":9618,"pig":9619,"targe":9620,"celer":9621,"stati":9622,"smh":9623,"ridic":9624,"appeal":9625,"?)":9626,"conclu":9627,"cosme":9628,"sheep":9629,"christopher":9630,"enthusi":9631,"polish":9632,"mets":9633,"ounded":9634,"sustainability":9635,"creativity":9636,"concrete":9637,"rai":9638,"alien":9639,"bless":9640,"tees":9641,"club":9642,"rot":9643,"bos":9644,"exist":9645,"perfection":9646
,"luck":9647,"rocky":9648,"expensive":9649,"meanwhile":9650,"happybirthday":9651,"pret":9652,"thriller":9653,"cave":9654,"playoff":9655,"somer":9656,"lu":9657,"lex":9658,"defence":9659,"amwriting":9660,"homeless":9661,"prophe":9662,"chet":9663,"pastor":9664,"ðŁ¤£":9665,"lander":9666,"www":9667,"Ģï¸ı":9668,"tica":9669,"!#":9670,"otic":9671,"radar":9672,"posters":9673,"powder":9674,"poli":9675,"haun":9676,"trap":9677,"blin":9678,"assault":9679,"shorts":9680,"rey":9681,"shy":9682,"squir":9683,"racist":9684,"garlic":9685,"fur":9686,"remote":9687,"smell":9688,"impressed":9689,"fingers":9690,"âłĢ":9691,"dino":9692,"lement":9693,"snu":9694,"promoting":9695,"string":9696,"productive":9697,"bage":9698,"mason":9699,"raz":9700,"directly":9701,"jk":9702,"eval":9703,"ðŁijĬ":9704,"doctors":9705,"cow":9706,"rider":9707,"stv":9708,"remove":9709,"wu":9710,"nathan":9711,"rod":9712,"nr":9713,"=>":9714,"affected":9715,"invest":9716,"mption":9717,"ginger":9718,"od":9719,"agriculture":9720,"sque":9721,"mug":9722,"counting":9723,"kee":9724,"magnific":9725,"cook":9726,"anistan":9727,"root":9728,"placed":9729,"sympo":9730,"ghana":9731,"und":9732,"cheer":9733,"throwing":9734,"secrets":9735,"filling":9736,"optimi":9737,"butterfly":9738,"bubb":9739,"ðŁĺī":9740,"terrible":9741,"dg":9742,"silk":9743,"obsessed":9744,"lou":9745,"aide":9746,"salute":9747,"monu":9748,"philadelphia":9749,"scientific":9750,"ist":9751,"uae":9752,"dessert":9753,"bottles":9754,"canyon":9755,"ðŁĺĪ":9756,"carib":9757,"other":9758,"wich":9759,"resource":9760,"guilty":9761,"und":9762,"leon":9763,"ess":9764,"kane":9765,"ele":9766,"trainer":9767,"heim":9768,"ante":9769,"manage":9770,"rookie":9771,"treated":9772,"poses":9773,"rsvp":9774,"causes":9775,"awak":9776,"jewell":9777,"lett":9778,"onics":9779,"titles":9780,"cardiff":9781,"gaga":9782,"bump":9783,"useful":9784,"?!":9785,"loose":9786,"bbing":9787,"::":9788,"argentina":9789,"debu":9790,"cycl":9791,"whel":9792,"disgu":9793,"jel":9794,"kills":9795,"biology":9796,"exter":9797,"trash":9798,"bodies":9799,"tram":9800,"circuit":9801,"expect":9802,"lads":9803,"wells":9804,"shot":9805,"gee":9806,"narendr":9807,"fastest":9808,"bent":9809,"bills":9810,"marshall":9811,"hats":9812,"introduce":9813,"citizen":9814,"impossible":9815,"gib":9816,"azz":9817,"networking":9818,"rant":9819,"think":9820,"indy":9821,"stops":9822,"ftheday":9823,"brian":9824,"**":9825,"amodi":9826,"dome":9827,"courage":9828,"packing":9829,"affairs":9830,"gn":9831,"sized":9832,"entary":9833,"poland":9834,"switzer":9835,"afghanistan":9836,"wu":9837,"tender":9838,"subscribe":9839,"mosco":9840,"attend":9841,"republican":9842,"honey":9843,"âĢĭ":9844,"simul":9845,"wester":9846,"foodie":9847,"oro":9848,"middle":9849,"abt":9850,"copies":9851,"maje":9852,"narendramodi":9853,"typical":9854,"inspirational":9855,"vitam":9856,"wiscon":9857,"cubs":9858,"tivity":9859,"hali":9860,"ears":9861,"kay":9862,"dare":9863,"marijuana":9864,"curious":9865,"ania":9866,"tomato":9867,"remind":9868,"ðŁĩ·":9869,"scared":9870,"coup":9871,"poet":9872,"landed":9873,"rid":9874,"wrapped":9875,"morri":9876,"climbing":9877,"ews":9878,"feeding":9879,"contra":9880,"thology":9881,"grid":9882,"tively":9883,"reader":9884,"laser":9885,"diving":9886,"dig":9887,"latin":9888,"tied":9889,"shakespe":9890,"oci":9891,"adm":9892,"showers":9893,"chuck":9894,"marcus":9895,"oos":9896,"knee":9897,"olive":9898,"owl":9899,"dylan":9900,"anno":9901,"gym":9902,"decisions":9903,"wellness":9904,"arrives":9905,"satis":9906,"chris":9907,"thurs":9908,"ðŁ¤£":9909,"interviews":9910,"thankyou":9911,"switzerl
and":9912,"overnight":9913,"journalist":9914,"serves":9915,"volcan":9916,".......":9917,"plot":9918,"nicol":9919,"carrying":9920,"magne":9921,"treasure":9922,"exp":9923,"bever":9924,"ðŁĺ¢":9925,"marty":9926,"mole":9927,"donations":9928,"recognized":9929,"bh":9930,"dus":9931,"shann":9932,"aldo":9933,"successfully":9934,"ente":9935,"ðŁĺĤðŁĺĤðŁĺĤðŁĺĤ":9936,"cabinet":9937,"cuis":9938,"titled":9939,"das":9940,"sol":9941,"strategies":9942,"delivering":9943,"adds":9944,"anian":9945,"nether":9946,"ðŁĴĥ":9947,"contain":9948,"suits":9949,"pairs":9950,"todd":9951,"rella":9952,"rope":9953,"cio":9954,"crop":9955,"paintings":9956,"suz":9957,"rejec":9958,"bust":9959,"dh":9960,"fraud":9961,"mh":9962,"control":9963,"jeal":9964,"destroyed":9965,"allows":9966,"wool":9967,"minnesota":9968,"omen":9969,"ju":9970,"symposium":9971,"daf":9972,"limit":9973,"accounts":9974,"loading":9975,"intern":9976,"resolution":9977,"holland":9978,"qual":9979,"meetings":9980,"grave":9981,"camping":9982,"vam":9983,"renov":9984,"liberal":9985,"amber":9986,"gree":9987,"humb":9988,"fever":9989,"eling":9990,"brooks":9991,"à²":9992,"beth":9993,"aded":9994,"alt":9995,"roe":9996,"performed":9997,"josh":9998,"franklin":9999,"nicole":10000,"dess":10001,"bbs":10002,"mg":10003,"networks":10004,"minim":10005,"alt":10006,"weapons":10007,"guy":10008,"jason":10009,"gha":10010,"harbour":10011,"aton":10012,"praise":10013,"kentucky":10014,"belfast":10015,"sticks":10016,"bloss":10017,"hopes":10018,"anthro":10019,"familiar":10020,"wait":10021,"chile":10022,"depression":10023,"lax":10024,"jets":10025,"leice":10026,"receives":10027,"sier":10028,"ank":10029,"dex":10030,"indeed":10031,"flexi":10032,"fabric":10033,"lamb":10034,"helicop":10035,"amanda":10036,"âĢĶâĢĶ":10037,"compete":10038,"snack":10039,"technologies":10040,"syrian":10041,"moms":10042,"muham":10043,"chosen":10044,"anat":10045,"devon":10046,"sharks":10047,"ret":10048,"fundraiser":10049,"selfies":10050,"stations":10051,"communications":10052,"tennessee":10053,"tutor":10054,"rot":10055,"valuable":10056,"dynamic":10057,"nurse":10058,"ied":10059,"earthquake":10060,"deserved":10061,"ave":10062,"sara":10063,"stretch":10064,"douglas":10065,"nepal":10066,"ç":10067,"obviously":10068,"dame":10069,"rape":10070,"anybody":10071,"kw":10072,"patrol":10073,"holders":10074,"hanna":10075,"infographic":10076,"eco":10077,"beating":10078,"stanley":10079,"boats":10080,"ribb":10081,"ez":10082,"witch":10083,"inva":10084,"acid":10085,"boarding":10086,"-@":10087,"gil":10088,"dave":10089,"careers":10090,"oppos":10091,"lloy":10092,"inter":10093,"dope":10094,"resu":10095,"jagu":10096,"shade":10097,"indy":10098,"onist":10099,"relations":10100,"agen":10101,"able":10102,"incident":10103,"meter":10104,"sharma":10105,"idr":10106,"prove":10107,"immediately":10108,"troops":10109,"aman":10110,"glow":10111,"gaza":10112,"blocks":10113,"personal":10114,"chronic":10115,"aller":10116,"sid":10117,"shr":10118,"whatsapp":10119,"lucy":10120,"archae":10121,"hou":10122,"journalism":10123,"ourselves":10124,"got":10125,"themed":10126,"shaped":10127,"weak":10128,"casual":10129,"length":10130,"slam":10131,"abbey":10132,"ev":10133,"counter":10134,"esta":10135,"recipi":10136,"chapel":10137,"expansion":10138,"self":10139,"suffering":10140,"spice":10141,"nz":10142,"spart":10143,"desper":10144,"booking":10145,"quarters":10146,"yon":10147,"ðŁĴĹ":10148,"pk":10149,"continued":10150,"-#":10151,"manhatt":10152,"talked":10153,"shen":10154,"combo":10155,"hybrid":10156,"jeans":10157,"liquid":10158,"seal":10159,"retweets":10160,"acceler":10161,"collective":
10162,"tas":10163,":))":10164,"professionals":10165,"raw":10166,"ott":10167,"susan":10168,"iring":10169,"oklahoma":10170,"reven":10171,"survival":10172,"creator":10173,"transit":10174,"stac":10175,"surf":10176,"ik":10177,"editing":10178,"chilling":10179,"bailey":10180,"steal":10181,"rable":10182,"parent":10183,"hunger":10184,"snapp":10185,"collect":10186,"philosoph":10187,"dedication":10188,"cf":10189,"cm":10190,"leep":10191,"repeat":10192,"reha":10193,"unfortun":10194,"aer":10195,"aero":10196,"abstract":10197,"monitor":10198,"agents":10199,"bul":10200,"science":10201,"harbor":10202,"dragons":10203,"flooding":10204,"accompli":10205,"dash":10206,"julia":10207,"thered":10208,"tuesday":10209,"cyber":10210,"blow":10211,"tained":10212,"lem":10213,"reference":10214,"ppo":10215,"negoti":10216,"charle":10217,"connor":10218,"ault":10219,"accessories":10220,"commissioner":10221,"rainy":10222,"rear":10223,"advisory":10224,"lucas":10225,"maid":10226,"coal":10227,"kav":10228,"polo":10229,"ðŁı¾":10230,"transport":10231,"margare":10232,"strawberry":10233,"burns":10234,"greens":10235,"nev":10236,"participants":10237,"colin":10238,"belgium":10239,"colour":10240,"inform":10241,"dell":10242,"bron":10243,"caly":10244,"kickoff":10245,"strategic":10246,"reunion":10247,"honors":10248,"lib":10249,"egyp":10250,"âŃIJï¸ı":10251,"hypo":10252,"sizes":10253,"registered":10254,"betes":10255,"relaxing":10256,"bloom":10257,"intense":10258,"valentines":10259,"insane":10260,"wwii":10261,"px":10262,"trio":10263,"blade":10264,"wisconsin":10265,"cone":10266,"platin":10267,"alize":10268,"raven":10269,"increasing":10270,"indians":10271,"ilian":10272,"blu":10273,"rabbit":10274,"extension":10275,"jef":10276,"audi":10277,"ferry":10278,"sell":10279,"aday":10280,"usb":10281,"sweat":10282,"champag":10283,"method":10284,"memph":10285,"assist":10286,"sby":10287,"cape":10288,"removed":10289,"magn":10290,"vt":10291,"rams":10292,"fbi":10293,"tackle":10294,"phew":10295,"hon":10296,"motorcycle":10297,"suspec":10298,"elephant":10299,"subject":10300,"lette":10301,"dairy":10302,"wheat":10303,"awkward":10304,"act":10305,"trol":10306,"mitted":10307,"zayn":10308,"sheriff":10309,"enemy":10310,"cons":10311,"kett":10312,"bulls":10313,"evalu":10314,"btc":10315,"satellite":10316,"holo":10317,"porter":10318,"diabetes":10319,"better":10320,"releasing":10321,"surf":10322,":-":10323,"sebasti":10324,"collecting":10325,"encing":10326,"ethi":10327,"gods":10328,"alley":10329,"healthy":10330,"mills":10331,"smash":10332,"copper":10333,"crack":10334,"readers":10335,"spac":10336,"license":10337,"basket":10338,"bangla":10339,"entic":10340,"omi":10341,"mere":10342,"sively":10343,"animation":10344,"lanes":10345,"dentally":10346,"chillin":10347,"fie":10348,"karen":10349,"depth":10350,"lipse":10351,"ng":10352,"rip":10353,"melo":10354,"sandy":10355,"ðŁijıðŁijı":10356,"vincent":10357,"nut":10358,"hug":10359,"whole":10360,"creates":10361,"????":10362,"âĿ¤ï¸ıâĿ¤ï¸ı":10363,"baked":10364,"upgrade":10365,"roberts":10366,"hara":10367,"caribbean":10368,"authentic":10369,"mbs":10370,"moscow":10371,"attorney":10372,"wiki":10373,"chlo":10374,"hull":10375,"cork":10376,"\"!":10377,"stylish":10378,"ðŁĵ¸:":10379,"diary":10380,"improving":10381,"expand":10382,"bright":10383,"pollution":10384,"knights":10385,"personality":10386,"checked":10387,"facilities":10388,"zel":10389,"bowling":10390,"guer":10391,"ðŁİĤ":10392,"ongoing":10393,"units":10394,"hook":10395,"beck":10396,"conflict":10397,"todd":10398,"farming":10399,"educational":10400,"kak":10401,"clay":10402,"stroke":10403,"belly":10404
,"explore":10405,"millenni":10406,"thm":10407,"loop":10408,"sms":10409,"consist":10410,"circa":10411,"bryan":10412,"dab":10413,"younger":10414,"solidar":10415,"ppa":10416,"experienced":10417,"bella":10418,"board":10419,"sheffield":10420,"stephen":10421,"consumer":10422,"submit":10423,"sponsor":10424,"tang":10425,"aggre":10426,"combined":10427,"tracking":10428,"sanders":10429,"baz":10430,"survive":10431,"ferred":10432,"equal":10433,"sep":10434,"reed":10435,"strong":10436,"privacy":10437,"stap":10438,"ung":10439,"acry":10440,"pasta":10441,"pirates":10442,"ager":10443,"fairy":10444,"dup":10445,"introduced":10446,"wip":10447,"lets":10448,"spray":10449,"ðŁĵº":10450,"grew":10451,"asts":10452,"pittsburgh":10453,"newyork":10454,"joey":10455,"lauren":10456,"trade":10457,"chop":10458,"pipe":10459,"claire":10460,"behavior":10461,"vap":10462,"crews":10463,"laptop":10464,"ðŁ¤Ĺ":10465,"chester":10466,"discipl":10467,"df":10468,"outdoors":10469,"ks":10470,"gover":10471,"superstar":10472,"casino":10473,"farmer":10474,";-)":10475,"returned":10476,"ðŁıĪ":10477,"mail":10478,"roasted":10479,"costa":10480,"vill":10481,"pez":10482,"gardening":10483,"distribution":10484,"shining":10485,"investors":10486,"rasp":10487,"decades":10488,"realized":10489,"barn":10490,"pti":10491,"stable":10492,"utd":10493,"panthers":10494,"mens":10495,"bn":10496,"cade":10497,"bucket":10498,"ynn":10499,"whenever":10500,"wake":10501,"dais":10502,"bernie":10503,"lodge":10504,"julie":10505,"atmosphere":10506,"ðŁĺĺðŁĺĺ":10507,"majority":10508,"parti":10509,"excit":10510,"cut":10511,"meh":10512,"muslims":10513,"begun":10514,"flights":10515,"veness":10516,"ceme":10517,"posing":10518,"sole":10519,"gou":10520,"darkness":10521,"peach":10522,"celtic":10523,"authority":10524,"grandma":10525,"fulness":10526,"smith":10527,"specific":10528,"garcia":10529,"coins":10530,"goodness":10531,"aldub":10532,"recruiting":10533,"dennis":10534,"gary":10535,"sleeve":10536,"weapon":10537,"plz":10538,"discover":10539,"harrison":10540,"recruitment":10541,"jai":10542,"chim":10543,"compared":10544,"toms":10545,"mothers":10546,"amy":10547,"archive":10548,"task":10549,"benjam":10550,"seg":10551,"lawyer":10552,"alum":10553,"investing":10554,"mie":10555,"chez":10556,"jp":10557,"ake":10558,"flam":10559,"wallpaper":10560,"âĻ¥ï¸ı":10561,"tton":10562,"chest":10563,"favorites":10564,"weigh":10565,"coolest":10566,"rating":10567,"relevant":10568,"logan":10569,"maple":10570,"runners":10571,"prior":10572,"people":10573,"maur":10574,"terrorist":10575,"tested":10576,"carnival":10577,"suspen":10578,"measure":10579,"mv":10580,"cybersecurity":10581,"appren":10582,"terrorism":10583,"oz":10584,"vital":10585,"nies":10586,"gonz":10587,"funded":10588,"twist":10589,"assessment":10590,"diesel":10591,"enfor":10592,"column":10593,"addressing":10594,"casts":10595,"payment":10596,"xton":10597,"fier":10598,",'":10599,"last":10600,"nee":10601,"unless":10602,"close":10603,"skill":10604,"cuisine":10605,"funeral":10606,"tiles":10607,"aun":10608,"kru":10609,"relationships":10610,"ðŁĴ¯":10611,"event":10612,"âĢįâĻĤï¸ı":10613,"kindness":10614,"proposed":10615,"acoustic":10616,"aes":10617,"defender":10618,"dance":10619,"htt":10620,"wat":10621,"voy":10622,"ðŁ¤ĺ":10623,"aus":10624,"cliff":10625,"searching":10626,"beautifully":10627,"inqu":10628,"atl":10629,"specialist":10630,"ðŁIJ¶":10631,"dai":10632,"trails":10633,"classics":10634,"instant":10635,"vous":10636,"revenue":10637,"march":10638,"kirk":10639,"fringe":10640,"fireworks":10641,"trivia":10642,"âĺħ":10643,"traction":10644,"walter":10645,"moto":10646,"l
ily":10647,"attitude":10648,"climb":10649,"scan":10650,"savings":10651,"cw":10652,"faith":10653,"credits":10654,"abled":10655,"graff":10656,"autograph":10657,"hehe":10658,"ranch":10659,"had":10660,"rogers":10661,"ðŁĮ¹":10662,"fin":10663,"requ":10664,"folk":10665,"additional":10666,"lynn":10667,"uber":10668,"dollars":10669,"logic":10670,"worth":10671,"som":10672,"thesis":10673,"pound":10674,"bic":10675,"stur":10676,"ceram":10677,"spencer":10678,"entered":10679,"vamp":10680,"organized":10681,"âľĪ":10682,"pps":10683,"tron":10684,"mercedes":10685,"noti":10686,"competitive":10687,"dow":10688,"ousness":10689,"victor":10690,"grilled":10691,"nai":10692,"putin":10693,"abra":10694,"blame":10695,"alexand":10696,"animal":10697,"decent":10698,"pent":10699,"interior":10700,":')":10701,"butler":10702,"ballet":10703,"ðŁĴĶ":10704,"albums":10705,"downs":10706,"lad":10707,"sir":10708,"plain":10709,"pers":10710,"blonde":10711,"disc":10712,"pakistan":10713,"sement":10714,"gaa":10715,"wage":10716,"chas":10717,"mani":10718,"cops":10719,"territ":10720,"lol":10721,"laughter":10722,"rivers":10723,"magnificent":10724,"lamp":10725,"wb":10726,"newsle":10727,"charts":10728,"blessing":10729,"punch":10730,"longest":10731,"floral":10732,"cutie":10733,"farewell":10734,"stopping":10735,"mbb":10736,"bud":10737,"cheese":10738,"decla":10739,"sim":10740,"mcdonald":10741,"deter":10742,"youth":10743,"tch":10744,"freder":10745,"kindle":10746,"fern":10747,"ator":10748,"asleep":10749,"pond":10750,"sprint":10751,"pounds":10752,"lazy":10753,"ghe":10754,"fundraising":10755,"deadly":10756,"grande":10757,"doug":10758,"hey":10759,"linda":10760,"considering":10761,"ium":10762,"golden":10763,"vik":10764,"authors":10765,"diss":10766,"ually":10767,"appropriate":10768,"morning":10769,"yle":10770,"honoring":10771,"folio":10772,"bec":10773,"rebec":10774,"finland":10775,"formula":10776,"cornwall":10777,"shay":10778,"causing":10779,"blend":10780,"signal":10781,"tent":10782,"kashmir":10783,"nationals":10784,"harmony":10785,"scout":10786,"accessi":10787,"height":10788,"medieval":10789,"improvement":10790,"kees":10791,"practical":10792,"card":10793,"depar":10794,"hun":10795,"oming":10796,"calgary":10797,"stel":10798,"bubble":10799,"guru":10800,"mah":10801,"unexpe":10802,"nh":10803,"eda":10804,"meat":10805,"ige":10806,"sio":10807,"goddess":10808,"inches":10809,"tunes":10810,"britt":10811,"stion":10812,"raj":10813,"âĻ«":10814,"mercy":10815,"ðŁĴĺ":10816,"sends":10817,"iest":10818,"polici":10819,"vale":10820,"reduced":10821,"asap":10822,"vijay":10823,"defensive":10824,"celebrations":10825,"riders":10826,"meditation":10827,"harmon":10828,"ging":10829,"¡":10830,"programming":10831,"inau":10832,"sudden":10833,"mh":10834,"replacement":10835,"sku":10836,"jar":10837,"grades":10838,"tast":10839,"kitt":10840,"branding":10841,"kaw":10842,"boot":10843,"fought":10844,"pays":10845,"gf":10846,"ization":10847,"hop":10848,"kk":10849,"activist":10850,"vend":10851,"coastal":10852,"chaos":10853,"ðŁĶ´":10854,"seme":10855,"billboard":10856,"lifting":10857,"cumb":10858,"scal":10859,"ðŁĸ¤":10860,"struck":10861,"lv":10862,"indiedev":10863,"beaten":10864,"jungle":10865,"alright":10866,"destiny":10867,"ming":10868,"kc":10869,"chances":10870,"oman":10871,"qatar":10872,"craf":10873,"trained":10874,"prix":10875,"charm":10876,"otive":10877,"smu":10878,"ec":10879,"anders":10880,"handed":10881,"alban":10882,"certainly":10883,"arriving":10884,"ize":10885,"sai":10886,"track":10887,"painter":10888,"humble":10889,"appointment":10890,"headline":10891,"managing":10892,"mod":10893,"aspe":1089
4,"andrea":10895,"ä":10896,"ethiop":10897,"united":10898,"exist":10899,"bali":10900,"kad":10901,"nt":10902,"dred":10903,"rex":10904,"recognize":10905,"tampa":10906,"beers":10907,"atia":10908,"heels":10909,"note":10910,"transportation":10911,"turtle":10912,"rede":10913,"hiphop":10914,"spicy":10915,"spurs":10916,"â¬ĩ":10917,"corp":10918,"thern":10919,"toast":10920,"hurry":10921,"properties":10922,"mage":10923,"marco":10924,"elements":10925,"bouti":10926,"syndrome":10927,"msg":10928,"developer":10929,"graders":10930,"heim":10931,"resil":10932,"offices":10933,"delay":10934,"dimen":10935,"vintag":10936,"barbara":10937,"ðŁĺ±":10938,"venezu":10939,"cular":10940,"faced":10941,"barn":10942,"ðŁĺĨ":10943,"survivor":10944,"worm":10945,"confused":10946,"passionate":10947,"ر":10948,"identify":10949,"electricity":10950,"souls":10951,"bradley":10952,"reportedly":10953,"lunch":10954,"shelf":10955,"elia":10956,"sweet":10957,"smooth":10958,"employment":10959,"amel":10960,"manhattan":10961,"steam":10962,"ounts":10963,"yep":10964,"living":10965,"une":10966,"describe":10967,"cares":10968,"manila":10969,"shawn":10970,"acted":10971,"bash":10972,"steven":10973,"rest":10974,"petition":10975,"divine":10976,"welsh":10977,"race":10978,"platinum":10979,"ðŁĮ¸":10980,"pb":10981,"extraordinary":10982,"solidarity":10983,"mall":10984,"onion":10985,"scheduled":10986,"gameof":10987,"fergu":10988,"dems":10989,"norm":10990,"pk":10991,"trials":10992,"policies":10993,"publishing":10994,"stole":10995,"front":10996,"character":10997,"vania":10998,"exce":10999,"stie":11000,"sca":11001,"residential":11002,"sailing":11003,"ðŁĶ¥ðŁĶ¥ðŁĶ¥":11004,"sponsors":11005,"thick":11006,"champagne":11007,"shepher":11008,"continuing":11009,"venice":11010,"perth":11011,"nap":11012,"aster":11013,"yak":11014,"unlimited":11015,"choices":11016,"neo":11017,"hiv":11018,"reporter":11019,"brussels":11020,"fold":11021,"dys":11022,"semi":11023,"lawn":11024,"italia":11025,"wifi":11026,"ask":11027,"emed":11028,"frame":11029,"monitoring":11030,"stead":11031,"ida":11032,"grin":11033,"isa":11034,"flip":11035,"restric":11036,"offensive":11037,"attached":11038,"dish":11039,"why":11040,"phillips":11041,"greet":11042,"pals":11043,"mixtape":11044,"vou":11045,"fielder":11046,"spark":11047,"alberta":11048,"glen":11049,"cash":11050,"sri":11051,"uri":11052,"rodri":11053,"entrepreneurs":11054,"climatechange":11055,"psy":11056,"dle":11057,"ements":11058,"linked":11059,"netherlands":11060,"accidentally":11061,"opposition":11062,"velvet":11063,"rays":11064,"cw":11065,"omo":11066,"mf":11067,"lmfao":11068,"newsletter":11069,":)":11070,"toilet":11071,"literature":11072,"disp":11073,"philip":11074,"uniform":11075,"suddenly":11076,"header":11077,"cooler":11078,"---":11079,"proud":11080,"brig":11081,"nissan":11082,"scientist":11083,"jah":11084,"concentr":11085,"packs":11086,"appointed":11087,"soap":11088,"engage":11089,"chose":11090,"âĻ¡":11091,"setup":11092,"jealous":11093,"harry":11094,"gation":11095,"tunnel":11096,"temp":11097,"oscars":11098,"decade":11099,"recommended":11100,"children":11101,"aba":11102,"anxiety":11103,"vements":11104,"salon":11105,"photoo":11106,"organiz":11107,"machines":11108,"abs":11109,"ville":11110,"hype":11111,"tiff":11112,"emerging":11113,"avgeek":11114,"[#":11115,"contribution":11116,"brady":11117,"resto":11118,"gmail":11119,"fitz":11120,"photoshoot":11121,"helmet":11122,"ht":11123,"elegant":11124,"uganda":11125,"nursing":11126,"orleans":11127,"penn":11128,"nah":11129,"footage":11130,"ema":11131,"wo":11132,"wad":11133,"concerns":11134,"vere":11135,"remark
":11136,"whoever":11137,"strang":11138,"pt":11139,"quit":11140,"shang":11141,"history":11142,"sick":11143,"permanent":11144,"illness":11145,"cold":11146,"vision":11147,"hem":11148,"arrow":11149,"convic":11150,"pink":11151,"occup":11152,"bald":11153,"exhau":11154,"uof":11155,"amo":11156,"ont":11157,"ãĥ»":11158,"adopt":11159,"laid":11160,"smoked":11161,"interpre":11162,"essenti":11163,"associated":11164,"bd":11165,"bby":11166,"fier":11167,"install":11168,"diplom":11169,"conditi":11170,"cf":11171,"wak":11172,"anya":11173,"graci":11174,"fisher":11175,"sss":11176,"apr":11177,"ilit":11178,"musician":11179,"symphony":11180,"cord":11181,"hack":11182,"legi":11183,"lv":11184,"blessings":11185,"humor":11186,"scra":11187,"eti":11188,"minster":11189,"travelling":11190,"bush":11191,"jewellery":11192,"lime":11193,"!!!":11194,"pregnant":11195,"pee":11196,"lob":11197,"capital":11198,"ipa":11199,"pencil":11200,"labor":11201,"ducks":11202,"proudly":11203,"wedding":11204,"derek":11205,"mw":11206,"peg":11207,"valentine":11208,"angu":11209,"retreat":11210,"prospect":11211,"danger":11212,"vulner":11213,"upset":11214,",#":11215,"srk":11216,"xim":11217,"thursday":11218,"nfl":11219,"kisses":11220,"reds":11221,"crack":11222,"reward":11223,"cu":11224,"kok":11225,"mete":11226,"abandoned":11227,"itt":11228,"meals":11229,"spell":11230,"stanbul":11231,"delays":11232,"rum":11233,"leop":11234,"gum":11235,"nova":11236,"superman":11237,"chick":11238,"mis":11239,"dramatic":11240,"innocent":11241,"rounds":11242,"rec":11243,"autism":11244,"bangladesh":11245,"moral":11246,"movie":11247,"spoo":11248,"kla":11249,"âĥ£":11250,"outing":11251,"messi":11252,"abroad":11253,"lookin":11254,"aim":11255,"qi":11256,"stack":11257,"collage":11258,"à¯":11259,"hudson":11260,"scan":11261,"hoe":11262,"chau":11263,"occur":11264,"commander":11265,"holes":11266,"ðŁİĦ":11267,"bias":11268,"von":11269,"sticker":11270,"mak":11271,"responsibility":11272,"columbus":11273,"saint":11274,"edmon":11275,"racism":11276,"farms":11277,"wen":11278,"gulf":11279,"mayo":11280,"!!!!!!!!":11281,"corporation":11282,"bachel":11283,"ela":11284,"internal":11285,"jeep":11286,"follows":11287,"dialogue":11288,"derer":11289,"smartphone":11290,"helen":11291,"richmond":11292,"equity":11293,"sland":11294,"bg":11295,"near":11296,"avi":11297,"memphis":11298,"weir":11299,"discussed":11300,"badge":11301,"pup":11302,"mistake":11303,"phenomen":11304,"unite":11305,"ðŁĽ":11306,"depic":11307,"rides":11308,"inaugu":11309,"nat":11310,"softwitter":11311,"combination":11312,"gospel":11313,"âļ¾":11314,"admission":11315,"retrogaming":11316,"ðŁIJ¾":11317,"schu":11318,"mbo":11319,"junction":11320,"alarm":11321,"à¦":11322,"grac":11323,"khali":11324,"kul":11325,"male":11326,"caption":11327,"wish":11328,"tere":11329,"corps":11330,"rubber":11331,"playstation":11332,"erin":11333,"efficient":11334,"lor":11335,"jokes":11336,"inary":11337,"norman":11338,"luis":11339,"inaugural":11340,"ched":11341,"âļ½ï¸ı":11342,"dip":11343,"toe":11344,"strat":11345,"aac":11346,"amu":11347,"pier":11348,"cott":11349,"command":11350,"tten":11351,"snoo":11352,"cube":11353,"closes":11354,"classical":11355,"sword":11356,"expression":11357,"reaching":11358,"napp":11359,"cost":11360,"affect":11361,"rico":11362,"gif":11363,"breathe":11364,"tribe":11365,"ortho":11366,"hay":11367,"lg":11368,"fries":11369,"nm":11370,"hiding":11371,"richards":11372,"ende":11373,"micro":11374,"capitol":11375,"copy":11376,"rom":11377,"regime":11378,"maryland":11379,"taxi":11380,"dial":11381,"embarra":11382,"unbeliev":11383,"cht":11384,"vs":11385,"elimin"
:11386,"odd":11387,"penny":11388,"soundtrack":11389,"lings":11390,"transition":11391,"remaining":11392,"ais":11393,"malik":11394,"?!?":11395,"random":11396,"defend":11397,"ultra":11398,"trum":11399,"dancer":11400,"stol":11401,"drive":11402,"aver":11403,"roast":11404,"definition":11405,"sean":11406,"excitement":11407,"particul":11408,"surely":11409,"shav":11410,"bery":11411,"dishes":11412,"comm":11413,"isol":11414,"iam":11415,"obli":11416,"ghost":11417,"hughes":11418,"chiefs":11419,"bas":11420,"conservative":11421,"special":11422,"femin":11423,"shri":11424,"nancy":11425,"intel":11426,"tune":11427,"ðŁĩª":11428,"joel":11429,"ggle":11430,"moto":11431,"ðŁĺĶ":11432,"buck":11433,"dag":11434,"anticip":11435,"montana":11436,"guid":11437,"frog":11438,"ecraft":11439,"ope":11440,"drives":11441,"numer":11442,"xy":11443,"colorful":11444,"wednesdaywisdom":11445,"illumin":11446,"beyon":11447,"inaugur":11448,"deeply":11449,"prefer":11450,"fortune":11451,"cooked":11452,"tible":11453,"âĺķ":11454,"sweater":11455,"itter":11456,"tty":11457,"ui":11458,"gie":11459,"complic":11460,"~~":11461,"taxes":11462,"cups":11463,"diverse":11464,"samanth":11465,"âłĢâłĢ":11466,"baking":11467,"symp":11468,"wai":11469,"behalf":11470,"mercur":11471,"travels":11472,"ðŁİīðŁİ":11473,"oria":11474,"engaged":11475,"jumping":11476,"retired":11477,"naked":11478,"puni":11479,"speedway":11480,"sciences":11481,"rehearsal":11482,"onym":11483,"dyou":11484,"plates":11485,"rati":11486,"krish":11487,"jazz":11488,"carol":11489,"raf":11490,"penalty":11491,"timeline":11492,"ruby":11493,"engineers":11494,"raf":11495,"belle":11496,"dose":11497,"cheon":11498,"escap":11499,"meg":11500,"rank":11501,"ord":11502,"megan":11503,"merch":11504,"eclipse":11505,"âĺºï¸ı":11506,"pledge":11507,"kirk":11508,"persi":11509,"leicester":11510,"sak":11511,"wk":11512,"safely":11513,"yyy":11514,"jet":11515,"promised":11516,"jc":11517,"enne":11518,"noah":11519,"reno":11520,"rea":11521,"ðŁĺĤðŁĺĤðŁĺĤðŁĺĤ":11522,"trail":11523,"ðŁijĢ":11524,"fd":11525,"sooo":11526,"rimin":11527,"wk":11528,"า":11529,"ial":11530,"xox":11531,"biscu":11532,"dale":11533,"fandom":11534,"participating":11535,"flag":11536,"privilege":11537,"peach":11538,"machine":11539,"boston":11540,"gross":11541,"og":11542,"miracle":11543,"adoption":11544,"uss":11545,"monsters":11546,"beij":11547,"clarke":11548,"pushing":11549,"praying":11550,"aro":11551,"dn":11552,"ellis":11553,"apollo":11554,"odds":11555,"refugee":11556,"tow":11557,"bp":11558,"ðŁĩ¬ðŁĩ§":11559,"hend":11560,"appeared":11561,"membership":11562,"pean":11563,"dum":11564,"violent":11565,"vy":11566,"potatoes":11567,"aww":11568,"greetings":11569,"tts":11570,"acon":11571,"shane":11572,"photographed":11573,"crab":11574,"temperatures":11575,"cuba":11576,"cfc":11577,"welcom":11578,"hel":11579,"innings":11580,"mk":11581,"code":11582,"knock":11583,"grass":11584,"swedish":11585,"pta":11586,"icky":11587,"vat":11588,"lining":11589,"sq":11590,"sap":11591,"arc":11592,"announcing":11593,"skins":11594,"cityof":11595,"bring":11596,"cox":11597,"gamer":11598,"itarian":11599,"ida":11600,"hd":11601,"rosse":11602,"sadly":11603,"geo":11604,"âļ¡ï¸ı":11605,"tags":11606,"father":11607,"change":11608,"lance":11609,"whiskey":11610,"adelaide":11611,"tec":11612,"stickers":11613,"market":11614,"classy":11615,"badass":11616,"florence":11617,"liner":11618,"frost":11619,"kate":11620,"acon":11621,"scandal":11622,"essex":11623,"ðŁĺı":11624,"vivi":11625,"drill":11626,"bloggers":11627,"recommend":11628,"dha":11629,"acres":11630,"roma":11631,"buy":11632,"grocer":11633,"eria":11634,"mahar":11
635,"ffer":11636,"patterns":11637,"veri":11638,"compu":11639,"stev":11640,"anga":11641,"mentor":11642,"doo":11643,"itali":11644,"cdnpoli":11645,"only":11646,"conduct":11647,"electro":11648,"def":11649,"whale":11650,"preparation":11651,"bicycle":11652,"viral":11653,"turnout":11654,"brass":11655,"quad":11656,"hospitality":11657,"packaging":11658,"dency":11659,"cemetery":11660,"aboard":11661,"dreaming":11662,"picture":11663,"tall":11664,"invent":11665,"admi":11666,"oe":11667,"temps":11668,"quan":11669,"fundam":11670,"promp":11671,"residence":11672,"mud":11673,"souri":11674,"âĦ¢":11675,"graffiti":11676,"gif":11677,"dnd":11678,"comp":11679,"swar":11680,"peeps":11681,"palestine":11682,"devils":11683,"sang":11684,"assistance":11685,"bike":11686,"mississi":11687,"interviewed":11688,"nephew":11689,"drums":11690,"vand":11691,"gentlemen":11692,"nsw":11693,"insta":11694,"lebanon":11695,"eeee":11696,"olivia":11697,"very":11698,"rough":11699,"industries":11700,"mation":11701,"ðŁĺĴ":11702,"barrel":11703,"nay":11704,"pops":11705,"modern":11706,"illy":11707,"arest":11708,"onents":11709,"protecting":11710,"vans":11711,"eo":11712,"vikings":11713,"restaurants":11714,"reck":11715,"jackie":11716,"andrew":11717,"willing":11718,"heath":11719,"citizen":11720,"discrimin":11721,"à¹Ī":11722,"stuart":11723,"mys":11724,"hip":11725,"transp":11726,"\"?":11727,"tex":11728,"sushi":11729,"ked":11730,"crossed":11731,"distur":11732,"pedia":11733,"fate":11734,"somehow":11735,"moth":11736,"processing":11737,"iss":11738,"rin":11739,"uts":11740,"yyc":11741,"vert":11742,"lgbt":11743,"reid":11744,"onto":11745,"arabia":11746,"habitat":11747,"==":11748,"streak":11749,"simpson":11750,"addiction":11751,"wimble":11752,"delivers":11753,"challenging":11754,"ðŁİ¶":11755,"franch":11756,"edu":11757,"sme":11758,"aids":11759,"hurst":11760,"tham":11761,"tarian":11762,"remembered":11763,"palestinian":11764,"fees":11765,"trum":11766,"sketch":11767,"uru":11768,"fitting":11769,"jesse":11770,"ðŁĶ¥ðŁĶ¥":11771,"--------":11772,"bach":11773,"icia":11774,"colored":11775,"dah":11776,"associate":11777,"intel":11778,"seller":11779,"pu":11780,"stuffed":11781,"acs":11782,"bs":11783,"shin":11784,"cooperation":11785,"certificate":11786,"abu":11787,"ingredients":11788,"rev":11789,"inge":11790,"elder":11791,"christian":11792,"bundle":11793,"thic":11794,"dirt":11795,"beijing":11796,"commit":11797,"teddy":11798,"edu":11799,"today":11800,"sfield":11801,"wyn":11802,"confirms":11803,"loo":11804,"jv":11805,"eness":11806,"alpha":11807,"virus":11808,"arium":11809,"grind":11810,"bridges":11811,"introduction":11812,"polls":11813,"bacter":11814,"zach":11815,"terminal":11816,"raiders":11817,"flavor":11818,"zombie":11819,"vod":11820,"spreading":11821,"gameofthrones":11822,"efficiency":11823,"lately":11824,"alem":11825,"tweet":11826,"crimes":11827,"cler":11828,"dey":11829,"dged":11830,"hyun":11831,"payments":11832,"circus":11833,"ðŁĺŃðŁĺŃ":11834,"missouri":11835,"lub":11836,"episodes":11837,"cage":11838,"pos":11839,"matching":11840,"tumblr":11841,"lined":11842,"gest":11843,"ambi":11844,"narr":11845,"ington":11846,"regul":11847,"blown":11848,"isle":11849,"coco":11850,"ondon":11851,"joshua":11852,"touring":11853,"sma":11854,"sausage":11855,"bestfriend":11856,"boeing":11857,"desire":11858,"savage":11859,"rapper":11860,"devo":11861,"tear":11862,"takeover":11863,"cowboys":11864,"poker":11865,"parag":11866,"ppe":11867,"hint":11868,"wears":11869,"seth":11870,"roles":11871,"lanc":11872,"manga":11873,"format":11874,"flyer":11875,"cay":11876,"moor":11877,"bake":11878,"splash":11879,"vad
":11880,"kerala":11881,"proceeds":11882,"silly":11883,"reflection":11884,"distr":11885,"wid":11886,"suit":11887,"civic":11888,"yankees":11889,"byn":11890,"migration":11891,"distin":11892,"orch":11893,"femini":11894,"qualifying":11895,"turi":11896,"obe":11897,"hundred":11898,"crap":11899,"wang":11900,"mathemat":11901,"bure":11902,"exposure":11903,"ferguson":11904,"semester":11905,"reserv":11906,"plym":11907,"ahu":11908,"facial":11909,"wax":11910,"worried":11911,"cab":11912,"vio":11913,"asa":11914,"cod":11915,"topics":11916,"pcs":11917,"halo":11918,"rescued":11919,"horizon":11920,"ark":11921,"âļª":11922,"holly":11923,"elf":11924,"ulti":11925,"pup":11926,"qualified":11927,"attendance":11928,"atively":11929,"destroy":11930,"yc":11931,"forth":11932,"photooftheday":11933,"cents":11934,"iceland":11935,"measures":11936,"desk":11937,"portfolio":11938,"articles":11939,"directors":11940,"datab":11941,"ew":11942,"creepy":11943,"ounding":11944,"honoured":11945,"mist":11946,"jit":11947,"mentioned":11948,"portable":11949,"itic":11950,"dann":11951,"fridayfeeling":11952,"amid":11953,"tiger":11954,"scrip":11955,"helicopter":11956,"hardware":11957,"explor":11958,"workplace":11959,"austria":11960,"beatles":11961,"bernar":11962,"spider":11963,"disco":11964,"cult":11965,"limits":11966,"shortly":11967,"final":11968,"ninja":11969,"luke":11970,"lebron":11971,"walmart":11972,"oil":11973,"vanilla":11974,"shire":11975,"yeg":11976,"aky":11977,"cs":11978,"bler":11979,"collected":11980,"tg":11981,"rolled":11982,"specials":11983,"bff":11984,"pierre":11985,"shim":11986,"vier":11987,"flashback":11988,"restoration":11989,"individuals":11990,"prod":11991,"freaking":11992,"turer":11993,"oa":11994,"refre":11995,"moroc":11996,"greet":11997,"reyn":11998,"careful":11999,"ouring":12000,"ush":12001,"isd":12002,"gill":12003,"view":12004,"thunderstorm":12005,"bled":12006,"picnic":12007,"guardi":12008,"pig":12009,"ark":12010,"sylvania":12011,"banned":12012,"ucl":12013,"vijay":12014,"orium":12015,"avengers":12016,"believes":12017,"eur":12018,"monument":12019,"concerned":12020,"labs":12021,"berg":12022,"aap":12023,"vish":12024,"singles":12025,"cancel":12026,"zel":12027,"arab":12028,"ruth":12029,"tooth":12030,"arta":12031,"shaf":12032,"chairs":12033,"rack":12034,"diseases":12035,"crowd":12036,"cly":12037,"flex":12038,"christma":12039,"artificial":12040,"tomat":12041,"fine":12042,"draws":12043,"advocate":12044,"france":12045,"ÙĬ":12046,"ðŁĺ³":12047,"heavy":12048,"sour":12049,"comprehen":12050,"noble":12051,"aap":12052,"hindu":12053,"coral":12054,"gars":12055,"owen":12056,"nl":12057,"stall":12058,"yellow":12059,"marina":12060,"inver":12061,"support":12062,"tough":12063,"promises":12064,"pie":12065,"masterpiece":12066,"score":12067,"force":12068,"mortg":12069,"cryptocurrency":12070,"ox":12071,"rors":12072,"rockin":12073,"provin":12074,"hog":12075,"nostal":12076,"oakland":12077,"patrick":12078,"inclusion":12079,"traffic":12080,"ahmed":12081,"aha":12082,"luxury":12083,"consecu":12084,"demon":12085,"âĸº":12086,"blowing":12087,"stag":12088,":\"":12089,"encourage":12090,"bene":12091,"skull":12092,"dodge":12093,"buster":12094,"kinson":12095,"witne":12096,"error":12097,"lowest":12098,"fellow":12099,"à°":12100,"shre":12101,"blur":12102,"virgin":12103,"composer":12104,"slip":12105,"mornings":12106,"gains":12107,"table":12108,"grain":12109,"arist":12110,"brazilian":12111,"wwe":12112,"tues":12113,"ribbon":12114,"anag":12115,"dist":12116,"sacrif":12117,"embrace":12118,"entrepreneur":12119,"affili":12120,"deo":12121,"tali":12122,"tourist":12123,"fatal":1
2124,"ìĬ":12125,"automatic":12126,"ðŁĩµ":12127,"weak":12128,"welfare":12129,"confirm":12130,"benjamin":12131,"fights":12132,"alleged":12133,"mead":12134,"struggling":12135,"prosecu":12136,"chef":12137,"è":12138,"proposal":12139,"ern":12140,"ðŁĺĦ":12141,"dyk":12142,"ongs":12143,"hong":12144,"mack":12145,"melon":12146,"onent":12147,"rush":12148,"dap":12149,"toler":12150,"propag":12151,"cze":12152,"translation":12153,"wallet":12154,"cottage":12155,"sail":12156,"constitution":12157,"ðŁĴĢ":12158,"munici":12159,"favor":12160,"stormhour":12161,"ih":12162,"ðŁĺĮ":12163,"approaching":12164,"pinned":12165,"jed":12166,"nigerian":12167,"nach":12168,"shat":12169,"particularly":12170,"mcdon":12171,"cameras":12172,"annie":12173,"administr":12174,"heat":12175,"electrical":12176,"charming":12177,"gibson":12178,"boutique":12179,"exposed":12180,"actor":12181,"pillow":12182,"beaches":12183,"genuine":12184,"margaret":12185,"bennett":12186,"louisi":12187,"positions":12188,"ely":12189,"shiny":12190,"tention":12191,"architect":12192,"rental":12193,"acqui":12194,"google":12195,"subway":12196,"moment":12197,"ðŁļ¨":12198,"rim":12199,"methods":12200,"cycli":12201,"norfolk":12202,"ÙĪ":12203,"overwhel":12204,"rapid":12205,"wear":12206,"happybirthday":12207,"progressive":12208,"ðŁĴ¥":12209,"cogn":12210,"papa":12211,"fool":12212,"philosophy":12213,"polar":12214,"jimmy":12215,"wig":12216,"ðŁĴĭ":12217,"operating":12218,"reduction":12219,"phi":12220,"flags":12221,"tothe":12222,"odi":12223,"ares":12224,"koo":12225,"kang":12226,"arkansas":12227,"ashton":12228,"wimbledon":12229,"scifi":12230,"attractive":12231,"mississippi":12232,"logists":12233,"ralph":12234,"label":12235,"graduates":12236,"maha":12237,"hometown":12238,"âľĮï¸ı":12239,"founded":12240,"onthe":12241,"liz":12242,"transl":12243,"minimum":12244,"presti":12245,"tam":12246,"generations":12247,"rebel":12248,"journalists":12249,"param":12250,"mcm":12251,"acrylic":12252,"deaths":12253,"tesla":12254,"wt":12255,"bryant":12256,"jerus":12257,"istanbul":12258,"muhammad":12259,"riley":12260,"kris":12261,"workshops":12262,"iso":12263,"counts":12264,"stret":12265,"protected":12266,"trinity":12267,"manual":12268,"rhin":12269,"ril":12270,"pleasant":12271,"lemon":12272,"nerd":12273,"harder":12274,"darren":12275,"bury":12276,"rah":12277,"basis":12278,"migu":12279,"occasion":12280,"lists":12281,"âĿ¤ï¸ıâĿ¤ï¸ıâĿ¤ï¸ı":12282,"eb":12283,"decre":12284,"hampton":12285,"ìĿ´":12286,"travis":12287,"transform":12288,"puerto":12289,"nhl":12290,"avoc":12291,"trips":12292,"unexpected":12293,"vet":12294,"didyou":12295,"barber":12296,"stages":12297,"mson":12298,"represented":12299,"fort":12300,"lal":12301,"pple":12302,"nicely":12303,"ignore":12304,"quil":12305,"quinn":12306,"hk":12307,"carrier":12308,"reminded":12309,"among":12310,"passenger":12311,"ellen":12312,"guez":12313,"scape":12314,"mural":12315,"youngest":12316,"mash":12317,"dill":12318,"routine":12319,"stainless":12320,"jackson":12321,"gandhi":12322,"thal":12323,"oners":12324,"editorial":12325,"conversations":12326,"sdale":12327,"automation":12328,"ike":12329,"าà¸":12330,"ðŁĩª":12331,"haul":12332,"laying":12333,"mentions":12334,"amen":12335,"abortion":12336,"ibi":12337,"counties":12338,"catherine":12339,"mands":12340,"jame":12341,"roller":12342,"aut":12343,"nam":12344,"ological":12345,"ception":12346,"ranking":12347,"toxic":12348,"snacks":12349,"victorian":12350,"bangkok":12351,"psychology":12352,"reg":12353,"angela":12354,"respond":12355,"style":12356,"sophie":12357,"dakota":12358,"achieved":12359,"marked":12360,"imperial":12361,"inas":1236
2,"gloves":12363,"slim":12364,"confident":12365,"attacked":12366,"gger":12367,"lonely":12368,"valentinesday":12369,"reb":12370,"craftbeer":12371,"origin":12372,"zimbab":12373,"ceiling":12374,"teens":12375,"otherwise":12376,"wb":12377,"fers":12378,"daysof":12379,"advisor":12380,"yah":12381,"âĻª":12382,"ender":12383,"republicans":12384,"ava":12385,"skirt":12386,"pipel":12387,"chie":12388,"jane":12389,"jax":12390,"ðŁĺĭ":12391,"âľĬ":12392,"jays":12393,"brett":12394,"balo":12395,"crucial":12396,"dhar":12397,"asis":12398,"deau":12399,"lloyd":12400,"chatting":12401,"âĿĦï¸ı":12402,"relay":12403,"remarkable":12404,"ns":12405,"wet":12406,"brisbane":12407,"ðŁĶ´":12408,"tionally":12409,"fk":12410,"layer":12411,"household":12412,"consecutive":12413,"esis":12414,"pendant":12415,"stir":12416,"critic":12417,"sugar":12418,"photoshop":12419,"pares":12420,"artistic":12421,"dodgers":12422,"cun":12423,"crafted":12424,"amend":12425,"boat":12426,"âŃIJï¸ı":12427,"egyptian":12428,"saw":12429,"trage":12430,"smaller":12431,"oxy":12432,"paired":12433,"next":12434,"ires":12435,"taco":12436,"oy":12437,"uc":12438,"sti":12439,"aerial":12440,"://":12441,"dro":12442,"dotcom":12443,"ggins":12444,"rpg":12445,"aye":12446,"lean":12447,"striker":12448,"lobby":12449,"protests":12450,"priority":12451,"congress":12452,"amate":12453,"invit":12454,"rington":12455,"mommy":12456,"thus":12457,"allowing":12458,"pioneer":12459,"enforcement":12460,"gori":12461,"talk":12462,"drag":12463,"dumb":12464,"bullet":12465,"sange":12466,"ery":12467,"targets":12468,"ðŁĩ¦":12469,"heather":12470,"consider":12471,"seafood":12472,"vest":12473,"risks":12474,"%.":12475,"pg":12476,"sacred":12477,"heating":12478,"kicked":12479,"ttot":12480,".-":12481,"chandi":12482,"coven":12483,"pool":12484,"pulse":12485,"ia":12486,"roster":12487,"shakespeare":12488,"esa":12489,"cargo":12490,"peanut":12491,"troop":12492,"action":12493,"tablet":12494,"homework":12495,"castle":12496,"struction":12497,"musicians":12498,"freezing":12499,"butt":12500,"justinbieber":12501,"jj":12502,"bahrain":12503,"anthem":12504,"audit":12505,"didyouknow":12506,"navig":12507,"guidance":12508,"âĸ¶":12509,"turf":12510,"nun":12511,"fications":12512,"yemen":12513,"charging":12514,"xc":12515,"broncos":12516,"subur":12517,"pale":12518,"boring":12519,"amongst":12520,"forthe":12521,"emper":12522,"omfg":12523,"pj":12524,"expecting":12525,"ðŁĴ«":12526,"stl":12527,"admin":12528,"expectations":12529,"swan":12530,"shoot":12531,"ooooo":12532,"minent":12533,"ãĢIJ":12534,"wallace":12535,"stang":12536,"saturday":12537,"adopted":12538,"doubles":12539,"homie":12540,"omez":12541,"dhan":12542,"venture":12543,"surrounding":12544,"file":12545,"mobility":12546,"dees":12547,"wski":12548,"brooke":12549,"embro":12550,"remembers":12551,"kara":12552,"testim":12553,"botan":12554,"mtv":12555,"sacrifice":12556,"jerusalem":12557,"dl":12558,"´":12559,"properly":12560,"ilion":12561,"asi":12562,"legit":12563,"cope":12564,"mcla":12565,"recycling":12566,"larger":12567,"ðŁĴĵ":12568,"patric":12569,"generous":12570,"jared":12571,"pf":12572,"molly":12573,"thomas":12574,"judges":12575,"hb":12576,"sorts":12577,"blvd":12578,"oven":12579,"entering":12580,"planes":12581,"beet":12582,"integration":12583,"booked":12584,"freed":12585,"vern":12586,"ashes":12587,"topped":12588,"depot":12589,"welcomed":12590,"rena":12591,"mick":12592,"dand":12593,"seeks":12594,"gamer":12595,"rankings":12596,"rene":12597,"mut":12598,"whisky":12599,"firefighters":12600,"gues":12601,"gather":12602,"tourney":12603,"demen":12604,"yang":12605,"newton":12606,"automotive"
:12607,"backyard":12608,"detailed":12609,"mist":12610,"tobac":12611,"fiber":12612,"unusual":12613,"gratitude":12614,"spare":12615,"neys":12616,":*":12617,"peri":12618,"floating":12619,"finalist":12620,"donating":12621,"dress":12622,"broad":12623,"bethe":12624,"economics":12625,"taiwan":12626,"edwards":12627,"plug":12628,"prairi":12629,"valen":12630,"baba":12631,"fad":12632,"anas":12633,"harper":12634,"disorder":12635,"applied":12636,"patt":12637,"bikin":12638,"liver":12639,"curi":12640,"caroline":12641,"anner":12642,"julian":12643,"walking":12644,"malcol":12645,"screenshot":12646,"coding":12647,"skincare":12648,"activists":12649,"mysterious":12650,"exact":12651,"blocking":12652,"mercury":12653,"batter":12654,"dump":12655,"âľĮ":12656,"ense":12657,"lish":12658,"ridiculous":12659,"protesters":12660,"ðŁĻĪ":12661,"lust":12662,"sweat":12663,"ass":12664,"alike":12665,"cody":12666,"rements":12667,"winds":12668,"aspir":12669,"vienna":12670,"pray":12671,"...@":12672,"boi":12673,"candle":12674,"assists":12675,"tee":12676,"derson":12677,"pony":12678,"fence":12679,"conspir":12680,"âĺħâĺħ":12681,"ooth":12682,"epic":12683,"barely":12684,"aunt":12685,"bam":12686,"diamonds":12687,"endless":12688,"screens":12689,"cancer":12690,"gro":12691,"pst":12692,"prospec":12693,"mosque":12694,"helpful":12695,"ouri":12696,"brother":12697,"gujar":12698,"cristi":12699,"inez":12700,"towers":12701,"addresses":12702,"gray":12703,"burton":12704,"retweeted":12705,"ðŁ¤Ķ":12706,"nity":12707,"duck":12708,"supervis":12709,"joan":12710,"kinder":12711,"sanctu":12712,"pied":12713,"âı°":12714,"łï¸ı":12715,"mati":12716,"revenge":12717,"cester":12718,"elife":12719,"designers":12720,"backed":12721,"boli":12722,"weight":12723,"couch":12724,"sures":12725,"sits":12726,"shrimp":12727,"lagos":12728,"authorities":12729,"osity":12730,"holly":12731,"computing":12732,"factors":12733,"abe":12734,"panels":12735,"ramad":12736,"sentence":12737,"mission":12738,"holm":12739,"rb":12740,"dads":12741,"shanghai":12742,"money":12743,"sheets":12744,"skate":12745,"threw":12746,"cupcakes":12747,"infinite":12748,"lis":12749,"practicing":12750,"essay":12751,"kai":12752,"asci":12753,"mob":12754,"ugh":12755,"holmes":12756,"regg":12757,"ikh":12758,"mock":12759,"collections":12760,"pep":12761,"ova":12762,"salt":12763,"nandez":12764,"coy":12765,"threats":12766,"texts":12767,"cinnam":12768,"pregnancy":12769,"pending":12770,"stamp":12771,"flower":12772,"gis":12773,"agreed":12774,"payne":12775,"rover":12776,"phra":12777,"soft":12778,"ffin":12779,"fathers":12780,"passengers":12781,"aways":12782,"ala":12783,"hes":12784,"livan":12785,"ins":12786,"samuel":12787,"ingui":12788,"hof":12789,"jj":12790,"chennai":12791,"catal":12792,"omic":12793,"heath":12794,"niece":12795,"pumped":12796,"integrated":12797,"arel":12798,"nom":12799,"productivity":12800,"wanting":12801,"visa":12802,"diana":12803,"twil":12804,"itv":12805,"camps":12806,"rowing":12807,"dley":12808,"blackand":12809,"guards":12810,"bells":12811,"reverse":12812,"vibe":12813,"ricky":12814,"moss":12815,"nyt":12816,"âĺĢï¸ı":12817,"elle":12818,"troy":12819,"cudd":12820,"evan":12821,"womens":12822,"foto":12823,"mistakes":12824,"wicked":12825,"mil":12826,"cled":12827,"memes":12828,"cosmo":12829,"scholar":12830,"reno":12831,"ðŁĺĢ":12832,"vents":12833,"#âĢ¦":12834,"terrorists":12835,"casey":12836,"cardinals":12837,"ðŁĺĬðŁĺĬ":12838,"venezuela":12839,"bola":12840,"literacy":12841,"tw":12842,"eno":12843,"contains":12844,"austin":12845,"financi":12846,"evan":12847,"harvard":12848,"originally":12849,"chevro":12850,"herald":12851,"nott
ingham":12852,"managers":12853,"âŀ¡":12854,"accepting":12855,"walsh":12856,"tutorial":12857,"entrepreneurship":12858,"yacht":12859,"requirements":12860,"glenn":12861,"pede":12862,"unfortunately":12863,"aching":12864,"daisy":12865,"gian":12866,"nightmare":12867,"âĿĹ":12868,"rina":12869,"bart":12870,"emails":12871,"opposite":12872,"whom":12873,"sake":12874,"puzzle":12875,"dashi":12876,"party":12877,"blanket":12878,"buses":12879,"lore":12880,"beauty":12881,"reason":12882,"punjab":12883,"windsor":12884,"functional":12885,"existing":12886,"hello":12887,"glimp":12888,"convin":12889,"lak":12890,"screaming":12891,"rebecca":12892,"bliss":12893,"northwest":12894,"infinity":12895,"cosmetics":12896,"pulling":12897,"coffee":12898,"pling":12899,"opho":12900,"colombia":12901,"interiordesign":12902,"(+":12903,"emotions":12904,"sac":12905,"sunglasses":12906,"saves":12907,"df":12908,"sixth":12909,"aly":12910,"ðŁĺ»":12911,"deen":12912,"devast":12913,"politicians":12914,"lacrosse":12915,"gu":12916,"pei":12917,"java":12918,"combine":12919,"coalition":12920,"erts":12921,"surviv":12922,"chad":12923,"strian":12924,"nn":12925,"devi":12926,"counc":12927,"concern":12928,"controller":12929,"breast":12930,"jury":12931,"tum":12932,"introduces":12933,"ladi":12934,"mobile":12935,"alz":12936,"steady":12937,"nurses":12938,"hacking":12939,"online":12940,"ocean":12941,"ðŁİĦ":12942,"aam":12943,"juven":12944,"icc":12945,"louisiana":12946,"arte":12947,"streetart":12948,"ison":12949,"wns":12950,"frm":12951,"panda":12952,"noir":12953,"maintain":12954,"delay":12955,"symptoms":12956,"thorn":12957,"geome":12958,"tern":12959,"carried":12960,"pru":12961,"panor":12962,"assy":12963,"peru":12964,"cloud":12965,"spra":12966,"pedi":12967,"este":12968,"tagged":12969,"ðŁĺĿ":12970,"shadows":12971,"nazi":12972,"اÙĦ":12973,"corri":12974,"âĻ¥âĻ¥":12975,"jad":12976,"ðŁĩ«":12977,"formal":12978,"spoken":12979,"ðŁĮŀ":12980,"enjoy":12981,"lopez":12982,"outlook":12983,"inho":12984,"wander":12985,"Ùħ":12986,"maya":12987,"pee":12988,"dine":12989,"ãĢij":12990,"briefing":12991,"supporter":12992,"arily":12993,"ghters":12994,"naturally":12995,"doctorwho":12996,"jen":12997,"var":12998,"newyear":12999,"rese":13000,"simm":13001,"rex":13002,"consequ":13003,"tomatoes":13004,"burst":13005,"bravo":13006,"burgers":13007,"cracking":13008,"northeast":13009,"biom":13010,"mushroom":13011,"marque":13012,"double":13013,"nier":13014,"vag":13015,"twenty":13016,"keyboard":13017,"winni":13018,"jamaica":13019,"parish":13020,":-":13021,"mentalhealth":13022,"alizing":13023,"render":13024,"waking":13025,"ðŁİĤ":13026,"gly":13027,"nathan":13028,"washing":13029,"melissa":13030,"jung":13031,"loyal":13032,"chili":13033,"songwriter":13034,"guitarist":13035,"bowie":13036,"neighbors":13037,"onymous":13038,"asset":13039,"tai":13040,"headquarters":13041,"ðŁĮĪ":13042,"ihear":13043,"cigare":13044,"surg":13045,")\"":13046,"repl":13047,"darling":13048,"ðŁĻĦ":13049,"zak":13050,"sare":13051,"ãħĭ":13052,"mickey":13053,"warehouse":13054,"massage":13055,"inees":13056,"didnt":13057,"iw":13058,"hurts":13059,"engaging":13060,"magic":13061,"womenin":13062,"kitten":13063,"mors":13064,"cart":13065,"titans":13066,"colleague":13067,"competing":13068,"eran":13069,"khal":13070,"marble":13071,"demand":13072,"delight":13073,"etary":13074,"blizz":13075,"louise":13076,"mls":13077,"finishes":13078,"experiment":13079,"conducted":13080,"electronics":13081,"itters":13082,"caring":13083,"whats":13084,"symbol":13085,"jung":13086,"ecu":13087,"pix":13088,"context":13089,"charger":13090,"ðŁĺĩ":13091,"reig":13092,"frag":13
093,"ëĭ":13094,"chad":13095,"true":13096,"kerry":13097,"defending":13098,"aint":13099,"auton":13100,"checkout":13101,"barnes":13102,"lessly":13103,"dt":13104,"mme":13105,"cloudy":13106,"secondary":13107,"arez":13108,"_:":13109,"appa":13110,"constant":13111,"\")":13112,"vets":13113,"job":13114,"ient":13115,"ðŁĺŃðŁĺŃðŁĺŃ":13116,"mj":13117,"french":13118,"diver":13119,"davies":13120,"hhhh":13121,"ebook":13122,"à¹ī":13123,"mariti":13124,"breeze":13125,"suspended":13126,"mato":13127,"viet":13128,"rahu":13129,"sei":13130,"bolt":13131,"enary":13132,"leis":13133,"karl":13134,"framed":13135,"explaining":13136,"abc":13137,"dealing":13138,"nato":13139,"jake":13140,"expand":13141,"leonard":13142,"established":13143,"dub":13144,"armen":13145,"elled":13146,"vocal":13147,"nicholas":13148,"orient":13149,"kyo":13150,"illustrated":13151,"ahh":13152,"dancers":13153,"million":13154,"geta":13155,"popp":13156,"asu":13157,"murdered":13158,"gible":13159,"stoked":13160,"griffin":13161,"maximum":13162,"adrian":13163,"encounter":13164,"thero":13165,"davidson":13166,"ðŁį»":13167,"holiday":13168,"evo":13169,"assets":13170,"carson":13171,"memorable":13172,"âļ½":13173,"obam":13174,"representative":13175,"cbd":13176,"tricks":13177,"vogue":13178,"voice":13179,"mmmm":13180,"sebastian":13181,"clif":13182,"athy":13183,"paralle":13184,"ðŁ¤·":13185,"pak":13186,"evacu":13187,"eats":13188,"اØ":13189,"touched":13190,"organised":13191,"spirits":13192,"canad":13193,"guided":13194,"framework":13195,"ðŁĮŁ":13196,"ped":13197,"natural":13198,"agar":13199,"replaced":13200,"anchor":13201,"tit":13202,"shah":13203,"organis":13204,"superior":13205,"rn":13206,"chro":13207,"erica":13208,"still":13209,"coron":13210,"chuck":13211,"locks":13212,"organ":13213,"rosen":13214,"scam":13215,"bened":13216,"/#":13217,"keen":13218,"trevor":13219,"vampire":13220,"sorted":13221,"!'":13222,"afford":13223,"intro":13224,"grace":13225,"ðŁĺľ":13226,"saur":13227,"kickstarter":13228,"influen":13229,"vu":13230,"yup":13231,"poc":13232,"ðŁİ¥":13233,"aar":13234,"sang":13235,"trek":13236,"etsy":13237,"tbh":13238,"scream":13239,"chevrolet":13240,"pixel":13241,"shepherd":13242,"anor":13243,"gabriel":13244,"twood":13245,"sdcc":13246,"meters":13247,"developers":13248,"closure":13249,"vw":13250,"twitch":13251,"ìĹ":13252,"seoul":13253,"price":13254,"hog":13255,"nish":13256,"hillary":13257,"scratch":13258,"incen":13259,"wagon":13260,"disability":13261,"panther":13262,"chats":13263,"gd":13264,"witz":13265,"sussex":13266,"late":13267,"denmark":13268,"gerald":13269,"cancelled":13270,"nette":13271,"ix":13272,"naval":13273,"baptist":13274,"tet":13275,"yad":13276,"math":13277,"hoy":13278,"randy":13279,"point":13280,"intellec":13281,"fruits":13282,"wool":13283,"guin":13284,"pron":13285,"theft":13286,"condem":13287,"marry":13288,"nola":13289,"architects":13290,"cincin":13291,"rockets":13292,"gentleman":13293,"explan":13294,"tate":13295,"doe":13296,"raises":13297,"wildlife":13298,"wl":13299,"insider":13300,"blanc":13301,"wp":13302,"forsale":13303,"nyc":13304,"powell":13305,"unbelievable":13306,"pens":13307,"goodies":13308,"mustang":13309,"pens":13310,"stays":13311,"squash":13312,"xoxo":13313,"nearby":13314,"everton":13315,"coco":13316,"leagu":13317,"khan":13318,"stud":13319,"southwest":13320,"construc":13321,"sworth":13322,"croatia":13323,"lea":13324,"sums":13325,"aims":13326,"ean":13327,"vaness":13328,"itious":13329,"pathy":13330,"arcade":13331,"bend":13332,"suggests":13333,"sacram":13334,"royals":13335,"rier":13336,"emir":13337,"incl":13338,"ank":13339,"clark":13340,"right":13341,"v
acc":13342,"ा":13343,"tane":13344,"lib":13345,"usc":13346,"sales":13347,"huh":13348,"sally":13349,"vera":13350,"pga":13351,"grows":13352,"drum":13353,"tree":13354,"ethics":13355,"suggest":13356,"isab":13357,"sealed":13358,"previously":13359,"animated":13360,"abdu":13361,"rises":13362,"glob":13363,"predat":13364,"scarf":13365,"delic":13366,"omar":13367,"lli":13368,"sxsw":13369,"python":13370,"nebra":13371,"funk":13372,"reflect":13373,"pavilion":13374,"tically":13375,"chasing":13376,"bakery":13377,"invasion":13378,"koh":13379,"believed":13380,"cohen":13381,"conqu":13382,"crafts":13383,"nati":13384,"clever":13385,"governance":13386,"samples":13387,"fails":13388,"âĶ":13389,"timo":13390,"ritu":13391,"striking":13392,"inclusive":13393,"shocking":13394,"cant":13395,"requires":13396,"drawings":13397,"à¸Ń":13398,"purchased":13399,"dum":13400,"zach":13401,"warner":13402,"console":13403,"mansion":13404,"fountain":13405,"circum":13406,"esh":13407,"island":13408,"milk":13409,"profits":13410,"halifax":13411,"rival":13412,"âľĪï¸ı":13413,"jenny":13414,"sandra":13415,"nye":13416,"kelly":13417,"yal":13418,"quad":13419,"nos":13420,"instein":13421,"finalists":13422,"midfielder":13423,"cue":13424,"exceptional":13425,"aan":13426,"sapp":13427,"gettin":13428,"saa":13429,"fati":13430,"slice":13431,"volk":13432,"swal":13433,"lasting":13434,"summary":13435,"itas":13436,"smo":13437,"sz":13438,"âĺĨ":13439,"ipl":13440,"flames":13441,"enews":13442,"hav":13443,"hoodie":13444,"pitcher":13445,"windy":13446,"revol":13447,"central":13448,"tonite":13449,"ðŁİīðŁİī":13450,"solved":13451,"milwau":13452,"organizations":13453,"weets":13454,"refin":13455,"sth":13456,"ãĥ¼":13457,"elin":13458,"tona":13459,"cinnamon":13460,"ðŁİ¨":13461,"ðŁİģ":13462,"ronaldo":13463,"peninsu":13464,"omega":13465,"elds":13466,"designing":13467,"eigh":13468,"bluet":13469,"benz":13470,"nug":13471,"asha":13472,"robots":13473,"sudan":13474,"choosing":13475,"endo":13476,"serge":13477,"closely":13478,"handy":13479,"finger":13480,"being":13481,"arte":13482,"survived":13483,"flame":13484,"milestone":13485,"gut":13486,"dwar":13487,"futures":13488,"ée":13489,"elo":13490,"fridge":13491,"elic":13492,"ouch":13493,"ub":13494,"pv":13495,"titan":13496,"collar":13497,"station":13498,"nevada":13499,"aurora":13500,"rd":13501,"duncan":13502,"âģł":13503,"brien":13504,"marsh":13505,"о":13506,"total":13507,"chry":13508,"sers":13509,"suffe":13510,"rachel":13511,"college":13512,"todays":13513,"courts":13514,"chit":13515,"reunited":13516,"gymna":13517,"genesis":13518,"beside":13519,"representation":13520,"chant":13521,"collector":13522,"rak":13523,"athens":13524,"nigh":13525,"munich":13526,"languages":13527,"flu":13528,"participation":13529,"___":13530,"cv":13531,"spectrum":13532,"soda":13533,"cover":13534,"referen":13535,"abbo":13536,"apa":13537,"publication":13538,"edm":13539,"monica":13540,"army":13541,"ðŁļĢ":13542,"divor":13543,"dry":13544,"streams":13545,"robotics":13546,"cider":13547,"bullying":13548,"approval":13549,"stoke":13550,"platforms":13551,"sierra":13552,"extin":13553,"ib":13554,"hayes":13555,"succeed":13556,"suffer":13557,"atically":13558,"dai":13559,"lynch":13560,"hound":13561,"delines":13562,"acknow":13563,"dated":13564,"exclusively":13565,"heres":13566,"facilit":13567,"damaged":13568,"charter":13569,"lakers":13570,"falcon":13571,"unveiled":13572,"welove":13573,"ease":13574,"patience":13575,"lone":13576,"gentle":13577,"genetic":13578,"producing":13579,"gour":13580,"shannon":13581,"bilities":13582,"zimbabwe":13583,"pint":13584,"daughters":13585,"literary":13586,"b
elle":13587,"clam":13588,"surrounded":13589,"kany":13590,"neil":13591,"pirate":13592,"ranger":13593,"hbd":13594,"natalie":13595,"belong":13596,"olympi":13597,"embassy":13598,"scol":13599,"ener":13600,"akin":13601,"loren":13602,"bh":13603,":/":13604,"diva":13605,"denim":13606,"hipp":13607,"ðŁĩµðŁĩ":13608,"arnold":13609,"?'":13610,"weren":13611,"empower":13612,"disabled":13613,"manor":13614,"raspberry":13615,"baf":13616,"awful":13617,"drummer":13618,"kardashi":13619,"nash":13620,"machinelearning":13621,"chu":13622,"rebels":13623,"timing":13624,"monroe":13625,"tongue":13626,"range":13627,"pupils":13628,"ress":13629,"amazon":13630,"bz":13631,"harley":13632,"palmer":13633,"balloon":13634,"sings":13635,"icec":13636,"jb":13637,"cers":13638,"gps":13639,"whist":13640,"rise":13641,"lt":13642,"oooo":13643,"cattle":13644,"shooter":13645,"vodka":13646,"ucl":13647,"mtg":13648,"lesli":13649,"jonas":13650,"dispo":13651,"atric":13652,"stein":13653,"vintage":13654,"firms":13655,"floyd":13656,"cowboy":13657,"soooo":13658,"isaac":13659,"warcraft":13660,"disneyland":13661,"beautiful":13662,"beam":13663,"franchise":13664,"bun":13665,"kag":13666,"anon":13667,"turbo":13668,"sweep":13669,"madein":13670,"karachi":13671,"detective":13672,"pennsylvania":13673,"controversi":13674,"vitamin":13675,"aside":13676,"chronic":13677,"describes":13678,"removal":13679,"hah":13680,"aper":13681,"tened":13682,"uto":13683,"badly":13684,"mirac":13685,"fry":13686,"yea":13687,"injec":13688,"thermal":13689,"compact":13690,"thor":13691,"teed":13692,"urgent":13693,"lite":13694,"gilli":13695,"sophom":13696,"ico":13697,"chem":13698,"pm":13699,"fork":13700,"freak":13701,"chak":13702,"recipient":13703,"iy":13704,"nik":13705,"modeling":13706,"cans":13707,"ðŁıĢ":13708,"delux":13709,"seam":13710,"survivors":13711,"radical":13712,"investigating":13713,"reliable":13714,"fm":13715,"turt":13716,"lighthouse":13717,"tool":13718,"gown":13719,"))":13720,"bots":13721,"autograph":13722,"aid":13723,"buffe":13724,"hmm":13725,"horrible":13726,"ssional":13727,"anni":13728,"à¹Ģ":13729,"kits":13730,"schi":13731,"eternal":13732,"huss":13733,"sensitive":13734,"ru":13735,"tastes":13736,"checks":13737,"imo":13738,"portion":13739,"skate":13740,"eden":13741,"halftime":13742,"fried":13743,"rihanna":13744,"tise":13745,"flick":13746,"cain":13747,"sgt":13748,"âľĶ":13749,"shau":13750,"stained":13751,"raffle":13752,"drove":13753,"salman":13754,"principles":13755,"sho":13756,"aru":13757,"jess":13758,"guine":13759,"garbage":13760,"myan":13761,"jelly":13762,"disru":13763,"zia":13764,"qld":13765,"entries":13766,"lav":13767,"flew":13768,"admit":13769,"objects":13770,"compare":13771,"nytimes":13772,"cannes":13773,"pn":13774,"suffol":13775,"roc":13776,"dana":13777,"egg":13778,"hist":13779,"counsel":13780,"'!":13781,"physi":13782,"imagination":13783,"adjust":13784,"explosion":13785,"plymouth":13786,"horror":13787,"elliott":13788,"bourne":13789,"dex":13790,"breed":13791,"audio":13792,"lobster":13793,"disappointed":13794,"nationwide":13795,"((":13796,"increases":13797,"australi":13798,"cedar":13799,"staring":13800,"racial":13801,"eis":13802,"gmt":13803,"visions":13804,"stayed":13805,"discussions":13806,"dean":13807,"curtis":13808,"maiden":13809,"stellar":13810,"happiest":13811,"hwy":13812,"preseason":13813,"carav":13814,"mondays":13815,"hospitals":13816,"glimpse":13817,"scholars":13818,"jai":13819,"terrace":13820,"anna":13821,"goose":13822,"graded":13823,"lotus":13824,"hung":13825,"grocery":13826,"stamps":13827,"emperor":13828,"scoop":13829,"inser":13830,"cas":13831,"existence":1383
2,"heal":13833,"falcons":13834,"marvel":13835,"reducing":13836,"terrific":13837,"magnetic":13838,"performs":13839,"barre":13840,"pus":13841,"treating":13842,"icon":13843,"wh":13844,"declared":13845,"trauma":13846,"dod":13847,"comedian":13848,"nikon":13849,"bugs":13850,"asm":13851,"montgom":13852,"ibiza":13853,"comprehensive":13854,"has":13855,"santi":13856,"fellowship":13857,"dash":13858,"psal":13859,"louisville":13860,"spy":13861,"fault":13862,"dthe":13863,"filed":13864,"vista":13865,"desc":13866,"fears":13867,"youtu":13868,"sps":13869,"esp":13870,"rig":13871,"crime":13872,"berger":13873,"wonderland":13874,"kent":13875,"informed":13876,"stevens":13877,"myth":13878,"aston":13879,"iri":13880,"visitor":13881,"atri":13882,"producers":13883,"alla":13884,"personally":13885,"separate":13886,"agencies":13887,"afri":13888,"ilan":13889,"spoke":13890,"nina":13891,"squad":13892,"dives":13893,"depend":13894,"liv":13895,"fierce":13896,"entertaining":13897,"chain":13898,"scat":13899,"borders":13900,"palette":13901,"spro":13902,"osis":13903,"derby":13904,"tobacco":13905,"zio":13906,"willie":13907,"juvent":13908,"zoom":13909,"holy":13910,"entirely":13911,"afe":13912,"martinez":13913,"beds":13914,"pea":13915,"bulldogs":13916,"ðŁĩªðŁĩ":13917,"ibm":13918,"neon":13919,"ethiopia":13920,"teammates":13921,"planting":13922,"twer":13923,"anytime":13924,"forbes":13925,"ón":13926,"runway":13927,"nervous":13928,"roger":13929,"pile":13930,"chanc":13931,"apocaly":13932,"uw":13933,"oi":13934,"drought":13935,"territory":13936,"brick":13937,"creatures":13938,"goin":13939,"waff":13940,"gren":13941,"southeast":13942,"jean":13943,"ambul":13944,"edited":13945,"strap":13946,"cv":13947,"aaron":13948,"ãĥ»ãĥ»":13949,"tsu":13950,"description":13951,"kindly":13952,"clutch":13953,"immer":13954,"enor":13955,"womensday":13956,"orange":13957,"rag":13958,"obvious":13959,"hyder":13960,"channels":13961,"mango":13962,"meyer":13963,"raining":13964,"getty":13965,"pilgri":13966,"coordinator":13967,"upload":13968,"nintendo":13969,"donuts":13970,"sanchez":13971,"apparel":13972,"jr":13973,"zzi":13974,",@":13975,"jefferson":13976,"accessible":13977,"greatly":13978,"eid":13979,"initial":13980,"buddha":13981,"paris":13982,"mascot":13983,"â¬ĩï¸ı":13984,"schwar":13985,"siri":13986,"spinning":13987,"mortgage":13988,"echo":13989,"endange":13990,"gedly":13991,"chloe":13992,"enhance":13993,"karnat":13994,"kry":13995,"explores":13996,"ðŁĴģ":13997,"affair":13998,"icals":13999,"alla":14000,"dart":14001,"dolphins":14002,"differences":14003,"squirrel":14004,"augh":14005,"drones":14006,"ellen":14007,"restore":14008,"paw":14009,"unfor":14010,"pike":14011,"hilton":14012,"collab":14013,"consumers":14014,"coinci":14015,"outcomes":14016,"ppp":14017,"aq":14018,"coupon":14019,"liest":14020,"sims":14021,"kho":14022,"aves":14023,"spoon":14024,"pudding":14025,"corbyn":14026,"haters":14027,"exams":14028,"slave":14029,".!":14030,"psa":14031,"apples":14032,"tamil":14033,"sed":14034,"coke":14035,"zzo":14036,"losange":14037,"carbon":14038,"clair":14039,"...)":14040,"khu":14041,"craig":14042,"exploration":14043,"sanctuary":14044,"sue":14045,"alway":14046,"dementia":14047,"wonders":14048,"superhero":14049,"pakistani":14050,"browns":14051,"bluetooth":14052,"locker":14053,"marc":14054,"eventu":14055,"deluxe":14056,"rodriguez":14057,"âĿ¤âĿ¤":14058,"robb":14059,"ðŁĴ¦":14060,"linux":14061,"tens":14062,"intelligent":14063,"seed":14064,"voter":14065,"sler":14066,"peaks":14067,"intern":14068,"teenage":14069,"peninsula":14070,"handling":14071,"tie":14072,"cousins":14073,"wendy":14074,"m
ee":14075,"à¹Ģà¸":14076,"dino":14077,"ðŁĴ°":14078,"ðŁĺĥ":14079,"zee":14080,"sbury":14081,"tragedy":14082,"bk":14083,"bore":14084,"zin":14085,"warns":14086,"idiot":14087,"touching":14088,"continental":14089,"tacos":14090,"safari":14091,"washed":14092,"podium":14093,"morrison":14094,"forests":14095,"cbc":14096,"alon":14097,"particular":14098,"beads":14099,"invented":14100,"loch":14101,"lighter":14102,"wherever":14103,"ide":14104,"documents":14105,"awe":14106,"kr":14107,"nowhere":14108,"miner":14109,"stit":14110,"rox":14111,"contribute":14112,"hardy":14113,"clan":14114,"object":14115,"cait":14116,"ðŁĴķðŁĴķ":14117,"happier":14118,"vegetables":14119,"tart":14120,"gag":14121,"nominee":14122,"heavily":14123,"panic":14124,"jd":14125,"theresa":14126,"atm":14127,"uph":14128,"sfc":14129,"suri":14130,"drink":14131,"nal":14132,"revel":14133,"kl":14134,"avocado":14135,"nomination":14136,"madonna":14137,"sharon":14138,"malcolm":14139,"controlled":14140,"shers":14141,"revival":14142,"legislation":14143,"shoots":14144,"nin":14145,"commentary":14146,"pros":14147,"humanrights":14148,"stranger":14149,"mitch":14150,"pipeline":14151,"legally":14152,"thu":14153,"gilbert":14154,"toll":14155,"granted":14156,"ghs":14157,"iranian":14158,"refreshing":14159,"duk":14160,"abi":14161,"prime":14162,"joseph":14163,"mosa":14164,"statistics":14165,"productions":14166,"merry":14167,"patel":14168,"sax":14169,"humanitarian":14170,"structures":14171,"emissions":14172,"towns":14173,"freel":14174,"stering":14175,"ratings":14176,"allegedly":14177,"cabin":14178,"stl":14179,"wade":14180,"flyers":14181,"trim":14182,"promising":14183,"zu":14184,"ballot":14185,"comparison":14186,"freeze":14187,"outer":14188,"greatness":14189,"assign":14190,"snowy":14191,"rale":14192,"tories":14193,"mediter":14194,"knock":14195,"consultant":14196,"cincinnati":14197,"analyst":14198,"scoo":14199,"jews":14200,"approxim":14201,"pure":14202,"portraits":14203,"cyrus":14204,"ational":14205,"loans":14206,"acquis":14207,"elu":14208,"acceptable":14209,"union":14210,"watercolor":14211,"rust":14212,"battles":14213,"perfu":14214,"seasonal":14215,"serial":14216,"mindset":14217,"riot":14218,"feld":14219,"ennial":14220,"closet":14221,"priest":14222,"tanks":14223,"intl":14224,"screw":14225,"bum":14226,"abdul":14227,"oux":14228,"explained":14229,"rica":14230,"imaging":14231,"lawyers":14232,"buried":14233,"ãĥ»ãĥ»ãĥ»":14234,"earl":14235,"âĢķ":14236,"lton":14237,"restored":14238,"stripes":14239,"foss":14240,"demands":14241,"stealing":14242,"alexis":14243,"mund":14244,"aker":14245,"urus":14246,"wardro":14247,"hugs":14248,"genre":14249,"ego":14250,"ÙĦ":14251,"participated":14252,"babes":14253,"banquet":14254,"tious":14255,"hemi":14256,"dsb":14257,"lost":14258,"milwaukee":14259,"jenner":14260,"gem":14261,"outra":14262,"loses":14263,"idi":14264,"reps":14265,"ðŁİ§":14266,"regulation":14267,"flaw":14268,"fang":14269,"vibrant":14270,"ramp":14271,"rains":14272,"wellbeing":14273,"soviet":14274,"viewers":14275,"depo":14276,"libraries":14277,"bigo":14278,"sery":14279,"gill":14280,"destruction":14281,"coz":14282,"cx":14283,"bridal":14284,"alds":14285,"planted":14286,"amateur":14287,"lud":14288,"cheering":14289,"showcas":14290,"profile":14291,"iu":14292,"vertical":14293,"packers":14294,"wizard":14295,"skip":14296,"slight":14297,"beau":14298,"airways":14299,"much":14300,"rera":14301,"ðŁĮĬ":14302,"absor":14303,"patio":14304,"packages":14305,"sells":14306,"mentally":14307,"ðŁĺ¢":14308,"reynolds":14309,"kare":14310,"tribun":14311,"walt":14312,"knit":14313,"taste":14314,"surrey":14315,"bounce"
:14316,"creature":14317,"bare":14318,"betting":14319,"sure":14320,"miley":14321,"laughs":14322,"alore":14323,"cyn":14324,"tl":14325,"artist":14326,"annah":14327,"warmer":14328,"dynamics":14329,"lunchtime":14330,"maritime":14331,"vulnerable":14332,"ðŁĴĥ":14333,"wolver":14334,"durham":14335,"constantly":14336,"amin":14337,"sibl":14338,":@":14339,"bullet":14340,"kach":14341,"angelo":14342,"wilder":14343,"doom":14344,"desktop":14345,"lawsuit":14346,"kca":14347,"henderson":14348,"inviting":14349,"betty":14350,"tawards":14351,"rafa":14352,"leaked":14353,"andi":14354,"gems":14355,"afl":14356,"velo":14357,"mediterran":14358,"probe":14359,"totten":14360,"stephanie":14361,"snation":14362,"combe":14363,"qs":14364,"overcome":14365,"assassin":14366,"rav":14367,"filip":14368,"winnipeg":14369,"shil":14370,"determined":14371,"kas":14372,"outre":14373,"regret":14374,"guides":14375,"aaa":14376,"ðŁĺĪ":14377,"wives":14378,"manife":14379,"erly":14380,"smy":14381,"shima":14382,"xing":14383,"pixel":14384,"jacob":14385,"accommod":14386,"toy":14387,"ono":14388,"poo":14389,"tier":14390,"answe":14391,"ðŁĴģ":14392,"rosa":14393,"lease":14394,"belongs":14395,"thar":14396,"eventually":14397,"neither":14398,"goa":14399,"skiing":14400,"atra":14401,"agh":14402,"broadcasting":14403,"fury":14404,"pyram":14405,"dice":14406,"volkswag":14407,"womens":14408,"provider":14409,"bombs":14410,"missile":14411,"whip":14412,"dick":14413,"norwe":14414,"backup":14415,"elder":14416,"mature":14417,"concerts":14418,"gious":14419,"squee":14420,"goodmorning":14421,"braves":14422,"^_":14423,"aussie":14424,"luna":14425,"males":14426,"heck":14427,"fortn":14428,"romeo":14429,"steelers":14430,"pn":14431,"peer":14432,"represents":14433,"«":14434,"katy":14435,"miguel":14436,"require":14437,"chains":14438,"lur":14439,"immediate":14440,"timber":14441,"âĸ¶ï¸ı":14442,"advocacy":14443,"export":14444,"anz":14445,"tiffany":14446,"author":14447,"ðŁİĪ":14448,"dudes":14449,"chilly":14450,"hid":14451,"harm":14452,"bug":14453,"monster":14454,"terrier":14455,"tuc":14456,"storytelling":14457,"tak":14458,"inti":14459,"immigrants":14460,"bis":14461,"reaches":14462,"compassion":14463,"johnny":14464,"contributions":14465,"ðŁIJ¶":14466,"mechanical":14467,"impression":14468,"ranks":14469,"kobe":14470,"menting":14471,"blossom":14472,"pablo":14473,"builder":14474,"bombing":14475,"twel":14476,"sullivan":14477,"omo":14478,"pete":14479,"demi":14480,"kudos":14481,"wbb":14482,"tgif":14483,"massach":14484,"neighbor":14485,"chefs":14486,"engines":14487,"pune":14488,"gained":14489,"phantom":14490,"sdays":14491,"extend":14492,"gran":14493,"centers":14494,"jacqu":14495,"datasci":14496,"sleepy":14497,"elvis":14498,"answered":14499,"slot":14500,"cony":14501,"flexible":14502,"tially":14503,"letics":14504,"%,":14505,"andrews":14506,"sible":14507,"momma":14508,"vino":14509,"dox":14510,"invitational":14511,"twilight":14512,"jade":14513,"illery":14514,"johns":14515,"fou":14516,"pv":14517,"--->":14518,"breakdown":14519,"billion":14520,"printer":14521,"mond":14522,"cbc":14523,"maggie":14524,"legion":14525,"dub":14526,"kurt":14527,"poor":14528,"parenting":14529,"regions":14530,"bikini":14531,"beware":14532,"sional":14533,"auburn":14534,"kidding":14535,"amples":14536,"span":14537,"contempor":14538,"cic":14539,"habits":14540,"ako":14541,"prefe":14542,"buddies":14543,"itz":14544,"emily":14545,"personnel":14546,"mountain":14547,"versus":14548,"ðŁĺ¬":14549,"earning":14550,"sink":14551,"dari":14552,"uu":14553,"swin":14554,"ister":14555,"brutal":14556,"nac":14557,"kata":14558,"cloth":14559,"amand":1
4560,"ðŁĶĹ":14561,"neo":14562,"alumin":14563,"weekends":14564,"nebraska":14565,"codes":14566,"delayed":14567,"bruno":14568,"proven":14569,"inc":14570,"ight":14571,"flan":14572,"oro":14573,"lambert":14574,"regulat":14575,"wf":14576,"massachuse":14577,"kardashian":14578,"bernard":14579,"fiesta":14580,"volcano":14581,"grandpa":14582,"anca":14583,"dre":14584,"stitu":14585,"meaning":14586,"foam":14587,"auck":14588,"ated":14589,"rl":14590,"hotel":14591,"persons":14592,"dynasty":14593,"ellor":14594,"mai":14595,"amne":14596,"styling":14597,"avier":14598,"eg":14599,"vegetarian":14600,",âĢ¦":14601,"founders":14602,"stain":14603,"gd":14604,"cycles":14605,"skyline":14606,"tractor":14607,"exists":14608,"tral":14609,"kidney":14610,"maril":14611,"instag":14612,"sette":14613,"addict":14614,"triangle":14615,"flashback":14616,"controversial":14617,"zon":14618,"pins":14619,"ias":14620,"tray":14621,"township":14622,"delegates":14623,"spam":14624,"hms":14625,"crane":14626,"peoples":14627,"olo":14628,"faction":14629,"butes":14630,"onica":14631,"delegation":14632,"newprofile":14633,"elier":14634,"mca":14635,"wand":14636,"gely":14637,"losangeles":14638,"berke":14639,"tive":14640,"disrup":14641,"zza":14642,"casa":14643,"jordan":14644,"fordshire":14645,"gathered":14646,"ichi":14647,"attendees":14648,"à¸Ńà¸":14649,"peppers":14650,"coin":14651,"bourbon":14652,"ernity":14653,"rotary":14654,"behaviour":14655,"jeremy":14656,"teamwork":14657,"compliance":14658,"tremend":14659,"ðŁĩ§":14660,"buhari":14661,"cambo":14662,"buyers":14663,"hagen":14664,"buds":14665,"bayern":14666,"monte":14667,"smells":14668,"anza":14669,"athlon":14670,"described":14671,"workforce":14672,"giving":14673,"api":14674,"investments":14675,"dail":14676,"selena":14677,"database":14678,"thum":14679,"mortal":14680,"student":14681,"buyer":14682,"dover":14683,"garten":14684,"attle":14685,"loyalty":14686,"genoci":14687,"holocau":14688,"theaters":14689,"ruling":14690,"venus":14691,"patent":14692,"chun":14693,"abby":14694,"awake":14695,"massacre":14696,"bangalore":14697,"breaking":14698,"simmons":14699,"justi":14700,"hale":14701,"edchat":14702,"ggles":14703,"hawk":14704,"marking":14705,"headlines":14706,"strom":14707,"cove":14708,"breathtaking":14709,"medals":14710,"haircut":14711,"christine":14712,"telegraph":14713,"gujarat":14714,"jura":14715,"cane":14716,"shore":14717,"propaganda":14718,"mueller":14719,"........":14720,"savi":14721,"stomach":14722,"throws":14723,"tab":14724,"warm":14725,"jong":14726,"renowned":14727,"hir":14728,"rais":14729,"mushrooms":14730,"guaranteed":14731,"boa":14732,"mj":14733,"revolutionary":14734,"certification":14735,"bruins":14736,"join":14737,"wes":14738,"passport":14739,"cg":14740,"sexu":14741,"capable":14742,"wv":14743,"tones":14744,"jackets":14745,"accompan":14746,"spinach":14747,"forever":14748,"blair":14749,"watts":14750,"gl":14751,"couples":14752,"prairie":14753,"newprofilepic":14754,"logistics":14755,"massachusetts":14756,"jaguar":14757,"oid":14758,"weal":14759,"underwater":14760,"moz":14761,"yi":14762,"maths":14763,"myanmar":14764,"preps":14765,"suffered":14766,"trace":14767,"wali":14768,"ahhh":14769,"borg":14770,"stitch":14771,"culin":14772,"realise":14773,"infection":14774,"discrimination":14775,"shame":14776,"ankle":14777,"humid":14778,"yt":14779,"bracket":14780,"truck":14781,"triu":14782,"easter":14783,"community":14784,"postcard":14785,"involving":14786,"tyler":14787,"caramel":14788,"overview":14789,"examples":14790,"integrity":14791,"basement":14792,"instruments":14793,"anium":14794,"atus":14795,"gher":14796,"laund
ry":14797,"achieve":14798,"geneva":14799,"pricing":14800,"hyderabad":14801,"belief":14802,"meta":14803,"jaw":14804,"accounting":14805,"leader":14806,"cristiano":14807,"couture":14808,"cyp":14809,"vised":14810,",,,":14811,"knu":14812,"hick":14813,"breaker":14814,"bram":14815,"rab":14816,"moor":14817,"hamas":14818,"graduating":14819,"puppies":14820,"akh":14821,"tah":14822,"aches":14823,"rie":14824,"opini":14825,"gta":14826,"reign":14827,"tragic":14828,"rever":14829,"pill":14830,"pineapple":14831,"touches":14832,"dare":14833,"leys":14834,"ilo":14835,"interiors":14836,"scouts":14837,"bart":14838,"enzie":14839,"dono":14840,"brock":14841,"christians":14842,"ensemble":14843,"·":14844,"cinemas":14845,"newport":14846,"airline":14847,"winston":14848,"leigh":14849,"contents":14850,"prescri":14851,"urge":14852,"trout":14853,"fically":14854,"ilia":14855,"subsi":14856,"arer":14857,"âļ¾ï¸ı":14858,"wounded":14859,"ðŁĻĤ":14860,"pepper":14861,"ðŁĴŀ":14862,"fitted":14863,"aff":14864,"resur":14865,"thursdaythoughts":14866,"zero":14867,"archaeology":14868,"div":14869,"jee":14870,"ion":14871,"awaiting":14872,"cozy":14873,"beauties":14874,"bald":14875,"data":14876,"grizz":14877,"stalk":14878,"kinds":14879,"cleared":14880,"jessic":14881,"regular":14882,"aliens":14883,"place":14884,"bos":14885,"bizar":14886,"thisis":14887,"ðŁĴĢ":14888,"tottenham":14889,"mafia":14890,"slam":14891,"ariana":14892,"carroll":14893,"backpack":14894,"carey":14895,"univ":14896,"rg":14897,"pep":14898,"digit":14899,"tattoos":14900,"agon":14901,"volunteering":14902,"differen":14903,"consumption":14904,"kathr":14905,"headphones":14906,"tshirt":14907,"ob":14908,"element":14909,"retail":14910,"shru":14911,"algori":14912,"container":14913,"conscious":14914,"fil":14915,"coming":14916,"rash":14917,"urope":14918,"define":14919,"gior":14920,"feminist":14921,"flowing":14922,"routes":14923,"glaci":14924,"fert":14925,"somerset":14926,"antes":14927,"tweeps":14928,"$$":14929,"hour":14930,"endangered":14931,"yearsof":14932,"roh":14933,"popped":14934,"backing":14935,"basil":14936,"brake":14937,"monaco":14938,"lgbtq":14939,"prague":14940,"utility":14941,"cassi":14942,"gateway":14943,"haunted":14944,"schul":14945,"ðŁİµ":14946,"should":14947,"walkingdead":14948,"completing":14949,"danny":14950,"montgomery":14951,"penguin":14952,"ssi":14953,"merchandi":14954,"ðŁijij":14955,"church":14956,"hates":14957,"captain":14958,"breathing":14959,"cet":14960,"fairly":14961,"approaches":14962,"companion":14963,"surprising":14964,"kanye":14965,"pey":14966,"hindi":14967,"targeted":14968,"lords":14969,"deut":14970,"digging":14971,"german":14972,"rut":14973,"energy":14974,"closest":14975,"yun":14976,"apologi":14977,"ั":14978,"sack":14979,"rup":14980,"ddy":14981,"portal":14982,"dough":14983,"bats":14984,"ðŁĵ°":14985,"atur":14986,"grapher":14987,"pires":14988,"motors":14989,"ðŁĮ¹":14990,"jc":14991,"dang":14992,"tuk":14993,"clue":14994,"usc":14995,"page":14996,"dless":14997,"brows":14998,"jus":14999,"ading":15000,"remarks":15001,"oom":15002,"cardio":15003,"stefan":15004,"armstrong":15005,"âĢ¢âĢ¢":15006,"niest":15007,"belgian":15008,"biop":15009,"soy":15010,"lof":15011,"íĥ":15012,"qt":15013,"flashbackfriday":15014,"cee":15015,"ģà¸":15016,"wreck":15017,"marines":15018,"amendment":15019,"wardrobe":15020,"voy":15021,"burned":15022,"guitars":15023,"rainf":15024,"lifel":15025,"ssil":15026,"ounce":15027,"external":15028,"ckey":15029,"mesh":15030,"sheikh":15031,"invitation":15032,"suggesti":15033,"popcorn":15034,"phenomenal":15035,"anonymous":15036,"tuna":15037,"chicago":15038,"oval":1503
9,"dely":15040,"locals":15041,"(&":15042,"prof":15043,"novel":15044,"finder":15045,"sparks":15046,"laven":15047,"infu":15048,"nicks":15049,"quant":15050,"rae":15051,"exec":15052,"distingui":15053,"stances":15054,"mutual":15055,"shal":15056,"unveils":15057,"edmonton":15058,"zania":15059,"adio":15060,"viewer":15061,"bradford":15062,"auditorium":15063,"quis":15064,"react":15065,"http":15066,"lero":15067,"cheeky":15068,"impacts":15069,"tak":15070,"edt":15071,"desperate":15072,"tay":15073,"ìĦ":15074,"settle":15075,"bargain":15076,"resume":15077,"unite":15078,"thrown":15079,"kest":15080,"seys":15081,"marching":15082,"amit":15083,"decline":15084,"schar":15085,"metr":15086,"stanford":15087,"linke":15088,"berra":15089,"dolls":15090,"rugby":15091,"jami":15092,"bor":15093,"roadtrip":15094,"dinosaur":15095,"mik":15096,"sunder":15097,"rem":15098,"bk":15099,"overseas":15100,"naughty":15101,"implementation":15102,"iamsrk":15103,"luncheon":15104,"firing":15105,"miami":15106,"perez":15107,"thee":15108,"zon":15109,"gifted":15110,"conversion":15111,"ceramic":15112,"¡ï¸ı":15113,"pedro":15114,"ìĨ":15115,"vick":15116,"!@":15117,"heed":15118,"sid":15119,"bw":15120,"document":15121,"plun":15122,"grants":15123,"fantasy":15124,"predictions":15125,"valid":15126,"carved":15127,"graduated":15128,"ðŁijįðŁı»":15129,"nationally":15130,"chy":15131,"afl":15132,"resso":15133,"blank":15134,"rivals":15135,"jig":15136,"eties":15137,"omics":15138,"unemp":15139,"bound":15140,"sko":15141,"inspection":15142,"paral":15143,"highs":15144,"crisp":15145,"bans":15146,"oba":15147,"[@":15148,"cospla":15149,"costumes":15150,"recall":15151,"mouth":15152,"nigel":15153,"bts":15154,"tera":15155,"kov":15156,"docs":15157,"westminster":15158,"dict":15159,"gravity":15160,"kari":15161,"rogue":15162,"tted":15163,"wark":15164,"idaho":15165,"wend":15166,"awi":15167,"queensland":15168,"processes":15169,"cliffe":15170,"mick":15171,"compens":15172,"opol":15173,"they":15174,"clari":15175,"wikipedia":15176,"salmankhan":15177,"hazard":15178,"preston":15179,"sweetest":15180,"pdf":15181,"chees":15182,"trilo":15183,"southafrica":15184,"burnt":15185,"($":15186,"contain":15187,"tp":15188,"submitted":15189,"soundcloud":15190,"atu":15191,"rez":15192,"wordpress":15193,"corrupt":15194,"nf":15195,"maker":15196,"íķ":15197,"paras":15198,"advent":15199,"rial":15200,"cafe":15201,"fossil":15202,"!!!!!!!":15203,"cows":15204,"cj":15205,"spur":15206,"institutions":15207,"landmark":15208,"entit":15209,"reut":15210,"his":15211,"alzheim":15212,"wemb":15213,"reggae":15214,"mosqu":15215,"stat":15216,"identified":15217,"dealer":15218,"ream":15219,"reland":15220,"tension":15221,"ðŁĩ©":15222,"wrapping":15223,"deeper":15224,"frat":15225,"reddit":15226,"aris":15227,"morocco":15228,"..\"":15229,"blow":15230,"mapping":15231,"priorities":15232,"inga":15233,"swap":15234,"rewards":15235,"conspiracy":15236,"creative":15237,"cj":15238,"congressional":15239,"vault":15240,"plex":15241,"sophomore":15242,"shadow":15243,"eless":15244,"ðŁĺħ":15245,"darts":15246,"aldub":15247,"annoying":15248,"props":15249,"nas":15250,"aluminum":15251,"hbo":15252,"offense":15253,"jill":15254,"onions":15255,"laur":15256,"tae":15257,"hardest":15258,"shro":15259,"gaining":15260,"measure":15261,"edtech":15262,"cyprus":15263,"tara":15264,"angeli":15265,"carlo":15266,"goon":15267,"alli":15268,"implic":15269,"jupit":15270,"resilience":15271,"hail":15272,"balanced":15273,")...":15274,"joyce":15275,"gra":15276,"theli":15277,"defined":15278,"shipped":15279,"mainly":15280,"mina":15281,"lm":15282,"sacri":15283,"ober":15284,"pim
":15285,"claiming":15286,"enters":15287,"corey":15288,"bok":15289,"cried":15290,"cooling":15291,"danielle":15292,"pharmacy":15293,"thorough":15294,"cake":15295,"klo":15296,"outreach":15297,"zens":15298,"digitalmarketing":15299,"valent":15300,"snp":15301,"herb":15302,"mrw":15303,"café":15304,"captures":15305,"notre":15306,"triumph":15307,"pancakes":15308,"cumber":15309,"spike":15310,"dation":15311,"bigg":15312,"sper":15313,"critical":15314,"amal":15315,"tooth":15316,"founding":15317,"astro":15318,"'#":15319,"quantum":15320,"thames":15321,"unc":15322,"pride":15323,"airbus":15324,"knocked":15325,"undefeated":15326,"mediterranean":15327,"calcu":15328,"clown":15329,"sensor":15330,"hammer":15331,"forgive":15332,"cushi":15333,"berry":15334,"majestic":15335,"elect":15336,"politan":15337,"gta":15338,"kari":15339,"burke":15340,"seahawks":15341,"volkswagen":15342,"rei":15343,"landscapes":15344,"casu":15345,"grandfather":15346,"listened":15347,"//":15348,"startrek":15349,"rainfall":15350,"furry":15351,"vier":15352,"stark":15353,"rifle":15354,"ffa":15355,"leges":15356,"hillaryclinton":15357,"minus":15358,"correctly":15359,"architectural":15360,"prece":15361,"upside":15362,"boxer":15363,"ðŁĻĮðŁı¼":15364,"isai":15365,"det":15366,"provo":15367,"tissue":15368,"spooky":15369,"veled":15370,"recon":15371,"prospects":15372,"quebec":15373,"âļ«":15374,"igno":15375,"anatomy":15376,"shapes":15377,"wp":15378,"pinterest":15379,"hore":15380,"anes":15381,"pickup":15382,"tip":15383,"pradesh":15384,"hugh":15385,"coe":15386,"pok":15387,"grammy":15388,"wellington":15389,"stigate":15390,"righ":15391,"leap":15392,"kingston":15393,"scenic":15394,"gosh":15395,"vani":15396,"aug":15397,"sary":15398,"zier":15399,"bureau":15400,"linson":15401,"conte":15402,"fragr":15403,"allan":15404,"gaw":15405,"lana":15406,"collision":15407,"surveill":15408,"renais":15409,"arrange":15410,"sali":15411,"doin":15412,"brance":15413,"brendan":15414,"ourse":15415,"incoming":15416,"suspension":15417,"à´":15418,"lla":15419,"educators":15420,"intri":15421,"dae":15422,"biography":15423,"bulgar":15424,"villain":15425,"gothic":15426,"rwanda":15427,"ew":15428,"mayor":15429,"meetup":15430,"democrat":15431,"morgan":15432,"sudden":15433,"tesco":15434,"carrot":15435,"bomber":15436,"mckin":15437,"rene":15438,"funday":15439,"agricultural":15440,"hahah":15441,"showtime":15442,"forming":15443,"cola":15444,"scorpi":15445,"quote":15446,"poppy":15447,"slife":15448,"daz":15449,"tub":15450,"nen":15451,"mot":15452,"ðŁĺ»":15453,"sore":15454,"elderly":15455,"ove":15456,"skinny":15457,"umi":15458,"anco":15459,"manship":15460,"were":15461,"gv":15462,"kah":15463,"folding":15464,"neat":15465,"samantha":15466,"danish":15467,"ukrain":15468,"humidity":15469,"nutri":15470,"jakarta":15471,"candles":15472,"oooooooo":15473,"atile":15474,"strength":15475,"ibra":15476,"bapti":15477,"charleston":15478,"frames":15479,"girls":15480,"clearing":15481,"gluten":15482,"##":15483,"supernatural":15484,"jubi":15485,"phone":15486,"hein":15487,"drun":15488,"leak":15489,"investor":15490,"yer":15491,"domain":15492,"ballroom":15493,"mish":15494,"appli":15495,"offshore":15496,"blaze":15497,"doro":15498,"âĺķï¸ı":15499,"winery":15500,"sharif":15501,"adore":15502,"nir":15503,"safer":15504,"sigh":15505,"ascri":15506,"strongly":15507,"tracy":15508,"cker":15509,"oll":15510,"faithful":15511,"eyed":15512,"delightful":15513,"vism":15514,"karnataka":15515,"titan":15516,"whar":15517,"jerseys":15518,"refur":15519,"heaven":15520,"grip":15521,"panama":15522,"preli":15523,"gluten":15524,"odd":15525,"content":15526,"po
nti":15527,"tioning":15528,"ecommerce":15529,"federation":15530,"flawless":15531,"gear":15532,"tires":15533,"byr":15534,"police":15535,"cuban":15536,"tributes":15537,"ticul":15538,"churches":15539,"nursery":15540,"diaries":15541,"museums":15542,"snapped":15543,"ivan":15544,"wight":15545,"tourists":15546,"ramadan":15547,"trent":15548,"prophet":15549,"wondered":15550,"focusing":15551,"hid":15552,"icons":15553,"iq":15554,"ambulance":15555,"pist":15556,"funniest":15557,"timeless":15558,"srilan":15559,"buys":15560,"kids":15561,"colourful":15562,"ashi":15563,"chir":15564,"mum":15565,"ðŁĵļ":15566,"letter":15567,"xen":15568,"reuters":15569,"preserve":15570,"inting":15571,"step":15572,"fuji":15573,"univer":15574,"iu":15575,"showdown":15576,"poems":15577,"surveillance":15578,"suspected":15579,"tae":15580,"solving":15581,"tomb":15582,"mothersday":15583,"carpen":15584,"recruit":15585,"pilots":15586,"broc":15587,"mixing":15588,"fridays":15589,"tyr":15590,"representatives":15591,"trapped":15592,"abdul":15593,"freestyle":15594,"cluster":15595,"âļłï¸ı":15596,"kd":15597,"skill":15598,"pitt":15599,"exo":15600,"commerci":15601,"museum":15602,"locally":15603,"gina":15604,"nobel":15605,"immune":15606,"frac":15607,"capsu":15608,"mained":15609,"attempts":15610,"bulldog":15611,"bespoke":15612,"singers":15613,"spelling":15614,"segment":15615,"natures":15616,"tick":15617,"lipstick":15618,"cleaner":15619,"gettable":15620,"precision":15621,"âĢ¼ï¸ı":15622,"thood":15623,"reef":15624,"nope":15625,"billy":15626,"digi":15627,"musi":15628,"rival":15629,"figured":15630,"tality":15631,"sunny":15632,"berk":15633,"awww":15634,"awaits":15635,"unreal":15636,"copen":15637,"asylum":15638,"exotic":15639,"buen":15640,"mock":15641,"enable":15642,"archy":15643,"fra":15644,"plastic":15645,"almond":15646,"ampli":15647,"displays":15648,"abbott":15649,"sme":15650,"xp":15651,"ðŁĻĥ":15652,"graphic":15653,"ived":15654,"mara":15655,"caution":15656,"leaks":15657,"enberg":15658,"ulu":15659,"unicorn":15660,"cannon":15661,"apprentic":15662,"ðŁĺĺðŁĺĺ":15663,"bball":15664,"willow":15665,"atics":15666,"amas":15667,"manufacturer":15668,"campaigns":15669,"porters":15670,"floors":15671,"lsu":15672,"type":15673,"kej":15674,"honorary":15675,"itim":15676,"tole":15677,"minecraft":15678,"dx":15679,"mash":15680,"rio":15681,"consequences":15682,"ronald":15683,"gossi":15684,"suffolk":15685,"muse":15686,"rbi":15687,"livemusic":15688,"ivan":15689,"ðŁİ¤":15690,"leu":15691,"patriot":15692,"manit":15693,"lanca":15694,"homedecor":15695,"dear":15696,"sigma":15697,"tide":15698,"strings":15699,"vita":15700,"sequel":15701,"tryna":15702,"investigate":15703,"boris":15704,"vegan":15705,"barrier":15706,"mindfulness":15707,"webb":15708,"hustle":15709,"inda":15710,"tanzania":15711,"stray":15712,"texas":15713,"cag":15714,"diagnosis":15715,"woman":15716,"gw":15717,"obsession":15718,"lative":15719,"nufc":15720,"flynn":15721,"momentum":15722,"sofa":15723,"wald":15724,"vegetable":15725,"tucker":15726,"supper":15727,"seab":15728,"arro":15729,"seag":15730,"venting":15731,"councill":15732,"splat":15733,"calcul":15734,"..#":15735,"comfy":15736,"odisha":15737,"stopp":15738,"warfare":15739,"caes":15740,"à¨":15741,"coy":15742,"priceless":15743,"insec":15744,"ðŁĺĽ":15745,"controls":15746,"empowerment":15747,"datascience":15748,"perpe":15749,"genic":15750,"eres":15751,"trudeau":15752,"mano":15753,"slavery":15754,"expanding":15755,"mahe":15756,"failing":15757,"saga":15758,"photographs":15759,"crest":15760,"reon":15761,"surfing":15762,"hie":15763,"ðŁįĢ":15764,"jae":15765,"fellows":15766,"sout
hampton":15767,"solom":15768,"cester":15769,"tability":15770,"horn":15771,"sect":15772,"hee":15773,"coleman":15774,"atlas":15775,"explorer":15776,"consultation":15777,"copyright":15778,"organizing":15779,"denied":15780,"monkeys":15781,"noodles":15782,"bris":15783,"flor":15784,"dough":15785,"bonds":15786,"shocked":15787,"ecosystem":15788,"carefully":15789,"wm":15790,"apartments":15791,"curve":15792,"sandiego":15793,"mustard":15794,"commen":15795,"ceremon":15796,"ech":15797,"ruth":15798,"ðŁĻĮðŁı»":15799,"hawai":15800,"filmed":15801,"tear":15802,"asingly":15803,"cair":15804,"watt":15805,"instrument":15806,"outta":15807,"yeol":15808,"riverside":15809,"ë°":15810,".:":15811,"norwich":15812,"alog":15813,"migrants":15814,"newman":15815,"ride":15816,"sprink":15817,"targeting":15818,"believe":15819,"torch":15820,"reflects":15821,"permission":15822,"ffman":15823,"enemies":15824,"basics":15825,"seized":15826,"sundays":15827,"lei":15828,"hassan":15829,"endo":15830,"hc":15831,"stad":15832,"lements":15833,"kkkk":15834,"nano":15835,"shark":15836,"mana":15837,"onic":15838,"treatments":15839,"early":15840,"collaborative":15841,"shuttle":15842,"branches":15843,"misses":15844,"mainedcm":15845,"apers":15846,"kyle":15847,"carrie":15848,"leisure":15849,"shet":15850,"birding":15851,"advances":15852,"ðŁĵĿ":15853,"popular":15854,"diane":15855,"abe":15856,"rewar":15857,"neighbour":15858,"kpop":15859,"remembrance":15860,"playground":15861,"rub":15862,"krishna":15863,"ebola":15864,"inquiry":15865,"epa":15866,"lumin":15867,"organisation":15868,"abraham":15869,"normally":15870,"preten":15871,"janet":15872,"wt":15873,"ðŁĴİ":15874,"encouraging":15875,"astic":15876,"bump":15877,"sydney":15878,"sz":15879,"ssss":15880,"garrett":15881,"ðŁĵ»":15882,"consulting":15883,"romania":15884,"spotting":15885,"chancellor":15886,"arma":15887,"prestigious":15888,"ðĿIJ":15889,"tad":15890,"cryst":15891,"competit":15892,"ratio":15893,"cataly":15894,"brow":15895,"jur":15896,"viking":15897,"commute":15898,"yday":15899,"layers":15900,"dumb":15901,"escal":15902,"genocide":15903,"fill":15904,"gupta":15905,"stepping":15906,"sei":15907,"foto":15908,"wildcats":15909,"coli":15910,"project":15911,"earnings":15912,"str":15913,"geons":15914,"completion":15915,"bm":15916,"decorated":15917,"crawford":15918,"afghan":15919,"scare":15920,"visibility":15921,"hib":15922,"direction":15923,"stroll":15924,"christina":15925,"alternate":15926,"clare":15927,"stylist":15928,"behold":15929,"sance":15930,"leopard":15931,"acquired":15932,"narrative":15933,"ashi":15934,"thea":15935,"????":15936,"peas":15937,"atch":15938,"slides":15939,"leen":15940,"renewable":15941,"english":15942,"quir":15943,"coaster":15944,"rx":15945,"fools":15946,"matchday":15947,"mism":15948,"amazing":15949,"zig":15950,"keting":15951,"wont":15952,"towel":15953,"diab":15954,"stake":15955,"nm":15956,"melt":15957,"ethan":15958,"grape":15959,"politician":15960,"smen":15961,"íĺ":15962,"reo":15963,"weddings":15964,"catcher":15965,"oracle":15966,"memo":15967,"ðŁĮ´":15968,"eck":15969,"robbie":15970,"norwegian":15971,"operator":15972,"amor":15973,"sewing":15974,"jul":15975,"xie":15976,"uv":15977,"fifty":15978,"mega":15979,"tattoo":15980,"liberals":15981,"upri":15982,"trafficking":15983,"richardson":15984,"suv":15985,"kip":15986,"messy":15987,"tremendous":15988,"glou":15989,"courtney":15990,"lad":15991,"stereo":15992,"myers":15993,"idio":15994,"^_^":15995,"manning":15996,"dye":15997,"wd":15998,"throne":15999,"junk":16000,"asu":16001,"provincial":16002,"kook":16003,"wrc":16004,"fineart":16005,"hampshire":16006,
"renaissance":16007,"bred":16008,"fallout":16009,"sj":16010,"snl":16011,"alam":16012,"torture":16013,"fyi":16014,"shines":16015,"paw":16016,"char":16017,"henry":16018,"crow":16019,"acious":16020,"dian":16021,"paige":16022,"bare":16023,"stockholm":16024,"scenery":16025,"ðŁĩ·":16026,"jeffrey":16027,"push":16028,"decoration":16029,"ned":16030,"cute":16031,"brigade":16032,"lavender":16033,"invites":16034,"esports":16035,"voir":16036,"dried":16037,"transpl":16038,"surgeon":16039,"novels":16040,"pulls":16041,"sony":16042,"lunar":16043,"mane":16044,"ivy":16045,"frustr":16046,"dorset":16047,"sai":16048,"torres":16049,"ssion":16050,"shutdown":16051,"suggestions":16052,"writing":16053,"eo":16054,"battlefield":16055,"uga":16056,"ðŁIJ¾":16057,"vacu":16058,"splac":16059,"git":16060,"ug":16061,"highland":16062,"%)":16063,"mermaid":16064,"sacramento":16065,"tails":16066,"pw":16067,"kah":16068,"tell":16069,"enhanced":16070,"ìķ":16071,"auckland":16072,"cruel":16073,"ðŁ¤©":16074,"audre":16075,"sailor":16076,"grammar":16077,"glove":16078,"deon":16079,"inflam":16080,"freshly":16081,"kell":16082,"zip":16083,"christie":16084,"mild":16085,"dixon":16086,"instructor":16087,"gence":16088,"ãħł":16089,"subjec":16090,"constitutional":16091,"crowds":16092,"invisible":16093,"ruins":16094,"dak":16095,"sip":16096,"plaque":16097,"pouring":16098,"complex":16099,"zine":16100,"stead":16101,"flet":16102,"transmission":16103,"loway":16104,"arun":16105,"increasingly":16106,"aud":16107,"transparen":16108,"crowned":16109,"scoun":16110,"blizzard":16111,"luxu":16112,"fiers":16113,"achievements":16114,"hunters":16115,"rocked":16116,"basin":16117,"violet":16118,"proves":16119,"achieving":16120,"prosper":16121,"sega":16122,"float":16123,"vian":16124,"xiv":16125,"polic":16126,"tura":16127,"approximately":16128,"wanderlust":16129,"keepers":16130,"getaway":16131,"cod":16132,"polis":16133,"bryan":16134,"colts":16135,"talents":16136,"yogur":16137,"glutenfree":16138,"wrist":16139,"gry":16140,"czech":16141,"ðŁİĪ":16142,"eville":16143,"ðŁıĪ":16144,"tox":16145,"daniels":16146,"amer":16147,"bids":16148,"weareone":16149,"metab":16150,"gt":16151,"boyz":16152,"pdx":16153,"possession":16154,"pushed":16155,"shrine":16156,"realistic":16157,"trigger":16158,"navi":16159,"rumors":16160,"naf":16161,"jenkins":16162,"trun":16163,"communi":16164,"ÃĹ":16165,"gamers":16166,"armor":16167,"mohammed":16168,"balcony":16169,"yah":16170,"strongest":16171,"rhythm":16172,"unforgettable":16173,"kp":16174,"hobb":16175,"custody":16176,"gregor":16177,"rita":16178,"aesthetic":16179,"ilation":16180,"sponsoring":16181,"nay":16182,"kidnapp":16183,"shs":16184,"rajas":16185,"meg":16186,"significantly":16187,"buttons":16188,"lac":16189,"versions":16190,"essentials":16191,"opinions":16192,"kro":16193,"dprinting":16194,"widely":16195,"dk":16196,"uran":16197,"yal":16198,"requested":16199,"cn":16200,"curric":16201,"plum":16202,"grun":16203,"vm":16204,"devon":16205,"myo":16206,"relation":16207,"juventus":16208,"rouge":16209,"minority":16210,"mines":16211,"jupiter":16212,"nine":16213,"oxygen":16214,"frankie":16215,"unesco":16216,"fabric":16217,"disgusting":16218,"salman":16219,"detection":16220,"lanka":16221,"dac":16222,"ðŁĩ«ðŁĩ·":16223,"argument":16224,"shelves":16225,"celtics":16226,"roberto":16227,"pigs":16228,"hedge":16229,"faul":16230,"powering":16231,"butterflies":16232,"fir":16233,"remake":16234,"atti":16235,"como":16236,"empha":16237,"kendall":16238,"pokemon":16239,"seating":16240,"dans":16241,"baldwin":16242,"ðŁij»":16243,"leslie":16244,"onedirection":16245,"timber":16246,"ima
n":16247,"font":16248,"eder":16249,"dion":16250,"steph":16251,"format":16252,"gregory":16253,"prop":16254,"hex":16255,"ruin":16256,"sory":16257,"infer":16258,"naw":16259,"barak":16260,"sdgs":16261,"karao":16262,"lush":16263,"vander":16264,"endent":16265,"gis":16266,"afro":16267,"soccer":16268,"ayan":16269,"tuni":16270,"lung":16271,"dayof":16272,"alexa":16273,"marath":16274,"addicted":16275,"agile":16276,"hygi":16277,"lightweight":16278,"ì§":16279,"mandela":16280,"joey":16281,"ancy":16282,"hum":16283,"bir":16284,"memorial":16285,"jimin":16286,"ginger":16287,"vak":16288,"javascri":16289,"crops":16290,"origins":16291,"dari":16292,"piper":16293,"import":16294,"aggressive":16295,"prediction":16296,"repairs":16297,"cracker":16298,"voyage":16299,"nike":16300,"mummy":16301,"linkedin":16302,"countryside":16303,"border":16304,"glass":16305,"pert":16306,"sals":16307,"shoe":16308,"autographed":16309,"walnut":16310,"collegi":16311,"salary":16312,"pairing":16313,"ðŁĮ¸":16314,"cathol":16315,"sweethe":16316,"defeats":16317,"strengthen":16318,"rooftop":16319,"improvements":16320,"barriers":16321,"uru":16322,"tally":16323,"ruled":16324,"ðŁĨļ":16325,"naija":16326,"emoji":16327,"percent":16328,"gio":16329,"probs":16330,"once":16331,"admits":16332,"paths":16333,"liar":16334,"daytona":16335,"peters":16336,"cali":16337,"calli":16338,"mug":16339,"osa":16340,"aph":16341,"aby":16342,"hyde":16343,"ethnic":16344,"plains":16345,"olf":16346,"hahahahaha":16347,"holic":16348,"?!?!":16349,"subli":16350,"blacks":16351,"mot":16352,"ghton":16353,"lovin":16354,"brent":16355,"baru":16356,"lati":16357,"dew":16358,"ateau":16359,"qa":16360,"painful":16361,"busters":16362,"static":16363,"ðŁĩ¨ðŁĩ¦":16364,"notebook":16365,"outfits":16366,"sies":16367,"rf":16368,"floods":16369,"ÑĢ":16370,"throat":16371,"suici":16372,"rovers":16373,"bengal":16374,"prepares":16375,"blog":16376,"miniature":16377,"ب":16378,"amphi":16379,"comb":16380,"rsp":16381,"intimate":16382,"greene":16383,"Ìĩ":16384,"altar":16385,"surgical":16386,"vessel":16387,"...?":16388,"gavin":16389,"gator":16390,"threatened":16391,"zar":16392,"robbery":16393,"dier":16394,"promoted":16395,"yg":16396,"xs":16397,"subs":16398,"interviewing":16399,"threatening":16400,"dozen":16401,"meado":16402,"waterfall":16403,"nintendoswitch":16404,"calum":16405,"ministers":16406,"drop":16407,"universities":16408,"warned":16409,"tactics":16410,"ðŁĩ²":16411,"refuse":16412,"adju":16413,"vast":16414,"ðŁĺ´":16415,"mcfc":16416,"libya":16417,"nofilter":16418,"distributed":16419,"reser":16420,"ronnie":16421,"deco":16422,"javascript":16423,"monk":16424,"interests":16425,"flex":16426,"martha":16427,"sties":16428,"ood":16429,"ðŁ¤£ðŁ¤£":16430,"eun":16431,"bali":16432,"gomez":16433,"stimul":16434,"moderate":16435,"dity":16436,"iris":16437,"straw":16438,"consistent":16439,"directions":16440,"adopt":16441,"salsa":16442,"croo":16443,"recovered":16444,"blackfriday":16445,"lancaster":16446,"accept":16447,"weareoneexo":16448,"builds":16449,"freeman":16450,"airplane":16451,"dition":16452,"belong":16453,"jamie":16454,"pitching":16455,"lif":16456,"omin":16457,"crispy":16458,"prepping":16459,"veg":16460,"chang":16461,"accomplished":16462,"gracias":16463,"dolphin":16464,"elector":16465,"culinary":16466,"superbowl":16467,"wala":16468,"pursuit":16469,"blackberry":16470,"bean":16471,"cardinal":16472,"proved":16473,"immigrant":16474,"strictly":16475,"holocaust":16476,"passage":16477,"haus":16478,"coup":16479,"purse":16480,"harass":16481,"<<":16482,"leed":16483,"adobe":16484,"stad":16485,"legislat":16486,"parked":16487,"priy
an":16488,"silva":16489,"krist":16490,"sthe":16491,"funky":16492,"iga":16493,"settlement":16494,"phs":16495,"tmrw":16496,"stressed":16497,"hunt":16498,"hockey":16499,"treasures":16500,"chambers":16501,"olu":16502,"hut":16503,"marley":16504,"texture":16505,"wilderness":16506,"mming":16507,"potentially":16508,"omaha":16509,"judy":16510,"toes":16511,"spoiler":16512,"distinguished":16513,"felix":16514,"ahu":16515,"recommendations":16516,"zombies":16517,"hitler":16518,"triple":16519,"collapse":16520,"motivated":16521,"ultimat":16522,"ggling":16523,"soy":16524,"cigar":16525,"foren":16526,"vineyard":16527,"glitter":16528,"findings":16529,"colonial":16530,"hunter":16531,"erik":16532,"dens":16533,"beetle":16534,"lotte":16535,"subtle":16536,"smatter":16537,"trusted":16538,"experimental":16539,"naments":16540,"ðŁĺĨ":16541,"region":16542,"acquisition":16543,"breeding":16544,"quarterback":16545,"amreading":16546,"ootd":16547,"rude":16548,"initiatives":16549,"stout":16550,"hyung":16551,"outcome":16552,"alfred":16553,"mics":16554,"expertise":16555,"bacteria":16556,"penguins":16557,"jumper":16558,"valencia":16559,"bark":16560,"ingday":16561,"sellers":16562,"contracts":16563,"houston":16564,"commissioned":16565,"adaptation":16566,"swansea":16567,"santiago":16568,"commonwealth":16569,"judging":16570,"submission":16571,"scorer":16572,"tommy":16573,"ño":16574,"exquis":16575,"filing":16576,"explanation":16577,"allison":16578,"wembley":16579,"ridge":16580,"chevy":16581,"santos":16582,"ownership":16583,"cognitive":16584,"favourites":16585,"shed":16586,"philanthro":16587,"deleted":16588,"godd":16589,"snor":16590,"guidelines":16591,"ffing":16592,"jeep":16593,"clips":16594,"swamp":16595,"anor":16596,"guild":16597,"bolton":16598,"springfield":16599,"municipal":16600,"goalkeeper":16601,"yeon":16602,"ðŁĺįðŁĺįðŁĺįðŁĺį":16603,"ãħĭãħĭ":16604,"waterfront":16605,"grave":16606,"contemporary":16607,"arity":16608,"ÃŃa":16609,"sleeps":16610,"syrup":16611,"alam":16612,"pire":16613,"coyo":16614,"motogp":16615,"tyson":16616,"kejri":16617,"circul":16618,"singly":16619,"crunch":16620,"complicated":16621,"nostalgia":16622,"kop":16623,"move":16624,"kale":16625,"macro":16626,"midwest":16627,"hans":16628,"tribal":16629,"nude":16630,"à¯į":16631,"beyonce":16632,"congratulate":16633,"cater":16634,"league":16635,"ðŁĻĬ":16636,"ladder":16637,"crashed":16638,"technic":16639,"karaoke":16640,"harassment":16641,"rots":16642,"experiencing":16643,"kristen":16644,"ðŁĩ³":16645,"ðŁ¤Ĺ":16646,"reflections":16647,"guinness":16648,"illustrator":16649,"ðŁĻıðŁı»":16650,"center":16651,"narrow":16652,"commons":16653,"regulations":16654,"ÙĨ":16655,"harm":16656,"croft":16657,"cussion":16658,"hongkong":16659,"stical":16660,"internship":16661,"zoe":16662,"chop":16663,"hoods":16664,"estimated":16665,"batteries":16666,"berkeley":16667,"smoothie":16668,"shaun":16669,"cros":16670,"~~":16671,"campe":16672,"hump":16673,"bg":16674,"prototype":16675,"click":16676,"shawn":16677,"reviewed":16678,"templ":16679,"pf":16680,"jedi":16681,"blogs":16682,"raymond":16683,"asth":16684,"bah":16685,"avail":16686,"scotch":16687,"leafs":16688,"nikki":16689,"tok":16690,"hollow":16691,"urges":16692,"oft":16693,"unlike":16694,"latin":16695,"ue":16696,"catering":16697,"mili":16698,"alternati":16699,"maver":16700,"и":16701,"agle":16702,"preorder":16703,"lux":16704,"cucu":16705,"ðŁijıðŁijı":16706,"tart":16707,"âĿ¤âĿ¤âĿ¤":16708,"arabic":16709,"rapidly":16710,"arrang":16711,"allen":16712,"traveltuesday":16713,"paws":16714,"flows":16715,"stability":16716,"fluid":16717,"capp":16718,"canberra":167
19,"uuuu":16720,"spani":16721,"demonstration":16722,"mla":16723,"placement":16724,"mw":16725,"presidents":16726,"awesom":16727,"beverly":16728,"anist":16729,"neal":16730,"fathersday":16731,"referendum":16732,"lahore":16733,"oaks":16734,"debbie":16735,"halfway":16736,"ghosts":16737,"debor":16738,"matthews":16739,"fiat":16740,"tfw":16741,"presen":16742,"robi":16743,"ded":16744,"brock":16745,"laughed":16746,"amounts":16747,"bamboo":16748,"kindergarten":16749,"eaten":16750,"mtvhottest":16751,"breakout":16752,"usic":16753,"fraser":16754,"legislative":16755,"pang":16756,"module":16757,"sammy":16758,"gover":16759,"earns":16760,"expedition":16761,"garh":16762,"concepts":16763,"charlie":16764,"lava":16765,"bachelor":16766,"veggies":16767,"determine":16768,"ellie":16769,"unlocked":16770,"fruit":16771,"dalla":16772,"coupe":16773,"washington":16774,"deposit":16775,"ivory":16776,"paula":16777,"chicag":16778,"gucci":16779,"ðŁİĥ":16780,"cultiv":16781,"pierce":16782,"lifted":16783,"stumb":16784,"recover":16785,"muscles":16786,"conducting":16787,"cbs":16788,"mclaren":16789,"sophia":16790,"cellu":16791,"oceans":16792,"uploaded":16793,"gameplay":16794,"maldives":16795,"kimber":16796,"avoi":16797,"racer":16798,"caine":16799,"cavs":16800,"hana":16801,"liga":16802,"raven":16803,"intervention":16804,"inauguration":16805,"ooh":16806,"attraction":16807,"merchandise":16808,"tunein":16809,"liking":16810,"juniors":16811,"intended":16812,"attacking":16813,"aquarium":16814,"iwd":16815,"components":16816,"suring":16817,"centu":16818,"yogurt":16819,"ðŁıĥ":16820,"showroom":16821,"optical":16822,"tyour":16823,"judge":16824,"yield":16825,"anto":16826,"plc":16827,"transparency":16828,"recycled":16829,"chief":16830,"arom":16831,"ambassadors":16832,"planet":16833,"âĿĦï¸ı":16834,"omed":16835,"vanessa":16836,"court":16837,"margar":16838,"haley":16839,"vr":16840,"regina":16841,"pdates":16842,"hispan":16843,"livestream":16844,"âģ£":16845,"yahoo":16846,"galla":16847,"secured":16848,"wir":16849,"beneath":16850,"offl":16851,"nil":16852,"amb":16853,"yeg":16854,"outlet":16855,"ute":16856,"peep":16857,"lindsay":16858,"bentley":16859,"...!":16860,"heel":16861,"trilogy":16862,"vos":16863,"tyre":16864,"therefore":16865,"toronto":16866,"abi":16867,"simpli":16868,"jae":16869,"extensive":16870,"elephants":16871,"sor":16872,"orientation":16873,"impeach":16874,"replay":16875,"constructed":16876,"peterson":16877,"pais":16878,"ported":16879,"customs":16880,"collap":16881,"adu":16882,"highlands":16883,"salem":16884,"shelby":16885,"kovic":16886,"strain":16887,"rosie":16888,"senators":16889,"snaps":16890,"bobb":16891,"suzuki":16892,"blades":16893,"kp":16894,"lolo":16895,"generate":16896,"sight":16897,"mae":16898,"structural":16899,"predict":16900,"jumped":16901,"ahmad":16902,"sung":16903,"justice":16904,"glam":16905,"volvo":16906,"jubilee":16907,"detention":16908,"losses":16909,"puri":16910,"everytime":16911,"а":16912,"rao":16913,"edge":16914,"limer":16915,"resemb":16916,"harold":16917,"retri":16918,"sacrific":16919,"surprises":16920,"amc":16921,"srilanka":16922,"barbie":16923,"mens":16924,"finn":16925,"ags":16926,"ukrainian":16927,"embrac":16928,"îIJ":16929,"flavors":16930,"homer":16931,"laure":16932,"outh":16933,"priced":16934,"verde":16935,"firm":16936,"ahs":16937,"cub":16938,"trey":16939,"paranor":16940,"profit":16941,"indv":16942,"whoa":16943,"harsh":16944,"alot":16945,"critics":16946,"hubby":16947,"figur":16948,"gira":16949,"castro":16950,"chanel":16951,"input":16952,"originals":16953,"tenant":16954,"yyyy":16955,"turers":16956,"lincoln":16957,"
coon":16958,"learn":16959,"chou":16960,"acare":16961,"oles":16962,"diner":16963,"hyp":16964,"bizarre":16965,"mcr":16966,"letsgo":16967,"decorating":16968,"ðŁĮİ":16969,"alison":16970,"arvin":16971,"fd":16972,"rehab":16973,"mccarthy":16974,"lottery":16975,"dah":16976,"minneapolis":16977,"eligible":16978,"diagnosed":16979,"emerald":16980,"destinations":16981,"sans":16982,"ory":16983,"blazers":16984,"nv":16985,"bail":16986,"digitalart":16987,"noc":16988,"malta":16989,"solar":16990,"pipes":16991,"allegations":16992,"nock":16993,"pope":16994,"brid":16995,"premier":16996,"nx":16997,"presentations":16998,"efa":16999,"bows":17000,"valve":17001,"opponent":17002,"Įë":17003,"visual":17004,"ingle":17005,"categor":17006,"eter":17007,"pois":17008,"dani":17009,"attract":17010,"neutral":17011,"thene":17012,"crashes":17013,"freddie":17014,"utili":17015,"cst":17016,"awakening":17017,"sloven":17018,"qualify":17019,"proof":17020,"fairy":17021,"lev":17022,"freight":17023,"enjoys":17024,"cupcake":17025,"flavour":17026,"âķ":17027,"protective":17028,"ðŁijıðŁı»":17029,"isu":17030,"admir":17031,"hmmm":17032,"continuous":17033,"aires":17034,"raptors":17035,"showcasing":17036,"yuk":17037,"paste":17038,"follower":17039,"instructions":17040,"spru":17041,"@__":17042,"theo":17043,"debuts":17044,"vette":17045,"stow":17046,"esof":17047,"ached":17048,"sultan":17049,"sandwich":17050,"somalia":17051,"franco":17052,"carne":17053,"fluffy":17054,"alpine":17055,"jasmine":17056,"heated":17057,"violin":17058,"pless":17059,"divorce":17060,"performer":17061,"phies":17062,"portsm":17063,"dara":17064,"kirby":17065,"lop":17066,"chilli":17067,"forth":17068,"skype":17069,"ðŁĩ®ðŁĩ¹":17070,"celebrities":17071,"edy":17072,"vee":17073,"poison":17074,"eyel":17075,"grabs":17076,"ssic":17077,"uno":17078,"western":17079,"railroad":17080,"amer":17081,"numerous":17082,"sv":17083,"fow":17084,"fist":17085,"âĢĭ":17086,"requests":17087,"martial":17088,"emmy":17089,"acceptance":17090,"laura":17091,"ิ":17092,"erup":17093,"hyundai":17094,"outlander":17095,"utt":17096,"wrestle":17097,"espresso":17098,"demanding":17099,"gdp":17100,"geography":17101,"saskat":17102,"troll":17103,"confeder":17104,"sues":17105,"sem":17106,"bets":17107,"tful":17108,"tosh":17109,"teaches":17110,"coloured":17111,"galway":17112,"macy":17113,"disorders":17114,"bbcra":17115,"atem":17116,"fender":17117,"litter":17118,"esh":17119,"providers":17120,"renovation":17121,"nominate":17122,"psg":17123,"nominations":17124,"jenna":17125,"sharp":17126,"someday":17127,"zur":17128,"brains":17129,"cheshire":17130,"prey":17131,"hugo":17132,"¿":17133,"token":17134,"rv":17135,"carr":17136,"tactical":17137,"zelda":17138,"kayla":17139,"fernando":17140,"photographers":17141,"jour":17142,"umbrella":17143,"woody":17144,"congressman":17145,"dump":17146,"levy":17147,"juan":17148,"dazz":17149,"signals":17150,"lain":17151,"anu":17152,"michel":17153,"porch":17154,"alden":17155,"siblings":17156,"yale":17157,"peel":17158,"swick":17159,"ggin":17160,"llc":17161,"kale":17162,"scon":17163,"ild":17164,"patreon":17165,"reel":17166,"quin":17167,"witt":17168,"marty":17169,"moody":17170,"toni":17171,"dery":17172,"gators":17173,"specifically":17174,"ddin":17175,"lyon":17176,"trick":17177,"meadows":17178,"pj":17179,"borgh":17180,"vik":17181,"tur":17182,"bronx":17183,"puff":17184,"lantern":17185,"ðŁ¤¦":17186,"gently":17187,"bestie":17188,"fact":17189,"refused":17190,"fasci":17191,"mpy":17192,"ðŁĶµ":17193,"crossover":17194,"meadow":17195,"indianapolis":17196,"ducation":17197,"sley":17198,"loom":17199,"mixer":17200,"newmusic":17
201,"filmmaker":17202,"prosperity":17203,"lim":17204,"weekend":17205,"creamy":17206,"neutr":17207,"luther":17208,"hv":17209,"northern":17210,"two":17211,"hra":17212,"catches":17213,"appearances":17214,"habit":17215,"kittens":17216,"nv":17217,"illac":17218,"infan":17219,"regardless":17220,"lizard":17221,"dunk":17222,"curtain":17223,"acom":17224,"intu":17225,"vez":17226,"emin":17227,"flats":17228,"calendars":17229,"empower":17230,"ruined":17231,"hungary":17232,"vid":17233,"wex":17234,"ulum":17235,"aberdeen":17236,"osa":17237,"kt":17238,"massi":17239,"seemed":17240,"sden":17241,"'?":17242,"telephone":17243,"defi":17244,"inspires":17245,"meow":17246,"zones":17247,"blind":17248,"ply":17249,"tucson":17250,"adventure":17251,"ged":17252,"oyster":17253,"ðŁijıðŁijıðŁijı":17254,"output":17255,"ttt":17256,"metallic":17257,"smash":17258,"ucla":17259,"scots":17260,"perfect":17261,"lucy":17262,"regularly":17263,"spic":17264,"relative":17265,"athers":17266,"mise":17267,"battling":17268,"decides":17269,"mata":17270,"occupied":17271,"randomly":17272,"catsoftwitter":17273,"gian":17274,"bally":17275,"alties":17276,"allies":17277,"immen":17278,"syrac":17279,"ðŁĴľðŁĴľ":17280,"llan":17281,"aur":17282,"kut":17283,"lamar":17284,"affects":17285,"nra":17286,"starwar":17287,"ðŁ¤ĺ":17288,"scram":17289,"enchan":17290,"process":17291,"luxurious":17292,"array":17293,"sherlock":17294,"compati":17295,"dorf":17296,"stress":17297,"msu":17298,"swith":17299,"sala":17300,"sofinstagram":17301,"foil":17302,"understood":17303,"quay":17304,"rp":17305,"cade":17306,"jaw":17307,"enab":17308,"encoun":17309,"ðŁİī:":17310,"dock":17311,"saturn":17312,"mull":17313,"layout":17314,"rarely":17315,"happily":17316,"fixture":17317,"orph":17318,"overlooking":17319,"herbs":17320,"mitt":17321,"pillar":17322,"nolan":17323,"petty":17324,"stry":17325,"ui":17326,"muk":17327,"ores":17328,"overs":17329,"áµ":17330,"recreation":17331,"wesley":17332,"rit":17333,"kejriwal":17334,"stocking":17335,"gv":17336,"subscribers":17337,"moose":17338,"mae":17339,"bert":17340,"oppre":17341,"assignment":17342,"uro":17343,"highlighting":17344,"calvin":17345,"weigh":17346,"cambodia":17347,"avon":17348,"kem":17349,"disabilities":17350,"ready":17351,"chargers":17352,"pads":17353,"izing":17354,"illian":17355,"truste":17356,"colleges":17357,"associates":17358,"albany":17359,"milton":17360,"cron":17361,"bur":17362,"hardly":17363,"sights":17364,"antiques":17365,"echo":17366,"surprisingly":17367,"haiti":17368,"capt":17369,"php":17370,"opio":17371,"inequality":17372,"equal":17373,"keny":17374,"schmid":17375,"autographs":17376,"rent":17377,"quer":17378,"citrus":17379,"challenged":17380,"tec":17381,"epide":17382,"fest":17383,"zhou":17384,"lime":17385,"citizenship":17386,"crystal":17387,"convinced":17388,"messenger":17389,"copenhagen":17390,"âĿĹï¸ı":17391,"warran":17392,"developments":17393,"ï¸ıâĥ£":17394,"forex":17395,"hiro":17396,"sneakers":17397,"xide":17398,"viva":17399,"stereo":17400,"batting":17401,"ssel":17402,"host":17403,"bengal":17404,"criticism":17405,"qc":17406,"crun":17407,"attempted":17408,"rye":17409,"determination":17410,"creations":17411,"dread":17412,"labels":17413,"posse":17414,"ancer":17415,"johan":17416,"sister":17417,"partnerships":17418,"lesbian":17419,"kst":17420,"guarantee":17421,"baro":17422,"fixing":17423,"mason":17424,"mous":17425,"chemicals":17426,"tless":17427,"biodiversity":17428,"paro":17429,"bharat":17430,"acol":17431,"refuge":17432,"ente":17433,"titi":17434,"dyssey":17435,"responds":17436,"lefto":17437,"iner":17438,"sevel":17439,"rahul":17440,"oline":
17441,"frankfur":17442,"choreo":17443,"enjoyable":17444,"cto":17445,"struggles":17446,"woodland":17447,"heavyweight":17448,"gens":17449,"recep":17450,"accred":17451,"ðŁĺ¡":17452,"transformed":17453,"listen":17454,"atop":17455,"nk":17456,"surge":17457,"bere":17458,"governor":17459,"prisoners":17460,"claude":17461,"till":17462,"mulator":17463,"emotion":17464,"waterloo":17465,"start":17466,"ðŁĩº":17467,"cleaned":17468,"grandmother":17469,"fearless":17470,"african":17471,"astronomy":17472,"ðŁıģ":17473,"à¸Ļ":17474,"theworld":17475,"suitable":17476,"anthony":17477,"kand":17478,"tten":17479,"meaningful":17480,"disclo":17481,"jacobs":17482,"ø":17483,"tomlinson":17484,"ghetti":17485,"typho":17486,"substan":17487,"asco":17488,"tek":17489,"nagar":17490,"mud":17491,"amon":17492,"vaccine":17493,"fty":17494,"flesh":17495,"noel":17496,"inflation":17497,"portugue":17498,"glamour":17499,"tram":17500,"vre":17501,"tequ":17502,"roundup":17503,"wyn":17504,"rejected":17505,"mosaic":17506,"sighting":17507,"calf":17508,"ota":17509,"composition":17510,"gopro":17511,"gonzale":17512,"eed":17513,"bard":17514,"tue":17515,"effectively":17516,"ween":17517,"alto":17518,"ribs":17519,"relate":17520,"thirsty":17521,"furious":17522,"dim":17523,"chard":17524,"perfume":17525,"sny":17526,"churchill":17527,"kof":17528,"masterclass":17529,"wave":17530,"ðŁĶµ":17531,"erin":17532,"owns":17533,"tobe":17534,"skilled":17535,"tem":17536,"gof":17537,"eni":17538,"tori":17539,"crazy":17540,"lick":17541,"resistant":17542,"icial":17543,"agar":17544,"!:":17545,"gali":17546,"delaware":17547,"blitz":17548,"kohli":17549,"puck":17550,"availability":17551,"himalay":17552,"influential":17553,"crochet":17554,"victori":17555,"reading":17556,"hobby":17557,"viet":17558,"jas":17559,"engra":17560,"skul":17561,"ðŁĩ²ðŁĩ":17562,"educate":17563,"techno":17564,"districts":17565,"blues":17566,"sett":17567,"seventh":17568,"learns":17569,"eeee":17570,"apocalypse":17571,"hangout":17572,"cruel":17573,"mutu":17574,"bruh":17575,"helen":17576,"sheer":17577,"ction":17578,"klein":17579,"texans":17580,"cereal":17581,"shine":17582,"nered":17583,"gras":17584,"ambro":17585,"fella":17586,"hindu":17587,"matthew":17588,"lima":17589,"miranda":17590,"jewel":17591,"soho":17592,"eurovision":17593,"neighbours":17594,"chandler":17595,"besides":17596,"ðŁ¥°":17597,"astros":17598,"thumbs":17599,"renault":17600,"rave":17601,"hired":17602,"ðŁĸ¤":17603,"itary":17604,"zor":17605,"blazer":17606,"kine":17607,"eau":17608,"katy":17609,"dccomics":17610,"pec":17611,"rodgers":17612,"waterproof":17613,"killers":17614,"superint":17615,"preserv":17616,"asso":17617,"brewers":17618,"promotional":17619,"scam":17620,"villages":17621,"sketches":17622,"juicy":17623,"forlife":17624,"audit":17625,"solo":17626,"fundamental":17627,"lene":17628,"philippine":17629,"tend":17630,"conservatives":17631,"sponsorship":17632,"ddle":17633,"aine":17634,"htc":17635,"osi":17636,"hulk":17637,"waf":17638,"à¸Ļ":17639,"evaluation":17640,"antine":17641,"slee":17642,"robertson":17643,"roosevel":17644,"agi":17645,"sophistic":17646,"employers":17647,"bubbles":17648,"kowski":17649,"interaction":17650,"shu":17651,"boule":17652,"ican":17653,"jare":17654,"hank":17655,"legitim":17656,"knicks":17657,"karma":17658,"receiver":17659,"perks":17660,"uh":17661,"stair":17662,"suni":17663,"laboratory":17664,"graves":17665,"vocals":17666,"oot":17667,"cture":17668,"thrive":17669,"tico":17670,"ãĥ³":17671,"bw":17672,"cartoons":17673,"mcdonalds":17674,"draw":17675,"yung":17676,"pler":17677,"lid":17678,"ethical":17679,"groove":17680,"enta":17681,"in
ternationalwomensday":17682,"patron":17683,"worries":17684,"ðŁİħ":17685,"ðŁijĭ":17686,"katherine":17687,"diaz":17688,"tori":17689,"bachchan":17690,"trust":17691,"mineral":17692,"icom":17693,"builders":17694,"born":17695,"coloring":17696,"latte":17697,"case":17698,"revolution":17699,"trader":17700,"oxid":17701,"chipot":17702,"instantly":17703,"southern":17704,"sehun":17705,"prob":17706,"hernandez":17707,"lisbon":17708,"huawe":17709,"pong":17710,"mea":17711,"rooney":17712,"wheelchair":17713,"keen":17714,"bett":17715,"corin":17716,"regulatory":17717,"displac":17718,"karen":17719,"schem":17720,"sunsets":17721,"whales":17722,"reminis":17723,"hep":17724,"hide":17725,"marcel":17726,"pandora":17727,"doyle":17728,"thfc":17729,"otto":17730,"nokia":17731,"transgender":17732,"kov":17733,"hawaiian":17734,"shave":17735,"sovere":17736,"excer":17737,"nicki":17738,"pug":17739,"stor":17740,"roth":17741,"weet":17742,"legal":17743,"dignity":17744,"pow":17745,"homage":17746,"ðŁĩ³ðŁĩ":17747,"sre":17748,"canon":17749,"lax":17750,"woah":17751,"quartz":17752,"ña":17753,"greeting":17754,"flickr":17755,"nairobi":17756,"advocates":17757,"anc":17758,"vii":17759,"eugene":17760,"thra":17761,"cre":17762,"elan":17763,"pension":17764,"thletics":17765,"toni":17766,"reagan":17767,"xv":17768,"store":17769,"bench":17770,"harlem":17771,"toddler":17772,"sentenced":17773,"âĻ¥ï¸ı":17774,"globally":17775,"cheaper":17776,"uf":17777,"mam":17778,"nico":17779,"iku":17780,"thou":17781,"nist":17782,"dami":17783,"thala":17784,"rhodes":17785,"sale":17786,"bowls":17787,"âĪ":17788,"lasvegas":17789,"sanctions":17790,"admire":17791,"matched":17792,"unable":17793,"traveler":17794,"eleven":17795,"strawberries":17796,"âĢĶâĢĶâĢĶâĢĶ":17797,"studio":17798,"jacques":17799,"ims":17800,"valued":17801,"sno":17802,"cheesecake":17803,"nxt":17804,"eos":17805,"sx":17806,"fx":17807,"tonic":17808,"hatch":17809,"chicks":17810,"grads":17811,"handic":17812,"rory":17813,"asp":17814,"ripped":17815,"dentist":17816,"nen":17817,"lufc":17818,"âľĬ":17819,"dige":17820,"hopkins":17821,"sherman":17822,"fda":17823,"forall":17824,"ashley":17825,"strand":17826,"hy":17827,"liquor":17828,"buffet":17829,"essence":17830,"pharma":17831,"suriya":17832,"ðŁĴĻðŁĴĻ":17833,"festivals":17834,"zan":17835,"refresh":17836,"purple":17837,"uniforms":17838,"kenneth":17839,"=)":17840,"asan":17841,"helsin":17842,"transformers":17843,"kali":17844,"personalized":17845,"chalk":17846,"bobby":17847,"âĮ":17848,"themes":17849,"departure":17850,"print":17851,"illustrations":17852,"quiet":17853,"agrees":17854,"griff":17855,"س":17856,"miti":17857,"together":17858,"convenience":17859,"abar":17860,"carlo":17861,"turtles":17862,"infosec":17863,"somewhat":17864,"arlington":17865,"scholarships":17866,"emirates":17867,"mums":17868,"stella":17869,"autonom":17870,"feather":17871,"gore":17872,"nominees":17873,"fragrance":17874,"ÑĤ":17875,"wong":17876,"theastern":17877,"gre":17878,"zilla":17879,"isi":17880,"bumper":17881,"goo":17882,"dozens":17883,"abduc":17884,"âļªï¸ı":17885,"oils":17886,"donors":17887,"silicon":17888,"ipod":17889,"fortnite":17890,"ðŁĴ¨":17891,"toro":17892,"sparkling":17893,"consciousness":17894,"pala":17895,"num":17896,"mounted":17897,"ffins":17898,"thieves":17899,"teammate":17900,"prab":17901,"omer":17902,"tapes":17903,"bod":17904,"mitsu":17905,"stew":17906,"ere":17907,"pbs":17908,"tusc":17909,"lowe":17910,"rade":17911,"parliamentary":17912,"hm":17913,"edgar":17914,"ðŁijĩðŁijĩ":17915,"toa":17916,"agh":17917,"honi":17918,"slate":17919,"geek":17920,"apt":17921,"hardt":17922,"tap":17923,"horizon":1
7924,"growth":17925,"makeover":17926,"hil":17927,"paperback":17928,"idan":17929,"rehabil":17930,"giu":17931,"possibilities":17932,"lettu":17933,"franco":17934,"boss":17935,"acher":17936,"doesnt":17937,"moe":17938,"taker":17939,"hussain":17940,"mlk":17941,"dil":17942,"thia":17943,"hama":17944,"realised":17945,"ravens":17946,"curriculum":17947,"mith":17948,"knight":17949,"tedx":17950,"rv":17951,"isaiah":17952,"cumbria":17953,"birthdays":17954,"fing":17955,"prez":17956,"mubarak":17957,"exquisite":17958,"clearance":17959,"yen":17960,"pari":17961,"evo":17962,"ú":17963,"modified":17964,"applying":17965,"implement":17966,"discovering":17967,"chapman":17968,"indiegame":17969,"disk":17970,"crowdfunding":17971,"machin":17972,"livel":17973,"styled":17974,"âĿĮ":17975,"making":17976,"rehearsals":17977,"nutriti":17978,"subscription":17979,"andro":17980,"creators":17981,"carries":17982,"kylie":17983,"camden":17984,"apprentice":17985,"taxpay":17986,"cca":17987,"tuesdaythoughts":17988,"pissed":17989,"erman":17990,"detec":17991,"freedom":17992,"meri":17993,"..!":17994,"psalm":17995,"sunlight":17996,"perspec":17997,"beings":17998,"bookstore":17999,"rockstar":18000,"functions":18001,"pence":18002,"faves":18003,"zn":18004,"obamacare":18005,"spill":18006,"coventry":18007,"pigeon":18008,"pivo":18009,"bait":18010,"kolkata":18011,"aval":18012,"donor":18013,"wah":18014,"privileg":18015,"traditions":18016,"rajasthan":18017,"teness":18018,"portuguese":18019,"ynes":18020,"tackles":18021,"defic":18022,"torn":18023,"polling":18024,"thorne":18025,"ina":18026,"benedict":18027,"barry":18028,"calories":18029,"verdict":18030,"savethe":18031,"norton":18032,"office":18033,"mainstream":18034,"improves":18035,"fron":18036,"responding":18037,"realtor":18038,"scottish":18039,"declar":18040,"rl":18041,"shiv":18042,"supplier":18043,"resting":18044,"sweets":18045,"qui":18046,".âĢ¦":18047,"whitney":18048,"startup":18049,"thankyou":18050,"teacher":18051,"halls":18052,"have":18053,"handmade":18054,"proving":18055,"quartet":18056,"rochester":18057,"lian":18058,"virtual":18059,"mendes":18060,"oficial":18061,"midlands":18062,"xbox":18063,"measuring":18064,"ovo":18065,"accommodation":18066,"brides":18067,"collegiate":18068,"intellectual":18069,"incar":18070,"niag":18071,"ðŁį·":18072,"sfw":18073,"cocoa":18074,"coats":18075,"civilians":18076,"presidency":18077,"matrix":18078,"sweetheart":18079,"triathlon":18080,"wagner":18081,"radic":18082,"planner":18083,"theo":18084,"execution":18085,"kum":18086,"thewalkingdead":18087,"scar":18088,"rotation":18089,"blogging":18090,"bomb":18091,"reson":18092,"bbles":18093,"stare":18094,"assisted":18095,"edo":18096,"branded":18097,"warnings":18098,"thorpe":18099,"acknowle":18100,"satisfied":18101,"shores":18102,"rid":18103,"dora":18104,"physically":18105,"bigh":18106,"approves":18107,"hah":18108,"rical":18109,"versatile":18110,"pretend":18111,"lum":18112,"abhi":18113,"yee":18114,"spit":18115,"ãĢĮ":18116,"djs":18117,"ashtra":18118,"jt":18119,"venues":18120,"grammys":18121,"cyclo":18122,"tracker":18123,"overwatch":18124,"replica":18125,"elyn":18126,"nrl":18127,"lindsey":18128,"homo":18129,"balloons":18130,"kitchen":18131,"sis":18132,"amos":18133,"endeav":18134,"ðŁĴ»":18135,"arec":18136,"thug":18137,"hooked":18138,"hrc":18139,"newyork":18140,"burgh":18141,"americas":18142,"patricia":18143,"ugu":18144,"apathy":18145,"hast":18146,"psychi":18147,"cork":18148,"petrol":18149,"ðŁİ¬":18150,"aku":18151,"popping":18152,"psychological":18153,"aux":18154,"gma":18155,"cadillac":18156,"waste":18157,"authent":18158,"bristol":1
8159,"name":18160,"queer":18161,"tober":18162,"jerry":18163,"comin":18164,"chant":18165,"privileged":18166,"opar":18167,"loser":18168,"text":18169,"marker":18170,"stries":18171,"equally":18172,"aki":18173,"christmas":18174,"gareth":18175,"blew":18176,"emma":18177,"imagin":18178,"seals":18179,"cheat":18180,"conditioning":18181,"jana":18182,"rens":18183,"daries":18184,"oasis":18185,"discounts":18186,"council":18187,"ika":18188,"shirley":18189,"voucher":18190,"alps":18191,"wx":18192,"qr":18193,"drift":18194,"attempting":18195,"utc":18196,"ت":18197,"gonzalez":18198,"mf":18199,"joker":18200,"parallel":18201,"pare":18202,"aspects":18203,"procedu":18204,"np":18205,"ama":18206,"raleigh":18207,"brighten":18208,"guire":18209,"radiation":18210,"crescent":18211,"hob":18212,"ille":18213,"strand":18214,"vore":18215,"nard":18216,"chest":18217,"diwali":18218,"avatar":18219,"alder":18220,"dling":18221,"pathetic":18222,"ðŁĴĺ":18223,"spirit":18224,"jorge":18225,"filmmaking":18226,"ðŁĻıðŁĻı":18227,"challenger":18228,"bj":18229,"downtown":18230,"html":18231,"adequ":18232,"twisted":18233,"inely":18234,"('":18235,"wraps":18236,"operational":18237,"yne":18238,"nus":18239,"magnet":18240,"marketplace":18241,"healthier":18242,"snapshot":18243,"damon":18244,"interven":18245,"federer":18246,"owls":18247,"biscuits":18248,"jp":18249,"rodeo":18250,"blueberry":18251,"lection":18252,"frontier":18253,"summers":18254,"reyes":18255,"pedestrian":18256,"gol":18257,"caffe":18258,"refurbi":18259,"boulder":18260,"meghan":18261,"specialty":18262,"lass":18263,"ei":18264,"suspects":18265,"approx":18266,"rrr":18267,"rath":18268,"stim":18269,"crushed":18270,"hed":18271,"whun":18272,"loaf":18273,"crore":18274,"rivera":18275,"genetics":18276,"sock":18277,"wasted":18278,"nypd":18279,"answering":18280,"dove":18281,"bella":18282,"olin":18283,"dun":18284,"fiji":18285,"pretty":18286,"sparkle":18287,"yun":18288,"jd":18289,"europa":18290,"lifts":18291,"amber":18292,"mur":18293,"tek":18294,"boyd":18295,"royalty":18296,"indo":18297,"rib":18298,"gotham":18299,"tiest":18300,"installing":18301,"kemp":18302,"thephoto":18303,"cosmic":18304,")))":18305,"wholesale":18306,"loyment":18307,"easy":18308,"suing":18309,"settled":18310,"afp":18311,"prover":18312,"supportive":18313,"rees":18314,"neath":18315,"deliber":18316,"cé":18317,"welcome":18318,"picoftheday":18319,"newborn":18320,"patty":18321,"suns":18322,"siest":18323,"flint":18324,"differently":18325,"spoilers":18326,"trooper":18327,"gins":18328,"cory":18329,"lookout":18330,"equipped":18331,"tape":18332,"toby":18333,"researcher":18334,"ush":18335,"keyes":18336,"alma":18337,"induction":18338,"kw":18339,"khar":18340,"slick":18341,"bride":18342,"eur":18343,"craving":18344,"bookings":18345,"ches":18346,"trunk":18347,"vernon":18348,"spher":18349,"crystals":18350,"relatively":18351,"pompe":18352,"unions":18353,"valley":18354,"para":18355,"want":18356,"okc":18357,"deaf":18358,"sergio":18359,"lennon":18360,"shay":18361,"cra":18362,"vat":18363,"hee":18364,"twe":18365,"liquid":18366,"poly":18367,"ðŁİģ":18368,"bent":18369,"bearing":18370,"motorsport":18371,"barbe":18372,"testi":18373,"hani":18374,"financing":18375,"astronaut":18376,"watercolour":18377,"rish":18378,"comiccon":18379,"gart":18380,"wrong":18381,"bern":18382,"itan":18383,"stepped":18384,"filters":18385,"clow":18386,"mex":18387,"demons":18388,"allo":18389,"expanded":18390,"command":18391,"eters":18392,"goats":18393,"siri":18394,"yr":18395,"pottery":18396,"marion":18397,"ile":18398,"elan":18399,"santo":18400,"persona":18401,"duke":18402,"homeless":18403,
"lighted":18404,"wheeler":18405,"changer":18406,"cabbage":18407,"surreal":18408,"hamburg":18409,"smashed":18410,"stran":18411,"knot":18412,"iart":18413,"obi":18414,"bedro":18415,"dial":18416,"thick":18417,"bingo":18418,"fus":18419,"vacuum":18420,"conve":18421,"ative":18422,"accuracy":18423,"account":18424,"refer":18425,"riz":18426,"spiderman":18427,"bana":18428,"rite":18429,"ub":18430,"abs":18431,"medical":18432,"link":18433,"siem":18434,">>>>":18435,"betra":18436,"glowing":18437,"reactions":18438,"puppet":18439,"spaghetti":18440,"angs":18441,"remedi":18442,"prayfor":18443,"royce":18444,"charlotte":18445,"£ï¸ı":18446,"ghet":18447,"affecting":18448,"rode":18449,"socialist":18450,"moses":18451,"azi":18452,"oit":18453,"reporters":18454,"cdt":18455,"aping":18456,"snat":18457,"minimal":18458,"waist":18459,"siege":18460,">>>>":18461,"rig":18462,"schmidt":18463,"hare":18464,"eca":18465,"thorn":18466,"hemp":18467,"esthe":18468,"clyde":18469,"tha":18470,"donut":18471,"mohamed":18472,"lingerie":18473,"legg":18474,"carpenter":18475,"performers":18476,"dea":18477,"imagined":18478,"curse":18479,"lash":18480,"ctr":18481,"agua":18482,"roar":18483,"gri":18484,"role":18485,"jfk":18486,"resurrec":18487,"roosevelt":18488,"marilyn":18489,"smalle":18490,"willis":18491,"waited":18492,"charities":18493,"theres":18494,"lik":18495,"original":18496,"cari":18497,"cough":18498,"cruci":18499,"lagun":18500,"contrast":18501,"kou":18502,"armour":18503,"removing":18504,"tent":18505,"mazda":18506,"brighter":18507,"thief":18508,"corner":18509,"tequila":18510,"buzzing":18511,"albi":18512,"pam":18513,"azure":18514,"discoun":18515,"pixelart":18516,"possibility":18517,"hamont":18518,"trades":18519,"buda":18520,"hive":18521,"versy":18522,"finch":18523,"transpa":18524,"emi":18525,"terrifying":18526,"inqui":18527,"gba":18528,"substitu":18529,"collecti":18530,"placing":18531,"cindy":18532,"kann":18533,"patho":18534,"diamond":18535,"mourinho":18536,"guinea":18537,"anthropo":18538,"airs":18539,"pumps":18540,"ìļ":18541,"paso":18542,"curling":18543,"anita":18544,"residency":18545,"newh":18546,"joon":18547,"cigarette":18548,"queue":18549,"extrac":18550,"games":18551,"splen":18552,"express":18553,"publicly":18554,"bonnie":18555,"tribune":18556,"baek":18557,"reasonable":18558,"cor":18559,"timothy":18560,"sheeran":18561,"ı":18562,"fdn":18563,"sutton":18564,"concentration":18565,"caravan":18566,"xavier":18567,"alger":18568,"cylin":18569,"frederick":18570,"nerve":18571,"peak":18572,"lettuce":18573,"jail":18574,"pregame":18575,"kavan":18576,"upgraded":18577,"ecology":18578,"squadron":18579,"grapes":18580,"goog":18581,"pastry":18582,"ðŁĹ£":18583,"ãĥ¼ãĥ":18584,"milano":18585,"awaz":18586,"presenter":18587,"ðŁĮ¿":18588,"herd":18589,"kings":18590,"template":18591,"flour":18592,"hv":18593,"kley":18594,"iya":18595,"spec":18596,"ater":18597,"frankfurt":18598,"coch":18599,"texting":18600,"deli":18601,"communist":18602,"regiment":18603,"eleanor":18604,"anticipated":18605,"ðŁijĮðŁı»":18606,"thephotohour":18607,"rano":18608,"surviving":18609,"simulation":18610,"dawson":18611,"arin":18612,"aqua":18613,"mor":18614,"âĢ¦.":18615,"cino":18616,"iraqi":18617,"shaz":18618,"dundee":18619,"wes":18620,"drau":18621,"hannah":18622,"snews":18623,"occupation":18624,"steen":18625,"xm":18626,"angles":18627,"settings":18628,"guru":18629,"knox":18630,"orca":18631,"shaping":18632,"went":18633,"drilling":18634,"zzie":18635,"bri":18636,"kissing":18637,"find":18638,"maine":18639,"âŃIJï¸ıâŃIJï¸ı":18640,"ðŁĮį":18641,"larry":18642,"busted":18643,"tavern":18644,"actively":18645,"-
\"":18646,"replacing":18647,"nod":18648,"unlock":18649,".\"":18650,"âŀ¤":18651,"affiliate":18652,"tow":18653,"ln":18654,"happynewyear":18655,"dif":18656,"jm":18657,"greenwich":18658,"controversy":18659,"dawg":18660,"condol":18661,"savannah":18662,"compensation":18663,"touchdown":18664,"teo":18665,"ambitious":18666,"embroi":18667,"convicted":18668,"iartg":18669,"barack":18670,"trance":18671,"testimony":18672,"audition":18673,"thumb":18674,"myths":18675,"bex":18676,"quez":18677,"orchid":18678,"deny":18679,"entitled":18680,"hood":18681,"grant":18682,"inbox":18683,"bluejays":18684,"rilla":18685,"smallest":18686,"burden":18687,"infamous":18688,"divided":18689,"boundaries":18690,"tter":18691,"elt":18692,"wyoming":18693,"beverage":18694,"mesm":18695,"onews":18696,"buddhist":18697,"yana":18698,"assad":18699,"isms":18700,"barrett":18701,"predicted":18702,"backto":18703,"twit":18704,"ethere":18705,"captains":18706,"escaped":18707,"ayo":18708,"lamborgh":18709,"gardner":18710,"laps":18711,"kal":18712,"advertisement":18713,"insects":18714,"napo":18715,"amen":18716,"acy":18717,"rand":18718,"gk":18719,"teh":18720,"kathle":18721,"tridge":18722,"pancake":18723,"atro":18724,"pyramid":18725,"bula":18726,"paralym":18727,"gauge":18728,"encies":18729,"tomy":18730,"biscuit":18731,"butcher":18732,"qualifier":18733,"county":18734,"kei":18735,"pools":18736,"darker":18737,"shoulders":18738,"ðŁĩºðŁĩ¸ðŁĩºðŁĩ¸":18739,"spre":18740,"(\"":18741,"writers":18742,"gm":18743,"ðŁİĵ":18744,"knit":18745,"huff":18746,"mtb":18747,"phillies":18748,"ost":18749,"denis":18750,"gart":18751,"licensed":18752,"interface":18753,"excel":18754,"dwell":18755,"fromthe":18756,"cofficial":18757,"azzi":18758,"appearing":18759,"forest":18760,"nana":18761,"keith":18762,"manufacturers":18763,"beckham":18764,")?":18765,"ese":18766,"colony":18767,"delicate":18768,"utter":18769,"mcin":18770,"transplant":18771,"preferred":18772,"pard":18773,"arie":18774,"hub":18775,"pods":18776,"perspectives":18777,"pict":18778,"delu":18779,"apper":18780,"bethan":18781,"pmo":18782,"criminals":18783,"feminism":18784,"shack":18785,"circumstances":18786,"fellas":18787,"protesting":18788,"wax":18789,"suggested":18790,"tator":18791,"drew":18792,"omni":18793,"fake":18794,"kathy":18795,"reb":18796,"deline":18797,"berni":18798,"misty":18799,"ðŁij©":18800,"erable":18801,"breakthrough":18802,"menswear":18803,"millennials":18804,"chanyeol":18805,"laz":18806,"insert":18807,"replies":18808,"phrase":18809,"nx":18810,"iheartawards":18811,"audrey":18812,"granite":18813,"racec":18814,"orie":18815,"terra":18816,"innovations":18817,"brittany":18818,"ateral":18819,"pear":18820,"biological":18821,"shments":18822,"institution":18823,"msn":18824,"frequency":18825,"dman":18826,"neglec":18827,"tf":18828,"stefan":18829,"foxnews":18830,"typo":18831,"comms":18832,"sequence":18833,"carmen":18834,"whites":18835,"economist":18836,"exeter":18837,"seum":18838,"resorts":18839,"casually":18840,"bunde":18841,"divide":18842,"ع":18843,"gag":18844,"creed":18845,"retire":18846,"caucus":18847,"rapids":18848,"wrestlemania":18849,"tulsa":18850,"sunderland":18851,"fundament":18852,"odi":18853,"yamaha":18854,"vary":18855,"intrigu":18856,"else":18857,"beacon":18858,"angie":18859,"traded":18860,"transm":18861,"gents":18862,"knitting":18863,"galac":18864,"ðĿĹ":18865,"uto":18866,"seaside":18867,"holt":18868,"rers":18869,"fargo":18870,"trainers":18871,"monsoon":18872,"bale":18873,"sought":18874,"maddie":18875,"hw":18876,"coli":18877,"fran":18878,"favs":18879,"ðŁĴĶ":18880,"intent":18881,"rally":18882,"sbs":18883,"lemonad
e":18884,"barackobama":18885,"bread":18886,"sticky":18887,"explosive":18888,"chelten":18889,"tj":18890,"assoc":18891,"ramen":18892,"homies":18893,"vlog":18894,"mister":18895,"lord":18896,"âĢįâĻĢï¸ı":18897,"alyssa":18898,"sketchbook":18899,"rumble":18900,"catch":18901,"migrant":18902,"discipline":18903,"unlikely":18904,"chronicles":18905,"flora":18906,"slams":18907,"amid":18908,"sboro":18909,"coop":18910,"jumps":18911,"tranqu":18912,"melis":18913,"sofia":18914,"enri":18915,"gabe":18916,"syri":18917,"nicolas":18918,"chai":18919,"wv":18920,"becky":18921,"footy":18922,"tao":18923,"suppose":18924,"ðŁĺįðŁĺįðŁĺįðŁĺį":18925,"plush":18926,"rish":18927,"ðŁ¤ĵ":18928,"kha":18929,"saturdays":18930,"accent":18931,"hec":18932,"limit":18933,"carlton":18934,"wired":18935,"taylorswift":18936,"ðŁĺij":18937,"sql":18938,"harro":18939,"recipients":18940,"gat":18941,"gop":18942,"thof":18943,"amazed":18944,"ghan":18945,"ðŁıĨðŁıĨ":18946,"porto":18947,"clare":18948,"distant":18949,"nac":18950,"ohio":18951,"ðŁĻıðŁı¼":18952,"mtn":18953,"antibio":18954,"dinosa":18955,"mesa":18956,"partial":18957,"bv":18958,"learnt":18959,"lovato":18960,"question":18961,"extract":18962,"gossip":18963,"gibb":18964,"niagara":18965,"ðŁij¨":18966,"displayed":18967,"sooner":18968,"stevie":18969,"nuggets":18970,"mln":18971,"brom":18972,"turb":18973,"giveaways":18974,"stupi":18975,"blink":18976,"cili":18977,"convenient":18978,"moh":18979,"vive":18980,"fric":18981,"cause":18982,"chamber":18983,"cules":18984,"nearest":18985,"isse":18986,"smallbiz":18987,"tj":18988,"canadians":18989,"smarter":18990,"brasil":18991,"rare":18992,"quette":18993,"wha":18994,"candle":18995,"atomic":18996,"ðŁijįðŁijį":18997,"warrior":18998,"relaxed":18999,"strips":19000,"neur":19001,"kka":19002,"rfc":19003,"jensen":19004,"recovering":19005,"responses":19006,"salam":19007,"orthodox":19008,"active":19009,"ellers":19010,"nit":19011,"âŃIJ":19012,"metropolitan":19013,"centuries":19014,"vida":19015,"grading":19016,"transparent":19017,"simple":19018,"dots":19019,"superintendent":19020,"elevator":19021,"automated":19022,"redskins":19023,"imam":19024,"summertime":19025,"jonathan":19026,"gearing":19027,"michelle":19028,"conflic":19029,"mice":19030,"tote":19031,"publish":19032,"pax":19033,")-":19034,"nailed":19035,"á´":19036,"telescope":19037,"serbia":19038,"bab":19039,"apeu":19040,"stically":19041,"senti":19042,"rats":19043,"isolated":19044,"group":19045,"hatred":19046,"paranormal":19047,"stanley":19048,"alion":19049,"safety":19050,"ls":19051,"र":19052,"nexus":19053,"alexandra":19054,"masks":19055,"++":19056,"tron":19057,"auk":19058,"brotherhood":19059,"browse":19060,"mixes":19061,"simone":19062,"musk":19063,"approve":19064,"lola":19065,"exp":19066,"perth":19067,"futuri":19068,"unseen":19069,"dm":19070,"chelse":19071,"scouting":19072,"owe":19073,"portsmouth":19074,"kram":19075,"mize":19076,"dispen":19077,"sup":19078,"dlc":19079,"advert":19080,"teresa":19081,"isle":19082,"cycle":19083,"metall":19084,"shields":19085,"mariners":19086,"raz":19087,"ingen":19088,"fund":19089,"ango":19090,"jones":19091,"oka":19092,"madden":19093,"broccoli":19094,"dominic":19095,"situations":19096,"mero":19097,"cricke":19098,"punishment":19099,"db":19100,"shaking":19101,"ðŁĺļ":19102,"mq":19103,"arians":19104,"leh":19105,"claw":19106,"weds":19107,"dure":19108,"niel":19109,"jelly":19110,"gourmet":19111,"traders":19112,"levi":19113,"wages":19114,"knees":19115,"wise":19116,"heavenly":19117,"avid":19118,"melody":19119,"zack":19120,"bananas":19121,"apprentice":19122,"prop":19123,"funny":19124,"ode":19125,"resp
ected":19126,"megan":19127,"fewer":19128,"drafted":19129,"medit":19130,"grape":19131,"usarmy":19132,"crusad":19133,"vocali":19134,"preparations":19135,"nonsense":19136,"usage":19137,"thr":19138,"roth":19139,"wizards":19140,"inside":19141,"promotions":19142,"mona":19143,"redsox":19144,"sig":19145,"elegance":19146,"chia":19147,"universal":19148,"ãĢį":19149,"raja":19150,"unga":19151,"pollin":19152,"filipino":19153,"aka":19154,"tsun":19155,"ikon":19156,"biking":19157,"decorations":19158,"zac":19159,"cadets":19160,"humour":19161,"agm":19162,"reppin":19163,"vaccin":19164,"elove":19165,"uw":19166,"diabe":19167,"gallagher":19168,"azer":19169,"dol":19170,"awhile":19171,"prominent":19172,"welsh":19173,"tann":19174,"')":19175,"bien":19176,"wag":19177,"inal":19178,"cwc":19179,"wicket":19180,"urst":19181,"qanon":19182,"xe":19183,"outdoor":19184,"dunn":19185,"starr":19186,"cology":19187,"ricky":19188,"uefa":19189,"rebounds":19190,"smusic":19191,"infant":19192,"ðŁĻĭ":19193,"sop":19194,"umber":19195,"handing":19196,"begin":19197,"sorting":19198,"hash":19199,"spati":19200,"rek":19201,"budapest":19202,"blackhawks":19203,"delete":19204,"rom":19205,"candid":19206,"authori":19207,"debris":19208,"specul":19209,"intersection":19210,"marriott":19211,"imran":19212,"ðŁĺģðŁĺģ":19213,"cruises":19214,"ramsey":19215,"rafael":19216,"awareness":19217,"vascular":19218,"beyoncé":19219,"rug":19220,"ðŁĺĮ":19221,"festiv":19222,"aram":19223,"sable":19224,"basil":19225,"pill":19226,"flooring":19227,"unbeaten":19228,"implications":19229,"uf":19230,"wound":19231,"forge":19232,"pointing":19233,"pots":19234,"popularity":19235,"ðŁijıðŁı»":19236,"manipul":19237,"slots":19238,"debates":19239,"absence":19240,"vermont":19241,"neverforget":19242,"wrist":19243,"gloria":19244,"rence":19245,"husk":19246,"melting":19247,"ðŁİŁ":19248,"braces":19249,"timely":19250,"transforming":19251,"amps":19252,"mak":19253,"poe":19254,"ahan":19255,"generally":19256,"ndp":19257,"aleppo":19258,"unicef":19259,"profs":19260,"nord":19261,"mask":19262,"jacksonville":19263,"vv":19264,"shells":19265,"blooming":19266,"operators":19267,"charcoal":19268,"neville":19269,"magi":19270,"chip":19271,"sama":19272,"iran":19273,"reforms":19274,"accumul":19275,"rue":19276,"æľ":19277,"websites":19278,"gaon":19279,"devastating":19280,"stos":19281,"glacier":19282,"rapp":19283,"chipotle":19284,"pra":19285,"orous":19286,"romney":19287,"season":19288,"decorative":19289,"cisco":19290,"ditch":19291,"complain":19292,"llo":19293,"assume":19294,"ðŁĺĤðŁĺĤðŁĺĤðŁĺĤðŁĺĤ":19295,"nels":19296,"centric":19297,"ftw":19298,"carrots":19299,"tata":19300,"canter":19301,"perience":19302,"liers":19303,"demos":19304,"blunt":19305,"operate":19306,"reservations":19307,"leah":19308,"substance":19309,"dison":19310,"ante":19311,"election":19312,"vue":19313,"square":19314,"nonprofit":19315,"caa":19316,"fsu":19317,"yam":19318,"ãĤ¤":19319,"vladi":19320,"completes":19321,"mari":19322,"phillip":19323,"neill":19324,"eras":19325,"kait":19326,"mendo":19327,"maharashtra":19328,"gp":19329,"dane":19330,"providence":19331,"therapeu":19332,"juvenile":19333,"memo":19334,"incorpor":19335,"aaaa":19336,"seventeen":19337,"teenager":19338,"ã":19339,"orns":19340,"wide":19341,"cuteness":19342,"twd":19343,"ffles":19344,"bara":19345,"comedy":19346,"overtime":19347,"yaz":19348,"baron":19349,"unemployment":19350,"ðŁijĭ":19351,"exterior":19352,"dense":19353,"centres":19354,"matchup":19355,"historymonth":19356,"artificial":19357,"quit":19358,"esk":19359,"warn":19360,"critic":19361,"jaf":19362,"ðŁĵ²":19363,"informative":19364,"fuels":19
365,"recycle":19366,"naming":19367,"stripe":19368,"solic":19369,"molecular":19370,"deepi":19371,"convo":19372,"ssel":19373,"nae":19374,"descent":19375,"tiz":19376,"accountability":19377,"terry":19378,"rito":19379,"slay":19380,"emo":19381,"demol":19382,"sensation":19383,"cov":19384,"tore":19385,"roundtable":19386,"yol":19387,"excuses":19388,"à¥į":19389,"turquo":19390,"hhhh":19391,"podcasts":19392,"celeb":19393,"messi":19394,"lio":19395,"mann":19396,"contributed":19397,"uz":19398,"generator":19399,"elets":19400,"veggie":19401,"indul":19402,"ensuring":19403,"detroit":19404,"punjab":19405,"transpor":19406,"instruction":19407,"add":19408,"porcel":19409,"paneli":19410,"circles":19411,"persist":19412,"clayton":19413,"spn":19414,"dogsoftwitter":19415,"isnt":19416,"spr":19417,"retailers":19418,"pw":19419,"hungar":19420,"elena":19421,"monaster":19422,"guatem":19423,"jessie":19424,"anz":19425,"rashi":19426,"flee":19427,"carving":19428,"faux":19429,"lal":19430,"henri":19431,"djo":19432,"dull":19433,"sana":19434,"lara":19435,"globe":19436,"crimson":19437,"compass":19438,"pause":19439,"nab":19440,"lionel":19441,"baths":19442,"ufo":19443,"inventory":19444,"singh":19445,"satan":19446,"ðŁĩ¸":19447,"cements":19448,"inform":19449,"generated":19450,"biden":19451,"avg":19452,"tasks":19453,"deer":19454,"sau":19455,"jailed":19456,"pastel":19457,"scc":19458,"nail":19459,"steele":19460,"peris":19461,"lamborghini":19462,"pursue":19463,"margin":19464,"uch":19465,"bosch":19466,"drain":19467,"clara":19468,"bom":19469,"latino":19470,"webster":19471,"rosemary":19472,"rha":19473,"soun":19474,"billionaire":19475,"notch":19476,"percentage":19477,"conor":19478,"'\"":19479,"homes":19480,"earthday":19481,"hort":19482,"biggest":19483,"disin":19484,"walton":19485,"editors":19486,"imma":19487,"omar":19488,"equivalent":19489,"pharmaceu":19490,"ahmed":19491,"cameo":19492,"hanni":19493,"underrated":19494,"gement":19495,"microbi":19496,"voo":19497,"honorable":19498,"obesity":19499,"âļ¡ï¸ı":19500,"limerick":19501,"involvement":19502,"stagram":19503,"boulevard":19504,"burg":19505,"blackandwhite":19506,"liberation":19507,"five":19508,"interim":19509,"smm":19510,"rivalry":19511,"capabilities":19512,"statements":19513,"thumb":19514,"ved":19515,"swans":19516,"barber":19517,"eque":19518,"serena":19519,"helm":19520,"noodle":19521,"sampling":19522,"nawaz":19523,"single":19524,"thunderstorms":19525,"shon":19526,"inev":19527,"ë¯":19528,"topp":19529,"orchard":19530,"bian":19531,"ðŁĺĶ":19532,"doorstep":19533,"salvation":19534,"marketing":19535,"rons":19536,"clemson":19537,"ravi":19538,"intake":19539,"standwith":19540,"sina":19541,"haiku":19542,"pley":19543,"electoral":19544,"philly":19545,"lays":19546,"electric":19547,"capturing":19548,"upp":19549,"ergy":19550,"believing":19551,"cultures":19552,"esday":19553,"invasive":19554,"eded":19555,"speech":19556,"endur":19557,"vietnam":19558,"boycott":19559,"pede":19560,"deliver":19561,"ðŁĴĸðŁĴĸ":19562,"merchant":19563,"stir":19564,"denies":19565,"pockets":19566,"oti":19567,"cuddle":19568,"roland":19569,"mmed":19570,"dened":19571,"learners":19572,"hoop":19573,"sourcing":19574,"hacked":19575,"dim":19576,"environments":19577,"benson":19578,"judicial":19579,"worcester":19580,"pearls":19581,"governments":19582,"arrivals":19583,"corners":19584,"tuning":19585,"labour":19586,"ym":19587,"ordering":19588,"lewi":19589,"ife":19590,"hygiene":19591,"thoughtful":19592,"indonesian":19593,"campaigning":19594,"principle":19595,"assaul":19596,"rubb":19597,"atv":19598,"willy":19599,"entre":19600,"ili":19601,"phon":19602,"dut
ies":19603,"âĻ¥âĻ¥":19604,"snakes":19605,"loop":19606,"amar":19607,"convertible":19608,"bonding":19609,"mentoring":19610,"maxwell":19611,"ethereum":19612,"destroying":19613,"axis":19614,"cairo":19615,"finnish":19616,"shock":19617,"ðŁĺIJ":19618,"caleb":19619,"coma":19620,"pedal":19621,"core":19622,"continent":19623,"elson":19624,"tempo":19625,"helsinki":19626,"acp":19627,"tackling":19628,"stated":19629,"bla":19630,"doub":19631,"smashing":19632,"aja":19633,"cameron":19634,"disruption":19635,"warmth":19636,"beingsalmankhan":19637,"bulletin":19638,"ode":19639,"syracuse":19640,"aran":19641,"mcgregor":19642,"bulk":19643,"anton":19644,"confirmation":19645,"spine":19646,"imran":19647,"instruc":19648,"jacks":19649,"chio":19650,"palm":19651,"stre":19652,"embarrassing":19653,"unt":19654,"eliminate":19655,"toss":19656,"cise":19657,"aws":19658,"onists":19659,"shinee":19660,"jos":19661,"hose":19662,"lively":19663,"opponents":19664,"movements":19665,"recognizing":19666,"sandwiches":19667,"shakes":19668,"exercises":19669,"seat":19670,"profession":19671,"merrychristmas":19672,"lugg":19673,"adoptdont":19674,"marvin":19675,"byrne":19676,"unle":19677,"het":19678,"kuwait":19679,"rahman":19680,"aspect":19681,"humbled":19682,"genes":19683,"fand":19684,"longtime":19685,");":19686,"campu":19687,"angus":19688,"ðŁijįðŁı¼":19689,"quran":19690,"sleeves":19691,"slic":19692,"¸ë":19693,"twelve":19694,"youre":19695,"ike":19696,"gogh":19697,"bst":19698,"dictionary":19699,"reflecting":19700,"toon":19701,"yarn":19702,"embed":19703,"ðŁı´":19704,"reserves":19705,"flooded":19706,"veriz":19707,"dusk":19708,"establish":19709,"proli":19710,"aud":19711,"ritual":19712,"orbit":19713,"declaration":19714,"recordings":19715,"camo":19716,"cassette":19717,"goodluck":19718,"cutter":19719,"bop":19720,"bho":19721,"cheating":19722,"pacific":19723,"mares":19724,"timer":19725,"colt":19726,"trous":19727,"tomorrow":19728,"hansen":19729,"cie":19730,"wang":19731,"bani":19732,"circular":19733,"acute":19734,"farmer":19735,"coys":19736,"pse":19737,"irving":19738,"wj":19739,"hawkins":19740,"bison":19741,"urday":19742,"cruising":19743,"ote":19744,"kath":19745,"whistle":19746,"yourselves":19747,"antis":19748,"slash":19749,"thoroughly":19750,"kesh":19751,"serie":19752,"exem":19753,"enig":19754,"guild":19755,"shred":19756,"hogan":19757,"apo":19758,"ä¸":19759,"puzz":19760,"netball":19761,"aussi":19762,"panorama":19763,"wsj":19764,"avis":19765,"arming":19766,"humph":19767,"browser":19768,"cries":19769,"foggy":19770,"matte":19771,"ðŁĮ»":19772,"iter":19773,"tallest":19774,"byron":19775,"captiv":19776,"jesu":19777,"anyways":19778,"flagship":19779,"pton":19780,"wey":19781,"fayette":19782,"financial":19783,"foul":19784,"solomon":19785,"jennifer":19786,"cucumber":19787,"argue":19788,"textile":19789,"wrestler":19790,"johnston":19791,"pastor":19792,"ðŁĺŃðŁĺŃðŁĺŃðŁĺŃ":19793,"cactus":19794,"edible":19795,"reserved":19796,"richie":19797,"metres":19798,"ingredient":19799,"hella":19800,"unto":19801,"chol":19802,"celebs":19803,"poets":19804,"graham":19805,"hayden":19806,"coincidence":19807,"baw":19808,"communicate":19809,"fletcher":19810,"/-":19811,"toledo":19812,"ecuador":19813,"counsel":19814,"slaughter":19815,"linear":19816,"atp":19817,"osu":19818,"joel":19819,"eved":19820,"conquer":19821,"rustic":19822,"plicity":19823,"recognise":19824,"roommate":19825,"cracked":19826,"jasper":19827,"pher":19828,"ðŁĮº":19829,"woven":19830,"moist":19831,"ffc":19832,"steering":19833,"nish":19834,"standings":19835,"frequent":19836,"ardi":19837,"hazel":19838,"asmsg":19839,"baum":19840,"dar
t":19841,"sidd":19842,"nath":19843,"chero":19844,"cardboard":19845,"css":19846,"nsfw":19847,"pair":19848,"ðŁĺįðŁĺĺ":19849,"occurred":19850,"homelessness":19851,"malone":19852,"phe":19853,"xia":19854,"paddy":19855,"declare":19856,"theatre":19857,"bf":19858,"persian":19859,"tad":19860,"axe":19861,"suspicious":19862,"lamb":19863,"mucho":19864,"senior":19865,"stas":19866,"kite":19867,"sting":19868,"grad":19869,"kaf":19870,"watering":19871,"د":19872,"spiral":19873,"thms":19874,"educator":19875,"jerome":19876,"ofc":19877,"clock":19878,"sul":19879,"pemb":19880,".........":19881,"parkway":19882,"deaux":19883,"restrictions":19884,"mons":19885,"needle":19886,"ej":19887,"leagues":19888,"watermelon":19889,"aman":19890,"plenary":19891,"maxim":19892,"wab":19893,"comingsoon":19894,"bryce":19895,"vigil":19896,"supermarket":19897,"fortunate":19898,"turquoise":19899,"president":19900,"liv":19901,"interns":19902,"feelin":19903,"fixtures":19904,"stunt":19905,"staged":19906,"premieres":19907,"lok":19908,"practiti":19909,"shortage":19910,"logne":19911,"vec":19912,"concor":19913,"rocke":19914,"lig":19915,"composed":19916,"synthetic":19917,"dip":19918,"camila":19919,"chis":19920,"jou":19921,"susan":19922,"eyebrows":19923,"supplement":19924,"satisfaction":19925,"mohammad":19926,"tibet":19927,"houseof":19928,"pun":19929,"assam":19930,"shadowhun":19931,"psyched":19932,"seduc":19933,"mandatory":19934,"herbert":19935,"scallo":19936,"streamers":19937,"protocol":19938,"blockbuster":19939,"produces":19940,"schnei":19941,"laurel":19942,"tribe":19943,"timehop":19944,"pla":19945,"modelling":19946,"tvtime":19947,"mtvstars":19948,"widow":19949,"metric":19950,"cham":19951,"condo":19952,"flowering":19953,"alec":19954,"dms":19955,"intensity":19956,"¨":19957,"mccartney":19958,"islamabad":19959,"kb":19960,"ffi":19961,"phal":19962,"analog":19963,"fond":19964,"hacks":19965,"positivity":19966,"treaty":19967,"submarine":19968,"connect":19969,"selen":19970,"categories":19971,"cub":19972,"organize":19973,"sik":19974,"quoteoftheday":19975,"reminding":19976,"amor":19977,"locking":19978,"ðŁijıðŁı¼":19979,"compound":19980,"ette":19981,"bout":19982,"recur":19983,"ference":19984,"mizz":19985,"trend":19986,"hipster":19987,"fortress":19988,"forthcoming":19989,"prelimin":19990,"odyssey":19991,"angp":19992,"delici":19993,"evenings":19994,"ðŁĶ¹":19995,"iq":19996,"dw":19997,"dair":19998,"kathryn":19999,"christianity":20000,"moonlight":20001,"hab":20002,"whoo":20003,"fbf":20004,"seth":20005,"genuinely":20006,"pax":20007,"charity":20008,"deployed":20009,"bnb":20010,"bucs":20011,"judg":20012,"conge":20013,"plantation":20014,"impress":20015,"cara":20016,"sclub":20017,"scopy":20018,"landers":20019,"complaints":20020,"bama":20021,"rebuild":20022,"xy":20023,"realism":20024,"shour":20025,"lein":20026,"bracelets":20027,"mera":20028,"assassin":20029,"anchor":20030,"ðŁijĮðŁı¼":20031,"linen":20032,"confron":20033,"chronicle":20034,"comment":20035,"catalog":20036,"illes":20037,"gorge":20038,"metry":20039,"jungkook":20040,"lovemy":20041,"sentin":20042,"seem":20043,"fitness":20044,"allied":20045,"tsman":20046,"digitaltransformation":20047,"pran":20048,"loft":20049,"minton":20050,"aldenrichards":20051,"envel":20052,"cherish":20053,"certainty":20054,"zzz":20055,"rhino":20056,"perkins":20057,"enrich":20058,"capetown":20059,"ometer":20060,"sections":20061,"skeleton":20062,"defenders":20063,"ðŁĺĿ":20064,"penc":20065,"brit":20066,"jah":20067,"capitalism":20068,"ðŁ¥ĩ":20069,"bazaar":20070,"reme":20071,"ext":20072,"kkk":20073,"convert":20074,"stormy":20075,"bye":20076,"ka
ran":20077,"chrysler":20078,"ados":20079,"pressed":20080,"sync":20081,"ationday":20082,"danger":20083,"badges":20084,"refuses":20085,"empowering":20086,"lym":20087,"exports":20088,"adoptdontshop":20089,"ðŁĩ¯":20090,"thc":20091,"awaited":20092,"focuses":20093,"fined":20094,"oat":20095,"hahahah":20096,"âģ©":20097,"nfamily":20098,"fiona":20099,"luckily":20100,"thrilling":20101,"typing":20102,"outbreak":20103,"dies":20104,"heu":20105,"crawl":20106,"nesses":20107,"oath":20108,"scripts":20109,"geeks":20110,"ðŁIJĿ":20111,"pb":20112,"mathematics":20113,"alis":20114,"________________":20115,"gymnastics":20116,"activism":20117,"recommendation":20118,"gren":20119,"wain":20120,"courty":20121,"napol":20122,"cauli":20123,"hornets":20124,"gals":20125,"jockey":20126,"dirty":20127,"atar":20128,"enormous":20129,"pest":20130,"gregation":20131,"anos":20132,"iiii":20133,"defends":20134,"blackhistorymonth":20135,"atx":20136,"mbc":20137,"luggage":20138,"witch":20139,"cob":20140,"lasts":20141,"cum":20142,"ggg":20143,"bathing":20144,"nar":20145,"cebu":20146,"ðŁįĥ":20147,"navigation":20148,"mine":20149,"rejo":20150,"ðŁİĢ":20151,"giftide":20152,"reta":20153,"useless":20154,"pull":20155,"deficit":20156,"allu":20157,"atime":20158,"itv":20159,"trillion":20160,"pue":20161,"acies":20162,"procedure":20163,"lori":20164,"jenny":20165,"cad":20166,"ulously":20167,"drac":20168,"promotes":20169,"ingthe":20170,"canu":20171,"woohoo":20172,"naomi":20173,"zardari":20174,"tsu":20175,"beir":20176,"sdg":20177,"lever":20178,"weber":20179,"abud":20180,"lund":20181,"crowded":20182,"deployment":20183,"terrain":20184,"kenny":20185,"hof":20186,"witnessed":20187,"loch":20188,"jk":20189,"bully":20190,"wren":20191,"poetry":20192,"doff":20193,"wwi":20194,"mored":20195,"dini":20196,"culture":20197,"prompt":20198,"Â¥":20199,"maurice":20200,"topps":20201,"rm":20202,"correspon":20203,"about":20204,"jewels":20205,"gibr":20206,"eagle":20207,"ðŁĺĺðŁĺĺðŁĺĺ":20208,"lending":20209,"souven":20210,"çĶ":20211,"contemporaryart":20212,"establishment":20213,"jong":20214,"âĢ¦\"":20215,"gator":20216,"patriotic":20217,"mccoy":20218,"vape":20219,"humane":20220,"feliz":20221,"coachella":20222,"reposting":20223,"steals":20224,"fuller":20225,"nering":20226,"atra":20227,"(-":20228,"blake":20229,"heather":20230,"worms":20231,"disciplinary":20232,"redemption":20233,"yard":20234,"amin":20235,"\"@_":20236,"dnc":20237,"tds":20238,"kappa":20239,"newark":20240,"commits":20241,"spears":20242,"jams":20243,"tand":20244,"msnbc":20245,"intermedi":20246,"aimed":20247,"atic":20248,"teenth":20249,"observation":20250,"kashmir":20251,"kavanaugh":20252,"oul":20253,"sanfrancisco":20254,"reu":20255,"belated":20256,"chow":20257,"password":20258,"stills":20259,"detained":20260,"sari":20261,"dayton":20262,"darren":20263,"italian":20264,"arth":20265,"amusic":20266,"arbit":20267,"wm":20268,"vm":20269,"hem":20270,"doug":20271,"myr":20272,"asho":20273,"prev":20274,"vind":20275,"brah":20276,"stag":20277,"ี":20278,"previews":20279,"guk":20280,"containing":20281,"leonardo":20282,"saddle":20283,"rushing":20284,"stav":20285,"longh":20286,"gambling":20287,"vegas":20288,"reservation":20289,"endale":20290,"bala":20291,"fla":20292,"variant":20293,"hedge":20294,"bulgaria":20295,"natali":20296,"weaver":20297,"solst":20298,"encouraged":20299,"apc":20300,"asparag":20301,"nest":20302,"cyclists":20303,"fel":20304,"ìĬ¤":20305,"overwhelming":20306,"peyton":20307,"jit":20308,"apost":20309,"mble":20310,"bleeding":20311,"neighbourhood":20312,"avery":20313,"expressions":20314,"macdonald":20315,"gigs":20316,"monds":2
0317,"illusion":20318,"nct":20319,"camero":20320,"overhead":20321,"myth":20322,"oly":20323,"vio":20324,"etv":20325,"laurie":20326,"unveiling":20327,"prior":20328,"conn":20329,"ironman":20330,"diff":20331,"dayin":20332,"critici":20333,"congo":20334,"revision":20335,"wale":20336,"director":20337,"pines":20338,"blackpink":20339,"garner":20340,"curated":20341,"manitoba":20342,"hac":20343,"commonly":20344,"barton":20345,"....#":20346,"mortality":20347,"livesmatter":20348,"philosop":20349,"shorter":20350,"convince":20351,"freak":20352,"vendors":20353,"insightful":20354,"elly":20355,"sensors":20356,"eled":20357,"sberg":20358,"weightloss":20359,"ukip":20360,"spur":20361,"private":20362,"qua":20363,"ssc":20364,",...":20365,"supervisor":20366,"adviser":20367,"amazingly":20368,"lesser":20369,"ates":20370,"mahon":20371,"oooooo":20372,"saras":20373,"pmoindia":20374,"waffle":20375,"unders":20376,"tolerance":20377,"sculptures":20378,"hersh":20379,"knocking":20380,"smoke":20381,"catholic":20382,"grim":20383,"traveled":20384,"flip":20385,"geoff":20386,"dinosaurs":20387,"slept":20388,"scarlet":20389,"oki":20390,"complaint":20391,"obsc":20392,"nami":20393,"lag":20394,"crossfit":20395,"ufc":20396,"mccain":20397,"referee":20398,"sadness":20399,"penny":20400,"lieu":20401,"mode":20402,"kier":20403,"vols":20404,"wis":20405,"elon":20406,"shea":20407,"bao":20408,"sonia":20409,"claire":20410,"emmanuel":20411,"moisture":20412,"digest":20413,"viii":20414,"teller":20415,"chon":20416,"accessory":20417,"nightclub":20418,"fossil":20419,"awan":20420,"husky":20421,"aboriginal":20422,"brandon":20423,"fficient":20424,"cougars":20425,"sted":20426,"admitted":20427,"ignored":20428,"contentmarketing":20429,"agas":20430,"vase":20431,"executed":20432,"negotiations":20433,"shead":20434,"nand":20435,"tablets":20436,"goth":20437,"tsal":20438,"dfw":20439,"onep":20440,"protector":20441,"spho":20442,"gazette":20443,"andreas":20444,"sser":20445,"compilation":20446,"hav":20447,"containers":20448,"broker":20449,"socal":20450,"porcelain":20451,"hyuk":20452,"airing":20453,"ðŁĴ°":20454,"publisher":20455,"scenario":20456,"spartans":20457,"reviewing":20458,"itudes":20459,"edel":20460,"pearson":20461,"bash":20462,"maui":20463,"aad":20464,"ðŁĮĬ":20465,"liu":20466,"ulate":20467,"programmes":20468,"favour":20469,"webdesign":20470,"realty":20471,"motivational":20472,"crosses":20473,"'...":20474,"busch":20475,"adjustable":20476,"arjun":20477,"mistak":20478,"dimension":20479,"pistol":20480,"weighs":20481,"eny":20482,"unveil":20483,"indycar":20484,"gordon":20485,"fade":20486,"franken":20487,"qualities":20488,"bett":20489,"locate":20490,"kerr":20491,"spc":20492,"confusion":20493,"nee":20494,"lucky":20495,"bases":20496,"depends":20497,"firefighter":20498,"ola":20499,"ret":20500,"maroon":20501,"ðŁĶĬ":20502,"wam":20503,"defining":20504,"wheat":20505,"bil":20506,"és":20507,"bhai":20508,"psych":20509,"tau":20510,"icans":20511,"thik":20512,"obile":20513,"inspector":20514,"ìĨĮë":20515,"illon":20516,"gos":20517,"evangel":20518,"fai":20519,"sist":20520,"vocation":20521,"burge":20522,"chistan":20523,"renewed":20524,"enthusiasm":20525,"enting":20526,"agri":20527,"ikea":20528,"msc":20529,"aerospace":20530,"sensiti":20531,"memoir":20532,"hospice":20533,"cocaine":20534,"derry":20535,"mechanics":20536,"Ħà¸":20537,"tino":20538,"reduces":20539,"collectors":20540,"injustice":20541,"suppre":20542,"vana":20543,"abun":20544,"napa":20545,"susa":20546,"oslo":20547,"eff":20548,"encore":20549,"licence":20550,"cheddar":20551,"zal":20552,"mount":20553,"ðŁĴIJ":20554,"threatens":2055
5,"!!\"":20556,"archie":20557,"futsal":20558,"scuba":20559,"jos":20560,"gnon":20561,"sexi":20562,"sofficial":20563,"comparing":20564,"dominant":20565,"toftheday":20566,"fait":20567,"proposals":20568,"gift":20569,"yas":20570,"cnc":20571,"lr":20572,"hab":20573,"reservoir":20574,"beliefs":20575,"general":20576,"marti":20577,"td":20578,"este":20579,"ìł":20580,"wil":20581,"ðŁij¯":20582,"ðŁĶ«":20583,"spx":20584,"etwork":20585,"excerpt":20586,"einstein":20587,"hiro":20588,"silhou":20589,"teamed":20590,"perception":20591,"corridor":20592,"mentalhealth":20593,"hints":20594,"benny":20595,"inducted":20596,"swx":20597,"widesp":20598,"speak":20599,"cheryl":20600,"drug":20601,"ðŁĺķ":20602,"hf":20603,"asparagus":20604,"mysteries":20605,"fitzgerald":20606,"offer":20607,"therapist":20608,"career":20609,"damaging":20610,"tsd":20611,"peru":20612,"weibo":20613,"yay":20614,"phoenix":20615,"discre":20616,"macbook":20617,"barker":20618,"stigma":20619,"spread":20620,"rockies":20621,"kangar":20622,"bridg":20623,"pai":20624,"bishop":20625,"tailed":20626,"capsule":20627,"ðŁĴĵ":20628,"geof":20629,"royale":20630,"shortlisted":20631,"oste":20632,"ashamed":20633,"chapp":20634,"keye":20635,"cla":20636,"screenshot":20637,"austrian":20638,"native":20639,"enight":20640,"juliet":20641,"michele":20642,"ðŁĮ´":20643,"travelers":20644,"pil":20645,"footballer":20646,"winchester":20647,"ðŁĻĦ":20648,"azerbai":20649,"goldeng":20650,"organisations":20651,"interpretation":20652,"predator":20653,"oftheweek":20654,"logan":20655,"poké":20656,"marie":20657,"calla":20658,"tnt":20659,"cinde":20660,"getic":20661,"fitfam":20662,"grav":20663,"owens":20664,"ðŁĮ±":20665,"shootout":20666,"salis":20667,"commissions":20668,"cohe":20669,"ptic":20670,"nixon":20671,"hia":20672,"ambition":20673,"marine":20674,"cruelty":20675,"tk":20676,"crude":20677,"salty":20678,"jima":20679,"mongo":20680,"irony":20681,"onwards":20682,"arrests":20683,"strangers":20684,"iger":20685,"cyclist":20686,"rag":20687,"extends":20688,"tradio":20689,"bourg":20690,"moi":20691,"ella":20692,"eable":20693,"lexus":20694,"aul":20695,"dera":20696,"historian":20697,"morton":20698,"tiff":20699,"manner":20700,"kot":20701,"dk":20702,"pointed":20703,"marqu":20704,"aan":20705,"eney":20706,"dublin":20707,"onpoli":20708,"emili":20709,"secret":20710,"flo":20711,"âļ¡":20712,"baj":20713,"steep":20714,"accompanied":20715,"rumours":20716,"devi":20717,"purchasing":20718,"fig":20719,"pub":20720,"schoo":20721,"autonomous":20722,"goalie":20723,"xia":20724,"automatically":20725,"revers":20726,"tero":20727,"fuku":20728,"titanic":20729,"shook":20730,"sandals":20731,"seekers":20732,"excav":20733,"nordic":20734,"bigolive":20735,"bake":20736,"ratt":20737,"zak":20738,"nep":20739,"ðŁĺ¤":20740,"candy":20741,"billions":20742,"bookworm":20743,"ppet":20744,"à³":20745,"surfaces":20746,"scars":20747,"philip":20748,"dogg":20749,"cigars":20750,"cote":20751,"translated":20752,"curator":20753,"sindh":20754,"hangover":20755,"brewer":20756,"ones":20757,"elton":20758,"ðŁĴªðŁı¼":20759,"marcu":20760,"elliot":20761,"righte":20762,"dioce":20763,"russ":20764,"railways":20765,"grandson":20766,"ascen":20767,"apology":20768,"await":20769,"mobili":20770,"respir":20771,"partisan":20772,"olivi":20773,"strike":20774,"yoo":20775,"whitehouse":20776,"expressed":20777,"pups":20778,"bedford":20779,"cultur":20780,"frogs":20781,"flying":20782,"cavali":20783,"cds":20784,"friger":20785,"streetphotography":20786,"resolve":20787,"taliban":20788,"kang":20789,"crushing":20790,"jum":20791,"ðŁĺĴ":20792,"williamson":20793,"tang":20794,"curly":20795,"t
man":20796,"veteran":20797,"faire":20798,"artificialintelligence":20799,"unanim":20800,"pren":20801,"backdrop":20802,"frances":20803,"occer":20804,"dorothy":20805,"working":20806,"arthr":20807,"converted":20808,"daylight":20809,"servant":20810,"paddle":20811,"complaining":20812,"thirty":20813,"nadal":20814,"aku":20815,"ibrahim":20816,"addressed":20817,"piss":20818,"greenhouse":20819,"battalion":20820,"simulator":20821,"outlets":20822,"embroidery":20823,"ðŁĵ±":20824,"fiscal":20825,"gerard":20826,"sassy":20827,"ðŁİīðŁİīðŁİī":20828,"ventures":20829,"merit":20830,"publicity":20831,"ðŁijĪ":20832,"sophisticated":20833,"ctu":20834,"conventional":20835,"condolences":20836,"israel":20837,"tradition":20838,"aran":20839,"tess":20840,"glad":20841,"ðŁĺĬðŁĺĬ":20842,"correction":20843,"geon":20844,"amd":20845,"orship":20846,"beast":20847,"chment":20848,"ìŀ":20849,"nico":20850,"wknd":20851,"wels":20852,"cushion":20853,"belie":20854,"voc":20855,"idiots":20856,"underneath":20857,"puma":20858,"cornell":20859,"enation":20860,"lul":20861,"swach":20862,"abig":20863,"urer":20864,"mie":20865,"formerly":20866,"caf":20867,"ernal":20868,"chorus":20869,"julius":20870,"senator":20871,"âľį":20872,"whir":20873,"salvador":20874,"phd":20875,"unified":20876,"booster":20877,"graphical":20878,"wrec":20879,"sonny":20880,"miz":20881,"derers":20882,"sall":20883,"vens":20884,"tuscany":20885,"wid":20886,"yong":20887,"kurds":20888,"waz":20889,"trolls":20890,"macro":20891,"caturday":20892,"pressing":20893,"sasha":20894,"centennial":20895,"gusts":20896,"emc":20897,"before":20898,"denise":20899,"cust":20900,"ðŁĵ¢":20901,"looo":20902,"basel":20903,"england":20904,"yolo":20905,"ardu":20906,"manifesto":20907,"doha":20908,"ìľ":20909,"knives":20910,"bournemouth":20911,"bibl":20912,"barb":20913,"alicia":20914,"Ø©":20915,"comer":20916,"cyclone":20917,"git":20918,"anews":20919,"characteri":20920,"ventura":20921,"intra":20922,"sfgiants":20923,"hut":20924,"bea":20925,"darwin":20926,"eller":20927,"alv":20928,"reese":20929,"bly":20930,"karan":20931,"conclusion":20932,"manny":20933,"flakes":20934,"uniteblue":20935,"nadu":20936,"copp":20937,"edges":20938,"lancashire":20939,"ials":20940,"otta":20941,"philippe":20942,"lent":20943,"chee":20944,"mentors":20945,"festival":20946,"anism":20947,"complimentary":20948,"rj":20949,"pug":20950,"dine":20951,"wei":20952,"cliffs":20953,"sarmy":20954,"tiveness":20955,"treasury":20956,"iland":20957,"aftermath":20958,"rabbi":20959,"oun":20960,"bouquet":20961,"heritage":20962,"zion":20963,"surrender":20964,"shenan":20965,"inks":20966,"karl":20967,"ghty":20968,"policing":20969,"examination":20970,"cey":20971,"persu":20972,"measurement":20973,"hydrogen":20974,"luhan":20975,"âłĢâłĢâłĢâłĢ":20976,"wari":20977,"оÐ":20978,"jy":20979,"fowler":20980,"mish":20981,"alfre":20982,"âĺij":20983,"bbnaija":20984,"catalogue":20985,"recognised":20986,"saver":20987,"huskies":20988,"colin":20989,"mundo":20990,"siva":20991,"png":20992,"discounted":20993,"manutd":20994,"fresno":20995,"devin":20996,"preliminary":20997,"trophies":20998,"plastics":20999,"dug":21000,"procu":21001,"indigo":21002,"gard":21003,"dylan":21004,"pitches":21005,"groundbreaking":21006,"inson":21007,"blac":21008,"anthology":21009,"fh":21010,"explic":21011,"rard":21012,"admiral":21013,"sochi":21014,"lashes":21015,"splendid":21016,"envy":21017,"adv":21018,"sexy":21019,"festivities":21020,"sticking":21021,"bib":21022,"thrill":21023,"opp":21024,"ariel":21025,"botanical":21026,"endurance":21027,"females":21028,"bricks":21029,"vatican":21030,"blackpool":21031,"bermu":21032,"br
ough":21033,"roller":21034,"bid":21035,"suede":21036,"slovenia":21037,"mming":21038,"mlb":21039,"medalist":21040,"dians":21041,"rehabilitation":21042,"neon":21043,"sgo":21044,"lithu":21045,"ramos":21046,"zed":21047,"pianist":21048,"intensive":21049,"broadband":21050,"study":21051,"petersburg":21052,"luca":21053,"ahhhh":21054,"physician":21055,"dillon":21056,"telecom":21057,"grief":21058,"mun":21059,"acro":21060,"sided":21061,"sly":21062,"blows":21063,"classiccars":21064,"trium":21065,"argy":21066,"?:":21067,"hri":21068,"marshmal":21069,"âĢĵ":21070,"topping":21071,"warsaw":21072,"transc":21073,"preservation":21074,"bav":21075,"refriger":21076,"experiments":21077,"äº":21078,"glit":21079,"sliga":21080,"gage":21081,"factor":21082,"flavours":21083,"brony":21084,"spo":21085,"cookbook":21086,"carriage":21087,"away":21088,"nyfw":21089,"onian":21090,"wg":21091,"simpsons":21092,"rolex":21093,"ðŁı¿":21094,"crosby":21095,"ãħ¤":21096,"credi":21097,"syndic":21098,"pubs":21099,"alife":21100,"poorly":21101,"maced":21102,"ðŁĺŀ":21103,"behindthe":21104,"wenger":21105,"nats":21106,"ðŁİŁ":21107,"rubbish":21108,"procedures":21109,"typhoon":21110,"ophobia":21111,"erdo":21112,"fuel":21113,"viera":21114,"bumps":21115,"millennium":21116,"newzealand":21117,"lectures":21118,"iton":21119,"milky":21120,"responded":21121,"ê°":21122,"landscape":21123,"..@":21124,"bother":21125,"âĸ¶":21126,"zhang":21127,"huawei":21128,"tuition":21129,"sworn":21130,"inu":21131,"yor":21132,"paolo":21133,"auditions":21134,"abil":21135,"malaysian":21136,"hops":21137,"feathers":21138,"mple":21139,"auts":21140,"ão":21141,"bounty":21142,"iche":21143,"ìĺ":21144,"shq":21145,"pinot":21146,"gears":21147,"disappear":21148,"videogames":21149,"tna":21150,"alzheimer":21151,"ðŁĮŀ":21152,"aji":21153,"underwear":21154,"switching":21155,"signage":21156,"oscar":21157,"econ":21158,"drow":21159,"clint":21160,"plated":21161,"gundy":21162,"emblem":21163,"hoes":21164,"icist":21165,"nelly":21166,"junior":21167,"roadshow":21168,"minerals":21169,"atle":21170,"alexandria":21171,"acclaimed":21172,"vell":21173,"shiva":21174,"adhe":21175,"enne":21176,"amnesty":21177,"hounds":21178,"councillor":21179,"ðŁĴ¦":21180,"aesthe":21181,"partnering":21182,"influenced":21183,"magno":21184,"flare":21185,"extinction":21186,"civilian":21187,"majesty":21188,"vail":21189,"lawmakers":21190,"racks":21191,"mcc":21192,"orian":21193,"spices":21194,"errors":21195,"mayer":21196,"coca":21197,"pai":21198,"sooooo":21199,"retiring":21200,"bathro":21201,"ðŁĻĮðŁĻĮ":21202,"âĸª":21203,"suf":21204,"endorsement":21205,"building":21206,"brooch":21207,"palla":21208,"arvind":21209,"agent":21210,"karate":21211,"rhi":21212,"ctv":21213,"taine":21214,"umm":21215,"bax":21216,"reigns":21217,"uniof":21218,"enterprises":21219,"adele":21220,"flake":21221,"attire":21222,"bruce":21223,"bahamas":21224,"gravy":21225,"sain":21226,"cheek":21227,"trivi":21228,"lov":21229,"een":21230,"bblo":21231,"ladygaga":21232,"itta":21233,".\"-":21234,"dustin":21235,"observatory":21236,"eighth":21237,"bloomberg":21238,"khs":21239,"fcc":21240,"gist":21241,"commemorate":21242,"veer":21243,"sexuality":21244,"edc":21245,"nicole":21246,"vacancy":21247,"user":21248,"sona":21249,":'(":21250,"diploma":21251,"tend":21252,"upgrades":21253,"ÅŁ":21254,"jurassic":21255,"cardiac":21256,"drs":21257,"widespread":21258,"Ãł":21259,"dailies":21260,"vendor":21261,"simplicity":21262,"wider":21263,"lenses":21264,"supplements":21265,"depos":21266,"observed":21267,"vines":21268,"partially":21269,"renewal":21270,"collaborate":21271,"alig":21272,"finity":21273
,"phu":21274,"zzy":21275,"petit":21276,"ðŁĵħ":21277,"zin":21278,"igu":21279,"smack":21280,"fallon":21281,"ðŁĵ£":21282,"backwards":21283,"component":21284,"oso":21285,"compatible":21286,"binding":21287,"zurich":21288,"thome":21289,"wounds":21290,"lyric":21291,"freshmen":21292,"sneaky":21293,"fibro":21294,"diet":21295,"employer":21296,"insect":21297,"hated":21298,"scher":21299,"razor":21300,"nsw":21301,"booker":21302,"californi":21303,"avfc":21304,"°":21305,"pretending":21306,"pepsi":21307,"alis":21308,"untitled":21309,"kart":21310,"grandparents":21311,"ethe":21312,"ock":21313,"luxemb":21314,"visuals":21315,"smallbusiness":21316,"abdullah":21317,"minho":21318,"subaru":21319,"hra":21320,"revealing":21321,"heartbreaking":21322,"clarity":21323,"amg":21324,"slr":21325,"****":21326,"âŀĸ":21327,"record":21328,"iciary":21329,"minded":21330,"yeh":21331,"excessive":21332,"knuck":21333,"icecream":21334,"truth":21335,"evic":21336,"tastic":21337,"antarc":21338,"rendering":21339,",,":21340,"mitt":21341,"lorenzo":21342,"stpatrick":21343,"boundary":21344,"zig":21345,"vocab":21346,"osaka":21347,"furn":21348,"tun":21349,"gul":21350,"sounding":21351,"blogger":21352,"utterly":21353,"gaf":21354,"advancing":21355,"lcd":21356,"margin":21357,"lifelong":21358,"solstice":21359,"shra":21360,"waits":21361,"plear":21362,"breach":21363,"enligh":21364,"ader":21365,"ittle":21366,"cation":21367,"hoon":21368,"studied":21369,"?????":21370,"kash":21371,"evangeli":21372,"psl":21373,"weights":21374,"metals":21375,"tyres":21376,"turno":21377,"wie":21378,"carb":21379,"gale":21380,"seal":21381,"sunite":21382,"amic":21383,"patterson":21384,"án":21385,"euph":21386,"upstairs":21387,"qualifiers":21388,"khalifa":21389,"applemusic":21390,"ìĨĮëħ":21391,"vaughan":21392,"alter":21393,"cruiser":21394,"mua":21395,"tana":21396,"katrina":21397,"idols":21398,"spoiled":21399,"secretly":21400,"fibre":21401,"partnered":21402,"umes":21403,"giov":21404,"comet":21405,"screenshotsaturday":21406,"keller":21407,"filtr":21408,"fet":21409,"conway":21410,"peu":21411,"badminton":21412,"gid":21413,"mound":21414,"donkey":21415,"buff":21416,"leather":21417,"largely":21418,"broch":21419,"intments":21420,"amuse":21421,"rk":21422,"stove":21423,"impacted":21424,"cont":21425,"cracks":21426,"prisoner":21427,"bari":21428,"contractor":21429,"orioles":21430,"dominate":21431,"polar":21432,"amelia":21433,"drc":21434,"ðŁijĮðŁijĮ":21435,"vist":21436,"suarez":21437,"injection":21438,"blooms":21439,"ðŁļ¨ðŁļ¨":21440,"stiff":21441,"paypal":21442,"snowing":21443,"thursdays":21444,"goose":21445,"wedge":21446,"educated":21447,"weakness":21448,"decker":21449,"abudha":21450,"breezy":21451,"ÛĮ":21452,"hopeful":21453,"obi":21454,"raider":21455,"gham":21456,"deu":21457,"seve":21458,"partly":21459,"fut":21460,"infused":21461,"merri":21462,"thane":21463,"sometime":21464,"hue":21465,"mein":21466,"credit":21467,"sliding":21468,"rande":21469,"cherry":21470,"deadpool":21471,"shol":21472,"aram":21473,"underwood":21474,"skye":21475,"disturbing":21476,"mnt":21477,"polished":21478,"guardians":21479,"hadn":21480,"picasso":21481,"arius":21482,"akshay":21483,"irri":21484,"jh":21485,"happen":21486,"lakh":21487,"dalton":21488,"atthe":21489,"swell":21490,"marsha":21491,"reh":21492,"cours":21493,"jkt":21494,"topus":21495,"service":21496,"rink":21497,"hackers":21498,"donovan":21499,"horo":21500,"tcm":21501,"mayhem":21502,"chase":21503,"devops":21504,"kensing":21505,"scup":21506,"shere":21507,"qualification":21508,"clive":21509,"tong":21510,"nancy":21511,"maris":21512,"derdale":21513,"berman":21514,"cind
erella":21515,"jolly":21516,"cic":21517,"loot":21518,"collectibles":21519,"homicide":21520,"gge":21521,"epidemic":21522,"suites":21523,"muddy":21524,"gimme":21525,"erec":21526,"-*":21527,"talla":21528,"lisle":21529,"embroide":21530,"ðŁĩ©ðŁĩª":21531,"verizon":21532,"vector":21533,"beanie":21534,"artisan":21535,"gain":21536,"flores":21537,"vigil":21538,"uso":21539,"ðŁĻıðŁı½":21540,"grinding":21541,"gher":21542,"airports":21543,"responsive":21544,"shaft":21545,"cancel":21546,"ceremonies":21547,"eme":21548,"atari":21549,"brushes":21550,"eager":21551,"bohemi":21552,"childrens":21553,"yankee":21554,"maa":21555,"suspense":21556,"moran":21557,"macar":21558,"sunflower":21559,"crew":21560,"void":21561,"kear":21562,"fashioned":21563,"jennings":21564,"sundayfunday":21565,"submissions":21566,"mead":21567,"herman":21568,"wai":21569,"critically":21570,"leum":21571,"baekhyun":21572,"forcing":21573,"cobra":21574,"ãģ®":21575,"acquire":21576,"alk":21577,"geology":21578,"primar":21579,"importantly":21580,"irez":21581,"bundesliga":21582,"curiosity":21583,"sena":21584,"strict":21585,"consoli":21586,"winters":21587,"venom":21588,"cheltenham":21589,"ðŁįº":21590,"cena":21591,"tat":21592,"bain":21593,"glover":21594,"undercover":21595,"asses":21596,"carn":21597,"memorialday":21598,"ameli":21599,"irene":21600,"chon":21601,"synthesis":21602,"speedy":21603,"mitsubi":21604,"slayer":21605,"composite":21606,"understands":21607,"pew":21608,"interrup":21609,"henri":21610,"morrow":21611,"anom":21612,"thofjuly":21613,"glee":21614,"three":21615,"ðŁĺ®":21616,"andhi":21617,"chatt":21618,"renewables":21619,"yes":21620,"transfers":21621,"!!!!!!!!":21622,"babu":21623,"duter":21624,"loops":21625,"peers":21626,"oilers":21627,"paulo":21628,"ication":21629,"hmu":21630,"wara":21631,"mercer":21632,"homeland":21633,"fuji":21634,"aley":21635,"yearbook":21636,"rem":21637,"reen":21638,"absur":21639,"bois":21640,"]:":21641,"caesar":21642,"shotgun":21643,"kurdish":21644,"oren":21645,"rae":21646,"ancies":21647,"typic":21648,"fh":21649,"default":21650,"replic":21651,"luk":21652,"transactions":21653,"rys":21654,"infantry":21655,"ðŁį¾":21656,"chow":21657,"chickens":21658,"bagh":21659,"wyatt":21660,"aye":21661,"ggi":21662,"brews":21663,"editions":21664,"mira":21665,"commencement":21666,"presu":21667,"periscope":21668,"ichi":21669,"guatemala":21670,"zambia":21671,"paints":21672,"witches":21673,"wani":21674,"undere":21675,"croy":21676,"vows":21677,"usmc":21678,"hearted":21679,"theatres":21680,"shuffle":21681,"level":21682,"multic":21683,"squeeze":21684,"fern":21685,"appet":21686,"postal":21687,"malt":21688,"onboard":21689,"ldnt":21690,"coo":21691,"ssc":21692,"kac":21693,"ðŁĺĩ":21694,"scrap":21695,"marcos":21696,"dealers":21697,"annu":21698,"miller":21699,"cove":21700,"ulary":21701,"vladimir":21702,"beef":21703,"thur":21704,"pickled":21705,"sesame":21706,"bengaluru":21707,"mott":21708,"kathleen":21709,"hist":21710,"notor":21711,"drank":21712,"duchess":21713,"snowfall":21714,"eff":21715,"tiny":21716,"jn":21717,"syour":21718,"specialists":21719,"scotus":21720,"baylor":21721,"everest":21722,"malibu":21723,"prem":21724,"harmful":21725,"lali":21726,"bates":21727,"gye":21728,"differenti":21729,"andra":21730,"geometry":21731,"elover":21732,"blackout":21733,"====":21734,"kota":21735,"interact":21736,"asian":21737,"layo":21738,"samurai":21739,"fidel":21740,"exhausted":21741,"gladi":21742,"pdt":21743,"spheric":21744,"antiqu":21745,"guitar":21746,"sturi":21747,"hopper":21748,"angle":21749,"fills":21750,"slap":21751,"mith":21752,"rodney":21753,"ongi":21754,"insom"
:21755,"preventing":21756,"cassidy":21757,"apho":21758,"oregon":21759,"loin":21760,"hammond":21761,"contributing":21762,"fn":21763,"garri":21764,"orion":21765,"compelling":21766,"escaping":21767,"aiming":21768,"plumb":21769,"bistro":21770,"beasts":21771,"concerning":21772,"boe":21773,"dopp":21774,"shoplocal":21775,"stumbled":21776,"âĤ¹":21777,"nazis":21778,"âĢįâĻĤï¸ı":21779,"gesture":21780,"warts":21781,"usopen":21782,"higgins":21783,"charli":21784,"hangs":21785,"bombers":21786,"°:":21787,"feeds":21788,"cch":21789,"stil":21790,"nicola":21791,"ðŁĵº":21792,"clamation":21793,"tropic":21794,"afro":21795,"ouk":21796,"expenses":21797,"derrick":21798,"aline":21799,"faw":21800,"regard":21801,"imer":21802,"satin":21803,"thium":21804,"ryder":21805,"pearl":21806,"tess":21807,"mmmmm":21808,"senses":21809,"ðŁĩ¹":21810,"positive":21811,"exhaust":21812,"occur":21813,"norris":21814,"lilly":21815,"isles":21816,"directing":21817,"yofficial":21818,"countless":21819,"samar":21820,"onstage":21821,"flock":21822,"mirrors":21823,"archer":21824,"moi":21825,"kd":21826,"viv":21827,"inos":21828,"sikh":21829,"lei":21830,"sensory":21831,"brits":21832,"knox":21833,"chestnut":21834,"opy":21835,"coliseum":21836,"zaf":21837,"divin":21838,"adapter":21839,":)))":21840,"temple":21841,"kun":21842,"helmets":21843,"tdf":21844,"guide":21845,"mold":21846,"oids":21847,"luther":21848,"heis":21849,"monastery":21850,"spree":21851,"klu":21852,"britney":21853,"jaguars":21854,"greats":21855,"ccc":21856,"kyrie":21857,"machinery":21858,"cricket":21859,"rero":21860,"abo":21861,"aspiring":21862,"semifinals":21863,"aless":21864,"signatures":21865,"vard":21866,"meth":21867,"herbal":21868,"holden":21869,"kingdom":21870,"apor":21871,"reggie":21872,"oreo":21873,"palestinians":21874,"emmys":21875,"sectional":21876,"roi":21877,"neymar":21878,"quel":21879,"cull":21880,"lka":21881,"hazel":21882,"estimate":21883,"ulties":21884,"gow":21885,"bea":21886,"purchases":21887,"belts":21888,"protects":21889,"mé":21890,"guessing":21891,"bbo":21892,"claudia":21893,"fracking":21894,"jonny":21895,"elk":21896,"celtic":21897,"almighty":21898,"raje":21899,"courtyard":21900,"igi":21901,"canes":21902,"ðŁĴªðŁı»":21903,"bankrup":21904,"lethal":21905,"âľĮï¸ı":21906,"graphicdesign":21907,"vader":21908,"pencils":21909,"roughly":21910,"dante":21911,"mfg":21912,"constell":21913,"camel":21914,"jb":21915,"blossoms":21916,"ento":21917,"balochistan":21918,"cinemato":21919,"illard":21920,"jersey":21921,"consent":21922,"dented":21923,"contempl":21924,"scher":21925,"holi":21926,"lough":21927,"stour":21928,"ayo":21929,"beginners":21930,"curb":21931,"vhs":21932,"ajax":21933,"duff":21934,"aveng":21935,"domest":21936,"committing":21937,"aired":21938,"chap":21939,"hedgehog":21940,"disappointing":21941,"freelance":21942,"inland":21943,"charms":21944,"ðŁĺįâĿ¤ï¸ı":21945,"aish":21946,"mx":21947,"buckle":21948,"tidal":21949,"permit":21950,"boating":21951,"racha":21952,"kendrick":21953,"bello":21954,"bhi":21955,"plea":21956,"estimates":21957,"lb":21958,"apologies":21959,"jaya":21960,"bbl":21961,"astoni":21962,"interstate":21963,"maintaining":21964,"elbow":21965,"mup":21966,"epit":21967,"ðŁĺ¡":21968,"violations":21969,"defend":21970,"beh":21971,"slc":21972,"amir":21973,"puri":21974,"tium":21975,"fifa":21976,"blurry":21977,"scrim":21978,"ðŁĻıðŁı¾":21979,"maple":21980,"relatives":21981,"âĺĿ":21982,"choc":21983,"connor":21984,"⾨⾨":21985,"whisp":21986,"listings":21987,"maze":21988,"thanking":21989,"ridd":21990,"grassroots":21991,"shifting":21992,"desperately":21993,"gorilla":21994,"deni":21995,"jule
s":21996,"strath":21997,"gley":21998,"jain":21999,"buick":22000,"tanner":22001,"ðŁĴĿ":22002,"gae":22003,"prim":22004,"itors":22005,"nano":22006,"separation":22007,"armenia":22008,"bordeaux":22009,"ðŁħ":22010,"pjnet":22011,"burial":22012,"ebon":22013,"gloss":22014,"renew":22015,"grier":22016,"speeds":22017,"comicbooks":22018,"symboli":22019,"purposes":22020,"ãħłãħł":22021,"spatial":22022,"notable":22023,"cion":22024,"nps":22025,"hoffman":22026,"norman":22027,"rtg":22028,"dusty":22029,"situated":22030,"tran":22031,"kfc":22032,"emen":22033,"nickel":22034,"hastings":22035,"settling":22036,"grit":22037,"lena":22038,"waw":22039,"arts":22040,"gum":22041,"caregi":22042,"lewis":22043,"sapphire":22044,"remember":22045,"embedded":22046,"tlc":22047,"blat":22048,"sergeant":22049,"elsa":22050,"bootcamp":22051,"bowman":22052,"photographic":22053,"pillars":22054,"directioners":22055,"classified":22056,"nois":22057,"veer":22058,"barrels":22059,"whoop":22060,"ðŁĺ±ðŁĺ±":22061,"female":22062,"petroleum":22063,"media":22064,"efc":22065,"pokémon":22066,"à¤ķ":22067,"enthusiastic":22068,"varun":22069,"profiles":22070,"pediatric":22071,"accidents":22072,"conrad":22073,"jang":22074,"jojo":22075,"acor":22076,"observer":22077,"lf":22078,"livestock":22079,"forgi":22080,"fos":22081,"elm":22082,"anand":22083,"goe":22084,"cere":22085,"avoiding":22086,"grit":22087,"oman":22088,"thankfully":22089,"scattered":22090,"nicky":22091,"cylinder":22092,"cheesy":22093,"diver":22094,"mahesh":22095,"caves":22096,"earliest":22097,"quinte":22098,"subjects":22099,"bend":22100,"gulf":22101,"vocalist":22102,"glue":22103,"patches":22104,"unstopp":22105,"snyder":22106,"demonstrating":22107,"pio":22108,"horns":22109,"wickets":22110,"andthe":22111,"rama":22112,"yoon":22113,"straight":22114,"bedtime":22115,"orang":22116,"bullets":22117,"saurus":22118,"miners":22119,"incidents":22120,"!...":22121,"ðŁİ¸":22122,"agers":22123,"handles":22124,"states":22125,"inity":22126,"dons":22127,"incredible":22128,"eminem":22129,"aviv":22130,"rudy":22131,"mozart":22132,"folklore":22133,"appliances":22134,"mtl":22135,"frey":22136,"dias":22137,"hua":22138,"pageant":22139,"strive":22140,"imprison":22141,"bullish":22142,"rana":22143,"alerts":22144,"bbmas":22145,"hyper":22146,"derbyshire":22147,"recre":22148,"redd":22149,"deborah":22150,"cosmos":22151,"lawson":22152,"melanie":22153,"psycho":22154,"hoor":22155,"doodles":22156,"sniper":22157,"shady":22158,"mantle":22159,"canadian":22160,"newyear":22161,"interactions":22162,"separated":22163,"cords":22164,"spirituality":22165,"apu":22166,"ito":22167,"pct":22168,"pelosi":22169,"rebellion":22170,"seiz":22171,"worcester":22172,"sectors":22173,"uli":22174,"santa":22175,"е":22176,"ðŁĩªðŁĩ¸":22177,"biased":22178,"classical":22179,"gamma":22180,"deeplear":22181,"emerge":22182,"backer":22183,"surance":22184,"handcrafted":22185,"ðŁİ¥":22186,"francis":22187,"millan":22188,"ici":22189,"crown":22190,"wow":22191,"striped":22192,"unfair":22193,"relaxation":22194,"³ï¸ı":22195,"embracing":22196,"shealth":22197,"paleo":22198,"martini":22199,"distillery":22200,"wrink":22201,"ork":22202,"nath":22203,"hayley":22204,"courthouse":22205,"siber":22206,"sadi":22207,"quietly":22208,"melt":22209,"msm":22210,"meh":22211,"smartphones":22212,"relent":22213,"pping":22214,"warwick":22215,"cologne":22216,"glia":22217,"cotton":22218,"prog":22219,"lone":22220,"ipsw":22221,"starters":22222,"expands":22223,"ump":22224,"sued":22225,"skipper":22226,"infections":22227,"ingle":22228,"á":22229,"clerk":22230,"demonstrate":22231,"acar":22232,"ðŁĺĤðŁĺĤðŁĺĤ":22233
,"tibet":22234,"buns":22235,"alom":22236,"demolition":22237,"ssia":22238,"gst":22239,"[]":22240,"soar":22241,"âĺĢ":22242,"ðŁĺª":22243,"ðŁĵĬ":22244,"deepest":22245,"beyond":22246,"aret":22247,"attends":22248,"activated":22249,"dimit":22250,"âļªï¸ı":22251,"highlighted":22252,"magazines":22253,"rumor":22254,"azza":22255,"stephens":22256,"dolph":22257,"shockey":22258,"mats":22259,"weav":22260,"melan":22261,"servers":22262,"traum":22263,"kush":22264,"æĹ":22265,"babys":22266,"paz":22267,"aal":22268,"lause":22269,"breakers":22270,"canterbury":22271,"ulture":22272,"miri":22273,"euros":22274,"taneous":22275,"impressions":22276,"dutch":22277,"ild":22278,"ghi":22279,"purdue":22280,"adequate":22281,"lp":22282,"syner":22283,"angler":22284,"durable":22285,"galore":22286,"rown":22287,"mgmt":22288,"ðŁĵĮ":22289,"lucia":22290,"âĺijï¸ı":22291,"zayn":22292,"borrow":22293,".(":22294,"northumber":22295,"crush":22296,"enga":22297,"sush":22298,"extravag":22299,"tout":22300,"mahal":22301,"alistic":22302,"thermo":22303,"galleries":22304,"esse":22305,"chibi":22306,"attractions":22307,"lexington":22308,"legislature":22309,"documented":22310,"residen":22311,"brownies":22312,"wf":22313,"stool":22314,"planets":22315,"shoppers":22316,"conductor":22317,"msp":22318,"tricky":22319,"fruity":22320,"endra":22321,"feelthe":22322,"whipped":22323,"hairstyle":22324,"refer":22325,"ook":22326,"octopus":22327,"audiences":22328,"kumar":22329,"afterno":22330,"optim":22331,"cfl":22332,"nip":22333,"geni":22334,"alphabet":22335,"annab":22336,"lamin":22337,"accepts":22338,"lng":22339,"ðŁĺ«":22340,"tine":22341,"acom":22342,"cheerleaders":22343,"tk":22344,"gron":22345,"vg":22346,"kung":22347,"jax":22348,"dhabi":22349,"rss":22350,"mackenzie":22351,"beirut":22352,"cleanup":22353,"gypsy":22354,"stell":22355,"burger":22356,"hurricanes":22357,"education":22358,"stina":22359,"âĻ¡âĻ¡":22360,"unfortunate":22361,"jeremi":22362,"badger":22363,"aters":22364,":âĢ¦":22365,"terra":22366,"sublime":22367,"stud":22368,"ymca":22369,"mru":22370,"duterte":22371,"brennan":22372,"bulb":22373,"melo":22374,"ylon":22375,"hacker":22376,"cred":22377,"gud":22378,"asan":22379,"padilla":22380,"embroidered":22381,"vietnamese":22382,"pioneers":22383,"projection":22384,"reboot":22385,"idc":22386,"aney":22387,"primer":22388,"suffers":22389,"winding":22390,"pon":22391,"stoday":22392,"morn":22393,"uch":22394,"allin":22395,"adidas":22396,"elizabeth":22397,"tuck":22398,"ography":22399,"ðŁļĢ":22400,"beg":22401,"osborne":22402,"ghetto":22403,"rh":22404,"cnn":22405,"irma":22406,"makin":22407,"cables":22408,"murders":22409,"ocks":22410,"insta":22411,"alas":22412,"sik":22413,"cuff":22414,"lare":22415,"foodies":22416,"ovic":22417,"atom":22418,"geometric":22419,"empathy":22420,"ี":22421,"centenary":22422,"newspapers":22423,"administrative":22424,"ðŁİĬ":22425,"stive":22426,"contractors":22427,"lett":22428,"tasmania":22429,"awesomeness":22430,"density":22431,"veen":22432,"princeton":22433,"frequently":22434,"reject":22435,"ghi":22436,"modular":22437,"ceramics":22438,"shag":22439,"kiwi":22440,"canvas":22441,"sweatshirt":22442,"anj":22443,"timm":22444,"napoli":22445,"iler":22446,"appeals":22447,"hamilton":22448,"mayo":22449,"weave":22450,"arranged":22451,"wharf":22452,"occupy":22453,"bvb":22454,"asaki":22455,"otter":22456,"norm":22457,"vies":22458,"detox":22459,"tional":22460,"derek":22461,"idad":22462,"admissions":22463,"constituency":22464,"upper":22465,"woot":22466,"alloy":22467,"seve":22468,"lub":22469,"uncomfortable":22470,"edwin":22471,"abre":22472,"dwight":22473,"arche":22474,"virtua
lly":22475,"spol":22476,"prie":22477,"aii":22478,"err":22479,"switch":22480,"barack":22481,"seok":22482,"coul":22483,"wnt":22484,"poul":22485,"olive":22486,"caffeine":22487,"cardiff":22488,"notorious":22489,"demp":22490,"excess":22491,"barr":22492,"tford":22493,"ajay":22494,"bumped":22495,"mythology":22496,"shelley":22497,"falcon":22498,"shakespeare":22499,"mustangs":22500,"noted":22501,"bone":22502,"civilization":22503,"syd":22504,"parsons":22505,"unofficial":22506,"hyped":22507,"spends":22508,"opposed":22509,"vings":22510,"spacex":22511,"notification":22512,"deciding":22513,"biotech":22514,"outsi":22515,"salah":22516,"!.":22517,"fed":22518,"ssy":22519,"cms":22520,"badgers":22521,"cro":22522,"elaine":22523,"nba":22524,"dyour":22525,"nant":22526,"honeymoon":22527,"climbed":22528,"conomy":22529,"atha":22530,"mell":22531,"nebula":22532,"naturephotography":22533,"julie":22534,"bmx":22535,"invested":22536,"mono":22537,"lieutenant":22538,"watkins":22539,"technician":22540,"ose":22541,"kae":22542,"ìĽ":22543,"mcqueen":22544,"preach":22545,"traveller":22546,"flexibility":22547,"zebra":22548,"retailer":22549,"pant":22550,"bender":22551,"brandt":22552,"squid":22553,"warrant":22554,"verified":22555,"cass":22556,"piercing":22557,"honours":22558,"tying":22559,"morris":22560,"kissed":22561,"oprah":22562,"panoramic":22563,"mei":22564,"splatoon":22565,"wichita":22566,"arias":22567,"galli":22568,"indyref":22569,"goodtimes":22570,"atheist":22571,"confession":22572,"owski":22573,"repping":22574,"additions":22575,"mechanism":22576,"zim":22577,"jans":22578,"suf":22579,"chopped":22580,"beginnings":22581,"vitamins":22582,"ãħ¤ãħ¤":22583,"orth":22584,"poles":22585,"rub":22586,"antarctica":22587,"indiefilm":22588,"webcam":22589,"ketch":22590,"brett":22591,"clement":22592,"heron":22593,"defeating":22594,"hydro":22595,"bucket":22596,"wandering":22597,"sidney":22598,"futureof":22599,"binge":22600,"onies":22601,"knockout":22602,"administrator":22603,"synthe":22604,"lent":22605,"jani":22606,"barley":22607,"premierleague":22608,"nerds":22609,"crm":22610,"bras":22611,"botany":22612,"evolved":22613,"rotter":22614,"rowed":22615,"tumor":22616,"wealthy":22617,"ÂŃ":22618,"monarch":22619,"lished":22620,"dahl":22621,"ðŁİĥ":22622,"buch":22623,"kenyan":22624,"ا":22625,"redness":22626,"assembled":22627,"semit":22628,"hudder":22629,"shrop":22630,"rani":22631,"learning":22632,"mory":22633,"itia":22634,"geographic":22635,"worldof":22636,"fb":22637,"phosp":22638,"boogie":22639,"amped":22640,"?...":22641,"chew":22642,"dwarf":22643,"arus":22644,"ssen":22645,"rusty":22646,"recruits":22647,"hk":22648,"garde":22649,"applause":22650,"volumes":22651,"involves":22652,"tac":22653,"handbag":22654,"translate":22655,"ffel":22656,"seym":22657,"aquatic":22658,"transfer":22659,"zodi":22660,"andr":22661,"academia":22662,"crater":22663,"tez":22664,"arse":22665,"adapt":22666,"coloni":22667,"snowman":22668,"mali":22669,"hangin":22670,"dischar":22671,"oysters":22672,"phoe":22673,"colonel":22674,"wba":22675,"hispanic":22676,"thriving":22677,"shy":22678,"agles":22679,"salesforce":22680,"creme":22681,"soles":22682,"lafayette":22683,"âī":22684,"teria":22685,"acha":22686,"sperson":22687,"gogo":22688,"carly":22689,"theore":22690,"amore":22691,"vox":22692,"aft":22693,"ãĤ¹":22694,"staple":22695,"muffin":22696,"diagram":22697,"inox":22698,"sustained":22699,"avent":22700,"meta":22701,"arbitr":22702,"decay":22703,"adole":22704,"н":22705,"ecol":22706,"pho":22707,"nk":22708,"ocu":22709,"granny":22710,"ça":22711,"luxembour":22712,"stadt":22713,"alberto":22714,"levit":22
715,"amas":22716,"dx":22717,"orphan":22718,"cobb":22719,"asc":22720,"logy":22721,"immense":22722,"chants":22723,"offline":22724,"pent":22725,"brex":22726,"winger":22727,"plane":22728,"iel":22729,"nichols":22730,"cathy":22731,"naruto":22732,"lowed":22733,"///":22734,"ignorance":22735,"catastro":22736,"youts":22737,"schen":22738,"build":22739,"hazi":22740,"sine":22741,"criticalrole":22742,"dug":22743,"detect":22744,"logs":22745,"enamel":22746,"stpatricksday":22747,"eddie":22748,"copa":22749,"cigarettes":22750,"hoff":22751,"kaya":22752,"lagoon":22753,"rapha":22754,"airborne":22755,"choose":22756,"puertor":22757,"kev":22758,"guiding":22759,"frosty":22760,"borough":22761,"mira":22762,"ðŁİĬ":22763,"cadet":22764,"anush":22765,"yogi":22766,"eger":22767,"fling":22768,"slope":22769,"ninth":22770,"weston":22771,"footwear":22772,"fn":22773,"mayweather":22774,"aam":22775,"plain":22776,"staircase":22777,"witnesses":22778,"workouts":22779,"robust":22780,"dexter":22781,"cohort":22782,"ðŁļĹ":22783,"spell":22784,"haze":22785,"oom":22786,"organising":22787,"wildfire":22788,"contacts":22789,"avon":22790,"mino":22791,"updating":22792,"ðŁį»":22793,"lithium":22794,"ingual":22795,"kis":22796,"auga":22797,"locom":22798,"deduc":22799,"uda":22800,"thak":22801,"boyle":22802,"mper":22803,"hottie":22804,"erik":22805,"revised":22806,"isla":22807,"travelphotography":22808,"ooza":22809,"enqui":22810,"conferences":22811,"clover":22812,"groom":22813,"curves":22814,"liveon":22815,"perf":22816,"displaced":22817,"bolog":22818,"xxxx":22819,"ðŁĺ©ðŁĺ©":22820,"teal":22821,"vessels":22822,"rainforest":22823,"calci":22824,"panther":22825,"giraffe":22826,"tasted":22827,"imagery":22828,"padres":22829,"daytime":22830,"bass":22831,"ripe":22832,"opioid":22833,"nue":22834,"vinyl":22835,"inventor":22836,"sens":22837,"processor":22838,"mut":22839,"gadgets":22840,"biblical":22841,"shannon":22842,"jacqueline":22843,"cary":22844,"theresistance":22845,"alien":22846,"nvi":22847,"cosy":22848,"bihar":22849,"foley":22850,"rend":22851,"mugs":22852,"faken":22853,"clone":22854,"niallo":22855,"grabbed":22856,"chihu":22857,"powerhouse":22858,"ntt":22859,"cherokee":22860,"sponge":22861,"implementing":22862,"rhine":22863,"leone":22864,"ðŁįĢ":22865,"prettiest":22866,"infrared":22867,"improv":22868,"switched":22869,"tubes":22870,"contr":22871,"blk":22872,"projected":22873,"beaver":22874,"yot":22875,"bbcradio":22876,"thigh":22877,"persecu":22878,"apologize":22879,"wack":22880,"poster":22881,"oliver":22882,"aza":22883,"loud":22884,"(?)":22885,"fthe":22886,"womenshi":22887,"sparrow":22888,"blush":22889,"usable":22890,"scales":22891,"itative":22892,"peuge":22893,"needing":22894,"leggings":22895,"glamorous":22896,"matur":22897,"cz":22898,"watt":22899,"dab":22900,"tamar":22901,"etsym":22902,"bauer":22903,"heartfelt":22904,"hn":22905,"elsewhere":22906,"birch":22907,"alumini":22908,"huck":22909,"eme":22910,"jl":22911,"trafford":22912,"dz":22913,"portions":22914,"anasta":22915,"arthritis":22916,"espn":22917,"bergen":22918,"violation":22919,"yoshi":22920,"cz":22921,"northumberland":22922,"closures":22923,"ðŁĩ¯ðŁĩ":22924,"smiley":22925,"rw":22926,"telugu":22927,"intensi":22928,"gregg":22929,"vega":22930,"dungeon":22931,"southbound":22932,"bail":22933,"dominican":22934,"semifinal":22935,"chapters":22936,"hitch":22937,"vanity":22938,"transiti":22939,"recommends":22940,"satisf":22941,"barca":22942,"queens":22943,"((":22944,"destruc":22945,"strait":22946,"ravi":22947,"desserts":22948,"intru":22949,"haram":22950,"kos":22951,"foe":22952,"fatty":22953,"paisley":22954,"magnitu
de":22955,"dridge":22956,"comey":22957,"schemes":22958,"visionary":22959,"ourt":22960,"downloaded":22961,"ðŁĻĮðŁı½":22962,"gdpr":22963,"lani":22964,"pwc":22965,"guad":22966,"nicest":22967,"stakeholders":22968,"referred":22969,"georgetown":22970,"arvindkejriwal":22971,"schneider":22972,"indoors":22973,"allstar":22974,"stranded":22975,"gender":22976,"zepp":22977,"masses":22978,"ðŁIJ±":22979,"patiently":22980,"bldg":22981,"zab":22982,"wearab":22983,"vivid":22984,"heck":22985,"della":22986,"symb":22987,"jeopar":22988,"lager":22989,"àª":22990,"combines":22991,"nec":22992,"bray":22993,"flop":22994,"txwx":22995,"joys":22996,"pont":22997,"profound":22998,"surround":22999,"madhu":23000,"mable":23001,"ayr":23002,"teas":23003,"nsa":23004,"openly":23005,"ernest":23006,"ãĥ©":23007,"topo":23008,"gna":23009,"antioxid":23010,"tian":23011,"etr":23012,"cello":23013,"mathi":23014,"generosity":23015,"biting":23016,"manic":23017,"kelsey":23018,"cheeks":23019,"tender":23020,"wth":23021,"pronoun":23022,"ultimately":23023,"gusta":23024,"arianag":23025,"gerry":23026,"bleed":23027,"reddy":23028,"mich":23029,"mitsubishi":23030,"operated":23031,"sexually":23032,"mau":23033,"cllr":23034,"vids":23035,"coc":23036,"melted":23037,"ðŁĮĪ":23038,"qld":23039,"itech":23040,"instrumental":23041,"endgame":23042,"ðŁĵĸ":23043,"energi":23044,"brownie":23045,"tamil":23046,"atin":23047,"dominated":23048,"praises":23049,"fireplace":23050,"sensational":23051,"mena":23052,"karti":23053,"unprece":23054,"rupt":23055,"oriental":23056,"mccor":23057,"tournaments":23058,"scenter":23059,"reeves":23060,"prescription":23061,"same":23062,"frau":23063,"truffle":23064,"embo":23065,"romans":23066,"blasts":23067,"technological":23068,"prat":23069,"bsb":23070,"yar":23071,"trendy":23072,"acl":23073,"alad":23074,"ðŁįģ":23075,"ohh":23076,"bankrupt":23077,"thoven":23078,"regards":23079,"iser":23080,"warwick":23081,"vineyards":23082,"realm":23083,"niallofficial":23084,"dota":23085,"gemini":23086,"todo":23087,"vable":23088,"¨¨":23089,"lau":23090,"wreath":23091,"juve":23092,"natasha":23093,"lever":23094,"lori":23095,"horser":23096,"cctv":23097,"airbnb":23098,"esanders":23099,"sinclair":23100,"emabiggest":23101,"highschool":23102,"contest":23103,"optimistic":23104,"tte":23105,"ðŁĴķðŁĴķ":23106,"ssd":23107,"yee":23108,"helena":23109,"consen":23110,"ricks":23111,"jesse":23112,"anic":23113,"ðŁİ¯":23114,"reacts":23115,"robe":23116,"independence":23117,"voltage":23118,"mington":23119,"sant":23120,"à¸Ļà¸":23121,"----------------":23122,"sentinel":23123,"kett":23124,"rehearsing":23125,"aaaaaaaa":23126,"softhe":23127,"stirling":23128,"search":23129,"wigan":23130,"standout":23131,"snail":23132,"pentagon":23133,"Äģ":23134,"chlor":23135,"crust":23136,"netany":23137,"chemist":23138,"disappeared":23139,"ricardo":23140,"spiders":23141,"bose":23142,"warren":23143,"messing":23144,"banners":23145,"guel":23146,"parach":23147,"maid":23148,"counted":23149,"epile":23150,"bonfire":23151,"speechless":23152,"setter":23153,"measured":23154,"rejects":23155,"nikki":23156,"lester":23157,"forensic":23158,"fabrics":23159,"aloha":23160,"preserved":23161,"watford":23162,"detailing":23163,"darth":23164,"bou":23165,"carly":23166,"...'":23167,"tailgate":23168,"notifications":23169,"å¤":23170,"passive":23171,"trousers":23172,"baloch":23173,"rother":23174,"typically":23175,"Ã¥":23176,"spit":23177,"wiz":23178,"sicily":23179,"technically":23180,"expose":23181,"stage":23182,"hubb":23183,"cream":23184,"caps":23185,"poke":23186,"sleek":23187,"june":23188,"temporarily":23189,"dez":23190,"awakens":23191,
"lame":23192,"_-":23193,"jiha":23194,"tuesdays":23195,"advised":23196,"advisors":23197,"existed":23198,"disagree":23199,"newsroom":23200,"losers":23201,"worldtour":23202,"drying":23203,"aldi":23204,"harness":23205,"footprint":23206,"hobbit":23207,"pmln":23208,"iro":23209,"quered":23210,"assess":23211,"gaze":23212,"sab":23213,"thian":23214,"íĬ":23215,"tif":23216,"observe":23217,"evil":23218,"drawer":23219,"sweep":23220,"cory":23221,"cody":23222,"kyoto":23223,"callum":23224,"ninj":23225,"laurent":23226,"bei":23227,"sketching":23228,"customized":23229,"dur":23230,"regrets":23231,"knoxville":23232,"ìķĦ":23233,"messaging":23234,"gracie":23235,"abundance":23236,"bidding":23237,"brewed":23238,"flouri":23239,"therapeutic":23240,"altitude":23241,"hogs":23242,"burner":23243,"electro":23244,"wonderfully":23245,"heater":23246,"postpon":23247,"livery":23248,"rall":23249,"adas":23250,"aac":23251,"saul":23252,"brooklyn":23253,"playhouse":23254,"âĻ¥âĻ¥âĻ¥":23255,"charitable":23256,"iny":23257,"zah":23258,"competitions":23259,"beav":23260,"plugged":23261,"ois":23262,"doom":23263,"astronom":23264,"specialized":23265,"maxi":23266,"taps":23267,"cellular":23268,"depressed":23269,"folklorethursday":23270,"crib":23271,"emul":23272,"ë°©":23273,"figh":23274,"ruz":23275,"carlisle":23276,"spear":23277,"sidewalk":23278,"dei":23279,"dependent":23280,"laces":23281,"nhs":23282,"ðŁĮĻ":23283,"realizing":23284,"network":23285,"riche":23286,"regin":23287,"refresh":23288,"stral":23289,"pathology":23290,"plaid":23291,"psychedelic":23292,"hind":23293,"uka":23294,"algorithm":23295,"linking":23296,"progressi":23297,"fey":23298,"dade":23299,"hydrated":23300,"bant":23301,"famed":23302,"cotsw":23303,"boise":23304,"asc":23305,"racing":23306,"javier":23307,"wwen":23308,"marlins":23309,"poop":23310,"swept":23311,"tonights":23312,"wef":23313,"anime":23314,"slovak":23315,"âŀĸâŀĸ":23316,"claus":23317,"lemme":23318,"clippers":23319,"rels":23320,"arianagrande":23321,"rte":23322,"kot":23323,"thalapathy":23324,"hungarian":23325,"zuma":23326,"yvon":23327,"isu":23328,"journeys":23329,"clinics":23330,"bebe":23331,"wwf":23332,"nws":23333,"superheroes":23334,"erit":23335,"sleague":23336,"identification":23337,"motto":23338,"bai":23339,"sourced":23340,"iller":23341,"api":23342,"prise":23343,"unprecedented":23344,"damas":23345,"tunisia":23346,"drain":23347,"underestim":23348,"ether":23349,"quarterly":23350,"rewarding":23351,"alham":23352,"wolverine":23353,"cabine":23354,"hypno":23355,"nadine":23356,"havana":23357,"dae":23358,"ðŁĵĪ":23359,"dron":23360,"readings":23361,"bati":23362,"pico":23363,"merci":23364,"itian":23365,"walkers":23366,"elope":23367,"mikey":23368,"godzilla":23369,"burlington":23370,"abuja":23371,"socialism":23372,"atility":23373,"shell":23374,"harrypotter":23375,"gno":23376,"abur":23377,"releg":23378,"felici":23379,"rogen":23380,"neuroscience":23381,"instin":23382,"atham":23383,"vouchers":23384,"jarre":23385,"fuse":23386,"defici":23387,"monterey":23388,"deport":23389,"midday":23390,"ppard":23391,"freed":23392,"ameter":23393,"wilt":23394,"ningham":23395,"pratt":23396,"liberty":23397,"slogan":23398,"oto":23399,"pri":23400,"coated":23401,"cpd":23402,"nett":23403,"illas":23404,"malawi":23405,"evolve":23406,"accessibility":23407,"ðŁĶ¥ðŁĶ¥ðŁĶ¥ðŁĶ¥":23408,"ornament":23409,"bp":23410,"elis":23411,"sonline":23412,"chiro":23413,"flick":23414,"ibm":23415,"arak":23416,"enables":23417,"garland":23418,"sane":23419,"cuties":23420,"trip":23421,"rotterdam":23422,"nys":23423,"lamps":23424,"lucas":23425,"bog":23426,"rails":23427,"travelled":23428,"hic
ks":23429,"enu":23430,"sabha":23431,"scrub":23432,"hier":23433,"hartford":23434,"foo":23435,"fernandez":23436,"trevor":23437,"mattress":23438,"appointments":23439,"alej":23440,"fei":23441,"ologist":23442,"safar":23443,"octa":23444,"src":23445,"shaun":23446,"ambient":23447,"dric":23448,"biker":23449,"shee":23450,"mustache":23451,"hta":23452,"boone":23453,"herty":23454,"cardio":23455,"brakes":23456,"recital":23457,"consists":23458,"overwhelmed":23459,"caul":23460,"robbins":23461,"imit":23462,"alth":23463,"url":23464,"bibli":23465,"onne":23466,"blacklivesmatter":23467,"difficulties":23468,"telang":23469,"taller":23470,"ðŁĵĨ":23471,"debating":23472,"burrito":23473,"movember":23474,"strengthening":23475,"boe":23476,"testam":23477,"miracles":23478,"baseball":23479,"renee":23480,"ðŁijīðŁı»":23481,"alfa":23482,"âĺĺ":23483,"unstoppable":23484,"ecs":23485,"gmo":23486,"giftideas":23487,"pathway":23488,"fencing":23489,"ðŁİ¤":23490,"bham":23491,"ras":23492,"sko":23493,"dled":23494,"thelast":23495,"magnum":23496,"binary":23497,"wilde":23498,"wilder":23499,"whati":23500,"barbecue":23501,"hism":23502,"canoe":23503,"kurdi":23504,"elive":23505,"advantages":23506,"madame":23507,"bier":23508,"missing":23509,"entertain":23510,"airforce":23511,"yama":23512,"cis":23513,"hashtags":23514,"jis":23515,"veil":23516,"dreamy":23517,"tense":23518,"mayward":23519,"chateau":23520,"huntington":23521,"âļĵ":23522,"vall":23523,"upon":23524,"blouse":23525,"dunes":23526,"ðŁĺ´":23527,"fertility":23528,"mole":23529,"currencies":23530,"stu":23531,"berlin":23532,"toasted":23533,"divas":23534,"walt":23535,"lark":23536,"pora":23537,"hitter":23538,"umer":23539,"chilled":23540,"balancing":23541,"fais":23542,"yin":23543,"ortiz":23544,"eastenders":23545,"hate":23546,"ural":23547,"april":23548,"timel":23549,"à±":23550,"pero":23551,"stocked":23552,"respects":23553,"tht":23554,"bestfriends":23555,"givingtuesday":23556,"bead":23557,"invent":23558,"imi":23559,"naples":23560,"combining":23561,"tokens":23562,"thirst":23563,"masc":23564,"parrot":23565,"spu":23566,"denton":23567,"*-*":23568,"tres":23569,"suburban":23570,"width":23571,"sive":23572,"contender":23573,"sirius":23574,"lok":23575,"troopers":23576,"outrage":23577,"turbo":23578,"fragile":23579,"messed":23580,"doh":23581,"discord":23582,"netanyahu":23583,"resign":23584,"forgiveness":23585,"mohan":23586,"munch":23587,"camou":23588,"identifying":23589,"enabling":23590,"hotter":23591,"thornton":23592,"jaipur":23593,"arya":23594,"ðŁı»âĢįâĻĢï¸ı":23595,"mustaf":23596,"majors":23597,"oke":23598,"duffy":23599,"rohing":23600,"tilt":23601,"ðŁĩ®ðŁĩ³":23602,"rockstar":23603,"sheep":23604,"hendrix":23605,"rav":23606,"invention":23607,"dou":23608,"laguna":23609,"grumpy":23610,"swis":23611,"impe":23612,")'":23613,"youths":23614,"bunker":23615,"stache":23616,"oppose":23617,"indies":23618,"accelerate":23619,"mlp":23620,"eden":23621,"wann":23622,"kail":23623,"akshaykumar":23624,"supt":23625,"polym":23626,"middleton":23627,"extraordin":23628,"wilson":23629,"australian":23630,"aluminium":23631,"wayne":23632,"alumnus":23633,"matics":23634,"grim":23635,"ernie":23636,"oppa":23637,"competitors":23638,"randall":23639,"hence":23640,"declares":23641,"preaching":23642,"shahe":23643,"cane":23644,"sustainable":23645,"staples":23646,"ledge":23647,"adena":23648,"doctoral":23649,"burgundy":23650,"decorate":23651,"rendered":23652,"risen":23653,"prank":23654,"dior":23655,"beethoven":23656,"floor":23657,"accom":23658,"tot":23659,"hodg":23660,"tourism":23661,"sayin":23662,"objective":23663,"markers":23664,"premiership":23665,
"enabled":23666,"camoufla":23667,"giant":23668,"Ñģ":23669,"smokey":23670,"ricket":23671,"pang":23672,"depending":23673,"sation":23674,"evolving":23675,"intercep":23676,"census":23677,"tofthe":23678,"reen":23679,"mendoza":23680,"trumpet":23681,"marketers":23682,"anit":23683,"ðŁĻĬ":23684,"northwestern":23685,"vla":23686,"fotogra":23687,"blackandwhite":23688,"chewan":23689,"wig":23690,"troom":23691,"gingerbread":23692,"kn":23693,"romero":23694,"nfc":23695,"orchi":23696,"funko":23697,"source":23698,"fs":23699,"raped":23700,"ost":23701,"tarot":23702,"annually":23703,"ðŁĺ¬":23704,"rill":23705,"delav":23706,"..!!":23707,"ses":23708,"cann":23709,"medicare":23710,"phel":23711,"apex":23712,"guardian":23713,"remained":23714,"rpm":23715,"añ":23716,"storymonth":23717,"instagood":23718,"neighbour":23719,"ping":23720,"semite":23721,"mystic":23722,"ascot":23723,"mater":23724,"handful":23725,"dangers":23726,"tid":23727,"anaheim":23728,"opoly":23729,"shallow":23730,"namibia":23731,"toria":23732,"procurement":23733,"bigbang":23734,"announcements":23735,"prosecutor":23736,"bengals":23737,"salle":23738,"enroll":23739,"gastro":23740,"suggestion":23741,"bak":23742,"haul":23743,"buddhism":23744,"berniesanders":23745,"flute":23746,"fatigue":23747,"cynthia":23748,"choi":23749,"irwin":23750,"gua":23751,"strous":23752,"hp":23753,"bap":23754,"satisfying":23755,"playa":23756,"ðŁİ¼":23757,"instap":23758,"alice":23759,"tp":23760,"irrigation":23761,"ðŁĩ¬ðŁĩ§":23762,"intric":23763,"clues":23764,"plex":23765,"sax":23766,"hepat":23767,"dumped":23768,"significance":23769,"byu":23770,"medication":23771,"prov":23772,"toughest":23773,"cornish":23774,"âŀľ":23775,"kelley":23776,"uv":23777,"sizz":23778,"sibling":23779,"mest":23780,"distor":23781,"diplomatic":23782,"auntie":23783,"bhat":23784,"sonic":23785,"brenda":23786,"pumpkins":23787,"roch":23788,"blackburn":23789,"urged":23790,"shia":23791,"arrangements":23792,"flood":23793,"saunders":23794,"lecturer":23795,"nouri":23796,"populations":23797,"diplomacy":23798,"consistently":23799,"ðŁ¤Ļ":23800,"tmund":23801,"cauliflower":23802,"lily":23803,"vocabulary":23804,"varieties":23805,"cooker":23806,"uptown":23807,"quent":23808,"mosa":23809,"reinde":23810,"velocity":23811,"spruce":23812,"socialmedi":23813,"iber":23814,"voluntary":23815,"processed":23816,"baltic":23817,"yang":23818,"lebanese":23819,"dp":23820,"dolly":23821,"arrangement":23822,"yuri":23823,"cranberry":23824,"kalyan":23825,"elevation":23826,"cliff":23827,"pushes":23828,"ìĬ¤":23829,"silic":23830,"cowx":23831,"eternity":23832,"slaves":23833,"vinegar":23834,"gloucester":23835,"contained":23836,"breakingnews":23837,"against":23838,"renovated":23839,"normandy":23840,"heroin":23841,"ysm":23842,"mods":23843,"greek":23844,"undi":23845,"trench":23846,"vh":23847,"encourages":23848,"headache":23849,"grange":23850,":'":23851,"evergreen":23852,"ÙĬ":23853,"reckon":23854,"abused":23855,"thru":23856,"choice":23857,"tidy":23858,"colder":23859,"schoice":23860,"hain":23861,"brum":23862,"liars":23863,"breit":23864,"yorker":23865,"shack":23866,"heidi":23867,"michaels":23868,"scopic":23869,"fascist":23870,"playful":23871,"cac":23872,"yasss":23873,"shad":23874,"..?":23875,"quen":23876,"ramirez":23877,"clifton":23878,"prs":23879,"bestfan":23880,"âģł":23881,"generating":23882,"headset":23883,"disappointment":23884,"abstract":23885,"boiled":23886,"parenthood":23887,"azerbaijan":23888,"exhibiting":23889,"bombay":23890,"olivier":23891,"koso":23892,"unlea":23893,"maternity":23894,"izer":23895,"sives":23896,"rhu":23897,"coll":23898,"saskatchewan":23899,"f
reakin":23900,"dek":23901,"nag":23902,"stabili":23903,"ðŁįķ":23904,"organizer":23905,"bosses":23906,"aru":23907,"uva":23908,"atable":23909,"taun":23910,"afterwards":23911,"fertili":23912,"verge":23913,"azi":23914,"morph":23915,"à¹ģà¸":23916,"jerk":23917,"cosmetic":23918,"kow":23919,"strust":23920,"apache":23921,"postcards":23922,"formul":23923,"ìĭ":23924,"spinal":23925,"jackpot":23926,"electri":23927,"ÃŃ":23928,"loy":23929,"grader":23930,"diablo":23931,"ardi":23932,"hesit":23933,"fw":23934,"archery":23935,"pash":23936,"theories":23937,"repeal":23938,"relive":23939,"percy":23940,"âĺĨ":23941,"imin":23942,"synchron":23943,"shampoo":23944,"coupons":23945,"oto":23946,"lai":23947,"thought":23948,"luxembourg":23949,"mov":23950,"ðŁĺ¥":23951,"gemma":23952,"seated":23953,"mga":23954,"stratford":23955,"uncertainty":23956,"shifts":23957,"esto":23958,"fool":23959,"firearms":23960,"corrie":23961,"kiki":23962,"apparent":23963,"pills":23964,"olympia":23965,"fid":23966,"elevated":23967,"decks":23968,"ignoring":23969,"avalan":23970,"rov":23971,"whistle":23972,"ptsd":23973,"militants":23974,"robotic":23975,"pacers":23976,"quilt":23977,"bankruptcy":23978,"lich":23979,"percussion":23980,"celebrity":23981,"als":23982,"(;":23983,"sut":23984,"pokemongo":23985,"hg":23986,"offs":23987,"gibraltar":23988,"screams":23989,"billie":23990,"genome":23991,"marin":23992,"beams":23993,"archbishop":23994,"emin":23995,"bedrooms":23996,"gated":23997,"olly":23998,"warranty":23999,"atown":24000,"cuddles":24001,"gunna":24002,"kic":24003,"vive":24004,"cymru":24005,"narrow":24006,"prob":24007,"leo":24008,"references":24009,"manufactured":24010,"chopper":24011,"brunswick":24012,"semis":24013,"donia":24014,"rye":24015,"mano":24016,"hurting":24017,"?#":24018,"holli":24019,"investigations":24020,"cels":24021,"ðŁĵŀ":24022,"lester":24023,"temples":24024,"storey":24025,"mcmahon":24026,"toilets":24027,"woof":24028,"ï¸İ":24029,"leverage":24030,"atom":24031,"nightmares":24032,"victorious":24033,"haunting":24034,"customer":24035,"agi":24036,"yoongi":24037,"monty":24038,"veronica":24039,"wur":24040,"intimid":24041,"blankets":24042,"volution":24043,"jm":24044,"âĺİ":24045,"amon":24046,"judith":24047,"ðŁĺİðŁĺİ":24048,"distracted":24049,"drip":24050,"hurricane":24051,"andes":24052,"revelation":24053,"troop":24054,"ableg":24055,"collin":24056,"tibetan":24057,"worrying":24058,"internationally":24059,"eater":24060,"cameroon":24061,"brador":24062,"yuk":24063,"ðŁĴĹðŁĴĹ":24064,"trak":24065,"slopes":24066,"cier":24067,"nea":24068,"oler":24069,"taka":24070,"albion":24071,"volcanic":24072,"amn":24073,"afi":24074,"obstac":24075,"facetime":24076,"gering":24077,"npr":24078,"metallica":24079,"organic":24080,"ðŁĴ¡":24081,"kidd":24082,"dances":24083,"pembro":24084,"washer":24085,"mits":24086,"omer":24087,"emotionally":24088,"tango":24089,"ipo":24090,"docks":24091,"scanning":24092,"specs":24093,"thom":24094,"theology":24095,"emergen":24096,"omi":24097,"gpa":24098,"selections":24099,"unnecessary":24100,"image":24101,"ters":24102,"induced":24103,"gigan":24104,"rentals":24105,"supplied":24106,"mfa":24107,"shankar":24108,"later":24109,"pajam":24110,"clave":24111,"Ùģ":24112,"mahin":24113,"carlson":24114,"avian":24115,"anova":24116,"katie":24117,"ajith":24118,"designated":24119,"chocolates":24120,"investigators":24121,"glazed":24122,"princess":24123,"erry":24124,"ragn":24125,"ourable":24126,"hru":24127,"sundance":24128,"peugeot":24129,"steampunk":24130,"ghlin":24131,"grease":24132,"hires":24133,"zap":24134,"perce":24135,"jill":24136,"tome":24137,"hehehe":24138,"joyful":2
4139,"maestro":24140,"nished":24141,"genealo":24142,"vich":24143,"pits":24144,"foxes":24145,"goodman":24146,"emerson":24147,"lobes":24148,"converse":24149,"oats":24150,"thomson":24151,"rahim":24152,"malware":24153,"ahi":24154,"mankind":24155,"resin":24156,"img":24157,"swood":24158,"kinder":24159,"scroll":24160,"ara":24161,"sakura":24162,"robbed":24163,"xion":24164,"nya":24165,"cism":24166,"cedar":24167,"bein":24168,"mourning":24169,"torto":24170,"heathrow":24171,"donegal":24172,"barb":24173,"hydration":24174,"kor":24175,"elimination":24176,"supdates":24177,"hills":24178,"appeti":24179,"starred":24180,"kom":24181,"gwen":24182,"ddd":24183,"cray":24184,"scanner":24185,"personalised":24186,"serenity":24187,"redesign":24188,"metaph":24189,"boxed":24190,"judgment":24191,"nose":24192,"ë¹":24193,"erad":24194,"acne":24195,"suppliers":24196,"energetic":24197,"vom":24198,"asap":24199,"ðŁĶ¸":24200,"irvine":24201,"hatch":24202,"lass":24203,"adren":24204,"waffles":24205,"accurately":24206,"icio":24207,"ittle":24208,"seun":24209,"occupy":24210,"webcam":24211,"thenew":24212,"entes":24213,"gai":24214,"jw":24215,"accountable":24216,"visor":24217,"irrit":24218,"licensing":24219,"huddersfield":24220,"genie":24221,"ðŁİ¾":24222,"atmospheric":24223,"tensions":24224,"spartan":24225,"clifford":24226,"olan":24227,"northbound":24228,"ameen":24229,"censor":24230,"uel":24231,"stery":24232,"$$":24233,"farrell":24234,"hyster":24235,"clt":24236,"sedan":24237,"replied":24238,"describing":24239,"microwave":24240,"slab":24241,"prosp":24242,"assisting":24243,"rubio":24244,"ethan":24245,"hhhhh":24246,"guay":24247,"zman":24248,"raise":24249,"rolling":24250,"oe":24251,"nile":24252,"ambrose":24253,"scarborough":24254,"heroic":24255,"cooks":24256,"mort":24257,"chopra":24258,"ðŁĮ·":24259,"tob":24260,"shaving":24261,"stacey":24262,"dorm":24263,"motorsports":24264,"wiki":24265,"folds":24266,"spiced":24267,"stressful":24268,"literal":24269,"fudge":24270,"peggy":24271,"waite":24272,"tresses":24273,"sesh":24274,"pric":24275,"ðŁİħ":24276,"fright":24277,"rva":24278,"mumbai":24279,"pom":24280,"ttv":24281,"cellar":24282,"tome":24283,"android":24284,"doris":24285,"tsunami":24286,"tinder":24287,"oec":24288,"mwc":24289,"dortmund":24290,"nothin":24291,"liti":24292,"sou":24293,"believein":24294,"atu":24295,"knocks":24296,"magni":24297,"sssss":24298,"rohit":24299,"inews":24300,"angi":24301,"mandy":24302,"kettle":24303,"intermediate":24304,"avant":24305,"curl":24306,"endorsed":24307,"orio":24308,"urt":24309,"consideration":24310,"wires":24311,"shelters":24312,"bino":24313,"vikram":24314,"implemented":24315,"lydia":24316,"buk":24317,"parody":24318,"cnews":24319,"undergraduate":24320,"canucks":24321,"sami":24322,"politically":24323,"rotten":24324,"ghz":24325,"textiles":24326,"overload":24327,"moderni":24328,"recreational":24329,"flir":24330,"baton":24331,"typography":24332,"ovation":24333,"intriguing":24334,"pilgrimage":24335,"alge":24336,"adays":24337,"tcmparty":24338,"spelled":24339,"curls":24340,"booze":24341,"stem":24342,"annes":24343,"irls":24344,"sponge":24345,"shopper":24346,"signation":24347,"brass":24348,"mistress":24349,"leah":24350,"beginner":24351,"lauderdale":24352,"august":24353,"preschool":24354,"taping":24355,"taipei":24356,"executives":24357,"bd":24358,"rhetor":24359,"escor":24360,"immuno":24361,"deeplearning":24362,"statues":24363,"itus":24364,"manuscript":24365,"lyric":24366,"corvette":24367,"molly":24368,"lage":24369,"dep":24370,"cnbc":24371,"lest":24372,"jessi":24373,"fife":24374,"griffith":24375,"opposing":24376,"rang":24377,"d
rills":24378,"respectful":24379,"pity":24380,"dell":24381,"harding":24382,"playboy":24383,"bloke":24384,"shutout":24385,"kili":24386,"osp":24387,"seattle":24388,"bcpoli":24389,"mises":24390,"journals":24391,"teaming":24392,"esther":24393,"freddy":24394,"Ķï¸ı":24395,"metrics":24396,"notre":24397,"garry":24398,"forty":24399,"navigate":24400,"periods":24401,"benedic":24402,"jid":24403,"daw":24404,"ancestors":24405,"restoring":24406,"cong":24407,"allergy":24408,"titanium":24409,"cence":24410,"leaning":24411,"abbas":24412,"vast":24413,"ucf":24414,"roofing":24415,"eman":24416,"severely":24417,"vogue":24418,"veau":24419,"inbound":24420,"dz":24421,"taneously":24422,"stretching":24423,"manchester":24424,"dryer":24425,"davis":24426,"kanth":24427,"thegame":24428,"itted":24429,"retain":24430,"elles":24431,"congestion":24432,"fraternity":24433,"ollie":24434,"loki":24435,"freely":24436,"choo":24437,"pony":24438,"scep":24439,"tably":24440,"balt":24441,"rockn":24442,"dime":24443,"logging":24444,"ðŁį·":24445,"adu":24446,"havoc":24447,"waterford":24448,"charis":24449,"sweetie":24450,"running":24451,"nerd":24452,"erdogan":24453,"zara":24454,"weighing":24455,"fifty":24456,"precise":24457,"lowell":24458,"kurdistan":24459,"ryo":24460,"orth":24461,"synth":24462,"liners":24463,"phenomenon":24464,"artillery":24465,"illegally":24466,"construct":24467,"nostalgic":24468,"garth":24469,"alta":24470,"shelton":24471,"asean":24472,"wander":24473,"durban":24474,"diversi":24475,"bono":24476,"clon":24477,"leman":24478,"shun":24479,"obstacles":24480,"appetite":24481,"feeder":24482,"respiratory":24483,"dixie":24484,"formula":24485,"anto":24486,"sober":24487,"extinct":24488,"auc":24489,"ingles":24490,"legitimate":24491,";;":24492,"minnie":24493,"ipswich":24494,"dramatically":24495,"ðŁijıðŁı¼":24496,"ingham":24497,"military":24498,"monet":24499,"usnavy":24500,"fork":24501,"dunno":24502,"player":24503,"qotd":24504,"stoo":24505,"exor":24506,"ethiopian":24507,"filmfest":24508,"pered":24509,"cate":24510,"saudi":24511,"inner":24512,"sincere":24513,"tionality":24514,"alee":24515,"deeds":24516,"cooperative":24517,"ironic":24518,"crocod":24519,"brary":24520,"postseason":24521,"camper":24522,"canary":24523,"ein":24524,"extensions":24525,"nbd":24526,"sherwood":24527,"spokane":24528,"hump":24529,"jitsu":24530,"ê¹":24531,"daryl":24532,"psi":24533,"stabbed":24534,"offerings":24535,"expects":24536,"caval":24537,"bodybuilding":24538,"framing":24539,"fca":24540,"yearly":24541,"bombed":24542,"skil":24543,"researching":24544,"judiciary":24545,"greeted":24546,"tudor":24547,"milo":24548,"innovate":24549,"ðŁĺĽ":24550,"rhs":24551,"ruby":24552,"contributor":24553,"famer":24554,"socially":24555,"mlin":24556,"fiery":24557,"utter":24558,"beaut":24559,"itos":24560,"devoted":24561,"rainbow":24562,"barney":24563,"peren":24564,"arjun":24565,"rna":24566,"gabby":24567,"uti":24568,"hannity":24569,"pickle":24570,"serv":24571,"quakes":24572,"ppe":24573,"fem":24574,"whitec":24575,"jn":24576,"victories":24577,"ðŁ§¡":24578,"golfer":24579,"congratulates":24580,"resulting":24581,"mechanic":24582,"urve":24583,"centered":24584,"kiev":24585,"ans":24586,"incub":24587,"<<":24588,"cmo":24589,"bestfanarmy":24590,"daph":24591,"enham":24592,"oncology":24593,"kush":24594,"txt":24595,"oriented":24596,"fashionable":24597,"csr":24598,"sahara":24599,"rack":24600,"pdp":24601,"hanson":24602,"à¸ĩ":24603,"tiers":24604,"rar":24605,"panam":24606,"insky":24607,"sahi":24608,"testament":24609,"asthma":24610,"inher":24611,"fisheries":24612,"order":24613,"howe":24614,"gallon":24615,"epis":2461
6,"suzanne":24617,"drowning":24618,"panelists":24619,"ðŁĺ²":24620,"ë¦":24621,"alach":24622,"commemorative":24623,"attribu":24624,"ðŁij»":24625,"moo":24626,"visional":24627,"weeksary":24628,"gust":24629,"akin":24630,"pointe":24631,"eee":24632,"dispar":24633,"nipp":24634,"dental":24635,"stall":24636,"pian":24637,"bore":24638,"ulster":24639,"tick":24640,"irr":24641,"taehyung":24642,"microphone":24643,"bermuda":24644,"gaard":24645,"eler":24646,"plumbing":24647,"hugely":24648,"âļ«ï¸ı":24649,"raceway":24650,"cambridge":24651,"marcel":24652,"burnley":24653,"toast":24654,"hollywood":24655,"fasting":24656,"mered":24657,"hibition":24658,"capped":24659,"beneficial":24660,"owning":24661,"contamin":24662,"arabian":24663,"toon":24664,"capac":24665,"hulu":24666,"smir":24667,"nutrients":24668,"sein":24669,"graphs":24670,"conditional":24671,"ðŁijħ":24672,"orac":24673,"playin":24674,"northe":24675,"tornad":24676,"marian":24677,"jumbo":24678,"lexi":24679,"incredibleindia":24680,"roadto":24681,"ukone":24682,"confusing":24683,"sph":24684,"shank":24685,"pied":24686,"mqm":24687,"positively":24688,"sherry":24689,"pathways":24690,"considers":24691,"tofu":24692,"arguments":24693,"resilient":24694,"chett":24695,"withdra":24696,"tero":24697,"atedly":24698,"swana":24699,"heb":24700,"flight":24701,"harley":24702,"decrease":24703,"kindle":24704,"bookshop":24705,"³ï¸ı":24706,"martyrs":24707,"smur":24708,"mccl":24709,"concerto":24710,"stime":24711,"rejoice":24712,"applau":24713,"clement":24714,"merkel":24715,"jaime":24716,"immortal":24717,"isleof":24718,"marco":24719,"youtuber":24720,"stalking":24721,"metoo":24722,"stack":24723,"spouse":24724,"ust":24725,"luv":24726,"âļ¾ï¸ı":24727,"equestrian":24728,"eving":24729,"flin":24730,"nickname":24731,"thebig":24732,"asar":24733,"stacks":24734,"walker":24735,"bora":24736,"kidnapped":24737,"hurling":24738,"humbold":24739,"recalls":24740,"copper":24741,"annis":24742,"seo":24743,"merger":24744,"muir":24745,"addy":24746,"ðŁĴªðŁĴª":24747,"bex":24748,"cracy":24749,"conan":24750,"congratulation":24751,"midst":24752,"âĻ¬":24753,"forbi":24754,"optic":24755,"crate":24756,"crocodile":24757,"madagas":24758,"securing":24759,"aston":24760,"ogue":24761,"savior":24762,"salisbury":24763,"loveit":24764,"fujifilm":24765,"castles":24766,"asst":24767,"arrows":24768,"spacious":24769,"trs":24770,"polyvore":24771,"progression":24772,"mri":24773,"nelson":24774,"bim":24775,"indicator":24776,"oda":24777,"pepe":24778,"resignation":24779,"gut":24780,"sneaker":24781,"logically":24782,"azy":24783,"arella":24784,"tearing":24785,"joshi":24786,"ssionism":24787,"qpr":24788,"mariah":24789,"px":24790,"bleed":24791,"mian":24792,"medley":24793,"weiss":24794,"kerry":24795,"gatory":24796,"atal":24797,"madison":24798,"avenger":24799,"naby":24800,"pland":24801,"giles":24802,"freshwater":24803,"dington":24804,"taj":24805,"demonstrates":24806,"ntv":24807,"bulbs":24808,"sundaymorning":24809,"peake":24810,"souvenir":24811,"wah":24812,"tonnes":24813,"mkt":24814,"complexity":24815,"conden":24816,"rossi":24817,"bing":24818,"yds":24819,"suk":24820,"ngo":24821,"midland":24822,"oly":24823,"lifeis":24824,"ripple":24825,"moreno":24826,"dders":24827,"tus":24828,"áĥ":24829,"boul":24830,"xa":24831,"holdings":24832,"wny":24833,"shadowhunters":24834,"kei":24835,"aspire":24836,"mous":24837,"owen":24838,"soak":24839,"skirts":24840,"mountaine":24841,"storming":24842,"chrome":24843,"riots":24844,"sarato":24845,"amaze":24846,"lessness":24847,"navar":24848,"criteria":24849,"rafa":24850,"indulge":24851,"ayer":24852,"porto":24853,"namo":24854,"....
............":24855,"yields":24856,"valle":24857,"jh":24858,"macron":24859,"sains":24860,"durant":24861,"trailers":24862,"wot":24863,"confederate":24864,"shrin":24865,"idol":24866,"formally":24867,"tene":24868,"motorcycles":24869,"thang":24870,"node":24871,"banger":24872,"daly":24873,"pats":24874,"enrollment":24875,"auctions":24876,"atal":24877,"arbor":24878,"logos":24879,"dearest":24880,"transaction":24881,"domingo":24882,"flea":24883,"sermon":24884,"deck":24885,"sincere":24886,"questioning":24887,"julio":24888,"wasp":24889,"pretz":24890,"armenian":24891,"kham":24892,"inflammation":24893,"picturesque":24894,"accidental":24895,"filmmakers":24896,"ðŁĺļ":24897,"ðŁĴį":24898,"casey":24899,"sob":24900,"yeezy":24901,"goodwill":24902,"paragra":24903,"ssly":24904,"feather":24905,"dyed":24906,"assassination":24907,"nade":24908,"bcs":24909,"applies":24910,"feminine":24911,"feu":24912,"extent":24913,"deputies":24914,"lack":24915,"psychic":24916,"goi":24917,"killings":24918,"pseu":24919,"ðŁ¤ª":24920,"unc":24921,"marl":24922,"tane":24923,"mckenna":24924,"surfer":24925,"influences":24926,"freeway":24927,"hackney":24928,"malaria":24929,"eland":24930,"teau":24931,"remastered":24932,"ر":24933,"razor":24934,"ggy":24935,"corro":24936,"laksh":24937,"flair":24938,"honesty":24939,"hooray":24940,"depp":24941,"amc":24942,"wednesdays":24943,"qa":24944,"edits":24945,"-$":24946,"sevilla":24947,"doubled":24948,"humanities":24949,"ccot":24950,"somos":24951,"rine":24952,"afa":24953,"sioux":24954,"reconstruction":24955,"welding":24956,"threads":24957,"amish":24958,"encouragement":24959,"poder":24960,"bock":24961,"balm":24962,"ptions":24963,"standup":24964,"accomplishments":24965,"guarding":24966,"conviction":24967,"acion":24968,"napoleon":24969,"depicting":24970,"attack":24971,"sui":24972,"wearable":24973,"âĸªï¸ı":24974,"potter":24975,"escort":24976,"vise":24977,"tots":24978,"boon":24979,"eventprofs":24980,"angular":24981,"womenshistorymonth":24982,"barrow":24983,"schi":24984,"accomp":24985,"tik":24986,"lend":24987,"kensington":24988,"wolfe":24989,"stacked":24990,"crashing":24991,"exhibit":24992,"winged":24993,"sabrina":24994,"masa":24995,"kms":24996,"always":24997,"ett":24998,"plasma":24999,"counseling":25000,"pickles":25001,"nfldraft":25002,"mrs":25003,"inevitable":25004,"courageous":25005,"stafford":25006,"writerslife":25007,"hos":25008,"ej":25009,"ghyun":25010,"trademark":25011,"adrian":25012,"influencer":25013,"coronation":25014,"raging":25015,"explored":25016,"usaf":25017,"exception":25018,"eux":25019,"tanker":25020,"swami":25021,"packet":25022,"ðŁij¨âĢį":25023,"fen":25024,"sheen":25025,"aero":25026,"jl":25027,"regal":25028,"nwt":25029,"auster":25030,"mehta":25031,"charge":25032,"aste":25033,"bate":25034,"infeld":25035,"racecourse":25036,"collapsed":25037,"fleece":25038,"zil":25039,"allie":25040,"alternatives":25041,"georges":25042,"ðŁĵį":25043,"quirky":25044,"fcb":25045,"natgeo":25046,"philanthropy":25047,"brai":25048,"everyday":25049,"ðŁIJ°":25050,"achers":25051,"jaan":25052,"fines":25053,"qi":25054,"fisherman":25055,"distinct":25056,"grimes":25057,"nationalist":25058,"commence":25059,"rown":25060,"âĢ³":25061,"zing":25062,"fter":25063,"hrw":25064,"baroque":25065,"blender":25066,"kitty":25067,"hooks":25068,"cited":25069,"wanda":25070,"consensus":25071,"reindeer":25072,"anand":25073,"supply":25074,"meds":25075,"vn":25076,"olph":25077,"ratchet":25078,"sheldon":25079,"securities":25080,"ë°©íĥ":25081,"crom":25082,"mosquito":25083,"jeric":25084,"immac":25085,"dimensions":25086,"â¤":25087,"dissi":25088,"spongebob":25089
,"damien":25090,"stevenson":25091,"joanne":25092,"delish":25093,"yikes":25094,"thanx":25095,"surveys":25096,"postponed":25097,"alcoholic":25098,"alised":25099,"ðŁĻıðŁı»":25100,"doch":25101,"sentim":25102,"meredith":25103,"compares":25104,"bago":25105,"happydays":25106,"moss":25107,"ãħĭ":25108,"nec":25109,"gnment":25110,"frustrated":25111,"combin":25112,"riv":25113,"eclec":25114,"collo":25115,"compliment":25116,"actorslife":25117,"ctto":25118,"nicar":25119,"ophon":25120,"aparthe":25121,"mant":25122,"jade":25123,"trolley":25124,"optimization":25125,"eyeon":25126,"ecological":25127,"quist":25128,"ephe":25129,"à¥ĩ":25130,"cinco":25131,"appoints":25132,"oldschool":25133,"cpr":25134,"behavioral":25135,"minaj":25136,":-(":25137,"tagging":25138,"eval":25139,"joaqu":25140,"ðŁĺ«":25141,"hak":25142,"deme":25143,"jamaican":25144,"sos":25145,"hyatt":25146,"handbook":25147,"librarian":25148,"hannibal":25149,"pumping":25150,"chom":25151,"fman":25152,"gai":25153,"hull":25154,"responders":25155,"greenville":25156,"nus":25157,"vaugh":25158,"ðŁİīðŁİī":25159,"taxi":25160,"goldberg":25161,"mantra":25162,"tease":25163,"forbidden":25164,"methodist":25165,"ativity":25166,"****":25167,"ect":25168,"mcgr":25169,"Ħëĭ":25170,"seb":25171,"amidst":25172,"disappear":25173,"thyro":25174,"philips":25175,"erina":25176,"vicious":25177,"streamer":25178,"millionaire":25179,"map":25180,"strick":25181,"hackathon":25182,"gha":25183,"edic":25184,"mika":25185,"peck":25186,"illi":25187,"antoine":25188,"arca":25189,"optic":25190,"maure":25191,"ðŁĩ¦ðŁĩº":25192,"clashes":25193,"manly":25194,"âĺģ":25195,"alvar":25196,"andres":25197,"mei":25198,"elm":25199,"wwww":25200,"altered":25201,"lte":25202,"ê¹Ģ":25203,"mojo":25204,"forrest":25205,"thalai":25206,"nont":25207,"speeches":25208,"acknowledge":25209,"ignite":25210,"xfactor":25211,"ðŁ¥Ĥ":25212,"meadow":25213,"disrupt":25214,"debuted":25215,"scrimmage":25216,"pharmaceutical":25217,"fidd":25218,"foundations":25219,"philosopher":25220,"etal":25221,"publishers":25222,"boys":25223,"cke":25224,"rugged":25225,"optimism":25226,"rebe":25227,"philharmon":25228,"narcis":25229,"rallies":25230,"luis":25231,"goblue":25232,"folded":25233,"unacceptable":25234,"optimal":25235,"lisa":25236,"polaro":25237,"+.":25238,"enza":25239,"âĿ£ï¸ı":25240,"monopoly":25241,"graceful":25242,"dairy":25243,"dua":25244,"difficulty":25245,"judgement":25246,"osi":25247,"mersey":25248,"flux":25249,"newfound":25250,"terns":25251,"dimensional":25252,"invic":25253,"alba":25254,"amit":25255,"abudhabi":25256,"algeria":25257,"automobile":25258,"thead":25259,"lotion":25260,"accelerator":25261,"vacant":25262,"ition":25263,"luf":25264,"alic":25265,"pll":25266,"blazing":25267,"baz":25268,"sene":25269,"ðŁij¼":25270,"villains":25271,"directory":25272,"eisen":25273,"tock":25274,"brochure":25275,"ripp":25276,"hbd":25277,"zaynmalik":25278,"niche":25279,"lolol":25280,"certificates":25281,"morse":25282,"facup":25283,"xham":25284,"unwanted":25285,"imports":25286,"carnegie":25287,"fansign":25288,"mou":25289,"ralph":25290,"destroyer":25291,"swing":25292,"trekking":25293,"ciliation":25294,"pitbull":25295,"gaps":25296,"howell":25297,"definitive":25298,"mcle":25299,"fps":25300,"etz":25301,"bolly":25302,"lynn":25303,"gano":25304,"ature":25305,"fursuit":25306,"coil":25307,"nav":25308,"butts":25309,"trojans":25310,"eure":25311,"enko":25312,"schumer":25313,"horrific":25314,"installment":25315,"brb":25316,"suburbs":25317,"abel":25318,"vir":25319,"desh":25320,"cunningham":25321,"ðŁIJ»":25322,"spann":25323,"schwe":25324,"kemp":25325,"tru":25326,"stealth":2
5327,"ques":25328,"lew":25329,"delights":25330,"koch":25331,"humili":25332,"criti":25333,"ilt":25334,"spells":25335,"miley":25336,"caric":25337,"ðŁį´":25338,"lcfc":25339,"substitute":25340,"oung":25341,"?!!":25342,"affir":25343,"predictable":25344,"classof":25345,"err":25346,"cypress":25347,"chandra":25348,"ageing":25349,"____":25350,"therland":25351,"doncaster":25352,"elin":25353,"yoshi":25354,"sailors":25355,"harris":25356,"joanna":25357,"nigerians":25358,"hers":25359,"plague":25360,"procra":25361,"kno":25362,"canton":25363,"busines":25364,"unh":25365,"prakash":25366,"cin":25367,"bowen":25368,"coating":25369,"mals":25370,"begging":25371,"smithson":25372,"pontiac":25373,"spies":25374,"damian":25375,"pline":25376,"undant":25377,"alta":25378,"oness":25379,"shameless":25380,"daq":25381,"bbm":25382,"wales":25383,"stampede":25384,"serum":25385,"ÙĨ":25386,"catalyst":25387,"xn":25388,"absc":25389,"freezer":25390,"chun":25391,"arios":25392,"mccre":25393,"forehead":25394,"hears":25395,"damascus":25396,"tacoma":25397,"arduino":25398,"encounters":25399,"stanton":25400,"lgb":25401,"abas":25402,"\"..":25403,"kete":25404,"dracula":25405,"elem":25406,"gne":25407,"zeppelin":25408,"labrador":25409,"pulp":25410,"optional":25411,"orn":25412,"russians":25413,"sanitation":25414,"hilary":25415,"etsymntt":25416,"penalties":25417,"aust":25418,"igans":25419,"olympian":25420,"medicaid":25421,"versace":25422,"vape":25423,"restra":25424,"peep":25425,"sexiest":25426,"stalls":25427,"dile":25428,"thea":25429,"punjabi":25430,"puppy":25431,"tuesdaymotivation":25432,"ðŁĵļ":25433,"theflash":25434,"rocket":25435,"modest":25436,"chihuahu":25437,"onna":25438,"ksa":25439,"hurdles":25440,"cave":25441,"failures":25442,"split":25443,"boho":25444,"gurl":25445,"disappoint":25446,"howard":25447,"nugget":25448,"franz":25449,"stalert":25450,"kazakh":25451,"forgetting":25452,"schri":25453,"agate":25454,"amat":25455,"everett":25456,"duet":25457,"veterinary":25458,"julian":25459,"chills":25460,"brave":25461,"ghostbusters":25462,"lando":25463,"greets":25464,"profitable":25465,"dé":25466,"tir":25467,"zee":25468,"omen":25469,"pdx":25470,"grayson":25471,"hari":25472,"fixes":25473,"stabbing":25474,"swimmer":25475,"symbols":25476,"compliments":25477,"pose":25478,"functioning":25479,"thnx":25480,"gir":25481,"corporations":25482,"barlow":25483,"loe":25484,"offseason":25485,"distinctive":25486,"marvelous":25487,"nikon":25488,"enrique":25489,"kyu":25490,"jaws":25491,"amoto":25492,"lombar":25493,"travelblogger":25494,"fah":25495,"ourism":25496,"tristan":25497,"soe":25498,"cease":25499,"ðŁıħ":25500,"zac":25501,"mckenzie":25502,"taxpayers":25503,"swimsuit":25504,"blo":25505,"lesley":25506,"kansas":25507,"wks":25508,"kiel":25509,"provoking":25510,"myles":25511,"string":25512,"kangaroo":25513,"galactic":25514,"fifth":25515,"ske":25516,"weir":25517,"llis":25518,"matory":25519,"ðŁĩ¿":25520,"unci":25521,"reproductive":25522,"rooting":25523,"tides":25524,"gadget":25525,"..........":25526,"alexander":25527,"bowler":25528,"screw":25529,"apolog":25530,"erika":25531,"walters":25532,"shetty":25533,"lane":25534,"banter":25535,"asant":25536,"meso":25537,"vain":25538,"\"\"\"":25539,"usi":25540,"ferdin":25541,"accomplish":25542,"mansfield":25543,"bombar":25544,"collaborating":25545,"clap":25546,"iture":25547,"sda":25548,"smoky":25549,"nak":25550,"imperson":25551,"carla":25552,"comra":25553,"burgl":25554,"loco":25555,"ties":25556,"inhi":25557,"tracey":25558,"seis":25559,"disser":25560,"rrrr":25561,"dray":25562,"protect":25563,"corona":25564,"hunger":25565,"cken":2556
6,"celi":25567,"troubled":25568,"predators":25569,"fictional":25570,"shaved":25571,"richest":25572,"metaboli":25573,"fulham":25574,"grooming":25575,"monochrome":25576,"wasting":25577,"asco":25578,"aste":25579,"tista":25580,"remedies":25581,"ungsoo":25582,"southend":25583,"permanently":25584,"bumble":25585,"procrastin":25586,"identical":25587,"practically":25588,"mascul":25589,"suke":25590,"assured":25591,"valerie":25592,"deviant":25593,"grizzlies":25594,"thier":25595,"pura":25596,"nepal":25597,"notts":25598,"bilateral":25599,"spoil":25600,"carmel":25601,"cinematic":25602,"phl":25603,"nifty":25604,"mao":25605,"hypocri":25606,"laser":25607,"pantry":25608,"mathematical":25609,"elisa":25610,"coordination":25611,"belmont":25612,"ait":25613,"radiant":25614,"boiler":25615,"mang":25616,"fag":25617,"crc":25618,"hams":25619,"brin":25620,"â¬ĩï¸ı":25621,"familia":25622,"âĿ£":25623,"saber":25624,"rupert":25625,"ggan":25626,"ritz":25627,"mich":25628,"salford":25629,"levi":25630,"gral":25631,"ðŁĴ¤":25632,"nino":25633,"ced":25634,"businessman":25635,"ultr":25636,"simply":25637,"compression":25638,"pains":25639,"halt":25640,"ë°©íĥĦ":25641,"landscaping":25642,"nf":25643,"crooked":25644,"erd":25645,"ittin":25646,"ddleston":25647,"surpassed":25648,"inoa":25649,"dag":25650,"blen":25651,"extending":25652,"ating":25653,"algae":25654,"baller":25655,"umar":25656,"snooker":25657,"collu":25658,"flown":25659,"thub":25660,"ridiculously":25661,"kish":25662,"ople":25663,"dire":25664,"asser":25665,"aristo":25666,"sciss":25667,"hating":25668,"trouble":25669,"sylvia":25670,"succul":25671,"plots":25672,"sincerely":25673,"aler":25674,"laureate":25675,"brack":25676,"attn":25677,"rifles":25678,"meto":25679,"collectible":25680,"cuomo":25681,"contestant":25682,"consistency":25683,"antz":25684,"ranges":25685,"abigail":25686,"deb":25687,"minister":25688,"growers":25689,"anoo":25690,"hoover":25691,"dreamer":25692,"nucle":25693,"research":25694,"miy":25695,"shahid":25696,"mav":25697,"dhoni":25698,"cini":25699,"doj":25700,"hindus":25701,"partying":25702,"dali":25703,"alonso":25704,"informal":25705,"clarkson":25706,"itton":25707,"kian":25708,"cityo":25709,"mori":25710,"lasted":25711,"aspen":25712,"library":25713,"suspici":25714,"quat":25715,"denial":25716,"folder":25717,"chori":25718,"sweeping":25719,"enix":25720,"ðŁįĤ":25721,"ØŃ":25722,"nascar":25723,"handmadehour":25724,"moul":25725,"heatwave":25726,"emer":25727,"examine":25728,"ibn":25729,"grind":25730,"pov":25731,"tionist":25732,"mbo":25733,"sheila":25734,"integrate":25735,"omes":25736,"takeaway":25737,"cerv":25738,"connie":25739,"ticket":25740,"celed":25741,"bien":25742,"visually":25743,"madagascar":25744,"sorry":25745,"gui":25746,"parkrun":25747,"traits":25748,"labe":25749,"poisoning":25750,"à¥Ģ":25751,"viable":25752,"bohemian":25753,"dentistry":25754,"bados":25755,"sprouts":25756,"masked":25757,"teddy":25758,"ðŁĺ·":25759,"saf":25760,"saas":25761,"jiang":25762,"tight":25763,"speaker":25764,"withdrawal":25765,"bcn":25766,"assigned":25767,"classrooms":25768,"fleming":25769,"ðŁĴ«":25770,"supergirl":25771,"totals":25772,"tabletop":25773,"ebooks":25774,"horizontal":25775,"craz":25776,"flush":25777,"jard":25778,"cdc":25779,"erson":25780,"ãħł":25781,"greenwood":25782,"nih":25783,"cox":25784,"ada":25785,"litre":25786,"going":25787,"vicky":25788,"curved":25789,"louie":25790,"grains":25791,"hye":25792,"longe":25793,"remedy":25794,"trainee":25795,"sanjay":25796,"superstars":25797,"maser":25798,"manu":25799,"sage":25800,"whl":25801,"ðŁĺĤðŁĺŃ":25802,"ðŁijįðŁı»":25803,"msd":25804,"enz":25805,"
rabhu":25806,"joo":25807,"ghu":25808,"acer":25809,"epo":25810,"resurrection":25811,"justicefor":25812,"blended":25813,"moda":25814,"avalanche":25815,"francesco":25816,"respective":25817,"gs":25818,"yeast":25819,"welch":25820,"devotion":25821,"getin":25822,"atheism":25823,"amic":25824,"carolyn":25825,"loc":25826,"ldnont":25827,"avec":25828,"usda":25829,"legged":25830,"bravery":25831,"blower":25832,"cowboy":25833,"heh":25834,"stible":25835,"buffal":25836,"channel":25837,"runchat":25838,"âĺķï¸ı":25839,"ideology":25840,"bestseller":25841,"yoo":25842,"peanu":25843,"bonne":25844,"felic":25845,"edison":25846,"fractu":25847,"narendra":25848,"ppets":25849,"seymour":25850,"riviera":25851,"hector":25852,"necessarily":25853,"bianca":25854,"societies":25855,"thebest":25856,"wg":25857,"sentences":25858,"wink":25859,"vaccines":25860,"palooza":25861,"jamming":25862,"asf":25863,"mpus":25864,"agreements":25865,"eck":25866,"bac":25867,"honore":25868,"compul":25869,"wildcat":25870,"imposed":25871,"yoga":25872,"hudson":25873,"canceled":25874,"lich":25875,"fuzzy":25876,"esque":25877,"chuk":25878,"wvu":25879,"sek":25880,"flipping":25881,"rhon":25882,"wished":25883,"wha":25884,"capability":25885,"lenovo":25886,"ìĨĮëħĦëĭ":25887,"vivo":25888,"tvd":25889,"nora":25890,"silk":25891,"pasadena":25892,"yosemite":25893,"valuation":25894,"clocks":25895,"uber":25896,"mrc":25897,"darkest":25898,"aubre":25899,"sso":25900,"belly":25901,"wrestlers":25902,"killin":25903,"louder":25904,"buckley":25905,"geel":25906,"adon":25907,"uns":25908,"appealing":25909,"ðŁij¯":25910,"semitism":25911,"listens":25912,"fitz":25913,"ãĥ³ãĥ":25914,"nylon":25915,"arty":25916,"seemingly":25917,"hala":25918,"suited":25919,"ety":25920,"sheds":25921,"muffins":25922,"apric":25923,"uments":25924,"uta":25925,"jammu":25926,"chelseafc":25927,"starz":25928,"yoko":25929,"root":25930,"cleansing":25931,"diar":25932,"pioneering":25933,"iheartradio":25934,"digiti":25935,"findyour":25936,"cano":25937,"ðŁĴİ":25938,"zol":25939,"spacecraft":25940,"sixers":25941,"moisturi":25942,"bile":25943,"tists":25944,"horton":25945,"ranging":25946,"columbi":25947,"meteoro":25948,"sentiment":25949,"epl":25950,"footh":25951,"textbook":25952,"drainage":25953,"rly":25954,"scue":25955,"imrankhan":25956,"ðŁĴ¸":25957,"margarita":25958,"eddy":25959,"predicts":25960,"gamergate":25961,"advise":25962,"growthhacking":25963,"loveyou":25964,"ugand":25965,"vf":25966,"benghazi":25967,"slater":25968,"newor":25969,"chel":25970,"independenceday":25971,"pnp":25972,"cullen":25973,"hoodies":25974,"numbered":25975,"britt":25976,"tsa":25977,"kltu":25978,"sages":25979,"momo":25980,"oneplus":25981,"coll":25982,"guts":25983,"wta":25984,"mesmeri":25985,"enhancing":25986,"chiroprac":25987,"jis":25988,"teenagers":25989,"mone":25990,"constellation":25991,"sweepstakes":25992,"eze":25993,"slovakia":25994,"laye":25995,"pearce":25996,"waver":25997,"pogba":25998,"kron":25999,"surgeons":26000,"marx":26001,"tid":26002,"gga":26003,"descend":26004,"pours":26005,"uprising":26006,"walla":26007,"sabbath":26008,"bachelore":26009,"mackin":26010,"kam":26011,"peterborough":26012,"hora":26013,"ðŁĮŁðŁĮŁ":26014,"thinkbig":26015,"rj":26016,"hydrau":26017,"spal":26018,"universit":26019,"ðŁıī":26020,"mailonline":26021,"leagueof":26022,"tenants":26023,"wally":26024,"lance":26025,"heavens":26026,"ddr":26027,"bolts":26028,"amir":26029,"iphone":26030,"cigar":26031,"endu":26032,"rei":26033,"elabor":26034,"ringing":26035,"johnson":26036,"characteristics":26037,"saloon":26038,"algorithms":26039,"talkin":26040,"mtn":26041,"dive":26042,"regio
nals":26043,"ffice":26044,"hati":26045,"deviantart":26046,"sotto":26047,"shiro":26048,"lama":26049,"kwe":26050,"faded":26051,"porting":26052,"tummy":26053,"estates":26054,"buenos":26055,"ðŁ¦ģ":26056,"believer":26057,"penetr":26058,"darn":26059,"spite":26060,"canopy":26061,"fashioni":26062,"tilla":26063,"petals":26064,"elijah":26065,"brawl":26066,"martyr":26067,"ë°©íĥĦìĨĮëħĦëĭ":26068,"midtown":26069,"erich":26070,"dapper":26071,"smtown":26072,"megam":26073,"www":26074,"lele":26075,"ons":26076,"catfish":26077,"firth":26078,"fossilfriday":26079,"ballpark":26080,"thaw":26081,"potent":26082,"illie":26083,"creep":26084,"carp":26085,"soap":26086,"gundam":26087,"infec":26088,"yyyyy":26089,"न":26090,"zag":26091,"ritt":26092,"calculator":26093,"boca":26094,"oko":26095,"toad":26096,"threaten":26097,"refined":26098,"olympic":26099,"accomplishment":26100,"bacterial":26101,"aji":26102,"tatum":26103,"feliz":26104,"sheed":26105,"jat":26106,"thic":26107,"jamal":26108,"ðĿĺ":26109,"lina":26110,"ðŁIJ¯":26111,"joking":26112,"yotpo":26113,"pinch":26114,"akron":26115,"herb":26116,"motivation":26117,"lia":26118,"hostage":26119,"creek":26120,"gamble":26121,"russell":26122,"patti":26123,"fotos":26124,"cpc":26125,"broken":26126,"backthe":26127,"clays":26128,"umm":26129,"stockton":26130,"maternal":26131,"ür":26132,"lakel":26133,"century":26134,"bek":26135,"infected":26136,"ม":26137,"smackdown":26138,"manned":26139,"tahoe":26140,"smes":26141,"basa":26142,"sula":26143,"augusta":26144,".*":26145,"rohingya":26146,"greed":26147,"counselor":26148,"silhouette":26149,"gravit":26150,"clause":26151,"'-":26152,"bobc":26153,"occasions":26154,"nowadays":26155,"dictat":26156,"beard":26157,"nally":26158,"brightest":26159,"kabul":26160,"incindia":26161,"dhanush":26162,"archaeological":26163,"cheape":26164,"mizzou":26165,"dhi":26166,"ovski":26167,"baxter":26168,"assemble":26169,"â":26170,"gigi":26171,"acam":26172,"wisely":26173,"hazard":26174,"northampton":26175,"âľĪï¸ı":26176,"meth":26177,"blasting":26178,"reunite":26179,"mulus":26180,"alizes":26181,"tread":26182,"mila":26183,"edward":26184,"kova":26185,"pesto":26186,"ðŁij¶":26187,"vitz":26188,"hydraulic":26189,"refurbished":26190,"motel":26191,"isabella":26192,"homme":26193,"severance":26194,"uphol":26195,"miserable":26196,"fari":26197,"latter":26198,"efer":26199,"crackers":26200,"esl":26201,"acio":26202,"yyj":26203,"inan":26204,"ecb":26205,"zind":26206,"panas":26207,"trucking":26208,"reed":26209,"shaker":26210,"burgess":26211,"empire":26212,"agnes":26213,"nington":26214,"artworks":26215,"frs":26216,"tile":26217,"biome":26218,"eun":26219,"chong":26220,"americana":26221,"godfather":26222,"goblin":26223,"ishi":26224,"!).":26225,"tempted":26226,"genomics":26227,"mandate":26228,"cky":26229,"ðŁĴĻðŁĴĽ":26230,"somali":26231,"brandy":26232,"inven":26233,"spokesperson":26234,"pcb":26235,"yuan":26236,"hg":26237,"faz":26238,"starwars":26239,"rowan":26240,"bluegrass":26241,"dong":26242,"dday":26243,"trinidad":26244,"erton":26245,"banning":26246,"retention":26247,"cured":26248,"toberfest":26249,"reset":26250,"weis":26251,"detached":26252,"behindthescenes":26253,"immunity":26254,"pha":26255,"bray":26256,"ðŁij½":26257,"rancho":26258,"ramsay":26259,"estonia":26260,"ndtv":26261,"].":26262,"cabaret":26263,"taro":26264,"dv":26265,"showcases":26266,"plum":26267,"ðŁij¸":26268,"sonoma":26269,"prepa":26270,"memorab":26271,"estu":26272,"driveway":26273,"ules":26274,"magnus":26275,"xr":26276,"nnn":26277,"muchas":26278,"enge":26279,"streamed":26280,"forestry":26281,"audiobook":26282,"troy":26283,"reckless":2
6284,"kilom":26285,"ruler":26286,"rak":26287,"procession":26288,"ions":26289,"poole":26290,"noctur":26291,"whs":26292,"farmhouse":26293,"pera":26294,"parme":26295,"hypocrisy":26296,"sics":26297,"vant":26298,"cask":26299,"holistic":26300,"aust":26301,"п":26302,"indo":26303,"ðŁij©âĢį":26304,"diso":26305,"dispatch":26306,"olsen":26307,"makeit":26308,"ennis":26309,"centre":26310,"arrange":26311,"ðŁĮ¼":26312,"salted":26313,"easiest":26314,"fate":26315,"regatta":26316,"mozz":26317,"acan":26318,"sini":26319,"gically":26320,"chops":26321,"chicken":26322,"workin":26323,"hagg":26324,"involve":26325,"weeds":26326,"bookday":26327,"wakeup":26328,"kyr":26329,"michelin":26330,"fuss":26331,"rejuven":26332,"vacancies":26333,"incarcer":26334,"mst":26335,"scents":26336,"sovereign":26337,"kicker":26338,"à§":26339,"bod":26340,"âĢĶ>":26341,"sah":26342,"mobil":26343,"shropshire":26344,"ophone":26345,"dresser":26346,"missuni":26347,"hepburn":26348,"imo":26349,"foliage":26350,"diagnostic":26351,"assan":26352,"cycling":26353,"guilt":26354,"csa":26355,"puertorico":26356,"winelover":26357,"wakefield":26358,"doggy":26359,"khe":26360,"papp":26361,"cog":26362,"allot":26363,"cuck":26364,"poetic":26365,"mio":26366,"revit":26367,"magician":26368,"ç¥":26369,"antenna":26370,"westwood":26371,"mberg":26372,"luxe":26373,"oatmeal":26374,"ج":26375,"teat":26376,"ffee":26377,"searches":26378,"lly":26379,"pluto":26380,"elon":26381,"lettering":26382,"innocence":26383,"fai":26384,"annon":26385,"telangana":26386,"mait":26387,"neural":26388,"canni":26389,"aroma":26390,"astor":26391,"fex":26392,"cocac":26393,"monetary":26394,"fent":26395,"unsure":26396,"'@":26397,"indirec":26398,"tehran":26399,"isolation":26400,"libs":26401,"makeup":26402,"mercedes":26403,"ffy":26404,"hetero":26405,"deo":26406,"scom":26407,"cursed":26408,"veteransday":26409,"frankenstein":26410,"shrews":26411,"deco":26412,"geese":26413,"leftover":26414,"hadid":26415,"variable":26416,"academics":26417,"carolin":26418,"undergoing":26419,"variation":26420,"nah":26421,"ssier":26422,"gamersunite":26423,"pursuing":26424,"emerged":26425,"llers":26426,"controlling":26427,"roaring":26428,"meteor":26429,"volt":26430,"dawgs":26431,"beaver":26432,"islife":26433,"bathrooms":26434,"acional":26435,"prevent":26436,"lakedistrict":26437,"inals":26438,"yani":26439,"grabbing":26440,"sacks":26441,"lez":26442,"sway":26443,"kool":26444,"times":26445,"klopp":26446,"lade":26447,"concord":26448,"resulted":26449,"revive":26450,"reconciliation":26451,"oland":26452,"azz":26453,"giro":26454,"mandarin":26455,"deen":26456,"nutritional":26457,"iscoming":26458,"vani":26459,"awwww":26460,"derived":26461,"loveyour":26462,"stopthe":26463,"shouting":26464,"novak":26465,"ðŁĻĮðŁı¾":26466,"loaf":26467,"displaying":26468,"sundaywith":26469,"maguire":26470,"cheri":26471,"ðŁıŁ":26472,"rematch":26473,"quic":26474,"Ú©":26475,"yin":26476,"ðŁĺ¹":26477,"ilive":26478,"zip":26479,"ourke":26480,"downloads":26481,"swat":26482,"mississ":26483,"carers":26484,"tment":26485,"property":26486,"hahahahahaha":26487,"gibbs":26488,"surrey":26489,"arise":26490,"ticism":26491,"stia":26492,"irling":26493,"frog":26494,"cose":26495,"bassist":26496,"foreig":26497,"leau":26498,"pillows":26499,"holla":26500,"elie":26501,"disclosure":26502,"peanuts":26503,"intech":26504,"wwc":26505,"plunge":26506,"triumph":26507,"cori":26508,"slippers":26509,"ðŁĻıðŁĻı":26510,"neutrality":26511,"mare":26512,"hairy":26513,"gangster":26514,"humming":26515,"custard":26516,"merlin":26517,"alea":26518,"sby":26519,"damp":26520,"mohan":26521,"verbal":26522,"jst":2652
3,"gutted":26524,"bjor":26525,"unfinished":26526,"ðŁĩ¯ðŁĩµ":26527,"unhappy":26528,"âļ«ï¸ı":26529,"bypass":26530,"atsu":26531,"fischer":26532,"sav":26533,"africans":26534,"reuse":26535,"midway":26536,"demolished":26537,"gerrard":26538,"hercules":26539,"ÄŁ":26540,"medicines":26541,"clicking":26542,"surround":26543,"joong":26544,"waving":26545,"tribes":26546,"wetlands":26547,"officiel":26548,"arguing":26549,"lle":26550,"dova":26551,"suzy":26552,"clubhouse":26553,"negro":26554,"obtain":26555,"gao":26556,"glance":26557,"assist":26558,"chos":26559,"ãĤ¢":26560,"âĺķ":26561,"adrid":26562,"occurs":26563,"stans":26564,"pardon":26565,"liveli":26566,"employed":26567,"revisit":26568,"ffxiv":26569,"bble":26570,"nearing":26571,"miner":26572,"ðŁĺ¹":26573,"giovanni":26574,"upto":26575,"marvell":26576,"marse":26577,"towels":26578,"cbn":26579,"engineered":26580,"yelling":26581,"spartan":26582,"sians":26583,"ðŁĻĮðŁı¼":26584,"sev":26585,"coyote":26586,"stadi":26587,"tcm":26588,"appen":26589,"shenanigans":26590,"openaccess":26591,"soaked":26592,"masqu":26593,"levine":26594,"strokes":26595,"lk":26596,"apartheid":26597,"hiphop":26598,"chardon":26599,"maymay":26600,"haasan":26601,"stripped":26602,"fro":26603,"scription":26604,"fton":26605,"hf":26606,"prisons":26607,"marshal":26608,"ķãĤ":26609,"ancho":26610,"compromise":26611,"classification":26612,"buzzfeed":26613,"bbloggers":26614,"deserving":26615,")/":26616,"sway":26617,"obo":26618,"campers":26619,"podernfamily":26620,"poured":26621,"brie":26622,"squirrels":26623,"seize":26624,":#":26625,"lek":26626,"timb":26627,"stacy":26628,"nasdaq":26629,"repeatedly":26630,"brat":26631,"mighty":26632,"competitor":26633,"mahone":26634,"desi":26635,"oke":26636,"bmw":26637,"shie":26638,"fcb":26639,"cheapest":26640,"minimalist":26641,"paramount":26642,"nate":26643,"haras":26644,"insanity":26645,"lateral":26646,"mentality":26647,"mozam":26648,"tapped":26649,"yadav":26650,"usp":26651,"bway":26652,"theod":26653,"bilt":26654,"raids":26655,"empress":26656,"adapted":26657,"patron":26658,"nutshell":26659,"agra":26660,"beaded":26661,"sundaywithmarsha":26662,"viking":26663,"proceed":26664,"maintained":26665,"thinkbigsundaywithmarsha":26666,"snes":26667,"musica":26668,"tower":26669,"chab":26670,"bok":26671,"smt":26672,"insult":26673,"harvesting":26674,"window":26675,"ruther":26676,"beige":26677,"decal":26678,"indicate":26679,"mailing":26680,"rift":26681,"pole":26682,"anderson":26683,"choral":26684,"spride":26685,"lili":26686,"evelyn":26687,"imrankhanpti":26688,"....\"":26689,"kered":26690,"undp":26691,"waterfalls":26692,"sears":26693,"lemans":26694,"worldseries":26695,"riel":26696,"anie":26697,"appar":26698,"scorers":26699,"lamp":26700,"athan":26701,"physicians":26702,"quinoa":26703,"refusing":26704,"vuitton":26705,"unleash":26706,"sla":26707,"pati":26708,"shouts":26709,"intentions":26710,"foamed":26711,"european":26712,"neighborhoods":26713,"meer":26714,"manson":26715,"duh":26716,"brat":26717,"cones":26718,"bowl":26719,"kazakhstan":26720,"ि":26721,"inappropriate":26722,"delhi":26723,"ketchup":26724,"fulton":26725,"sys":26726,"consult":26727,"garfield":26728,"togo":26729,"fml":26730,"fled":26731,"bds":26732,"facilitate":26733,"reebok":26734,"selfie":26735,"elevate":26736,"activate":26737,"bible":26738,"cawx":26739,"bys":26740,"camille":26741,"syou":26742,"skool":26743,"hert":26744,"wbc":26745,"pledges":26746,"recorder":26747,"posh":26748,"acre":26749,"soaking":26750,"matil":26751,"vsco":26752,"shootings":26753,"plar":26754,"econ":26755,"ðŁĻĮðŁı»":26756,"rashid":26757,"ubi":26758,"ðŁ¤¤":2675
9,"swinging":26760,"wipe":26761,"raptor":26762,"msu":26763,"musicvideo":26764,"durham":26765,"attic":26766,"aparty":26767,"fetus":26768,"activation":26769,"aaz":26770,"motivate":26771,"ðŁĴķðŁĴķðŁĴķ":26772,"jal":26773,"म":26774,"agon":26775,"scheer":26776,"stalker":26777,"foster":26778,"azzo":26779,"telegram":26780,"vigor":26781,"slaugh":26782,"screenshots":26783,"entrepreneu":26784,"kristin":26785,"intention":26786,"chilli":26787,"fraction":26788,"dona":26789,"gea":26790,"tcu":26791,"site":26792,"lak":26793,"emil":26794,"dnt":26795,"boro":26796,"wilkinson":26797,"recu":26798,"atoday":26799,"tanya":26800,"blanco":26801,"cdn":26802,"brilliantly":26803,"gcc":26804,"acc":26805,"evacuated":26806,"therine":26807,"denny":26808,"caitlin":26809,"shepard":26810,"pouch":26811,"handheld":26812,"southeastern":26813,"haa":26814,"ô":26815,"resolutions":26816,"ledger":26817,"srin":26818,"rar":26819,"shattered":26820,"chimney":26821,"imwith":26822,"meteor":26823,"handled":26824,"rake":26825,"townsend":26826,"enhan":26827,"shipy":26828,"duct":26829,"twx":26830,"inflammatory":26831,"warhammer":26832,"theatrical":26833,"gros":26834,"skar":26835,"scotty":26836,"niel":26837,"tito":26838,"tini":26839,"connection":26840,"_.":26841,"goldenglobes":26842,"shaq":26843,"ðŁı³ï¸ı":26844,"hallway":26845,"fronts":26846,"effectiveness":26847,"glaston":26848,"dhs":26849,"expi":26850,"toh":26851,"cpl":26852,"scs":26853,"reo":26854,"hag":26855,"resemblance":26856,"horan":26857,"abusive":26858,"quer":26859,"virtue":26860,"cholester":26861,"aq":26862,"shane":26863,"mce":26864,"carriers":26865,"distress":26866,"rewind":26867,"¡":26868,"voodoo":26869,"intact":26870,"anno":26871,"ðŁĺ¤":26872,"piled":26873,"adia":26874,"ãĥ³":26875,"enow":26876,"digs":26877,"lightly":26878,"goofy":26879,"turbine":26880,"governors":26881,"conte":26882,"reopen":26883,"pah":26884,"ive":26885,"crafting":26886,"sweeps":26887,"jodi":26888,"ande":26889,"zucker":26890,"kawaii":26891,"oko":26892,"vai":26893,"outline":26894,"kristi":26895,"tsn":26896,"inspo":26897,"quint":26898,"filthy":26899,"lynne":26900,"listeners":26901,"departing":26902,"ord":26903,"tweed":26904,",&":26905,"alek":26906,"selfish":26907,"norther":26908,"recognizes":26909,"ips":26910,"bes":26911,"aed":26912,"wills":26913,"peat":26914,"surroundings":26915,"monuments":26916,"aisle":26917,"becker":26918,"lav":26919,"quantity":26920,"vah":26921,"helicopters":26922,"tucked":26923,"alvarez":26924,"shape":26925,"obey":26926,"additi":26927,"roadside":26928,"mite":26929,"blers":26930,"epage":26931,"jau":26932,"ignorant":26933,"bins":26934,"lulu":26935,"xo":26936,"cfo":26937,"eeeee":26938,"apprenticeship":26939,"sheffiel":26940,"toi":26941,"hok":26942,"fakenews":26943,"deploy":26944,"aidan":26945,"huskers":26946,"ãĢİ":26947,"westbrook":26948,"mister":26949,"configur":26950,"carr":26951,"fica":26952,"proceedings":26953,"haw":26954,"steak":26955,"murderer":26956,"payday":26957,"ajo":26958,"pvc":26959,"donates":26960,"biaf":26961,"nomnom":26962,"beit":26963,"kali":26964,"xrp":26965,"ahmedabad":26966,"semic":26967,"chey":26968,"xtra":26969,"antwer":26970,"headlining":26971,"squares":26972,"rounded":26973,"fluore":26974,"bold":26975,"disasters":26976,"amoo":26977,"generic":26978,"cranes":26979,"briefly":26980,"gig":26981,"austerity":26982,"anticipation":26983,"forti":26984,"treasurer":26985,"canny":26986,"cecil":26987,"detected":26988,"checklist":26989,"ว":26990,"pamela":26991,"barbados":26992,"anfield":26993,"hearty":26994,"txlege":26995,"perenni":26996,"arrog":26997,"ingram":26998,"âĹı":26999,"tyne":2700
0,"spoon":27001,"ration":27002,"amba":27003,"mbe":27004,"camel":27005,"hhs":27006,"yorkshire":27007,"reflective":27008,"freaks":27009,"tok":27010,"judo":27011,"particles":27012,"dubs":27013,"banjo":27014,"accreditation":27015,"proverbs":27016,"overdose":27017,"integral":27018,"guang":27019,"mcs":27020,"supercar":27021,"afb":27022,"alvin":27023,"ails":27024,"xtre":27025,"staging":27026,"twent":27027,"rabbits":27028,"maro":27029,"instem":27030,"doll":27031,"cray":27032,"santana":27033,"bleach":27034,"minions":27035,"cheap":27036,"mant":27037,"divers":27038,"catalonia":27039,"lois":27040,"matri":27041,"cougar":27042,"kayak":27043,"egre":27044,"pso":27045,"aia":27046,"å®":27047,"charlton":27048,"tracked":27049,"scari":27050,"pett":27051,"fwd":27052,"xin":27053,"gravel":27054,"bric":27055,"biggboss":27056,"arden":27057,"hugging":27058,"palms":27059,"stv":27060,"limb":27061,"themovie":27062,"handicap":27063,"rime":27064,"zai":27065,"stub":27066,"india":27067,"lithuania":27068,"rhyth":27069,"pita":27070,"macedonia":27071,"highered":27072,"bridget":27073,"schwarz":27074,"skelet":27075,"hikes":27076,"antarctic":27077,"cps":27078,"mashup":27079,"а":27080,"nell":27081,"chandra":27082,"heir":27083,"anus":27084,"sheridan":27085,"mimi":27086,"museu":27087,"becca":27088,"anir":27089,"barrie":27090,"diocese":27091,"comparable":27092,"ðŁı³ï¸ıâĢį":27093,"yukon":27094,"mep":27095,"hormon":27096,"meric":27097,"alf":27098,"conquered":27099,"christchurch":27100,"ðŁĴĻðŁĴĻ":27101,"hazardous":27102,"pooh":27103,"conting":27104,"retrospective":27105,"parame":27106,"nair":27107,"consor":27108,"hotra":27109,"astonishing":27110,"caterpillar":27111,"uman":27112,"tism":27113,"tvs":27114,"servic":27115,"croydon":27116,"morales":27117,"cg":27118,"cum":27119,"teur":27120,"scanada":27121,"sall":27122,"magnolia":27123,"elise":27124,"thour":27125,"ி":27126,"agomez":27127,"phelps":27128,"ë°©íĥĦìĨĮëħĦëĭ¨":27129,"whos":27130,"weaving":27131,"sisd":27132,"proposes":27133,"crows":27134,"presale":27135,"economies":27136,"bernardo":27137,"shahid":27138,"airshow":27139,"mccann":27140,"horticul":27141,"nrl":27142,"duel":27143,"mongolia":27144,"toulou":27145,"requirement":27146,"structured":27147,"edi":27148,"olives":27149,"hea":27150,"cuter":27151,"к":27152,"enthusiast":27153,"harriet":27154,"dominion":27155,"submer":27156,"ðŁįĥ":27157,"saab":27158,"nesburg":27159,"moff":27160,"defended":27161,"burt":27162,"rewarded":27163,"goldman":27164,"optics":27165,"khalid":27166,"households":27167,"buckets":27168,"cecil":27169,"chess":27170,"substantial":27171,"efl":27172,"operation":27173,"evaluate":27174,"stn":27175,"recession":27176,"lll":27177,"tomas":27178,"truths":27179,"akbar":27180,"swords":27181,"pact":27182,"embarrass":27183,"hao":27184,"ayurve":27185,"scripture":27186,"nycc":27187,"opt":27188,"diameter":27189,"scented":27190,"organizers":27191,"relat":27192,"hae":27193,"dreamers":27194,"dese":27195,"ðŁĮ»":27196,"restricted":27197,"nale":27198,"rhp":27199,"dolan":27200,"munster":27201,"haired":27202,"consultants":27203,"joints":27204,"humil":27205,"dill":27206,"relentless":27207,"té":27208,"afil":27209,"utilities":27210,"japanese":27211,"condemn":27212,"petite":27213,"collide":27214,"qf":27215,"peaches":27216,"courier":27217,"lore":27218,"âĺİï¸ı":27219,"reliability":27220,"chuk":27221,"ðŁĻĥ":27222,"stures":27223,"gether":27224,"hostel":27225,"bier":27226,"-_-":27227,"âĩ":27228,"eze":27229,"tailo":27230,"dient":27231,"bluff":27232,"chuffed":27233,"pilip":27234,"monarch":27235,"eem":27236,"buchan":27237,"bick":27238,"opau":27239,"kups":27
240,"ย":27241,"pistons":27242,"spins":27243,"mand":27244,"cest":27245,"burne":27246,"vile":27247,"cherries":27248,"beckett":27249,"needles":27250,"panch":27251,"ëĤ":27252,"hahah":27253,"troubles":27254,"insists":27255,"doyou":27256,"gmc":27257,"mortar":27258,"delegate":27259,"inn":27260,"ganda":27261,"sinatra":27262,"त":27263,"speeding":27264,"pupil":27265,"premises":27266,"alignment":27267,"pikach":27268,"asus":27269,"jalan":27270,"ص":27271,"limestone":27272,"folkl":27273,"parmesan":27274,"ceil":27275,"moy":27276,"shawnmendes":27277,"acup":27278,"hust":27279,"otes":27280,"medina":27281,"madi":27282,"gtav":27283,"censorship":27284,"arg":27285,"sweeney":27286,"sykes":27287,"colo":27288,"footsteps":27289,"canned":27290,"advance":27291,"gtaonline":27292,"healthyliving":27293,"ðŁį¾":27294,"aig":27295,"pality":27296,"ocs":27297,"hebrew":27298,"imminent":27299,"berkshire":27300,"jeremiah":27301,"outgoing":27302,"baker":27303,"entrata":27304,"maids":27305,"groves":27306,"boc":27307,"adel":27308,"mfw":27309,"conscience":27310,"armys":27311,"nutella":27312,"contestalert":27313,"novelist":27314,"lah":27315,"banker":27316,"marquez":27317,"ðŁı¡":27318,"toff":27319,"outage":27320,"grp":27321,"ðŁĺŃðŁĺŃðŁĺŃðŁĺŃ":27322,"muscle":27323,"dudley":27324,"nvidia":27325,"midi":27326,"muni":27327,"essays":27328,"datac":27329,"carter":27330,"ร":27331,"tans":27332,"ives":27333,"publications":27334,"aler":27335,"okwx":27336,"ilu":27337,"cutt":27338,"harp":27339,"outlaw":27340,"lutheran":27341,"brill":27342,"bolic":27343,"dowell":27344,"greenland":27345,"besties":27346,"pathi":27347,"payton":27348,"guest":27349,"harden":27350,"ðŁ¤©":27351,"anned":27352,"evacuation":27353,"poised":27354,"mcder":27355,"bhan":27356,"oi":27357,"envelope":27358,"cid":27359,"cavi":27360,"tapas":27361,"bookreview":27362,"greyhound":27363,"âĻª":27364,"feud":27365,"lungs":27366,"forte":27367,"raider":27368,"ffer":27369,"onix":27370,"depend":27371,"ynwa":27372,"relating":27373,"devs":27374,"ðŁĴIJ":27375,"acquires":27376,"dha":27377,"jyo":27378,"privati":27379,"canine":27380,"kb":27381,"crab":27382,"sardin":27383,"imagining":27384,"kj":27385,"empor":27386,"downhill":27387,"nez":27388,"taeyeon":27389,"nickimin":27390,"gbp":27391,"àµ":27392,"wap":27393,"secco":27394,"mashed":27395,"ðŁĴ¥ðŁĴ¥":27396,"augustine":27397,"dissol":27398,"dictator":27399,"âĵ":27400,"viper":27401,"edfringe":27402,"vaux":27403,"hardwork":27404,"booklet":27405,"nox":27406,"chiff":27407,"ðŁĴ¨":27408,"observations":27409,"xboxone":27410,"usher":27411,"keer":27412,"lup":27413,"dallas":27414,"calgary":27415,"madra":27416,"dious":27417,"kbs":27418,"woodward":27419,"heroine":27420,"lumber":27421,"seaworld":27422,"ows":27423,"mcke":27424,"maverick":27425,"gula":27426,"crossroads":27427,"fang":27428,"sade":27429,"nikol":27430,"cheetah":27431,"mec":27432,"ppg":27433,"erick":27434,"ðŁİµ":27435,"toxic":27436,"bjj":27437,"viola":27438,"spire":27439,"chino":27440,"travis":27441,"institutional":27442,"haas":27443,"lowry":27444,"wac":27445,"eae":27446,"humid":27447,"mpton":27448,"ruck":27449,"jew":27450,"cine":27451,"zimmer":27452,"sef":27453,"bharat":27454,"frees":27455,"aamir":27456,"ðŁĴħ":27457,"zinc":27458,"wane":27459,"multiplayer":27460,"royalwedding":27461,"eel":27462,"precipit":27463,"query":27464,"kimberly":27465,"isabel":27466,"fulfill":27467,"igan":27468,"vaul":27469,"pane":27470,"scy":27471,"digit":27472,"gunn":27473,"utah":27474,"dogday":27475,"fion":27476,"xiaomi":27477,"dac":27478,"elast":27479,"chavez":27480,"roblo":27481,"gine":27482,"tenth":27483,"abh":27484,"keto":27485,
"hurdle":27486,"nadia":27487,"memorabilia":27488,"habs":27489,"quan":27490,"hw":27491,"hvac":27492,"pixar":27493,"eccle":27494,"kramer":27495,"accuses":27496,"ðŁĴļðŁĴļ":27497,"perse":27498,"meantime":27499,"wahl":27500,"atletico":27501,"âĢ¢âĢ¢âĢ¢âĢ¢":27502,"ottoman":27503,"novo":27504,"kus":27505,"connected":27506,"trusts":27507,"dmv":27508,"spencer":27509,"rahulg":27510,"dove":27511,"stokes":27512,"bologna":27513,"enthusiasts":27514,"ê":27515,"rockstargames":27516,"tedcruz":27517,"duras":27518,"sacked":27519,"latex":27520,"immersive":27521,"cert":27522,"lucin":27523,"principals":27524,"fares":27525,"sails":27526,"farn":27527,"ament":27528,"saffron":27529,"quentin":27530,"checkpoint":27531,"ferris":27532,"excur":27533,"ðŁijīðŁı¼":27534,"bailey":27535,"seh":27536,"terre":27537,"madam":27538,"sband":27539,"wanderers":27540,"cumberbatch":27541,"yyc":27542,"digitally":27543,"blackandwhitephotography":27544,"rollin":27545,"moroccan":27546,"ðŁĮħ":27547,"dinner":27548,"dwell":27549,"toom":27550,"mye":27551,"ezra":27552,"cpfc":27553,"warhol":27554,"meer":27555,"jonah":27556,"noaa":27557,"sgate":27558,"soon":27559,"secular":27560,"gating":27561,"tio":27562,"driver":27563,"sissy":27564,"assange":27565,"tath":27566,"edmund":27567,"bobcats":27568,"raji":27569,"postage":27570,"studs":27571,"mgm":27572,"kato":27573,"edinburgh":27574,"meetthe":27575,"shirt":27576,"faa":27577,"mensfashion":27578,"spreads":27579,"wim":27580,"carts":27581,"phoebe":27582,"jars":27583,"botswana":27584,"ÙĤ":27585,"edwar":27586,"skar":27587,"rive":27588,"gusty":27589,"ctv":27590,"ferdinand":27591,"sutherland":27592,"nickiminaj":27593,"kv":27594,"sius":27595,"beech":27596,"rez":27597,"desires":27598,"onial":27599,"campo":27600,"quarry":27601,"lorraine":27602,"gilmore":27603,"iggy":27604,"µï¸ı":27605,"hopping":27606,"aviz":27607,"ðŁĮº":27608,"unisex":27609,"dedicate":27610,"attitudes":27611,"steer":27612,"junkie":27613,"railway":27614,"yb":27615,"whisper":27616,"keyan":27617,"kus":27618,"jug":27619,"dix":27620,"ains":27621,"summon":27622,"ovich":27623,"syed":27624,"herald":27625,"maison":27626,"meded":27627,"wildflower":27628,"mainland":27629,"risky":27630,"rukh":27631,"overlooked":27632,"kic":27633,"destroys":27634,"naman":27635,"kip":27636,"zano":27637,"championsleague":27638,"bandit":27639,"quincy":27640,"smile":27641,"calvin":27642,"openings":27643,"tapp":27644,"olulu":27645,"spectro":27646,"accredited":27647,"apk":27648,"praised":27649,"barnett":27650,"pollen":27651,"premiered":27652,"selenagomez":27653,"toured":27654,"screenings":27655,"uuu":27656,"miso":27657,"ense":27658,"adamlambert":27659,"guelph":27660,"haryana":27661,"hutto":27662,"lear":27663,"ltc":27664,"poached":27665,"brexit":27666,"æĿ":27667,"ttc":27668,"pavement":27669,"mongers":27670,"roe":27671,"aders":27672,"lington":27673,"participant":27674,"cared":27675,"gail":27676,"yates":27677,"lantic":27678,"dashboard":27679,"joo":27680,"felipe":27681,"ssionist":27682,"bum":27683,"send":27684,"aeri":27685,"thugs":27686,"lucifer":27687,"ahe":27688,"detector":27689,"filly":27690,"gasoline":27691,"hamper":27692,"humpday":27693,"theta":27694,"theband":27695,"forecasts":27696,"ohhh":27697,"lobb":27698,"holl":27699,"cpu":27700,"azu":27701,"adar":27702,"hailey":27703,"bub":27704,"cart":27705,"quoted":27706,"anarchy":27707,"pancre":27708,"twitart":27709,"alden":27710,"stash":27711,"theless":27712,"orni":27713,"beliebers":27714,"mormon":27715,"particle":27716,"aviation":27717,"â¬Ĩ":27718,"webcamtoy":27719,"saddened":27720,"cruis":27721,"hamlet":27722,"nct":27723,"rollins":27724,
"marquee":27725,"sawyer":27726,"reliance":27727,"aura":27728,"diec":27729,"soothing":27730,"signings":27731,"akis":27732,"ó":27733,"atkins":27734,"aerop":27735,"ðŁĮ¿":27736,"yab":27737,"shari":27738,"connol":27739,"dubbed":27740,"manufacture":27741,"convincing":27742,"feelthebern":27743,"rau":27744,"pulit":27745,"onec":27746,"gemstone":27747,"urging":27748,"bagu":27749,"gah":27750,"acids":27751,"fianc":27752,"zodiac":27753,"snoop":27754,"herrera":27755,"initiated":27756,"venge":27757,"professors":27758,"prodi":27759,"stronger":27760,"emission":27761,"bba":27762,"halle":27763,"tapp":27764,"hawan":27765,"whim":27766,"competed":27767,"myrtle":27768,"irport":27769,"coldplay":27770,"ache":27771,"skep":27772,"mson":27773,"ssic":27774,"calligraphy":27775,"swimmers":27776,"mey":27777,"ppc":27778,"thrift":27779,"poc":27780,"replaces":27781,"commuter":27782,"âģ¦âģ¦@":27783,"goers":27784,"logue":27785,"paradig":27786,"baskets":27787,"sensitivity":27788,"johan":27789,"atlantis":27790,"&&":27791,"suitcase":27792,"anxious":27793,"lh":27794,"stri":27795,"galloway":27796,"stread":27797,"warden":27798,"grounded":27799,"fficiency":27800,"lifeat":27801,"relic":27802,"disguise":27803,"islanders":27804,"fcofficial":27805,"classicalmusic":27806,"bmc":27807,"enfield":27808,"bique":27809,"oakley":27810,"batman":27811,"slaying":27812,"nerves":27813,"multit":27814,"calcium":27815,"projector":27816,"scottsdale":27817,"antino":27818,"grips":27819,"kimmel":27820,"desmond":27821,"protestors":27822,"hiatus":27823,"metabolism":27824,"concluded":27825,"presser":27826,"tipping":27827,"slide":27828,"eto":27829,"hunting":27830,"ausopen":27831,"rik":27832,"ppery":27833,"innovators":27834,"pitchers":27835,"agger":27836,"fungi":27837,"zad":27838,"prolific":27839,"rocknroll":27840,"blames":27841,"ctar":27842,"stamford":27843,"qad":27844,"mozzarella":27845,"insanely":27846,"denver":27847,"phouse":27848,"nomad":27849,"ï¿":27850,"sris":27851,"produ":27852,"henley":27853,"pagan":27854,"amtrak":27855,"rubi":27856,"incl":27857,"tutor":27858,"scotia":27859,"woes":27860,"singapo":27861,"funnel":27862,"turnbull":27863,"knowledge":27864,"grimm":27865,"realmadrid":27866,"weare":27867,"missiles":27868,"consol":27869,"emojis":27870,"sneak":27871,"smiths":27872,"ruiz":27873,"brou":27874,"iel":27875,"haver":27876,"ðŁĮļ":27877,"kingof":27878,"basilica":27879,"circulation":27880,"printers":27881,"tapping":27882,"ridley":27883,"dragged":27884,"haj":27885,"writer":27886,"fundamentals":27887,"personalities":27888,"metre":27889,"stereotypes":27890,"burle":27891,"bestof":27892,"nffc":27893,"hath":27894,"ministries":27895,"aali":27896,"tracing":27897,"paved":27898,"łï¸ı":27899,"gic":27900,"inspire":27901,"tug":27902,"hare":27903,"repeated":27904,"expon":27905,"lolli":27906,"rhode":27907,"precin":27908,"installations":27909,"instagram":27910,"azar":27911,"ies":27912,"solely":27913,"dukes":27914,"missionary":27915,"vanguard":27916,"fursuitfriday":27917,"ond":27918,"polari":27919,"mast":27920,"haran":27921,"josé":27922,"jacked":27923,"ecoun":27924,"alities":27925,"neph":27926,"ravel":27927,"moderated":27928,"scow":27929,"sfb":27930,"uruguay":27931,"aso":27932,"nig":27933,"audu":27934,"pints":27935,"latina":27936,"benz":27937,"mitting":27938,"charted":27939,"matology":27940,"citro":27941,"biopic":27942,"ðŁijŃ":27943,"djokovic":27944,"foxy":27945,"aguil":27946,"soto":27947,"anada":27948,"sinking":27949,"scrap":27950,"hairs":27951,"bethany":27952,"factfriday":27953,"ðŁIJIJ":27954,"unleashed":27955,")(":27956,"contradic":27957,"ramon":27958,"coastline":27959,
"yong":27960,"snsd":27961,"ligan":27962,"pome":27963,"mitage":27964,"gett":27965,"wati":27966,"risk":27967,"soaring":27968,"brush":27969,"fpl":27970,"avan":27971,"åĨ":27972,"larson":27973,"shear":27974,"multil":27975,"blur":27976,"multimedia":27977,"chunky":27978,"pari":27979,"nani":27980,"weird":27981,"cholesterol":27982,"charles":27983,"dreamed":27984,"tanning":27985,"puzzles":27986,"fram":27987,"handball":27988,"chag":27989,"belize":27990,"alu":27991,"bangs":27992,"ÑĦ":27993,"detectives":27994,"mcg":27995,"ishq":27996,"bothered":27997,"safc":27998,"mping":27999,"teneri":28000,"gays":28001,"sailor":28002,"angi":28003,"multicul":28004,"guessed":28005,"rosé":28006,"highways":28007,"broom":28008,"chattanoo":28009,"-'":28010,"seeker":28011,"oned":28012,"atf":28013,"luc":28014,"><":28015,"bari":28016,"percep":28017,"jewelry":28018,"asph":28019,"sorrow":28020,"sling":28021,"mammoth":28022,"jackie":28023,"ë§":28024,"wiltshire":28025,"sao":28026,"cancell":28027,"impaired":28028,"torial":28029,"breed":28030,"guyen":28031,"judice":28032,"title":28033,"prospective":28034,"applicants":28035,"ðŁįĬ":28036,"episcop":28037,"eid":28038,"byo":28039,"stockings":28040,"ðŁĴĥðŁĴĥ":28041,"llp":28042,"snag":28043,"keepit":28044,"lough":28045,"olson":28046,"maturity":28047,"!!!\"":28048,"copter":28049,"isha":28050,"bli":28051,"wilmington":28052,"tryouts":28053,"thai":28054,"ðŁ¥³":28055,"pebble":28056,"kraft":28057,"fp":28058,"º":28059,"ssively":28060,"livin":28061,"contestants":28062,"textures":28063,"joan":28064,"hdr":28065,"filmfestival":28066,"provence":28067,"wido":28068,"opend":28069,"csi":28070,"stown":28071,"croati":28072,"adjust":28073,"hostile":28074,"analysts":28075,"ilan":28076,"cuppa":28077,"brum":28078,"newfoundland":28079,"goodwin":28080,"mett":28081,"mallorca":28082,"plugs":28083,"buk":28084,"bbhutto":28085,"wrestle":28086,"saire":28087,"shopped":28088,"forza":28089,"lehead":28090,"vivo":28091,"bast":28092,"roxy":28093,"regis":28094,"hardworking":28095,"honolulu":28096,"despair":28097,"youngsters":28098,"nig":28099,"impromp":28100,"rolltide":28101,"deemed":28102,"treason":28103,"rushed":28104,"forged":28105,"fff":28106,"pikachu":28107,"briggs":28108,"doit":28109,"accent":28110,"laus":28111,"glaze":28112,"competent":28113,"aho":28114,"photog":28115,"midfield":28116,"lego":28117,"harvard":28118,"minorities":28119,"reilly":28120,"sliced":28121,"onceupon":28122,"initially":28123,"financially":28124,"landscapephotography":28125,"hardro":28126,"quo":28127,"mmers":28128,"parkinson":28129,"smugg":28130,"readiness":28131,"brutally":28132,"gloucester":28133,"mped":28134,"bbhuttozardari":28135,"murder":28136,"yed":28137,"dataviz":28138,"srt":28139,"downing":28140,"bians":28141,"mü":28142,"fleck":28143,"flipped":28144,"sly":28145,"brilliance":28146,"rim":28147,"kum":28148,"bubba":28149,"koi":28150,"knitted":28151,"sorg":28152,"mais":28153,"ðŁĮ²":28154,"tiss":28155,"sustain":28156,"sensu":28157,"akhan":28158,"ziest":28159,"examines":28160,"chardonnay":28161,"username":28162,"shortlist":28163,"rebs":28164,"ono":28165,"daring":28166,"hardwood":28167,"cheque":28168,"righteous":28169,"lightening":28170,"dirk":28171,"shradd":28172,"dura":28173,"downstairs":28174,"shal":28175,"amigos":28176,"ruff":28177,"slaw":28178,"ries":28179,"rednation":28180,"manus":28181,"ðŁĩ§ðŁĩ·":28182,"distinction":28183,"ubun":28184,"duran":28185,"migra":28186,"thians":28187,"laver":28188,"domestic":28189,"kx":28190,"jazzy":28191,"justify":28192,"belonging":28193,"insulation":28194,"colorstv":28195,"drunken":28196,"channeling":28197,"quand"
:28198,"xiii":28199,"enlighten":28200,"kano":28201,"fatima":28202,"teenchoice":28203,"terrified":28204,"pba":28205,"asley":28206,"metmuseum":28207,"dune":28208,"packer":28209,"kio":28210,"ðŁĴľðŁĴľ":28211,"boiler":28212,"fascism":28213,"armored":28214,"backgrounds":28215,"inmates":28216,"embarrassed":28217,"defines":28218,"thd":28219,"wego":28220,"silicone":28221,"loon":28222,"elding":28223,"borrowed":28224,"hemp":28225,"aksh":28226,"kawasaki":28227,"bry":28228,"deaf":28229,"killer":28230,"disposal":28231,"ðŁĩ°":28232,"glastonbury":28233,"uncovered":28234,"oxide":28235,"poff":28236,"dant":28237,"kj":28238,"kuro":28239,"drizzle":28240,"peoples":28241,"fee":28242,"propri":28243,"ddlovato":28244,"piggy":28245,"otis":28246,"allergies":28247,"ubis":28248,"penguin":28249,"sera":28250,"viz":28251,"prosperous":28252,"icides":28253,"tornadoes":28254,"senegal":28255,"webcast":28256,"stored":28257,"enchanted":28258,"bbcone":28259,"bayarea":28260,"entrepreneurial":28261,"rednationrising":28262,"experimenting":28263,"angan":28264,"lotto":28265,"theyre":28266,"pore":28267,"erp":28268,"serene":28269,"eastwood":28270,"brokers":28271,"barge":28272,"stallion":28273,"timberlake":28274,"tailored":28275,"dystop":28276,"bate":28277,"lators":28278,"dixit":28279,"branson":28280,"dynamo":28281,"kylie":28282,"shameful":28283,"btwn":28284,"springtime":28285,"mixture":28286,"sounded":28287,"luton":28288,"dades":28289,"mala":28290,"opra":28291,"enic":28292,"rahulgandhi":28293,"sewer":28294,"~~~~":28295,"kyu":28296,"northeastern":28297,"caer":28298,"bcu":28299,"nirvana":28300,"kitchens":28301,"ousy":28302,"alm":28303,"riverdale":28304,"hidden":28305,"flint":28306,"spd":28307,"patrons":28308,"katyperry":28309,"augh":28310,"exhibitions":28311,"smc":28312,"shuts":28313,"atore":28314,"dain":28315,"something":28316,"berth":28317,"bog":28318,"porter":28319,"gento":28320,"concussion":28321,"anglic":28322,"rowe":28323,"grilling":28324,"scarlett":28325,"mastering":28326,"mornin":28327,"commented":28328,"sime":28329,"sizing":28330,"christy":28331,"ceos":28332,"stm":28333,"atry":28334,"tariffs":28335,"vacation":28336,"prejudice":28337,"psu":28338,"parental":28339,"farage":28340,"cana":28341,"capcom":28342,"kosovo":28343,"youre":28344,"menstru":28345,"stalin":28346,"grapefruit":28347,"bran":28348,"chesa":28349,"daven":28350,"excel":28351,"!!)":28352,"à¹Į":28353,"distributor":28354,"cea":28355,"bridesma":28356,"millennial":28357,"wain":28358,"observing":28359,"misery":28360,"planetary":28361,"exposing":28362,"braised":28363,"compton":28364,"dongha":28365,"ql":28366,"springsteen":28367,"thul":28368,"sylve":28369,"cabo":28370,"palad":28371,"nielsen":28372,"gazing":28373,"baja":28374,"roud":28375,"orchids":28376,"johannesburg":28377,"seman":28378,"dji":28379,"operative":28380,"affection":28381,"eclectic":28382,"atc":28383,"mutant":28384,"awx":28385,"nice":28386,"melbourne":28387,"indulg":28388,"tulip":28389,"diaspora":28390,"welp":28391,"biggie":28392,"mississauga":28393,"retriever":28394,"oran":28395,"tammy":28396,"cta":28397,"hippo":28398,"seasoned":28399,"germans":28400,"engv":28401,"marvellous":28402,"imf":28403,"relays":28404,"montan":28405,"mauriti":28406,"meister":28407,"assurance":28408,"reigning":28409,"sufficient":28410,"hane":28411,"nothing":28412,"posse":28413,"navy":28414,"inlove":28415,"brighton":28416,"enqu":28417,"chung":28418,"sweaty":28419,"esc":28420,"caled":28421,"mans":28422,"nicaragua":28423,"slices":28424,"mocha":28425,"washingtonpost":28426,"bbn":28427,"damned":28428,"growing":28429,"enburg":28430,"loan":28431,"m
es":28432,"whoops":28433,"believers":28434,"spiel":28435,"vodaf":28436,"lat":28437,"sled":28438,"cricketer":28439,"browne":28440,"golfers":28441,"barra":28442,"watchers":28443,"luigi":28444,"swamy":28445,"moms":28446,"pitched":28447,"santor":28448,"crs":28449,"sire":28450,"scamp":28451,"bode":28452,"stewar":28453,"jonny":28454,"entity":28455,"pacqui":28456,"mindful":28457,"minindia":28458,"bearded":28459,"tempt":28460,"scorpion":28461,"eaton":28462,"authorized":28463,"arto":28464,"svp":28465,"opathy":28466,"cchini":28467,"housemusic":28468,"disneyworld":28469,"âĢĶ@":28470,"propose":28471,"diy":28472,"expense":28473,"teng":28474,"puppets":28475,"smel":28476,"daca":28477,"perry":28478,"finn":28479,"boosting":28480,"leftovers":28481,"cougs":28482,"satellites":28483,"many":28484,"aze":28485,"gong":28486,"fie":28487,"methodo":28488,"ferries":28489,"ðŁ¤ĶðŁ¤Ķ":28490,"explorers":28491,"loader":28492,"attracted":28493,"ilton":28494,"goddamn":28495,"piazza":28496,"doctr":28497,"saving":28498,"paragraph":28499,"visualization":28500,"mayors":28501,"workflow":28502,"ackles":28503,"ðŁĺĤðŁĺĤðŁĺĤðŁĺĤðŁĺĤðŁĺĤðŁĺĤðŁĺĤ":28504,"स":28505,"twerk":28506,"clut":28507,"lover":28508,"teases":28509,"sian":28510,"ote":28511,"deterior":28512,"accord":28513,"lfw":28514,"swarovski":28515,"natal":28516,"traps":28517,"kina":28518,"analyze":28519,"layered":28520,"beverages":28521,"unit":28522,"ransom":28523,"peshaw":28524,"destined":28525,"astrology":28526,"sipping":28527,"mileycyrus":28528,"camino":28529,"marshmallow":28530,"bliss":28531,"outback":28532,"faq":28533,"intoler":28534,"humility":28535,"poppin":28536,"halloween":28537,"montene":28538,"ophy":28539,"nun":28540,"tattooed":28541,"aas":28542,"ðŁĮ³":28543,"daley":28544,"quality":28545,"dusa":28546,"fishermen":28547,"swif":28548,"terrac":28549,"stau":28550,"lein":28551,"trolling":28552,"shipment":28553,"gardener":28554,"marchmadness":28555,"headband":28556,"grt":28557,"burnett":28558,"wand":28559,"!!!!!!!!!":28560,"ghe":28561,"dux":28562,"hud":28563,"warner":28564,"ðŁĩ¦":28565,"exile":28566,"rescue":28567,"rata":28568,"dhan":28569,"ducati":28570,"drown":28571,"blends":28572,"spie":28573,"alligator":28574,"simultaneously":28575,"brooke":28576,"uke":28577,"khar":28578,"communion":28579,"rika":28580,"fordfc":28581,"chinatown":28582,"yourown":28583,"mey":28584,"canal":28585,"systematic":28586,"depri":28587,"oxford":28588,"anil":28589,"wut":28590,"equation":28591,"bez":28592,"fleur":28593,"thegood":28594,"langley":28595,"adity":28596,"edith":28597,"alfie":28598,"оÑĤ":28599,"encry":28600,"brill":28601,"exemp":28602,"cesar":28603,"mbling":28604,"abri":28605,"scicom":28606,"jing":28607,"schooling":28608,"mika":28609,"mechanisms":28610,"impromptu":28611,"rhea":28612,"moore":28613,"crimea":28614,"besto":28615,"wright":28616,"elders":28617,"rods":28618,"kamal":28619,"folklore":28620,"beet":28621,"minion":28622,"relieve":28623,"thro":28624,"teamusa":28625,"pascal":28626,"madewith":28627,"bolivia":28628,"itti":28629,"freebies":28630,"desired":28631,"bestselling":28632,"liness":28633,"laden":28634,"keane":28635,"mists":28636,"hippie":28637,"attachment":28638,"@/":28639,"sew":28640,"flanagan":28641,"âĿĹï¸ı":28642,"supremac":28643,"stlcards":28644,"sias":28645,"qu":28646,"rhys":28647,"steep":28648,"valleys":28649,"vw":28650,"paving":28651,"dispat":28652,"alison":28653,"porte":28654,"idu":28655,"newsc":28656,"socket":28657,"mos":28658,"costar":28659,"revo":28660,"proteins":28661,"stanleycup":28662,"mcal":28663,"earring":28664,"secs":28665,"mclean":28666,"capric":28667,"nickelo":28668,
"aden":28669,"vc":28670,"shouse":28671,"adaptive":28672,"maximize":28673,"entertainer":28674,"prose":28675,"griffi":28676,"sixteen":28677,"lamar":28678,"mirage":28679,"saudiarabia":28680,"aweather":28681,"rust":28682,"infiltr":28683,"fashionweek":28684,"ðŁĺĬðŁĺĬðŁĺĬ":28685,"selective":28686,"bubble":28687,"aden":28688,"fennel":28689,"decisive":28690,"mta":28691,"mocking":28692,"mbles":28693,"stamp":28694,"mule":28695,"bernardo":28696,"grin":28697,"pott":28698,"jingle":28699,"vettel":28700,"colombian":28701,"camo":28702,"motivationmonday":28703,"bahan":28704,"ply":28705,"dhary":28706,"kami":28707,"xmen":28708,"sleeper":28709,"gara":28710,"mysti":28711,"confidential":28712,"conflicts":28713,"pneu":28714,"ces":28715,"insurtech":28716,"cleanse":28717,"merely":28718,"vais":28719,"tux":28720,"thegreat":28721,"sharon":28722,"maj":28723,"hola":28724,"ecosystems":28725,"ajay":28726,"aaj":28727,"hush":28728,"harmon":28729,"backtoschool":28730,"wikileaks":28731,"reflected":28732,"ðŁĺĵ":28733,"commemorating":28734,"acet":28735,"buckingham":28736,"messiah":28737,"tuous":28738,"hornet":28739,"tobe":28740,"dq":28741,"heine":28742,"mig":28743,"plate":28744,"nicholson":28745,"spie":28746,"cumberland":28747,"normal":28748,"phobia":28749,"happyhalloween":28750,"cityfc":28751,"mcel":28752,"gillian":28753,"keto":28754,"lude":28755,"demise":28756,"suga":28757,"strate":28758,"mcgrath":28759,"visitscotland":28760,"fooled":28761,"cbr":28762,"gcse":28763,"colori":28764,"potd":28765,"missuniverse":28766,"finances":28767,"mapoli":28768,"forks":28769,"Ø´":28770,"cannon":28771,"medicinal":28772,"ðŁĹĵ":28773,"kho":28774,"wreck":28775,"panto":28776,"bagel":28777,"gull":28778,"syndicate":28779,"icy":28780,"prc":28781,"kien":28782,"zika":28783,"tish":28784,"peta":28785,"cco":28786,"liza":28787,"chut":28788,"extraction":28789,"elg":28790,"gli":28791,"fueled":28792,"posit":28793,"respectively":28794,"leicester":28795,"brink":28796,"vulnerability":28797,"imported":28798,"esha":28799,"ðŁ¦ħ":28800,"rural":28801,"rell":28802,"gaming":28803,"atlantic":28804,"abandon":28805,"noah":28806,"resolved":28807,"prostate":28808,"allergic":28809,"psd":28810,"âĺ¹":28811,"dungeon":28812,"fangirl":28813,"illuminated":28814,"mhs":28815,"whitesox":28816,"dently":28817,"cko":28818,"endorse":28819,"overly":28820,"dazzling":28821,"prioriti":28822,"nightlife":28823,"util":28824,"behave":28825,"flamen":28826,"eastbound":28827,"ðŁĴŁ":28828,"iloveyou":28829,"govuk":28830,"mozambique":28831,"allegi":28832,"dri":28833,"testimonial":28834,"aths":28835,"ì§Ģ":28836,"mmy":28837,"shabby":28838,"prosecco":28839,"friendships":28840,"calam":28841,"damages":28842,"offset":28843,"jurassic":28844,"juno":28845,"arrell":28846,"ðŁĴ©":28847,"interventions":28848,"daredevil":28849,"carver":28850,"runaway":28851,"rane":28852,"trustees":28853,"haute":28854,"depths":28855,"ðŁİŃ":28856,"mein":28857,"sacrifices":28858,"concier":28859,"nesting":28860,"izzy":28861,"metam":28862,"ilovemy":28863,"urine":28864,"dulu":28865,"malhotra":28866,"veins":28867,"nightly":28868,"coat":28869,"andi":28870,"hewitt":28871,"lonel":28872,"cible":28873,"write":28874,"jennie":28875,"santac":28876,"ĸï¸ı":28877,"strato":28878,"singapore":28879,"soprano":28880,"kristen":28881,"cheerful":28882,"fleetwood":28883,"fairi":28884,"meli":28885,"wast":28886,"turnt":28887,"sforsale":28888,"scrolling":28889,"angelina":28890,"rendition":28891,"jericho":28892,"nicky":28893,"orb":28894,"flavo":28895,"patriot":28896,"asheville":28897,"sickness":28898,"refund":28899,"aggression":28900,"bpl":28901,"ãĥĥ":28902,"elus
ive":28903,"thistory":28904,"hanger":28905,"buffs":28906,"villas":28907,"atkinson":28908,"sph":28909,"jait":28910,"declined":28911,"wok":28912,"supremacy":28913,"ootball":28914,"eyang":28915,"ðŁİĵ":28916,"sford":28917,"athi":28918,"consume":28919,"roadster":28920,"eso":28921,"upro":28922,"recipe":28923,"auf":28924,"uci":28925,"aron":28926,"oooh":28927,"csgo":28928,"reich":28929,"mcd":28930,"minute":28931,"ladies":28932,"punk":28933,"rutgers":28934,"meek":28935,"arizon":28936,"taj":28937,"landlord":28938,"degra":28939,"autumn":28940,"lynx":28941,"usf":28942,"bhi":28943,"fairytale":28944,"donghae":28945,"betsy":28946,"exploded":28947,"chennai":28948,"opa":28949,"protag":28950,"brant":28951,"ðŁĵ°:":28952,"gf":28953,"palli":28954,"ðŁı¼âĢįâĻĢï¸ı":28955,"sut":28956,"illini":28957,"columnist":28958,"shirtless":28959,"decentr":28960,"searched":28961,"ecor":28962,"buggy":28963,"sack":28964,"ðŁĺĤðŁĺŃ":28965,"det":28966,"theri":28967,"ornaments":28968,"bringback":28969,"tov":28970,"quarterfinals":28971,"iche":28972,"constra":28973,"gier":28974,"buchanan":28975,"vix":28976,"kayaking":28977,"mustread":28978,"swallow":28979,"melb":28980,"scaf":28981,"opal":28982,"mayoral":28983,"harat":28984,"ðŁ¦ĭ":28985,"schedules":28986,"idf":28987,"hague":28988,"roz":28989,"aah":28990,"dmc":28991,"duplic":28992,"cache":28993,"orphan":28994,"fracture":28995,"recon":28996,"chav":28997,"bunnies":28998,"alain":28999,"mustafa":29000,"ðŁİĻ":29001,"vacations":29002,"dynamite":29003,"texted":29004,"broadcaster":29005,"ðŁĴ£":29006,"steamed":29007,"rocker":29008,"dietary":29009,"luxurytravel":29010,"inaugurated":29011,"sawards":29012,"vaughn":29013,"lincolnshire":29014,"clicked":29015,"kraja":29016,"fanc":29017,"removes":29018,"layoffs":29019,"mcfar":29020,"breeds":29021,"winnie":29022,"jonghyun":29023,"incentive":29024,"variations":29025,"patton":29026,"aturday":29027,"persistent":29028,"prun":29029,"piers":29030,"dales":29031,"æĸ":29032,"breastfeeding":29033,"rance":29034,"tawa":29035,"Ĥâĸ":29036,"murdoch":29037,"captive":29038,"thistle":29039,"nica":29040,"commodity":29041,"couldnt":29042,"boardwalk":29043,"gracious":29044,"practitioners":29045,"ngc":29046,"scrum":29047,"nero":29048,"camouflage":29049,"colon":29050,"hei":29051,"physicist":29052,"saturdaymorning":29053,"tener":29054,"siwon":29055,"columns":29056,"brune":29057,"yvr":29058,"bair":29059,"retires":29060,"halam":29061,"caber":29062,"shazam":29063,"minu":29064,"cascade":29065,"milkshake":29066,"grid":29067,"dren":29068,"vincent":29069,"sodium":29070,"platter":29071,"cheerleader":29072,"chenko":29073,"yak":29074,"eliminated":29075,"typo":29076,"yman":29077,"rethink":29078,"âĿĹ":29079,"tsville":29080,"bernardokath":29081,"extr":29082,"ðŁĺģðŁĺģðŁĺģ":29083,"tao":29084,"reper":29085,"moths":29086,"empowered":29087,"citing":29088,"transported":29089,"monks":29090,"sanat":29091,"clears":29092,"bachelorette":29093,"campbell":29094,"rachael":29095,"harle":29096,"handler":29097,"climbs":29098,"interference":29099,"release":29100,"shand":29101,"rbs":29102,"hrh":29103,"ãģª":29104,"valle":29105,"ré":29106,"slime":29107,"wakes":29108,"chubby":29109,"sloan":29110,"elves":29111,"athen":29112,"attorneys":29113,"microscope":29114,"stoner":29115,"scaling":29116,"obe":29117,"cout":29118,"seman":29119,"midweek":29120,"balsam":29121,"ðŁĺįâĿ¤":29122,"tiful":29123,"vish":29124,"lotta":29125,"ripping":29126,"remn":29127,"tire":29128,"leap":29129,"havent":29130,"laby":29131,"himach":29132,"whispers":29133,"wein":29134,"ðŁİ¸":29135,"wildflowers":29136,"sele":29137,"ucc":29138,"liability":291
39,"azine":29140,"swings":29141,"kya":29142,"tair":29143,"remain":29144,"edo":29145,"flops":29146,"pocket":29147,"grandad":29148,"examiner":29149,"gris":29150,"ffect":29151,"ðŁijĬðŁı»":29152,"studded":29153,"heartbeat":29154,"deacon":29155,"firmly":29156,"infectious":29157,"stef":29158,"outlines":29159,"leasing":29160,"claws":29161,"sense":29162,"tabs":29163,"hoot":29164,"mosul":29165,"spawn":29166,"coa":29167,"hogwarts":29168,"vein":29169,"albania":29170,"manuel":29171,"bino":29172,"vauxhall":29173,"scotland":29174,"gobucks":29175,"matty":29176,"physio":29177,"torino":29178,"constable":29179,"investigated":29180,"slower":29181,"mistaken":29182,"bayer":29183,"wildfires":29184,"voic":29185,"xon":29186,"timeto":29187,"chassis":29188,"barric":29189,"pion":29190,"baldhead":29191,"wook":29192,"registr":29193,"drafts":29194,"bhs":29195,"ligue":29196,"lick":29197,"staffordshire":29198,"bafta":29199,"darry":29200,"jeanne":29201,"vending":29202,"corp":29203,"âĽ³ï¸ı":29204,"kiddos":29205,"fenway":29206,"cao":29207,"westbound":29208,"ðŁĺĻ":29209,"dvr":29210,"quicker":29211,"blah":29212,"goodie":29213,"ðŁĴĭðŁĴĭ":29214,"vox":29215,"esper":29216,"facade":29217,"correlation":29218,"redbull":29219,"roup":29220,"declining":29221,"chive":29222,"mcgee":29223,"turo":29224,"inder":29225,"feller":29226,"fug":29227,"ilysm":29228,"mardi":29229,"peshawar":29230,"kieran":29231,"inema":29232,"meatballs":29233,"peck":29234,"depressing":29235,"sensing":29236,"giz":29237,"ddington":29238,"springwatch":29239,"roaming":29240,"yellowstone":29241,"horseshoe":29242,"amman":29243,"weekday":29244,"olor":29245,"ðŁ¥°":29246,"boosts":29247,"sprint":29248,"scarves":29249,"jee":29250,"beetro":29251,"clan":29252,"allthe":29253,"ìĦ¸ë":29254,"enlightenment":29255,"adobe":29256,"regeneration":29257,"?@":29258,"contag":29259,"yachts":29260,"tou":29261,"mora":29262,"envoy":29263,"rani":29264,"goli":29265,"dhanushkraja":29266,"woodworking":29267,"strengths":29268,"sedi":29269,"discs":29270,"arina":29271,"scon":29272,"lite":29273,"another":29274,"ðŁ¥Ĭ":29275,"yemen":29276,"guern":29277,"savvy":29278,"loyed":29279,"biomed":29280,"heartbreak":29281,"comrades":29282,"millie":29283,"patch":29284,"unf":29285,"jarvis":29286,"blaming":29287,"commemoration":29288,"gey":29289,"å¥":29290,"cardiovascular":29291,"aligned":29292,"document":29293,".?":29294,"aesthetics":29295,"emu":29296,"theirs":29297,"leh":29298,"psic":29299,"sif":29300,"plateau":29301,"expend":29302,"dominating":29303,"robes":29304,"mauritius":29305,"exceptionally":29306,"homer":29307,"discoveries":29308,"braun":29309,"tennant":29310,"insulin":29311,"ðŁİ®":29312,"carbs":29313,"teas":29314,"?!\"":29315,"zie":29316,"francois":29317,"browsing":29318,"thol":29319,"clarence":29320,"helper":29321,"obtained":29322,"cassie":29323,"lees":29324,"!,":29325,"pomegran":29326,"hubs":29327,"prestige":29328,"][":29329,"macher":29330,"bottled":29331,"punch":29332,"pipe":29333,"och":29334,"gallons":29335,"deliveries":29336,"ura":29337,"unday":29338,"monde":29339,"depicts":29340,"regency":29341,"outrageous":29342,"khaled":29343,"caro":29344,"hearti":29345,"zag":29346,"developmental":29347,"overcoming":29348,"statistical":29349,"flavored":29350,"fords":29351,"creatives":29352,"laurence":29353,"dias":29354,"sunscreen":29355,"inked":29356,"preacher":29357,"nul":29358,"impacting":29359,"autistic":29360,"âļĶï¸ı":29361,"oss":29362,"pelicans":29363,"celeste":29364,"vb":29365,"rump":29366,"mcgra":29367,"fairfax":29368,"humor":29369,"bbcnews":29370,"rowling":29371,"calder":29372,"seamless":29373,"agne":29374,"p
ti":29375,"mixed":29376,"tshirts":29377,"merci":29378,"btob":29379,"womeninstem":29380,"genealogy":29381,"preven":29382,"lour":29383,"cradle":29384,"giuse":29385,"о":29386,"chrono":29387,"fairness":29388,"chocolate":29389,"tory":29390,"asda":29391,"prescott":29392,"stretched":29393,"alman":29394,"uil":29395,"recharge":29396,"intre":29397,"obst":29398,"hospital":29399,"hayward":29400,"tenerife":29401,"friedman":29402,"vaping":29403,"confessions":29404,"yeah":29405,"balli":29406,"lucknow":29407,"corpse":29408,"sculptor":29409,"ampton":29410,"tpp":29411,"indicates":29412,"surplus":29413,"truman":29414,"ðĿĻ":29415,"sinha":29416,"invo":29417,"sovereign":29418,"kev":29419,"establishing":29420,"engraved":29421,"assuming":29422,"ðŁıģ":29423,"souza":29424,"fabi":29425,"toned":29426,"ounge":29427,"deloit":29428,"downey":29429,"noble":29430,"omor":29431,"cartridge":29432,"ðŁıIJ":29433,"uhur":29434,"holloway":29435,"successes":29436,"rsa":29437,"âĦ¢":29438,"mazz":29439,"twd":29440,"discourse":29441,".<":29442,"yat":29443,"satisfy":29444,"compri":29445,"ह":29446,"graphite":29447,"dissertation":29448,"arter":29449,"íĶ":29450,"bally":29451,"zombi":29452,"lyons":29453,"aic":29454,"ubc":29455,"prada":29456,"eil":29457,"dax":29458,"clai":29459,"granddaughter":29460,"extravaganza":29461,"challenge":29462,"ðŁ¤ŀ":29463,"pover":29464,"primarily":29465,"daddy":29466,"mana":29467,"bikers":29468,"inquiries":29469,"daun":29470,"feline":29471,"generative":29472,"hef":29473,"benefiting":29474,"lindsey":29475,"polka":29476,"demonstrated":29477,"alle":29478,"randy":29479,"osu":29480,"lowkey":29481,"weirdest":29482,"redbull":29483,"oury":29484,"nous":29485,"woodstock":29486,"credenti":29487,"nicer":29488,"gado":29489,"alyss":29490,"aph":29491,"preparedness":29492,"stationary":29493,"incorporated":29494,"dyer":29495,"saratoga":29496,"celesti":29497,":\"":29498,"antibiotics":29499,"orgs":29500,"indefin":29501,"apron":29502,"иÐ":29503,"fifteen":29504,"nof":29505,"ðŁĶĿ":29506,"phx":29507,"tega":29508,"mz":29509,"organizational":29510,"onair":29511,"bandung":29512,"pleasures":29513,"mori":29514,"secretari":29515,"raccoon":29516,"cashi":29517,"pilates":29518,"kon":29519,"geoffrey":29520,"lao":29521,"kamp":29522,"departments":29523,"backpacking":29524,"anam":29525,"ë":29526,"crackdown":29527,"aunty":29528,"ondo":29529,"lizzie":29530,"phers":29531,"cun":29532,"ðŁĩ±":29533,"kpop":29534,"put":29535,"intentional":29536,"connolly":29537,"barclays":29538,"hsfb":29539,"swindon":29540,"uku":29541,"sally":29542,"aint":29543,"âľħ":29544,"penang":29545,"uplifting":29546,"epilepsy":29547,"interro":29548,"bungal":29549,"goku":29550,"blueberries":29551,"द":29552,"ussia":29553,"silky":29554,"moured":29555,"istic":29556,"briefs":29557,"meats":29558,"gob":29559,"chaser":29560,"statewide":29561,"prasad":29562,"glitch":29563,"arin":29564,"banff":29565,"member":29566,"ðŁĺŃâĿ¤ï¸ı":29567,"loving":29568,"halla":29569,"ม":29570,"smokers":29571,"yaku":29572,"scicomm":29573,"physio":29574,"swol":29575,"lemons":29576,"gelato":29577,"chool":29578,"capitals":29579,"kistan":29580,"tights":29581,"spikes":29582,"travellers":29583,"iklan":29584,"commissioning":29585,"arine":29586,"emabiggestfans":29587,"emphasis":29588,"frontline":29589,"paddock":29590,"destructive":29591,"baha":29592,"linger":29593,"jewish":29594,"shetland":29595,"mcgin":29596,"monkey":29597,"koz":29598,"sone":29599,"rajini":29600,"teh":29601,"yen":29602,"cvs":29603,"masquer":29604,"girly":29605,"wesle":29606,"wasnt":29607,"brody":29608,"terminator":29609,"gille":29610,"maggi":29611,"birdie":
29612,"jeopardy":29613,"cubic":29614,"vmware":29615,"intricate":29616,"anup":29617,"topia":29618,"easton":29619,"sabres":29620,"investigates":29621,"busting":29622,"bilingual":29623,"valentino":29624,"informat":29625,"ferre":29626,"adventur":29627,"hydrate":29628,"forsy":29629,"aziz":29630,"santo":29631,"ede":29632,"whistler":29633,"continuously":29634,"dham":29635,"unused":29636,"jihad":29637,"addictive":29638,"vidy":29639,"dob":29640,"ido":29641,"fied":29642,"niversary":29643,"none":29644,"fuer":29645,"ðŁĺįðŁĺĺ":29646,"covenant":29647,"printable":29648,"immaculate":29649,"oem":29650,"clt":29651,"servants":29652,"consumed":29653,"unreleased":29654,"scum":29655,"packaged":29656,"mere":29657,"ìĦ¸ë¸":29658,"toby":29659,"taf":29660,"spoons":29661,"meal":29662,"fball":29663,"fairfield":29664,"janet":29665,"silverstone":29666,"dartmouth":29667,"followme":29668,"voyager":29669,"kombat":29670,"anniver":29671,"enew":29672,"magdal":29673,"hove":29674,"sath":29675,"grizzly":29676,"cardi":29677,"gartner":29678,"sandy":29679,"kanye":29680,"posture":29681,"poign":29682,"impulse":29683,"radiology":29684,"horizons":29685,"siam":29686,"aishwar":29687,"==>":29688,"noche":29689,"tris":29690,"elyn":29691,"comme":29692,"dui":29693,"cec":29694,"councillors":29695,"cuddling":29696,"creeping":29697,"locke":29698,"manages":29699,"transferred":29700,"necks":29701,"dier":29702,"dano":29703,"vick":29704,"lunches":29705,"dhe":29706,"ensures":29707,"criss":29708,"ulster":29709,"bannon":29710,"contenders":29711,"spam":29712,"sweetness":29713,"medal":29714,"honduras":29715,"arctic":29716,"ultrasound":29717,"infr":29718,"discovers":29719,"eiffel":29720,"casters":29721,"ruben":29722,"dust":29723,"aweed":29724,"atrium":29725,"lestwe":29726,"seared":29727,"ðŁĵº:":29728,"tyne":29729,"exchanges":29730,"littlemix":29731,"lle":29732,"astronauts":29733,"hershey":29734,"workday":29735,"knob":29736,"sov":29737,"resigns":29738,"todayshow":29739,"derman":29740,"anth":29741,"afc":29742,"taster":29743,"swoo":29744,"saeed":29745,"pering":29746,"narrowly":29747,"rnli":29748,"bestbuy":29749,"panasonic":29750,"obstacle":29751,"farmers":29752,"ðŁİĻ":29753,"pawan":29754,"kiest":29755,"angers":29756,"absurd":29757,"ohmy":29758,"sino":29759,"pistachi":29760,"spice":29761,"giuli":29762,"primetime":29763,"kow":29764,"kens":29765,"exagger":29766,"!?!":29767,"uba":29768,"middles":29769,"judd":29770,"ejec":29771,"slammed":29772,"pensions":29773,"ofa":29774,"recreate":29775,"bhp":29776,"xxl":29777,"liverpool":29778,"thresh":29779,"purity":29780,"nieu":29781,"holics":29782,"wrath":29783,"rado":29784,"glio":29785,"amma":29786,"dilemma":29787,"cru":29788,"letsgo":29789,"....@":29790,"âĿĵ":29791,"suggesting":29792,"trumps":29793,"horus":29794,"fv":29795,"icom":29796,"referring":29797,"predictive":29798,"tarts":29799,"gette":29800,"sock":29801,"glossy":29802,"pinky":29803,"alec":29804,"thyme":29805,"oura":29806,"theroad":29807,"petr":29808,"cram":29809,"pfi":29810,"dvn":29811,"meier":29812,"incentives":29813,"tunnels":29814,"mobil":29815,"recap":29816,"extras":29817,"upright":29818,"revamp":29819,"perseverance":29820,",-":29821,"otp":29822,"mirror":29823,"arwx":29824,"gerry":29825,"maher":29826,"gor":29827,"homepage":29828,"amis":29829,"agra":29830,"madele":29831,"bestfriend":29832,"siriusxm":29833,"bundles":29834,"admiring":29835,"tdsb":29836,"ðŁįģ":29837,"chas":29838,"slowing":29839,"roh":29840,"wallpapers":29841,"âĢ¦/":29842,"tekken":29843,"gangs":29844,"tala":29845,"lindsay":29846,"shoul":29847,"linebacker":29848,"toolkit":29849,"uranium":29850,"caly
p":29851,"abrams":29852,"matthi":29853,"ðŁı¿":29854,"honourable":29855,"dayo":29856,"versail":29857,"tank":29858,"stc":29859,"fritz":29860,"splend":29861,"patag":29862,"annoyed":29863,"onday":29864,"devastated":29865,"chattanooga":29866,"nationalism":29867,"massey":29868,"jenn":29869,"tailor":29870,"devgn":29871,"organs":29872,"zucchini":29873,"onfox":29874,"satire":29875,"wexford":29876,"disgrace":29877,"noto":29878,"volta":29879,"âĿ¤ï¸ıâĿ¤ï¸ıâĿ¤ï¸ıâĿ¤ï¸ı":29880,"à¶":29881,"homeowners":29882,"pointer":29883,"mcr":29884,"austen":29885,"daysto":29886,"moons":29887,"palma":29888,"grazing":29889,"eso":29890,"influencers":29891,"shahidkapoor":29892,"compliant":29893,"measurements":29894,"develops":29895,"yd":29896,"parl":29897,"pvt":29898,"randolph":29899,"tortured":29900,"gerald":29901,"elias":29902,"deepikap":29903,"warmup":29904,"hickory":29905,"gap":29906,"coffin":29907,"amour":29908,"reneg":29909,"mounting":29910,"sevens":29911,"igle":29912,"hier":29913,"decad":29914,"tright":29915,"escapes":29916,"werner":29917,"tfl":29918,"fulfilled":29919,"niger":29920,"sourdough":29921,"reaper":29922,"chooses":29923,"spinner":29924,"weeknd":29925,"filtered":29926,"shuk":29927,"kati":29928,"oldham":29929,"opensource":29930,"khanna":29931,"atelier":29932,"connec":29933,"ophobic":29934,"glas":29935,"complications":29936,"arson":29937,"councils":29938,"smol":29939,"assy":29940,"lurking":29941,"lingui":29942,"hanks":29943,"ein":29944,"Ùħ":29945,"rugs":29946,"nguyen":29947,"nouveau":29948,"menace":29949,"lev":29950,"aladdin":29951,"ruining":29952,"roundabout":29953,"km":29954,"conor":29955,"shoops":29956,"mayday":29957,"traumatic":29958,"prabhas":29959,"kaiser":29960,"kita":29961,"router":29962,"pedro":29963,"retar":29964,"stunner":29965,"spanish":29966,"disturbed":29967,"academy":29968,"elearning":29969,"witty":29970,"seng":29971,"feral":29972,"avy":29973,"stab":29974,"keaton":29975,"urdu":29976,"koto":29977,"hui":29978,"cooke":29979,"arian":29980,"thepersonal":29981,"uma":29982,"seap":29983,"asting":29984,"rhetoric":29985,"handwriting":29986,"municipality":29987,"consortium":29988,"ðŁIJŁ":29989,"glasgow":29990,"raya":29991,"eliza":29992,"polymer":29993,"broth":29994,"practi":29995,"correspondent":29996,"addicts":29997,"gayle":29998,"ailing":29999,"ofe":30000,"pli":30001,"heartw":30002,"stitch":30003,"sightings":30004,"priests":30005,"samo":30006,"sloth":30007,"goodwood":30008,"rocco":30009,"sabc":30010,"summit":30011,"lace":30012,"presley":30013,"itten":30014,"cincy":30015,"thepersonalnetwork":30016,"sweek":30017,"pegas":30018,"afcon":30019,"registry":30020,"cim":30021,"leth":30022,"dicap":30023,"candice":30024,"fluent":30025,"smack":30026,"pedestri":30027,"aloud":30028,"carac":30029,"priyankach":30030,"pgh":30031,"irons":30032,"dolce":30033,"latvia":30034,"deceased":30035,"therock":30036,"clap":30037,"cene":30038,"foam":30039,"morrissey":30040,"gret":30041,"essentially":30042,"comcast":30043,"beagle":30044,"argues":30045,"inged":30046,"-âĢ¦":30047,"sag":30048,"hasan":30049,"ðŁĻĨ":30050,"ðŁį°":30051,"nhra":30052,"kannada":30053,"indicators":30054,"oner":30055,"brixton":30056,"atas":30057,"screenplay":30058,"sorority":30059,"shaheed":30060,"heem":30061,"classmates":30062,"tainment":30063,"esi":30064,"breastcancer":30065,"zuckerberg":30066,"auror":30067,"encia":30068,"refers":30069,"kaeper":30070,"vortex":30071,"compart":30072,"lymph":30073,"photographing":30074,"steff":30075,"restling":30076,"parsley":30077,"momento":30078,"thman":30079,"lacking":30080,"dutt":30081,"oculus":30082,"fino":30083,"frenzy":30084
,"rasc":30085,"dern":30086,"dismissed":30087,"nook":30088,"metgala":30089,"shill":30090,"raphael":30091,"mavericks":30092,"exhibits":30093,"eagerly":30094,"cpa":30095,"amenities":30096,".âłĢ":30097,"exodus":30098,"ernst":30099,"lita":30100,"dealt":30101,"womensmarch":30102,"iain":30103,"scoreboard":30104,"campeones":30105,"cen":30106,"tiki":30107,"garrison":30108,"fidelity":30109,"brag":30110,"roadmap":30111,"psychop":30112,"loe":30113,"bleu":30114,"ðŁijĬðŁı¼":30115,"sauvi":30116,"springer":30117,"temptation":30118,"rudolph":30119,"acura":30120,"wicz":30121,"parachute":30122,"strol":30123,"lenny":30124,"zik":30125,"doms":30126,"nbaf":30127,"alpac":30128,"vivian":30129,"rove":30130,"preet":30131,"perpetu":30132,"snake":30133,"airsoft":30134,"inflatable":30135,"princes":30136,"atie":30137,"ffey":30138,"patient":30139,"mire":30140,"chelle":30141,"slack":30142,"groovy":30143,"#:":30144,"uploading":30145,"!!!!!!!!!!!!!!!!":30146,"siemens":30147,"provision":30148,"vfx":30149,"needy":30150,"fats":30151,"topoli":30152,"bhutto":30153,"sathletics":30154,"alums":30155,"twinning":30156,"southwestern":30157,"adopting":30158,"lastnight":30159,"manne":30160,"laga":30161,"twell":30162,"acia":30163,"----":30164,"eyewear":30165,"hurley":30166,"flee":30167,"sach":30168,"pecker":30169,"costly":30170,"isk":30171,"crates":30172,"policy":30173,"erosion":30174,"ingo":30175,"werk":30176,"ðŁIJį":30177,"tortoise":30178,"therapies":30179,"internet":30180,"chihuahua":30181,"rips":30182,"frei":30183,"edor":30184,"taiji":30185,"tfc":30186,"dod":30187,"dempsey":30188,"christin":30189,"cheng":30190,"hips":30191,"graeme":30192,"compassionate":30193,"cavaliers":30194,"historic":30195,"soulful":30196,"criminal":30197,"jac":30198,"vinci":30199,"expired":30200,"surat":30201,"turismo":30202,"kona":30203,"seaweed":30204,"berts":30205,"leica":30206,"expressing":30207,"aal":30208,"wort":30209,"breakfast":30210,"herring":30211,"amused":30212,"rhubarb":30213,"martian":30214,"cosplayer":30215,"yash":30216,"strial":30217,"raul":30218,"referral":30219,"dwts":30220,"jw":30221,"adler":30222,"curtains":30223,"gur":30224,"valence":30225,"tyrone":30226,"swfc":30227,"coached":30228,"reborn":30229,"diabetic":30230,"choke":30231,"norfolk":30232,"investigative":30233,"ðŁĴ¯ðŁĴ¯":30234,"zid":30235,"vmas":30236,"phie":30237,"objectives":30238,"âľĭ":30239,"overdue":30240,"divers":30241,"matsu":30242,"ðŁİŁï¸ı":30243,"casualties":30244,"ว":30245,"alk":30246,"standardi":30247,"realist":30248,"artifacts":30249,"pandor":30250,"kex":30251,"invin":30252,"(!)":30253,"iney":30254,"paraly":30255,"mrt":30256,"faye":30257,"thevoice":30258,"onga":30259,"deed":30260,"skinner":30261,"azwx":30262,"specimen":30263,"priyankachopra":30264,"nuevo":30265,"barkley":30266,"toulouse":30267,"resumes":30268,"footballers":30269,"citi":30270,"fetch":30271,"ère":30272,"lestweforget":30273,"ðŁĻĭ":30274,"chunk":30275,"drifting":30276,"manipulation":30277,"equals":30278,"putt":30279,"kyungsoo":30280,"âĿ¤ï¸ı#":30281,"elastic":30282,"parano":30283,"foy":30284,"doping":30285,"cincy":30286,"ssler":30287,"interrupted":30288,"alay":30289,"adores":30290,"amethy":30291,"convoy":30292,"ãĢı":30293,"Ĭãģ":30294,"blacklist":30295,"generals":30296,"sachin":30297,"brushed":30298,"ounces":30299,"nonstop":30300,"illiams":30301,"btsarmy":30302,"uav":30303,"ruff":30304,"burma":30305,"bik":30306,"defence":30307,"schultz":30308,"boasts":30309,"loneliness":30310,"gore":30311,"transforms":30312,"alumna":30313,"@@":30314,"rappers":30315,"nehru":30316,"caro":30317,"himalayan":30318,"wearables":30319,"geh"
:30320,"peppermint":30321,"redevelopment":30322,"flamingo":30323,"cosby":30324,"bigbaldhead":30325,"agri":30326,"barefoot":30327,"scopes":30328,"regram":30329,"ghana":30330,"ðŁİ«":30331,"iheart":30332,"sadie":30333,"carrie":30334,"microbial":30335,"kuala":30336,"skater":30337,"querque":30338,"âĻ©":30339,"genres":30340,"reasoning":30341,"chased":30342,"aso":30343,"slipped":30344,"encan":30345,"vamos":30346,"kers":30347,"adverse":30348,"moil":30349,"commodities":30350,"withyou":30351,"silent":30352,"hype":30353,"ande":30354,"amination":30355,"whispe":30356,"litz":30357,"âļ½ï¸ıâļ½ï¸ı":30358,"riff":30359,"ppy":30360,"lambs":30361,"ganesh":30362,"absent":30363,"regulator":30364,"marseille":30365,"enroll":30366,"parcel":30367,"wap":30368,"byrd":30369,"ðŁĩŃ":30370,"tuber":30371,"countrymusic":30372,"parl":30373,"controllers":30374,"responsibilities":30375,"wey":30376,"chate":30377,"montenegro":30378,"chico":30379,"milan":30380,"lms":30381,"trainees":30382,"appropriately":30383,"uncertain":30384,"poppies":30385,"edsheeran":30386,"nutritious":30387,"garo":30388,"deutsch":30389,"awesome":30390,"ãĥ¼":30391,"comfortably":30392,"landmarks":30393,"eti":30394,"reusable":30395,"danielle":30396,"rosal":30397,"coles":30398,"justic":30399,"ccs":30400,"fanny":30401,"nim":30402,"mcu":30403,"clinch":30404,"atene":30405,"merge":30406,"imdb":30407,"anglo":30408,"uccino":30409,"panini":30410,"annot":30411,"burberry":30412,"feature":30413,"predicting":30414,"fashionista":30415,"sask":30416,"imaginary":30417,"mmo":30418,"southsudan":30419,"spear":30420,"hubble":30421,"jointhe":30422,"coyotes":30423,"sligo":30424,"kodak":30425,"sitcom":30426,"polaroid":30427,"rooted":30428,"corrup":30429,"ðŁĻĮðŁĻĮ":30430,"brisban":30431,"atz":30432,"ahl":30433,"remy":30434,"talent":30435,"avalon":30436,"rada":30437,"pauline":30438,"locomotive":30439,"goons":30440,"nemo":30441,"maserati":30442,"icu":30443,"stutt":30444,"historically":30445,"smb":30446,"presby":30447,"avoid":30448,"sooners":30449,"rhinestone":30450,"wad":30451,"rising":30452,"trot":30453,"modes":30454,"regent":30455,"optimize":30456,"reece":30457,"smu":30458,"verti":30459,"newyorkcity":30460,"cortez":30461,"rac":30462,"incase":30463,"sinc":30464,"fielding":30465,"etta":30466,"tiffany":30467,"almonds":30468,"saddle":30469,"krat":30470,"matter":30471,"glow":30472,"starving":30473,"glo":30474,"crappy":30475,"slur":30476,"std":30477,"monitors":30478,"receipt":30479,"maymayentrata":30480,"mcil":30481,"unis":30482,"rainbows":30483,"caldwell":30484,"pacquiao":30485,"jop":30486,"afe":30487,"hook":30488,"essen":30489,"wizard":30490,"median":30491,"flaws":30492,"coms":30493,"âĿĦ":30494,"ingh":30495,"haynes":30496,"antonio":30497,"templates":30498,"outer":30499,"naw":30500,"cardigan":30501,"belgrade":30502,"ðŁĴī":30503,"homo":30504,"aise":30505,"ropes":30506,"nove":30507,"whatyou":30508,"trigge":30509,"conception":30510,"adukone":30511,"nadi":30512,"friars":30513,"swer":30514,"adjusted":30515,"hotline":30516,"sanity":30517,"kaur":30518,"downloading":30519,"cgi":30520,"tenor":30521,"ethnic":30522,"appalach":30523,"ุ":30524,"pag":30525,"golds":30526,"onset":30527,"investigator":30528,"cartel":30529,"peacefully":30530,"jarrett":30531,"catalan":30532,"polio":30533,"num":30534,"frustration":30535,"dharma":30536,"mylife":30537,"âľĮðŁı»":30538,"aberdeen":30539,"musa":30540,"binder":30541,"sparkly":30542,"fleeing":30543,"instinct":30544,"coping":30545,"dominance":30546,"illers":30547,"era":30548,"uconn":30549,"looms":30550,"livingston":30551,"gali":30552,"hes":30553,"cma":30554,"bela":305
55,"seley":30556,"monk":30557,"lach":30558,"marx":30559,"´":30560,"merica":30561,"womanin":30562,"essex":30563,"raina":30564,"jimi":30565,"neptune":30566,"zack":30567,"chinese":30568,"martins":30569,"chandelier":30570,"hern":30571,"withus":30572,"earl":30573,"asphalt":30574,"modules":30575,"stp":30576,"ulla":30577,"psychiatric":30578,"mileage":30579,"captivating":30580,"sider":30581,"mento":30582,"mort":30583,"trance":30584,"talbot":30585,"abby":30586,"ìĥ":30587,"âľĮðŁı¼":30588,"jak":30589,"dawn":30590,"turnup":30591,"screwed":30592,"feds":30593,"blueprint":30594,"ðŁĴĸðŁĴĸ":30595,"harsh":30596,"eros":30597,"insomnia":30598,"bankers":30599,"taemin":30600,"misconduct":30601,"humber":30602,"gidi":30603,"eduardo":30604,"cona":30605,"muscular":30606,"consuming":30607,"rash":30608,"donnie":30609,"dipped":30610,"collie":30611,"samuel":30612,"meltdown":30613,"ðŁĺįðŁĺįðŁĺį":30614,"mez":30615,"examining":30616,"schwartz":30617,"pristine":30618,"ðŁIJĿ":30619,"veit":30620,"fulfilling":30621,"anesthe":30622,"guesses":30623,"draft":30624,"somme":30625,"solid":30626,"pational":30627,"hoped":30628,"evolutionary":30629,"aller":30630,"entertained":30631,"slips":30632,"ludwig":30633,"concludes":30634,"sensible":30635,"bonnet":30636,"craze":30637,"tras":30638,"hazards":30639,"constantine":30640,"edics":30641,"startrek":30642,"toc":30643,"occupational":30644,"incheon":30645,"deepikapadukone":30646,"pizzas":30647,"newcomer":30648,"depart":30649,"oppression":30650,"ebony":30651,"fossils":30652,"trojan":30653,"elen":30654,"steaks":30655,"khou":30656,"positioning":30657,"ugby":30658,"redcross":30659,"akh":30660,"dolce":30661,"usmnt":30662,"ppen":30663,"dilig":30664,"mavs":30665,"caller":30666,"costello":30667,"âĽĦ":30668,"dyn":30669,"things":30670,"rhinos":30671,"axi":30672,"sarkar":30673,"convocation":30674,"atters":30675,"ssss":30676,"fungus":30677,"eugen":30678,"russo":30679,"squat":30680,"wsb":30681,"elion":30682,"williamsburg":30683,"soff":30684,"deficiency":30685,"bearer":30686,"okin":30687,"keystone":30688,"twain":30689,"calming":30690,"breakable":30691,"wares":30692,"horseracing":30693,"combs":30694,"bunting":30695,"uit":30696,"tland":30697,"ðŁĴĻðŁĴĻðŁĴĻ":30698,"gastron":30699,"sabot":30700,"ickers":30701,"commissioners":30702,"senate":30703,"iiot":30704,"athena":30705,"nitrogen":30706,"antony":30707,"erotic":30708,"dialo":30709,"missou":30710,"hypocr":30711,"âľĪ":30712,"kaepernick":30713,"canv":30714,"droo":30715,"cleveland":30716,"osh":30717,"monsta":30718,"stefano":30719,"^)":30720,"shul":30721,"poison":30722,"hae":30723,"commercials":30724,"maul":30725,"nitro":30726,"coworker":30727,"aloe":30728,"vapor":30729,"tents":30730,"russian":30731,"quid":30732,"questionable":30733,"midget":30734,"poker":30735,"girlfriends":30736,"sinthe":30737,"eritrea":30738,"tenure":30739,"deposits":30740,"buckeyes":30741,"spotter":30742,"theodore":30743,"trinity":30744,"joaquin":30745,"ucci":30746,"followthe":30747,"cafc":30748,"mpa":30749,"ðŁIJ»":30750,"plotting":30751,"domino":30752,"taek":30753,"sionally":30754,"dicaprio":30755,"pap":30756,"carmel":30757,"iger":30758,"btcc":30759,"bethle":30760,"wwwbigbaldhead":30761,"foodie":30762,"baghdad":30763,"masonry":30764,"offended":30765,"à·":30766,"à¸ģ":30767,"scro":30768,"verses":30769,"orient":30770,"arches":30771,"piyu":30772,"knowyour":30773,"gree":30774,"takers":30775,"guard":30776,"dishon":30777,"bucketlist":30778,"bhafc":30779,"wardly":30780,"ðŁİīðŁİĬ":30781,"leighton":30782,"pew":30783,"stray":30784,"assaulted":30785,"inhal":30786,"lyfe":30787,"amarketing":30788,"lx":3078
9,"katz":30790,"ubuntu":30791,"meo":30792,"cartoonist":30793,"turnover":30794,"miz":30795,"dislike":30796,"mullen":30797,"mof":30798,"bland":30799,"hides":30800,"emerges":30801,"chorizo":30802,"trustee":30803,"mahog":30804,"lansing":30805,"paralympic":30806,"faint":30807,"fauna":30808,"chal":30809,"snar":30810,"cath":30811,"benton":30812,"castillo":30813,"slippery":30814,"apricot":30815,"oecd":30816,"baro":30817,"lz":30818,"heming":30819,"clowns":30820,"coworkers":30821,"peruvian":30822,"commuters":30823,"yell":30824,"ðŁļ´":30825,"undering":30826,"vj":30827,"ttp":30828,"flipk":30829,"wana":30830,"socent":30831,"ĤâĸĤâĸ":30832,"à¤Ĥ":30833,"oosa":30834,"jagger":30835,"dism":30836,"eless":30837,"dham":30838,"calif":30839,"aofficial":30840,"eclip":30841,"harrogate":30842,"grapp":30843,"comrade":30844,"ntr":30845,"concentrate":30846,"thighs":30847,"bitcoin":30848,"belarus":30849,"ëĵ":30850,"enduring":30851,"nowwatching":30852,"industrial":30853,"pip":30854,"aron":30855,"arat":30856,"®":30857,"whitby":30858,"ooooooo":30859,"saree":30860,"ticals":30861,"misleading":30862,"yoon":30863,"years":30864,"sleigh":30865,"romanian":30866,"scissors":30867,"vampires":30868,"acup":30869,"abba":30870,"thweeksary":30871,"centri":30872,"flye":30873,"uo":30874,"cbi":30875,"buena":30876,"sind":30877,"marino":30878,"burr":30879,"rebuilding":30880,"ल":30881,"anniversaire":30882,"acca":30883,"ðŁĴĢðŁĴĢ":30884,"getting":30885,"tulips":30886,"wolfpack":30887,"âľįï¸ı":30888,"morethan":30889,"takin":30890,"ðŁ¤ĺðŁı»":30891,"ube":30892,"monic":30893,"doubts":30894,"mower":30895,"cobalt":30896,"donne":30897,"speculation":30898,"arguably":30899,"kaku":30900,"https":30901,"prosecution":30902,"dinah":30903,"stamatic":30904,"disclosed":30905,"beverly":30906,"flwx":30907,"crabs":30908,"extraordinaire":30909,"warmest":30910,"imperi":30911,"ologists":30912,"traces":30913,"parc":30914,"lakeside":30915,"amr":30916,"teri":30917,"hourly":30918,"domination":30919,"arrow":30920,"shrewsbury":30921,"ancestry":30922,"wrangler":30923,"triggered":30924,"pensac":30925,"rooster":30926,"survives":30927,"aon":30928,"boko":30929,"valor":30930,"loveis":30931,"lag":30932,"pey":30933,"focal":30934,"outlaws":30935,"blanc":30936,"articho":30937,"wits":30938,"marshall":30939,"diego":30940,"supportsmall":30941,"uca":30942,"sah":30943,"jeet":30944,"synago":30945,"governing":30946,"ðŁĴ¬":30947,"salads":30948,"create":30949,"miriam":30950,"censored":30951,"amide":30952,"nou":30953,"zeta":30954,"allegiance":30955,"*)":30956,"blm":30957,"rican":30958,"pastors":30959,"olympus":30960,"bloc":30961,"whirl":30962,"starry":30963,"prone":30964,"yk":30965,"pne":30966,"congratulating":30967,"bev":30968,"sober":30969,"loveisland":30970,"sair":30971,"aning":30972,"tutorials":30973,"qe":30974,"lund":30975,"inist":30976,"clever":30977,"taxpayer":30978,"aliz":30979,"wrench":30980,"ddling":30981,"capri":30982,"hpa":30983,"ðŁı»âĢįâĻĤï¸ı":30984,"naj":30985,"oj":30986,"futuristic":30987,"jellyfish":30988,"ðŁĶ¥ðŁĶ¥ðŁĶ¥ðŁĶ¥":30989,"celery":30990,"plank":30991,"fila":30992,"neme":30993,"unhealthy":30994,"lections":30995,"ðŁ§¡":30996,"ritchie":30997,"nws":30998,"mikha":30999,"wonderwoman":31000,"âĢİ":31001,"hipstamatic":31002,"kag":31003,"ðŁĴľðŁĴľðŁĴľ":31004,"poultry":31005,"mow":31006,"words":31007,"loff":31008,"ðŁ¤£ðŁ¤£":31009,"relatable":31010,"remixes":31011,"kenyatta":31012,"kem":31013,"resigned":31014,"fod":31015,"straigh":31016,"jlo":31017,"hutch":31018,"boxers":31019,"colleen":31020,"mags":31021,"instructional":31022,"kol":31023,"attracts":31024,"prag":31025,"accountant":31
026,"goggles":31027,"bru":31028,"thole":31029,"marrow":31030,"leuke":31031,"octo":31032,"ponds":31033,"bubbly":31034,"heist":31035,"ìĹij":31036,"imp":31037,"ahar":31038,"haunt":31039,"hallmark":31040,"psych":31041,"kkkkkkkk":31042,"columb":31043,"jumpsuit":31044,"costco":31045,"sidelines":31046,"aggies":31047,"overturned":31048,"nib":31049,"keychain":31050,"fuk":31051,"faf":31052,"miam":31053,"assistants":31054,"cycled":31055,"rider":31056,"dammit":31057,"redwings":31058,"mages":31059,"kins":31060,"ìĤ":31061,"hod":31062,"sont":31063,"caroline":31064,"\"'":31065,"cule":31066,"braid":31067,"felony":31068,"arities":31069,"rutherford":31070,"depiction":31071,"isabelle":31072,"roach":31073,"kday":31074,"fifthharmony":31075,"emy":31076,"ligam":31077,"barista":31078,"albuquerque":31079,"gross":31080,"ðŁįº":31081,"ooks":31082,"ðŁij¼":31083,"duncan":31084,"tryin":31085,"jags":31086,"gould":31087,"litho":31088,"âģ£":31089,"аÐ":31090,"sammy":31091,"tung":31092,"casser":31093,"apolo":31094,"aaaaa":31095,"mang":31096,"asics":31097,"shen":31098,"pye":31099,"turbul":31100,"ssp":31101,"saintsfc":31102,"onlin":31103,"nanny":31104,"hester":31105,"doz":31106,"à¸Ķ":31107,"thread":31108,"rents":31109,"khand":31110,"ðŁĴªðŁı½":31111,"unconditional":31112,"robson":31113,"carre":31114,"phon":31115,"sacrificed":31116,"£":31117,"autos":31118,"parker":31119,"oca":31120,"login":31121,"keegan":31122,"hardcover":31123,"doughnuts":31124,"ðŁĮİ":31125,"spitfire":31126,"refreshments":31127,"saskatoon":31128,"commodore":31129,"jf":31130,"rubber":31131,"halamadrid":31132,"childcare":31133,"strada":31134,"iom":31135,"rik":31136,"dakar":31137,"thermom":31138,"cropped":31139,"garu":31140,"alik":31141,"veni":31142,"ift":31143,"sika":31144,"rituals":31145,"zul":31146,"ech":31147,"©":31148,"sudan":31149,"lland":31150,"ime":31151,"docker":31152,"ì¤":31153,"feared":31154,"fao":31155,"walter":31156,"nog":31157,"mutuals":31158,"lh":31159,"align":31160,"monia":31161,"conceptart":31162,"ðŁĻıðŁı¼":31163,"scoe":31164,"competence":31165,"swine":31166,"lyme":31167,"launch":31168,"greener":31169,"abstractart":31170,"inquis":31171,"granada":31172,"gaelic":31173,"fluff":31174,"dbacks":31175,"graveyard":31176,"babe":31177,"academic":31178,"adventurous":31179,"johann":31180,"~!":31181,"bibi":31182,"|#":31183,"plings":31184,"getty":31185,"asb":31186,"âĿ¤ï¸ı@":31187,"staff":31188,"religions":31189,"bangor":31190,"worldbookday":31191,"megh":31192,"devin":31193,"ashore":31194,"meridian":31195,"github":31196,"quiz":31197,"allstars":31198,"bestest":31199,"irresi":31200,"acker":31201,"dote":31202,"warrington":31203,"polly":31204,"neworleans":31205,"crou":31206,"wigs":31207,"chey":31208,"smithsonian":31209,"lasag":31210,"detour":31211,"boris":31212,"straps":31213,"mariah":31214,"intentionally":31215,"koh":31216,"ðŁį¸":31217,"ssian":31218,"marissa":31219,"coral":31220,"episcopal":31221,"casualty":31222,"tomo":31223,"supplychain":31224,"samp":31225,"ongo":31226,"roo":31227,"caviar":31228,"pfw":31229,"claudio":31230,"buffalo":31231,"sations":31232,"matty":31233,"snapback":31234,"lds":31235,"alarms":31236,"matte":31237,"âĺĶï¸ı":31238,"conditioner":31239,"dors":31240,"hex":31241,"fizz":31242,"astri":31243,"sussex":31244,"security":31245,"qaeda":31246,"allstar":31247,"cocacola":31248,"asone":31249,"clicks":31250,"scans":31251,"mute":31252,"heavier":31253,"ðŁİ§":31254,"âĺŀ":31255,"lvl":31256,"bookboost":31257,"youtube":31258,"flashes":31259,"fjor":31260,"csu":31261,"explode":31262,"dodge":31263,"cairn":31264,"gonzales":31265,"thill":31266,"pelle":31267,"hartley"
:31268,"renewable":31269,"retin":31270,"estre":31271,"costarica":31272,"shipyard":31273,"ncfc":31274,"priya":31275,"aghan":31276,"anath":31277,"plugin":31278,"corey":31279,"rebound":31280,"oru":31281,"katrin":31282,"hormone":31283,"gim":31284,"mahindra":31285,"ssus":31286,"parkland":31287,"harper":31288,"fantastic":31289,"inferno":31290,"epilo":31291,"wrestling":31292,"fect":31293,"cit":31294,"acoun":31295,"tossed":31296,"monumental":31297,"chartered":31298,"bust":31299,"petra":31300,"âĮļ":31301,"wildflowerhour":31302,"sweaters":31303,"*.":31304,"bler":31305,"atech":31306,"gowan":31307,"demographic":31308,"bral":31309,"suicide":31310,"renovations":31311,"vuel":31312,"sinister":31313,"armani":31314,"misogy":31315,"pharrell":31316,"naps":31317,"uniting":31318,"crusaders":31319,"corgi":31320,"insured":31321,"thani":31322,"noor":31323,"gq":31324,"dada":31325,"bicycles":31326,"snuggle":31327,"schan":31328,"tenberg":31329,"ssal":31330,"femme":31331,"boil":31332,"½ï¸ı":31333,"reap":31334,"occurring":31335,"hussein":31336,"divid":31337,"stoke":31338,"shalom":31339,"naia":31340,"olic":31341,"frustrating":31342,"Ùĩ":31343,"igs":31344,"grover":31345,"scenarios":31346,"nds":31347,"brutality":31348,"medalli":31349,"buon":31350,"sass":31351,"skateboarding":31352,"onyx":31353,"lorry":31354,"nyu":31355,"gautam":31356,"mmings":31357,"gug":31358,"endi":31359,"lothian":31360,"commando":31361,"chalk":31362,"phora":31363,"assessing":31364,"tigh":31365,"crunchy":31366,"aday":31367,"isl":31368,"ciara":31369,"pilgrims":31370,"kamal":31371,"pto":31372,"britanni":31373,"tani":31374,"smc":31375,"lure":31376,"appstore":31377,"aby":31378,"golfing":31379,"clc":31380,"fau":31381,"anas":31382,"shutting":31383,"regulated":31384,"carnage":31385,"scowboys":31386,"allenge":31387,"cma":31388,"humboldt":31389,"relle":31390,"kumb":31391,"heri":31392,"refinery":31393,"soundcheck":31394,"dwayne":31395,"bosnia":31396,"isp":31397,"thealth":31398,"anniv":31399,"relevance":31400,"mya":31401,"baggage":31402,"dread":31403,"sbc":31404,"thed":31405,"buh":31406,"hijab":31407,"loid":31408,"kew":31409,"cte":31410,"respect":31411,"lovelies":31412,"cubes":31413,"celebrate":31414,"dirt":31415,"savers":31416,"_,":31417,"garment":31418,"pulitzer":31419,"masjid":31420,"beatport":31421,"alarts":31422,"encryption":31423,"sner":31424,"pleads":31425,"foundry":31426,"symmetry":31427,"rumi":31428,"birthplace":31429,"scallops":31430,"supple":31431,"pivotal":31432,"tati":31433,"node":31434,"sod":31435,"proxim":31436,"trics":31437,"coldest":31438,"brent":31439,"mandu":31440,"clair":31441,"each":31442,"andalu":31443,"hiddleston":31444,"ðŁIJº":31445,"melts":31446,"vance":31447,"pinn":31448,"sements":31449,"screened":31450,"sachs":31451,"obl":31452,"icha":31453,"âĺĺï¸ı":31454,"schoolers":31455,"healed":31456,"logged":31457,"ðŁ¤ĺðŁı¼":31458,"icus":31459,"boredom":31460,"bish":31461,"bffs":31462,"talking":31463,"suresh":31464,"hookem":31465,"deon":31466,"defl":31467,"eileen":31468,"ðŁįķ":31469,"womenintech":31470,"risotto":31471,"ranger":31472,"advertise":31473,"à¸ģà¸":31474,"telly":31475,"lago":31476,"dartmoor":31477,"dong":31478,"skates":31479,"logo":31480,"unner":31481,"mailbox":31482,"masala":31483,"looooo":31484,"amethyst":31485,"chewing":31486,"cbb":31487,"australians":31488,"rcmp":31489,"gameart":31490,"#...":31491,"korn":31492,"extremism":31493,"fruitful":31494,"ancient":31495,"pubg":31496,"polite":31497,"whit":31498,"murals":31499,"mgr":31500,"lineman":31501,"davao":31502,"stems":31503,"tennis":31504,"avage":31505,"tupac":31506,"gigantic":31507,"hsbc":
31508,"autobiography":31509,"upthe":31510,"ีà¹Ī":31511,"regal":31512,"figuring":31513,"kul":31514,"missy":31515,"hoop":31516,"gras":31517,"forums":31518,"backlash":31519,"abducted":31520,"pnw":31521,"minic":31522,"butt":31523,"bottoms":31524,"aton":31525,"veng":31526,"ðŁĮı":31527,"delaney":31528,"prabhu":31529,"fanclub":31530,"overhaul":31531,"healthye":31532,"syno":31533,"aaf":31534,"renamed":31535,"kimi":31536,"uncle":31537,"mancity":31538,"seu":31539,"quanti":31540,"esteem":31541,"umin":31542,"enzo":31543,"melvin":31544,"undergo":31545,"jhar":31546,"farah":31547,"coasters":31548,"humphrey":31549,"mhz":31550,"childrens":31551,"^.":31552,"dhi":31553,"disruptive":31554,"integrating":31555,"rnb":31556,"oversized":31557,"aide":31558,"neau":31559,"documentation":31560,"ðŁijĢðŁijĢ":31561,"palo":31562,"hearth":31563,"riyad":31564,"punctu":31565,"abcnews":31566,"secures":31567,"boyband":31568,"birch":31569,"juco":31570,"traff":31571,"legislators":31572,"baya":31573,"ãĤ¯":31574,"noises":31575,"collects":31576,"swarm":31577,"kner":31578,"bishops":31579,"sturgeon":31580,"snapping":31581,"mol":31582,"freaky":31583,"chairperson":31584,"trop":31585,"lynch":31586,"carcin":31587,"artsy":31588,"esto":31589,"chai":31590,"flur":31591,"invali":31592,"sausages":31593,"imel":31594,"jor":31595,"funfact":31596,"witter":31597,"punished":31598,"acons":31599,"hya":31600,"reversi":31601,"emc":31602,"diffu":31603,"zx":31604,"spaw":31605,"clad":31606,"dmit":31607,"holland":31608,"fresco":31609,"payroll":31610,"abundant":31611,"stuffing":31612,"moro":31613,"cny":31614,"boycott":31615,"wendy":31616,"eleven":31617,"provoc":31618,"pilot":31619,"trx":31620,"bead":31621,"climateaction":31622,"rion":31623,"assie":31624,"ìĸ":31625,"osm":31626,"islamic":31627,"hoar":31628,"goodreads":31629,"alici":31630,"afternoons":31631,"spokesman":31632,"jolie":31633,"itas":31634,"mascara":31635,"âĻ©âĻ«":31636,"prevail":31637,"beetroot":31638,"lujah":31639,"kli":31640,"dodger":31641,"»":31642,"rule":31643,"ln":31644,"scream":31645,"hobart":31646,"colbert":31647,"rtc":31648,"erm":31649,"patro":31650,"quoting":31651,"slive":31652,"quest":31653,"nonfiction":31654,"seminary":31655,"prosecutors":31656,"vest":31657,"expressway":31658,"gge":31659,"nautical":31660,"etf":31661,"ðŁİīðŁİĬ":31662,"duration":31663,"chaired":31664,"thefilm":31665,"fabio":31666,"sheh":31667,"cano":31668,"ðŁĴªðŁı»":31669,"withdraw":31670,"!:)":31671,"corpus":31672,"phenom":31673,"yelp":31674,"lawn":31675,"entom":31676,"snapper":31677,"butte":31678,"pinball":31679,"proxy":31680,"libre":31681,"allevi":31682,"nada":31683,"gabriel":31684,"fowl":31685,"eureka":31686,"daphne":31687,"tunes":31688,"punched":31689,"whore":31690,"jog":31691,"rential":31692,"manners":31693,"ope":31694,"whufc":31695,"guth":31696,"revolt":31697,"sneaker":31698,"philharmonic":31699,"hoste":31700,"sovereignty":31701,"ðŁĻıðŁĻıðŁĻı":31702,"fishing":31703,"sciart":31704,"feta":31705,"ipp":31706,"dumping":31707,"kelown":31708,"giri":31709,"digits":31710,"salu":31711,"sanjay":31712,"tweeters":31713,"spas":31714,"colchester":31715,"scab":31716,"madd":31717,"à¹Ħà¸":31718,"Äĩ":31719,"geddon":31720,"marchfor":31721,"dop":31722,"maureen":31723,"unplugged":31724,"dido":31725,"fashionblogger":31726,"upa":31727,"mexic":31728,"tary":31729,"polye":31730,"jameson":31731,"vt":31732,"grinder":31733,"maddy":31734,"consultancy":31735,"¬ë":31736,"leagueoflegends":31737,"accents":31738,"umni":31739,"janeiro":31740,"tuss":31741,"hens":31742,"amplifier":31743,"toshi":31744,"prettier":31745,"prevents":31746,"newtown":31747,"redw
ood":31748,"vantage":31749,"ballard":31750,"artof":31751,"ashe":31752,"asion":31753,"lacey":31754,"apat":31755,"grove":31756,"à¸Ħ":31757,"rwand":31758,"realtors":31759,"traitor":31760,"bedding":31761,"ör":31762,"zion":31763,"flashing":31764,"campan":31765,"boomer":31766,"secretariat":31767,"abol":31768,"litigation":31769,"contamination":31770,"sedly":31771,"shredded":31772,"infor":31773,"doherty":31774,"benchmark":31775,"roche":31776,"skateboard":31777,"shovel":31778,"izz":31779,"topper":31780,"oster":31781,"labyrin":31782,"autum":31783,"kong":31784,"hummus":31785,"viz":31786,"technews":31787,"klaus":31788,"amusing":31789,"socialmediamarketing":31790,"ides":31791,"castell":31792,"stee":31793,"underestimate":31794,"calab":31795,"paign":31796,"billing":31797,"unanimously":31798,"gmb":31799,"flyfishing":31800,"hathaway":31801,"commercial":31802,"colouring":31803,"skulls":31804,"pivot":31805,"tep":31806,"tbc":31807,"motorway":31808,"xpress":31809,"constructive":31810,"puk":31811,"underlying":31812,"kirsten":31813,"maniac":31814,"chao":31815,"sema":31816,"chiffon":31817,"ðŁijĮðŁı»":31818,"verona":31819,"komo":31820,"standoff":31821,"wiped":31822,"cated":31823,"blair":31824,"workin":31825,"msc":31826,"bethlehem":31827,"swipe":31828,"unexpec":31829,"pees":31830,"petri":31831,"origami":31832,"ðŁijħ":31833,"mexico":31834,"flavor":31835,"rudd":31836,"cannabis":31837,"maru":31838,"riddle":31839,"worshi":31840,"silon":31841,"schat":31842,"apse":31843,"tanger":31844,"bious":31845,"eer":31846,"questioned":31847,"ozar":31848,"dank":31849,"anglesey":31850,"charan":31851,"baku":31852,"competen":31853,"repri":31854,"batter":31855,"saxon":31856,"calves":31857,"lengths":31858,"$$$":31859,"âŀ¡ï¸ı":31860,"immersion":31861,"gaunt":31862,"carry":31863,"cyto":31864,"banda":31865,"shutt":31866,"experience":31867,"elgin":31868,"mousse":31869,"taz":31870,"êµ":31871,"incorrect":31872,"enz":31873,"bham":31874,"moron":31875,"sover":31876,"arun":31877,"tipped":31878,"lable":31879,"dearly":31880,"bautista":31881,"íĻ":31882,"mortal":31883,"woop":31884,"dtla":31885,"shocks":31886,"davos":31887,"ðŁĵĿ":31888,"swimwear":31889,"herman":31890,"ðŁijĩðŁijĩ":31891,"zir":31892,"neglected":31893,"graced":31894,"campuses":31895,"avs":31896,"arora":31897,"swachhb":31898,"livepd":31899,"accra":31900,"enquiries":31901,"shooters":31902,"kurt":31903,"vancouver":31904,"bradley":31905,"garda":31906,"gü":31907,"olla":31908,"attracting":31909,"upton":31910,"newin":31911,"lumia":31912,"furnace":31913,"evers":31914,"eon":31915,"swa":31916,"rookies":31917,"aoc":31918,"vss":31919,"brisket":31920,"torch":31921,"yoda":31922,"heartland":31923,"taco":31924,"phony":31925,"foodbank":31926,"abbey":31927,"babylon":31928,"uy":31929,"greate":31930,"expresses":31931,"dandy":31932,"scapes":31933,"survivor":31934,"rond":31935,"eci":31936,"havin":31937,"abel":31938,"childish":31939,"torque":31940,"wavy":31941,"urself":31942,"kanyewest":31943,"yearof":31944,"alestine":31945,"obrien":31946,"alfon":31947,"skag":31948,"korean":31949,"anchorage":31950,"valeri":31951,"dew":31952,"ðŁİ¨":31953,"landslide":31954,"carole":31955,"christen":31956,"gophers":31957,"afi":31958,"priyanka":31959,"qq":31960,"powerof":31961,"itte":31962,"pcso":31963,"twol":31964,"pry":31965,"intellectu":31966,"guerrero":31967,"piles":31968,"wishlist":31969,"wren":31970,"timetable":31971,"ëı":31972,"prodigy":31973,"gibbons":31974,"./":31975,"neur":31976,"anzac":31977,"murray":31978,"viest":31979,"plaster":31980,"lair":31981,"artgallery":31982,"intercontinental":31983,"gbr":31984,"bellator":31985,"n
amjoon":31986,"mammals":31987,"amel":31988,"yaw":31989,"sarasota":31990,"camar":31991,"budding":31992,"summari":31993,"acosta":31994,"lash":31995,"eyou":31996,"postgraduate":31997,"instructors":31998,"tig":31999,"constant":32000,"werewolf":32001,"icos":32002,"clas":32003,"glenn":32004,"budge":32005,"ðŁĻĤ":32006,"erta":32007,"stains":32008,"persecution":32009,"cumbri":32010,"och":32011,"synergy":32012,"huang":32013,"scandin":32014,"midterms":32015,"commentator":32016,"regarded":32017,"perpetual":32018,"boiling":32019,"alp":32020,"lange":32021,"schle":32022,"faceli":32023,"tweeta":32024,"ridden":32025,"oktoberfest":32026,"charlottesville":32027,"iklan":32028,"jou":32029,"chatham":32030,"bsc":32031,"ðŁį¦":32032,"strauss":32033,"mellow":32034,"xxxx":32035,"happyhour":32036,"reactor":32037,"wwer":32038,"distraction":32039,"atorial":32040,"ðŁĴªðŁı¼":32041,"twinpeaks":32042,"fayette":32043,"aor":32044,"kok":32045,"broom":32046,"syfy":32047,"ouse":32048,"amag":32049,"Ø·":32050,"ubisoft":32051,"lulu":32052,"hallmark":32053,"stuart":32054,"itya":32055,"sideline":32056,"vengeance":32057,"relu":32058,"sexism":32059,"bouncing":32060,"unites":32061,"gustav":32062,"tessa":32063,"stump":32064,"proclamation":32065,"imax":32066,"dividend":32067,"colby":32068,"ðŁįİ":32069,"playwright":32070,"unsafe":32071,"cosmo":32072,"ðŁĩ²ðŁĩ½":32073,"cupboard":32074,"constituents":32075,"anglia":32076,"rampage":32077,"ðŁĺįðŁĺįðŁĺįðŁĺįðŁĺį":32078,"thanked":32079,"takeaways":32080,"shroff":32081,"debat":32082,"khur":32083,"conducts":32084,"formats":32085,"à©":32086,"portage":32087,"graphers":32088,"uten":32089,"prem":32090,"moines":32091,"condemns":32092,"sous":32093,"lps":32094,"fcs":32095,"dealership":32096,"leukemia":32097,"bureau":32098,"skid":32099,"guardiola":32100,"caster":32101,"third":32102,"avoided":32103,"encyclo":32104,"csr":32105,"vixx":32106,"analyzing":32107,"shear":32108,"duluth":32109,"shapiro":32110,"chanting":32111,"stresses":32112,"asbe":32113,"militia":32114,"ãĥª":32115,"collin":32116,"arsene":32117,"suresh":32118,"teachings":32119,"yixing":32120,"shill":32121,"nudes":32122,"svu":32123,"clearwater":32124,"warped":32125,"prolife":32126,"artistson":32127,"itu":32128,"versailles":32129,"galaxy":32130,"axel":32131,"springst":32132,"cala":32133,"huhu":32134,"scu":32135,"commitments":32136,"exeter":32137,"poignant":32138,"motion":32139,"conservatory":32140,"rowdy":32141,"recalled":32142,"musk":32143,"embelli":32144,"sothe":32145,"âĺĢ":32146,"stopper":32147,"schild":32148,"tope":32149,"elmo":32150,"ziel":32151,"jom":32152,"barnsley":32153,"snowden":32154,"ontour":32155,"journey":32156,"hillsborough":32157,"parole":32158,"wts":32159,"moving":32160,"agility":32161,"tivo":32162,"ffers":32163,"kindleunlimited":32164,"gwen":32165,"annan":32166,"ahmad":32167,"textured":32168,"hepatitis":32169,"dram":32170,"insiders":32171,"tissues":32172,"ãĥĦ":32173,"fcbarcelona":32174,"cratic":32175,"naacp":32176,"pecan":32177,"fgm":32178,"customize":32179,"concert":32180,"gsm":32181,"peg":32182,"pone":32183,"justintrudeau":32184,"supercars":32185,"happyholidays":32186,"bular":32187,"adox":32188,"laptops":32189,"digitalhealth":32190,"destination":32191,"gradually":32192,"áĥ¦":32193,"poppy":32194,"ssl":32195,"inhibit":32196,"starlight":32197,"offro":32198,"gloomy":32199,"xper":32200,"halder":32201,"implants":32202,"leto":32203,"hassel":32204,"aas":32205,"untold":32206,"enci":32207,"liberia":32208,"oran":32209,"contests":32210,"ilah":32211,"smag":32212,"scout":32213,"marianne":32214,"cryo":32215,"scheduling":32216,"los":32217,"kane":3
2218,"stuttgart":32219,"nese":32220,"lawrence":32221,"dain":32222,"photom":32223,"carou":32224,"ร":32225,"gwy":32226,"nationaldogday":32227,"roasting":32228,"bandcamp":32229,"kentucky":32230,"stretches":32231,"kerel":32232,"cashe":32233,"ãĤ¸":32234,"stax":32235,"transi":32236,"doggie":32237,"atric":32238,"halle":32239,"civic":32240,"browning":32241,"leinster":32242,"catday":32243,"highland":32244,"joyous":32245,"incumb":32246,"orlando":32247,"romo":32248,"colton":32249,"delta":32250,"carab":32251,"rotc":32252,"asteroid":32253,"goosebumps":32254,"mology":32255,"yoko":32256,"ands":32257,"tomorrows":32258,"redcarpet":32259,"smp":32260,"casio":32261,"ðŁ¤£ðŁ¤£ðŁ¤£":32262,"seau":32263,"rejection":32264,"rotating":32265,"bipartisan":32266,"thun":32267,"mati":32268,"boni":32269,"oll":32270,"energye":32271,"doit":32272,"lj":32273,"motherhood":32274,"louise":32275,"necklaces":32276,"elite":32277,"nix":32278,"lcs":32279,"env":32280,"glu":32281,"lesh":32282,"crank":32283,"susie":32284,"mclau":32285,"sotu":32286,"crowley":32287,"ratri":32288,"used":32289,"breton":32290,"alfredo":32291,"yeo":32292,"travelpics":32293,"tipp":32294,"ellison":32295,"saxophone":32296,"mered":32297,"heughan":32298,"taine":32299,"fes":32300,"viro":32301,"supposedly":32302,"ias":32303,"digestive":32304,"yle":32305,"lizzy":32306,"wildlifephotography":32307,"brianna":32308,"westfield":32309,"rained":32310,"amher":32311,"ðŁĺĦðŁĺĦ":32312,"distribute":32313,"bottom":32314,"preserving":32315,"oiland":32316,"crafty":32317,"descen":32318,"colling":32319,"shakespearesunday":32320,"rwc":32321,"angled":32322,"cian":32323,"tations":32324,"montage":32325,"meyers":32326,"francesca":32327,"ðŁĮ·":32328,"wiggins":32329,"sanford":32330,"volunteer":32331,"carra":32332,"bark":32333,"varied":32334,"plin":32335,"amu":32336,"kapil":32337,"rockers":32338,"quind":32339,"brane":32340,"inmate":32341,"ental":32342,"improvis":32343,"michigan":32344,"retweeting":32345,"progressing":32346,"mercedesbenz":32347,"smoker":32348,"physiology":32349,"dorado":32350,"wattpad":32351,"hwa":32352,"srbachchan":32353,"wga":32354,"volatility":32355,"hire":32356,"acap":32357,"wnba":32358,"heinz":32359,"stitches":32360,"kidnapping":32361,"burys":32362,"limb":32363,"fitters":32364,"thumbnail":32365,"tone":32366,"mirand":32367,"desirable":32368,"addison":32369,"taran":32370,"tamilnadu":32371,"spectator":32372,"sociology":32373,"amitshah":32374,"remotely":32375,"âĻ¦":32376,"hamid":32377,"rds":32378,"glee":32379,"smoothly":32380,"schro":32381,"erc":32382,"laliga":32383,"heals":32384,"usf":32385,"nishi":32386,"dhu":32387,"unil":32388,"hle":32389,"tromb":32390,"bhutan":32391,"pilipinas":32392,"seung":32393,"whitman":32394,"tey":32395,"mince":32396,"snowboarding":32397,"reau":32398,"kker":32399,"avo":32400,"zachary":32401,"ranveer":32402,"tik":32403,"govern":32404,"qual":32405,"becky":32406,"anthropology":32407,"atten":32408,"groceries":32409,"debit":32410,"warp":32411,"silicon":32412,"hawaii":32413,"ðŁĴħ":32414,"pomegranate":32415,"peer":32416,"oranges":32417,"peopleschoice":32418,"endure":32419,"ðŁĴĽðŁĴĽ":32420,"ãĤ¹ãĥ":32421,"acial":32422,"ahaha":32423,"stuk":32424,"imperial":32425,"blond":32426,"powder":32427,"knots":32428,"vince":32429,"woodlands":32430,"dena":32431,"watchin":32432,"matcha":32433,"mahat":32434,"galaxies":32435,"middlesbrough":32436,"kö":32437,"stree":32438,"rescues":32439,"waldo":32440,"leroy":32441,"despic":32442,"realities":32443,"tmnt":32444,"haq":32445,"uno":32446,"pec":32447,"bollywood":32448,"blinds":32449,"designthinking":32450,"hems":32451,"andhra":32452
,"absen":32453,"fans":32454,"stech":32455,"shirehour":32456,"blaine":32457,"shakti":32458,"purely":32459,"ðŁıı":32460,"trafal":32461,"keynes":32462,"grate":32463,"tobias":32464,"spontaneous":32465,"saturated":32466,"cavalry":32467,"prisc":32468,"ðŁĺij":32469,"wht":32470,"passi":32471,"~~~":32472,"virat":32473,"pattinson":32474,"lao":32475,"weirdo":32476,"sympathy":32477,"juda":32478,"occasionally":32479,"credited":32480,"statu":32481,"esco":32482,"hilly":32483,"escape":32484,"discharge":32485,"seer":32486,"maynard":32487,"sudbury":32488,"zlat":32489,"oral":32490,"weer":32491,"encountered":32492,"smelling":32493,"oversight":32494,"ê¸":32495,"thatcher":32496,"mackay":32497,"youcan":32498,"freep":32499,"freedoms":32500,"prophecy":32501,"hoe":32502,"ishqba":32503,"drake":32504,"quits":32505,"pelled":32506,"turk":32507,"ovi":32508,"wesleyan":32509,"newmusic":32510,"legg":32511,"cheng":32512,"hilli":32513,"ayy":32514,"panties":32515,"adversity":32516,"adjac":32517,"vaccination":32518,"juke":32519,"gac":32520,"exceed":32521,"timesof":32522,"staining":32523,"epcot":32524,"vital":32525,"upward":32526,"bethesda":32527,"apark":32528,"mahi":32529,"campfire":32530,"enchanting":32531,"rhapso":32532,"hz":32533,"naver":32534,"fax":32535,"validation":32536,"acad":32537,"nyr":32538,"asym":32539,"coordinated":32540,"departed":32541,"allery":32542,"varies":32543,"sprite":32544,"chaplin":32545,"ssoccer":32546,"swat":32547,"bret":32548,"reluct":32549,"tunesapp":32550,"superstar":32551,"reminiscing":32552,"oco":32553,"homegrown":32554,"doughnut":32555,"uncanny":32556,"lapd":32557,"thyroid":32558,"!âĿ¤ï¸ı":32559,"botanic":32560,"bres":32561,"spade":32562,"iste":32563,"echoes":32564,"dulil":32565,"bursting":32566,"quiero":32567,"ðŁijİ":32568,"loyola":32569,"amusement":32570,"hails":32571,"sleepy":32572,"burglary":32573,"âľı":32574,"rogue":32575,"cotland":32576,"moors":32577,"lower":32578,"wicked":32579,"ðŁĶĬ":32580,"competiti":32581,"argentine":32582,"yvonne":32583,"kartikeyan":32584,"iliary":32585,"gatsby":32586,"precinct":32587,"sixty":32588,"naji":32589,"cams":32590,"practitioner":32591,"ðŁĺ³ðŁĺ³":32592,"pune":32593,"negli":32594,"julien":32595,"invaded":32596,"calibr":32597,"clam":32598,"dubai":32599,"muk":32600,"lantic":32601,"product":32602,"fedex":32603,"ï¸ı:":32604,"eura":32605,"darius":32606,"sling":32607,"virtualreality":32608,"homestead":32609,"ðŁı³ï¸ıâĢįðŁĮĪ":32610,"paced":32611,"inha":32612,"pulmon":32613,"lazy":32614,"premiering":32615,"mastered":32616,"inhe":32617,"congregation":32618,"bajo":32619,"sporting":32620,"newjersey":32621,"horny":32622,"lmaoo":32623,"lengthy":32624,"dut":32625,"yogh":32626,"swearing":32627,"philosophical":32628,"papua":32629,"inski":32630,"knowles":32631,"dyke":32632,"âĢ²":32633,"token":32634,"mcguire":32635,"riot":32636,"probability":32637,"mccon":32638,"gros":32639,"sumat":32640,"cite":32641,"daa":32642,"onda":32643,"maddow":32644,"chew":32645,"boardgames":32646,"sparked":32647,"reclaimed":32648,"adhd":32649,"nyse":32650,"imwithher":32651,"equinox":32652,"booths":32653,"balsamic":32654,"hazy":32655,"dorchester":32656,"agos":32657,"seaw":32658,"moderator":32659,"seriea":32660,"andersen":32661,"pilgrim":32662,"âŃIJâŃIJ":32663,"itchen":32664,"halli":32665,"xton":32666,"nathaniel":32667,"munition":32668,"celestial":32669,"gaf":32670,"zoom":32671,"markle":32672,"penthouse":32673,"cale":32674,"sfa":32675,"barking":32676,"tucket":32677,"emery":32678,"calorie":32679,"lique":32680,"adar":32681,"mcnam":32682,"tortilla":32683,"woodpecker":32684,"motown":32685,"badger":32686,"ayrshir
e":32687,"scramble":32688,"dday":32689,"craziest":32690,"perrie":32691,"choco":32692,"caste":32693,"iot":32694,"wrecked":32695,"selecting":32696,"ussr":32697,"graft":32698,"punt":32699,"labou":32700,"irst":32701,"baek":32702,"ÛĮ":32703,"suki":32704,"queu":32705,"achat":32706,"tester":32707,"augmented":32708,"wcvb":32709,"sinks":32710,"ðŁĵ»":32711,"rake":32712,"interne":32713,"because":32714,"bellevue":32715,"unearth":32716,"lighten":32717,"ðŁĺ£":32718,"turnaround":32719,"labeled":32720,"unemployed":32721,"twitterkurds":32722,"leia":32723,"hye":32724,"greater":32725,"ðŁIJİ":32726,"timed":32727,"ired":32728,"ett":32729,"limitations":32730,"cabe":32731,"sout":32732,"beech":32733,"annihil":32734,"retrac":32735,"yoona":32736,"anger":32737,"dennis":32738,"supplying":32739,"diz":32740,"\"(":32741,"scur":32742,"gunman":32743,"suho":32744,"sauvignon":32745,"ล":32746,"wiley":32747,"landon":32748,"choreography":32749,"prehistoric":32750,"ðŁıĥ":32751,"vargas":32752,"assessments":32753,"pinnacle":32754,"dii":32755,"chamberlain":32756,"ìĪ":32757,"vp":32758,"presenters":32759,"deutsche":32760,"sunshine":32761,"salutes":32762,"rone":32763,"busiest":32764,"-.-":32765,"motorists":32766,"hemisphere":32767,"alwx":32768,"psp":32769,"owa":32770,"denying":32771,"choc":32772,"gutier":32773,"hanuk":32774,"muskete":32775,"jaitley":32776,"sewage":32777,"tame":32778,"thinkers":32779,"shim":32780,"sequo":32781,"papar":32782,"middleeast":32783,"kwa":32784,"keg":32785,"patagonia":32786,"noy":32787,"barça":32788,"takeoff":32789,"hea":32790,"à¬":32791,"nsc":32792,"gdc":32793,"ðŁijĪ":32794,"moustache":32795,"melania":32796,"thra":32797,"â¬Ĩï¸ı":32798,"pierced":32799,"zeus":32800,"fonts":32801,"bera":32802,"itiner":32803,"qatar":32804,"contrary":32805,"ireland":32806,"ify":32807,"oulos":32808,"communal":32809,"fins":32810,"unpaid":32811,"paa":32812,"ðŁijĩðŁı»":32813,"rios":32814,"oup":32815,"filler":32816,"cafeteria":32817,"à¸Ń":32818,"kasi":32819,"caliber":32820,"zulu":32821,"vsco":32822,"tsford":32823,"dragonfly":32824,"smokin":32825,"pist":32826,"psychologist":32827,"diplomat":32828,"webs":32829,"buccane":32830,"ா":32831,"motivational":32832,"dune":32833,"bae":32834,"cfs":32835,"without":32836,"eron":32837,"iac":32838,"atee":32839,"pension":32840,"frazier":32841,"ensis":32842,"skis":32843,"parting":32844,"gery":32845,"territories":32846,"nachos":32847,"enight":32848,"everlasting":32849,"msdhoni":32850,"tele":32851,"spun":32852,"podi":32853,"sabah":32854,"environmentally":32855,"cease":32856,"beaumont":32857,"marta":32858,"kelvin":32859,"hoff":32860,"sunil":32861,"nda":32862,"cob":32863,"shale":32864,"reedus":32865,"unboxing":32866,"ubio":32867,"reopened":32868,"nall":32869,"capsules":32870,"marr":32871,"himalayas":32872,"sweeter":32873,"jaz":32874,"fmr":32875,"tweeter":32876,"dhaka":32877,"nau":32878,"demi":32879,"dfs":32880,"taurus":32881,"fading":32882,"itutes":32883,"cip":32884,"overflow":32885,"jeffrey":32886,"donny":32887,"cartunesapp":32888,"ðŁįij":32889,"prefecture":32890,"danced":32891,"cpt":32892,"pleasing":32893,"italk":32894,"earthquakes":32895,"ulation":32896,"hio":32897,"ãĢĭ":32898,"antan":32899,"nutrient":32900,"deere":32901,"selects":32902,"enrichment":32903,"riti":32904,"trampol":32905,"blamed":32906,"jia":32907,"contributors":32908,"chesapeake":32909,"pigeons":32910,"tribunal":32911,"maduro":32912,"wsu":32913,"ilove":32914,"efficiently":32915,"darcy":32916,"warms":32917,"arra":32918,"ecu":32919,"hower":32920,"struggled":32921,"rajinikanth":32922,"ðŁĺ¢ðŁĺ¢":32923,"housing":32924,"strat":32925,"elix":32926,
"dispro":32927,"raffic":32928,"thierry":32929,"nasty":32930,"cfb":32931,"staffing":32932,"alma":32933,"backers":32934,"henson":32935,"skywalker":32936,"realestate":32937,"roos":32938,"nessy":32939,"chance":32940,"cairns":32941,"cci":32942,"pedal":32943,"lyft":32944,"crossword":32945,"waiter":32946,"onlyin":32947,"kruger":32948,"kir":32949,"alejandro":32950,"cartier":32951,"carrera":32952,"repaired":32953,"ouat":32954,"unclear":32955,"unbreakable":32956,"todayin":32957,"queries":32958,"jody":32959,"genital":32960,"winner":32961,"tol":32962,"kelowna":32963,"fascinated":32964,"ãĥ¬":32965,"srisri":32966,"squared":32967,"sprung":32968,"negotiate":32969,"privately":32970,"aven":32971,">>>>>":32972,"gical":32973,"gavin":32974,"chesterfield":32975,"zumba":32976,"orr":32977,"natalia":32978,"impeachment":32979,"mnl":32980,"carat":32981,"critique":32982,"credible":32983,"tracy":32984,"tani":32985,"musik":32986,"jigsaw":32987,"gambia":32988,"tolkien":32989,"feu":32990,"asper":32991,"savory":32992,"foxx":32993,"fitt":32994,"marlon":32995,"lrt":32996,"vell":32997,"pbr":32998,"imprisoned":32999,"iom":33000,"chul":33001,"windshield":33002,"kaye":33003,"baa":33004,"chord":33005,"sart":33006,"algon":33007,"ministerial":33008,"natgeo":33009,"lazio":33010,"norms":33011,"ðŁijįðŁijį":33012,"licking":33013,"futbol":33014,"unsung":33015,"dallascowboys":33016,"shred":33017,"disturb":33018,"devine":33019,"beards":33020,"chf":33021,"bday":33022,"rosso":33023,"igor":33024,"ayi":33025,"siren":33026,"kair":33027,"stiles":33028,"rof":33029,"magnets":33030,"uncover":33031,"mouse":33032,"banging":33033,"sighted":33034,"speople":33035,"impact":33036,"rowland":33037,"kira":33038,"environment":33039,"lovethe":33040,"psis":33041,"mishra":33042,"glendale":33043,"cajun":33044,"oche":33045,"deception":33046,"sexist":33047,"straws":33048,"sga":33049,"buffer":33050,"apostle":33051,"spl":33052,"popup":33053,"ðŁļĹ":33054,"rg":33055,"uper":33056,"ballin":33057,"idy":33058,"occasional":33059,"nationalpark":33060,"ðŁıĬ":33061,"uan":33062,"innovation":33063,"ห":33064,"teaparty":33065,"rette":33066,"counterfe":33067,"bha":33068,"recs":33069,"igen":33070,"ðŁĮIJ":33071,"hummingbird":33072,"cur":33073,"haven":33074,"lazar":33075,"pueblo":33076,"::":33077,"zionist":33078,"opath":33079,"inverness":33080,"promoter":33081,"cartoon":33082,"cabinets":33083,"mahogany":33084,"surveying":33085,"rational":33086,"feeling":33087,"testify":33088,"sow":33089,"ocon":33090,"ย":33091,"neel":33092,"maris":33093,"solitary":33094,"chemo":33095,"radcliffe":33096,"simons":33097,"rosary":33098,"newer":33099,"jodie":33100,"retali":33101,"prawn":33102,"paddy":33103,"henge":33104,"kala":33105,"implant":33106,"aty":33107,"brentwood":33108,"paradox":33109,"enez":33110,"redesigned":33111,"pour":33112,"wyd":33113,"alde":33114,"à¯ģ":33115,"sold":33116,"biomedical":33117,"à¹Ĥ":33118,"tttt":33119,"matteo":33120,"yser":33121,"newton":33122,"debun":33123,"nerdy":33124,"lool":33125,"woon":33126,"elisabeth":33127,"ecc":33128,"whi":33129,"acho":33130,"salvage":33131,"salaries":33132,"quity":33133,"navigating":33134,"ophthal":33135,"consoles":33136,"rebuilt":33137,"opec":33138,"asters":33139,"shored":33140,"setlist":33141,"kathryn":33142,"rhymes":33143,"revisiting":33144,"ashish":33145,"lift":33146,"repost":33147,"soleil":33148,"âı±":33149,"wealth":33150,"saat":33151,"wec":33152,"kingjames":33153,"flipkart":33154,"fieldwork":33155,"segu":33156,"modal":33157,"bub":33158,"arers":33159,"ðŁįĴ":33160,"clooney":33161,"paddington":33162,"necessity":33163,"guthrie":33164,"pente":33165,"l
imo":33166,"josie":33167,"artin":33168,"enc":33169,"lhs":33170,"betrayal":33171,"infographics":33172,"ier":33173,"moa":33174,"hearings":33175,"bonjour":33176,"symbolic":33177,"agro":33178,"wedges":33179,"kristina":33180,"wildflower":33181,"athletic":33182,"photography":33183,"pesh":33184,"cahill":33185,"chilean":33186,"goul":33187,"fioren":33188,"ðŁij¶":33189,"zil":33190,"skim":33191,"badoo":33192,"delia":33193,"treble":33194,"ncc":33195,"ðŁĩ¦ðŁĩ":33196,"ahouse":33197,"bullock":33198,"solitude":33199,"اÙĨ":33200,"cancers":33201,"futureofwork":33202,"hutch":33203,"watershed":33204,"warmongers":33205,"spilled":33206,"colombo":33207,"moth":33208,"associations":33209,"weighed":33210,"globalgoals":33211,"notjust":33212,"christi":33213,"torg":33214,"sweating":33215,"maneu":33216,"clusters":33217,"âĢ¼ï¸ıâĢ¼ï¸ı":33218,"taped":33219,"uly":33220,"trusting":33221,"yusuf":33222,"tein":33223,"rab":33224,",,,,":33225,"sinai":33226,"audible":33227,"explicit":33228,"crowns":33229,"schiz":33230,"atleast":33231,"ðŁĹ£":33232,"debra":33233,"jesuit":33234,"enegger":33235,"zhen":33236,"onesie":33237,"iit":33238,"ssf":33239,"gurgaon":33240,"chakra":33241,"bearcats":33242,"kran":33243,"kawa":33244,"requesting":33245,"hanover":33246,"gend":33247,"soros":33248,"mercy":33249,"lovely":33250,"doomed":33251,"timmy":33252,"kuz":33253,"ull":33254,"abram":33255,"saison":33256,"ãĥ«":33257,"cleaners":33258,"remo":33259,"circuits":33260,"barred":33261,"oth":33262,"moist":33263,"madeleine":33264,"gallo":33265,"uj":33266,"permits":33267,"heaviest":33268,"carols":33269,"azte":33270,"giorgio":33271,"floats":33272,"declaring":33273,"usrc":33274,"minat":33275,"crafts":33276,"prima":33277,"conveni":33278,"nickelodeon":33279,"dancing":33280,"ceremonial":33281,"blogg":33282,"twp":33283,"anglican":33284,"shek":33285,"knick":33286,"(((":33287,"hubbard":33288,"harvey":33289,"hitman":33290,"feng":33291,"wesome":33292,"forza":33293,"sword":33294,"opus":33295,"brom":33296,"gibility":33297,"zal":33298,"munch":33299,"dancehall":33300,"greedy":33301,"hdmi":33302,"rebirth":33303,"ðŁĺĭðŁĺĭ":33304,"sworld":33305,"figurine":33306,"compost":33307,"kf":33308,"engraving":33309,"giorno":33310,"stana":33311,"kman":33312,"hamster":33313,"composers":33314,"aje":33315,"functionality":33316,"polk":33317,"isons":33318,"airplanes":33319,"tese":33320,"horrors":33321,"muscat":33322,"given":33323,"spence":33324,"ðŁĩ¸ðŁĩ":33325,"eliot":33326,"achilles":33327,"freck":33328,"cryptocurrencies":33329,"souther":33330,"halo":33331,"borneo":33332,"politic":33333,"hahahahah":33334,"upstate":33335,"siena":33336,"obscure":33337,"hausen":33338,"lloyd":33339,"happyfriday":33340,"motorbike":33341,"bona":33342,"americas":33343,"hols":33344,"-(":33345,"sporty":33346,"unaware":33347,"revenues":33348,"christopher":33349,"banksy":33350,"avan":33351,"evapor":33352,"compress":33353,"eyeliner":33354,"todos":33355,"buffy":33356,"renewableenergy":33357,"lyrical":33358,"archan":33359,"rapist":33360,"fairtrade":33361,"lmaooo":33362,"beatz":33363,"proactive":33364,"lapse":33365,"irical":33366,"reversal":33367,"pode":33368,"mcintyre":33369,"macau":33370,"ãĥķãĤ":33371,"nashgrier":33372,"fsa":33373,"gall":33374,"çĶŁ":33375,"perpetr":33376,"ilya":33377,"configuration":33378,"%;":33379,"strange":33380,"raci":33381,"à¸ĩ":33382,"pickups":33383,"kovsky":33384,"mammal":33385,"wps":33386,"gable":33387,"comparative":33388,"zh":33389,"saveour":33390,"davey":33391,"onetsy":33392,"mussels":33393,"miser":33394,"cristina":33395,"electron":33396,"crave":33397,"loren":33398,"precipitation":33399,"mz":3340
0,"ðŁį«":33401,"vincen":33402,"snowboard":33403,"noida":33404,"ahn":33405,"marinated":33406,"gtr":33407,"townhall":33408,"minis":33409,"bethel":33410,"advan":33411,"sura":33412,"shiel":33413,"furry":33414,"ðŁĺĤðŁĺĤðŁĺĤðŁĺĤðŁĺĤðŁĺĤ":33415,"lynd":33416,"soil":33417,"scence":33418,"seneca":33419,"sharjah":33420,"dickens":33421,"credentials":33422,"avar":33423,"perk":33424,"requiring":33425,"prefer":33426,"jian":33427,"deca":33428,"rach":33429,"ingfor":33430,"dele":33431,"beep":33432,"ðŁĴ»":33433,"cisely":33434,"huddle":33435,"greensboro":33436,"hawking":33437,"hoax":33438,"hangar":33439,"çľ":33440,"miso":33441,"lovin":33442,"greta":33443,"abad":33444,"logie":33445,"atan":33446,"snowflake":33447,"mahesh":33448,"fearthe":33449,"alkal":33450,"bobblehead":33451,"bahn":33452,"judged":33453,"futu":33454,"felix":33455,"ðŁįĵ":33456,"pike":33457,"deriv":33458,"notices":33459,"auer":33460,"dissuper":33461,"orda":33462,"wipes":33463,"amino":33464,"strikers":33465,"footb":33466,"dramas":33467,"punching":33468,"scoreless":33469,"hemingway":33470,"bih":33471,"ballad":33472,"chatter":33473,"ammo":33474,"klein":33475,"fabrication":33476,"karim":33477,"zend":33478,"histo":33479,"volta":33480,"rocky":33481,"marketer":33482,"xtreme":33483,"sequencing":33484,"paradigm":33485,"cleats":33486,"booming":33487,"âģłâģł":33488,"blockade":33489,"prompts":33490,"yoghurt":33491,"purpose":33492,"nur":33493,"regulate":33494,"noisy":33495,"ingrid":33496,"birdwatching":33497,"bartender":33498,"Ùĥ":33499,"wordof":33500,"chaotic":33501,"shorty":33502,"eldest":33503,"zapp":33504,"onceuponatime":33505,"flyo":33506,"ritos":33507,"mikequind":33508,"ðŁIJ´":33509,"registering":33510,".]":33511,"adol":33512,"gggg":33513,"purge":33514,"kidlit":33515,"arbor":33516,"valves":33517,"synagogue":33518,"oth":33519,"unanimous":33520,"verification":33521,"darrell":33522,"ãģĦ":33523,"vanderbilt":33524,"tapestry":33525,"prosper":33526,"diddy":33527,"drafting":33528,"decep":33529,"marquis":33530,"stint":33531,"michaeljackson":33532,"peeled":33533,"menus":33534,"bbb":33535,"scare":33536,"email":33537,"wrigley":33538,"itis":33539,"fell":33540,"somethin":33541,"barra":33542,"edgar":33543,"dipping":33544,"puddle":33545,"slade":33546,"learner":33547,"jalen":33548,"ðŁ§IJ":33549,"thedaily":33550,"mikequindazzi":33551,"jux":33552,"iqbal":33553,"mckinney":33554,"raiser":33555,"efan":33556,"drone":33557,"cato":33558,"picket":33559,"crowe":33560,"latt":33561,"uko":33562,"giuseppe":33563,"hini":33564,"synthesi":33565,"pontifex":33566,"songwriting":33567,"tod":33568,"switches":33569,"dinners":33570,"hq":33571,"gabrielle":33572,"pensacola":33573,"circle":33574,"exposes":33575,"evs":33576,"riyadh":33577,"promen":33578,"ock":33579,"saj":33580,"citation":33581,"brewco":33582,"josi":33583,"epaper":33584,"drif":33585,"pointless":33586,"tangled":33587,"cripp":33588,"lineups":33589,"fairies":33590,"daze":33591,"mourn":33592,"bladder":33593,"salz":33594,"burundi":33595,"bookmark":33596,"thepeople":33597,"subsequ":33598,"principal":33599,"sker":33600,"courtney":33601,"aoki":33602,"racers":33603,"adm":33604,"moma":33605,"criticalrole":33606,"houn":33607,"shedding":33608,"saka":33609,"aceous":33610,"mckay":33611,"husbands":33612,"½":33613,"meda":33614,"accusations":33615,"rosel":33616,"ncis":33617,"witnessing":33618,"orama":33619,"gods":33620,"hilton":33621,"elman":33622,"ÃŃn":33623,"megap":33624,"craven":33625,"announcer":33626,"criteri":33627,"sheffieldissuper":33628,"militant":33629,"consul":33630,"hooded":33631,"abyss":33632,"bx":33633,"madam":33634,"locu":33635,"marya
m":33636,"manicure":33637,"gratis":33638,"actresses":33639,"rosario":33640,"thisdayin":33641,"kingly":33642,"gnome":33643,"celine":33644,"rous":33645,"heel":33646,"lilac":33647,"vishal":33648,"abh":33649,"thorns":33650,"sls":33651,"neal":33652,"constructing":33653,"beren":33654,"slang":33655,"mains":33656,"farra":33657,"sarko":33658,"paige":33659,"guiller":33660,"lala":33661,"iceberg":33662,"noun":33663,"planners":33664,"ummm":33665,"ouses":33666,"illary":33667,"maan":33668,"boxing":33669,"zipper":33670,"srinagar":33671,"miguel":33672,"ostr":33673,"mpo":33674,"responsibly":33675,"lanterns":33676,"appliance":33677,"xb":33678,"grenade":33679,"neglect":33680,"dysle":33681,"hammock":33682,"nectar":33683,"witcher":33684,"rgv":33685,"dience":33686,"serbian":33687,"seeded":33688,"cruz":33689,"bish":33690,"sphe":33691,"eq":33692,"skyrim":33693,"algebra":33694,"philately":33695,"bungalow":33696,"geoff":33697,"yves":33698,"demanded":33699,"considerations":33700,"thevamp":33701,"pawankalyan":33702,"coded":33703,"gritty":33704,"eruption":33705,"seinfeld":33706,"unidenti":33707,"ëĭĪ":33708,"worm":33709,"acus":33710,"seung":33711,"dung":33712,"roland":33713,"sud":33714,"divisions":33715,"ablanc":33716,"shortest":33717,"jf":33718,"poun":33719,"plantbased":33720,"beto":33721,"tougher":33722,"mco":33723,"donet":33724,"markus":33725,"vfl":33726,"ðŁıł":33727,"opening":33728,"coward":33729,"cabernet":33730,"oxi":33731,"burlesque":33732,"sandra":33733,"sumo":33734,"consist":33735,"thot":33736,"cayman":33737,"motorola":33738,"gutierrez":33739,"dslr":33740,"yw":33741,"nobel":33742,"novice":33743,"momsdemand":33744,"grunge":33745,"spor":33746,"dcc":33747,"presses":33748,"slist":33749,"allotment":33750,"vocational":33751,"ftc":33752,"puja":33753,"loven":33754,"uttarak":33755,"tandem":33756,"shep":33757,"comedians":33758,"anatom":33759,"cantwait":33760,"healthyeating":33761,"westside":33762,"margins":33763,"chiang":33764,"asbestos":33765,"stupidity":33766,"problematic":33767,"fitbit":33768,":$":33769,"ceilings":33770,"shua":33771,"protections":33772,"biotic":33773,"bengali":33774,"rests":33775,"biennale":33776,"timo":33777,"culmin":33778,"eminent":33779,"affection":33780,"unbelievably":33781,"individually":33782,"canvassing":33783,"whitt":33784,"novasco":33785,"chinson":33786,"hpe":33787,"gow":33788,"gloucestershire":33789,"pao":33790,"threshold":33791,"chevron":33792,"sine":33793,"wether":33794,"ppie":33795,"aquino":33796,"antwerp":33797,"âĸ¬":33798,"poon":33799,"instaf":33800,"equine":33801,"cinematography":33802,"nbafinals":33803,"valiant":33804,"kilkenny":33805,"terence":33806,"systemic":33807,"srl":33808,"pound":33809,"madeira":33810,"plough":33811,"trecht":33812,"mated":33813,"mpd":33814,"ransomware":33815,"phin":33816,"liqui":33817,"bbce":33818,"boomer":33819,"istandwith":33820,"conju":33821,"rte":33822,"nara":33823,"foolish":33824,"dashing":33825,"viernes":33826,"brite":33827,"dau":33828,"juniper":33829,"aida":33830,"younow":33831,"razer":33832,"dei":33833,"repeating":33834,"comforting":33835,"adjacent":33836,"eto":33837,"casted":33838,"chatur":33839,"muer":33840,"synth":33841,"sanitary":33842,"macle":33843,"independent":33844,"lawful":33845,"eerie":33846,"hor":33847,"ðŁĴŃ":33848,"amrit":33849,"velo":33850,"stationery":33851,"muf":33852,"maymay":33853,"contemplating":33854,"elaborate":33855,"gregor":33856,"dries":33857,"accol":33858,"à¸ļ":33859,"schwarzenegger":33860,"illnesses":33861,"daybreak":33862,"followback":33863,"collusion":33864,"electronic":33865,"jovi":33866,"hiroshima":33867,"taw":33868,"homec":3
3869,"micah":33870,"quitting":33871,"frosting":33872,"benfica":33873,"heli":33874,"sical":33875,"piccad":33876,"corporate":33877,"mentorship":33878,"youare":33879,"singer":33880,"shiva":33881,"rune":33882,"inger":33883,"rium":33884,"playable":33885,"doop":33886,"willow":33887,"terre":33888,"nip":33889,"atd":33890,"warbler":33891,"professionally":33892,"erase":33893,"proceed":33894,"pedestrians":33895,"mischief":33896,"bending":33897,"alaskan":33898,"ckett":33899,"mop":33900,"ddles":33901,"shutter":33902,"geared":33903,"ateneo":33904,"madeline":33905,"gations":33906,"osha":33907,"derick":33908,"swild":33909,"angry":33910,"patents":33911,"hunk":33912,"decreased":33913,"fry":33914,"ðŁĴĸðŁĴĸðŁĴĸ":33915,"salon":33916,"quantities":33917,"dario":33918,"nigel":33919,"kuma":33920,"jenn":33921,"happye":33922,"xxx":33923,"rexperience":33924,"pros":33925,"ausch":33926,"relessly":33927,"hamburger":33928,"fukushima":33929,"erne":33930,"statec":33931,"rend":33932,"mayfield":33933,"jone":33934,"lefty":33935,"bernstein":33936,"smil":33937,"generates":33938,"forestation":33939,"bandits":33940,"tayo":33941,"rca":33942,"acci":33943,"rodrigo":33944,"knapp":33945,"elovers":33946,"vegetation":33947,"ural":33948,"left":33949,"ħï¸ı":33950,"worldre":33951,"suri":33952,"embark":33953,"wson":33954,"bayou":33955,"muller":33956,"movers":33957,"ðŁķº":33958,"presbyter":33959,"lf":33960,"cree":33961,"batb":33962,"salam":33963,"demonstrations":33964,"anec":33965,"npc":33966,"itics":33967,"tography":33968,"reinst":33969,"thurst":33970,"tale":33971,"offences":33972,"smartcity":33973,"brotha":33974,"oftheyear":33975,"invaluable":33976,"earn":33977,"ðŁijıðŁı½":33978,"kremlin":33979,"grady":33980,"townfc":33981,"guernsey":33982,"maha":33983,"contagious":33984,"drex":33985,"been":33986,"(£":33987,"nativity":33988,"ktm":33989,"somerhalder":33990,"compounds":33991,"íķĺ":33992,"\"âĢ¦":33993,"afg":33994,"ottnews":33995,"hound":33996,"firefly":33997,"cilan":33998,"donetsk":33999,"volunteered":34000,"akira":34001,"èª":34002,"singul":34003,"sth":34004,"drowned":34005,"mando":34006,"heir":34007,"ðŁİīðŁİĪ":34008,"taxis":34009,"yuki":34010,"veld":34011,"kans":34012,"elk":34013,"rants":34014,"hashtag":34015,"teng":34016,"rog":34017,"aat":34018,"grub":34019,"eber":34020,"inindia":34021,"colossus":34022,"signi":34023,"soever":34024,"milestones":34025,"dero":34026,"differential":34027,"phuket":34028,"mastermind":34029,"angh":34030,"melani":34031,"broker":34032,"actorvijay":34033,"stunned":34034,"continuity":34035,"affl":34036,"vocal":34037,"perennial":34038,"fiancé":34039,"incomplete":34040,"hunts":34041,"reissue":34042,"dominates":34043,"turmeric":34044,"roam":34045,"rion":34046,"bagged":34047,"nassau":34048,"fut":34049,"xox":34050,"nationaltrust":34051,"joye":34052,"sano":34053,"hearthstone":34054,"disrespect":34055,"lees":34056,"hse":34057,"siberian":34058,"offee":34059,"restock":34060,"wolfgang":34061,"regan":34062,"plano":34063,"unwind":34064,"repar":34065,"mille":34066,"],":34067,"skull":34068,"fatally":34069,"conceptual":34070,"ðŁĮ²":34071,"fé":34072,"berto":34073,"bms":34074,"ua":34075,"magna":34076,"notredame":34077,"lete":34078,"laundering":34079,"heartwarming":34080,"buffett":34081,"goat":34082,"peabo":34083,"windmill":34084,"vac":34085,"continually":34086,"azalea":34087,"membrane":34088,"cancels":34089,"makeyourown":34090,"athered":34091,"pto":34092,"torpe":34093,"ðŁĺł":34094,"ðŁĴ§":34095,"scares":34096,"leaking":34097,"zet":34098,"pixels":34099,"aci":34100,"khil":34101,"marathi":34102,"ðŁĻıðŁı½":34103,"ula":34104,"tamu":34105,"cha
ndigarh":34106,"zagre":34107,"aab":34108,"pronounced":34109,"aubrey":34110,"sander":34111,"punta":34112,"harlow":34113,"icelan":34114,"celebratory":34115,"sot":34116,"unciation":34117,"struly":34118,"mcdowell":34119,"deepika":34120,"reminders":34121,"mystical":34122,"ctc":34123,"chatted":34124,"sica":34125,"bargains":34126,"chhat":34127,"rubin":34128,"mnet":34129,"oilandgas":34130,"pelican":34131,"oat":34132,"morality":34133,"kour":34134,"ih":34135,"nuclear":34136,"gcu":34137,"richer":34138,"venezia":34139,"mma":34140,"leith":34141,"accompany":34142,"richmond":34143,"sportsnet":34144,"baahu":34145,"smuggling":34146,"mmi":34147,"ðŁĩ®ðŁĩª":34148,"twists":34149,"sahib":34150,".....":34151,"ambitions":34152,"illo":34153,"historical":34154,"forec":34155,"showbiz":34156,"ponies":34157,"chasers":34158,"remodel":34159,"willing":34160,"princesses":34161,"ample":34162,"cushions":34163,"acles":34164,"lotr":34165,"dach":34166,"anthe":34167,"incorporate":34168,"newbury":34169,"kiri":34170,"friedrich":34171,"abv":34172,"ballers":34173,"albert":34174,"ðŁijŃ":34175,"leti":34176,"nanop":34177,"cide":34178,"analo":34179,"nsf":34180,"))))":34181,"griffiths":34182,"valenci":34183,"roano":34184,"funrun":34185,"babysitting":34186,"caday":34187,"entre":34188,"uck":34189,"slug":34190,"tical":34191,"thesims":34192,"roar":34193,"carney":34194,"gam":34195,"stowe":34196,"fid":34197,"bunny":34198,"shamrock":34199,"pecu":34200,"molina":34201,"gocougs":34202,"contributes":34203,"transformation":34204,"moy":34205,"vaj":34206,"severy":34207,"antioxidants":34208,"thirteen":34209,"sightseeing":34210,"lj":34211,"reversible":34212,"oddly":34213,"hookah":34214,"nouvel":34215,"halal":34216,"fei":34217,"stables":34218,"mult":34219,"hopped":34220,"braids":34221,"interchange":34222,"ghanaian":34223,"wwww":34224,"ethno":34225,"conjunction":34226,"agov":34227,"yeti":34228,"earthand":34229,"tsp":34230,"conserve":34231,"heirloom":34232,"metaphor":34233,"woof":34234,"torio":34235,"selfless":34236,"nwa":34237,"emilia":34238,"ylene":34239,"yxe":34240,"giar":34241,"moderating":34242,"probz":34243,"bfi":34244,"neer":34245,"dummy":34246,"hanukkah":34247,"webber":34248,"kv":34249,"eyebrow":34250,"dagger":34251,"sump":34252,"rages":34253,"orkney":34254,"tbo":34255,"halsey":34256,"assignments":34257,"tronic":34258,"scrib":34259,"coon":34260,"anwar":34261,"#âĢİ":34262,"jalape":34263,"florida":34264,"quaid":34265,"hawkeyes":34266,"âĻ¡âĻ¡":34267,"streetcar":34268,"rog":34269,"datlantic":34270,"granola":34271,"unchanged":34272,"expectation":34273,"Ùĩ":34274,"marlin":34275,"gummy":34276,"ðŁĻıðŁı¾":34277,"awarenessmonth":34278,"oilpainting":34279,"muth":34280,"perch":34281,"junto":34282,"villagers":34283,"morg":34284,"cheated":34285,"webcomic":34286,"thefuture":34287,"dps":34288,"lakings":34289,"mentioning":34290,"voor":34291,"identities":34292,"accord":34293,"mcgu":34294,"lpga":34295,"rumour":34296,"massively":34297,"mpls":34298,"healy":34299,"date":34300,"spoli":34301,"revisited":34302,"ont":34303,"aland":34304,"scrutiny":34305,"lakeland":34306,"blending":34307,"":34308,"ankara":34309,"jamiedor":34310,"metabolic":34311,"fences":34312,"anny":34313,"åħ":34314,"semicon":34315,"oott":34316,"spaceship":34317,"wacky":34318,"leta":34319,"apac":34320,"shee":34321,"inherit":34322,"dores":34323,"ðŁĩ¨ðŁĩ¦":34324,"gente":34325,"twick":34326,"rims":34327,"galve":34328,"deville":34329,"kingfisher":34330,"scorpio":34331,"owl":34332,"alar":34333,"varian":34334,"ðŁĹĵ":34335,"venetian":34336,"stardust":34337,"thenorth":34338,"qing":34339,"harrington":34340,"consulat
e":34341,"spectacle":34342,"hobbs":34343,"turks":34344,"greer":34345,"mating":34346,"ðŁİĢ":34347,"ðŁĮĢ":34348,"directs":34349,"íĭ":34350,"pompeo":34351,"voiced":34352,"laos":34353,"tzu":34354,"prome":34355,"prism":34356,"merc":34357,"fortunately":34358,"bcfc":34359,"mcdonnell":34360,"notsorry":34361,"smiled":34362,"tba":34363,"forwar":34364,"midterm":34365,"darby":34366,"weinstein":34367,"upgrading":34368,"wolff":34369,"bronco":34370,"cabello":34371,"ðŁ¥ĩ":34372,"fiable":34373,"sharpe":34374,"battered":34375,"sato":34376,"mythical":34377,"instapic":34378,"prepped":34379,"enium":34380,"espo":34381,"diaper":34382,"explanations":34383,"whopping":34384,"ragnar":34385,"peel":34386,"antibiotic":34387,"lacks":34388,"harrison":34389,"lism":34390,"aul":34391,"quail":34392,"martina":34393,"sentencing":34394,"scams":34395,"didi":34396,"tronics":34397,"ãħłãħł":34398,"goff":34399,"zain":34400,"paramore":34401,"chained":34402,"clinton":34403,"liff":34404,"cottages":34405,"emon":34406,"reverend":34407,"consumer":34408,"cean":34409,"tany":34410,"lumpur":34411,"ebay":34412,"stool":34413,"ðŁĺ»ðŁĺ»":34414,"tapro":34415,"hath":34416,"modernart":34417,"justine":34418,"proverb":34419,"appy":34420,"trax":34421,"manifest":34422,"ambu":34423,"naik":34424,"pepp":34425,"rsd":34426,"merchants":34427,"kitchener":34428,"shifted":34429,"lizz":34430,"âĺħâĺħâĺħâĺħ":34431,"âĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶâĢĶ":34432,"utopia":34433,"tomo":34434,"outed":34435,"comers":34436,"chiropractic":34437,"bookclub":34438,"cindy":34439,"prohibition":34440,"seuss":34441,"민":34442,"thinkin":34443,"rrrr":34444,"gofund":34445,"tack":34446,"omb":34447,"catastrophic":34448,"lingu":34449,"guildford":34450,"botd":34451,"à¥ĭ":34452,"planter":34453,"^^":34454,"wink":34455,"kathmandu":34456,"stoppers":34457,"smoothies":34458,"reefs":34459,"hind":34460,"bellamy":34461,"Ħë":34462,"wastewater":34463,"voor":34464,"natl":34465,"!]":34466,"reel":34467,"yap":34468,"scooby":34469,"workspace":34470,"corinthians":34471,"blun":34472,"obligation":34473,"gbbo":34474,"dyson":34475,"cravings":34476,"ellington":34477,"dapl":34478,"wrexham":34479,"earthandclouds":34480,"ukrunchat":34481,"positioned":34482,"kalb":34483,"foursquare":34484,"jock":34485,"impending":34486,"evening":34487,"athy":34488,"proclaimed":34489,"cites":34490,"annapolis":34491,"sani":34492,"marth":34493,"irl":34494,"accommo":34495,"kaa":34496,"fina":34497,"yaa":34498,"disper":34499,"ecar":34500,"bhak":34501,"willy":34502,"ðŁĺĢðŁĺĢ":34503,"mcdermott":34504,"moj":34505,"generational":34506,"usaid":34507,"training":34508,"lonely":34509,"lores":34510,"impecc":34511,"âĢIJ":34512,"beavers":34513,"maki":34514,"heb":34515,"aapl":34516,"åı":34517,"wolverhampton":34518,"leaderboard":34519,"meu":34520,"cfa":34521,"eastern":34522,"hur":34523,"civilwar":34524,"ourage":34525,"horned":34526,"lehigh":34527,"awards":34528,"evident":34529,"gigab":34530,"rous":34531,"madel":34532,"robyn":34533,"urgently":34534,"kors":34535,"enas":34536,"heisman":34537,"bambam":34538,"fabian":34539,"fom":34540,"evaluating":34541,"assembly":34542,"outsourcing":34543,"huntsville":34544,"ðŁĶª":34545,"justified":34546,"cashier":34547,"spaper":34548,"buckeye":34549,"analytical":34550,"illuminati":34551,"autho":34552,"oj":34553,"shade":34554,"geelong":34555,"whey":34556,"heaton":34557,"terribly":34558,"elek":34559,"uncharted":34560,"sdlive":34561,"motocross":34562,"hermes":34563,"darshan":34564,"darlington":34565,"cashmere":34566,"gripping":34567,"cilantro":34568,"punish":34569,"...:":34570,"ðŁĴĦ":34571,"instance":34572,"deri":34573,"lobal":34574,"m
ukher":34575,"spar":34576,"thinker":34577,"fremont":34578,"compiled":34579,"colorado":34580,"vigne":34581,"smd":34582,"whead":34583,"village":34584,"leek":34585,"formulae":34586,"tares":34587,"persistence":34588,"??????":34589,"pedago":34590,"hez":34591,"alzheimers":34592,"vulture":34593,"offence":34594,"isgreat":34595,"suffra":34596,"kickin":34597,"hmmmm":34598,"broadway":34599,"ï¸ı@":34600,"arti":34601,"allison":34602,"endorses":34603,"ryu":34604,"lollipop":34605,"soybean":34606,"kendall":34607,"cera":34608,"invade":34609,"(ðŁĵ·:":34610,"converter":34611,"carpets":34612,"hobo":34613,"frit":34614,"peac":34615,"esqu":34616,"ernan":34617,"ouf":34618,"anil":34619,"differ":34620,"ching":34621,"brecht":34622,"spg":34623,"davenport":34624,"strava":34625,"severn":34626,"ngos":34627,"storians":34628,"fete":34629,"paramedic":34630,"jhb":34631,"alamo":34632,"sneaking":34633,"goldcoast":34634,"roofs":34635,"isil":34636,"depicted":34637,"projections":34638,"numb":34639,"oss":34640,"epi":34641,"glucose":34642,"zidane":34643,"infiniti":34644,"íĺĦ":34645,"ransom":34646,"tonics":34647,"falk":34648,"gler":34649,"outw":34650,"ress":34651,"weekly":34652,"theon":34653,"nole":34654,"ðŁĩªðŁĩº":34655,"volley":34656,"summar":34657,"negativity":34658,"samson":34659,"yew":34660,"ausvotes":34661,"jul":34662,"judy":34663,"fart":34664,"prayed":34665,"palate":34666,"multicultural":34667,"doubleheader":34668,"cyclones":34669,"pierre":34670,"ãģ¨":34671,"âĺłï¸ı":34672,"rtw":34673,"converting":34674,"wirral":34675,"lari":34676,"irrelevant":34677,"austinmahone":34678,"anche":34679,"yaan":34680,"sdf":34681,"$.":34682,"exploding":34683,"ultimate":34684,"profici":34685,"gofundme":34686,"cellence":34687,"epstein":34688,"bullied":34689,"septic":34690,"த":34691,"lumber":34692,"cuff":34693,"vscocam":34694,"plor":34695,"ล":34696,"seok":34697,"roto":34698,"venezuelan":34699,"sorta":34700,"spirited":34701,"danielpadilla":34702,"teamsisd":34703,"radioactive":34704,"icelandic":34705,"ðŁĴ¤":34706,"vere":34707,"accommodate":34708,"shipp":34709,"otter":34710,"olina":34711,"ego":34712,"sula":34713,"sanantonio":34714,"deas":34715,"similarities":34716,"âļ¾":34717,"yom":34718,"broward":34719,"å°":34720,"cancun":34721,"verify":34722,"onte":34723,"candlelight":34724,"ìłķ":34725,"infants":34726,"azam":34727,"ðŁĺ°":34728,"leven":34729,"unstable":34730,"bloomington":34731,"xford":34732,"contour":34733,"yp":34734,"innovator":34735,"histories":34736,"poy":34737,"lololol":34738,"expires":34739,"catalo":34740,"billboards":34741,"anab":34742,"elic":34743,"novascotia":34744,"faire":34745,"ìĿ´":34746,"rockwell":34747,"grille":34748,"aztec":34749,"johor":34750,"urstruly":34751,"firen":34752,"dunlop":34753,"idle":34754,"portman":34755,"joes":34756,"txhsfb":34757,"holm":34758,"chamele":34759,"underworld":34760,"loss":34761,"tiem":34762,"therapists":34763,"pasture":34764,"paste":34765,"ingnow":34766,"vulcan":34767,"ragon":34768,"larkin":34769,"oshi":34770,"hoco":34771,"childhood":34772,"umbrel":34773,"successor":34774,"kathy":34775,"izen":34776,"°ï¸ı":34777,"shareholders":34778,"olga":34779,"aib":34780,"heap":34781,"flaming":34782,"rou":34783,"airtel":34784,"ratt":34785,"zane":34786,"vow":34787,"thorough":34788,"snag":34789,"parth":34790,"unconscious":34791,"vey":34792,"newrelease":34793,"ghee":34794,"croatian":34795,"facilitating":34796,"swanson":34797,"astoria":34798,"tology":34799,"mastery":34800,"ðŁ¤ij":34801,"bilbao":34802,"troupe":34803,"theori":34804,"cheyenne":34805,"rott":34806,"shoreline":34807,"grasso":34808,"masterchef":34809,"+)":34810,"vix":348
11,"ellenshow":34812,"asg":34813,"anak":34814,"kuya":34815,"safarilive":34816,"debuting":34817,"blum":34818,"listener":34819,"vins":34820,"bookshelf":34821,"smartcities":34822,"makeyourownlane":34823,";;":34824,"ðŁIJ¯":34825,"rizz":34826,"onward":34827,"bulldog":34828,"bearish":34829,"viruses":34830,"frigh":34831,"linden":34832,"weiser":34833,"snt":34834,"gona":34835,"dresden":34836,"flanders":34837,"cuk":34838,"wheeling":34839,"bau":34840,"atuesday":34841,"surfers":34842,"swift":34843,"mccall":34844,"arbitration":34845,"awd":34846,"monc":34847,"bine":34848,"atx":34849,"refr":34850,"miro":34851,"posey":34852,"nare":34853,"ritter":34854,"âģ¦":34855,"playbook":34856,"blowout":34857,"sportsmanship":34858,"soooooo":34859,"malayalam":34860,"grims":34861,"burbank":34862,"infinity":34863,"sargent":34864,"oitnb":34865,"josephine":34866,"skipping":34867,"parkin":34868,"excursion":34869,"seminars":34870,"johar":34871,"partridge":34872,"postgame":34873,"llll":34874,"blanche":34875,"tempting":34876,"mna":34877,"luka":34878,"isers":34879,"toffee":34880,"barron":34881,"hemmings":34882,"sae":34883,"gohawks":34884,"cupid":34885,"limbs":34886,"conse":34887,"uncommon":34888,"zada":34889,"headshot":34890,"soils":34891,"pioneer":34892,"mamma":34893,"semitic":34894,"pandey":34895,"jamiedornan":34896,"splits":34897,"vela":34898,"soni":34899,"raff":34900,"tmobile":34901,"âŀĸ":34902,"prawns":34903,"liter":34904,"enjoyment":34905,"eggplant":34906,"tub":34907,"cultural":34908,"usic":34909,"suspicion":34910,"sycam":34911,"summed":34912,"madu":34913,"hock":34914,"upwards":34915,"eyeing":34916,"rive":34917,"assassins":34918,"âĤ¬":34919,"outfy":34920,"chives":34921,"tner":34922,"lais":34923,"porridge":34924,"saddest":34925,"wcc":34926,"vicki":34927,"snails":34928,"bizitalk":34929,"millan":34930,"ðŁĮį":34931,"samoa":34932,"jing":34933,"mikey":34934,"guj":34935,"chelms":34936,"eligibility":34937,"armada":34938,"throp":34939,"surgeries":34940,"ãĤ¿":34941,"mohawk":34942,"exits":34943,"mem":34944,"islington":34945,"cme":34946,"landfill":34947,"kaitlyn":34948,"ðŁİ¼":34949,"combinations":34950,"tomorrowland":34951,"verb":34952,"cora":34953,"precisely":34954,"naom":34955,"ðŁĨķ":34956,"shrink":34957,"softly":34958,"mercede":34959,"mandel":34960,"poodle":34961,"ballerina":34962,"soph":34963,"juxta":34964,"yat":34965,"aryan":34966,"hesitate":34967,"lowered":34968,"gular":34969,"dungeonsand":34970,"ronan":34971,"myri":34972,"spf":34973,"menopau":34974,"grasp":34975,"pathi":34976,"feasi":34977,"flaw":34978,"shistory":34979,"steward":34980,"ggle":34981,"fayre":34982,"clique":34983,"credibility":34984,"yog":34985,"section":34986,"musko":34987,"seville":34988,"nott":34989,"calm":34990,"mateo":34991,"indicted":34992,"fiba":34993,"byl":34994,"lino":34995,"ukin":34996,"!!#":34997,"enigma":34998,"sirius":34999,"busc":35000,"ðŁįĬ":35001,"mackerel":35002,"psalms":35003,"aat":35004,"tomorrowspaper":35005,"ðŁĺĸ":35006,"pfc":35007,"...........":35008,"shrek":35009,"mullet":35010,"osh":35011,"dangerously":35012,"immensely":35013,"amur":35014,"ðŁįĤ":35015,"propor":35016,"sya":35017,"londonmarathon":35018,"above":35019,"obligatory":35020,"prov":35021,"racha":35022,"alexis":35023,"primary":35024,"shh":35025,"ethernet":35026,"dstv":35027,"cougar":35028,"unlucky":35029,"nil":35030,"steakhouse":35031,"mela":35032,"fcbayern":35033,"causeway":35034,"catherine":35035,"fluorescent":35036,"nxt":35037,"tokyo":35038,"ausp":35039,"relegation":35040,"quizz":35041,"shoreditch":35042,"proudtobe":35043,"promos":35044,"interacting":35045,"homebrew":35046,"daesh":3
5047,"wpg":35048,"steadily":35049,"provinces":35050,"ballots":35051,"iah":35052,"alto":35053,"<<<":35054,"youu":35055,"riley":35056,"preference":35057,"traverse":35058,"incense":35059,"ammunition":35060,"hodges":35061,"#@":35062,"hailstate":35063,"tartan":35064,"witchcraft":35065,"ventilation":35066,"libertarian":35067,"!âĢ¦":35068,"owes":35069,"%!":35070,"ongchang":35071,"brushing":35072,"leic":35073,"fiber":35074,"underattack":35075,"download":35076,"expir":35077,"hyo":35078,"pompey":35079,"mcbride":35080,"yag":35081,"stree":35082,"combat":35083,"tending":35084,"aira":35085,"guggen":35086,"abra":35087,"inna":35088,"flips":35089,"awal":35090,"mach":35091,"dollar":35092,"inspirations":35093,"zum":35094,"odu":35095,"itty":35096,"videogame":35097,"aquaman":35098,"haru":35099,"belfast":35100,"jeb":35101,"butch":35102,"usgs":35103,"calculus":35104,"goyal":35105,"morgen":35106,"xfinity":35107,"standup":35108,"contracep":35109,"sabre":35110,"nabe":35111,"insecure":35112,"generously":35113,"epitome":35114,"lw":35115,"tca":35116,"narratives":35117,"donnell":35118,"pandas":35119,"bergh":35120,"tut":35121,"keral":35122,"felicity":35123,"brampton":35124,"quintet":35125,"nomore":35126,"ðŁĶij":35127,"loi":35128,"alhamdulil":35129,"ðŁĶ¥ðŁĶĹ":35130,"stoner":35131,"shawl":35132,"clinical":35133,"brendan":35134,"gone":35135,"flawed":35136,"trippy":35137,"jg":35138,"allocation":35139,"poaching":35140,"vevo":35141,"mocks":35142,"leftist":35143,"bonuses":35144,"condemned":35145,"ability":35146,"stating":35147,"microbiome":35148,"biologist":35149,"foryou":35150,"wahlberg":35151,"ssor":35152,"iftar":35153,"wul":35154,"ÑĦоÑĤ":35155,"pomer":35156,"meme":35157,"verte":35158,"trell":35159,"trait":35160,"inlet":35161,"hormones":35162,"deliberately":35163,"villar":35164,"battleship":35165,"pbl":35166,"twenti":35167,"hokies":35168,"dalail":35169,"saya":35170,"mayfair":35171,"hans":35172,"diets":35173,"⾨⾨":35174,"odin":35175,"hotspur":35176,"papi":35177,"kana":35178,"kamp":35179,"finna":35180,"flotus":35181,"tians":35182,"unicorns":35183,"tribeca":35184,"changers":35185,"foreground":35186,"outa":35187,"invaders":35188,"gettys":35189,"tomorrowspaperstoday":35190,"macmillan":35191,"handwritten":35192,"wfp":35193,"ude":35194,"stateof":35195,"based":35196,"âĺģï¸ı":35197,"casm":35198,"psyched":35199,"historians":35200,"fold":35201,"dda":35202,"aggrav":35203,"pans":35204,"greenway":35205,"ausv":35206,"ðŁĺ¶":35207,"shraddha":35208,"index":35209,"besti":35210,"zimmer":35211,"tness":35212,"eyeshadow":35213,"otte":35214,"gots":35215,"distributing":35216,"promin":35217,"yol":35218,"acea":35219,"tramrahim":35220,"hooper":35221,"supreme":35222,"jammin":35223,"intuitive":35224,"qualifications":35225,"slim":35226,"siddi":35227,"jayne":35228,"tripping":35229,"gtx":35230,"puns":35231,"emanuel":35232,"omg":35233,"midsummer":35234,"into":35235,"succulent":35236,"rien":35237,"newmexico":35238,"oor":35239,"hooking":35240,"inf":35241,"ðŁ¤Ŀ":35242,"flirting":35243,"nahi":35244,"gfriend":35245,"tps":35246,"helix":35247,"zs":35248,"onie":35249,"ctf":35250,"kris":35251,"irresistible":35252,"flap":35253,"ðŁijıðŁı»ðŁijıðŁı»":35254,"uswnt":35255,"rud":35256,"ramps":35257,"pinoy":35258,"otw":35259,"lolz":35260,"lowering":35261,"favorite":35262,"tmc":35263,"phrases":35264,"hermi":35265,"averaging":35266,"embr":35267,"beno":35268,"estuary":35269,"sleeve":35270,"ribbons":35271,"tash":35272,"ู":35273,"xf":35274,"awgs":35275,"sunited":35276,"breweries":35277,"anirud":35278,"punches":35279,"oldie":35280,"ipads":35281,"wifey":35282,"landlords":35283,"dji":
35284,"gunner":35285,"íķ´":35286,"texan":35287,"exop":35288,"cassandra":35289,"soff":35290,"ðŁļ«":35291,"ighton":35292,"bakers":35293,"awarenessweek":35294,"vall":35295,"earp":35296,"btsbbmas":35297,"apologizes":35298,"âļĵï¸ı":35299,"wasps":35300,"statesman":35301,"snatch":35302,"watchdog":35303,"rafi":35304,"afterparty":35305,"spike":35306,"jer":35307,"periph":35308,"rnc":35309,"mull":35310,"leen":35311,"shies":35312,"lieu":35313,"urstrulymahesh":35314,"merton":35315,"desai":35316,"shif":35317,"ðŁĮ±":35318,"pedic":35319,"gosling":35320,"arranging":35321,"wwg":35322,"geny":35323,"youuu":35324,"netflix":35325,"ettes":35326,"kwi":35327,"bernardino":35328,"amiga":35329,"ب":35330,"kashmiri":35331,"tings":35332,"emeritus":35333,"decat":35334,"abdomin":35335,"dci":35336,"phases":35337,"djan":35338,"beam":35339,"opry":35340,"ished":35341,"theellenshow":35342,"thest":35343,"habitats":35344,"toons":35345,"mclaughlin":35346,"ripper":35347,"microbiology":35348,"talaga":35349,"clueless":35350,"ssu":35351,"croche":35352,"bromance":35353,"longevity":35354,"zagreb":35355,"prevented":35356,"trave":35357,"spoilt":35358,"darryl":35359,"migraine":35360,"alcat":35361,"dddd":35362,"viv":35363,"serpent":35364,"mattel":35365,"jama":35366,"conquest":35367,"îĦ":35368,"samsung":35369,"presbyterian":35370,"ketch":35371,"firefox":35372,"motif":35373,"lec":35374,"chopping":35375,"cherno":35376,"jann":35377,"ðŁIJ°":35378,"prolon":35379,"wakeup":35380,"convergence":35381,"merseyside":35382,"heartbroken":35383,"looming":35384,"hallucin":35385,"maize":35386,"communism":35387,"moh":35388,"twitterstorians":35389,"sergey":35390,"reseller":35391,"favorable":35392,"edgy":35393,"reiter":35394,"malaga":35395,"liveme":35396,"kahn":35397,"pulsion":35398,"bigg":35399,"kimkardashian":35400,"atio":35401,"tyranny":35402,"ruption":35403,"qant":35404,"proven":35405,"byz":35406,"pushaw":35407,"kristin":35408,"eer":35409,"tardis":35410,"riz":35411,"awaken":35412,"miko":35413,"undocumented":35414,"pathfinder":35415,"indirect":35416,"resembles":35417,"hler":35418,"concealed":35419,"scandal":35420,"reim":35421,"dnb":35422,"critters":35423,"attendant":35424,"apprenticeships":35425,"aau":35426,"screamed":35427,"lsu":35428,"fah":35429,"harbour":35430,"edd":35431,"batsman":35432,"liss":35433,"misha":35434,"spaniel":35435,"itf":35436,"advancement":35437,"fac":35438,"closeup":35439,"cecilia":35440,"medic":35441,"narcissi":35442,"lavish":35443,"giac":35444,"mays":35445,"leit":35446,"winewednesday":35447,"pushaward":35448,"letto":35449,"currents":35450,"bugatti":35451,"outine":35452,"wj":35453,"undo":35454,"lerosis":35455,"devotional":35456,"ðŁij«":35457,"onna":35458,"faisal":35459,"sauna":35460,"himachal":35461,"amii":35462,"à®®":35463,"dizzy":35464,"screenwriting":35465,"phx":35466,"spn":35467,"icki":35468,"agirl":35469,"fishes":35470,"wbz":35471,"pim":35472,"boar":35473,"acid":35474,"!..":35475,"rockefeller":35476,"nga":35477,"drastically":35478,"simplify":35479,"drumming":35480,"autumnal":35481,"gurmee":35482,"lorde":35483,"joann":35484,"giveup":35485,"bour":35486,"amura":35487,"derland":35488,"simpler":35489,"watson":35490,"trident":35491,"concordia":35492,"bellum":35493,"brek":35494,"dumplings":35495,"vion":35496,"dungeonsanddragons":35497,"spri":35498,"ascension":35499,"wildatlantic":35500,"ust":35501,"robins":35502,"legion":35503,"insist":35504,"jaro":35505,"guess":35506,"sob":35507,"bighit":35508,"poolside":35509,"negotiating":35510,"mcgill":35511,"bild":35512,"technicians":35513,"mitigation":35514,"ajaydevgn":35515,"bto":35516,"anten":35517
,"cosmopolitan":35518,"ðŁĺĬðŁĺĬðŁĺĬðŁĺĬ":35519,"patrioti":35520,"temper":35521,"promenade":35522,"navajo":35523,"namm":35524,"wrinkles":35525,"dcfc":35526,"leach":35527,"brunette":35528,"rf":35529,"coutinho":35530,"alti":35531,"traditionally":35532,"optome":35533,"naz":35534,"accordingly":35535,"recard":35536,"deets":35537,"swell":35538,"posure":35539,"whitening":35540,"stranger":35541,"illion":35542,"hereford":35543,"uwu":35544,"robber":35545,"cotswolds":35546,"clen":35547,"gorge":35548,"namaste":35549,"relish":35550,"griff":35551,"adrenaline":35552,"blasio":35553,"vale":35554,"ê²":35555,"tolerate":35556,"railminindia":35557,"jensen":35558,"hoven":35559,"ellu":35560,"obsole":35561,"eisenhower":35562,"unidentified":35563,"thanniversary":35564,"bodyguard":35565,"د":35566,"idge":35567,"schal":35568,"stockport":35569,"sni":35570,"retaining":35571,"popo":35572,"pixie":35573,"olithic":35574,"kier":35575,"hajj":35576,"saz":35577,"corbin":35578,"!!!!!!!!!!":35579,"vit":35580,"megat":35581,"deh":35582,"circuit":35583,"affleck":35584,"theoretical":35585,"hopeless":35586,"uab":35587,"slump":35588,"bice":35589,"jammed":35590,"letstalk":35591,"cani":35592,"sideways":35593,"labyrinth":35594,"refs":35595,"hahn":35596,"jared":35597,"ðŁį¹":35598,"jambo":35599,"phyl":35600,"enhancement":35601,"ctr":35602,"fullest":35603,"seye":35604,"doba":35605,"choic":35606,"yos":35607,"cbj":35608,"andré":35609,"rewatch":35610,"prima":35611,"doctrine":35612,"forgets":35613,"uhm":35614,"around":35615,"ule":35616,"artlovers":35617,"shiraz":35618,"harth":35619,"extor":35620,"Å¡":35621,"unexpectedly":35622,"elius":35623,"yx":35624,"emmy":35625,"seac":35626,"ðŁijĩðŁijĩðŁijĩ":35627,"corrected":35628,"combu":35629,"womanc":35630,"cough":35631,"whatson":35632,"publishes":35633,"diversity":35634,"backbone":35635,"lockdown":35636,"mesmerizing":35637,"norte":35638,"mab":35639,"designer":35640,"íģ":35641,"ragh":35642,"molecules":35643,"getoutside":35644,"thebeatles":35645,"semiconduc":35646,"nacho":35647,"lunes":35648,"hammers":35649,"sultan":35650,"oon":35651,"feren":35652,"attach":35653,"arqu":35654,"uttarakhand":35655,"sash":35656,";-":35657,"tread":35658,"iko":35659,"arthur":35660,"scandinavian":35661,"ration":35662,"gael":35663,"chargeable":35664,"fishy":35665,"vma":35666,"handbags":35667,"chara":35668,"ayne":35669,"defam":35670,"settlers":35671,"qadri":35672,"palais":35673,"inwx":35674,"apocalyptic":35675,"pooja":35676,"aes":35677,"atories":35678,"proofing":35679,"nlp":35680,"tsla":35681,"vina":35682,"lido":35683,"deephouse":35684,"informatics":35685,"vv":35686,"ppings":35687,"diss":35688,"ï":35689,"uhuru":35690,"stony":35691,"betrayed":35692,"baff":35693,"myra":35694,"aspen":35695,"allowance":35696,"tamara":35697,"cif":35698,"corbett":35699,"serge":35700,"digo":35701,"ambigu":35702,"painters":35703,"pcr":35704,"pca":35705,"noms":35706,"loft":35707,"vee":35708,"opendata":35709,"ðŁIJ±":35710,"alexandre":35711,"identifies":35712,"fantasyfootball":35713,"reproduction":35714,"bromley":35715,"wareagle":35716,"mmer":35717,"pss":35718,"cues":35719,"ayat":35720,"hutchinson":35721,"sarac":35722,"jackman":35723,"irah":35724,"apink":35725,"cols":35726,"aussies":35727,"execs":35728,"dayton":35729,"ðŁĻĨ":35730,"imv":35731,"haram":35732,"chuckle":35733,"authenticity":35734,"ardo":35735,"incubator":35736,"ส":35737,"photoshopped":35738,"embraced":35739,"fightfor":35740,"gorman":35741,"zzzz":35742,"scholastic":35743,"crisps":35744,"teapo":35745,"midnight":35746,"gaine":35747,"collier":35748,"sate":35749,"dette":35750,"åŃ":35751,"imagine":35752,
"iff":35753,"twili":35754,"ification":35755,"teatro":35756,"norma":35757,"esur":35758,"emergencies":35759,"riseup":35760,"ringer":35761,"hassle":35762,"caitlyn":35763,"tranquil":35764,"versa":35765,"seb":35766,"overlook":35767,"gini":35768,"bogo":35769,"sere":35770,"mayne":35771,"henrik":35772,"contaminated":35773,"rhapsody":35774,"proportion":35775,"wildatlanticway":35776,"âģ©.":35777,"organisers":35778,"trane":35779,"standard":35780,"sperm":35781,"launcher":35782,"ricci":35783,"herts":35784,"paperwork":35785,"showcased":35786,"meryl":35787,"pena":35788,"pimp":35789,"disastrous":35790,"^.^":35791,"phara":35792,"xis":35793,"frontal":35794,"swirl":35795,"spills":35796,"swagger":35797,"smartwatch":35798,"sizzling":35799,"saviour":35800,"catar":35801,"bbcr":35802,"refurbishment":35803,"dris":35804,"citroen":35805,"absorb":35806,"patriotism":35807,"illeg":35808,"chromo":35809,"freshers":35810,"rus":35811,"limiting":35812,"efish":35813,"downed":35814,"mandir":35815,"hazelnut":35816,"pall":35817,"macon":35818,"disappearing":35819,"qualifies":35820,"boon":35821,"barracks":35822,"amine":35823,"gendere":35824,"ðŁļĺ":35825,"jes":35826,"ãĥŃ":35827,"quito":35828,"middleweight":35829,"schau":35830,"quadru":35831,"aciones":35832,"limitless":35833,"ðŁijĮðŁı½":35834,"chman":35835,"arav":35836,"regulators":35837,"itup":35838,"battersea":35839,"milford":35840,"gz":35841,"ticking":35842,"ghou":35843,"crushes":35844,"tutu":35845,"dreadful":35846,"famine":35847,"forchange":35848,"dalailama":35849,"ðŁĴį":35850,"whitaker":35851,"hashmi":35852,"hus":35853,"vod":35854,"bette":35855,"aaah":35856,"isoo":35857,"ðŁ¥Ī":35858,"haar":35859,"laine":35860,"bv":35861,"allday":35862,"sprout":35863,"indiegames":35864,"freebie":35865,"greeks":35866,"butler":35867,"illin":35868,"haal":35869,"wareness":35870,"sima":35871,"publichealth":35872,"gama":35873,"waa":35874,"oung":35875,"goooo":35876,"okinawa":35877,"offenders":35878,"impose":35879,"hoc":35880,"youngster":35881,"storyteller":35882,"scap":35883,"fighter":35884,"+,":35885,"whites":35886,"musicmonday":35887,"reza":35888,"goducks":35889,"bria":35890,"mium":35891,"casper":35892,"crumbs":35893,"aad":35894,"martialarts":35895,"chp":35896,"rigged":35897,"tng":35898,"harvested":35899,"sak":35900,"dojo":35901,"millwall":35902,"bnw":35903,"ocd":35904,"historyof":35905,"tmr":35906,"sirens":35907,"fanci":35908,"caregivers":35909,"vira":35910,"soni":35911,"recurring":35912,"acknowledged":35913,"ðŁıŁ":35914,"ophile":35915,"bucky":35916,"stressing":35917,"rook":35918,"digger":35919,"vival":35920,"sando":35921,"fleet":35922,"siers":35923,"selcaday":35924,"refreshed":35925,"antifa":35926,"aque":35927,"polo":35928,"disappearance":35929,"demb":35930,"âĮļï¸ı":35931,"rented":35932,"berger":35933,"gmb":35934,"cula":35935,"ssal":35936,"goody":35937,"uhh":35938,"marcelo":35939,"wanna":35940,"software":35941,"shopsmall":35942,"turtle":35943,"tomas":35944,"frisco":35945,"ðŁĺįðŁĴķ":35946,"jimenez":35947,"csu":35948,"dayz":35949,"ando":35950,"wynne":35951,"choreographer":35952,"cervical":35953,"trailblazers":35954,"edg":35955,"zendaya":35956,"travelblog":35957,"els":35958,"wholesome":35959,"cog":35960,"labout":35961,"arney":35962,"delle":35963,"suisse":35964,"masi":35965,"inese":35966,"ombe":35967,"fiddle":35968,"reclaim":35969,"pau":35970,"watcher":35971,"slain":35972,"berty":35973,"optimum":35974,"elites":35975,"minis":35976,"turkey":35977,"patrols":35978,"gerard":35979,"aureli":35980,"wildly":35981,"waltz":35982,"brgy":35983,"wob":35984,"crest":35985,"+++":35986,"vez":35987,"frosted":35988,"david
o":35989,"thex":35990,"paramedics":35991,"pinto":35992,"hank":35993,"dupont":35994,"urg":35995,"fostering":35996,"micropoetry":35997,"spectre":35998,"---->":35999,"neuro":36000,"frida":36001,"musical":36002,"galveston":36003,"effic":36004,"scape":36005,"palazzo":36006,"thall":36007,"provisional":36008,"pjs":36009,"aure":36010,"ðŁĶľ":36011,"mamamoo":36012,"kitties":36013,"cree":36014,"wak":36015,"loool":36016,"lupus":36017,"cnblue":36018,"ú":36019,"ðŁİ¬":36020,"raced":36021,"trose":36022,"omas":36023,"stride":36024,"coors":36025,"⤵ï¸ı":36026,"incomparable":36027,"cyril":36028,"broader":36029,"areclipse":36030,"ðŁįĶ":36031,"interval":36032,"tiru":36033,"coworking":36034,"waco":36035,"aham":36036,"abee":36037,"flourish":36038,"thetimes":36039,"olini":36040,"kickboxing":36041,"lucer":36042,"atla":36043,"asun":36044,"casserole":36045,"miaw":36046,"lobbying":36047,"janice":36048,"cirque":36049,"reflex":36050,"leary":36051,"sanatomy":36052,"tempest":36053,"semb":36054,"murdering":36055,"usav":36056,"robo":36057,"onet":36058,"pcc":36059,"natives":36060,"lifeof":36061,"saha":36062,"ruthless":36063,"relates":36064,"appetizer":36065,"pyeongchang":36066,"nord":36067,"eru":36068,"athing":36069,"ugly":36070,"plying":36071,"brance":36072,"organise":36073,"kendra":36074,"dato":36075,"cheeses":36076,"parma":36077,"burnout":36078,"astra":36079,"pretoria":36080,"adjustment":36081,"uku":36082,"slo":36083,"liken":36084,"favors":36085,"clive":36086,"beets":36087,"snowdonia":36088,"gotv":36089,"syn":36090,"openhouse":36091,"pani":36092,"portrayed":36093,"slated":36094,"mecca":36095,"renal":36096,"supportsmallstreamers":36097,"staffs":36098,"dao":36099,"biker":36100,"viktor":36101,"titus":36102,"admired":36103,"ðŁĵ±":36104,"hurrican":36105,"heats":36106,"glory":36107,"photogenic":36108,"meri":36109,"depor":36110,"burnham":36111,"orangu":36112,"djing":36113,"impressionism":36114,"ignition":36115,"cai":36116,"wynn":36117,"depe":36118,"coveted":36119,"collagen":36120,"saus":36121,"ornam":36122,"administrators":36123,"sson":36124,"nhpolitics":36125,"hahahahahahahaha":36126,"aspirations":36127,"rgb":36128,"swollen":36129,"sowe":36130,"scr":36131,"divergent":36132,"houghton":36133,"hanoi":36134,"dory":36135,"niki":36136,"landry":36137,"bcci":36138,"ðŁijĮðŁijĮ":36139,"ismail":36140,"tripod":36141,"herd":36142,"bhatt":36143,"dressage":36144,"tabby":36145,"inguish":36146,"huron":36147,"à³į":36148,"Ãł":36149,"todas":36150,"evangelical":36151,"chords":36152,"stjohn":36153,"sloppy":36154,"martyr":36155,"facebook":36156,"alight":36157,"sensei":36158,"kathniel":36159,"rites":36160,"zione":36161,"uo":36162,"revelations":36163,"weightlifting":36164,"pano":36165,"ncwx":36166,"acton":36167,"à®ķ":36168,"ز":36169,"soma":36170,"à¸Ĺ":36171,"respecting":36172,"marche":36173,"foreman":36174,"betty":36175,"kik":36176,"shibu":36177,"poon":36178,"argyle":36179,"kswx":36180,"etz":36181,"marbella":36182,"brackets":36183,"standby":36184,"fireside":36185,"defiance":36186,"vex":36187,"britannia":36188,"inhabit":36189,"appoint":36190,"piyush":36191,"leash":36192,"sciento":36193,"flask":36194,"senna":36195,">:":36196,"atroc":36197,"sanderson":36198,"idlib":36199,"dhanush":36200,"ðŁĺĻ":36201,"enthr":36202,"hitch":36203,"dedly":36204,"alley":36205,"dork":36206,"mondo":36207,"cuddly":36208,"missin":36209,"yesss":36210,"nighting":36211,"jpn":36212,"wary":36213,"umpire":36214,"maz":36215,"ê³":36216,"babs":36217,"ĭãģ":36218,"stanford":36219,"possessed":36220,"exceeded":36221,"ðŁĶ¶":36222,"wallart":36223,"trap":36224,"jil":36225,"hibis":36226,"spying":36
227,"scribe":36228,"khalil":36229,"translator":36230,"lumb":36231,"dized":36232,"chc":36233,"supervision":36234,"shutter":36235,"jag":36236,"_*":36237,"yesterdays":36238,"msf":36239,"hihi":36240,"gonzaga":36241,"gillespie":36242,"vivek":36243,"ecstatic":36244,"thismorning":36245,"chus":36246,"edes":36247,"stoned":36248,"bees":36249,"ðŁĩ¹ðŁĩ":36250,"turin":36251,"hover":36252,"atrics":36253,"stern":36254,"samheughan":36255,"autism":36256,"miya":36257,"eyewitness":36258,"writings":36259,"traveltips":36260,"chutney":36261,"pxrtg":36262,"kenyans":36263,"mystic":36264,"krit":36265,"/$":36266,"redhead":36267,"worldly":36268,"amus":36269,"opla":36270,"leve":36271,"gabbana":36272,"seen":36273,"oclock":36274,"ganga":36275,"keenan":36276,"scent":36277,"oldies":36278,"gogreen":36279,"cornerstone":36280,"comply":36281,"concours":36282,"ðŁİ¶ðŁİ¶":36283,"haan":36284,"confis":36285,"awson":36286,"cleop":36287,"îĢ":36288,"suzu":36289,"sauté":36290,"algar":36291,"subscriber":36292,"esteemed":36293,"ãĤ¤ãĥ":36294,"worthwhile":36295,"melrose":36296,"flock":36297,"brightly":36298,"violinist":36299,"pere":36300,"slipping":36301,"andco":36302,"sigh":36303,"havan":36304,"culo":36305,"msa":36306,"fibrosis":36307,"matilda":36308,"rafting":36309,"award":36310,"ëª":36311,"mmmm":36312,"geaux":36313,"steiner":36314,"sinn":36315,"helpers":36316,"beetles":36317,"aimee":36318,"taiwan":36319,"pistachio":36320,"macbeth":36321,"mzan":36322,"descendants":36323,"onsale":36324,"inr":36325,"ilm":36326,"grouse":36327,"saig":36328,"mow":36329,"bigre":36330,"adjustments":36331,"tula":36332,"mathew":36333,"translates":36334,"muh":36335,"bollah":36336,"ðŁĴĽðŁĴĻ":36337,"amores":36338,"abouts":36339,"bombshell":36340,"blaster":36341,"xavi":36342,"sns":36343,"kroger":36344,"gather":36345,"eradic":36346,"daft":36347,"chemo":36348,"benches":36349,"ðŁĩ©ðŁĩ":36350,"utv":36351,"oura":36352,"nko":36353,"gatorade":36354,"biafra":36355,"okstate":36356,"imdanielpadilla":36357,"domains":36358,"openingday":36359,"kiddo":36360,"doi":36361,"rice":36362,"daycare":36363,"macmillan":36364,"bathurst":36365,"cheerleading":36366,"ðŁ¦ģ":36367,"cashback":36368,"kwon":36369,"hobbies":36370,"exempl":36371,"riesling":36372,"âļª":36373,"agles":36374,"nys":36375,"everything":36376,"navis":36377,"addi":36378,"magnesium":36379,"facelift":36380,"arkham":36381,"grandes":36382,"extremist":36383,"donat":36384,"vitality":36385,"pumpkin":36386,"betta":36387,"sltd":36388,"artisan":36389,"liby":36390,"peaked":36391,"ahhhhh":36392,"maryam":36393,"assim":36394,"unsc":36395,"mente":36396,"alaya":36397,"lowers":36398,"aras":36399,"griev":36400,"leip":36401,"grati":36402,"crises":36403,"sprints":36404,"execute":36405,"wto":36406,"msd":36407,"magical":36408,"reviewer":36409,"sparkles":36410,"jukebox":36411,"ðŁĺĤâĿ¤ï¸ı":36412,"payback":36413,"licenses":36414,"dunkin":36415,"belt":36416,"lakewood":36417,"hateful":36418,"budgets":36419,"revamped":36420,"pherson":36421,"kyiv":36422,"wentworth":36423,"rosen":36424,"cruise":36425,"giggle":36426,"defstar":36427,"assassinscre":36428,"ymouth":36429,"winkle":36430,"wfc":36431,"bandwagon":36432,"bkk":36433,"wiring":36434,"kearney":36435,"southside":36436,"petit":36437,"!ðŁĺį":36438,"nordic":36439,"mirza":36440,"mugabe":36441,"vl":36442,"scones":36443,"ktv":36444,"sandal":36445,"duc":36446,"malls":36447,"ðŁĴŀðŁĴŀ":36448,"itc":36449,"alay":36450,"impair":36451,"unrest":36452,"floss":36453,"cé":36454,"abou":36455,"varying":36456,"museo":36457,"server":36458,"diya":36459,"hibiscus":36460,"eroy":36461,"merritt":36462,"findom":36463,"fpp":36464,
"unusually":36465,"gott":36466,"contingent":36467,"aliaa":36468,"ballon":36469,"jol":36470,"hiked":36471,"zyme":36472,"ayr":36473,"agn":36474,"gaz":36475,"periodic":36476,"sparty":36477,"practising":36478,"linton":36479,"talis":36480,"cypri":36481,"womaninbiz":36482,"radiodisney":36483,"ðŁĮ¼":36484,"jumpers":36485,"endocr":36486,"ðŁļ¨ðŁļ¨":36487,"andon":36488,"sharapo":36489,"mier":36490,"masonic":36491,"factories":36492,"vien":36493,"bbers":36494,"ìĽIJ":36495,"hold":36496,"kebab":36497,"beak":36498,"approached":36499,"acmilan":36500,"munro":36501,"kosher":36502,"excellency":36503,"negotiation":36504,"waltdisneyworld":36505,"crouch":36506,"teasing":36507,"suppression":36508,"enya":36509,"bce":36510,"transformationtuesday":36511,"callie":36512,"viswas":36513,"pgat":36514,"icted":36515,"endings":36516,"escu":36517,"recruited":36518,"itfc":36519,"collaborations":36520,"gino":36521,"snuck":36522,"auschwitz":36523,"ifc":36524,"xii":36525,"kesha":36526,"gervais":36527,"cloak":36528,"xl":36529,"saad":36530,"probation":36531,"precau":36532,"macin":36533,"anastasi":36534,"lek":36535,"eazy":36536,"daysofcode":36537,"mariahcarey":36538,"yog":36539,"stitched":36540,"boyfriends":36541,"shar":36542,"phile":36543,"agu":36544,"twinkle":36545,"phishing":36546,"weekender":36547,"icton":36548,"gurmeetramrahim":36549,"alton":36550,"leness":36551,"allan":36552,"penultimate":36553,"krystal":36554,"gou":36555,"lande":36556,"dismant":36557,"abusing":36558,"norse":36559,"paterson":36560,"edmun":36561,"apan":36562,"xiumin":36563,"skel":36564,"catwalk":36565,"react":36566,"walled":36567,"tangle":36568,"bryn":36569,"veto":36570,"supermoon":36571,"casablanc":36572,"appreciates":36573,"skid":36574,"both":36575,"catalina":36576,"eleague":36577,"cybermonday":36578,"cautious":36579,"ðŁ¤ĵ":36580,"novo":36581,"hampton":36582,"haye":36583,"josef":36584,"varan":36585,"lobos":36586,"roanoke":36587,"orphans":36588,"ttin":36589,"squads":36590,"ishqbaaaz":36591,"blackpanther":36592,"etu":36593,"ksh":36594,"crumble":36595,"cessna":36596,"relieved":36597,"scully":36598,"pollinators":36599,"explorecanada":36600,"kies":36601,"kamloops":36602,"kiran":36603,"primal":36604,"settlements":36605,"hotspot":36606,"brainstorming":36607,"cedric":36608,"biennial":36609,"shant":36610,"âĻ¡âĻ¡âĻ¡":36611,"doon":36612,"hearn":36613,"walkway":36614,"fem":36615,"veal":36616,"deportation":36617,"toxins":36618,"eliminating":36619,"descending":36620,"bythe":36621,"blasphe":36622,"hasta":36623,"complement":36624,"ascent":36625,"riga":36626,"provost":36627,"âĸª":36628,"weeping":36629,"antisemitism":36630,"employee":36631,"unearthed":36632,"pino":36633,"natalie":36634,"blad":36635,"angola":36636,"lockheed":36637,"inian":36638,"agr":36639,"nister":36640,"impala":36641,"mke":36642,"fanatic":36643,"âĺħâĺħ":36644,"ðŁij¸":36645,"luch":36646,"simplified":36647,"gallery":36648,"economic":36649,"cyborg":36650,"coni":36651,"selma":36652,"inception":36653,"koala":36654,"dvds":36655,"crested":36656,"mmor":36657,"visible":36658,"nsd":36659,"ðŁĻĮðŁı½":36660,"wunder":36661,"refrigerator":36662,"reopening":36663,"eera":36664,"carousel":36665,"asp":36666,"ballistic":36667,"victory":36668,"motive":36669,"trey":36670,"sharapova":36671,"sii":36672,"monter":36673,"intend":36674,"westchester":36675,"spe":36676,"cymb":36677,"vidal":36678,"llama":36679,"univ":36680,"finer":36681,"craftsmanship":36682,"jazzfest":36683,"bch":36684,"aggio":36685,"ncc":36686,"lambda":36687,"tranquility":36688,"cisco":36689,"baden":36690,"sobbing":36691,"ofi":36692,"gota":36693,"rumored":36694,"warmed"
:36695,"orean":36696,"acton":36697,"marci":36698,"ghani":36699,"âľĵ":36700,"assorted":36701,"pembroke":36702,"penelope":36703,"daf":36704,"atty":36705,"aimo":36706,"pretzel":36707,"carnival":36708,"thanos":36709,"kochi":36710,"mersal":36711,"hamradio":36712,"artwit":36713,"casc":36714,"guerrilla":36715,"kushner":36716,"kapp":36717,"alise":36718,"toddlers":36719,"stewardship":36720,"otti":36721,"terri":36722,"tempe":36723,"restless":36724,"vito":36725,"zayed":36726,"rspb":36727,"pion":36728,"hippo":36729,"hawthorne":36730,"inas":36731,"amily":36732,"nutcracker":36733,"lop":36734,"dali":36735,"tropic":36736,"ðŁ¤ł":36737,"ulo":36738,"jaredle":36739,"pyrene":36740,"paleo":36741,"usair":36742,"mould":36743,"itated":36744,"genetically":36745,"biomass":36746,"ðŁĩ³ðŁĩ±":36747,"dodd":36748,"practiced":36749,"monarchs":36750,"unmanned":36751,"mbuhari":36752,"amal":36753,"photogra":36754,"kool":36755,"brendon":36756,"juices":36757,"cure":36758,"worldbank":36759,"pointers":36760,"ðŁĴĿ":36761,"turf":36762,"leds":36763,"borussia":36764,"baptism":36765,"warwickshire":36766,"mounts":36767,"gayo":36768,"begg":36769,"copied":36770,"asians":36771,"kg":36772,"modernist":36773,"gid":36774,"frontman":36775,"concentrated":36776,"yt":36777,"scavenger":36778,"ironically":36779,"adic":36780,"psn":36781,"ðŁ¥ī":36782,"culturally":36783,"yuv":36784,"macarthur":36785,"fertilizer":36786,"bewithyou":36787,"rigor":36788,"minors":36789,"zoning":36790,"âĸł":36791,"rir":36792,"adolescent":36793,"vinny":36794,"reng":36795,"sandstone":36796,"guet":36797,"westh":36798,"pledged":36799,"laced":36800,"spide":36801,"vai":36802,"tycoon":36803,"seizure":36804,"dup":36805,"appalachian":36806,"rok":36807,"catholics":36808,"seychel":36809,"possess":36810,"lager":36811,"jodi":36812,"champ":36813,"stras":36814,"dina":36815,"centuri":36816,"calder":36817,"bluray":36818,"ðŁĩ¨ðŁĩ³":36819,"modo":36820,"annette":36821,"youtubers":36822,"chaps":36823,"angling":36824,"labeling":36825,"aqui":36826,"pkwy":36827,"lyle":36828,"bisexual":36829,"litur":36830,"dugout":36831,"libby":36832,"greysanatomy":36833,"substances":36834,"augustus":36835,"rallying":36836,"fidel":36837,"ingue":36838,"人":36839,"hallmarkchannel":36840,"toothbrush":36841,"má":36842,"adirond":36843,"aggi":36844,"ðŁĵį:":36845,"crusade":36846,"taxation":36847,"kz":36848,"iver":36849,"doubling":36850,"roomie":36851,"wab":36852,"enrolled":36853,"azon":36854,"aju":36855,"grandchildren":36856,"asdf":36857,"ðŁ¥º":36858,"matic":36859,"oughton":36860,"utilize":36861,"ðŁĴ£":36862,"ponder":36863,"raisin":36864,"dysfunction":36865,"cobain":36866,"butternut":36867,"eman":36868,"sured":36869,"drian":36870,"andfriends":36871,"withthe":36872,"onomy":36873,"heineken":36874,"bridal":36875,"leadership":36876,"pyramids":36877,"deutschland":36878,"jocel":36879,"bowel":36880,"yqr":36881,"horsepower":36882,"beacon":36883,"ingeni":36884,"gradient":36885,"fermented":36886,"moom":36887,"thingy":36888,"potassi":36889,"wristband":36890,"bord":36891,"bodied":36892,"ðŁĺŃðŁĺį":36893,"mapp":36894,"kau":36895,"cyberpunk":36896,"phish":36897,"looking":36898,"coates":36899,"apur":36900,"amie":36901,"uklabour":36902,"atin":36903,"gla":36904,"adoptable":36905,"shelby":36906,"villi":36907,"riya":36908,"mingly":36909,"climber":36910,"bumblebee":36911,"ðŁĺ¸":36912,"csd":36913,"âĿ¥":36914,"hospitalized":36915,"cki":36916,"hater":36917,"chr":36918,"retina":36919,"ita":36920,"fanbase":36921,"beatrice":36922,"gwyne":36923,"goss":36924,"fos":36925,"favorited":36926,"swachhbharat":36927,"malade":36928,"monmouth":36929,"\"[":36930,
"sivan":36931,"shhh":36932,"commanding":36933,"sainsburys":36934,"weed":36935,"gman":36936,"ssw":36937,"reptile":36938,"ivy":36939,"tropics":36940,"rollers":36941,"overcast":36942,"exposition":36943,"masquerade":36944,"mancrush":36945,"waist":36946,"sprinter":36947,"sleet":36948,"levin":36949,"jpg":36950,"_(":36951,"opel":36952,"exploit":36953,"apa":36954,"powe":36955,"wrecking":36956,"jongin":36957,"orb":36958,"erick":36959,"bosco":36960,"praising":36961,"bertr":36962,"towing":36963,"insecurity":36964,"kut":36965,"restocked":36966,"rrp":36967,"prescribed":36968,"trafalgar":36969,"pert":36970,"gases":36971,"apprais":36972,"ghar":36973,"musicals":36974,"âĸ¬âĸ¬":36975,"mcfad":36976,"agony":36977,"condition":36978,"equip":36979,"shik":36980,"atravel":36981,"ðŁĩ¿ðŁĩ¦":36982,"keh":36983,"abduction":36984,"peoria":36985,"wilkins":36986,"gms":36987,"asd":36988,"evi":36989,"ðŁĴĹðŁĴĹðŁĴĹ":36990,"uz":36991,"moc":36992,"hallelujah":36993,"guadalu":36994,"louvre":36995,"drawing":36996,"gove":36997,"phant":36998,"frie":36999,"webdev":37000,"programmer":37001,"zable":37002,"gamescom":37003,"clarify":37004,"lith":37005,"kinky":37006,"âĿ£":37007,"labourdoorstep":37008,"sonata":37009,"juris":37010,"maiden":37011,"viadu":37012,"bucharest":37013,"conditioned":37014,"capitalist":37015,"ude":37016,"psb":37017,"spca":37018,"lulla":37019,"foothills":37020,"kayo":37021,"bond":37022,"womb":37023,"rounder":37024,"cesar":37025,"bursts":37026,"apra":37027,"swoon":37028,"sabrin":37029,"fragrant":37030,"clearer":37031,"kubrick":37032,"climax":37033,"journo":37034,"agle":37035,"ðŁı½âĢįâĻĢï¸ı":37036,"pooch":37037,"hale":37038,"solit":37039,"salmon":37040,"organisms":37041,"bronson":37042,"arten":37043,"hodgson":37044,"alove":37045,"venture":37046,"bbi":37047,"aea":37048,"ðŁIJ¢":37049,"ldn":37050,"dnr":37051,"ozone":37052,"ellas":37053,"manny":37054,"azzur":37055,"unbeat":37056,"truffles":37057,"thong":37058,"mañ":37059,"lasers":37060,"leye":37061,"gettysburg":37062,"backpacks":37063,"oris":37064,"maison":37065,"crawling":37066,"labra":37067,"cling":37068,"dragging":37069,"steal":37070,"doubt":37071,"devan":37072,"ckers":37073,"agentsof":37074,"photobomb":37075,"elonmusk":37076,"aboy":37077,"distances":37078,"storyline":37079,"spi":37080,"northan":37081,"europeans":37082,"whale":37083,"serpent":37084,"ðŁļ²":37085,"fior":37086,"trit":37087,"oxo":37088,"awarding":37089,"classmate":37090,"sufc":37091,"smartest":37092,"riches":37093,"prk":37094,"bigfoot":37095,"armb":37096,"bipolar":37097,"dwelling":37098,"omars":37099,"kwan":37100,"grime":37101,"meng":37102,"frederick":37103,"navarro":37104,"sorrynotsorry":37105,"jaredleto":37106,"pave":37107,"slack":37108,"barnsley":37109,"attar":37110,"eviction":37111,"accumulation":37112,"oir":37113,"catchy":37114,"welter":37115,"vikas":37116,"hassee":37117,"nikita":37118,"moyes":37119,"mathews":37120,"shiv":37121,"gatwick":37122,"profiling":37123,"companions":37124,"marrake":37125,"antics":37126,"ðŁĻĮðŁĻĮðŁĻĮ":37127,"sese":37128,"boi":37129,"bartlett":37130,"poisonous":37131,"abuses":37132,"ymm":37133,"kampala":37134,"guggenheim":37135,"imvkohli":37136,"dolom":37137,"bree":37138,"throttle":37139,"gareth":37140,"fitzpatrick":37141,"unya":37142,"parad":37143,"margot":37144,"jnr":37145,"wea":37146,"potassium":37147,"pnc":37148,"disguised":37149,"crash":37150,"renergy":37151,"illic":37152,"coupled":37153,"niels":37154,"ciones":37155,"æĹ¥":37156,"iment":37157,"despicable":37158,"dye":37159,"whatcha":37160,"connections":37161,"paralympics":37162,"gauntlet":37163,"waitrose":37164,"suicidal":3716
5,"starship":37166,"vapor":37167,"stou":37168,"lawmaker":37169,"cooled":37170,"simo":37171,"theno":37172,"offroad":37173,"jaden":37174,"basque":37175,"vicky":37176,"lukaku":37177,"centro":37178,"trish":37179,"strategist":37180,"medications":37181,"horst":37182,"bfc":37183,"grail":37184,"sharply":37185,"aditya":37186,"tomb":37187,"kaufman":37188,"tripad":37189,"samba":37190,"pastoral":37191,"britney":37192,"sagan":37193,"hillside":37194,"masons":37195,"sara":37196,"zone":37197,"xu":37198,"totes":37199,"robbie":37200,"appen":37201,"montag":37202,"dero":37203,"shortfilm":37204,"charismatic":37205,"tators":37206,"kiba":37207,"andri":37208,"alarming":37209,"splitting":37210,"icar":37211,"thug":37212,"scariest":37213,"sylvester":37214,"anan":37215,"utrecht":37216,"adifference":37217,"meade":37218,"buster":37219,"airstrikes":37220,"cuffs":37221,"accountants":37222,"ðŁĺ¡ðŁĺ¡":37223,"newt":37224,"bott":37225,"issuing":37226,"clancy":37227,"wwenetwork":37228,"kyuhyun":37229,"resemble":37230,"pajamas":37231,"sink":37232,"kinney":37233,"sulph":37234,"ork":37235,"lies":37236,"lagh":37237,"orton":37238,"rahul":37239,"dsc":37240,"wewill":37241,"ream":37242,"colloqui":37243,"sharia":37244,"hectic":37245,"sarcasm":37246,"lander":37247,"tmz":37248,"endorf":37249,"roz":37250,"hammered":37251,"fris":37252,"wadi":37253,"popefrancis":37254,"heit":37255,"flashlight":37256,"unborn":37257,"opes":37258,"holiness":37259,"ðŁIJ¦":37260,"nacht":37261,"imsa":37262,"gracing":37263,"bjp":37264,"verts":37265,"csc":37266,"homeowner":37267,"aque":37268,"bigotry":37269,"annie":37270,"bagh":37271,"âĿ¤ï¸ıðŁĺį":37272,"cari":37273,"thomp":37274,"disposable":37275,"cardiology":37276,"patented":37277,"hhhhhh":37278,"ldr":37279,"stephenson":37280,"crores":37281,"fanning":37282,"climat":37283,"ðŁijįðŁijįðŁijį":37284,"ðŁijįðŁı¼":37285,"aeron":37286,"piccadilly":37287,"bankrupt":37288,"silvia":37289,"employ":37290,"donny":37291,"commenting":37292,"screenwriter":37293,"iota":37294,"cean":37295,"ancers":37296,"tuan":37297,"streetwear":37298,"य":37299,"skine":37300,"espa":37301,"asif":37302,"osce":37303,"sheppard":37304,"morecam":37305,"bottle":37306,"ders":37307,"oracle":37308,"googleplay":37309,"averaged":37310,"edmonton":37311,"stephan":37312,"sisterhood":37313,"crusted":37314,"staggering":37315,"methodology":37316,"congresswoman":37317,"cabo":37318,"triggers":37319,"milky":37320,"glide":37321,"toothpaste":37322,"roommates":37323,"nuff":37324,"guam":37325,"sprinkles":37326,"alternative":37327,"watfordfc":37328,"uoft":37329,"haley":37330,"contacted":37331,"bundy":37332,"prostitu":37333,"ghar":37334,"preston":37335,"onsite":37336,"hilar":37337,"gts":37338,"catt":37339,"hampstead":37340,"??!":37341,"ðŁĩ§ðŁĩ":37342,"bbcqt":37343,"alessandro":37344,"resist":37345,"maidan":37346,"tko":37347,"shading":37348,"pinup":37349,"gallo":37350,"sinu":37351,"atec":37352,"funk":37353,"aclu":37354,"strides":37355,"rhyme":37356,"wetland":37357,"bbcspringwatch":37358,"tins":37359,"wildcard":37360,"stour":37361,"flamenco":37362,"paula":37363,"ontology":37364,"gangsta":37365,"amade":37366,"ãĤ«":37367,"tbs":37368,"skeletal":37369,"runner":37370,"jardin":37371,"harrier":37372,"hunted":37373,"zhen":37374,"believeinfilm":37375,"demean":37376,"auditi":37377,"restart":37378,"chondri":37379,"âĿ¤ï¸ıðŁĴĻ":37380,"mclaren":37381,"gab":37382,"shum":37383,"ausa":37384,"lewisham":37385,"ypg":37386,"kjv":37387,"furnished":37388,"doro":37389,"bonded":37390,"morty":37391,"latitude":37392,"_)":37393,"lova":37394,"waterways":37395,"vinai":37396,"shorth":37397,"drunk":37398,"cay
":37399,"ayana":37400,"kaplan":37401,"cappuccino":37402,"spro":37403,"lifeboat":37404,"hasbro":37405,"spolice":37406,"toron":37407,"doing":37408,"damn":37409,"shree":37410,"fountains":37411,"entation":37412,"maru":37413,"boarder":37414,"topless":37415,"jada":37416,"channing":37417,"ulls":37418,"enclosure":37419,"gibson":37420,"fractured":37421,"britton":37422,"ö":37423,"tous":37424,"porth":37425,"draf":37426,"trailing":37427,"margate":37428,"elife":37429,"downward":37430,"linn":37431,"glades":37432,"girlpower":37433,"akrish":37434,"uki":37435,"ronda":37436,"tsc":37437,"appreciationday":37438,"vising":37439,"loom":37440,"ðŁį³":37441,"mexican":37442,"argos":37443,"yya":37444,"jadine":37445,"southport":37446,"dend":37447,"sista":37448,"redeem":37449,"meng":37450,"braxton":37451,"antioxidant":37452,"skey":37453,"mpg":37454,"finding":37455,"vibration":37456,"ceu":37457,"khart":37458,"dimini":37459,"cline":37460,"shelly":37461,"hines":37462,"īï¸ı":37463,"topical":37464,"nover":37465,"maxx":37466,"primitive":37467,"illustrate":37468,"bounds":37469,"trenton":37470,"jointly":37471,"breeders":37472,"uchi":37473,"wakeupamerica":37474,"bada":37475,"ðŁĹ£ï¸ı":37476,"guacam":37477,"spheres":37478,"peregr":37479,"youthful":37480,"lolo":37481,"birmin":37482,"tly":37483,"jeremycorbyn":37484,"defects":37485,"cosm":37486,"arent":37487,"vaa":37488,"bagels":37489,"mediac":37490,"coriander":37491,"icago":37492,"ghaz":37493,"abbas":37494,"remodel":37495,"structuring":37496,"pum":37497,"outlaw":37498,"adani":37499,"rbc":37500,"gulls":37501,"nli":37502,"confuse":37503,"ðŁijĩðŁı¼":37504,"vila":37505,"mcnamara":37506,"corrections":37507,"mughal":37508,"seri":37509,"regain":37510,"ssb":37511,"leave":37512,"hahahah":37513,"grande":37514,"distressed":37515,"rechargeable":37516,"hoa":37517,"housed":37518,"stil":37519,"attributed":37520,"opathic":37521,"dips":37522,"prit":37523,"headphone":37524,"conclude":37525,"pilo":37526,"het":37527,"utsa":37528,"nitin":37529,"jem":37530,"snippet":37531,"tutoring":37532,"oper":37533,"sunk":37534,"ensla":37535,"chau":37536,"acorn":37537,"quintess":37538,"rankin":37539,"affiliated":37540,"ourlives":37541,"clint":37542,"seater":37543,"isaac":37544,"bashing":37545,"smear":37546,"nurse":37547,"doodling":37548,"\";":37549,"saku":37550,"atrocities":37551,"imam":37552,"gfs":37553,"violating":37554,"commend":37555,"bradshaw":37556,"erville":37557,"billed":37558,"bbe":37559,"thulhu":37560,"iphones":37561,"moose":37562,"dios":37563,"rew":37564,"methane":37565,"strangely":37566,"whisky":37567,"tightly":37568,"spielberg":37569,"radius":37570,"noticing":37571,"wif":37572,"ignati":37573,"ifa":37574,"apis":37575,"wali":37576,"haitian":37577,"bushes":37578,"yz":37579,"vl":37580,"exited":37581,"assel":37582,"truec":37583,"domen":37584,"asher":37585,"inking":37586,"newyearseve":37587,"hendricks":37588,"bati":37589,"ìĿ´ì":37590,"richter":37591,"monsanto":37592,"conline":37593,"agreat":37594,"ðŁ¤¯":37595,"masterpieces":37596,"arn":37597,"roughs":37598,"cleve":37599,"sev":37600,"fashions":37601,"toya":37602,"shail":37603,"copeland":37604,"aquari":37605,"decals":37606,"areyou":37607,"yaya":37608,"astr":37609,"font":37610,"mlm":37611,"arca":37612,"ppor":37613,"pollock":37614,"xperia":37615,"conservation":37616,"chainsaw":37617,"aggie":37618,"?!?!?":37619,"sile":37620,"shon":37621,"ìĹIJ":37622,"notebooks":37623,"marquette":37624,"deus":37625,"bbled":37626,"spicer":37627,"mccabe":37628,"norwich":37629,"modification":37630,"boosted":37631,"strum":37632,"salesman":37633,"bangle":37634,"nissan":37635,"hezbollah":3
7636,"breasts":37637,"aaf":37638,"anthus":37639,"sker":37640,"owed":37641,"heros":37642,"gifs":37643,"fosters":37644,"eaters":37645,"dues":37646,"_/":37647,"lymphoma":37648,"sfam":37649,"megal":37650,"afridi":37651,"agic":37652,"pamp":37653,"jealousy":37654,"ðŁijĮðŁı¼":37655,"calculate":37656,"napping":37657,"gale":37658,"ðŁ¦Ħ":37659,"lubbock":37660,"assumed":37661,"renting":37662,"íĥľ":37663,"suburb":37664,"ãĤ·":37665,"technic":37666,"ucla":37667,"infront":37668,"garnet":37669,"steroids":37670,"striving":37671,"howar":37672,"mover":37673,"leton":37674,"bulldo":37675,"isin":37676,"ciao":37677,"snz":37678,"forefront":37679,"dams":37680,"midwife":37681,"mawards":37682,"clapton":37683,"wein":37684,"subsidies":37685,"sproud":37686,"rotherham":37687,"phantom":37688,"arach":37689,"spiel":37690,"racket":37691,"selamat":37692,"noon":37693,"lbc":37694,"entially":37695,"ðŁĴ¸":37696,"silve":37697,"moud":37698,"kinetic":37699,"yasi":37700,"ðŁİ©":37701,"ool":37702,"miku":37703,"iza":37704,"fera":37705,"floren":37706,"barbershop":37707,"groot":37708,"zest":37709,"nears":37710,"stanis":37711,"zand":37712,"policeman":37713,"jurisdic":37714,"formations":37715,"apparatus":37716,"spd":37717,"artifact":37718,"tosc":37719,"motivating":37720,"womancrush":37721,"redro":37722,"diagnostics":37723,"raza":37724,"outfitters":37725,"elxn":37726,"dodgy":37727,"ryn":37728,"shd":37729,"orthodon":37730,"olde":37731,"jayanti":37732,"balances":37733,"quickest":37734,"canton":37735,"fridayreads":37736,"!*":37737,"naa":37738,"aak":37739,"ðŁĶ·":37740,"behaviors":37741,"raspberries":37742,"ä»":37743,"political":37744,"camil":37745,"åľ":37746,"dik":37747,"astounding":37748,"liebe":37749,"novelty":37750,"turmoil":37751,"sully":37752,"springbreak":37753,"honouring":37754,"ccg":37755,"ðŁıĴ":37756,"mylittle":37757,"kyc":37758,"proms":37759,"ðŁķĬ":37760,"è":37761,"bige":37762,"avril":37763,"ðŁĩµðŁĩ°":37764,"marion":37765,"asants":37766,"surya":37767,"octag":37768,"lufthan":37769,"acron":37770,"fayetteville":37771,"tique":37772,"loves":37773,"enca":37774,"dekalb":37775,"taver":37776,"devote":37777,"auxiliary":37778,"johannes":37779,"treadmill":37780,"ayan":37781,"qur":37782,"donaldson":37783,"cheryl":37784,"\"....":37785,"sven":37786,"kirsty":37787,"gunners":37788,"radish":37789,"oahu":37790,"vsky":37791,"ible":37792,"concourse":37793,"bps":37794,"eloqu":37795,"ashford":37796,"tebow":37797,"roblox":37798,"mada":37799,"driving":37800,"thday":37801,"sproject":37802,"mms":37803,"banded":37804,".!!":37805,"librarians":37806,"flannel":37807,"intolerance":37808,"heral":37809,"çµ":37810,"nemesis":37811,"lista":37812,"tarak":37813,"crypt":37814,"starplus":37815,"vishnu":37816,"scale":37817,"cris":37818,"%),":37819,"jillian":37820,"reggae":37821,"pegasus":37822,"olin":37823,"ipment":37824,"manic":37825,"lfc":37826,"goddard":37827,"iteam":37828,"parlour":37829,"anchors":37830,"leeminho":37831,"tallahassee":37832,"antit":37833,"dho":37834,"kidney":37835,"yash":37836,"battled":37837,"azad":37838,"garis":37839,"faulkner":37840,"sniff":37841,"paparazzi":37842,"edm":37843,"phyllis":37844,"contested":37845,"aaay":37846,"seca":37847,"kton":37848,"velve":37849,"rainier":37850,"forum":37851,"tampab":37852,"hosp":37853,"tractors":37854,"oxfordshire":37855,"notion":37856,"guangzhou":37857,"ðŁĺ¯":37858,"refill":37859,"wednesdaymotivation":37860,"slider":37861,"mukherjee":37862,"pratt":37863,"fontaine":37864,"alphon":37865,"afar":37866,"tsi":37867,"pesticides":37868,"fiends":37869,"mocking":37870,"braw":37871,"transat":37872,"doses":37873,"cores":37874,"homo
phobia":37875,"documenting":37876,"zlatan":37877,"condoms":37878,"sé":37879,"sunset":37880,"kunst":37881,"tonga":37882,"ส":37883,"vation":37884,"spray":37885,"chowder":37886,"raps":37887,"palladium":37888,"norwood":37889,"musichistory":37890,"hooker":37891,"sisi":37892,"osprey":37893,"phys":37894,"conceded":37895,"bobcat":37896,"armad":37897,"zeit":37898,"ÙĦ":37899,"ðŁĺģðŁĺģ":37900,"meridi":37901,"ðŁĩ·ðŁĩº":37902,"cornwall":37903,"!),":37904,"touchdowns":37905,"zeit":37906,"chalet":37907,"mmm":37908,"alche":37909,"gorilla":37910,"foss":37911,"atiku":37912,"luminous":37913,"ivanka":37914,"beek":37915,"stares":37916,"swiss":37917,"âĿ¤âĿ¤âĿ¤âĿ¤":37918,"scrubs":37919,"meath":37920,"gustav":37921,"jogging":37922,"confetti":37923,"asos":37924,"ersfc":37925,"breitbart":37926,"applicable":37927,"authored":37928,"yaho":37929,"hin":37930,"displacement":37931,"jv":37932,"ðŁĮ¹ðŁĮ¹":37933,"otc":37934,"nonprofits":37935,"diecast":37936,"gusto":37937,"intestin":37938,"cages":37939,"meen":37940,"lukas":37941,"mooney":37942,"ðŁĺ·":37943,"veryday":37944,"torah":37945,"ission":37946,"wac":37947,"leveraging":37948,"ishable":37949,"cuse":37950,"lewood":37951,"mayan":37952,"turntable":37953,"juice":37954,"trusty":37955,"tup":37956,"etiquette":37957,"supervisors":37958,"stun":37959,"guzman":37960,"conferen":37961,"rico":37962,"feast":37963,"backward":37964,"polaris":37965,"miche":37966,"jog":37967,"hing":37968,"fieldhouse":37969,"veling":37970,"shocker":37971,"escence":37972,"ा":37973,"vibe":37974,"anastasia":37975,"marched":37976,"killing":37977,"Ķë":37978,"fett":37979,"exoplan":37980,"...(":37981,"snowday":37982,"loh":37983,"irani":37984,"lakhs":37985,"dela":37986,"pocaly":37987,"boomers":37988,"dictatorship":37989,"acer":37990,"turkeys":37991,"quarterfinal":37992,"musketeers":37993,"ðŁĴĽðŁĴļ":37994,"sfx":37995,"museumweek":37996,"scala":37997,"risis":37998,"(ðŁĵ·":37999,"ãĢĤ":38000,"zies":38001,"boeh":38002,"hues":38003,"lusci":38004,"dola":38005,"impeachtrump":38006,"rood":38007,"doncaster":38008,"torre":38009,"heroes":38010,"foyer":38011,"tari":38012,"blurred":38013,"kew":38014,"frankly":38015,"droid":38016,"apal":38017,"м":38018,"yaf":38019,"bret":38020,"paragu":38021,"cacao":38022,"ðŁĻĮðŁı¾":38023,"rue":38024,"headaches":38025,"shawty":38026,"charley":38027,"paler":38028,"gowns":38029,"correctional":38030,"ðŁĺ©ðŁĺ©":38031,"breakingbad":38032,"oling":38033,"dap":38034,"endeavour":38035,"citadel":38036,"trad":38037,"incumbent":38038,"meditate":38039,"footed":38040,"ðŁĴµ":38041,"shabbat":38042,"dayofthe":38043,"willem":38044,"galway":38045,"tored":38046,"marriage":38047,"fillion":38048,"sleeveless":38049,"auditor":38050,"jinyoung":38051,"invincible":38052,"kaduna":38053,"aand":38054,"volcanoes":38055,"moneti":38056,"indiegogo":38057,"buccaneers":38058,"ðŁijīðŁı½":38059,"ãĢĤ":38060,"layton":38061,"cuckoo":38062,"humber":38063,"buzzer":38064,"Ïī":38065,"tore":38066,"strains":38067,"stom":38068,"paine":38069,"swe":38070,"duff":38071,"zou":38072,"simi":38073,"lipp":38074,"urn":38075,"seagu":38076,"ðŁĶ®":38077,"sundae":38078,"hic":38079,"ðŁĺ¨":38080,"bullpen":38081,"uper":38082,"flyover":38083,"aldridge":38084,"globes":38085,"alies":38086,"kenzie":38087,"gees":38088,"ycle":38089,"splin":38090,"magenta":38091,"jha":38092,"balu":38093,"ghorn":38094,"tipper":38095,"wicker":38096,"tasteof":38097,"conclave":38098,"chale":38099,"invasi":38100,"cater":38101,"dioxide":38102,"megab":38103,"winn":38104,"atp":38105,"transformative":38106,"nestled":38107,"hig":38108,"bridging":38109,"lilies":38110,"cheered":38111,"baddest":38
112,"scrolls":38113,"realis":38114,"diplo":38115,"ðŁĶ«":38116,"concession":38117,"preferences":38118,"explodes":38119,"ergon":38120,"introductory":38121,"ineau":38122,"chaf":38123,"somes":38124,"landrover":38125,"spiration":38126,"sexy":38127,"scorecard":38128,"illustrates":38129,"soulmate":38130,"wien":38131,"interdisciplinary":38132,"forecasting":38133,"entities":38134,"glued":38135,"enlar":38136,"curt":38137,"perceptions":38138,"bootleg":38139,"mire":38140,"ashok":38141,"vaz":38142,"horne":38143,"calle":38144,"aculture":38145,"theroy":38146,"nighttime":38147,"ocal":38148,"characterdesign":38149,"armist":38150,"ðŁĺıðŁĺı":38151,"yahoo":38152,"aceae":38153,"tose":38154,"evento":38155,"sout":38156,"nayanth":38157,"whom":38158,"vare":38159,"rigging":38160,"genus":38161,"hive":38162,"commands":38163,"stie":38164,"daya":38165,"ethanol":38166,"enf":38167,"hifi":38168,"fluence":38169,"clemson":38170,"reinvent":38171,"thermometer":38172,"humorous":38173,"emerging":38174,"ación":38175,"ðŁĺĺðŁĺį":38176,"sity":38177,"hawke":38178,"accompanying":38179,"tility":38180,"ðŁĺª":38181,"recess":38182,"protagonist":38183,"lery":38184,"dundal":38185,"intl":38186,"brittany":38187,"qbs":38188,"offthe":38189,"marriages":38190,"howto":38191,"violated":38192,"adelaide":38193,"witt":38194,"lancer":38195,"pakv":38196,"hume":38197,"stade":38198,"bragging":38199,"outright":38200,"adc":38201,"superst":38202,"realtime":38203,"cures":38204,"gardeners":38205,"erock":38206,"dalejr":38207,"vero":38208,"bartol":38209,"moti":38210,"mcfly":38211,"vpn":38212,"stink":38213,"overrated":38214,"guerra":38215,"etis":38216,"athome":38217,"twdfamily":38218,"thab":38219,"tnx":38220,"rafael":38221,"familytravel":38222,"xley":38223,"satanic":38224,"equations":38225,"rudy":38226,"waldorf":38227,"stani":38228,"tube":38229,"measles":38230,"zimmerman":38231,"obligations":38232,"iously":38233,"bowser":38234,"transformer":38235,"shoppe":38236,"shaken":38237,"ghouse":38238,"tod":38239,"ketball":38240,"shareholder":38241,"marca":38242,"kpmg":38243,"akan":38244,"givenchy":38245,"coastal":38246,"auth":38247,"rollercoaster":38248,"marches":38249,"coordinate":38250,"cinema":38251,"apprentices":38252,"parlor":38253,"mito":38254,"menon":38255,"considerable":38256,"barre":38257,"gloss":38258,"enhances":38259,"jazeera":38260,"falmouth":38261,"thrash":38262,"staten":38263,"kzn":38264,"engel":38265,"samanthap":38266,"floppy":38267,"salom":38268,"ðŁıĨðŁıĨ":38269,"wack":38270,"deliberate":38271,"oscill":38272,"heritag":38273,"dusted":38274,"ornithology":38275,"paddle":38276,"ferns":38277,"barun":38278,"clans":38279,"anticipate":38280,"aay":38281,"matically":38282,"éĩ":38283,"tumble":38284,"postman":38285,"unicef":38286,"trotter":38287,"opd":38288,"leaflet":38289,"geist":38290,"ceasefire":38291,"screws":38292,"creation":38293,"walnuts":38294,"longhorns":38295,"understatement":38296,"abb":38297,"proximity":38298,"nax":38299,"unity":38300,"turnpike":38301,"ordained":38302,"dubstep":38303,"chakra":38304,"mech":38305,"loveher":38306,"lookalike":38307,"donnein":38308,"viron":38309,"ÙĪ":38310,"bangers":38311,"variants":38312,"outdated":38313,"inta":38314,"cristo":38315,"spelt":38316,"foodand":38317,"fon":38318,"stefani":38319,"marginal":38320,"hutton":38321,"tiara":38322,"telford":38323,"quen":38324,"fairgrounds":38325,"quetta":38326,"mikhail":38327,"healer":38328,"vball":38329,"tyre":38330,"undergrad":38331,"glend":38332,"homers":38333,"scribed":38334,"maintains":38335,"poche":38336,"missal":38337,"marko":38338,"uas":38339,"án":38340,"shp":38341,"convey":38342,"pa
dre":38343,"saba":38344,"puglia":38345,"madhuri":38346,"paxton":38347,"chaplain":38348,"nago":38349,"casi":38350,"...!!!":38351,"flirt":38352,"saleh":38353,"kare":38354,"dire":38355,"stamped":38356,"extreme":38357,"ðŁĺĥðŁĺĥ":38358,"hoppy":38359,"guadalupe":38360,"advantaged":38361,"euchar":38362,"plow":38363,"unn":38364,"macqu":38365,"portland":38366,"clash":38367,"pes":38368,"loubout":38369,"yp":38370,"keeping":38371,"arcadia":38372,"frankie":38373,"fiu":38374,"deth":38375,"encyclopedia":38376,"size":38377,"invests":38378,"ðŁį©":38379,"geological":38380,"franç":38381,"confront":38382,"ðŁĺ¥":38383,"dys":38384,"afm":38385,"texan":38386,"graphene":38387,"repostapp":38388,"acf":38389,"ursula":38390,"gaza":38391,"ddled":38392,"fum":38393,"wsbtv":38394,"mbe":38395,"frontiers":38396,"chronograph":38397,"kes":38398,"interfaith":38399,"taboo":38400,"sparta":38401,"wondo":38402,"florist":38403,"embraces":38404,"caw":38405,"noel":38406,"archers":38407,"ðŁIJ·":38408,"romano":38409,"banan":38410,"shakers":38411,"melodies":38412,"geothermal":38413,"sephora":38414,"ìļ°":38415,"од":38416,"proc":38417,"handshake":38418,"pande":38419,"populated":38420,"slowdown":38421,"hortons":38422,"registrations":38423,"undeni":38424,"lants":38425,"passover":38426,"thakur":38427,"lief":38428,"adhesive":38429,"petal":38430,"microscopy":38431,"memphis":38432,"confirming":38433,"airdrop":38434,"mesmer":38435,"perceived":38436,"mingle":38437,"lifeline":38438,"ghj":38439,"worcestershire":38440,"passions":38441,"acher":38442,"ellar":38443,"aho":38444,"firenze":38445,"barang":38446,"letterman":38447,"hatfield":38448,"lucha":38449,"jeter":38450,"eshop":38451,"williams":38452,"horoscope":38453,"prede":38454,"eastbourne":38455,"durga":38456,"diversion":38457,"altrin":38458,"seismic":38459,"premiosm":38460,"narco":38461,"tir":38462,"orig":38463,"orm":38464,"landfall":38465,"cious":38466,"lindo":38467,"maxine":38468,"xico":38469,"tray":38470,"oswald":38471,"cba":38472,"ricotta":38473,"ncr":38474,"marau":38475,"า":38476,"gladiator":38477,"chery":38478,"lung":38479,"ume":38480,"popsic":38481,"longing":38482,"canals":38483,"taya":38484,"decentralized":38485,"shopp":38486,"pressures":38487,"maharaj":38488,"etihad":38489,"walgreens":38490,"succession":38491,"signaling":38492,"lig":38493,"staffer":38494,"northkorea":38495,"defying":38496,"asma":38497,"deg":38498,"perimeter":38499,"oakville":38500,"msk":38501,"baltimore":38502,"receip":38503,"deple":38504,"ðŁĺŃðŁĺĤ":38505,"jamboree":38506,">.<":38507,"rspb":38508,"punisher":38509,"considerably":38510,"intothe":38511,"parisian":38512,"accelerated":38513,"polyester":38514,"lowes":38515,"frying":38516,"sautéed":38517,"mouths":38518,"seychelles":38519,"rax":38520,"godis":38521,"dakota":38522,"housewives":38523,"theme":38524,"matinee":38525,"blackbird":38526,"yesung":38527,"prefers":38528,"pellegr":38529,"inated":38530,"trunks":38531,"strongertogether":38532,"repet":38533,"repairing":38534,"pedals":38535,"tolerant":38536,"herr":38537,"dunne":38538,"indication":38539,"decatur":38540,"btv":38541,"exhibitors":38542,"ikon":38543,"fridaymotivation":38544,"bragg":38545,"livetweet":38546,"alves":38547,"womensart":38548,"foreigners":38549,"wallets":38550,"mindy":38551,"laney":38552,"bbin":38553,"tvmiaw":38554,"lifter":38555,"target":38556,"tame":38557,"drou":38558,"astrophotography":38559,"mpc":38560,"gpu":38561,"nordstrom":38562,"friction":38563,"runoff":38564,"lovable":38565,"spnfamily":38566,"extingui":38567,"bloody":38568,"schel":38569,"artistry":38570,"swish":38571,"scarce":38572,"phils":38573,"ma
xim":38574,"possum":38575,"compromised":38576,"styli":38577,"scfc":38578,"issa":38579,"birmingham":38580,"sketched":38581,"angelica":38582,"ordinance":38583,"jets":38584,"conquer":38585,"ðŁĺIJ":38586,"onlineshopping":38587,"sori":38588,"reasonably":38589,"nuestro":38590,"arturo":38591,"chl":38592,"benefici":38593,"sphoto":38594,"welt":38595,"nikk":38596,"ðŁ¤ŀ":38597,"danao":38598,"formid":38599,"asse":38600,"afirst":38601,"âľĤ":38602,"gillette":38603,"assor":38604,"anonym":38605,"selca":38606,"femi":38607,"bearable":38608,"yand":38609,"armory":38610,"crepe":38611,"celticfc":38612,"bravo":38613,"inexpensive":38614,"delec":38615,"gecko":38616,"newmarket":38617,"snowflakes":38618,"kabir":38619,"contra":38620,"canning":38621,"morpho":38622,"garwal":38623,"ðŁĴĥðŁı»":38624,"fighting":38625,"mutation":38626,"woody":38627,"jugg":38628,"graces":38629,"premiosmtvmiaw":38630,"kennedy":38631,"gup":38632,"sae":38633,"opha":38634,"offspring":38635,"finisher":38636,"betts":38637,"spanning":38638,"marj":38639,"hone":38640,"shing":38641,"continents":38642,"samanthaprabhu":38643,"unrelated":38644,"lacy":38645,"explosions":38646,"benjamin":38647,"sophie":38648,"noting":38649,"microsoft":38650,"assen":38651,"ahoy":38652,"iker":38653,"hofer":38654,"moe":38655,"ahmadi":38656,"yann":38657,"anak":38658,"mahi":38659,"beu":38660,"ahah":38661,"creeper":38662,"baahubali":38663,"amat":38664,"priory":38665,"hawkeye":38666,"deloitte":38667,"skoda":38668,"printmaking":38669,"assembling":38670,"miraculous":38671,"noch":38672,"swo":38673,"lega":38674,"operates":38675,"borderlands":38676,"elie":38677,"strongh":38678,"reptiles":38679,"pirate":38680,"unfold":38681,"¯":38682,"qualcomm":38683,"unpredictable":38684,"otr":38685,"rosewood":38686,"directional":38687,"counselors":38688,"cornell":38689,"liberated":38690,"jad":38691,"irregular":38692,"bulgarian":38693,"highness":38694,"vodafone":38695,"swild":38696,"minimize":38697,"grazie":38698,"à¹ĩ":38699,"rstats":38700,"streep":38701,"ometric":38702,"humble":38703,"lump":38704,"lille":38705,"bü":38706,"homedepot":38707,"tripadvisor":38708,"kiwan":38709,"avia":38710,"erz":38711,"exico":38712,"duf":38713,"blumen":38714,"mizing":38715,"arma":38716,"inim":38717,"constan":38718,"sora":38719,"jual":38720,"aun":38721,"twell":38722,"trenches":38723,"hera":38724,"rk":38725,"poplar":38726,"recipeoftheday":38727,"llan":38728,"bhuban":38729,"shortages":38730,"ingdon":38731,"bridgewater":38732,"ðŁIJĺ":38733,"fortnite":38734,"camden":38735,"uncture":38736,"prow":38737,"colonies":38738,"tks":38739,"ngo":38740,"bhm":38741,"livepd":38742,"splace":38743,"slike":38744,"happyeaster":38745,"terrence":38746,"revolver":38747,"jed":38748,"yyyy":38749,"officeof":38750,"mts":38751,"existential":38752,"rourke":38753,"explorebc":38754,"ssed":38755,"priest":38756,"vixen":38757,"siding":38758,"kpa":38759,"ahar":38760,"juic":38761,"obstruc":38762,"forensics":38763,"ukmfg":38764,"cancellation":38765,"weary":38766,"abq":38767,"elec":38768,"prized":38769,"debts":38770,"mezz":38771,"salvatore":38772,"mdc":38773,"grette":38774,"cgc":38775,"thon":38776,"snowstorm":38777,"tsch":38778,"cookery":38779,"å¹":38780,"waxing":38781,"nacional":38782,"murs":38783,"rave":38784,"capes":38785,"germain":38786,"dripping":38787,"submitting":38788,"omelette":38789,"iteration":38790,"ajes":38791,"shimmer":38792,"fueling":38793,"ðŁĩ§ðŁĩª":38794,"lipo":38795,"bobble":38796,"unfollow":38797,"islamist":38798,"hiber":38799,"cats":38800,"agentsofshield":38801,"sensi":38802,"_____":38803,"steria":38804,"instal":38805,"auspicious":38806,"harro
w":38807,"overland":38808,"feminists":38809,"instant":38810,"chariot":38811,"blindness":38812,"sped":38813,"scarec":38814,"nuit":38815,"miniatures":38816,"hoseok":38817,"glock":38818,"fifaworldcup":38819,"ete":38820,"dism":38821,"weiner":38822,"exfoli":38823,"earts":38824,"à¸Ķ":38825,"myart":38826,"manil":38827,"issant":38828,"forma":38829,"incu":38830,"buffalob":38831,"intim":38832,"mccul":38833,"anjali":38834,"popo":38835,"undoub":38836,"hila":38837,"fungal":38838,"thankful":38839,"futur":38840,"endish":38841,"rends":38842,"thar":38843,"sheff":38844,"ringo":38845,"nicholls":38846,"iowa":38847,"potom":38848,"clams":38849,"ãģĦ":38850,"aconf":38851,"stadiums":38852,"dimp":38853,"dik":38854,"residences":38855,"dov":38856,"caricature":38857,"seagull":38858,"klm":38859,"confess":38860,"slapped":38861,"celeb":38862,"turbines":38863,"ppv":38864,"nurture":38865,"elab":38866,".....#":38867,"tuff":38868,"depress":38869,"alfar":38870,"amiibo":38871,"dispon":38872,"ewing":38873,"queer":38874,"friends":38875,"forre":38876,"âĺ¼":38877,"swt":38878,"aquarius":38879,"headliner":38880,"curd":38881,"figs":38882,"otters":38883,"lovefl":38884,"kareem":38885,"govegan":38886,"friyay":38887,"consolation":38888,"atri":38889,"ì§Ħ":38890,"âĺĿï¸ı":38891,"polyne":38892,"gued":38893,"oya":38894,"laus":38895,"intestinal":38896,"camilla":38897,"scalp":38898,"pir":38899,"leeds":38900,"horrifying":38901,"boretum":38902,"dandelion":38903,"ferrer":38904,"ellic":38905,"asx":38906,"soren":38907,"reloaded":38908,"aleague":38909,"navigator":38910,"inette":38911,"addams":38912,"alchemist":38913,"akshay":38914,"dystopian":38915,"awec":38916,"naya":38917,"alisa":38918,"ailed":38919,"agor":38920,"aviator":38921,"alizer":38922,"smobile":38923,"findyourpark":38924,"copying":38925,"toddy":38926,"shti":38927,"monger":38928,"calhoun":38929,"napkin":38930,"breakup":38931,"yatra":38932,"sethu":38933,"richi":38934,"erasmus":38935,"ferry":38936,"amore":38937,"practise":38938,"bobo":38939,"powerpoint":38940,"oose":38941,"liffe":38942,"china":38943,"shka":38944,"fadnavis":38945,"duane":38946,"waron":38947,"false":38948,"ðŁļĤ":38949,"washes":38950,"discip":38951,"========":38952,"gk":38953,"abb":38954,"stubborn":38955,"medieval":38956,"pci":38957,"ðŁįª":38958,"marilyn":38959,"hyo":38960,"mandi":38961,"cri":38962,"predecess":38963,"continuation":38964,"omusic":38965,"slat":38966,"whal":38967,"mallory":38968,"bonn":38969,"shenzhen":38970,"cai":38971,"âĺĥ":38972,"safest":38973,"forwards":38974,"drawers":38975,"blasted":38976,"slee":38977,"morphe":38978,"mbta":38979,"dumbass":38980,"ÑĦоÑĤо":38981,"alhamdulillah":38982,"eclub":38983,"albeit":38984,"healey":38985,"ayurveda":38986,"advertised":38987,"crocs":38988,"ittles":38989,"bryson":38990,"bei":38991,"njpw":38992,"honoree":38993,"fused":38994,"ðŁĶĺ":38995,"multin":38996,"naga":38997,"departs":38998,"kop":38999,"kino":39000,"jharkhand":39001,"edna":39002,"axle":39003,"milton":39004,"supremacist":39005,"marrakech":39006,"dominic":39007,"transcript":39008,"][#":39009,":).":39010,"woc":39011,"surrounds":39012,"ogil":39013,"leaflets":39014,"cowell":39015,"whew":39016,"trude":39017,"prolifer":39018,"succes":39019,"sportsman":39020,"condom":39021,"poche":39022,"kup":39023,"imprisonment":39024,"{}":39025,"scrambled":39026,"åĽ":39027,"kaine":39028,"cellphone":39029,"metamor":39030,"coni":39031,"remnants":39032,"eez":39033,"downpour":39034,"afternoon":39035,"exercising":39036,"berser":39037,"architecture":39038,"wicklow":39039,"mns":39040,"isp":39041,"boc":39042,"niss":39043,"mnwild":39044,"stumble":39045,"
rsi":39046,"luffy":39047,"silen":39048,"ddad":39049,"bullies":39050,"hawker":39051,"bbcc":39052,"scuba":39053,"epp":39054,"quets":39055,"foraging":39056,"pallet":39057,"hadi":39058,"cinematographer":39059,"catchers":39060,"toaster":39061,"khi":39062,"litecoin":39063,"kidlit":39064,"amherst":39065,"mauricio":39066,"ipad":39067,"marmalade":39068,"fey":39069,"donnelly":39070,"gto":39071,"estas":39072,"cerebral":39073,"antgrasso":39074,"zzled":39075,"virgil":39076,"swapped":39077,"ðŁĺħðŁĺħ":39078,"nodapl":39079,"greatest":39080,"nhlbruins":39081,"fraser":39082,"bmo":39083,"anew":39084,".âĿ¤ï¸ı":39085,"segregation":39086,"remarkably":39087,"mccormick":39088,"logger":39089,"eras":39090,"contracting":39091,"âłĢâłĢ":39092,"yorks":39093,"ukulele":39094,"touchscreen":39095,"decked":39096,"benn":39097,"southwark":39098,"ravin":39099,"numis":39100,"ðŁ¤Ļ":39101,"rut":39102,"greco":39103,"ethic":39104,"redneck":39105,"arr":39106,"tcs":39107,"ihri":39108,"ðŁĩ«ðŁĩ·":39109,"lk":39110,"inherited":39111,"zyk":39112,"viaduct":39113,"martyred":39114,"higu":39115,"ssn":39116,"bein":39117,"streetstyle":39118,"fergie":39119,"bankof":39120,"æĹ¥":39121,"stakeholder":39122,"exemplary":39123,"cress":39124,"essa":39125,"erotica":39126,"intrepid":39127,"gomes":39128,"braun":39129,"bethany":39130,"bangtan":39131,"pulmonary":39132,"milling":39133,"doctorate":39134,"trumprussia":39135,"र":39136,"sani":39137,"blatt":39138,"plau":39139,"deprived":39140,"tle":39141,"fully":39142,"bourn":39143,"stak":39144,"lufthansa":39145,"kiosk":39146,"faroo":39147,"defy":39148,"badan":39149,"ðŁĺĺâĿ¤ï¸ı":39150,"ritz":39151,"trisha":39152,"rands":39153,"middlesex":39154,"arabs":39155,"proj":39156,"sportscenter":39157,"repeats":39158,"ivf":39159,"bleedblue":39160,"assure":39161,"obs":39162,"territorial":39163,"elen":39164,"beverley":39165,"annah":39166,"âĿ¤ï¸ıâĿ¤ï¸ıâĿ¤ï¸ıâĿ¤ï¸ı":39167,"zl":39168,"forgood":39169,"sciencefiction":39170,"glau":39171,"sonya":39172,"prith":39173,"stweets":39174,"mixers":39175,"mario":39176,"antelope":39177,"writingcommunity":39178,"wentz":39179,"denham":39180,"bedi":39181,"sfo":39182,"harleydavidson":39183,"lookbook":39184,"immunotherapy":39185,"orphe":39186,"esville":39187,"edged":39188,"task":39189,"sbball":39190,"corrosion":39191,"kilometers":39192,"costing":39193,"playback":39194,"keke":39195,"divisi":39196,"uter":39197,"relocation":39198,"yelled":39199,"peng":39200,"upbeat":39201,"serve":39202,"âļł":39203,"halen":39204,"stirring":39205,"rehman":39206,"env":39207,"schumacher":39208,"fragment":39209,"alkaline":39210,"sbk":39211,"resili":39212,"sharepoint":39213,"rollover":39214,"trash":39215,"counterpart":39216,"âĻ«":39217,"obitu":39218,"à½":39219,"ãĤ¹":39220,"mulberry":39221,"ðŁİĨ":39222,"autonomy":39223,"spraying":39224,"natl":39225,"loveyou":39226,"franki":39227,"nuk":39228,"escar":39229,"canteen":39230,"alibaba":39231,"deplor":39232,"molecule":39233,"pud":39234,"fortnight":39235,"blondie":39236,"sphin":39237,"portrayal":39238,"tache":39239,"bute":39240,"consisting":39241,"freepalestine":39242,"csp":39243,"immort":39244,"dns":39245,"ðŁĴ¥ðŁĴ¥":39246,"tourde":39247,"cooking":39248,"archival":39249,"gathers":39250,"bitt":39251,"banc":39252,"premature":39253,"snowball":39254,"poetryday":39255,"loudly":39256,"fugitive":39257,"eday":39258,"emra":39259,"ðŁĩ¸ðŁĩª":39260,"scien":39261,"nodejs":39262,"jurgen":39263,"jeong":39264,"bandana":39265,"unis":39266,"foxsports":39267,"vandy":39268,"provisions":39269,"weep":39270,"tuk":39271,"iko":39272,"houn":39273,"ziggy":39274,"zr":39275,"fillet":39276,"bata":39277,"tink":39
278,"cone":39279,"wewant":39280,"kilo":39281,"horace":39282,"slt":39283,"sct":39284,"staytuned":39285,"victoria":39286,"umbria":39287,"attacker":39288,"inghamshire":39289,"frightening":39290,"noir":39291,"frat":39292,"contempt":39293,"liaison":39294,"hoi":39295,"brink":39296,"trill":39297,"niagar":39298,"kickass":39299,"dundas":39300,"notmy":39301,"rhode":39302,"bumble":39303,"noxi":39304,"fag":39305,"spectators":39306,"mancrushmonday":39307,"jinping":39308,"distract":39309,"daisy":39310,"walden":39311,"portrait":39312,"arthistory":39313,"voltron":39314,"evel":39315,"isc":39316,"acm":39317,"rite":39318,"nao":39319,"deported":39320,"sweats":39321,"rufus":39322,"lobo":39323,"laborday":39324,"gamo":39325,"ihrithik":39326,"blit":39327,"abdominal":39328,"ãħ¤ãħ¤ãħ¤ãħ¤":39329,"iit":39330,"eq":39331,"busy":39332,"alluarjun":39333,"undisclosed":39334,"deton":39335,"procreate":39336,"kil":39337,"ðŁİĤðŁİĤ":39338,"mitchell":39339,"kii":39340,"inheritance":39341,"alp":39342,"joburg":39343,"patrolling":39344,"compulsory":39345,"unsigned":39346,"niam":39347,"lga":39348,"eshopsuk":39349,"trilli":39350,"maw":39351,"appreciating":39352,"rockab":39353,"mañana":39354,"antal":39355,"malvern":39356,"royo":39357,"grandprix":39358,"sutton":39359,"goftheday":39360,"digi":39361,"ãħĭãħĭãħĭãħĭ":39362,"tles":39363,"varanasi":39364,"erected":39365,"disciples":39366,"contact":39367,"ðŁĺµ":39368,"lid":39369,"â¬ĩ":39370,"scentre":39371,"radiator":39372,"ingtips":39373,"transitions":39374,"thursdaymotivation":39375,"chemical":39376,"separati":39377,"salis":39378,"mim":39379,"geographical":39380,"bookfest":39381,"/.":39382,"âľĭ":39383,"vae":39384,"currie":39385,"aggarwal":39386,"acceleration":39387,"theses":39388,"lgm":39389,"umass":39390,"proportions":39391,"nata":39392,"anians":39393,"kuch":39394,"beacons":39395,"apr":39396,"@#":39397,"ðŁĴªðŁı¾":39398,"nuke":39399,"sheraton":39400,"kio":39401,"makati":39402,"politico":39403,"morale":39404,"ìĻ":39405,"economically":39406,"ggly":39407,"ssen":39408,"pastries":39409,"internships":39410,"vicente":39411,"fantaken":39412,"avengers":39413,"accuse":39414,"sleepover":39415,"indicated":39416,"thedream":39417,"sterone":39418,"renders":39419,"frost":39420,"oui":39421,"gregg":39422,"dore":39423,"⾨⾨⾨":39424,"pugs":39425,"saty":39426,"numb":39427,"hemsworth":39428,"tami":39429,"lassic":39430,"schiff":39431,"iglesias":39432,"agawa":39433,"]\"":39434,"reshi":39435,"gamestop":39436,"divorced":39437,"theater":39438,"claudi":39439,"unconventional":39440,"prophets":39441,"acin":39442,"twelf":39443,"towering":39444,"tml":39445,"sclerosis":39446,"kwan":39447,"gets":39448,"disturb":39449,"naira":39450,"energ":39451,"piracy":39452,"pruitt":39453,"notified":39454,"henna":39455,"bram":39456,"groundwater":39457,"bls":39458,"optimis":39459,"$)":39460,"lucie":39461,"bizhour":39462,"fangirling":39463,"grills":39464,"orl":39465,"verse":39466,"cina":39467,"lawless":39468,"artistsontwitter":39469,"televised":39470,"marshmallows":39471,"radiohead":39472,"barr":39473,"mfc":39474,"brevi":39475,"mmorpg":39476,"gaya":39477,"âĸ«":39478,"subtitles":39479,"jt":39480,"disneyland":39481,"tobago":39482,"nhm":39483,"groove":39484,"fiawec":39485,"\"/":39486,"bao":39487,"scrabble":39488,"omni":39489,"ffl":39490,"umc":39491,"simba":39492,"alier":39493,"terrell":39494,"plume":39495,"midi":39496,"dignit":39497,"coc":39498,"brut":39499,"adata":39500,"alchemy":39501,"dsm":39502,"ðŁĺĨðŁĺĨ":39503,"wintry":39504,"spares":39505,"cuer":39506,"conclusions":39507,"toys":39508,"odor":39509,"flann":39510,"garvey":39511,"scriptions":39
512,"inspections":39513,"catap":39514,"anglo":39515,"stlouis":39516,"heimer":39517,"atay":39518,"trich":39519,"enyc":39520,"childs":39521,"ventil":39522,"montp":39523,"guillermo":39524,"circulare":39525,"zell":39526,"modeled":39527,"craftsman":39528,"alina":39529,"stimulation":39530,"cashew":39531,"judas":39532,"bestof":39533,"toire":39534,"suspends":39535,"scollege":39536,"realising":39537,"bytes":39538,"bloods":39539,"assi":39540,"ðŁĴ¿":39541,"ohs":39542,"ðŁįĭ":39543,"scallop":39544,"व":39545,"gifting":39546,"camogie":39547,"wilkes":39548,"ozzy":39549,"ðŁ¤¤":39550,"veronic":39551,"savoy":39552,"demetri":39553,"babygirl":39554,"ðŁĺįðŁĺŃ":39555,"sox":39556,"clyde":39557,"inductee":39558,"countdown":39559,"selfcare":39560,"à¤ľ":39561,"vika":39562,"torre":39563,"phdchat":39564,"pears":39565,"awh":39566,"suffrage":39567,"lesn":39568,"admiration":39569,"mpp":39570,"sharkweek":39571,"schulz":39572,"santorini":39573,"clover":39574,"(*":39575,"strasbourg":39576,"exiting":39577,"soyu":39578,"fingerprint":39579,"chea":39580,"ãĢľ":39581,"vindic":39582,"songwriters":39583,"soa":39584,"prouder":39585,"nama":39586,"=))":39587,"simplest":39588,"deliciously":39589,"gilles":39590,"uq":39591,"mnwx":39592,"epp":39593,"shun":39594,"kennel":39595,"fallon":39596,"ðŁIJ£":39597,"sind":39598,"tragically":39599,"outes":39600,"modernism":39601,"coke":39602,"gyn":39603,"spion":39604,"âĺ¹ï¸ı":39605,"leam":39606,"compressor":39607,"apologise":39608,"twentyon":39609,"fanatics":39610,"âĻ»":39611,"scotsman":39612,"sawa":39613,"kou":39614,"aser":39615,"à¸ļ":39616,"welterweight":39617,"phenom":39618,"twickenham":39619,"stria":39620,"pout":39621,"kaz":39622,"giam":39623,"cdp":39624,"hoy":39625,"employ":39626,"redmond":39627,"à¸Ħà¸":39628,"smere":39629,"trancefamily":39630,"protocols":39631,"piece":39632,"luiz":39633,"iteracy":39634,"carls":39635,"unitedstates":39636,"harmed":39637,"phdlife":39638,"chaw":39639,"footprints":39640,"lé":39641,"choker":39642,"zana":39643,"slipper":39644,"ericsson":39645,"insulting":39646,"artichoke":39647,"advising":39648,"acquisitions":39649,"opor":39650,"mutations":39651,"rear":39652,"à¥ģ":39653,"podcast":39654,"wither":39655,"kung":39656,"íĺ¸":39657,"winslow":39658,"diapers":39659,"ðŁĵ¸@":39660,"ecker":39661,"collar":39662,"huey":39663,"giro":39664,"monogram":39665,"kasich":39666,"siveness":39667,"malaysi":39668,"aromatic":39669,"gres":39670,"galileo":39671,"uji":39672,"robb":39673,"drm":39674,"nonetheless":39675,"asa":39676,":>":39677,"loa":39678,"lnp":39679,"atwork":39680,"agt":39681,"lakshmi":39682,"pipelines":39683,"idal":39684,"strel":39685,"reall":39686,"chainz":39687,"stonewall":39688,"sansk":39689,"ðŁı´":39690,"piedmont":39691,"hostess":39692,"ciu":39693,"té":39694,"analyses":39695,"wilhelm":39696,"scotty":39697,"rwby":39698,"mosquit":39699,"usemb":39700,"quins":39701,"ðŁijİ":39702,"tucker":39703,"sconf":39704,"specifications":39705,"psychiatry":39706,"brookes":39707,"sils":39708,"olaf":39709,"deto":39710,"codi":39711,"clip":39712,"filth":39713,"womancrushwednesday":39714,"goto":39715,"angerous":39716,"beale":39717,"wtc":39718,"panelist":39719,"nex":39720,"larsen":39721,"emilio":39722,"tableau":39723,"hitters":39724,"conceived":39725,"americani":39726,"ortega":39727,"mardi":39728,"Ñĥ":39729,"paintball":39730,"thirsty":39731,"newyorker":39732,"etisation":39733,"goss":39734,"weaker":39735,"ugh":39736,"troll":39737,"harga":39738,"dual":39739,"ghtning":39740,"atine":39741,"ðŁĺİðŁĺİðŁĺİ":39742,"cookout":39743,"pyrenees":39744,"poss":39745,"authentication":39746,"sportswear":39747,"yunho":39
748,"kiro":39749,"archipel":39750,"shenko":39751,"render":39752,"novation":39753,"divinity":39754,"ðŁij£":39755,"sufi":39756,"humbling":39757,"geopol":39758,"devotees":39759,"waitress":39760,"trough":39761,"pyro":39762,"iba":39763,"bling":39764,"graf":39765,"epilots":39766,"btr":39767,"oftball":39768,"basking":39769,"dominos":39770,"soom":39771,"rath":39772,"sheryl":39773,"quel":39774,"astronomical":39775,"weld":39776,"tracklist":39777,"signee":39778,"sleepless":39779,"comman":39780,"chron":39781,"summon":39782,"puremichigan":39783,"crispr":39784,"slip":39785,"lagi":39786,"raq":39787,"umu":39788,"thalap":39789,"charmed":39790,"scrump":39791,"quadcopter":39792,"skip":39793,"petersen":39794,"muni":39795,"ðŁĮ¾":39796,"monaghan":39797,"trays":39798,"icked":39799,"canadaday":39800,"tegr":39801,"�":39802,"hotness":39803,"heavymetal":39804,"abar":39805,"gopdebate":39806,"azul":39807,"spiderman":39808,"sunflowers":39809,"ľë":39810,"webcomics":39811,"bard":39812,"в":39813,"nicholas":39814,"slush":39815,"raman":39816,"markham":39817,"fficial":39818,"ffler":39819,"íĬ¸":39820,"pless":39821,"anushka":39822,"toto":39823,"skaters":39824,"prowrestling":39825,"competes":39826,"ayala":39827,"mystery":39828,"thrills":39829,"mpg":39830,"independently":39831,"yul":39832,"imperative":39833,"formidable":39834,"tireless":39835,"stacking":39836,"tongues":39837,"maltese":39838,"potts":39839,"matti":39840,"charting":39841,"chillout":39842,"supernova":39843,"omeo":39844,"skysports":39845,"nutty":39846,"ðŁĹĵï¸ı":39847,"rohan":39848,"inspired":39849,"concierge":39850,"serra":39851,"makk":39852,"galat":39853,"chipp":39854,"yev":39855,"ì£":39856,"reimbur":39857,"opul":39858,"kimberley":39859,"ieee":39860,"bremen":39861,"chitec":39862,"orin":39863,"naku":39864,"bonkers":39865,"footy":39866,"emergence":39867,"ðŁĨĺ":39868,"stip":39869,"sergei":39870,"zoey":39871,"aime":39872,"would":39873,"dyes":39874,"destiny":39875,"vinaigrette":39876,"drier":39877,"circulareconomy":39878,"anarchi":39879,"ssr":39880,"schel":39881,"ciner":39882,"groom":39883,"determining":39884,"garmin":39885,"calais":39886,"incarceration":39887,"bukit":39888,"noi":39889,"chelmsford":39890,"mckinley":39891,"chipped":39892,"belonged":39893,"tumors":39894,"stroud":39895,"mii":39896,"influenza":39897,"wwenxt":39898,"tundra":39899,"telecommunications":39900,"catsofinstagram":39901,"tages":39902,"beatty":39903,"odu":39904,"mlkday":39905,"ooper":39906,"dangle":39907,"akley":39908,"crumb":39909,"antigua":39910,"timbers":39911,"rouhani":39912,"ðŁĴªðŁĴªðŁĴª":39913,"hafi":39914,"...!!":39915,"wcs":39916,"coop":39917,"snc":39918,"litres":39919,"ãĢĬ":39920,"haz":39921,"coz":39922,"kant":39923,"greenfield":39924,"curti":39925,"yale":39926,"flyeagles":39927,"whatsoever":39928,"worthing":39929,"roulette":39930,"flyeaglesfly":39931,"unda":39932,"ainted":39933,"standing":39934,"luscious":39935,"hpc":39936,"efficacy":39937,"ashland":39938,"meghan":39939,"kywx":39940,"npr":39941,"bathtub":39942,"acos":39943,"hani":39944,"marcor":39945,"mantis":39946,"daisi":39947,"boba":39948,"abbie":39949,"mutil":39950,"vial":39951,"spyder":39952,"poz":39953,"gti":39954,"elfie":39955,"nightw":39956,"metroid":39957,"antoni":39958,"maddie":39959,"dhry":39960,"darlings":39961,"tends":39962,"taekwondo":39963,"atlanta":39964,"meow":39965,"chloe":39966,"ãĥİ":39967,"ymes":39968,"siberia":39969,"kcon":39970,"gues":39971,"mariner":39972,"facil":39973,"azzle":39974,"[...":39975,"hannover":39976,"bavaria":39977,"virgo":39978,"teuk":39979,"usps":39980,")#":39981,"walla":39982,"sampson":39983,"needless":
39984,"verbally":39985,"hayley":39986,"bowled":39987,"pius":39988,"lampard":39989,"hamstring":39990,"volvo":39991,"roadsafety":39992,"choking":39993,"sorbet":39994,"ahem":39995,"healthyfood":39996,"braided":39997,"horticulture":39998,"crative":39999,"cheek":40000,"addo":40001,"theforce":40002,"koko":40003,"schizoph":40004,"jie":40005,"wada":40006,"twentyonepilots":40007,"hbcu":40008,"proton":40009,"pauls":40010,"louisa":40011,"latam":40012,"kyrgy":40013,"compac":40014,"sdk":40015,"sapi":40016,"???":40017,"liberalism":40018,"epsilon":40019,"aiden":40020,"wusa":40021,"sprayed":40022,"basketball":40023,"kimono":40024,"bluewave":40025,"alias":40026,"ë§Ī":40027,"mugshot":40028,"cec":40029,"dogre":40030,"adora":40031,"ðŁĵ·@":40032,"krakow":40033,"intrigued":40034,"exhausting":40035,"astronomer":40036,"venison":40037,"ladybug":40038,"civ":40039,"brae":40040,"usm":40041,"bribe":40042,"acupuncture":40043,"pembroke":40044,"keating":40045,"chie":40046,"yad":40047,"tsi":40048,"smi":40049,"seeding":40050,"gateshead":40051,"lisboa":40052,"gyp":40053,"canvass":40054,"ðŁĶ´âļªï¸ı":40055,"opi":40056,"nir":40057,"societal":40058,"lyte":40059,"aties":40060,"csm":40061,"artery":40062,"alin":40063,"akapoor":40064,"abstracts":40065,"âĢ¦âĢ¦":40066,"teenwolf":40067,"newe":40068,"travelgram":40069,"sentimental":40070,"perched":40071,"handel":40072,"hoek":40073,"fay":40074,"coordinating":40075,"animate":40076,"manian":40077,"effort":40078,"jerky":40079,"fck":40080,"adrienne":40081,"mably":40082,"trading":40083,"myel":40084,"spiro":40085,"sola":40086,"storing":40087,"overdrive":40088,"mondaymorning":40089,"dreamteam":40090,"pulse":40091,"bondi":40092,"bernie":40093,"pgatour":40094,"tripoli":40095,"sonam":40096,"platt":40097,"âļ¡":40098,"agroup":40099,"îIJĴ":40100,"invading":40101,"vcu":40102,"kell":40103,"ños":40104,"undead":40105,"podcasting":40106,"mercedesam":40107,"manafort":40108,"cortex":40109,"queso":40110,"impeccable":40111,"palmer":40112,"wildoz":40113,"sportsc":40114,"guacamole":40115,"dispenser":40116,"categori":40117,"stunts":40118,"peril":40119,"invitations":40120,"dunedin":40121,"xie":40122,"achieves":40123,"safer":40124,"preds":40125,"phan":40126,"knuckles":40127,"kak":40128,"ignores":40129,"lovemyjob":40130,"aruba":40131,"oundation":40132,"datacenter":40133,"covert":40134,"gring":40135,"couple":40136,"ار":40137,"voli":40138,"mccle":40139,"artisans":40140,"ludo":40141,"kalam":40142,"aroma":40143,"undertaker":40144,"hula":40145,"wizkid":40146,"gumb":40147,"godfrey":40148,"bakersfield":40149,"kern":40150,"engineer":40151,"carve":40152,"palin":40153,"guarantees":40154,"pebbles":40155,"bays":40156,"zieg":40157,"fink":40158,"â¬ĩï¸ıâ¬ĩï¸ı":40159,"downpours":40160,"rochelle":40161,"raspberry":40162,"ðŁĺ®":40163,"graphies":40164,"stomp":40165,"cafes":40166,"arized":40167,"uttar":40168,"calvary":40169,"drie":40170,"crusader":40171,"busan":40172,"tuxedo":40173,"siu":40174,"seamus":40175,"cultured":40176,"blanchard":40177,"townhouse":40178,"gered":40179,"buttermilk":40180,"fluctu":40181,"rogerfederer":40182,"heli":40183,"ðŁ¦ĥ":40184,"uous":40185,"ramesh":40186,"muppets":40187,"emailmarketing":40188,"yess":40189,"brice":40190,"rizio":40191,"pelo":40192,"donneinarte":40193,"urable":40194,"investin":40195,"bumping":40196,"rajiv":40197,"sava":40198,"thrower":40199,"forex":40200,"ohhhh":40201,"thrust":40202,"pullman":40203,"rfid":40204,"sepsis":40205,"leed":40206,"fright":40207,"rounding":40208,"neb":40209,"phins":40210,"aisha":40211,"utilizing":40212,"squats":40213,"goldsmith":40214,"jic":40215,"boks":40216,"vaus":402
17,"ipo":40218,"exclusion":40219,"tariff":40220,"pokes":40221,"minal":40222,"lands":40223,"enforce":40224,"washingtondc":40225,"orchar":40226,"gx":40227,"marys":40228,"eyour":40229,"aussie":40230,"bakers":40231,"unpopular":40232,"latinos":40233,"large":40234,"putnam":40235,"bolo":40236,"wade":40237,"pelo":40238,"dizz":40239,"obstruction":40240,"flappy":40241,"wearethe":40242,"dependence":40243,"pajama":40244,"ete":40245,"yann":40246,"ewan":40247,"discla":40248,"aay":40249,"karina":40250,"eic":40251,"antrim":40252,"wsoc":40253,"negatively":40254,"kaido":40255,"fotografia":40256,"dhru":40257,"colossal":40258,"mcleod":40259,"kwang":40260,"manipu":40261,"exhilar":40262,"usatoday":40263,"summerslam":40264,"coles":40265,"taproom":40266,"unbeatable":40267,"dema":40268,"ticks":40269,"kling":40270,"fils":40271,"campaigners":40272,"à¸ķ":40273,"brewster":40274,"audubon":40275,"quay":40276,"chs":40277,"kigali":40278,"dler":40279,"strengthens":40280,"somal":40281,"signingday":40282,"golds":40283,"pigment":40284,"orchestral":40285,"gq":40286,"linkin":40287,"ðŁıĩ":40288,"taw":40289,"algarve":40290,"hov":40291,"earle":40292,"goldfish":40293,"amig":40294,"exer":40295,"benin":40296,"druid":40297,"ðŁIJ¸":40298,"shem":40299,"quattro":40300,"mercen":40301,"mente":40302,"incorporating":40303,"bonanza":40304,"statefair":40305,"ende":40306,"conceptions":40307,"ees":40308,"âĻ¥ï¸ıâĻ¥ï¸ı":40309,"dson":40310,"firearm":40311,"orbital":40312,"weh":40313,"multip":40314,"fob":40315,"requiem":40316,"plight":40317,"thouse":40318,"said":40319,"ocre":40320,"remembrance":40321,"nold":40322,"chipping":40323,"bev":40324,"ert":40325,"cathy":40326,"sym":40327,"riggs":40328,"mley":40329,"dialogues":40330,"slender":40331,"howl":40332,"gauteng":40333,"wdw":40334,"tobi":40335,"smokes":40336,"implo":40337,"bpm":40338,"adn":40339,"mombasa":40340,"capsul":40341,"bloomfield":40342,"articul":40343,"cleo":40344,"googled":40345,"fluffy":40346,"lard":40347,"enzyme":40348,"vesti":40349,"ibrahi":40350,"flame":40351,"emea":40352,"outages":40353,"dispropor":40354,"bleak":40355,"ansel":40356,"icker":40357,"stlouis":40358,"stockmarket":40359,"goodfriday":40360,"sault":40361,"stalled":40362,"prom":40363,"epsom":40364,"bé":40365,"these":40366,"sauces":40367,"mew":40368,"litfest":40369,"pred":40370,"reu":40371,"karak":40372,"sienna":40373,"ellin":40374,"biotechnology":40375,"ï¸ıâĥ£-":40376,"tactic":40377,"sain":40378,"pork":40379,"monza":40380,"kaj":40381,"lush":40382,"compartment":40383,"changing":40384,"shraddhakapoor":40385,"foal":40386,"artem":40387,"cuando":40388,"canola":40389,"oriente":40390,"messe":40391,"dited":40392,"brc":40393,"boxer":40394,"bbctwo":40395,"sst":40396,"mentday":40397,"eming":40398,"dewey":40399,"kofi":40400,"âŀĸâŀĸâŀĸâŀĸ":40401,"realization":40402,"smol":40403,"twood":40404,"sanje":40405,"flagstaff":40406,"berwick":40407,"corset":40408,"canary":40409,"whistleblower":40410,"etched":40411,"composing":40412,"squeezed":40413,"bower":40414,"autodesk":40415,"neh":40416,"mathieu":40417,"baja":40418,"ÅĤ":40419,"hydra":40420,"daim":40421,"ameri":40422,"insisted":40423,"merlot":40424,"garros":40425,"heartnews":40426,"gainesville":40427,"cutler":40428,"bode":40429,"ðŁĺīðŁĺī":40430,"lewes":40431,"scountry":40432,"gsa":40433,"usu":40434,"ccm":40435,"godawgs":40436,"pharaoh":40437,"crae":40438,"morley":40439,"hypnoti":40440,"fades":40441,"neurons":40442,"fuzz":40443,"ingco":40444,"highlanders":40445,"stark":40446,"vigne":40447,"packets":40448,"amarillo":40449,"reuben":40450,"insults":40451,"basic":40452,"vector":40453,"nme":40454,"acruz":
40455,"tros":40456,"transmitter":40457,"ðŁĺŀ":40458,"interpret":40459,"ðŁĺ²":40460,"prequel":40461,"mcgowan":40462,"dissemin":40463,"ðŁĴĺðŁĴĺ":40464,"masculinity":40465,"indiegamedev":40466,"alive":40467,"tet":40468,"petal":40469,"emailed":40470,"armed":40471,"koo":40472,"heer":40473,"baird":40474,"superjunior":40475,"metropolis":40476,"delavin":40477,"declines":40478,"stitutes":40479,"Ûģ":40480,"ptbo":40481,"glan":40482,"chores":40483,"ealing":40484,"chrissy":40485,"stemc":40486,"vian":40487,"assassinated":40488,"pronounce":40489,"illegals":40490,"discovery":40491,"cavill":40492,"frifotos":40493,"fal":40494,"soi":40495,"sabotage":40496,"tint":40497,"pdc":40498,"ðŁİīðŁİĪ":40499,"ãĤĬãģ":40500,"jio":40501,"endeavor":40502,"insig":40503,"committees":40504,"shearer":40505,"metz":40506,"marrying":40507,"hdd":40508,"gby":40509,"fret":40510,"trish":40511,"pul":40512,"scripted":40513,"saki":40514,"lw":40515,"keye":40516,"shimi":40517,"nanaimo":40518,"cah":40519,"ë":40520,"tempered":40521,"ician":40522,"dugg":40523,"dishwasher":40524,"airfield":40525,"srugby":40526,"grinch":40527,"yst":40528,"rms":40529,"mahatma":40530,"lankan":40531,"discar":40532,"digestion":40533,"nodes":40534,"lls":40535,"omic":40536,"gutter":40537,"tisgarh":40538,"federico":40539,"electionday":40540,"bohe":40541,"mastercard":40542,"fireball":40543,"âľĶï¸ı":40544,"oyster":40545,"pong":40546,"dok":40547,"enroute":40548,"mvc":40549,"beatthe":40550,"alistair":40551,"shub":40552,"shaming":40553,"chernobyl":40554,"ghibli":40555,"thes":40556,"pinion":40557,"dbs":40558,"salts":40559,"iction":40560,"epiph":40561,"ncpol":40562,"inconvenience":40563,"whitley":40564,"inspecting":40565,"woodley":40566,"wiener":40567,"skillet":40568,"noles":40569,"mca":40570,"hina":40571,"asha":40572,"willingness":40573,"wellness":40574,"tamed":40575,"showtime":40576,"disadvantaged":40577,"bernat":40578,"usn":40579,"missionaries":40580,"counselling":40581,"arrogant":40582,"quantitative":40583,"legalization":40584,"hodge":40585,"energyefficiency":40586,"camerondallas":40587,"possessions":40588,"pbb":40589,"harrisburg":40590,"vg":40591,"hinduism":40592,"happythanksgiving":40593,"fib":40594,"reacting":40595,"tweetapicture":40596,"politi":40597,"muppet":40598,"hurrah":40599,"pace":40600,"coastguard":40601,"guarded":40602,"asam":40603,"parry":40604,"forevery":40605,"xq":40606,"oomf":40607,"keanu":40608,"jind":40609,"rist":40610,"customerservice":40611,"sacred":40612,"ðŁĺº":40613,"toner":40614,"occurrence":40615,"matu":40616,"valdez":40617,"redd":40618,"isak":40619,"powerrangers":40620,"peasant":40621,"rajini":40622,"abraham":40623,"emil":40624,"cardo":40625,"tril":40626,"hairstyles":40627,"obsolete":40628,"sampler":40629,"directive":40630,"delavinkisses":40631,"verton":40632,"glos":40633,"spay":40634,"palermo":40635,"comets":40636,"manziel":40637,"chicagof":40638,"skipped":40639,"pictorial":40640,"hant":40641,"bmi":40642,"aol":40643,"reopens":40644,"paddling":40645,"devos":40646,"fraud":40647,"baseline":40648,"queues":40649,"spired":40650,"snare":40651,"euve":40652,"descriptions":40653,"daisies":40654,"caching":40655,"galleria":40656,"trimmed":40657,"stino":40658,"recycla":40659,"icular":40660,"birken":40661,"rawlings":40662,"flix":40663,"chicas":40664,"bgt":40665,"likeli":40666,"argyll":40667,"thelove":40668,"gaston":40669,"blanca":40670,"hak":40671,"fone":40672,"sailormoon":40673,"haci":40674,"imac":40675,"flyn":40676,"decan":40677,"belles":40678,"apic":40679,"zog":40680,"taunton":40681,"constance":40682,"lasagna":40683,"kernel":40684,"inka":40685,"harbor":4068
6,"collectively":40687,"calculated":40688,"aville":40689,"shilpa":40690,"purdu":40691,"gimm":40692,"funer":40693,"aest":40694,"pembrokeshire":40695,"nightingale":40696,"nunes":40697,"hypertension":40698,"hubert":40699,"sliders":40700,"infertility":40701,"commended":40702,"transatlantic":40703,"metrical":40704,"!!@":40705,"ÅŁ":40706,"ssg":40707,"bacca":40708,"inverted":40709,"funfactfriday":40710,"itans":40711,"album":40712,"acquainted":40713,"rier":40714,"whelan":40715,"sarab":40716,"mue":40717,"snooze":40718,"piff":40719,"agreeing":40720,"spitting":40721,"jermaine":40722,"nye":40723,"âľıï¸ı":40724,"ambush":40725,"zeph":40726,"congreg":40727,"university":40728,"sapp":40729,"wannabe":40730,"patrice":40731,"ibd":40732,"doglo":40733,"fridges":40734,"sund":40735,"kingston":40736,"argon":40737,"kamen":40738,"hardrock":40739,"dsley":40740,"dolores":40741,"ì°":40742,"otaku":40743,"piping":40744,"behaving":40745,"âŃIJï¸ıâŃIJï¸ıâŃIJï¸ı":40746,"bluebird":40747,"ansari":40748,"teapot":40749,"firework":40750,"crop":40751,"logans":40752,"typed":40753,"thickness":40754,"igers":40755,"cfp":40756,"dysfunctional":40757,"contrasting":40758,"etty":40759,"astonmartin":40760,"txst":40761,"dragrace":40762,"attributes":40763,"marathon":40764,"manuscripts":40765,"johnstone":40766,"ðŁĺ±ðŁĺ±":40767,"boer":40768,"ayu":40769,"arugula":40770,"poorest":40771,"condu":40772,"assumption":40773,"anagh":40774,"noh":40775,"delavin":40776,"sitter":40777,"gö":40778,"morow":40779,"kickstart":40780,"comi":40781,"glacial":40782,"ghead":40783,"bain":40784,"kershaw":40785,"endof":40786,"freud":40787,"omat":40788,"iaf":40789,"hug":40790,"signup":40791,"eachother":40792,"definite":40793,"tubing":40794,"shakira":40795,"ðŁijıðŁı½":40796,"uuuu":40797,"swin":40798,"shambles":40799,"olas":40800,"skell":40801,"britain":40802,"knw":40803,"clutter":40804,"omy":40805,"jens":40806,"hanged":40807,"cityscape":40808,"scraps":40809,"unlocking":40810,"deadliest":40811,"erno":40812,"breastcancer":40813,"ait":40814,"inspect":40815,"furi":40816,"ðŁĴĮ":40817,"kud":40818,"jule":40819,"orah":40820,"mids":40821,"mdt":40822,"burgring":40823,"rattle":40824,"pusa":40825,"stalk":40826,"cleans":40827,"issance":40828,"zek":40829,"worthit":40830,"nameis":40831,"muskoka":40832,"councilman":40833,"urbanart":40834,"barrac":40835,"unsolved":40836,"tul":40837,"gita":40838,"whiteboard":40839,"soybeans":40840,"ement":40841,"conti":40842,"saturdaymotivation":40843,"conveniently":40844,"docking":40845,"tado":40846,"âı©":40847,"spino":40848,"puppylove":40849,"pof":40850,"fabricated":40851,"robbers":40852,"adopts":40853,"tified":40854,"kkr":40855,"indulgence":40856,"noticeable":40857,"macquarie":40858,"chapel":40859,"sensual":40860,"kiko":40861,"melanoma":40862,"loretta":40863,"liance":40864,"aben":40865,"splus":40866,"gaal":40867,"acele":40868,"libdems":40869,"comparisons":40870,"ðŁĮµ":40871,"rhythms":40872,"mery":40873,"encapsul":40874,"napier":40875,"ðŁijĮðŁijĮðŁijĮ":40876,"ðŁijIJ":40877,"platz":40878,"fresno":40879,"reformed":40880,"ranbir":40881,"elit":40882,"thebest":40883,"bhushan":40884,"vinnie":40885,"improvised":40886,"sittin":40887,"recreated":40888,"eba":40889,"ecker":40890,"acrob":40891,"ponte":40892,"cord":40893,"giddy":40894,"eurusd":40895,"fever":40896,"intuition":40897,"gari":40898,"dummies":40899,"budweiser":40900,"amendments":40901,"tetra":40902,"schnit":40903,"ayas":40904,"marys":40905,"cist":40906,"kani":40907,"kermit":40908,"ðŁĺ±ðŁĺ±ðŁĺ±":40909,"tinker":40910,"strolling":40911,"divisional":40912,"nigeri":40913,"ominous":40914,"menstrual":40915,"karab":
40916,"khy":40917,"bwfc":40918,"panhandle":40919,"lilli":40920,"weller":40921,"strapped":40922,"sonthe":40923,"transferring":40924,"ethereal":40925,"sneaks":40926,"rudol":40927,"gables":40928,"jacking":40929,"cincode":40930,"fortune":40931,"canadiens":40932,"confor":40933,"abnormal":40934,"franklin":40935,"tita":40936,"mula":40937,"persist":40938,"cuties":40939,"kiel":40940,"ðŁĩ±ðŁĩ":40941,"hermann":40942,"awk":40943,"fiasco":40944,"koto":40945,"weta":40946,"hiker":40947,"buddy":40948,"preventive":40949,"mcgraw":40950,"gameboy":40951,"forsyth":40952,"topshop":40953,"siob":40954,"sadh":40955,"intram":40956,"followart":40957,"soaps":40958,"dragonball":40959,"oux":40960,"morrison":40961,"à¹ĥ":40962,"lubric":40963,"adulthood":40964,"morrisons":40965,"âļłï¸ı":40966,"hermo":40967,"taka":40968,"stallone":40969,"misuse":40970,"teamgb":40971,"ragha":40972,"confined":40973,"aty":40974,"homophobic":40975,"nwo":40976,"skynews":40977,"hoya":40978,"acrosse":40979,"wiiu":40980,"purée":40981,"jeddah":40982,"ðŁ¤§":40983,"advisers":40984,"phine":40985,"anis":40986,"scrumptious":40987,"ë°ķ":40988,"cke":40989,"viny":40990,"term":40991,"sdc":40992,"odo":40993,"homeschool":40994,"vasc":40995,"leopards":40996,"deborah":40997,"illicit":40998,"curran":40999,"asroma":41000,"naught":41001,"marig":41002,"brandi":41003,"emp":41004,"ðŁĺįðŁijĮ":41005,"îĮ":41006,"suspend":41007,"luz":41008,"initiation":41009,"schaft":41010,"jensenackles":41011,"crawler":41012,"postdoc":41013,"desks":41014,"trailblazer":41015,"denomin":41016,"trix":41017,"noise":41018,"poet":41019,"±ï¸ı":41020,"smug":41021,"volatile":41022,"proofs":41023,"pharmacist":41024,"sardinia":41025,"mashable":41026,"kimchi":41027,"coed":41028,"schalke":41029,"doodled":41030,"csw":41031,"shur":41032,"rox":41033,"dok":41034,"chrisbrown":41035,"mathematician":41036,"abound":41037,"angelic":41038,"rockford":41039,"dole":41040,"yorkers":41041,"msn":41042,"gman":41043,"xavier":41044,"borrowing":41045,"markings":41046,"longhorn":41047,"kja":41048,"diverted":41049,"mmit":41050,"euphoria":41051,"ayyy":41052,"tea":41053,"pah":41054,"cki":41055,"uncut":41056,"liven":41057,"kyung":41058,"fanart":41059,"mering":41060,"redding":41061,"amovie":41062,"gridi":41063,"cthulhu":41064,"scholarly":41065,"judah":41066,"thbewithyou":41067,"eucalyp":41068,"ðŁIJķ":41069,"hertfordshire":41070,"courtroom":41071,"byu":41072,"auctioned":41073,"please":41074,"marcia":41075,"ê°ĵ":41076,"succeeded":41077,"elas":41078,"arvind":41079,"tlot":41080,"saigon":41081,"rett":41082,"rakesh":41083,"fdny":41084,"asen":41085,"sebring":41086,"gladiators":41087,"youknow":41088,"vlad":41089,"gola":41090,"parap":41091,"ÑĢи":41092,"sabcnews":41093,"oneteam":41094,"ohl":41095,"sune":41096,"rij":41097,"cdc":41098,"stargate":41099,"rundown":41100,"plato":41101,"phc":41102,"chatter":41103,"raviol":41104,"mnf":41105,"mandala":41106,"liet":41107,"à¸ķ":41108,"maria":41109,"hungover":41110,"consolidation":41111,"ferrell":41112,"traditional":41113,"iloveart":41114,"galap":41115,"ðŁıĮ":41116,"quezon":41117,"españa":41118,"ðŁĩ¨ðŁĩŃ":41119,"hobby":41120,"steamboat":41121,"malign":41122,"guillau":41123,"prohi":41124,"itsme":41125,"íĥĢ":41126,"inscription":41127,"alz":41128,"marian":41129,"kade":41130,"mmon":41131,"adjusting":41132,"nests":41133,"internally":41134,"cir":41135,"vikram":41136,"malala":41137,"kph":41138,"felicia":41139,"thereal":41140,"captivity":41141,"atis":41142,"marcorubio":41143,"kaleido":41144,"chev":41145,"manoj":41146,"lemore":41147,"gentri":41148,"vips":41149,"trope":41150,"\"âĢĶ":41151,"pairings":41152,"m
alnutrition":41153,"fray":41154,"designation":41155,"brunomars":41156,"aze":41157,"torrential":41158,"panzer":41159,"gail":41160,"underthe":41161,"theological":41162,"schizophre":41163,"dazzle":41164,"frederic":41165,"mopar":41166,"adilla":41167,"soggy":41168,"raun":41169,"mediocre":41170,"colorec":41171,"ife":41172,"pinst":41173,"bluef":41174,"²":41175,"worldwater":41176,"giroud":41177,"clarinet":41178,"adolf":41179,"tarantino":41180,"receipts":41181,"assump":41182,"ðŁijŁ":41183,"coffees":41184,"âľĬðŁı¾":41185,"duplex":41186,"sof":41187,"rx":41188,"lino":41189,"timberwolves":41190,"pandit":41191,"motm":41192,"ega":41193,"ayama":41194,"achs":41195,"outsider":41196,"llen":41197,"coer":41198,"tilly":41199,"cheeseburger":41200,"mads":41201,"pledis":41202,"empty":41203,"nationalparks":41204,"aziz":41205,"pmi":41206,"junkies":41207,"fener":41208,"sqn":41209,"ès":41210,"generation":41211,"cleopatra":41212,"bhubanes":41213,"mosques":41214,"tyfree":41215,"poppins":41216,"twc":41217,"orwell":41218,"nage":41219,"kawhi":41220,"hollow":41221,"dalai":41222,"¨¨¨¨":41223,"ouro":41224,"mhealth":41225,"gion":41226,"azo":41227,"visas":41228,"renegade":41229,"reic":41230,"wsop":41231,"ðŁĴļðŁĴĽ":41232,"echel":41233,"toxicity":41234,"mün":41235,"bunk":41236,"stimulating":41237,"asthour":41238,"\\'":41239,"eph":41240,"endemic":41241,"cnbc":41242,"shrinking":41243,"peabody":41244,"michelangelo":41245,"canyon":41246,"wale":41247,"sumi":41248,"siders":41249,"inuit":41250,"?.":41251,"professionalism":41252,"dracing":41253,"platoon":41254,"pons":41255,"outbound":41256,"mapleleafs":41257,"desol":41258,"cency":41259,"athan":41260,"verma":41261,"rubbing":41262,"okan":41263,"ðŁijł":41264,"mullins":41265,"authentic":41266,"Åį":41267,"almanac":41268,"gaia":41269,"bbq":41270,"onimo":41271,"keh":41272,"tya":41273,"touts":41274,"yav":41275,"reposit":41276,",.":41277,"wight":41278,"seeyou":41279,"callof":41280,"donesia":41281,"bargaining":41282,"granth":41283,"sdsu":41284,"amphitheater":41285,"psu":41286,"rewatching":41287,"winetasting":41288,"peakdistrict":41289,"detecting":41290,"thurman":41291,"phee":41292,"èªķ":41293,"umich":41294,"rer":41295,"sculpted":41296,"gole":41297,"namesake":41298,"ðŁĶģ":41299,"servicing":41300,"baugh":41301,"pugh":41302,"pencil":41303,"darth":41304,"munchkin":41305,"atorium":41306,"teners":41307,"suny":41308,"rollingstones":41309,"maging":41310,"starrer":41311,"idris":41312,"feinstein":41313,"agron":41314,"âĺºï¸ıâĺºï¸ı":41315,"supervised":41316,"chameleon":41317,"aggregate":41318,"successive":41319,"mogul":41320,"instyle":41321,"poldark":41322,"custome":41323,"ohiostate":41324,"haya":41325,"cides":41326,"brokerage":41327,"angelou":41328,"fifawwc":41329,"deforestation":41330,"alton":41331,"pamph":41332,"hugged":41333,"hobo":41334,"changeable":41335,"kuber":41336,"burroughs":41337,"demonetisation":41338,"capecod":41339,"versatility":41340,"orice":41341,"leila":41342,"womeninscience":41343,"tua":41344,"hedges":41345,"embarrassment":41346,"alife":41347,"soars":41348,"nighter":41349,"hymn":41350,"gipp":41351,"chasu":41352,"techs":41353,"niall":41354,"killa":41355,"hika":41356,"camels":41357,"value":41358,"¢":41359,"scoops":41360,"mahmoud":41361,"clusive":41362,"adriana":41363,"paco":41364,"ozil":41365,"unas":41366,"translations":41367,"whisperer":41368,"sbi":41369,"buxton":41370,"biotics":41371,"indiffe":41372,"kenney":41373,"klar":41374,"etching":41375,"barrabest":41376,"instability":41377,"seine":41378,"votel":41379,"blogged":41380,"whiskey":41381,"myspace":41382,"tant":41383,"landia":41384,"giveback
":41385,"illus":41386,"awak":41387,"acab":41388,"fbloggers":41389,"cloudcomputing":41390,"blatant":41391,"syrians":41392,"bandra":41393,"styn":41394,"anem":41395,"keted":41396,"karthik":41397,"barunsob":41398,"pinot":41399,"gubernat":41400,"gaye":41401,"artiste":41402,"ified":41403,"conventions":41404,"huan":41405,"geniuses":41406,"eeeeee":41407,"folly":41408,"somerville":41409,"pridemonth":41410,"ðŁĩºðŁĩ¸ðŁĩºðŁĩ¸":41411,"chemotherapy":41412,"pauls":41413,"bakar":41414,"ìĦ¸ë¸IJ":41415,"taiwanese":41416,"follo":41417,"css":41418,"reign":41419,"nnnn":41420,"flaun":41421,"catastrophe":41422,"ities":41423,"fragments":41424,"extremists":41425,"ymoun":41426,"carmen":41427,"ezekiel":41428,"connecting":41429,"seh":41430,"manta":41431,"remodeling":41432,"weymouth":41433,"atoms":41434,"cem":41435,"newell":41436,"lumi":41437,"theopen":41438,"moc":41439,"miliband":41440,"gland":41441,"zshq":41442,"maggie":41443,"maniacs":41444,"msp":41445,"ady":41446,"creams":41447,"leanne":41448,"esta":41449,"pyg":41450,"affinity":41451,"prayer":41452,"dunbar":41453,"lightroom":41454,"acadi":41455,"wynonna":41456,"romantic":41457,"statedept":41458,"sickle":41459,"whos":41460,"lamo":41461,"etour":41462,"finity":41463,"shrub":41464,"sharpen":41465,"pundit":41466,"edon":41467,"afore":41468,"mars":41469,"jeffery":41470,"terps":41471,"medallist":41472,"katharine":41473,"accusing":41474,"taz":41475,"royd":41476,"fromhome":41477,"confrontation":41478,"allegh":41479,"ðŁijīðŁijī":41480,"refresher":41481,"ranveer":41482,"neverland":41483,"jojo":41484,"lucrative":41485,"enam":41486,"caver":41487,"paedi":41488,"manjaro":41489,"fluids":41490,"thessal":41491,"oppressed":41492,"muss":41493,"johanna":41494,"Ø®":41495,"cng":41496,"buildthe":41497,"settles":41498,"sith":41499,"fuego":41500,"clamp":41501,"arag":41502,"payer":41503,"tedx":41504,"mandy":41505,"interstellar":41506,"frc":41507,"chand":41508,"bcc":41509,"molo":41510,"lentil":41511,"johansson":41512,"grimsby":41513,"naturelovers":41514,"ðŁļ¨ðŁļ¨ðŁļ¨":41515,"shinde":41516,"xin":41517,"internationaldayof":41518,"transitional":41519,"sata":41520,"caddy":41521,"wod":41522,"ifu":41523,"hays":41524,"hollyo":41525,"jang":41526,"irc":41527,"coim":41528,"gradable":41529,"\"\"":41530,"ðŁį´":41531,"া":41532,"ael":41533,"nyo":41534,"westlake":41535,"timeout":41536,"sofi":41537,"phenomena":41538,"cultivation":41539,"agno":41540,"unarmed":41541,"sot":41542,"conj":41543,"geno":41544,"royalnavy":41545,"nutrition":41546,"fairmont":41547,"tirelessly":41548,"sng":41549,"rety":41550,"mica":41551,"lucent":41552,"sloane":41553,"drool":41554,"rizal":41555,"odell":41556,"criticized":41557,".'\"":41558,"laze":41559,"deserted":41560,"coder":41561,"pras":41562,"lillian":41563,"itinerary":41564,"davy":41565,"anap":41566,"whipping":41567,"hoboken":41568,"kareena":41569,"羣":41570,"vius":41571,"tern":41572,"nantucket":41573,"misunderstood":41574,"bulaga":41575,"stant":41576,"chinook":41577,"zam":41578,"relies":41579,"dss":41580,"edmond":41581,"sketchy":41582,"mell":41583,"fex":41584,"rector":41585,"distill":41586,"daydream":41587,"winemaker":41588,"ripley":41589,"billionaires":41590,"helene":41591,"atif":41592,"culprit":41593,"bertrand":41594,"wouldnt":41595,"mapped":41596,"vak":41597,"gladly":41598,"parliament":41599,"kidlitart":41600,"wareness":41601,"goliath":41602,"âĨĵ":41603,"viewpoint":41604,"tatted":41605,"fuls":41606,"dorsey":41607,"anglers":41608,"lids":41609,"kiya":41610,"bowles":41611,"beh":41612,"bite":41613,"compatibility":41614,"ancestral":41615,"prox":41616,"behaved":41617,"gubernatorial":41
618,"chfield":41619,"saban":41620,"zh":41621,"teeny":41622,"shibuya":41623,"holliday":41624,"pancy":41625,"âĿĦï¸ıâĿĦï¸ı":41626,"seungri":41627,"?,":41628,"ðŁĩ¦ðŁĩ·":41629,"imitation":41630,"impactful":41631,"anyi":41632,"genevie":41633,"años":41634,"bateman":41635,"glider":41636,"afar":41637,"rasheed":41638,"effortless":41639,"shwar":41640,"dachsh":41641,"erun":41642,"atos":41643,"kini":41644,"chd":41645,"khaki":41646,"klin":41647,"felicidades":41648,"belo":41649,"asl":41650,"toppers":41651,"finley":41652,"stacey":41653,"rigorous":41654,"karting":41655,"leppard":41656,"carmichael":41657,"beret":41658,"cse":41659,"akhi":41660,"meringue":41661,"aban":41662,"hake":41663,"geri":41664,"erjee":41665,"resto":41666,"commanders":41667,"prit":41668,"flor":41669,"adven":41670,"extermin":41671,"remainder":41672,"åIJ":41673,"esg":41674,"martino":41675,"lullaby":41676,"|@":41677,"mign":41678,"instore":41679,"bigbang":41680,"cordi":41681,"cauley":41682,"antebellum":41683,"dgate":41684,"crock":41685,"spandex":41686,"scaffolding":41687,"oreos":41688,"ê°ĵìĦ¸ë¸IJ":41689,"pomona":41690,"mauro":41691,"universi":41692,"remi":41693,"afootball":41694,"tant":41695,"smalls":41696,"neh":41697,"worldo":41698,"tropical":41699,"morph":41700,"javelin":41701,"glar":41702,"arquitec":41703,"reminiscent":41704,"tubs":41705,"spidey":41706,"makeu":41707,"sylla":41708,"progressives":41709,"blot":41710,"shorten":41711,"keepin":41712,"chak":41713,"angst":41714,"superfood":41715,"decadent":41716,"stony":41717,"neurological":41718,"arboretum":41719,"annak":41720,"fema":41721,"percu":41722,"disrespectful":41723,"smallbiz":41724,"lox":41725,"coom":41726,"csc":41727,"bsbi":41728,"prevalence":41729,"himss":41730,"espan":41731,"moga":41732,"frampton":41733,"skymap":41734,"masse":41735,"leviathan":41736,"().":41737,"nocturnal":41738,"carameli":41739,"angor":41740,"amnesia":41741,"outsiders":41742,"shealth":41743,"rhino":41744,"antag":41745,"agio":41746,"ðŁĴ°ðŁĴ°":41747,"takeme":41748,"kabaddi":41749,"csi":41750,"msh":41751,"cochrane":41752,"thessaloni":41753,"sila":41754,"haus":41755,"dusting":41756,"obese":41757,"macklemore":41758,"manish":41759,"lenin":41760,"mdc":41761,"grown":41762,"sheffield":41763,"srs":41764,"kele":41765,"carson":41766,"chum":41767,"dahlia":41768,"cantore":41769,"oppo":41770,"howling":41771,"cybercrime":41772,"surrealism":41773,"scran":41774,"faiz":41775,"thren":41776,"racists":41777,"rout":41778,"pknot":41779,"semana":41780,"sini":41781,"mccull":41782,"machi":41783,"alfonso":41784,"yb":41785,"sardar":41786,"kendrick":41787,"deng":41788,"recipro":41789,"onf":41790,"doomsday":41791,"bribery":41792,"customiz":41793,"artis":41794,"cpi":41795,"ðŁĻĪðŁĻĪ":41796,"slava":41797,"lette":41798,"ens":41799,"âĿ¤ï¸ıðŁĺĺ":41800,"crayon":41801,"adan":41802,"trc":41803,"migrate":41804,"simpson":41805,"rowers":41806,"kingsley":41807,"farmersmarket":41808,"sheehan":41809,"nephe":41810,"bornon":41811,"carton":41812,"mickey":41813,"allure":41814,"ulu":41815,"slipknot":41816,"hebdo":41817,"guido":41818,"dogcelebration":41819,"onlinemarketing":41820,"accelerating":41821,")..":41822,"originated":41823,"macaroni":41824,"edtech":41825,"outfield":41826,"mitz":41827,"discus":41828,"advertiser":41829,"manor":41830,"hashi":41831,"descrip":41832,"capita":41833,"fulbright":41834,"receptor":41835,"conn":41836,"coney":41837,"spionage":41838,"rattle":41839,"prest":41840,"uli":41841,"blogpost":41842,"ackeray":41843,")âĢ¦":41844,"redvelvet":41845,"matth":41846,"inspiring":41847,"bsd":41848,"kerri":41849,"pocon":41850,"millar":41851,"repur":41852,"acc
enture":41853,"ä¹":41854,"rambo":41855,"ragnarok":41856,"deleting":41857,"britishmuseum":41858,"patory":41859,"leipzig":41860,"florian":41861,"scifi":41862,"iners":41863,"brate":41864,"yoy":41865,"melissa":41866,"aber":41867,"masa":41868,"pote":41869,"mosquitoes":41870,"transplant":41871,"rpa":41872,";))":41873,"bastille":41874,"ylan":41875,"joyeux":41876,"melodic":41877,"captions":41878,"atrist":41879,"rochdale":41880,"gotti":41881,"pewdie":41882,"cutiesaturday":41883,"whois":41884,"aquaculture":41885,"tiva":41886,"spel":41887,"hess":41888,"haji":41889,"freddie":41890,"coper":41891,"brando":41892,"vk":41893,"photobook":41894,"*,":41895,"mydayin":41896,"michaela":41897,"brunei":41898,"srini":41899,"inte":41900,"ı":41901,"deol":41902,"dfc":41903,"separately":41904,"bund":41905,"vests":41906,"toc":41907,"meck":41908,"reinforced":41909,"constraints":41910,"carroll":41911,"sqft":41912,"rever":41913,"camper":41914,"birdman":41915,"inaction":41916,"generators":41917,"triumphant":41918,"pests":41919,"ovo":41920,"gypt":41921,"alamo":41922,"scaled":41923,"sureshpp":41924,"sdn":41925,"ismo":41926,"gios":41927,")@":41928,"justiceleague":41929,"restaurant":41930,"gabi":41931,"dengue":41932,"nextgen":41933,"exempli":41934,"apex":41935,"inspirational":41936,"downside":41937,"kidz":41938,"upl":41939,"etna":41940,"alvaro":41941,"feldman":41942,"barnet":41943,"mha":41944,"esch":41945,"blooded":41946,">>>>>>>>":41947,"kani":41948,"hofficial":41949,"casablanca":41950,"birds":41951,"tyga":41952,"swamp":41953,"oday":41954,"newcastle":41955,"nbap":41956,"cision":41957,"chools":41958,"aflo":41959,"nep":41960,"monton":41961,"akb":41962,"supermodel":41963,"downtime":41964,"thos":41965,"scwx":41966,"snoopy":41967,"aggreg":41968,"yoke":41969,"norcal":41970,"wett":41971,"prolonged":41972,"metast":41973,"beater":41974,"fta":41975,"tlap":41976,"disgusted":41977,"yh":41978,"voiceover":41979,"itchy":41980,"ipc":41981,"ðŁİ¾":41982,"pheasant":41983,"straits":41984,"rampant":41985,"jg":41986,"fertil":41987,"assures":41988,"fortunes":41989,"salinas":41990,"lizards":41991,"kettle":41992,"ibs":41993,"cynthi":41994,"heg":41995,"mccr":41996,"socceroos":41997,"happenings":41998,"corden":41999,"ðŁĺĤðŁijĮ":42000,"tches":42001,"egret":42002,"wolverines":42003,"congratulated":42004,"hogg":42005,"bottling":42006,"wri":42007,"ferri":42008,"bosch":42009,"afire":42010,"ogden":42011,"sjo":42012,"jdm":42013,"svt":42014,"contex":42015,"tollywood":42016,"mink":42017,"mese":42018,"supersonic":42019,"opoulos":42020,"å¸":42021,"âĶģ":42022,"knuckle":42023,"guise":42024,"gami":42025,"chucky":42026,"zinger":42027,"radial":42028,"complained":42029,"boda":42030,"fetal":42031,"disciplines":42032,"corro":42033,"ðŁĩ®ðŁĩ¹":42034,"opted":42035,"filtration":42036,"adnan":42037,"emcee":42038,"mistre":42039,"insomni":42040,"fergus":42041,"trajec":42042,"ondon":42043,"medtech":42044,"tangerine":42045,"madras":42046,"grue":42047,"cabs":42048,"zhu":42049,"sureshpprabhu":42050,"insulated":42051,"dayswild":42052,"ppm":42053,"bandai":42054,"vday":42055,"sff":42056,"squid":42057,"lothing":42058,"notdead":42059,"expressive":42060,"cull":42061,"alastair":42062,"xu":42063,"upfront":42064,"fishers":42065,"enes":42066,"umd":42067,"dismissal":42068,"stier":42069,"sels":42070,"lust":42071,"reactive":42072,"protester":42073,"eyelashes":42074,"alim":42075,"goode":42076,"greeng":42077,"dair":42078,"compen":42079,"anushka":42080,"prototyping":42081,"mapu":42082,"bearings":42083,"ðŁIJŁ":42084,"forme":42085,"bsbibotany":42086,"timothy":42087,"outskirts":42088,"ambed":42089,"are
tha":42090,"wendell":42091,"streaks":42092,"nim":42093,"kpk":42094,"snee":42095,"fitter":42096,"quota":42097,"pate":42098,"winning":42099,"ðŁįŃ":42100,"shopping":42101,"mainst":42102,"culver":42103,"stevie":42104,"mcfadden":42105,"counterparts":42106,"grenfell":42107,"folsom":42108,"dorset":42109,"techcrunch":42110,"â¬ħï¸ı":42111,"tiptuesday":42112,"usl":42113,"trex":42114,"georgie":42115,"ranveerofficial":42116,"licks":42117,"sewn":42118,"kf":42119,"'âĢ¦":42120,"japs":42121,"pate":42122,"orthop":42123,"festa":42124,"stras":42125,"montal":42126,"hammersmith":42127,"foremost":42128,"widows":42129,"madre":42130,"itez":42131,"mitochondri":42132,"ligans":42133,"zona":42134,"caribou":42135,"mss":42136,"andrei":42137,"weatherchannel":42138,"ghc":42139,":...":42140,"taft":42141,"aweather":42142,"alisation":42143,"brutal":42144,"blissful":42145,"nikola":42146,"malicious":42147,"qm":42148,"mpgvip":42149,"brodie":42150,"blitz":42151,"applaud":42152,"dribb":42153,"vague":42154,"doggo":42155,"translating":42156,"interpreted":42157,"hatched":42158,"getyour":42159,"beneficiaries":42160,"sparring":42161,"caesars":42162,"awilliams":42163,"lahat":42164,"broke":42165,"timp":42166,"virtues":42167,"relying":42168,"pietro":42169,"ktn":42170,"icists":42171,"pablo":42172,"loui":42173,"aag":42174,"pnpp":42175,"chast":42176,"pulses":42177,"finish":42178,"usairforce":42179,"typewriter":42180,"thompson":42181,"dogs":42182,"utto":42183,"ãģį":42184,"sandal":42185,"newly":42186,"doge":42187,"zw":42188,"wankers":42189,"negr":42190,"mucha":42191,"determines":42192,"blackfish":42193,"skunk":42194,"mups":42195,"instrument":42196,"phyto":42197,"daystogo":42198,"skinned":42199,"haider":42200,"conten":42201,"ðŁIJ¾ðŁIJ¾":42202,"weiler":42203,"undoubtedly":42204,"chairing":42205,"wallis":42206,"shard":42207,"zindabad":42208,"adult":42209,"absorption":42210,"presto":42211,"deploying":42212,"drummond":42213,"battlefront":42214,"seagulls":42215,"howdy":42216,"judaism":42217,"desde":42218,"partition":42219,"âľĿ":42220,"nology":42221,"nationalbestfriend":42222,"lesnar":42223,"filmfare":42224,"coasts":42225,"christensen":42226,"acan":42227,"mbu":42228,"copped":42229,"rubble":42230,"swc":42231,"funnier":42232,"farther":42233,"whereas":42234,"nanotechnology":42235,"withstand":42236,"pillow":42237,"bowers":42238,"tope":42239,"itly":42240,"confit":42241,"makar":42242,"comforts":42243,"bosh":42244,"clipper":42245,"balla":42246,"stik":42247,"milb":42248,"safeguard":42249,"musique":42250,"easport":42251,"yaz":42252,"padded":42253,"bader":42254,"foreign":42255,"chopin":42256,"archive":42257,"oka":42258,"transporting":42259,"tmltalk":42260,"ajit":42261,"consequence":42262,"scroo":42263,"ffo":42264,"collaborated":42265,"pugchat":42266,"yemi":42267,"javed":42268,"auburn":42269,"oof":42270,"maw":42271,"saucer":42272,"mitigate":42273,"iles":42274,"evangelist":42275,"terie":42276,"recl":42277,"indictment":42278,"cata":42279,"brightness":42280,"maythe":42281,"whimsical":42282,"unlv":42283,"keyword":42284,"cumin":42285,"medway":42286,"westworld":42287,"traw":42288,"imposing":42289,"formity":42290,"coulter":42291,"abz":42292,"nypd":42293,"grassi":42294,"kelsey":42295,"qldpol":42296,"clockwork":42297,"fdr":42298,"dianne":42299,"âĺij":42300,"adh":42301,"pann":42302,"bravely":42303,"aege":42304,"unlawful":42305,"verdi":42306,"pocalypse":42307,"pharo":42308,"karla":42309,"resonance":42310,"mastiff":42311,"ladak":42312,"buu":42313,"mailed":42314,"hii":42315,"crawley":42316,"torrent":42317,"machado":42318,"libyan":42319,"effortlessly":42320,"falsely":42321,
"qvist":42322,"keef":42323,"crafthour":42324,"cherished":42325,"valkyrie":42326,"sari":42327,"kalamaz":42328,"behe":42329,"ðŁĮĻ":42330,"thim":42331,"roddy":42332,"coltrane":42333,"butchers":42334,"achim":42335,"wkend":42336,"awkward":42337,"cabrera":42338,":))))":42339,"franc":42340,"declan":42341,"condos":42342,"aja":42343,"pandoramusic":42344,"charter":42345,"phill":42346,"montrose":42347,"hatchback":42348,"handicapp":42349,"greaves":42350,"eucalyptus":42351,"utmost":42352,"tson":42353,"burton":42354,"midwives":42355,"incur":42356,"ðŁĺį#":42357,"mood":42358,"compressed":42359,"toma":42360,"mustang":42361,"mog":42362,"asana":42363,"testic":42364,"shotel":42365,"insol":42366,"corsair":42367,"nhq":42368,"benny":42369,"smma":42370,"kapur":42371,"incon":42372,"jonas":42373,"energies":42374,"donal":42375,"asad":42376,"sez":42377,"npa":42378,"archived":42379,"stimulate":42380,"dop":42381,"hyd":42382,"grieving":42383,"ãĥĪ":42384,"rona":42385,"whyte":42386,"treehouse":42387,"ssell":42388,"sandro":42389,"kobo":42390,"thermost":42391,"seclu":42392,"hiya":42393,"geez":42394,"mamas":42395,"priscilla":42396,"flavoured":42397,"fass":42398,"wold":42399,"makerspace":42400,"cosplay":42401,"ptv":42402,"happyvalentinesday":42403,"sequoia":42404,"lovecraft":42405,"guan":42406,"dtm":42407,"cii":42408,"yokohama":42409,"posthum":42410,"req":42411,"ðŁĶµâļªï¸ı":42412,"galatasar":42413,"dolby":42414,"hamptons":42415,"disturbance":42416,"stonehenge":42417,"okc":42418,"disrupting":42419,"monthsary":42420,"jungle":42421,"headlights":42422,"dustin":42423,"microsof":42424,"happymothersday":42425,"koko":42426,"grazi":42427,"testo":42428,"naidu":42429,"malay":42430,"arial":42431,"rumb":42432,"aboo":42433,"harman":42434,"trape":42435,"spoils":42436,"jeho":42437,"godly":42438,"lockscreen":42439,"zun":42440,"pious":42441,"magento":42442,"lenders":42443,"probable":42444,"corporal":42445,"mour":42446,"awal":42447,"sua":42448,"callme":42449,"tonne":42450,"govin":42451,"devastation":42452,"xj":42453,"gearbox":42454,"warlock":42455,"perme":42456,"itate":42457,"gazaunderattack":42458,"duval":42459,"parasite":42460,"clemente":42461,"leth":42462,"iva":42463,"frozen":42464,"tholes":42465,"tobin":42466,"cairn":42467,"sill":42468,"luckiest":42469,"converts":42470,"stale":42471,"pancra":42472,"europale":42473,"wisdom":42474,"schur":42475,"ì¶":42476,"vertigo":42477,"bij":42478,"ubc":42479,"nure":42480,"righteousness":42481,"mtc":42482,"factory":42483,"verst":42484,"reversed":42485,"huri":42486,"heechul":42487,"faber":42488,"arr":42489,"ulous":42490,"venom":42491,"phat":42492,"greenery":42493,"brady":42494,"æ":42495,":((":42496,"nevergiveup":42497,"disha":42498,"mota":42499,"healthcare":42500,"dunham":42501,"dexpo":42502,"denzel":42503,"bbins":42504,"fics":42505,"wham":42506,"mcg":42507,"elian":42508,"wata":42509,"stralia":42510,"tellu":42511,"pesky":42512,"spinoff":42513,"armoured":42514,"reacted":42515,"dofficial":42516,"tedu":42517,"sagar":42518,"morally":42519,"paralleled":42520,"fios":42521,"downer":42522,"daugh":42523,"redo":42524,"worldcup":42525,"tariq":42526,"barne":42527,"glaciers":42528,"occult":42529,"barbarian":42530,"hermosa":42531,"!!!)":42532,"yur":42533,"internation":42534,"pss":42535,"situ":42536,"pint":42537,"americanair":42538,"swam":42539,"doppler":42540,"ðŁĴĻðŁĴľ":42541,"cincodemayo":42542,"levan":42543,"hellenic":42544,"mcne":42545,"judi":42546,"yuh":42547,"stx":42548,"quare":42549,"ðŁĺĤ.":42550,"stig":42551,"gels":42552,"motley":42553,"hardwork":42554,"eurozone":42555,"ead":42556,"ç¥Ń":42557,"seabir":42558,"cius":4
2559,"laid":42560,"alpaca":42561,"presumably":42562,"pewdiepie":42563,"booted":42564,"amari":42565,"tamine":42566,"solace":42567,"barrow":42568,"academies":42569,"xian":42570,"omination":42571,"dungeons":42572,"bma":42573,"deity":42574,"aik":42575,"stabil":42576,"hira":42577,"affectionate":42578,"vingne":42579,"newport":42580,"ãħĭãħĭ":42581,"thirds":42582,"retains":42583,"aromatherapy":42584,"skier":42585,"nima":42586,"dope":42587,"cringe":42588,"condomin":42589,"toor":42590,"animator":42591,"saraj":42592,"seascape":42593,"minimalism":42594,"lakeshore":42595,"callaway":42596,"bergman":42597,"à¤Ĺ":42598,"whispering":42599,"stupid":42600,"rightful":42601,"requis":42602,"irn":42603,"seva":42604,"utpol":42605,"tuberculo":42606,"squish":42607,"debut":42608,"governmental":42609,"christine":42610,"allman":42611,"weapon":42612,"sito":42613,"buri":42614,"lolita":42615,"leafy":42616,"fuch":42617,"tinted":42618,"mcken":42619,"ahahaha":42620,"ðŁĩµðŁĩ¹":42621,"repeal":42622,"negan":42623,"ðŁķĬ":42624,"tailgating":42625,"gameinsight":42626,"ðŁıŁï¸ı":42627,"yakuza":42628,"zt":42629,"tiring":42630,"proposing":42631,"bowlers":42632,"traitors":42633,"akshi":42634,"clergy":42635,"cito":42636,"upsets":42637,"tuscal":42638,"symphonic":42639,"silently":42640,"shuff":42641,"blackwell":42642,"ðŁĺĤ)":42643,"kobe":42644,"roberto":42645,"ridg":42646,"dcu":42647,"merino":42648,"ftp":42649,"eastside":42650,".~":42651,"nbl":42652,"mnleg":42653,"tsfor":42654,"fraudul":42655,"capping":42656,"inmy":42657,"gymnast":42658,"stones":42659,"ssin":42660,"tweaks":42661,"shaggy":42662,"oakland":42663,"demsin":42664,"sangria":42665,"mmva":42666,"hennessy":42667,"downton":42668,"rightly":42669,"init":42670,"agave":42671,"oblast":42672,"northeast":42673,"friendship":42674,"dala":42675,"trophy":42676,"ðŁij½":42677,"magin":42678,"margaritas":42679,"ê·":42680,"wwfc":42681,"fash":42682,"dike":42683,"cud":42684,"chart":42685,"ðŁij®":42686,"refugees":42687,"joplin":42688,"ncs":42689,"impy":42690,"firmware":42691,"pascu":42692,"flamin":42693,"healthtech":42694,"bellletstalk":42695,"waka":42696,"olls":42697,"lago":42698,"cowan":42699,"bombardier":42700,"shome":42701,"ðŁĻħ":42702,"mcmaster":42703,"nave":42704,"wells":42705,"uta":42706,"tellers":42707,"misfits":42708,"kapil":42709,"faceoff":42710,"affirm":42711,"apro":42712,"whitepaper":42713,"superyacht":42714,"specimens":42715,"allocated":42716,"...,":42717,"-__":42718,"kaw":42719,"dachshund":42720,"djoker":42721,"swork":42722,"quiere":42723,"orum":42724,"ðŁIJł":42725,"somm":42726,"cmt":42727,"inghour":42728,"skinny":42729,"lgbti":42730,"giggles":42731,"breakaway":42732,"researched":42733,"parity":42734,"myal":42735,"msl":42736,"retained":42737,"sivity":42738,"makeinindia":42739,"solves":42740,"defamation":42741,"waltham":42742,"sriracha":42743,"roadway":42744,"conceptu":42745,"alin":42746,"iwant":42747,"åĪ":42748,"delft":42749,"tenderloin":42750,"gains":42751,"faults":42752,"swire":42753,"stellen":42754,"pollo":42755,"dyne":42756,"bornonthisday":42757,"asdfghj":42758,"sql":42759,"salim":42760,"advises":42761,"voip":42762,"ìĹijìĨ":42763,"untouched":42764,"sheil":42765,"ontario":42766,"uphill":42767,"sobre":42768,"deshi":42769,"novella":42770,"dutton":42771,"crawfish":42772,"اÙĨ":42773,"maa":42774,"twine":42775,"kalin":42776,"ðŁĩµðŁĩŃ":42777,"yess":42778,"brooks":42779,"hoosiers":42780,"tonka":42781,"umbrellas":42782,"ayers":42783,"ateam":42784,"acquiring":42785,"suction":42786,"än":42787,"wies":42788,"tarians":42789,"socio":42790,"mattb":42791,"shepherds":42792,"oso":42793,"charitytuesday":4
2794,"slogans":42795,"ninjas":42796,"albat":42797,"byte":42798,"bashir":42799,"trampoline":42800,"mydayinla":42801,"ija":42802,"basel":42803,"rory":42804,"goldie":42805,"firec":42806,"unnoticed":42807,"peculiar":42808,"scha":42809,"kerson":42810,"mourns":42811,"liquidity":42812,"quipment":42813,"hibs":42814,"ars":42815,"aeronau":42816,"slideshow":42817,"slabs":42818,"deliciousness":42819,"skitchen":42820,"htafc":42821,"fullerton":42822,"creighton":42823,"aerob":42824,"procrastination":42825,"azores":42826,"whitehall":42827,"ussoccer":42828,"mediation":42829,"djokernole":42830,"andme":42831,"umen":42832,"noxious":42833,"joss":42834,"ilife":42835,"annivers":42836,"sudanese":42837,"etres":42838,"undermine":42839,"wholefoods":42840,"disobe":42841,"kori":42842,"adele":42843,"eliz":42844,"canti":42845,"alon":42846,"gymnasium":42847,"sarkodie":42848,"meteorologist":42849,"ylde":42850,"steen":42851,"stampcollecting":42852,"nasal":42853,"lott":42854,"franks":42855,"exol":42856,"acki":42857,"goodyear":42858,"animalrights":42859,"yles":42860,"violets":42861,"mmes":42862,"sthel":42863,"rapping":42864,"tuscan":42865,"waiver":42866,"turner":42867,"eatlocal":42868,"northeasthour":42869,"animations":42870,"tommorow":42871,"tsh":42872,"ffame":42873,"brae":42874,"petron":42875,"glamour":42876,"bryn":42877,"dcs":42878,"bales":42879,"ðŁĶ¶":42880,"brov":42881,"brev":42882,"bons":42883,"physique":42884,"carne":42885,"xe":42886,"elixir":42887,"volved":42888,"loma":42889,"ìľł":42890,"æĺ":42891,"vanu":42892,"rigs":42893,"balance":42894,"vares":42895,"bonita":42896,"sprinkle":42897,"perfecto":42898,"dion":42899,"leak":42900,"calcutta":42901,"oba":42902,"dma":42903,"cmon":42904,"tuner":42905,"pneumonia":42906,"bogus":42907,"apologe":42908,"clough":42909,"borne":42910,"))))":42911,"revived":42912,"ovarian":42913,"nerf":42914,"clegg":42915,"fanfest":42916,"chou":42917,"realizes":42918,"mcn":42919,"ligu":42920,"legalize":42921,"justsaying":42922,"forster":42923,"bosni":42924,"khi":42925,"indom":42926,"heidel":42927,"encryp":42928,"siss":42929,"eddi":42930,"marbles":42931,"brisbane":42932,"ying":42933,"prepaid":42934,"walsall":42935,"cooperate":42936,"orchestr":42937,"marisa":42938,"howie":42939,"chewy":42940,"brenner":42941,"andromeda":42942,"egan":42943,"stocki":42944,"cavendish":42945,"agan":42946,"bano":42947,"deir":42948,"gog":42949,"blk":42950,"rethinking":42951,"chig":42952,"rheu":42953,"snip":42954,"peng":42955,"seminole":42956,"mswx":42957,"annex":42958,"lynda":42959,"lewishamilton":42960,"cumul":42961,"tbl":42962,"dolphin":42963,"aguero":42964,"............":42965,"prelude":42966,"atour":42967,"granger":42968,"tooting":42969,"rotun":42970,"disar":42971,"homeitems":42972,"dares":42973,"********":42974,"ðŁijĨ":42975,"compreh":42976,"jinx":42977,"aswell":42978,"irie":42979,"circulating":42980,"ðŁIJ¥":42981,"overboard":42982,"cultivate":42983,"rhett":42984,"orienteering":42985,"cak":42986,"balkans":42987,"sitt":42988,"jasmin":42989,"britneyspears":42990,"rotor":42991,"sealing":42992,"gbc":42993,"occi":42994,"fas":42995,"emancip":42996,"comer":42997,"wartime":42998,"tickle":42999,"sonny":43000,"paces":43001,"logg":43002,"atrix":43003,"srp":43004,"gwin":43005,"dobbs":43006,"uzbe":43007,"thewanted":43008,"drush":43009,"extru":43010,"micky":43011,"honorees":43012,"darwin":43013,"redux":43014,"mmj":43015,"rami":43016,"jalapeño":43017,"ioc":43018,"dover":43019,"juju":43020,"whitney":43021,"seng":43022,"enly":43023,"auch":43024,"archipelago":43025,"vigilant":43026,"mangal":43027,"wildest":43028,"paranoid":43029,"hali":430
30,"bbly":43031,"sanctioned":43032,"realms":43033,"conco":43034,"uddin":43035,"csk":43036,"playtime":43037,"libra":43038,"savag":43039,"octane":43040,"rectan":43041,"return":43042,"parrish":43043,"morrha":43044,"ccp":43045,"cmu":43046,"sailed":43047,"sevent":43048,"rosie":43049,"piling":43050,"hew":43051,"boarded":43052,"segments":43053,"nephro":43054,"(.":43055,"crats":43056,"bakes":43057,"ðŁį¸":43058,"backtothe":43059,"sibling":43060,"kirkland":43061,"keo":43062,"guwa":43063,"breads":43064,"ðŁĺľðŁĺľ":43065,"tq":43066,"harassed":43067,"gau":43068,"wilbur":43069,"jisoo":43070,"eper":43071,"lisam":43072,"trippin":43073,"shino":43074,"rukh":43075,"beastmode":43076,"choa":43077,"instaweather":43078,"richland":43079,"gari":43080,"fez":43081,"cowboysnation":43082,"fursuit":43083,"krun":43084,"aen":43085,"sycamore":43086,"segun":43087,"entennial":43088,"dih":43089,"oax":43090,"demsinphilly":43091,"ðŁĻĢ":43092,"snhl":43093,"pennies":43094,"passwords":43095,"makin":43096,"tye":43097,"deng":43098,"knigh":43099,"jeeplife":43100,"helpline":43101,"afor":43102,"zzzz":43103,"steamy":43104,"picker":43105,"iterate":43106,"happeningnow":43107,"kib":43108,"bloomberg":43109,"martyrdom":43110,"bully":43111,"assortment":43112,"ahora":43113,"zoe":43114,"noi":43115,"illustri":43116,"agarwal":43117,"psc":43118,"electronica":43119,"recruiter":43120,"gardiner":43121,"radha":43122,"nafta":43123,"dotnet":43124,"piero":43125,"georg":43126,"bels":43127,"ðŁĺĤðŁĺį":43128,"tuberculosis":43129,"runnin":43130,"moris":43131,"hauling":43132,"evoc":43133,"brethren":43134,"shair":43135,"frameworks":43136,"astu":43137,"rigid":43138,"kuma":43139,"kreme":43140,"jinnah":43141,"insurers":43142,"nyu":43143,"fere":43144,"nollywood":43145,"goodvibes":43146,"-...":43147,"toile":43148,"skril":43149,"instaweatherpro":43150,"czech":43151,"pavel":43152,"onepiece":43153,"nikeplus":43154,"filet":43155,"cavity":43156,"ðŁı½âĢįâĻĤï¸ı":43157,"ðŁİ£":43158,"drastic":43159,"dailys":43160,"siamese":43161,"rebu":43162,"osteo":43163,"lark":43164,"fre":43165,"shelling":43166,"pé":43167,"gladys":43168,"ðŁıĢðŁıĢ":43169,"gustave":43170,"submerged":43171,"grandstand":43172,"attu":43173,"wont":43174,"fpv":43175,"bley":43176,"joni":43177,"angames":43178,"weighted":43179,"alou":43180,"श":43181,"lesbians":43182,"fj":43183,"annies":43184,"aml":43185,"doria":43186,"davin":43187,"beta":43188,"canc":43189,"madewithunity":43190,"haj":43191,"badlands":43192,"mul":43193,"bluec":43194,"pawn":43195,"covington":43196,"neurology":43197,"httweets":43198,"dyslexia":43199,"thelove":43200,"neat":43201,"forklift":43202,"automate":43203,"uneven":43204,"montess":43205,"hein":43206,"hag":43207,"relics":43208,"competitiveness":43209,"canelo":43210,"martens":43211,"bulletproof":43212,"skittles":43213,"gya":43214,"primo":43215,"americafirst":43216,"wooo":43217,"abortions":43218,"??!!":43219,"mache":43220,"lders":43221,"rlly":43222,"prelims":43223,"direct":43224,"course":43225,"swain":43226,"supercell":43227,"eccentric":43228,"stingray":43229,"plets":43230,"wilcox":43231,"westin":43232,"okanagan":43233,"kiran":43234,"carbo":43235,"bombings":43236,"rarest":43237,"boh":43238,"gawd":43239,"digg":43240,"moana":43241,"entirety":43242,"enclosed":43243,"dodgeball":43244,"parton":43245,"milkyway":43246,"atr":43247,"thoroughbred":43248,"really":43249,"qantas":43250,"epiphany":43251,"inee":43252,"aerosmith":43253,"spieth":43254,"arthro":43255,"ellini":43256,"dubu":43257,"braving":43258,"âļ½âļ½":43259,"restructuring":43260,"illuminate":43261,"equili":43262,"mpi":43263,"ashton":43264,"ponytail":4
3265,"mascots":43266,"flattering":43267,"crum":43268,"asta":43269,"à®°":43270,"strangerthings":43271,"barnab":43272,"رÙĬ":43273,"makeshift":43274,"gotcha":43275,"willam":43276,"choirs":43277,"kilometres":43278,"ghosh":43279,"euthan":43280,"dolly":43281,"unning":43282,"thear":43283,"crewe":43284,"wsw":43285,"jace":43286,"dismiss":43287,"kean":43288,"hota":43289,"khat":43290,"~>":43291,"thiru":43292,"rendez":43293,"hartman":43294,"teessi":43295,"casca":43296,"zah":43297,"hydrange":43298,"fod":43299,"awp":43300,"mzansi":43301,"thicker":43302,"nagoya":43303,"neva":43304,"stique":43305,"castel":43306,"damian":43307,"thereby":43308,"jiang":43309,"alek":43310,"musicislife":43311,"raq":43312,"callahan":43313,"gouache":43314,"somaliland":43315,"seanhannity":43316,"raheem":43317,"lose":43318,"elove":43319,"wharton":43320,"rectangular":43321,"illustrating":43322,"harne":43323,"autisma":43324,"scrapped":43325,"elland":43326,"decree":43327,"nagpur":43328,"kipp":43329,"sore":43330,"nmd":43331,"maas":43332,"guna":43333,"gartner":43334,"belli":43335,"thenight":43336,"jeon":43337,"genderequality":43338,"giver":43339,"ael":43340,"garments":43341,"neu":43342,"mardigras":43343,"marsden":43344,"rower":43345,"polluted":43346,"cameraman":43347,"vinod":43348,"beasley":43349,"croc":43350,"jiu":43351,"hollyoaks":43352,"anesthesia":43353,"alles":43354,"steward":43355,"latimes":43356,"ðŁĩºðŁĩ¸ðŁĩºðŁĩ¸ðŁĩºðŁĩ¸":43357,"tician":43358,"goria":43359,"comedic":43360,"ðŁ¤ĶðŁ¤ĶðŁ¤Ķ":43361,"naive":43362,"slions":43363,"łĪ":43364,"burglar":43365,"ðŁĺŃðŁĺŃðŁĺŃðŁĺŃðŁĺŃ":43366,"yorkshi":43367,"señ":43368,"fanboy":43369,"laurel":43370,"incidence":43371,"potomac":43372,"roberta":43373,"presiden":43374,"pryor":43375,"osbourne":43376,"wku":43377,"teme":43378,"palae":43379,"ðŁ¥º":43380,"reboun":43381,"itude":43382,"reddish":43383,"khand":43384,"colonialism":43385,"northcarolina":43386,"ðĿĴ":43387,"mannequin":43388,"ladybird":43389,"tasty":43390,"knowledgeable":43391,"gshore":43392,"ðŁĮĮ":43393,"ன":43394,"quaker":43395,"salzburg":43396,"medalists":43397,"chyna":43398,"bridesmaid":43399,"maori":43400,"rop":43401,"outraged":43402,"inadequate":43403,"truckers":43404,"alana":43405,"ìĿ¼":43406,"rix":43407,"oooooooo":43408,"commandments":43409,"lambeth":43410,"aaj":43411,"ecofriendly":43412,"blaz":43413,"morecambe":43414,"bouncy":43415,"roux":43416,"raided":43417,"mized":43418,"shc":43419,"gawx":43420,"laboratories":43421,"rubs":43422,"restroom":43423,"consultations":43424,"cajun":43425,"virgini":43426,"soir":43427,"revue":43428,"plein":43429,"wager":43430,"ç¹":43431,"wedo":43432,"growingup":43433,"!ðŁĺĬ":43434,"faceted":43435,"sinners":43436,"hovering":43437,"tiene":43438,"seasoning":43439,"anja":43440,"leggo":43441,"ilis":43442,"flax":43443,"devo":43444,"ashram":43445,"matisse":43446,"keri":43447,"gower":43448,"botox":43449,"marshes":43450,"unhcr":43451,"tsm":43452,"optimus":43453,"duni":43454,"stuffs":43455,"sok":43456,"orderly":43457,"nbad":43458,"islamophobia":43459,"ravioli":43460,"faber":43461,"creds":43462,"wonka":43463,"infusion":43464,"overweight":43465,"dailynews":43466,"assimil":43467,"acollege":43468,"medallion":43469,"kilimanjaro":43470,"stiff":43471,"thames":43472,"sunken":43473,"thard":43474,"mydubai":43475,"hilariously":43476,"hannel":43477,"plumber":43478,"fairview":43479,"separating":43480,"rascal":43481,"quien":43482,"necessities":43483,"confederation":43484,"llll":43485,":]":43486,"weaknesses":43487,"bronco":43488,"raffles":43489,"elot":43490,"ãĤ¸ãĥ":43491,"adventcalendar":43492,"ðŁİ¹":43493,"stravel":43494,"tunic":43495,
"ksu":43496,"impeach":43497,"espionage":43498,"!-":43499,"diment":43500,"currant":43501,"biode":43502,"commuting":43503,"byron":43504,"ðŁĴĵðŁĴĵ":43505,"shaded":43506,"truro":43507,"crayons":43508,"arne":43509,"hsc":43510,"freaked":43511,"dramati":43512,"fleek":43513,"ucd":43514,"marlborough":43515,"^-":43516,"crossings":43517,"malo":43518,"blackops":43519,"binance":43520,"choked":43521,"cheney":43522,"plo":43523,"gestures":43524,"valedic":43525,"ryanair":43526,"remington":43527,"vcs":43528,"mckee":43529,"ecz":43530,"begs":43531,"nailart":43532,"mayorof":43533,"happyfathersday":43534,"wart":43535,"petitions":43536,"ningly":43537,"cleanenergy":43538,"brox":43539,"slalom":43540,"existent":43541,"abay":43542,"ugliest":43543,"tomp":43544,"stoma":43545,"selby":43546,"goalscorer":43547,"benji":43548,"overwhelmingly":43549,"lans":43550,"semiconductor":43551,"southkorea":43552,"rescheduled":43553,"skyl":43554,"enlisted":43555,"dowski":43556,"sidel":43557,"rosenberg":43558,"nasser":43559,"whitehead":43560,"prius":43561,"harare":43562,"enn":43563,"ryder":43564,"íĤ":43565,"mong":43566,"clasico":43567,"transporter":43568,"potty":43569,"isme":43570,"*****":43571,"vice":43572,"skit":43573,"odessa":43574,"lmp":43575,"hern":43576,"racially":43577,"pinoy":43578,"paraguay":43579,"obituary":43580,"goes":43581,"bucha":43582,"sidewalks":43583,"angular":43584,"unconstitutional":43585,"transitioning":43586,"ibu":43587,"guys":43588,"unpacking":43589,"oooooo":43590,"blackgirl":43591,"bergs":43592,"¯":43593,"wordoftheday":43594,"trumptrain":43595,"thunderbolt":43596,"msi":43597,"fascists":43598,"ब":43599,"tsk":43600,"collapses":43601,"rajesh":43602,"loveislove":43603,"migrating":43604,"setback":43605,"ðŁĺĬâĿ¤ï¸ı":43606,"tels":43607,"safetyfirst":43608,"narrated":43609,"jaejoong":43610,"unanswered":43611,"liqueur":43612,"ennes":43613,"dalgo":43614,"billings":43615,"saltwater":43616,"mermaids":43617,"longs":43618,"clapham":43619,"wearec":43620,"piccollage":43621,"nach":43622,"hace":43623,"poisoned":43624,"loth":43625,"agna":43626,"adelrey":43627,"guardia":43628,"polishing":43629,"peacekeeping":43630,"dall":43631,"pisa":43632,"lapland":43633,"processors":43634,"deandre":43635,"sobs":43636,"ponce":43637,"drains":43638,"cbe":43639,"ðŁİ¥:":43640,"splash":43641,"meatball":43642,"fontana":43643,"worcestershirehour":43644,"nev":43645,"brisk":43646,"bint":43647,"acr":43648,"pox":43649,"cayenne":43650,"skrillex":43651,"jfc":43652,"hahahahahahaha":43653,"glas":43654,"engul":43655,"temporal":43656,"onized":43657,"concre":43658,"compose":43659,"vibrations":43660,"planters":43661,"fert":43662,"criticalrolefanart":43663,"tbli":43664,"schallenge":43665,"huckabee":43666,"municipal":43667,"iambic":43668,"radios":43669,"nevis":43670,"durability":43671,"mccla":43672,"horseback":43673,"institutes":43674,"fulfill":43675,"attach":43676,"ateur":43677,"akan":43678,"resisting":43679,"illumination":43680,"handle":43681,"haircare":43682,"oment":43683,"macleod":43684,"kaiser":43685,"gno":43686,"beardown":43687,"lyf":43688,"glomer":43689,"distortion":43690,"zm":43691,"sank":43692,"roosters":43693,"isnow":43694,"asports":43695,"agen":43696,"woken":43697,"stgeorge":43698,"romper":43699,"myle":43700,"economists":43701,"ruto":43702,"twill":43703,"healthand":43704,"dito":43705,"wsl":43706,"tairp":43707,"prakash":43708,"micheal":43709,"hts":43710,"wrights":43711,"katsu":43712,"fiorentina":43713,"defenseman":43714,"ditch":43715,"varsity":43716,"texanscheer":43717,"baham":43718,"scanned":43719,"weil":43720,"seductive":43721,"ðŁijįðŁı½":43722,"fue":43723,"
erwin":43724,"davison":43725,"terran":43726,"moods":43727,"woolf":43728,"resource":43729,"@.":43730,"cush":43731,"ðŁį°":43732,"regression":43733,"curled":43734,"lazer":43735,"joanne":43736,"abbott":43737,"moz":43738,"downers":43739,"mmmmmm":43740,"valentina":43741,"khair":43742,"dreamt":43743,"crook":43744,"chek":43745,"steaming":43746,"nephews":43747,"cleric":43748,"asober":43749,"indefinitely":43750,"wye":43751,"usnews":43752,"joyce":43753,"flushing":43754,"wynonnaearp":43755,"rondo":43756,"kiss":43757,"hotdog":43758,"barns":43759,"saxophon":43760,"farley":43761,"gasp":43762,"decreasing":43763,"alway":43764,"pex":43765,"lsd":43766,"shift":43767,"poutine":43768,"razz":43769,"rescuing":43770,"niko":43771,"hoch":43772,"ccl":43773,"uaap":43774,"nts":43775,"mcar":43776,"ilwx":43777,"conquering":43778,"kettering":43779,"sturdy":43780,"delaying":43781,"stok":43782,"vanished":43783,"cathar":43784,"bingham":43785,"inv":43786,"ichiro":43787,"hemo":43788,"budgeting":43789,"[...]":43790,"bess":43791,"sebastian":43792,"slowed":43793,"ðĿij":43794,"muslim":43795,"stuns":43796,"actonclimate":43797,"vea":43798,"seton":43799,"rosetta":43800,"ount":43801,"hardin":43802,"fluid":43803,"caw":43804,"ðŁ¥Ĥ":43805,"yacht":43806,"unl":43807,"sphy":43808,"provocative":43809,"oric":43810,"isback":43811,"___":43812,"nicolas":43813,"gyan":43814,"loose":43815,"flin":43816,"rebate":43817,":::":43818,"!\"@":43819,"comicon":43820,"sheff":43821,"downstream":43822,"chichester":43823,"beachlife":43824,"momlife":43825,"diabete":43826,"arra":43827,"vane":43828,"oku":43829,"yeo":43830,"mango":43831,"tryout":43832,"appell":43833,"heirs":43834,"arjuna":43835,"ddu":43836,"naveen":43837,"movic":43838,"socialists":43839,"sback":43840,"criterion":43841,"soyuz":43842,"kher":43843,"daz":43844,"yolanda":43845,"wineoclock":43846,"reina":43847,"onew":43848,"leonard":43849,"endez":43850,"ubs":43851,"supportlocal":43852,"facilitated":43853,"caramelized":43854,"bpa":43855,"vuelta":43856,"mytho":43857,"mami":43858,"speare":43859,"nbaplayoffs":43860,"fevre":43861,"nickjonas":43862,"imprint":43863,"cso":43864,"craigslist":43865,"lasalle":43866,"gideon":43867,"hadoop":43868,"disregard":43869,"wud":43870,"tuc":43871,"magee":43872,"acoustics":43873,"taa":43874,"quie":43875,"pola":43876,"crt":43877,"dwyer":43878,"dissec":43879,"capitol":43880,"mention":43881,"knoll":43882,"heigh":43883,"finders":43884,"placements":43885,"lse":43886,"indira":43887,"guri":43888,"madhuridixit":43889,"kingdoms":43890,"iambicpent":43891,"georgina":43892,"jeky":43893,"conflicting":43894,"bayan":43895,"agatha":43896,"uphold":43897,"dron":43898,"vicar":43899,"expat":43900,"peripheral":43901,"pessi":43902,"faf":43903,"ancestor":43904,"?..":43905,"widget":43906,"punc":43907,"commenced":43908,"beavs":43909,"airwaves":43910,"addis":43911,"poa":43912,"desses":43913,"coden":43914,"vue":43915,"rupee":43916,"karin":43917,"spock":43918,"msy":43919,"ะ":43920,"prick":43921,"fillmore":43922,"tification":43923,"thingsto":43924,"sarde":43925,"emile":43926,"pereira":43927,"nad":43928,"brightening":43929,"arresting":43930,"woking":43931,"uscg":43932,"spill":43933,"raspberrypi":43934,"hugo":43935,"itec":43936,"isma":43937,"cufflinks":43938,"optimized":43939,"occ":43940,"miwx":43941,"enka":43942,"elited":43943,"affordable":43944,"sakh":43945,"coronado":43946,"hoh":43947,"atul":43948,"aioli":43949,"jimcantore":43950,"accounted":43951,"vinay":43952,"hermit":43953,"grooves":43954,"ranch":43955,"rilla":43956,"wetter":43957,"outof":43958,"veterin":43959,"nikov":43960,"kian":43961,"fairbanks":4396
2,"ramapho":43963,"niti":43964,"kko":43965,"rusty":43966,"nestle":43967,"tvxq":43968,"shaheer":43969,"âĿ¤âĿ¤âĿ¤âĿ¤":43970,"pennant":43971,"gemstones":43972,"demdebate":43973,"ðŁIJĬ":43974,"autonews":43975,"supportindiefilm":43976,"macho":43977,"vex":43978,"newsat":43979,"neti":43980,"concessions":43981,"candied":43982,"yofthe":43983,"macau":43984,"dends":43985,"cricketers":43986,"saniti":43987,"mariano":43988,"ghat":43989,"artoftheday":43990,"¡ľ":43991,"egos":43992,"genoa":43993,"chatbots":43994,"brier":43995,"allabout":43996,"monty":43997,"spied":43998,"rtr":43999,"comfort":44000,"snippets":44001,"realtime":44002,"grain":44003,"examined":44004,"enlightening":44005,"ttu":44006,"godbless":44007,"releasethe":44008,"singular":44009,"kians":44010,"haka":44011,"sorren":44012,"defect":44013,"marg":44014,"equities":44015,"dorian":44016,"suka":44017,"perl":44018,"aishwarya":44019,"pullover":44020,"precision":44021,"fairway":44022,"neve":44023,"riveting":44024,"villanova":44025,"encom":44026,"ako":44027,"passionately":44028,"europaleague":44029,"siempre":44030,"xvi":44031,"enlightened":44032,"cfr":44033,"âĺħâĺħâĺħâĺħ":44034,"wasteland":44035,"isf":44036,"newcomers":44037,"emergency":44038,"amphitheatre":44039,"-.":44040,"textbooks":44041,"figurative":44042,"tremb":44043,"pesc":44044,"abhin":44045,"abbot":44046,"acacia":44047,"hards":44048,"porsche":44049,"kauai":44050,"elisa":44051,"carrick":44052,"abou":44053,"ellier":44054,"bech":44055,"neutron":44056,"galapagos":44057,"ruben":44058,"innis":44059,"howto":44060,"nuns":44061,"sabine":44062,"iac":44063,"clinched":44064,"notori":44065,"fives":44066,"cairngor":44067,"peri":44068,"grc":44069,"ðŁĴ¯ðŁĴ¯":44070,"malm":44071,"twelfth":44072,"diff":44073,"routines":44074,"martyn":44075,"linden":44076,"synthesizer":44077,"number":44078,"gamecube":44079,"falkirk":44080,"byzantine":44081,"queuing":44082,"grill":44083,"scalable":44084,"charred":44085,"routing":44086,"herbali":44087,"grizz":44088,"ðŁĺŃðŁĺŃðŁĺŃ":44089,"toll":44090,"terminals":44091,"lpc":44092,"abd":44093,"warmups":44094,"removable":44095,"¯\\":44096,"vigo":44097,"papaya":44098,"neve":44099,"lovingly":44100,"jokers":44101,"ibles":44102,"ssett":44103,"potenti":44104,"pele":44105,"gigi":44106,"sadiq":44107,"legacy":44108,"sono":44109,"rupees":44110,"retarded":44111,"elee":44112,"parr":44113,"fiance":44114,"eyre":44115,"sayers":44116,"pendants":44117,"maknae":44118,"albans":44119,"adapting":44120,"pff":44121,"puberty":44122,"jiu":44123,"ingrad":44124,"hypocrite":44125,"diplomats":44126,"physical":44127,"robby":44128,"bonsai":44129,"ãģ·":44130,"fatt":44131,"catalunya":44132,"âľĸï¸ı":44133,"roma":44134,"moreland":44135,"soe":44136,"conversions":44137,"stlblues":44138,"sholm":44139,"grassy":44140,"prado":44141,"onu":44142,"assaulting":44143,">_":44144,"settes":44145,"disgraceful":44146,"aphra":44147,"âļ½ï¸ıâļ½ï¸ı":44148,"प":44149,"kiln":44150,"goaltender":44151,"sru":44152,"philanthropist":44153,"bals":44154,"thn":44155,"studen":44156,"sandoval":44157,"dogrescue":44158,"elions":44159,"assessed":44160,"largo":44161,"hectares":44162,"shrm":44163,"saif":44164,"cleavage":44165,"noches":44166,"nene":44167,"fatalities":44168,"curing":44169,"cleanser":44170,"ales":44171,"pvp":44172,"southbank":44173,"pizzeria":44174,"marshals":44175,"knife":44176,"andover":44177,"tblightning":44178,"srsly":44179,"oute":44180,"digimon":44181,"timesofindia":44182,"promethe":44183,"lebo":44184,"fsu":44185,"witz":44186,"revere":44187,"manas":44188,"mamba":44189,"chica":44190,"guan":44191,"exhibitor":44192,"csrracing":44193,"dere":
44194,"xxxxx":44195,"gusta":44196,"storytime":44197,"stoney":44198,"organics":44199,"andu":44200,"seam":44201,"minogue":44202,"anushkasharma":44203,"aba":44204,"ðŁİĻï¸ı":44205,"ugandan":44206,"chromatic":44207,"assn":44208,"documentaries":44209,"sht":44210,"rupaul":44211,"loyd":44212,"kats":44213,"eus":44214,"itech":44215,"medusa":44216,"panty":44217,"kellogg":44218,"etto":44219,"tallade":44220,"shaa":44221,"dost":44222,"pms":44223,"mariana":44224,"jester":44225,"crooks":44226,"ðŁĶ¬":44227,"mindanao":44228,"indhoven":44229,"ðŁ¤ª":44230,"lexi":44231,"tvn":44232,"janis":44233,"cote":44234,"ãģĨ":44235,"serrano":44236,"iwm":44237,"ðŁIJ¬":44238,"kke":44239,"distributors":44240,"capu":44241,"counterfeit":44242,"campsite":44243,"aggie":44244,"ðŁĺ¼":44245,"chhattisgarh":44246,"~@":44247,"stateu":44248,"sandi":44249,"preventable":44250,"cls":44251,"canne":44252,"mmc":44253,"iver":44254,"saharan":44255,"palis":44256,"nightout":44257,"dos":44258,"apia":44259,"abscbn":44260,"managerial":44261,"arose":44262,"mowx":44263,"arosa":44264,"ðŁĮ³":44265,"underdog":44266,"remover":44267,"astronomers":44268,"lentils":44269,"suscep":44270,"smoother":44271,"pendleton":44272,"faucet":44273,"emory":44274,"dalmati":44275,"afcb":44276,"ticus":44277,"exempt":44278,"enrol":44279,"dheim":44280,"ðŁIJº":44281,"restriction":44282,"starfish":44283,"stow":44284,"snorkel":44285,"thunderbirds":44286,"shead":44287,"homosexual":44288,"dyn":44289,"asli":44290,"andretti":44291,"douche":44292,"domo":44293,"tarmac":44294,"slumber":44295,"pronto":44296,"firstdayof":44297,"miniature":44298,"mariachi":44299,"argus":44300,"recommending":44301,"mobiles":44302,"ince":44303,"illustrious":44304,"orc":44305,"adverts":44306,"grits":44307,"weasel":44308,"pagoda":44309,"overpass":44310,"greys":44311,"maximus":44312,"armagh":44313,"woodland":44314,"sunni":44315,"ðŁĴī":44316,"ëĿ":44317,"tione":44318,"socio":44319,"hos":44320,"ðŁ¤ĹðŁ¤Ĺ":44321,"windsor":44322,"subsequent":44323,"munchies":44324,"idh":44325,"excluding":44326,"emi":44327,"cuth":44328,"zai":44329,"weekdays":44330,"lawsuits":44331,"barnard":44332,"ت":44333,"petting":44334,"netes":44335,"mulligan":44336,"pharmacists":44337,"raquel":44338,"eton":44339,"cranston":44340,"gilded":44341,"cleary":44342,"ceph":44343,"raa":44344,"pamper":44345,"lombardi":44346,"asin":44347,"sherry":44348,"prod":44349,"forte":44350,"arianism":44351,"buffalobills":44352,"æľ¬":44353,"ðŁĶ¥#":44354,"uuu":44355,"justices":44356,"carina":44357,"natin":44358,"maslow":44359,"drooling":44360,"cognac":44361,"camber":44362,"elong":44363,"rdr":44364,"inen":44365,"convictions":44366,"amuse":44367,"trock":44368,"harmless":44369,"visitation":44370,"genomic":44371,"bland":44372,"benoit":44373,"chimp":44374,"tuscaloosa":44375,"greasy":44376,"xpo":44377,"gilt":44378,"seq":44379,"permitted":44380,"christmaseve":44381,"books":44382,"mue":44383,"oldschool":44384,"humanright":44385,"beati":44386,"ðŁĶĿ":44387,"shat":44388,"sculpting":44389,"hwan":44390,"fernandes":44391,"sciutto":44392,"fuentes":44393,"endeavors":44394,"maidstone":44395,"unparalleled":44396,"shouted":44397,"queenof":44398,"merc":44399,"bandic":44400,"veda":44401,"selangor":44402,"pile":44403,"jahan":44404,"intimidating":44405,"disappears":44406,"clich":44407,"zaha":44408,"wurst":44409,"hiv":44410,"fodils":44411,"cordless":44412,"aaaaaa":44413,"hydra":44414,"belinda":44415,"eels":44416,"buf":44417,"sustaining":44418,"rugbyleague":44419,"noc":44420,"brigitte":44421,"(ðŁĵ¸:":44422,"trombone":44423,"soothe":44424,"smog":44425,"adp":44426,"stable":44427,"ingley":44428,"diag
nose":44429,"msg":44430,"wess":44431,"ticketing":44432,"onee":44433,"nswpol":44434,"eup":44435,"autopsy":44436,"adityanath":44437,"sundown":44438,"riverfront":44439,"siya":44440,"pis":44441,"hierarchy":44442,"durango":44443,"dijk":44444,"renshaw":44445,"heaps":44446,"epidemi":44447,"davidbowie":44448,"internetof":44449,"ddi":44450,"nationality":44451,"mbar":44452,"airy":44453,"winder":44454,"walia":44455,"elliott":44456,"cx":44457,"bavarian":44458,"platt":44459,"antw":44460,"wiwx":44461,"softer":44462,"neha":44463,"heller":44464,"thand":44465,"daniela":44466,"boast":44467,"degradation":44468,"ðŁĴ¦ðŁĴ¦":44469,"transforming":44470,"mane":44471,"avut":44472,"ðŁĺĪðŁĺĪ":44473,"voter":44474,"thee":44475,"tate":44476,"puff":44477,"indoor":44478,"soproud":44479,"boyce":44480,"borisjohnson":44481,"waitin":44482,"immunology":44483,"ðŁıĨðŁıĨðŁıĨ":44484,"âĿĮ":44485,"streetfood":44486,"lizasober":44487,"cavalier":44488,"celia":44489,"needle":44490,"motoring":44491,"gato":44492,",)":44493,"rade":44494,"harvest":44495,"tms":44496,"jarpad":44497,"oney":44498,"airmen":44499,"vre":44500,"impairment":44501,"abhishek":44502,"snoop":44503,"lant":44504,"famously":44505,"blou":44506,"sze":44507,"gander":44508,"untouch":44509,"tuf":44510,"deejay":44511,"collateral":44512,"bind":44513,"ðŁļ©":44514,"pinning":44515,"icn":44516,"';":44517,"theeconomist":44518,"ultram":44519,"worldwaterday":44520,"tipoff":44521,"thei":44522,"feeders":44523,"campaign":44524,"scumb":44525,"dayweekend":44526,"yom":44527,"pedic":44528,"hough":44529,"psv":44530,"plin":44531,"onde":44532,"bostonmarathon":44533,"azzy":44534,"*_*":44535,"conley":44536,"thiago":44537,"hooo":44538,"galerie":44539,"lucid":44540,"jett":44541,"glitz":44542,"finalfantasy":44543,"achievers":44544,"yung":44545,"peregrine":44546,"ophi":44547,"dames":44548,"biomar":44549,"âĺĢï¸ıâĺĢï¸ı":44550,"skc":44551,"lics":44552,"flank":44553,"arrahman":44554,"hoof":44555,"upholstery":44556,"tats":44557,"woz":44558,"¿":44559,"snoring":44560,"raer":44561,"lju":44562,"apd":44563,"plating":44564,"kanu":44565,"imation":44566,"fragrances":44567,"mra":44568,"moray":44569,"mott":44570,"immuni":44571,"hearties":44572,"bhopal":44573,"timers":44574,"gata":44575,"colorway":44576,"carnation":44577,"winget":44578,"sighs":44579,"sville":44580,"optimist":44581,"chateau":44582,"olympians":44583,"cio":44584,"singersongwriter":44585,"nyo":44586,"fibers":44587,"burch":44588,"agro":44589,"milne":44590,"igbo":44591,"cramer":44592,"ationals":44593,"danube":44594,"padma":44595,"normani":44596,"enforced":44597,"breck":44598,"boehner":44599,"arden":44600,"surrendered":44601,"prosthetic":44602,"oma":44603,"hailed":44604,"calculations":44605,"wfa":44606,"bib":44607,"fcblive":44608,"fonda":44609,"westcoast":44610,"quests":44611,"friendly":44612,"towie":44613,"fitch":44614,"balot":44615,"stardom":44616,"scratching":44617,"hosa":44618,"thika":44619,"oven":44620,"stroke":44621,"outpost":44622,"pharmaceuticals":44623,"hikari":44624,"muy":44625,"afd":44626,"fallontonight":44627,"squat":44628,"oru":44629,"drained":44630,"chocolat":44631,"민":44632,"worths":44633,"rib":44634,"muj":44635,"thats":44636,"residente":44637,"itel":44638,"boost":44639,"migos":44640,"mulled":44641,"laa":44642,"etsyshop":44643,"donkeys":44644,"mek":44645,"ptc":44646,"flinders":44647,"ehs":44648,"rohit":44649,"muir":44650,"gad":44651,"compositions":44652,"åĨĻ":44653,"combustion":44654,"ikh":44655,"yemeni":44656,"waved":44657,"garci":44658,"akos":44659,"oods":44660,"fusion":44661,"seque":44662,"slan":44663,"plur":44664,"kicchasu":44665,"shenando
":44666,"sams":44667,"worlden":44668,"horowitz":44669,"withme":44670,"microbes":44671,"kki":44672,"ðŁĴĶðŁĴĶ":44673,"wsu":44674,"patchwork":44675,"freer":44676,"yaki":44677,"theart":44678,"symbolism":44679,"miler":44680,"btn":44681,"mabu":44682,"sidekick":44683,"motivates":44684,"sagitt":44685,"naturals":44686,"serviced":44687,"psori":44688,"paola":44689,"quig":44690,"ibadan":44691,"giggs":44692,"ë³":44693,"scientology":44694,"sioux":44695,"salamat":44696,"dres":44697,"cadbury":44698,"dhawan":44699,"ción":44700,"_'":44701,"swapping":44702,"mariska":44703,"jamesbond":44704,"explosives":44705,"ayles":44706,"afer":44707,"sagu":44708,"censor":44709,"toma":44710,"jefferson":44711,"ringed":44712,"partist":44713,"irresponsible":44714,"aguilar":44715,"vacay":44716,"equitable":44717,"altrincham":44718,"acur":44719,"manish":44720,"germin":44721,"schooled":44722,"putter":44723,"edad":44724,"naval":44725,"toasty":44726,"solareclipse":44727,"dishu":44728,"coyne":44729,"acco":44730,"muck":44731,"maran":44732,"elos":44733,"lender":44734,"croix":44735,"worthless":44736,"haber":44737,"gunmen":44738,"ðŁįĵ":44739,"zenith":44740,"tenders":44741,"hurst":44742,"holtz":44743,"italians":44744,"carlow":44745,"ucd":44746,"characteristic":44747,"bung":44748,"avl":44749,"uth":44750,"sasia":44751,"rsl":44752,"redman":44753,"neighboring":44754,"greenpeace":44755,"stips":44756,"followparty":44757,"ygk":44758,"enos":44759,"omnibus":44760,"naissance":44761,"chrissy":44762,"secure":44763,"callback":44764,"jihoon":44765,"memory":44766,"blocker":44767,"lanta":44768,"daffodils":44769,"bilt":44770,"fferty":44771,"faust":44772,"iec":44773,"nipples":44774,"sog":44775,"mnd":44776,"jaguar":44777,"boldly":44778,"abpoli":44779,"proposition":44780,"gunsense":44781,"evansville":44782,"cutters":44783,"wego":44784,"doun":44785,"dox":44786,"stallions":44787,"kaj":44788,"shippers":44789,"jawa":44790,"volo":44791,"leven":44792,"paprika":44793,"kovich":44794,"jordi":44795,"inductees":44796,"appalling":44797,"dialysis":44798,"alleviate":44799,"âĢĶâĢĶ":44800,"pieter":44801,"midwi":44802,"qtr":44803,"juliette":44804,"intermission":44805,"hawks":44806,"actment":44807,"oneill":44808,"klin":44809,"vamps":44810,"famous":44811,"could":44812,"automobi":44813,"daan":44814,"westend":44815,"ellip":44816,"nhc":44817,"melanch":44818,"webseries":44819,"tongue":44820,"snatched":44821,"smyth":44822,"tangible":44823,"sli":44824,"easing":44825,"barstool":44826,"overlay":44827,"affordability":44828,"tinged":44829,"teras":44830,"ayush":44831,"wannaone":44832,"rhine":44833,"dana":44834,"shana":44835,"kendal":44836,"fertile":44837,"wir":44838,"repleni":44839,"larvae":44840,"isro":44841,"convos":44842,"abbrevi":44843,"ucc":44844,"hungry":44845,"burrows":44846,"ager":44847,"navi":44848,"matin":44849,"duper":44850,"cern":44851,"madon":44852,"ķï¸ı":44853,"éģ":44854,"tups":44855,"hyatt":44856,"shep":44857,"fridaynight":44858,"wiser":44859,"heidi":44860,"hatton":44861,"pgh":44862,"fountain":44863,"wristbands":44864,"ahmadiyya":44865,"aerial":44866,"subscribed":44867,"solos":44868,"mace":44869,"slayed":44870,"forfe":44871,"dulce":44872,"christmass":44873,"arunjaitley":44874,"violate":44875,"obstru":44876,"nieces":44877,"wvu":44878,"idyl":44879,"faze":44880,"preserves":44881,"infringe":44882,"premiers":44883,"intervals":44884,"agency":44885,"(©":44886,"standalone":44887,"dimes":44888,"boer":44889,"parameters":44890,"getit":44891,"ðŁĺĺðŁĺĺðŁĺĺðŁĺĺ":44892,"tulane":44893,"forgiven":44894,"scoll":44895,"mbps":44896,"smashbros":44897,"robbi":44898,"primavera":44899,"alist":4490
0,"ghostly":44901,"ayat":44902,"yeats":44903,"impressionist":44904,"earphones":44905,"caulfield":44906,"waikiki":44907,"salute":44908,"scou":44909,"muay":44910,"louisvuitton":44911,"bakhta":44912,"adog":44913,"inventions":44914,"hurd":44915,"foreclo":44916,"streamline":44917,"thalaivar":44918,"chsnews":44919,"willard":44920,"tsn":44921,"europarl":44922,"crusher":44923,"mysore":44924,"grower":44925,"raping":44926,"patti":44927,"gden":44928,"smw":44929,"mufti":44930,"kidman":44931,"abr":44932,"sounders":44933,"skeptical":44934,"ðŁĶİ":44935,"sundar":44936,"ime":44937,"ferg":44938,"featherweight":44939,"arlington":44940,"pasqu":44941,"agazine":44942,"wearable":44943,"natic":44944,"mcclure":44945,"intermitt":44946,"horde":44947,"sixties":44948,"carte":44949,"bhav":44950,"zeal":44951,"experiential":44952,"adorned":44953,"sommer":44954,"enote":44955,"hypothesis":44956,"stinky":44957,"proto":44958,"deadlines":44959,"vogel":44960,"musings":44961,"moncton":44962,"guter":44963,"fle":44964,"acion":44965,"voiceof":44966,"tasha":44967,"inhabitants":44968,"typeface":44969,"sba":44970,"btsx":44971,"ðŁĶĴ":44972,"worx":44973,"uhc":44974,"joko":44975,"cellars":44976,"goro":44977,"continuum":44978,"...&":44979,"weathercee":44980,"hap":44981,"srk":44982,"risers":44983,"lonelyplanet":44984,"unnamed":44985,"coeur":44986,"ðŁįĮ":44987,"theworld":44988,"ilike":44989,"fasten":44990,"amigo":44991,"riba":44992,"ramaphosa":44993,"staffers":44994,"hadley":44995,"??\"":44996,"fiore":44997,"salut":44998,"huff":44999,"bezos":45000,"Ñĭ":45001,"rader":45002,"kamala":45003,"inline":45004,"fillers":45005,"umatic":45006,"allin":45007,"shatter":45008,"rein":45009,"oku":45010,"chases":45011,"flagged":45012,"babymetal":45013,"waterstones":45014,"tsb":45015,"cutout":45016,"ophel":45017,"aama":45018,"rockabilly":45019,"stolic":45020,"jetblue":45021,"ichick":45022,"downton":45023,"uzbekistan":45024,"patna":45025,"laq":45026,"grange":45027,")_/":45028,"subsidi":45029,"scp":45030,"newscast":45031,"itsa":45032,"tweetyour":45033,"emor":45034,"archaeologists":45035,"unification":45036,"porta":45037,"qx":45038,"protectors":45039,"prohib":45040,"charisma":45041,"cartag":45042,"renfre":45043,"sculpt":45044,"guwahati":45045,"dema":45046,"boop":45047,"unfpa":45048,"dexter":45049,"layla":45050,"alleges":45051,"soups":45052,"neveragain":45053,"lys":45054,"calc":45055,"baroness":45056,"visualize":45057,"gerber":45058,"absorbed":45059,"iers":45060,"ahan":45061,"fontein":45062,"detectors":45063,"verstappen":45064,"svc":45065,"formulated":45066,"acdc":45067,"lix":45068,"incompetent":45069,"bhk":45070,"lourdes":45071,"waterhouse":45072,"snowed":45073,"appreciative":45074,"sigma":45075,"lizasoberano":45076,"penned":45077,"paycheck":45078,"tallinn":45079,"fancafe":45080,"parisi":45081,"avalley":45082,"vig":45083,"rufc":45084,"hardship":45085,"socute":45086,"poise":45087,"ì¹":45088,"rothschild":45089,"kly":45090,"????????":45091,"lhp":45092,"ilay":45093,"fhs":45094,"amad":45095,"ideals":45096,"bradbury":45097,"balboa":45098,"nicot":45099,"kidnap":45100,"wolve":45101,"tasmanian":45102,"opt":45103,"matthias":45104,"ãĥ³ãĤ":45105,"supermarkets":45106,"mylittlepony":45107,"melee":45108,"lister":45109,"groun":45110,"fedora":45111,"kindness":45112,"enen":45113,"brahms":45114,"¯\\_(":45115,"roswell":45116,"marlene":45117,"icu":45118,"reformation":45119,"orail":45120,"hebrides":45121,"disparities":45122,"terracotta":45123,"swallows":45124,"reid":45125,"influencing":45126,"fluor":45127,"dene":45128,"tumour":45129,"blondes":45130,"thunderbird":45131,"sheva":45132,"
mogadishu":45133,"kab":45134,"creeps":45135,"iving":45136,"eneed":45137,"annoy":45138,"âĶĢ":45139,"intrigue":45140,"enquiry":45141,"araj":45142,"tural":45143,"kubernetes":45144,"endlessly":45145,"dividends":45146,"tora":45147,"tish":45148,"commemorates":45149,"unra":45150,"trib":45151,"ponty":45152,"nem":45153,"dissent":45154,"brewingco":45155,"ðŁĺ½":45156,"normali":45157,"biof":45158,"(...":45159,"chillen":45160,"주":45161,"mellon":45162,"avis":45163,"mccormack":45164,"ingra":45165,"enriched":45166,"customerexperience":45167,"testosterone":45168,"snug":45169,"setti":45170,"geronimo":45171,"inquirer":45172,"breaches":45173,"verything":45174,"blooming":45175,"mura":45176,"dispos":45177,"bide":45178,"deva":45179,"shadesof":45180,"intrin":45181,"shev":45182,"sven":45183,"nayanthara":45184,"ganesha":45185,"cws":45186,"berta":45187,"labelled":45188,"useum":45189,"nicknamed":45190,"mahan":45191,"caruso":45192,"apur":45193,"ðŁijĨ":45194,"wq":45195,"orphanage":45196,"discarded":45197,"magnu":45198,"lue":45199,"jeon":45200,"bridgeport":45201,"pacing":45202,"mercury":45203,"(ðŁĵ¸":45204,"marxist":45205,"amphibious":45206,"transplantation":45207,"stitching":45208,"thenburg":45209,"gradual":45210,"ãĤĮ":45211,"roft":45212,"mails":45213,"inec":45214,"guyana":45215,"doppelg":45216,"vero":45217,"rewrite":45218,"headless":45219,"harbaugh":45220,"gateway":45221,"carsforsale":45222,"swi":45223,"stis":45224,"macht":45225,"unde":45226,"surabaya":45227,"stapleton":45228,"nurturing":45229,"milner":45230,"yao":45231,"lmaoooo":45232,"kosh":45233,"arsenal":45234,"kame":45235,"erry":45236,"arroyo":45237,"dismisses":45238,"rubbed":45239,"rcb":45240,"lewd":45241,"dilu":45242,"andor":45243,"vide":45244,"urin":45245,"intersec":45246,"haar":45247,"alb":45248,"yearswith":45249,"appleton":45250,"éal":45251,"ullivan":45252,"succu":45253,"monterrey":45254,"dmx":45255,"artemis":45256,"ronnie":45257,"farmland":45258,"sfootball":45259,"grotto":45260,"anthi":45261,"ãĢģ":45262,"à®Ł":45263,"vidya":45264,"jimmyfallon":45265,"àµį":45266,"tzer":45267,"gravitational":45268,"wthr":45269,"uhhh":45270,"ehr":45271,"tinker":45272,"tijuana":45273,"scranton":45274,"ramcharan":45275,"barclay":45276,"revan":45277,"msi":45278,"kap":45279,"wrs":45280,"wethenorth":45281,"toral":45282,"satu":45283,"grom":45284,"facep":45285,"erickson":45286,"zyn":45287,"sedge":45288,"oodle":45289,"spursofficial":45290,"dsp":45291,"sicilian":45292,"solihull":45293,"receivers":45294,"ladakh":45295,"hendrick":45296,"theri":45297,"presiding":45298,"mcguinness":45299,"litters":45300,"gunnar":45301,"ghoul":45302,"wib":45303,"ntv":45304,"karo":45305,"frock":45306,"blau":45307,"amplify":45308,"allis":45309,"ullah":45310,"memoirs":45311,"khloe":45312,"interceptions":45313,"petday":45314,"looney":45315,"confin":45316,"chay":45317,"piyushgoyal":45318,"frequencies":45319,"utz":45320,"eventual":45321,"warmly":45322,"oblivion":45323,"anka":45324,"tait":45325,"âĿ¤ï¸ı.":45326,"directorial":45327,"rulers":45328,"princes":45329,"muck":45330,"sturridge":45331,"deuce":45332,"abridged":45333,"baguette":45334,"uncles":45335,"pendu":45336,"minding":45337,"forrester":45338,"avila":45339,"waller":45340,"wallstreet":45341,"mentor":45342,"hino":45343,"highway":45344,"cromwell":45345,"fanartfriday":45346,"mbi":45347,"coyle":45348,"ahi":45349,"trove":45350,"spiegel":45351,"paytm":45352,"mcintosh":45353,"jansen":45354,"niti":45355,"nashville":45356,"leno":45357,"leicestershire":45358,"legos":45359,"dict":45360,"ðŁĵ½":45361,"spad":45362,"beverlyhills":45363,"syrah":45364,"separates":45365,"zain":
45366,"unfit":45367,"drags":45368,"tania":45369,"overflowing":45370,"hrithik":45371,"hawthorn":45372,"zani":45373,"macfar":45374,"fide":45375,"totem":45376,"peds":45377,"fundamentally":45378,"calico":45379,"sinner":45380,"jä":45381,"hilde":45382,"dsd":45383,"tenay":45384,"tahit":45385,"milf":45386,"lieb":45387,"informing":45388,"uplift":45389,"rael":45390,"mortgages":45391,"lect":45392,"iiii":45393,"guillaume":45394,"composites":45395,"oldsmobile":45396,"lend":45397,"garth":45398,"commish":45399,"baptized":45400,"scorpions":45401,"rucker":45402,"bringbackour":45403,"alliance":45404,"thalapathy":45405,"tali":45406,"spans":45407,"eridge":45408,"witherspoon":45409,"linda":45410,"skylar":45411,"korn":45412,"homs":45413,"Äį":45414,"silenced":45415,"caffe":45416,"arty":45417,"distinguish":45418,"towed":45419,"pung":45420,"jessica":45421,"earnest":45422,"beaufort":45423,"tama":45424,"studyabroad":45425,"sikhs":45426,"newbie":45427,"navratri":45428,"marble":45429,"lounging":45430,"litter":45431,"dalit":45432,"sosa":45433,"izes":45434,"grade":45435,"compromising":45436,"triton":45437,"detta":45438,"vj":45439,"chauffe":45440,"spectral":45441,"powered":45442,"montessori":45443,"articulate":45444,"halton":45445,"alco":45446,"yey":45447,"mntwins":45448,"acounty":45449,"ðŁijıðŁı¾":45450,"âīĪ":45451,"madmen":45452,"kala":45453,"grum":45454,"chik":45455,"atis":45456,"sume":45457,"akhtar":45458,"jobsearch":45459,"highlighter":45460,"boath":45461,"âĦ¹":45462,"tarzan":45463,"lambo":45464,"âĽĦï¸ı":45465,"oxfam":45466,"dumpster":45467,"pretzels":45468,"macos":45469,"inclined":45470,"factual":45471,"advertisers":45472,"shui":45473,"puree":45474,"mlpfi":45475,"antidote":45476,"capo":45477,"pastr":45478,"mercado":45479,"button":45480,"armin":45481,"agg":45482,"lolla":45483,"horribly":45484,"errands":45485,"christophe":45486,"timesnow":45487,"mondaymotiv":45488,"liss":45489,"scandals":45490,"mci":45491,"disproportion":45492,"âĺİ":45493,"surpass":45494,"samaritan":45495,"sotho":45496,"purest":45497,"flatt":45498,"triviatuesday":45499,"delectable":45500,"leopold":45501,"hermione":45502,"choudhary":45503,"enrich":45504,"¡¡":45505,"subsidiary":45506,"inequalities":45507,"bachelor":45508,"autoimmune":45509,"lakota":45510,"ihop":45511,"adjec":45512,"thesimpsons":45513,"shes":45514,"sek":45515,"gretchen":45516,"upstream":45517,"hinakhan":45518,"copernic":45519,"xtina":45520,"lug":45521,"toughness":45522,"ead":45523,"clipped":45524,"bius":45525,"slv":45526,"fahren":45527,"deepak":45528,"cau":45529,"xan":45530,"immature":45531,"digni":45532,"bobs":45533,"shredding":45534,"buttery":45535,"accommodations":45536,"deven":45537,"chunks":45538,"superleague":45539,"skybet":45540,"kildare":45541,"jeet":45542,"ëį":45543,"cek":45544,"wrecks":45545,"propane":45546,"ohl":45547,"tbd":45548,"quoi":45549,"trumpp":45550,"mimo":45551,"reluctant":45552,"verne":45553,"oic":45554,"magh":45555,"arnau":45556,"sever":45557,"lidge":45558,"stairway":45559,"kicchasudeep":45560,"ðŁĶº":45561,"machining":45562,"aamaadmi":45563,"oti":45564,"cda":45565,"alit":45566,"pany":45567,"installs":45568,"acct":45569,"eshop":45570,"diem":45571,"hardwell":45572,"fulfillment":45573,"scafe":45574,"quack":45575,"extracts":45576,"sweetened":45577,"fighton":45578,"fdi":45579,"dinger":45580,"waltham":45581,"usur":45582,"referees":45583,"seokjin":45584,"grann":45585,"afrin":45586,"thn":45587,"schaf":45588,"parcels":45589,"betis":45590,"amarine":45591,"noman":45592,"khtar":45593,"moritz":45594,"coupling":45595,"barons":45596,"ðŁIJ¸":45597,"ø":45598,"slp":45599,"sadler":456
00,"xander":45601,"triad":45602,"mcmillan":45603,"khz":45604,"dividing":45605,"ìĹijìĨĮ":45606,"daryl":45607,"zedd":45608,"leys":45609,"plaques":45610,"fluori":45611,"tipperary":45612,"onnell":45613,"didier":45614,"langford":45615,"imc":45616,"thesun":45617,"birdies":45618,"archa":45619,"yessss":45620,"tdi":45621,"daria":45622,"candace":45623,"altam":45624,"palaces":45625,"chit":45626,"santam":45627,"eventful":45628,"bookof":45629,"adb":45630,"monstax":45631,"creole":45632,"coel":45633,"âĸ½":45634,"wearen":45635,"stennis":45636,"sheath":45637,"atism":45638,"groningen":45639,"mlpfim":45640,"lepre":45641,"wrongly":45642,"rspca":45643,"rendezvous":45644,"acknowledging":45645,"pelvic":45646,"solicitor":45647,"slays":45648,"nuestra":45649,"lod":45650,"islander":45651,"feroci":45652,"fashionshow":45653,"rass":45654,"dgeon":45655,"adolescents":45656,"smashes":45657,"negligence":45658,"grateful":45659,"vedere":45660,"swoop":45661,"ingl":45662,"apolice":45663,"vandalism":45664,"gann":45665,"joao":45666,"disupdates":45667,"zimbabwe":45668,"underage":45669,"radiance":45670,"wof":45671,"bourgeo":45672,"plas":45673,"crani":45674,"ghue":45675,"wreckem":45676,"warrants":45677,"reform":45678,"jimmie":45679,"atwood":45680,"ysl":45681,"neilhimself":45682,"lbj":45683,"iman":45684,"tanto":45685,"noisse":45686,"verbs":45687,"equipo":45688,"altogether":45689,"mament":45690,"lice":45691,"douglass":45692,"tierney":45693,"primed":45694,"jhal":45695,"furnitu":45696,"brazili":45697,"vill":45698,"pastels":45699,"nison":45700,"uff":45701,"paralysis":45702,"jaye":45703,"impo":45704,"ðŁijģ":45705,"strategically":45706,"pakistanis":45707,"wassup":45708,"superbike":45709,"thanku":45710,"truelove":45711,"shaikh":45712,"israelis":45713,"vip":45714,"tog":45715,"lien":45716,"laker":45717,"greyhounds":45718,"culars":45719,"bianchi":45720,"balotelli":45721,"arran":45722,"loos":45723,"strates":45724,"hebron":45725,"arvo":45726,"sunderland":45727,"theal":45728,"tombstone":45729,"sandman":45730,"cpac":45731,"thanksgiving":45732,"lovehim":45733,"latino":45734,"anin":45735,"akaif":45736,"ĭãĤ":45737,"torquay":45738,"diest":45739,"allianz":45740,"ðŁĺķ":45741,"golfclub":45742,"cllr":45743,"walcott":45744,"schnau":45745,"prompted":45746,"nominating":45747,"lennox":45748,"valet":45749,"monro":45750,"mayward":45751,"eph":45752,"ðŁĶĶ":45753,"interoper":45754,"rda":45755,"reflex":45756,"armchair":45757,"ê°ķ":45758,"stripper":45759,"porti":45760,"pharm":45761,"hamza":45762,"nireland":45763,"neue":45764,"hpv":45765,"portfoli":45766,"sunburn":45767,"frisbee":45768,"beal":45769,"baptiste":45770,"xh":45771,"tym":45772,"prati":45773,"overs":45774,"hazrat":45775,"desert":45776,"derry":45777,"usky":45778,"emmett":45779,"acharya":45780,")_/¯":45781,"shud":45782,"maya":45783,"hamill":45784,"raim":45785,"nrc":45786,"fittings":45787,"curvy":45788,"ðŁıĩ":45789,"sterling":45790,"à¥Ģ":45791,"walkin":45792,"shortcuts":45793,"milly":45794,"astur":45795,"alphabe":45796,"pli":45797,"pez":45798,"missyou":45799,"radford":45800,"mlg":45801,"taeyang":45802,"notjustlakes":45803,"dumps":45804,"serendip":45805,"leur":45806,"raving":45807,"ester":45808,"depriv":45809,"abscbn":45810,"ðŁijĩðŁı»":45811,"scarcity":45812,"ocr":45813,"meanings":45814,"capt":45815,"dahl":45816,"fermentation":45817,"brioche":45818,"towin":45819,"outlander":45820,"massimo":45821,"encro":45822,"ðŁ¥³":45823,"built":45824,"potam":45825,"kiri":45826,"tmw":45827,"monitored":45828,"kites":45829,"peoplesvote":45830,"grayson":45831,"íģ¬":45832,"afrika":45833,"adies":45834,"ivote":45835,"gyne":45836,"ga
nnon":45837,"dix":45838,"cmc":45839,"oural":45840,"foxandfriends":45841,"beli":45842,"igne":45843,"glan":45844,"katrinakaif":45845,"copolitics":45846,"qualitative":45847,"psi":45848,"lucci":45849,"discoura":45850,"âĺ®":45851,"kelli":45852,"gautam":45853,"caracas":45854,"realest":45855,"pula":45856,"inus":45857,"hilltop":45858,"makeaw":45859,"attenborough":45860,"twy":45861,"rarity":45862,"peckham":45863,"mahon":45864,"cornelius":45865,"clinicians":45866,"tonline":45867,"tbi":45868,"paradise":45869,"kasi":45870,"inevit":45871,"freshness":45872,"collingwood":45873,"lunatic":45874,"defense":45875,"copd":45876,"infra":45877,"wainwright":45878,"sainsbury":45879,"alabam":45880,"tema":45881,"laco":45882,"checker":45883,"relegated":45884,"trent":45885,"stalks":45886,"huffpost":45887,"bhubaneswar":45888,"astral":45889,"shareyour":45890,"primrose":45891,"hime":45892,"catan":45893,"endment":45894,"endow":45895,"clemens":45896,"maloney":45897,"hilary":45898,"gametime":45899,"denise":45900,"collaborators":45901,"bwo":45902,"radicals":45903,"guetta":45904,"icion":45905,"aua":45906,"snapmatic":45907,"satchel":45908,"excavation":45909,"baseman":45910,"são":45911,"gnation":45912,"feld":45913,"survey":45914,"shahzad":45915,"mast":45916,"anirudhofficial":45917,"trucker":45918,"otago":45919,"geograph":45920,"ethel":45921,"âļ¡ï¸ıâļ¡ï¸ı":45922,"sver":45923,"mutt":45924,"internetofthings":45925,"anchored":45926,"whouse":45927,"bangla":45928,"balmain":45929,"ç¹ĭãģ":45930,"breakfa":45931,"áĢ":45932,"twister":45933,"tetris":45934,"cav":45935,"stags":45936,"gz":45937,"aub":45938,"stormed":45939,"helens":45940,"yarmouth":45941,"stasy":45942,"gustavo":45943,"cosc":45944,"vinson":45945,"upp":45946,"scricket":45947,"assumptions":45948,"appe":45949,"nuh":45950,"uer":45951,"premise":45952,"naga":45953,"eamon":45954,"coronary":45955,"naf":45956,"northside":45957,"elmer":45958,"rotar":45959,"outlining":45960,"elf":45961,"resurg":45962,"katelyn":45963,"incan":45964,"hysteria":45965,"cee":45966,"ambani":45967,"prolly":45968,"ĮãĤĬãģ":45969,"axes":45970,"sanjose":45971,"rembrandt":45972,"magpie":45973,"evenly":45974,"scorsese":45975,"quaint":45976,"fg":45977,"bbuk":45978,"indianfootball":45979,"weareall":45980,"spdwy":45981,"pisces":45982,"ecg":45983,"âĺħâĺħâĺħâĺħâĺħ":45984,"preorders":45985,":|":45986,"nipple":45987,"salazar":45988,"jume":45989,"jailbreak":45990,"minn":45991,"bassett":45992,"zetta":45993,"jeffree":45994,"adjun":45995,"ticon":45996,"sandiego":45997,"drinklocal":45998,"cholera":45999,"solicitors":46000,"obo":46001,"compost":46002,"nian":46003,"wra":46004,"treach":46005,"icic":46006,"professional":46007,"delve":46008,"legate":46009,"historia":46010,"croissant":46011,"connoisse":46012,"namo":46013,"palliative":46014,"chemtrails":46015,"iority":46016,"globalwarming":46017,"comicart":46018,"behavioural":46019,"rested":46020,"lias":46021,"climates":46022,"ŁãģĦ":46023,"rutland":46024,"nourish":46025,"menopause":46026,"hotties":46027,"dementi":46028,"vespa":46029,"melville":46030,"analogue":46031,"tzman":46032,"strung":46033,"imperfect":46034,"glare":46035,"circling":46036,"rosberg":46037,"reco":46038,"ocity":46039,"loire":46040,"embe":46041,"dossier":46042,"neel":46043,"nando":46044,"mea":46045,"galvani":46046,"finesse":46047,"agp":46048,"berkeley":46049,"asim":46050,"âĺºâĺº":46051,"quilted":46052,"ishere":46053,"unmatched":46054,"potion":46055,"forz":46056,"atre":46057,"selfies":46058,"juliana":46059,"ðŁļ¶":46060,"âĸº":46061,"melton":46062,"âłĢâłĢâłĢâłĢâłĢâłĢâłĢâłĢ":46063,"spinrilla":46064,"purcell":46065,"edp":46066,
"atleti":46067,"tonyawards":46068,"raja":46069,"progno":46070,"molten":46071,"stuff":46072,"pally":46073,"nobelprize":46074,"âĻ»ï¸ı":46075,"spiritual":46076,"speake":46077,"sasha":46078,"brium":46079,"truss":46080,"criticize":46081,"assassinscreed":46082,"yoruba":46083,"ulo":46084,"fireman":46085,"workinprogress":46086,"efcc":46087,"flares":46088,"robot":46089,"hikers":46090,"cll":46091,"shadowing":46092,"patsy":46093,"lehman":46094,"cns":46095,"å±":46096,"guadal":46097,"à±į":46098,"rape":46099,"rhonda":46100,"parallels":46101,"sonja":46102,"language":46103,"landings":46104,"zola":46105,"cramps":46106,"burning":46107,"appraisal":46108,"jolla":46109,"hamm":46110,"kasa":46111,"gully":46112,"fgo":46113,"ulysses":46114,"ribe":46115,"ðŁĴĦ":46116,"ibu":46117,"etienne":46118,"briar":46119,"finely":46120,"combating":46121,"yql":46122,"gotham":46123,"wechat":46124,"topaz":46125,"primaries":46126,"lse":46127,"izz":46128,"hele":46129,"disponible":46130,"cystic":46131,"belichick":46132,"thrush":46133,"kansascity":46134,"geom":46135,"solidi":46136,"redbubble":46137,"bystand":46138,"cambridgeshire":46139,"parfait":46140,"astle":46141,"owo":46142,"indore":46143,"stomping":46144,"smelly":46145,"ðŁ¤ĸ":46146,"locomo":46147,"admitting":46148,"holme":46149,"clockwise":46150,"minsk":46151,"mcco":46152,"forget":46153,"evp":46154,"camra":46155,"abella":46156,"yotes":46157,"universityof":46158,"méxico":46159,"silverado":46160,"ricket":46161,"crombie":46162,"puj":46163,"eradicate":46164,"delight":46165,"ygo":46166,"glamping":46167,"vica":46168,"duggan":46169,"counters":46170,"cfd":46171,"scour":46172,"reactjs":46173,"puram":46174,"parasites":46175,"inki":46176,"villen":46177,"stella":46178,"limbo":46179,"angas":46180,"kcr":46181,"ðŁĴļðŁĴļðŁĴļ":46182,"vapori":46183,"mumford":46184,"oligar":46185,"à¼":46186,"aloo":46187,"booties":46188,"adr":46189,"kelli":46190,"drummers":46191,"avici":46192,"natureuk":46193,"ronal":46194,"intrac":46195,"unsplash":46196,"leche":46197,"goma":46198,"eline":46199,"enviro":46200,"bionic":46201,"bueno":46202,"mik":46203,"avin":46204,"starling":46205,"empowers":46206,"cakeday":46207,"boycot":46208,"ðŁĴļðŁĴļ":46209,"ðŁĮ¸ðŁĮ¸":46210,"vach":46211,"mci":46212,"fractures":46213,"geri":46214,"sking":46215,"excluded":46216,"luce":46217,"jave":46218,"iggy":46219,"eviden":46220,"akistan":46221,"awn":46222,"morals":46223,"lucifer":46224,"haban":46225,"tumbling":46226,"sundaymotivation":46227,"mosley":46228,"captainamerica":46229,"schicago":46230,"theone":46231,"motd":46232,"dts":46233,"ðŁIJ¼":46234,"repell":46235,"iii":46236,"locust":46237,"geospatial":46238,"mersey":46239,"immerse":46240,"descend":46241,"bernade":46242,"js":46243,"boatsales":46244,"winder":46245,"crank":46246,"singleton":46247,"candidacy":46248,"bena":46249,"ðŁı»âĢį":46250,"highlander":46251,"olt":46252,"kprs":46253,"healthylifestyle":46254,"fourteen":46255,"endthe":46256,"ithaca":46257,"circulated":46258,"rans":46259,"prevalent":46260,"havas":46261,"splendor":46262,"rooster":46263,"kalamazoo":46264,"jewellers":46265,"ennedy":46266,"rousey":46267,"esy":46268,"cannons":46269,"ornamental":46270,"////":46271,"rendon":46272,"winne":46273,"molding":46274,"eidmubarak":46275,"countess":46276,"simona":46277,"hawa":46278,"foes":46279,"duster":46280,"sbu":46281,"portray":46282,"marries":46283,"goodday":46284,"choco":46285,"achiever":46286,"ðŁĺ¹ðŁĺ¹":46287,"preneur":46288,"tramp":46289,"tomi":46290,"nbat":46291,"gardenchat":46292,"farrakhan":46293,"everglades":46294,"abru":46295,"sousa":46296,"sece":46297,"homeswee":46298,"terrestrial":46299,
"barit":46300,"sridevi":46301,"olu":46302,"melinda":46303,"frick":46304,"candies":46305,"ðŁĺŃðŁĴķ":46306,"qureshi":46307,"familyfun":46308,"exorcist":46309,"cardinal":46310,"nyt":46311,"diesel":46312,"cumulus":46313,"capricorn":46314,"siology":46315,"lorna":46316,"dougie":46317,"andie":46318,"supersport":46319,"cfl":46320,"пÑĢи":46321,"sayang":46322,"peek":46323,"à¸Ĭ":46324,"lobe":46325,"jem":46326,"inglis":46327,"ggled":46328,"csn":46329,"amnesty":46330,"chups":46331,"baes":46332,"sauer":46333,"ðŁıIJ":46334,"mongolian":46335,"enet":46336,"backstreet":46337,"drilled":46338,"accessing":46339,"ceo":46340,"bse":46341,"aiken":46342,"purr":46343,"worsen":46344,"wheres":46345,"wark":46346,"testifying":46347,"buri":46348,"blast":46349,"awg":46350,"ðŁĵĭ":46351,"redefining":46352,"hearing":46353,"uci":46354,"cmp":46355,"boni":46356,"tailoring":46357,"taji":46358,"nocchi":46359,"emt":46360,"stephenking":46361,"neet":46362,"complains":46363,"campaigner":46364,"luciano":46365,"twilight":46366,"tiesto":46367,"passports":46368,"floyd":46369,"cathedr":46370,"naked":46371,"caregiver":46372,"bcoz":46373,"adecides":46374,"kuri":46375,"lyk":46376,"braries":46377,"drenched":46378,"disclose":46379,"ðŁĴªðŁı½":46380,"leblanc":46381,"jetty":46382,"garty":46383,"chipmun":46384,"bsu":46385,"rhythmic":46386,"icz":46387,"frid":46388,"annex":46389,"amex":46390,"soloist":46391,"lancers":46392,"arrowhead":46393,"specification":46394,"simulated":46395,"nais":46396,"inverte":46397,"bowing":46398,"worship":46399,"fz":46400,"aboss":46401,"shaq":46402,"ì¶ķ":46403,"challengers":46404,"anarch":46405,"aamaadmiparty":46406,"ãħĭãħĭãħĭ":46407,"suffolk":46408,"socorro":46409,"snell":46410,"cladding":46411,"absorbing":46412,"shawa":46413,"participates":46414,"ðŁįĶ":46415,"bookstores":46416,"baku":46417,"seaport":46418,"kojima":46419,"gaby":46420,"packard":46421,"electrician":46422,"letit":46423,"mowing":46424,"fawad":46425,"youngjae":46426,"hotmail":46427,"mening":46428,"urie":46429,"intimacy":46430,"conti":46431,":\")":46432,"lifeisgood":46433,"inciner":46434,"idri":46435,"craziness":46436,"journos":46437,"franchi":46438,"bottlen":46439,"alda":46440,"ffes":46441,"kx":46442,"southwe":46443,"aira":46444,"clayton":46445,"scoti":46446,"fj":46447,"briga":46448,"ðŁ¤ĺðŁı»":46449,"demonstrators":46450,"yz":46451,"stork":46452,"naq":46453,"cascades":46454,"travelchat":46455,"plata":46456,"padma":46457,"franci":46458,"attain":46459,"batgirl":46460,"lombard":46461,"hoos":46462,"ddos":46463,"neonatal":46464,"disclaimer":46465,"rss":46466,"rant":46467,"disen":46468,"texaste":46469,"socal":46470,"fractal":46471,"camry":46472,"strife":46473,"snacking":46474,"muh":46475,"santander":46476,"morons":46477,"graf":46478,"parades":46479,"huston":46480,"drupal":46481,"miento":46482,"kirstel":46483,"hyde":46484,"vomit":46485,"fortified":46486,"sphinx":46487,"dav":46488,"biryani":46489,"winnings":46490,"sbaseball":46491,"merged":46492,"lovelondon":46493,"lingering":46494,"dreambig":46495,"carleton":46496,"livelihood":46497,"django":46498,"astrid":46499,"grids":46500,"downe":46501,"bruised":46502,"sne":46503,"scarecrow":46504,"helium":46505,"fnc":46506,"biggs":46507,"anter":46508,"restorative":46509,"empires":46510,"abdel":46511,"lifestyle":46512,"kiwanis":46513,"colloquium":46514,"meen":46515,"prick":46516,"antique":46517,"zeb":46518,"mimic":46519,"edmonds":46520,"ðŁijĬðŁijĬ":46521,"qing":46522,"ppel":46523,"mcgill":46524,"interpreting":46525,"âŀķ":46526,"rashad":46527,"doka":46528,"narrator":46529,"electromagnetic":46530,"ashby":46531,"saura":46532,"irande
al":46533,"âģīï¸ı":46534,"krishnan":46535,"indi":46536,"ffen":46537,"brea":46538,"osman":46539,"multinational":46540,"chippe":46541,"recruiters":46542,"ausbiz":46543,"pounding":46544,"regen":46545,"cursor":46546,"refusal":46547,"macs":46548,"inak":46549,"axial":46550,"waifu":46551,"upcycled":46552,"hindustan":46553,"cassini":46554,"carlyle":46555,"scratches":46556,"reef":46557,"manatee":46558,"eatery":46559,"ðŁĵ¢":46560,"uncondition":46561,"senpai":46562,"onther":46563,"comicbook":46564,"prosciutto":46565,"demar":46566,"mise":46567,"mage":46568,"freec":46569,"ayesha":46570,"alder":46571,"androidgames":46572,"leyton":46573,"hock":46574,"doorway":46575,"chicagofire":46576,"aaliyah":46577,"swelling":46578,"bix":46579,".ðŁĺĤ":46580,"evankirstel":46581,"torpedo":46582,"konstant":46583,"genevieve":46584,"maia":46585,"hauser":46586,"dotorg":46587,"hideous":46588,"fik":46589,"spraw":46590,"eek":46591,"zappa":46592,"wandered":46593,"''":46594,"rajan":46595,"bambi":46596,"($)":46597,"widening":46598,"toolbox":46599,"sair":46600,"illuminating":46601,"prays":46602,"outpatient":46603,"iw":46604,"dayo":46605,"lob":46606,"swfl":46607,"shades":46608,"gums":46609,"cookin":46610,"kodi":46611,"griffin":46612,"traumati":46613,"stea":46614,"slaughtered":46615,"godbless":46616,"airtime":46617,"pseudo":46618,"bsa":46619,"hauled":46620,"arif":46621,"à¸Ńà¸ĩ":46622,"lel":46623,"wcpo":46624,"militi":46625,"charters":46626,"worlda":46627,"ruk":46628,"kgs":46629,"digitalindia":46630,"isable":46631,"idyllic":46632,"espino":46633,"marietta":46634,"ebo":46635,"teamcanada":46636,"abour":46637,"wilton":46638,"rockstars":46639,"favored":46640,"physic":46641,"wrinkle":46642,"tbr":46643,"dprint":46644,"ballarat":46645,"adal":46646,"zey":46647,"ðŁĺįðŁĶ¥":46648,"tomlin":46649,"mtr":46650,"palsy":46651,"fenerbah":46652,"tighten":46653,"philia":46654,"ironing":46655,"ryu":46656,"bant":46657,"enquire":46658,"cair":46659,"aburger":46660,"trun":46661,"greenberg":46662,"chauhan":46663,"irina":46664,"shani":46665,"trendsetter":46666,"prett":46667,"zafar":46668,"alove":46669,"vici":46670,"panic":46671,"noo":46672,"lustre":46673,"disrupted":46674,"ballis":46675,"sonsof":46676,"monsi":46677,"instac":46678,"akest":46679,"ëĭ¤":46680,"kwame":46681,"horrormovies":46682,"district":46683,"saucy":46684,"mban":46685,"armies":46686,"withdrawn":46687,"medics":46688,"loftus":46689,"eroom":46690,"bekind":46691,"arns":46692,"allon":46693,"unison":46694,"davids":46695,"crat":46696,"nicotine":46697,"soor":46698,"smx":46699,"onco":46700,"cosplaying":46701,"zombies":46702,"harms":46703,"eger":46704,"rosy":46705,"moonshine":46706,"fein":46707,"cett":46708,"dubrov":46709,"regents":46710,"benitez":46711,"ðŁijıðŁı¼ðŁijıðŁı¼":46712,"stec":46713,"malia":46714,"prioritize":46715,"iceland":46716,"ftse":46717,"vamo":46718,"lamont":46719,"homosexuality":46720,"brees":46721,"regui":46722,"cbp":46723,"tej":46724,"skysports":46725,"detergent":46726,"shasta":46727,"derel":46728,"conservancy":46729,"colorized":46730,"accolades":46731,"viso":46732,"showyour":46733,"nanow":46734,"biceps":46735,"usability":46736,"bim":46737,"dailysketch":46738,"pearljam":46739,"strangest":46740,"megadeth":46741,"broadcasts":46742,"barren":46743,"arton":46744,"chriss":46745,"configu":46746,"lures":46747,"isthe":46748,"eul":46749,"railwayana":46750,"globalhealth":46751,"gianni":46752,"uaap":46753,"slum":46754,"consciously":46755,"abre":46756,"nup":46757,"budget":46758,"vada":46759,"esch":46760,"realness":46761,"erased":46762,"thunt":46763,"bez":46764,"armistice":46765,"ðŁij¹":46766,"shrun":4
6767,"oled":46768,"driverless":46769,"ðŁ¤·ðŁı»âĢįâĻĢï¸ı":46770,"wondr":46771,"skan":46772,"salaam":46773,"motherland":46774,"hwang":46775,"geno":46776,"gangnam":46777,"twright":46778,"endorsing":46779,"enic":46780,"adoration":46781,"paused":46782,"patricks":46783,"docked":46784,"platte":46785,"ffxv":46786,"ethnicity":46787,"autoshow":46788,"sideshow":46789,"afterlife":46790,"relocated":46791,"orphaned":46792,"foodnetwork":46793,"dareto":46794,"andra":46795,"slaps":46796,"vlive":46797,"swims":46798,"reimagined":46799,"mistle":46800,"revise":46801,"reality":46802,"bharti":46803,"ðŁĴĻðŁĴĽ":46804,"latest":46805,"proudest":46806,"grasses":46807,"lanyard":46808,"freshest":46809,"carcinoma":46810,"anomaly":46811,"ziegler":46812,"sumner":46813,"lyrix":46814,"gorg":46815,"isd":46816,"avel":46817,"swildlife":46818,"mesqu":46819,"johncena":46820,"euroleague":46821,"saber":46822,"masterful":46823,"yarra":46824,"cognition":46825,"jacobson":46826,"abolic":46827,"sirloin":46828,"shukla":46829,"mojito":46830,"supere":46831,"stweet":46832,"mez":46833,"esa":46834,"rudolf":46835,"gura":46836,"whereyou":46837,"ttm":46838,"wins":46839,"trustworthy":46840,"nyk":46841,"braden":46842,"tabletop":46843,"goodfood":46844,"eson":46845,"bek":46846,"linguistic":46847,"grays":46848,"chath":46849,"hcs":46850,"moni":46851,"deans":46852,"cussions":46853,"chell":46854,"slows":46855,"hemi":46856,"dapp":46857,"sharpie":46858,"boosters":46859,"aos":46860,"strack":46861,"sedona":46862,"mueller":46863,"hardwick":46864,"ornate":46865,"thora":46866,"salud":46867,"otwol":46868,"chum":46869,"miho":46870,"forage":46871,"thelittle":46872,"tearful":46873,"oneself":46874,"mindy":46875,"smg":46876,"gmbh":46877,"emerald":46878,"ðŁĶ´âļªï¸ı":46879,"tutti":46880,"receptions":46881,"revising":46882,"ibrox":46883,"topeka":46884,"salami":46885,"expanse":46886,"ibooks":46887,"dobson":46888,"clio":46889,"ats":46890,"ðŁļĮ":46891,"moha":46892,"isance":46893,"shutters":46894,"moot":46895,"janine":46896,"marvelcomics":46897,"jordani":46898,"poser":46899,"kenneth":46900,"hyung":46901,"deja":46902,"aseball":46903,"speciality":46904,"euston":46905,"classiccar":46906,"hadith":46907,"ðŁIJī":46908,"chasing":46909,"izo":46910,"grosven":46911,"aglia":46912,"thisdayinhistory":46913,"trow":46914,"omile":46915,"huar":46916,"byn":46917,"saline":46918,"divine":46919,"demonic":46920,"tyran":46921,"handover":46922,"revitalization":46923,"paella":46924,"cryptic":46925,"sedg":46926,"mend":46927,"dunkirk":46928,"bred":46929,"wald":46930,"sportscar":46931,"aard":46932,"wheaton":46933,"daener":46934,"klan":46935,"brt":46936,"bakhtawar":46937,"spires":46938,"schubert":46939,"roti":46940,"polish":46941,"ose":46942,"agame":46943,"wondercon":46944,"protestant":46945,"bosa":46946,"ðŁĺŁ":46947,"dü":46948,"joyride":46949,"gertrude":46950,"âĿĿ":46951,"gila":46952,"vh":46953,"twa":46954,"trav":46955,"swallowed":46956,"starve":46957,"lain":46958,"entren":46959,"reiki":46960,"sukh":46961,"craic":46962,"azu":46963,"webpage":46964,"keefe":46965,"hypothe":46966,"hirsch":46967,"helle":46968,"campground":46969,"wamy":46970,"travi":46971,"shahi":46972,"sandeep":46973,"rui":46974,"hanuman":46975,"dwp":46976,"repository":46977,"noor":46978,"noff":46979,"unreal":46980,"pell":46981,"blackhistory":46982,"harvick":46983,"mascar":46984,"payee":46985,"pasha":46986,"gastronomy":46987,"dÃŃ":46988,"aig":46989,"rosenthal":46990,"openday":46991,"embellished":46992,"ttip":46993,"sunbathing":46994,"gopack":46995,"endome":46996,"ï¸ı#":46997,"invalid":46998,"finalfour":46999,"stfu":47000,"squishy":47001,"
rasta":47002,"mosch":47003,"jamesc":47004,"dietrich":47005,"sela":47006,"melb":47007,"elvi":47008,"tdp":47009,"suni":47010,"slit":47011,"jha":47012,"biza":47013,"spiked":47014,"lli":47015,"lillard":47016,"vampi":47017,"synopsis":47018,"azhar":47019,"kendricklamar":47020,"ĮãĤĬãģŁãģĦ":47021,"heartless":47022,"countryfile":47023,"airplay":47024,"arrogance":47025,"pree":47026,"virtuoso":47027,"ãħłãħłãħłãħł":47028,"raju":47029,"lebu":47030,"forward":47031,"tug":47032,"dros":47033,"mondaymotivaton":47034,"concepcion":47035,"thelo":47036,"padi":47037,"looool":47038,"ÑĢод":47039,"itss":47040,"ethical":47041,"enduro":47042,"__:":47043,"expenditure":47044,"monste":47045,"masking":47046,"terriers":47047,"ibis":47048,"ember":47049,"cumple":47050,"punctuation":47051,"piper":47052,"irvin":47053,"adee":47054,"yyyyyy":47055,"flashbacks":47056,"celsius":47057,"donnie":47058,"bogota":47059,"benevol":47060,"thescript":47061,"shilpa":47062,"prose":47063,"findia":47064,"zeke":47065,"neko":47066,"doves":47067,"blueslyrix":47068,"frosh":47069,"soweto":47070,"mplo":47071,"alai":47072,"sabi":47073,"raqqa":47074,"wftv":47075,"stroller":47076,"iansomerhalder":47077,"ðŁĶª":47078,"anon":47079,"moseley":47080,"!?!?":47081,"staking":47082,"moly":47083,"cartri":47084,"csg":47085,"astor":47086,"transcend":47087,"maer":47088,"deux":47089,"cowgirl":47090,"sask":47091,"punter":47092,"maken":47093,"oates":47094,"lovett":47095,"growler":47096,"sagin":47097,"vn":47098,"ssible":47099,"officeofrg":47100,"ymc":47101,"sabar":47102,"faulty":47103,"apha":47104,"akon":47105,"ðŁij«":47106,"snowdon":47107,"aew":47108,"raisethe":47109,"ðĿĵ":47110,"gruesome":47111,"clementine":47112,"sping":47113,"lata":47114,"worldenviron":47115,"mimic":47116,"canaria":47117,"bakhtawarbz":47118,"aoa":47119,"fala":47120,"ãĤŃ":47121,"aviva":47122,"youuuu":47123,"thigh":47124,"ladders":47125,"gumbo":47126,"tzky":47127,"fuzz":47128,"plasticpollution":47129,"estate":47130,"strengthened":47131,"kant":47132,"drin":47133,"calvert":47134,"transformational":47135,"frightened":47136,"maclean":47137,"elitedangerous":47138,"earthy":47139,"tson":47140,"toda":47141,"jnu":47142,"..,":47143,"michal":47144,"iban":47145,"jeong":47146,"isreal":47147,"simcoe":47148,"exclusives":47149,"bluebells":47150,"bene":47151,"teu":47152,"pilsner":47153,"penske":47154,"atheists":47155,"mpu":47156,"cartagena":47157,"ðŁĴĹðŁĴĹ":47158,"millionaires":47159,"kkkk":47160,"itar":47161,"subscriptions":47162,"remote":47163,"mafi":47164,"hinton":47165,"wcc":47166,"hok":47167,"dsb":47168,"ableton":47169,"seventy":47170,"punks":47171,"eindhoven":47172,"shone":47173,"mcfarlane":47174,"limpopo":47175,"emphasi":47176,"ü":47177,"sinfo":47178,"petre":47179,"mangrove":47180,"chino":47181,"bertie":47182,"playlists":47183,"pushawards":47184,"paf":47185,"debbie":47186,"cdo":47187,"rino":47188,"ðŁı¾âĢįâĻĤï¸ı":47189,"folke":47190,"bonnar":47191,"thine":47192,"slan":47193,"halter":47194,"evie":47195,"awsome":47196,"vultures":47197,"sparky":47198,"seizures":47199,"âľĶ":47200,"ramone":47201,"ineffe":47202,"aln":47203,"proctor":47204,"astra":47205,"thevoice":47206,"grote":47207,"scion":47208,"deadline":47209,"amaya":47210,"tainted":47211,"patterned":47212,"exceeding":47213,"crossfit":47214,"kaylee":47215,"dropbox":47216,"rushes":47217,"tackled":47218,"moby":47219,"retrogamer":47220,"ncbd":47221,"benefitting":47222,"shaykh":47223,"guildhall":47224,"gentry":47225,"dreamcast":47226,"dreaded":47227,"bundled":47228,"thaw":47229,"revolving":47230,"npt":47231,"kyliejenner":47232,"imaginative":47233,"roni":47234,"overcame"
:47235,"familytime":47236,"dsburg":47237,"carnaval":47238,"relationship":47239,"recognizable":47240,"coroner":47241,"hole":47242,"fanfic":47243,"emirates":47244,"burritos":47245,"analyse":47246,"thinner":47247,"nees":47248,"gallipoli":47249,"blr":47250,"catwoman":47251,"-->>":47252,"ault":47253,"adaily":47254,"naughty":47255,"ilio":47256,"solitaire":47257,"mtvbr":47258,"jocelyn":47259,"arunach":47260,"repent":47261,"southgate":47262,"hyacin":47263,"essential":47264,"fenton":47265,"andum":47266,"itor":47267,"gopal":47268,"slinger":47269,"posei":47270,"awil":47271,"wielding":47272,"raila":47273,"elias":47274,"asto":47275,"ä":47276,"tendency":47277,"strata":47278,"kert":47279,"<-":47280,"imacele":47281,"daes":47282,"stimulus":47283,"hanley":47284,"fitnes":47285,"ecstasy":47286,"limous":47287,"hailing":47288,"ðŁ¤Ń":47289,"chiswick":47290,"taries":47291,"slav":47292,"puli":47293,"modernization":47294,"blackmail":47295,"bingham":47296,"hfx":47297,"++":47298,"ðŁĩ®ðŁĩ³":47299,"niv":47300,"wea":47301,"professor":47302,"koff":47303,"bolster":47304,"suave":47305,"sequences":47306,"pepperoni":47307,"notte":47308,"dren":47309,"ãģ¨ç¹ĭãģ":47310,"hsv":47311,"oga":47312,"aptly":47313,"zad":47314,"excelsi":47315,"rinka":47316,"moldova":47317,"minn":47318,"mabel":47319,"conferencing":47320,"basing":47321,"ofer":47322,"obsi":47323,"hamillhimself":47324,"careless":47325,"briefed":47326,"inherent":47327,"parish":47328,"dubnation":47329,"townsville":47330,"sarawak":47331,"geeky":47332,"doncasterisgreat":47333,"wasabi":47334,"gup":47335,"pheno":47336,"drainthe":47337,"carrieunderwood":47338,"bleeds":47339,"bbcworld":47340,"anew":47341,"altaf":47342,"dulwich":47343,"aniston":47344,"wti":47345,"sumatra":47346,"grafton":47347,"bln":47348,"mester":47349,"bodega":47350,"rego":47351,"esq":47352,"anjo":47353,"sumptuous":47354,"maisie":47355,"�":47356,"wilt":47357,"jakob":47358,"elvis":47359,"sepul":47360,"muster":47361,"airpollution":47362,"presidente":47363,"happymonday":47364,"extensively":47365,"flondon":47366,"tls":47367,"playing":47368,"peed":47369,"dinho":47370,"vardy":47371,"pika":47372,"niro":47373,"aucus":47374,"ðŁį¦":47375,"null":47376,"elondon":47377,"juventus":47378,"imagines":47379,"disab":47380,"lito":47381,"dura":47382,"workplaces":47383,"promote":47384,"mccaf":47385,"woodwork":47386,"wawx":47387,"ப":47388,"ttino":47389,"shari":47390,"semper":47391,"bettertogether":47392,"ðŁijĬðŁı»":47393,"zebra":47394,"pondering":47395,"enchil":47396,"hom":47397,"cosmic":47398,"tanz":47399,"mocked":47400,"eccc":47401,"athed":47402,"abolish":47403,"propeller":47404,"parisagreement":47405,"assemblies":47406,"industry":47407,"fraudulent":47408,"pesa":47409,"changmin":47410,"axx":47411,"ðŁĴµ":47412,"irrational":47413,"cusa":47414,"ramadhan":47415,"octavia":47416,"onelove":47417,"jacki":47418,"barak":47419,"taxider":47420,"serious":47421,"nathanfillion":47422,"mcen":47423,"chk":47424,"popart":47425,"gravity":47426,"coppola":47427,"readingfc":47428,"illusions":47429,"jig":47430,"wwx":47431,"resh":47432,"exporting":47433,"buzzard":47434,"âĻ¤":47435,"pcm":47436,"lanapar":47437,"kos":47438,"aromas":47439,"antalya":47440,"wwdc":47441,"vena":47442,"phila":47443,"ballin":47444,"ðŁijĦ":47445,"quinta":47446,"mao":47447,"fery":47448,"eighty":47449,"sentiments":47450,"safeguarding":47451,"rwa":47452,"puffs":47453,"lucille":47454,"decath":47455,"slu":47456,"nugent":47457,"deter":47458,"brazil":47459,"zeiss":47460,"superbowl":47461,"subsidy":47462,"altern":47463,"hidalgo":47464,"enzymes":47465,"ä½":47466,"tagne":47467,"hairdresser":47468,"
adrien":47469,"walkout":47470,"opposes":47471,"cantina":47472,"bedside":47473,"afan":47474,"ðŁĶĹ":47475,"prophetic":47476,"danes":47477,"unsuccessful":47478,"supercharged":47479,"pkk":47480,"exemption":47481,"hartle":47482,"secular":47483,"clipping":47484,"brs":47485,"unitedway":47486,"cnet":47487,"patchy":47488,"hagan":47489,"een":47490,"âļľ":47491,"vara":47492,"sympathi":47493,"nevertrump":47494,"affirmation":47495,"omf":47496,"nycfc":47497,"maja":47498,"surro":47499,"keerth":47500,"upscale":47501,"sandalwood":47502,"monarchy":47503,"knobs":47504,"åĭ":47505,"potholes":47506,"hungergames":47507,"terraces":47508,"nasir":47509,"counsell":47510,"welcometo":47511,"waq":47512,"seaman":47513,"mita":47514,"stunningly":47515,"ontheroad":47516,"inability":47517,")!!":47518,"bongo":47519,"antv":47520,"sput":47521,"worldenvironmentday":47522,"resusc":47523,"ytd":47524,"fim":47525,"eunhyuk":47526,"sachin":47527,"roseanne":47528,"clermont":47529,"apec":47530,"amina":47531,"vening":47532,"nantes":47533,"almost":47534,"sinus":47535,"exas":47536,"tyl":47537,"tien":47538,"plead":47539,"lancs":47540,"burnaby":47541,"rek":47542,"joom":47543,"observers":47544,"discography":47545,"clg":47546,"âĻ¦":47547,"snack":47548,"rti":47549,"oily":47550,"crystalli":47551,"brute":47552,"webdevelopment":47553,"toppings":47554,"laf":47555,"anis":47556,"adder":47557,"reliving":47558,"carlin":47559,"battleof":47560,"weg":47561,"syrian":47562,"pont":47563,"ndc":47564,"laghate":47565,"yuma":47566,"spp":47567,"piti":47568,"robbing":47569,"marting":47570,"reykja":47571,"rajput":47572,"ncds":47573,"kiewicz":47574,"âĢ¢âĢ¢":47575,"vampire":47576,"substantially":47577,"opioids":47578,"nepali":47579,"kline":47580,"aroo":47581,"understand":47582,"litt":47583,"uit":47584,"thrombo":47585,"saries":47586,"quot":47587,"balling":47588,"ttr":47589,"sgh":47590,"philipp":47591,"brant":47592,"acl":47593,"mello":47594,"whittaker":47595,".;":47596,"defiant":47597,"bgc":47598,"replying":47599,"mirren":47600,"metamorpho":47601,"schwab":47602,"bulge":47603,"utilized":47604,"pickering":47605,"pardon":47606,"dsa":47607,"à¸Ī":47608,"dooley":47609,"cumulative":47610,"л":47611,"urgency":47612,"emir":47613,"+/-":47614,"¦Ī":47615,"otas":47616,"âı³":47617,"stationed":47618,"grapevine":47619,"arac":47620,"karanjohar":47621,"fancy":47622,"saul":47623,"coogs":47624,"lgbtq":47625,"اÙħ":47626,"javi":47627,"ummer":47628,"pll":47629,"denis":47630,"daipur":47631,"puffin":47632,"lewisham":47633,"fandom":47634,"cope":47635,"vesmatter":47636,"sve":47637,"helpless":47638,"deodor":47639,"ostrich":47640,"kazan":47641,"fridaythe":47642,"condor":47643,"vx":47644,"sophomores":47645,"robles":47646,"cutt":47647,"climbers":47648,"리":47649,"sleg":47650,"snf":47651,"macys":47652,"hydrating":47653,"groupe":47654,"poyn":47655,"moulin":47656,"hgtv":47657,"lmfaooo":47658,"sulphur":47659,"asdfghjkl":47660,"annabelle":47661,"humpback":47662,"braved":47663,"viswasam":47664,"multipurpose":47665,"humidi":47666,"escorted":47667,"barbican":47668,"fad":47669,"corsa":47670,"ðŁ¤«":47671,"pippa":47672,"hereto":47673,"cany":47674,"sergi":47675,"orcas":47676,"ovie":47677,"edou":47678,"sany":47679,"globalization":47680,"mancini":47681,"foodtruck":47682,"fis":47683,"defibrill":47684,"schre":47685,"smafia":47686,"lovewins":47687,"laut":47688,"kaka":47689,"hollande":47690,"gameon":47691,"resurgence":47692,"outside":47693,"olympiad":47694,"intan":47695,"abstraction":47696,"rapid":47697,"palom":47698,"calle":47699,"jasmin":47700,"attackers":47701,"swagg":47702,"mitra":47703,"kylo":47704,"ல":47705,"hermi
tage":47706,"gordo":47707,"eira":47708,"sosfam":47709,"rollout":47710,"excite":47711,"synod":47712,"merrill":47713,"cals":47714,"assa":47715,"livelihoods":47716,"juve":47717,"theblack":47718,"gopackgo":47719,"antlers":47720,"albanian":47721,"woolly":47722,"quiche":47723,"purification":47724,"areth":47725,"smarthome":47726,"nek":47727,"allblacks":47728,"mexicans":47729,"ism":47730,"germs":47731,"complexion":47732,"marck":47733,"ushi":47734,"ðŁIJIJ":47735,"charl":47736,"castic":47737,"tillerson":47738,"giuliani":47739,"biodegradable":47740,"malbec":47741,"bois":47742,"jubil":47743,"imes":47744,"rame":47745,"genetic":47746,"espnu":47747,"chley":47748,"soho":47749,"gopher":47750,"gsc":47751,"buuren":47752,"cube":47753,"bridesmaids":47754,"webinars":47755,"toe":47756,"manipur":47757,"violently":47758,"noticias":47759,"exchanging":47760,"chiev":47761,"replaceable":47762,"muaythai":47763,"buss":47764,"spil":47765,"instalment":47766,"divya":47767,"caitlin":47768,"olim":47769,"filtering":47770,"whirlwind":47771,"stared":47772,"priorit":47773,"pram":47774,"pompeii":47775,"monologue":47776,"kite":47777,"buka":47778,"âĢ¦..":47779,"vaccine":47780,"brero":47781,"wozni":47782,"solent":47783,"referr":47784,"myrt":47785,"gridiron":47786,"galatasaray":47787,"froze":47788,"claremont":47789,"ðŁ¥ĥ":47790,"victorias":47791,"sseldorf":47792,"pastures":47793,"netneutrality":47794,"chor":47795,"ðŁijģ":47796,"ಿ":47797,"weho":47798,"symptom":47799,"josel":47800,"inous":47801,"dragoncon":47802,"powerball":47803,"pte":47804,"fourthofjuly":47805,"ecla":47806,"earbuds":47807,"whereabouts":47808,"saltlife":47809,"deprivation":47810,"chter":47811,"wiggle":47812,"system":47813,"psst":47814,"chaz":47815,"dany":47816,"rimo":47817,"oaxaca":47818,"lanaparrilla":47819,"barcelon":47820,"melancholy":47821,"wayback":47822,"hotro":47823,"nsi":47824,"lilly":47825,"kuro":47826,"jahan":47827,"intellect":47828,"boardgame":47829,"ðŁıĬ":47830,"sneakpeek":47831,"kprc":47832,"jails":47833,"candel":47834,"zanzi":47835,"mortimer":47836,"starch":47837,"rags":47838,"pfa":47839,"longlive":47840,"kart":47841,"girona":47842,"crocker":47843,"christoph":47844,"precautions":47845,"warship":47846,"perm":47847,"parent":47848,"vangogh":47849,"gifford":47850,"allegheny":47851,"rayn":47852,"utm":47853,"stencil":47854,"recalling":47855,"penney":47856,"zazzle":47857,"ìĥĿ":47858,"hinds":47859,"arenas":47860,"nuev":47861,"lawler":47862,"guin":47863,"dothis":47864,"ðŁijķ":47865,"ì¶ķíķĺ":47866,"weg":47867,"tib":47868,"ridin":47869,"complexes":47870,"turbulent":47871,"pesos":47872,"demarcus":47873,"vallarta":47874,"samsun":47875,"kisses":47876,"heinrich":47877,"deportes":47878,"wilms":47879,"urd":47880,"thenext":47881,"inkigayo":47882,"howi":47883,"firsts":47884,"carriage":47885,"cleanliness":47886,"maswar":47887,"isch":47888,"axel":47889,"sizzle":47890,"roadhouse":47891,"frans":47892,"entourage":47893,"cobble":47894,"booth":47895,"benedict":47896,"talon":47897,"fcu":47898,"yearofthe":47899,"rayon":47900,"raidernation":47901,"foyle":47902,"koval":47903,"pianos":47904,"lpg":47905,"burmese":47906,"manure":47907,"geocaching":47908,"coscino":47909,"bnp":47910,"ferra":47911,"strophy":47912,"marais":47913,"cees":47914,"legendof":47915,"katniss":47916,"enoch":47917,"aved":47918,"youknow":47919,"dprk":47920,"ðŁĺ¢ðŁĺ¢":47921,"spun":47922,"prost":47923,"sorrows":47924,"centred":47925,"kea":47926,"galicia":47927,"?ðŁ¤Ķ":47928,"ÑĢода":47929,"bouchard":47930,"ðŁĴĻðŁĴľ":47931,"yui":47932,"seedlings":47933,"jonah":47934,"recovers":47935,"nyrd":47936,"boardroom":47937,"suma":47
938,"myjaps":47939,"tung":47940,"shai":47941,"irgc":47942,"elio":47943,"wagons":47944,"kashi":47945,"policemen":47946,"johnnie":47947,"alecoscino":47948,"shopify":47949,"dotted":47950,"detri":47951,"vaw":47952,"tofficial":47953,"inyour":47954,"chalmers":47955,"traced":47956,"novi":47957,"byes":47958,"ariel":47959,"nippon":47960,"lapel":47961,"griez":47962,"bgs":47963,"fooling":47964,"dita":47965,"vijaysethu":47966,"nmwx":47967,"asot":47968,"kranti":47969,"helm":47970,"vedi":47971,"sickest":47972,"mochi":47973,"kabo":47974,"shrubs":47975,"hered":47976,"bsp":47977,"sqm":47978,"hamr":47979,"dulkar":47980,"antha":47981,"nrf":47982,"avoidance":47983,"aten":47984,"publix":47985,"bearers":47986,"nasi":47987,"hap":47988,"hells":47989,"ðŁĸ¥":47990,"ื":47991,"thelastjedi":47992,"ohwx":47993,"ðŁį«":47994,"wahoo":47995,"therese":47996,"recaps":47997,"ssnhq":47998,"birdphotography":47999,"vay":48000,"petti":48001,"paulo":48002,"belvedere":48003,"(*":48004,"grl":48005,"duvet":48006,"cpec":48007,"sait":48008,"porsch":48009,"measurable":48010,"aviators":48011,"fremantle":48012,"breen":48013,"onom":48014,"meand":48015,"lifesaving":48016,"euref":48017,"endon":48018,"embaras":48019,"airasia":48020,"elis":48021,"dunkin":48022,"starmagic":48023,"sill":48024,"portobello":48025,"kiefer":48026,"exe":48027,"muted":48028,"ãģ¦":48029,"wethepeople":48030,"logia":48031,"liberal":48032,"theforceawakens":48033,"mined":48034,"haunts":48035,"freckles":48036,"caretaker":48037,"sindia":48038,"âķIJ":48039,"devlin":48040,"liston":48041,"directioner":48042,"ohn":48043,"figaro":48044,"emmanuel":48045,"dubois":48046,"clones":48047,"bruise":48048,"ðŁİĪðŁİī":48049,"disinfe":48050,"dermatology":48051,"asr":48052,"swatch":48053,"discomfort":48054,"tamanna":48055,"piday":48056,"macken":48057,"katic":48058,"delusional":48059,"shawnee":48060,"gud":48061,"albino":48062,"pali":48063,"dingh":48064,"cucumbers":48065,"coffey":48066,"anticipating":48067,"treasured":48068,"websummit":48069,"sheltered":48070,"savor":48071,"pedagogy":48072,"mgs":48073,"shma":48074,"sbu":48075,"denali":48076,"campos":48077,"bubblegum":48078,"oir":48079,"leaps":48080,"yler":48081,"rone":48082,"sanskrit":48083,"mint":48084,"meatless":48085,"futurist":48086,"dude":48087,"avel":48088,"protested":48089,"squire":48090,"zaki":48091,"szn":48092,"harcourt":48093,"cyclone":48094,"bourdain":48095,"gatherings":48096,"dant":48097,"adventurer":48098,"paragon":48099,"altman":48100,"dding":48101,"banerjee":48102,"snorkeling":48103,"motherwell":48104,"missy":48105,"ender":48106,"glows":48107,"kiwis":48108,"chickpea":48109,"poro":48110,"efron":48111,"appt":48112,"uy":48113,"specified":48114,"gabby":48115,"estrada":48116,"combos":48117,"bourbon":48118,"vini":48119,"varun":48120,"stephani":48121,"keywords":48122,"carvings":48123,"amitabh":48124,"wrought":48125,"twal":48126,"reels":48127,"clubbing":48128,"ubiquit":48129,"crit":48130,"ambedkar":48131,"æĻ":48132,"pruning":48133,"vaccinated":48134,"boeing":48135,"sks":48136,"loona":48137,"hypnosis":48138,"edelman":48139,"phol":48140,"hew":48141,"colosse":48142,"mckinsey":48143,"uon":48144,"tote":48145,"sacrificing":48146,"oxi":48147,"nang":48148,"emu":48149,"пÑĢиÑĢода":48150,"mth":48151,"kerswednesday":48152,"argued":48153,"timelapse":48154,"risking":48155,"regulating":48156,"nigh":48157,"likelihood":48158,"cubic":48159,"auction":48160,"reinfor":48161,"pistor":48162,"noses":48163,"yel":48164,"snuggles":48165,"pei":48166,"jeanette":48167,"taku":48168,"rith":48169,"guyz":48170,"à¸ŀ":48171,"yte":48172,"verted":48173,"paysoff":48174,"jaureg
ui":48175,"hooligans":48176,"procedural":48177,"mib":48178,"hardy":48179,"eleng":48180,"checkers":48181,"alline":48182,"themet":48183,"proudof":48184,"keerthyofficial":48185,"collaborator":48186,"niu":48187,"inflicted":48188,"advani":48189,"retwee":48190,"memoriam":48191,"ficial":48192,"tighter":48193,"salem":48194,"reviewers":48195,"brics":48196,"bendigo":48197,"amell":48198,"turkish":48199,"sushmaswar":48200,"paulson":48201,"palawan":48202,"mollie":48203,"stitcher":48204,"sburgh":48205,"iru":48206,"haydn":48207,"eners":48208,"aroa":48209,"uzzi":48210,"sarajevo":48211,"hela":48212,"apollo":48213,"ninety":48214,"vaca":48215,"spon":48216,"ventu":48217,"jelena":48218,"heifer":48219,"avoids":48220,"spine":48221,"prize":48222,"marist":48223,"recreating":48224,"mede":48225,"wooden":48226,"findlay":48227,"rofl":48228,"ndi":48229,"comprehend":48230,"yugo":48231,"yü":48232,"towork":48233,"ufos":48234,"sonar":48235,"piston":48236,"recording":48237,"tentative":48238,"artforsale":48239,"pellets":48240,"fredo":48241,"ÙĪر":48242,"muses":48243,"customization":48244,"profound":48245,"isner":48246,"ideally":48247,"siam":48248,"plankton":48249,"cmdr":48250,"manger":48251,"franken":48252,"customizable":48253,"म":48254,"walkaway":48255,"swivel":48256,"vastly":48257,"noton":48258,"lexa":48259,"exmoor":48260,"zas":48261,"tante":48262,"reductions":48263,"lolly":48264,"hipsters":48265,"benefited":48266,"ë²":48267,"wwwww":48268,"masculine":48269,"fiji":48270,"drey":48271,"phill":48272,"aneous":48273,"nicol":48274,"mendez":48275,"disappro":48276,"chner":48277,"throughs":48278,"shenmue":48279,"eastman":48280,"ðŁIJİ":48281,"yuck":48282,"undertale":48283,"reys":48284,"gobeavs":48285,"engen":48286,"cna":48287,"merr":48288,"birk":48289,"ãģ¨ç¹ĭãģĮãĤĬãģŁãģĦ":48290,"âĥ£@":48291,"ynna":48292,"steed":48293,"offender":48294,"atum":48295,"vanishing":48296,"presidenti":48297,"lovethem":48298,"gnocchi":48299,"friggin":48300,"peril":48301,"madhya":48302,"agne":48303,"deejay":48304,"marnock":48305,"mtb":48306,"foldable":48307,"@___":48308,"standre":48309,"bronx":48310,"bowski":48311,"finite":48312,"crockett":48313,"bsf":48314,"getit":48315,"serenawilliams":48316,"miro":48317,"ignatius":48318,"slay":48319,"rinse":48320,"fondue":48321,"seldom":48322,"smore":48323,"gani":48324,"dyce":48325,"dmitry":48326,"crumb":48327,"latepost":48328,"primark":48329,"ohana":48330,"florals":48331,"doa":48332,"remembranceday":48333,"dds":48334,"azione":48335,"toonami":48336,"airport":48337,"æĿ±":48338,"thad":48339,"fist":48340,"dinesh":48341,"drwho":48342,"adwords":48343,"admirer":48344,"proje":48345,"kyrgyz":48346,"à«":48347,"manifestation":48348,"lewan":48349,"jic":48350,"thibau":48351,"leased":48352,"vanity":48353,"nourished":48354,"nevertheless":48355,"augmente":48356,"fuelled":48357,"chead":48358,"wilshere":48359,"rudi":48360,"pz":48361,"myco":48362,"morro":48363,"herbalife":48364,"hardrock":48365,"deman":48366,"dreality":48367,"spades":48368,"cevic":48369,"bhai":48370,"baron":48371,"ultimatefan":48372,"hounews":48373,"tobi":48374,"strut":48375,"keel":48376,"affiliation":48377,"themasters":48378,"smal":48379,"hue":48380,"esteban":48381,"conv":48382,"omnic":48383,"databases":48384,"cov":48385,"terti":48386,"stg":48387,"snoopdogg":48388,"metabol":48389,"lethbridge":48390,"ðŁı»âĢįâĻĢï¸ı":48391,"yearling":48392,"residentevil":48393,"nwsl":48394,"iyaki":48395,"griezmann":48396,"cous":48397,"ðŁĵĿ:":48398,"torian":48399,"sami":48400,"ðŁĶ¥ðŁĶ¥ðŁĶ¥ðŁĶ¥ðŁĶ¥":48401,"gare":48402,"alliances":48403,"whitfield":48404,"wether":48405,"refining":48406,"coyi":48407
,"kraken":48408,"ðŁĺĺâĿ¤":48409,"singularity":48410,"lili":48411,"hns":48412,"boldand":48413,"wawrinka":48414,"misogyny":48415,"lovers":48416,"cq":48417,"bdg":48418,"adona":48419,"garter":48420,"womenof":48421,"scd":48422,"recognising":48423,"muna":48424,"strou":48425,"signalling":48426,"laredo":48427,"hellboy":48428,"aleksand":48429,"unavailable":48430,"pediatric":48431,"asin":48432,"meria":48433,"rishi":48434,"futurism":48435,"wye":48436,"polarized":48437,"ewe":48438,"propel":48439,"informs":48440,"crease":48441,"~\"":48442,"artiston":48443,"likefor":48444,"heidelberg":48445,"erra":48446,"lifein":48447,"lenny":48448,"interrupt":48449,"coherent":48450,"caz":48451,"vickers":48452,"leveled":48453,"fbs":48454,"cabins":48455,"bummed":48456,"apostles":48457,"weh":48458,"tendon":48459,"souvenirs":48460,"infuri":48461,"pierce":48462,"asset":48463,"mlas":48464,"goth":48465,"diggin":48466,"annas":48467,"ylor":48468,"thwaite":48469,"swel":48470,"panera":48471,"murderers":48472,"crooked":48473,"bsgo":48474,"acu":48475,"aon":48476,"rean":48477,"oneof":48478,"kohl":48479,"bloodh":48480,"pesticide":48481,"lostdog":48482,"flexing":48483,"ëĤĺ":48484,"supra":48485,"eternally":48486,"ðŁļĻ":48487,"paolo":48488,"olan":48489,"momo":48490,"iselle":48491,"captainmarvel":48492,"slou":48493,"mistakenly":48494,"akhilesh":48495,"mert":48496,"ilinan":48497,"buon":48498,"balkan":48499,"mirro":48500,"millen":48501,"derail":48502,"damon":48503,"titi":48504,"bios":48505,"redon":48506,"picard":48507,"parte":48508,"ðŁ¤Ł":48509,"غ":48510,"sonics":48511,"firsth":48512,"ddc":48513,"vegans":48514,"turban":48515,"nigan":48516,"lottie":48517,"lyndon":48518,"starbuck":48519,"pinkfloyd":48520,"lifestyles":48521,"amara":48522,"ashe":48523,"rsc":48524,"vala":48525,"smer":48526,"cwgc":48527,"client":48528,"buenas":48529,"jagan":48530,"coops":48531,"ðŁijijðŁijij":48532,"specializes":48533,"snagged":48534,"glar":48535,"bennet":48536,"wildlifewednesday":48537,"bowden":48538,"pik":48539,"artin":48540,"emporium":48541,"arl":48542,"reba":48543,"passer":48544,"disappoints":48545,"additive":48546,"âľĬðŁı½":48547,"bayer":48548,"missoula":48549,"haskell":48550,"commences":48551,"nix":48552,"neman":48553,"exploited":48554,"plasticsurgery":48555,"ccd":48556,"asocial":48557,"vot":48558,"siegel":48559,"froome":48560,"kapam":48561,"fara":48562,"eha":48563,"probes":48564,"mwf":48565,"meeting":48566,"pbb":48567,"akins":48568,"mistletoe":48569,"kingdomhearts":48570,"forkids":48571,"ecr":48572,"bale":48573,"escorts":48574,"adidasoriginals":48575,"kwa":48576,"kts":48577,"halloffame":48578,"ðŁĺį.":48579,"wags":48580,"potted":48581,"owing":48582,"honeycomb":48583,"hefty":48584,"urology":48585,"merle":48586,"bpd":48587,"stripping":48588,"reich":48589,"kstate":48590,"guay":48591,"yonge":48592,"shakti":48593,"gloom":48594,"batt":48595,"sonom":48596,"nery":48597,"elba":48598,"blanks":48599,"helle":48600,"triplets":48601,"bombay":48602,"akarta":48603,"abia":48604,"transmitted":48605,"rolf":48606,"jais":48607,"angularjs":48608,"fierc":48609,"mss":48610,"trace":48611,"à¥ĩ":48612,"tombs":48613,"oldman":48614,"kombucha":48615,"fol":48616,"ehealth":48617,"cereals":48618,"arelli":48619,"inari":48620,"ðŁĴ©":48621,"wol":48622,"liberties":48623,"fawn":48624,"affirm":48625,"nunavut":48626,"hysterical":48627,"kdrama":48628,"artes":48629,"âĢ¢âĢ¢âĢ¢âĢ¢âĢ¢âĢ¢âĢ¢âĢ¢":48630,"valentin":48631,"manslaughter":48632,"gales":48633,"eoin":48634,"energized":48635,"dels":48636,"withdraws":48637,"stles":48638,"sarcastic":48639,"ramesh":48640,"incredibles":48641,"lockhart":48642,"yawn":
48643,"ultimatefanlive":48644,"oooooooooooooooo":48645,"muen":48646,"gurudev":48647,"teer":48648,"peeling":48649,"newsnow":48650,"linguistics":48651,"directv":48652,"agend":48653,"unilever":48654,"ruger":48655,"handedly":48656,"erose":48657,"limel":48658,"thec":48659,"royalties":48660,"finishers":48661,"nrg":48662,"mgt":48663,"fidget":48664,"comps":48665,"bacon":48666,"aggressively":48667,"abit":48668,"châ":48669,"tarde":48670,"slugger":48671,"qanda":48672,"greening":48673,"dats":48674,"enslaved":48675,"spector":48676,"oye":48677,"freef":48678,"bhand":48679,"stopbrexit":48680,"misconceptions":48681,"cava":48682,"ðŁĺįðŁĺįðŁĺįðŁĺįðŁĺįðŁĺįðŁĺįðŁĺį":48683,"multitasking":48684,"housel":48685,"ferreira":48686,"centime":48687,"ankles":48688,"jodh":48689,"helly":48690,"frome":48691,"outtuesday":48692,"narnia":48693,"balaji":48694,"lbloggers":48695,"jyoti":48696,"ðŁįĩ":48697,"lancia":48698,"capri":48699,"yap":48700,"natash":48701,"downfall":48702,".\"âĢĶ":48703,"î":48704,"ligament":48705,"coatings":48706,"aided":48707,"hiko":48708,"falling":48709,"encrypted":48710,"yegfood":48711,"infringement":48712,"cudi":48713,"cep":48714,"ðŁĺįðŁĺĤ":48715,"trad":48716,"superrugby":48717,"edwin":48718,"whiche":48719,"vimeo":48720,"layne":48721,"invigor":48722,"hehe":48723,"dubrovnik":48724,"bieber":48725,"utr":48726,"shaman":48727,"opers":48728,"hamill":48729,"enig":48730,"dif":48731,"arum":48732,"scrapbook":48733,"minh":48734,"divergence":48735,"mckinnon":48736,"lifetime":48737,"guterres":48738,"wille":48739,"pleas":48740,"patty":48741,"micron":48742,"kz":48743,"domaine":48744,"rusher":48745,"mds":48746,"chesney":48747,"screwdriver":48748,"âģ©,":48749,"sledge":48750,"hauer":48751,"chana":48752,"stamina":48753,"sprinkler":48754,"pln":48755,"heff":48756,"bolton":48757,"omon":48758,"carrington":48759,"accordion":48760,"jorge":48761,"interception":48762,"inputs":48763,"gull":48764,"transcription":48765,"vanuatu":48766,"itical":48767,"ethos":48768,"tich":48769,"spacey":48770,"peeking":48771,"umi":48772,"hager":48773,"psychotic":48774,"illian":48775,"illia":48776,"bonnaroo":48777,"anese":48778,"puc":48779,"laghateparth":48780,"enhall":48781,"economical":48782,"dredge":48783,"%-":48784,"uwe":48785,"tubular":48786,"scouncil":48787,"peasants":48788,"fler":48789,"tumbler":48790,"hep":48791,"fordham":48792,"rowley":48793,"initials":48794,"evasion":48795,"ernation":48796,"plugins":48797,"cochran":48798,"cattle":48799,"acidity":48800,"ðŁİĬðŁİī":48801,"regrann":48802,"jumpman":48803,"eface":48804,"xma":48805,"patriarchy":48806,"escobar":48807,"cristian":48808,"tipton":48809,"nueva":48810,"hackney":48811,"backseat":48812,"killarney":48813,"aidan":48814,"stadion":48815,"simultaneous":48816,"idaho":48817,"aje":48818,"uth":48819,"figure":48820,"clos":48821,"burk":48822,"voluntar":48823,"recite":48824,"macfarlane":48825,"curfew":48826,"boudo":48827,"wgn":48828,"stix":48829,"slap":48830,"scratched":48831,"phillip":48832,"journe":48833,"expelled":48834,"waz":48835,"uke":48836,"tatiana":48837,"oue":48838,"hopp":48839,"dimitri":48840,"ðŁĵ£":48841,"matologist":48842,"electrifying":48843,"bluffs":48844,"billsmafia":48845,"azcardinals":48846,"yaa":48847,"xmas":48848,"shara":48849,"rith":48850,"gills":48851,"dres":48852,"barton":48853,"authorization":48854,"imperialism":48855,"homeof":48856,"todo":48857,"footpath":48858,"bandwidth":48859,"visitspain":48860,"mohsin":48861,"erupted":48862,"miki":48863,"insignia":48864,"mikel":48865,"ssh":48866,"gera":48867,"bankholiday":48868,"awan":48869,"tweak":48870,"starcraft":48871,"eal":48872,"construc
tion":48873,"skeletons":48874,"leep":48875,"inem":48876,"barclay":48877,"shipwreck":48878,"monsieur":48879,"yoh":48880,"ront":48881,"formative":48882,"sero":48883,"lep":48884,"horseman":48885,"hoosier":48886,"hazmat":48887,"cylinders":48888,"centi":48889,"ðŁĴ¥ðŁĴ¥ðŁĴ¥":48890,"reem":48891,"naire":48892,"musically":48893,"grasshopper":48894,"estonian":48895,"terminology":48896,"romain":48897,"bloggerrt":48898,"toxin":48899,"stance":48900,"cultivated":48901,"anast":48902,"ðŁIJį":48903,"shimano":48904,"gopher":48905,"enei":48906,"recyclable":48907,"gamification":48908,"fightfor":48909,"cq":48910,"avocados":48911,"keys":48912,"elike":48913,"glycer":48914,"shakur":48915,"mobilization":48916,"galley":48917,"explain":48918,"exchanged":48919,"peth":48920,"obedience":48921,"illage":48922,"ennis":48923,"ãĥŀ":48924,"wiv":48925,"wallabies":48926,"maar":48927,"igers":48928,"fintech":48929,"finalized":48930,"woj":48931,"meaningless":48932,"infield":48933,"onnaise":48934,"eet":48935,"bronte":48936,"passages":48937,"ðŁij§":48938,"strickland":48939,"northernlights":48940,"lomond":48941,"htc":48942,"wray":48943,"shifter":48944,"dialog":48945,"ðŁįį":48946,">>>>>>":48947,"teatime":48948,"stech":48949,"sichuan":48950,"quill":48951,"franca":48952,"complementary":48953,"barrington":48954,"marcus":48955,"malam":48956,"goooo":48957,"forsa":48958,"electra":48959,"afs":48960,"âĹĨ":48961,"trife":48962,"snazzy":48963,"folia":48964,"andolan":48965,"afterdark":48966,"woodson":48967,"strade":48968,"littlest":48969,"ogun":48970,"conwy":48971,"cowards":48972,"ðŁĺĤðŁĺĤðŁĺĤðŁĺĤðŁĺĤðŁĺĤðŁĺĤ":48973,"íĬ¸":48974,"seul":48975,"murphy":48976,"dunks":48977,"kapilshar":48978,"joachim":48979,"womack":48980,"equality":48981,"averages":48982,"aine":48983,"ðŁ¦Ī":48984,"tacular":48985,"disability":48986,"uked":48987,"midcentury":48988,"barthol":48989,"teasers":48990,"tabern":48991,"njcaa":48992,"spout":48993,"opi":48994,"kubball":48995,"blom":48996,"soar":48997,"populism":48998,"methyl":48999,"ðŁijĬðŁı¼":49000,"ospre":49001,"aloils":49002,"ðŁĵĸ":49003,"ðŁĮļ":49004,"xer":49005,"spilling":49006,"publica":49007,"cardam":49008,"adish":49009,"sacha":49010,"pkg":49011,"buda":49012,"lyricist":49013,"ibc":49014,"grump":49015,"hover":49016,"halep":49017,"antibody":49018,"anemone":49019,"âĻ¥âĻ¥âĻ¥âĻ¥":49020,"mcl":49021,"lithograph":49022,"ccu":49023,"sfest":49024,"pathic":49025,"callister":49026,"ottawa":49027,"gunsn":49028,"rutger":49029,"halibut":49030,"envision":49031,"differentiate":49032,"ðŁļĢðŁļĢ":49033,"piran":49034,"latel":49035,"ucn":49036,"troubad":49037,"raine":49038,"fiercely":49039,"learnenglish":49040,"lease":49041,"wexmondays":49042,"emit":49043,"drayton":49044,"burrell":49045,"scubadiving":49046,"holler":49047,"dru":49048,"clocked":49049,"wral":49050,"apro":49051,"translucent":49052,"wbo":49053,"patriarch":49054,"moja":49055,"lannister":49056,"fishery":49057,"nederland":49058,"mildly":49059,"mirai":49060,"mako":49061,"jap":49062,"ðŁĺ©ðŁĺ©ðŁĺ©":49063,"prostatec":49064,"panna":49065,"arama":49066,"undertaking":49067,"tompkins":49068,"neop":49069,"solids":49070,"savoury":49071,"eames":49072,"cutlery":49073,"woodbridge":49074,"steamer":49075,"rizzo":49076,"wildcat":49077,"ratna":49078,"laminated":49079,"kineni":49080,"jalap":49081,"aides":49082,"acknowledges":49083,"?!?!?!":49084,"!ðŁİī":49085,"wafc":49086,"maggio":49087,"haves":49088,"darje":49089,"ofi":49090,"gril":49091,"vasi":49092,"brux":49093,"mohd":49094,"fakespeare":49095,"arnold":49096,"rmb":49097,"forbe":49098,"walleye":49099,"rodi":49100,"therapeutics":49101,"strategi":49102,"
obste":49103,"mudder":49104,"downloadable":49105,"ddings":49106,"dca":49107,"asiangames":49108,"campeon":49109,"appropriation":49110,"thcentury":49111,"ramatta":49112,"draped":49113,"bullion":49114,"muc":49115,"onex":49116,"segreg":49117,"ophelia":49118,"bodily":49119,"âĿ¤ðŁĺį":49120,"wizar":49121,"teased":49122,"ademy":49123,"toid":49124,"sura":49125,"lazarus":49126,"snickers":49127,"mase":49128,"loh":49129,"bowed":49130,"biblio":49131,"xchange":49132,"harlan":49133,"ghoshal":49134,"flavorful":49135,"bhagat":49136,"allez":49137,"whichever":49138,"tenstein":49139,"discer":49140,"organiser":49141,"mtg":49142,"dreamliner":49143,"tse":49144,"hokkaido":49145,"mok":49146,"indulgent":49147,"hickman":49148,"blinded":49149,"alyn":49150,"aaaah":49151,"spool":49152,"loughborough":49153,"interpret":49154,"etv":49155,"aristotle":49156,"optimizing":49157,"avicii":49158,"madurai":49159,"juli":49160,"nawaz":49161,"matchups":49162,"abide":49163,"painting":49164,"welling":49165,"veli":49166,"octagon":49167,"inscribed":49168,"poking":49169,"placer":49170,"lifecycle":49171,"kilig":49172,"gsp":49173,"elives":49174,"clements":49175,"nasheed":49176,"mesut":49177,"incarcerated":49178,"distilled":49179,"walang":49180,"delicacy":49181,"delgado":49182,"chez":49183,"chita":49184,"adero":49185,"tux":49186,"patil":49187,"odo":49188,"abhcosmetics":49189,"tvc":49190,"pbc":49191,"inaccurate":49192,"hardworkpaysoff":49193,"baller":49194,"quotation":49195,"merchandising":49196,"gastri":49197,"defenses":49198,"drogba":49199,"bexhill":49200,"bankno":49201,"winona":49202,"sieg":49203,"pgs":49204,"hahahha":49205,"aguchi":49206,"subram":49207,"miracle":49208,"desch":49209,"libre":49210,"bacher":49211,"entine":49212,"bbcradi":49213,"loudest":49214,"rps":49215,"pierc":49216,"fryer":49217,"stormtrooper":49218,"rafaelnadal":49219,"pasco":49220,"exhaustion":49221,"epiconetsy":49222,"rctid":49223,"kellie":49224,"gaines":49225,"dbz":49226,"smriti":49227,"sbridge":49228,"limited":49229,"claw":49230,"technical":49231,"biographical":49232,"adored":49233,"ะ":49234,"exclude":49235,"acadia":49236,"keyboards":49237,"furman":49238,"soca":49239,"suru":49240,"nips":49241,"swaps":49242,"serverless":49243,"rune":49244,"puffy":49245,"northampton":49246,"nishings":49247,"hender":49248,"cartridges":49249,"gunshot":49250,"ðŁĵ¹":49251,"filament":49252,"respondents":49253,"peyton":49254,"mountaineer":49255,"merging":49256,"lifespan":49257,"intimidation":49258,"pafc":49259,"nlwx":49260,"expansive":49261,"purr":49262,"fck":49263,"cae":49264,"atti":49265,"telethon":49266,"sohn":49267,"mendel":49268,"lopes":49269,"dori":49270,"unbroken":49271,"tered":49272,"tastings":49273,"inactive":49274,"disintegr":49275,"tassel":49276,"sharethe":49277,"piano":49278,"islay":49279,"airspace":49280,"zawa":49281,"ricciardo":49282,"mington":49283,"fresher":49284,"curry":49285,"revs":49286,"pharoah":49287,"hmv":49288,"exhilarating":49289,"whoo":49290,"linkin":49291,"krispy":49292,"competency":49293,"stewards":49294,"nebu":49295,"katsu":49296,"admins":49297,"bazar":49298,"asar":49299,"givingback":49300,"ssummit":49301,"songz":49302,"linus":49303,"rajkumar":49304,"farmington":49305,"fantasia":49306,"ðŁĺ´ðŁĺ´":49307,"sobri":49308,"lisse":49309,"barrymore":49310,"prism":49311,"blob":49312,"senew":49313,"monoxide":49314,"expire":49315,"eighteen":49316,"dipper":49317,"xiao":49318,"kilt":49319,"hinch":49320,"bbcsport":49321,"bamboo":49322,"pter":49323,"exal":49324,"ðŁ¦ĭ":49325,"hamlin":49326,"expeditions":49327,"stargazing":49328,"foodsecurity":49329,"wylie":49330,"ulf":49331,"sting
ly":49332,"onstorm":49333,"loeb":49334,"broome":49335,"bnha":49336,"pancreatic":49337,"elive":49338,"!!!!!!!!!!!":49339,"therapper":49340,"orthopedic":49341,"avengersendgame":49342,"antitrust":49343,"ìļ°":49344,"gote":49345,"omd":49346,"offside":49347,"gyllen":49348,"wineries":49349,"whitewater":49350,"adl":49351,"lupita":49352,"exceeds":49353,"consisted":49354,"chewbacca":49355,"ashleigh":49356,"nhljets":49357,"issan":49358,"shld":49359,"hayat":49360,"cranberries":49361,"ðŁ¤ĺðŁı½":49362,"rockthe":49363,"springtraining":49364,"fallout":49365,"dairyfree":49366,"waj":49367,"undecided":49368,"sown":49369,"rcn":49370,"northwales":49371,"httr":49372,"fumble":49373,"dits":49374,"compelled":49375,"populist":49376,"minted":49377,"blanchett":49378,".''":49379,"propulsion":49380,"milla":49381,"auberg":49382,"hertz":49383,"hta":49384,"udaipur":49385,"serendipity":49386,"aztecs":49387,"alsace":49388,"ðŁIJij":49389,"lun":49390,"shoes":49391,"charli":49392,"garza":49393,"ðŁĴŁ":49394,"probiotics":49395,"foxtv":49396,"olis":49397,"miff":49398,"localized":49399,"diffuser":49400,"sigue":49401,"funko":49402,"rendous":49403,"ðŁĴij":49404,"jekyll":49405,"<|startoftext|>":49406,"<|endoftext|>":49407} \ No newline at end of file diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder.mlpackage/Data/com.apple.CoreML/FeatureDescriptions.json b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder.mlpackage/Data/com.apple.CoreML/FeatureDescriptions.json new file mode 100644 index 0000000000000000000000000000000000000000..5f79e6941ccc0083aeee199d9e7a1fe8629046b2 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder.mlpackage/Data/com.apple.CoreML/FeatureDescriptions.json @@ -0,0 +1,18 @@ +{ + "Outputs" : { + "pooled_outputs" : { + "MLFeatureShortDescription" : "The version of the `last_hidden_state` output after pooling" + }, + "hidden_embeds" : { + "MLFeatureShortDescription" : "Hidden states after the encoder layers" + } + }, + "Inputs" : { + "input_ids" : { + "MLFeatureShortDescription" : "The token ids that represent the input text" + } + }, + "TrainingInputs" : { + + } +} \ No newline at end of file diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder.mlpackage/Data/com.apple.CoreML/Metadata.json b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder.mlpackage/Data/com.apple.CoreML/Metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..00b30270d8792a83aaa9aff233c3a4cc101c06f6 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder.mlpackage/Data/com.apple.CoreML/Metadata.json @@ -0,0 +1,10 @@ +{ + "MLModelVersionStringKey" : "stabilityai\/stable-diffusion-xl-base-1.0", + "MLModelDescriptionKey" : "Stable Diffusion generates images conditioned on text and\/or other images as input through the diffusion process. 
Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.", + "MLModelCreatorDefinedKey" : { + "com.github.apple.coremltools.version" : "7.0b1", + "com.github.apple.coremltools.source" : "torch==2.0.1+cu117" + }, + "MLModelAuthorKey" : "Please refer to the Model Card available at huggingface.co\/stabilityai\/stable-diffusion-xl-base-1.0", + "MLModelLicenseKey" : "OpenRAIL++-M (https:\/\/huggingface.co\/stabilityai\/stable-diffusion-xl-base-1.0\/blob\/main\/LICENSE.md)" +} \ No newline at end of file diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder.mlpackage/Data/com.apple.CoreML/model.mlmodel b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder.mlpackage/Data/com.apple.CoreML/model.mlmodel new file mode 100644 index 0000000000000000000000000000000000000000..8d3c1981a05ebcf9b4770ff71908a0eafd9c6972 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder.mlpackage/Data/com.apple.CoreML/model.mlmodel @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7fc64f553e0f6be677899106f40ed3bf85eceb3d0b55f90022252cb8dff1d4a +size 161160 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin new file mode 100644 index 0000000000000000000000000000000000000000..3cd2ebb4765b40c98cb09d0e35c908e9a2e229ef --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a86533724aadf50c8f5539592d440887a484f60002d7967505c69c7faf4d7797 +size 246145536 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder.mlpackage/Manifest.json b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder.mlpackage/Manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..15321ea68520e74cb8487f0c25c648b2c779016b --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder.mlpackage/Manifest.json @@ -0,0 +1,30 @@ +{ + "fileFormatVersion": "1.0.0", + "itemInfoEntries": { + "2E437C75-2D29-4906-9BBF-06B4F60AF608": { + "author": "com.apple.CoreML", + "description": "External Metadata Overlay", + "name": "Metadata.json", + "path": "com.apple.CoreML/Metadata.json" + }, + "3d765fc2-d834-4477-9cf7-818901242816": { + "author": "com.apple.CoreML", + "description": "CoreML Model Weights", + "name": "weights", + "path": "com.apple.CoreML/weights" + }, + "58CF0F86-0A4C-4478-B878-FB21979F2094": { + "author": "com.apple.CoreML", + "description": "External FeatureDescription Overlay", + "name": "FeatureDescriptions.json", + "path": "com.apple.CoreML/FeatureDescriptions.json" + }, 
+ "5fc727d7-449d-4885-8c48-b3b711dfed7b": { + "author": "com.apple.CoreML", + "description": "CoreML Model Specification", + "name": "model.mlmodel", + "path": "com.apple.CoreML/model.mlmodel" + } + }, + "rootModelIdentifier": "5fc727d7-449d-4885-8c48-b3b711dfed7b" +} diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder_2.mlpackage/Data/com.apple.CoreML/FeatureDescriptions.json b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder_2.mlpackage/Data/com.apple.CoreML/FeatureDescriptions.json new file mode 100644 index 0000000000000000000000000000000000000000..5f79e6941ccc0083aeee199d9e7a1fe8629046b2 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder_2.mlpackage/Data/com.apple.CoreML/FeatureDescriptions.json @@ -0,0 +1,18 @@ +{ + "Outputs" : { + "pooled_outputs" : { + "MLFeatureShortDescription" : "The version of the `last_hidden_state` output after pooling" + }, + "hidden_embeds" : { + "MLFeatureShortDescription" : "Hidden states after the encoder layers" + } + }, + "Inputs" : { + "input_ids" : { + "MLFeatureShortDescription" : "The token ids that represent the input text" + } + }, + "TrainingInputs" : { + + } +} \ No newline at end of file diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder_2.mlpackage/Data/com.apple.CoreML/Metadata.json b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder_2.mlpackage/Data/com.apple.CoreML/Metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..00b30270d8792a83aaa9aff233c3a4cc101c06f6 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder_2.mlpackage/Data/com.apple.CoreML/Metadata.json @@ -0,0 +1,10 @@ +{ + "MLModelVersionStringKey" : "stabilityai\/stable-diffusion-xl-base-1.0", + "MLModelDescriptionKey" : "Stable Diffusion generates images conditioned on text and\/or other images as input through the diffusion process. 
Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.", + "MLModelCreatorDefinedKey" : { + "com.github.apple.coremltools.version" : "7.0b1", + "com.github.apple.coremltools.source" : "torch==2.0.1+cu117" + }, + "MLModelAuthorKey" : "Please refer to the Model Card available at huggingface.co\/stabilityai\/stable-diffusion-xl-base-1.0", + "MLModelLicenseKey" : "OpenRAIL++-M (https:\/\/huggingface.co\/stabilityai\/stable-diffusion-xl-base-1.0\/blob\/main\/LICENSE.md)" +} \ No newline at end of file diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder_2.mlpackage/Data/com.apple.CoreML/model.mlmodel b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder_2.mlpackage/Data/com.apple.CoreML/model.mlmodel new file mode 100644 index 0000000000000000000000000000000000000000..76b2b595c5749da57ddcd5255cb6a8ed5c9d794b --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder_2.mlpackage/Data/com.apple.CoreML/model.mlmodel @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cb832076a7e67ff890e274281c5bf83c3be6743b8214d0c6df95b88fce6711f +size 418221 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder_2.mlpackage/Data/com.apple.CoreML/weights/weight.bin b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder_2.mlpackage/Data/com.apple.CoreML/weights/weight.bin new file mode 100644 index 0000000000000000000000000000000000000000..4404ced8325eb5e803c3b4b2be80bf0b4517dce4 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder_2.mlpackage/Data/com.apple.CoreML/weights/weight.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8bd1fc0bcce11cff685648387b0060e0b6ecfce6c34e580e1ae904cae5903363 +size 1389367424 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder_2.mlpackage/Manifest.json b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder_2.mlpackage/Manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..8aad7671d717672356c39f60943576df93188205 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_text_encoder_2.mlpackage/Manifest.json @@ -0,0 +1,30 @@ +{ + "fileFormatVersion": "1.0.0", + "itemInfoEntries": { + "1003fcc7-51c2-44b1-bfd2-71eaffb11598": { + "author": "com.apple.CoreML", + "description": "CoreML Model Weights", + "name": "weights", + "path": "com.apple.CoreML/weights" + }, + "5bff403e-2a80-4472-b7fe-4c3438e76e85": { + "author": "com.apple.CoreML", + "description": "CoreML Model Specification", + "name": "model.mlmodel", + "path": "com.apple.CoreML/model.mlmodel" + }, + "66E76242-93D0-4D4D-8D64-697552EC6C38": { + "author": "com.apple.CoreML", + "description": "External FeatureDescription Overlay", + "name": "FeatureDescriptions.json", + "path": 
"com.apple.CoreML/FeatureDescriptions.json" + }, + "8DF8F909-251F-4BFD-BFA3-F72860B2F27D": { + "author": "com.apple.CoreML", + "description": "External Metadata Overlay", + "name": "Metadata.json", + "path": "com.apple.CoreML/Metadata.json" + } + }, + "rootModelIdentifier": "5bff403e-2a80-4472-b7fe-4c3438e76e85" +} diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_unet.mlpackage/Data/com.apple.CoreML/model.mlmodel b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_unet.mlpackage/Data/com.apple.CoreML/model.mlmodel new file mode 100644 index 0000000000000000000000000000000000000000..f898d00ada006078d5d01952acfc3913efc3b7db --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_unet.mlpackage/Data/com.apple.CoreML/model.mlmodel @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d32aa25518ec6d20563c61f0cc8f0b9ecca6d31665e4a5345a4b44d4c7706004 +size 2135914 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_unet.mlpackage/Data/com.apple.CoreML/weights/weight.bin b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_unet.mlpackage/Data/com.apple.CoreML/weights/weight.bin new file mode 100644 index 0000000000000000000000000000000000000000..11a02126a20d91b2bfba01311502816d2bdfae06 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_unet.mlpackage/Data/com.apple.CoreML/weights/weight.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9219e9fcaa60ff76a31ea84658b485264b50f66147c058e18a7cfd456bb0f3a +size 1450049728 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_unet.mlpackage/Manifest.json b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_unet.mlpackage/Manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..00b631085269f7bb0e00ce95ffe19b59a348d4de --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_unet.mlpackage/Manifest.json @@ -0,0 +1,18 @@ +{ + "fileFormatVersion": "1.0.0", + "itemInfoEntries": { + "4aab2a70-8353-41d1-81e0-1761fcebde0f": { + "author": "com.apple.CoreML", + "description": "CoreML Model Weights", + "name": "weights", + "path": "com.apple.CoreML/weights" + }, + "58e673b4-eef0-4992-a74f-f42d8ec45a72": { + "author": "com.apple.CoreML", + "description": "CoreML Model Specification", + "name": "model.mlmodel", + "path": "com.apple.CoreML/model.mlmodel" + } + }, + "rootModelIdentifier": "58e673b4-eef0-4992-a74f-f42d8ec45a72" +} diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_decoder.mlpackage/Data/com.apple.CoreML/FeatureDescriptions.json b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_decoder.mlpackage/Data/com.apple.CoreML/FeatureDescriptions.json new file mode 
100644 index 0000000000000000000000000000000000000000..85cdc68569e1b5db241ffc253281d2b9431a65b9 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_decoder.mlpackage/Data/com.apple.CoreML/FeatureDescriptions.json @@ -0,0 +1,15 @@ +{ + "Outputs" : { + "image" : { + "MLFeatureShortDescription" : "Generated image normalized to range [-1, 1]" + } + }, + "Inputs" : { + "z" : { + "MLFeatureShortDescription" : "The denoised latent embeddings from the unet model after the last step of reverse diffusion" + } + }, + "TrainingInputs" : { + + } +} \ No newline at end of file diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_decoder.mlpackage/Data/com.apple.CoreML/Metadata.json b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_decoder.mlpackage/Data/com.apple.CoreML/Metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..df023f438552f16830ee2a1f4602807b0a88e28f --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_decoder.mlpackage/Data/com.apple.CoreML/Metadata.json @@ -0,0 +1,10 @@ +{ + "MLModelVersionStringKey" : "stabilityai\/stable-diffusion-xl-base-1.0", + "MLModelDescriptionKey" : "Stable Diffusion generates images conditioned on text and\/or other images as input through the diffusion process. Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.", + "MLModelCreatorDefinedKey" : { + "com.github.apple.coremltools.version" : "7.0b1", + "com.github.apple.coremltools.source" : "torch==2.0.1" + }, + "MLModelAuthorKey" : "Please refer to the Model Card available at huggingface.co\/stabilityai\/stable-diffusion-xl-base-1.0", + "MLModelLicenseKey" : "OpenRAIL++-M (https:\/\/huggingface.co\/stabilityai\/stable-diffusion-xl-base-1.0\/blob\/main\/LICENSE.md)" +} \ No newline at end of file diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_decoder.mlpackage/Data/com.apple.CoreML/model.mlmodel b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_decoder.mlpackage/Data/com.apple.CoreML/model.mlmodel new file mode 100644 index 0000000000000000000000000000000000000000..cee02931ebfaf6bacc54bae47a7bbc9b914c90b7 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_decoder.mlpackage/Data/com.apple.CoreML/model.mlmodel @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb66e41edf6678acc5df408ffb294157df24996f810f649d953d2997df2f40cc +size 143700 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_decoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_decoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin new file mode 100644 index 0000000000000000000000000000000000000000..10a9bb7a56bab8bda6b91e312c1a30847f14d99e --- /dev/null +++ 
b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_decoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ade814d6037fb5ba892963be1596c8e37852f96c399101401831f8c07e64bd2 +size 197977216 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_decoder.mlpackage/Manifest.json b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_decoder.mlpackage/Manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..65494bf2f4b4718f54af653ffcc316509d27cfe0 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_decoder.mlpackage/Manifest.json @@ -0,0 +1,30 @@ +{ + "fileFormatVersion": "1.0.0", + "itemInfoEntries": { + "0C4B4A84-E58C-4B55-9232-7AD156C36DF1": { + "author": "com.apple.CoreML", + "description": "CoreML Model Specification", + "name": "model.mlmodel", + "path": "com.apple.CoreML/model.mlmodel" + }, + "63939630-D849-41F2-B7BB-9439F4215E25": { + "author": "com.apple.CoreML", + "description": "External FeatureDescription Overlay", + "name": "FeatureDescriptions.json", + "path": "com.apple.CoreML/FeatureDescriptions.json" + }, + "920AF9A5-7F1B-4655-9367-780D11D0CACC": { + "author": "com.apple.CoreML", + "description": "External Metadata Overlay", + "name": "Metadata.json", + "path": "com.apple.CoreML/Metadata.json" + }, + "FF129861-DA4E-4A61-A51B-4E991CC3DAEA": { + "author": "com.apple.CoreML", + "description": "CoreML Model Weights", + "name": "weights", + "path": "com.apple.CoreML/weights" + } + }, + "rootModelIdentifier": "0C4B4A84-E58C-4B55-9232-7AD156C36DF1" +} diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_encoder.mlpackage/Data/com.apple.CoreML/FeatureDescriptions.json b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_encoder.mlpackage/Data/com.apple.CoreML/FeatureDescriptions.json new file mode 100644 index 0000000000000000000000000000000000000000..ce86a23ae54ed365872b2a340f971d86d6eeb535 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_encoder.mlpackage/Data/com.apple.CoreML/FeatureDescriptions.json @@ -0,0 +1,15 @@ +{ + "Outputs" : { + "latent" : { + "MLFeatureShortDescription" : "The latent embeddings from the input image."
+ } + }, + "Inputs" : { + "z" : { + "MLFeatureShortDescription" : "The input image to base the initial latents on normalized to range [-1, 1]" + } + }, + "TrainingInputs" : { + + } +} \ No newline at end of file diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_encoder.mlpackage/Data/com.apple.CoreML/Metadata.json b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_encoder.mlpackage/Data/com.apple.CoreML/Metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..00b30270d8792a83aaa9aff233c3a4cc101c06f6 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_encoder.mlpackage/Data/com.apple.CoreML/Metadata.json @@ -0,0 +1,10 @@ +{ + "MLModelVersionStringKey" : "stabilityai\/stable-diffusion-xl-base-1.0", + "MLModelDescriptionKey" : "Stable Diffusion generates images conditioned on text and\/or other images as input through the diffusion process. Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.", + "MLModelCreatorDefinedKey" : { + "com.github.apple.coremltools.version" : "7.0b1", + "com.github.apple.coremltools.source" : "torch==2.0.1+cu117" + }, + "MLModelAuthorKey" : "Please refer to the Model Card available at huggingface.co\/stabilityai\/stable-diffusion-xl-base-1.0", + "MLModelLicenseKey" : "OpenRAIL++-M (https:\/\/huggingface.co\/stabilityai\/stable-diffusion-xl-base-1.0\/blob\/main\/LICENSE.md)" +} \ No newline at end of file diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_encoder.mlpackage/Data/com.apple.CoreML/model.mlmodel b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_encoder.mlpackage/Data/com.apple.CoreML/model.mlmodel new file mode 100644 index 0000000000000000000000000000000000000000..8f3613cc2bc76661d9ffd3c53a0b2497f6e9ad2e --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_encoder.mlpackage/Data/com.apple.CoreML/model.mlmodel @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee32c2ed8e0d8dc35e513bd328fda885af82ec3d252f50037cc70fc5d966860d +size 119397 diff --git a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_encoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_encoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin new file mode 100644 index 0000000000000000000000000000000000000000..b65da487e8950ca2002120fefa9387de7c571e32 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_encoder.mlpackage/Data/com.apple.CoreML/weights/weight.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:329f708df0bae1990a1886007b5ae56cfd9a44e7091e8f822db907a9fc411858 +size 68338112 diff --git 
a/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_encoder.mlpackage/Manifest.json b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_encoder.mlpackage/Manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..5321421f1072e766762c745aa440628ec535b449 --- /dev/null +++ b/coreml-stable-diffusion-xl-base_mbp_4_50_palettized/packages/Stable_Diffusion_version_stabilityai_stable-diffusion-xl-base-1.0_vae_encoder.mlpackage/Manifest.json @@ -0,0 +1,30 @@ +{ + "fileFormatVersion": "1.0.0", + "itemInfoEntries": { + "3B5695B7-E904-4965-8B20-FCA04DC0C035": { + "author": "com.apple.CoreML", + "description": "External FeatureDescription Overlay", + "name": "FeatureDescriptions.json", + "path": "com.apple.CoreML/FeatureDescriptions.json" + }, + "78a885c8-8779-418b-9bfe-cf0ac2e82e6f": { + "author": "com.apple.CoreML", + "description": "CoreML Model Weights", + "name": "weights", + "path": "com.apple.CoreML/weights" + }, + "7EE511A5-DB53-4061-8155-ED696C539F30": { + "author": "com.apple.CoreML", + "description": "External Metadata Overlay", + "name": "Metadata.json", + "path": "com.apple.CoreML/Metadata.json" + }, + "9e7ab831-1e35-4406-be6a-48236315c9c4": { + "author": "com.apple.CoreML", + "description": "CoreML Model Specification", + "name": "model.mlmodel", + "path": "com.apple.CoreML/model.mlmodel" + } + }, + "rootModelIdentifier": "9e7ab831-1e35-4406-be6a-48236315c9c4" +} diff --git a/recipes/runwayml-stable-diffusion-v1-5_palettization_recipe.json b/recipes/runwayml-stable-diffusion-v1-5_palettization_recipe.json new file mode 100644 index 0000000000000000000000000000000000000000..3773fc60d1c840aaf8956c0944eb93927f9f15b8 --- /dev/null +++ b/recipes/runwayml-stable-diffusion-v1-5_palettization_recipe.json @@ -0,0 +1,21664 @@ +{ + "model_version": "runwayml/stable-diffusion-v1-5", + "single_layer": { + "1": { + "time_embedding.linear_1": [ + 59.0, + 58.9, + 59.3, + 59.4 + ], + "time_embedding.linear_2": [ + 58.3, + 58.2, + 58.9, + 58.9 + ], + "down_blocks.0.attentions.0.proj_in": [ + 74.3, + 75.1, + 75.6, + 75.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 78.7, + 78.4, + 78.3, + 78.6 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 79.9, + 79.7, + 79.8, + 80.1 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 76.7, + 76.6, + 77.0, + 77.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 75.4, + 76.6, + 76.9, + 76.8 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 80.7, + 80.9, + 82.2, + 81.7 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 77.2, + 75.0, + 76.1, + 76.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 74.2, + 74.8, + 76.0, + 76.0 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.7, + 82.2, + 81.3, + 82.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 74.9, + 75.8, + 75.9, + 76.0 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 75.3, + 76.2, + 76.5, + 76.3 + ], + "down_blocks.0.attentions.0.proj_out": [ + 75.1, + 75.9, + 76.2, + 76.2 + ], + "down_blocks.0.attentions.1.proj_in": [ + 74.5, + 74.6, + 75.7, + 75.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 78.1, + 78.4, + 79.2, + 78.5 + ], + 
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 76.3, + 76.5, + 77.6, + 76.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 75.2, + 75.1, + 75.9, + 75.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 73.9, + 74.5, + 74.1, + 73.5 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 81.3, + 79.6, + 78.6, + 79.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 77.6, + 77.0, + 77.2, + 78.0 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 81.9, + 82.4, + 82.9, + 82.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 81.9, + 82.6, + 82.2, + 82.8 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 74.4, + 74.5, + 75.8, + 75.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 75.4, + 76.2, + 76.6, + 76.6 + ], + "down_blocks.0.attentions.1.proj_out": [ + 75.5, + 75.7, + 76.7, + 76.5 + ], + "down_blocks.0.resnets.0.conv1": [ + 73.9, + 74.3, + 74.3, + 74.8 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 75.1, + 75.5, + 75.8, + 76.1 + ], + "down_blocks.0.resnets.0.conv2": [ + 71.4, + 71.8, + 71.4, + 71.8 + ], + "down_blocks.0.resnets.1.conv1": [ + 74.5, + 74.5, + 74.9, + 75.1 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 74.9, + 74.8, + 76.6, + 76.1 + ], + "down_blocks.0.resnets.1.conv2": [ + 72.9, + 74.7, + 75.3, + 74.2 + ], + "down_blocks.0.downsamplers.0.conv": [ + 71.6, + 72.1, + 73.0, + 72.5 + ], + "down_blocks.1.attentions.0.proj_in": [ + 79.0, + 78.6, + 80.3, + 80.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 81.1, + 81.3, + 82.0, + 81.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 81.4, + 81.2, + 81.7, + 81.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 79.3, + 80.1, + 80.9, + 79.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 77.5, + 78.5, + 79.8, + 79.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 83.7, + 83.8, + 84.6, + 85.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 65.8, + 64.9, + 65.9, + 65.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 75.2, + 77.8, + 78.9, + 79.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 83.2, + 83.9, + 83.7, + 83.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 76.0, + 77.1, + 77.8, + 76.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 77.2, + 78.4, + 78.6, + 78.5 + ], + "down_blocks.1.attentions.0.proj_out": [ + 76.6, + 77.9, + 77.9, + 78.3 + ], + "down_blocks.1.attentions.1.proj_in": [ + 77.6, + 79.0, + 79.6, + 79.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 79.0, + 76.7, + 79.1, + 78.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 79.0, + 76.3, + 79.0, + 78.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 78.4, + 76.8, + 79.1, + 78.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 78.9, + 76.9, + 78.8, + 78.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 72.8, + 73.3, + 74.2, + 74.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 75.5, + 74.0, + 76.6, + 76.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 80.2, + 80.5, + 83.1, + 82.4 + ], + 
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 83.9, + 84.1, + 85.4, + 84.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 76.3, + 77.1, + 78.3, + 78.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 77.8, + 79.0, + 80.4, + 80.1 + ], + "down_blocks.1.attentions.1.proj_out": [ + 80.6, + 80.1, + 80.7, + 79.2 + ], + "down_blocks.1.resnets.0.conv1": [ + 79.5, + 79.2, + 80.0, + 79.0 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 80.2, + 80.1, + 81.5, + 81.3 + ], + "down_blocks.1.resnets.0.conv2": [ + 80.3, + 80.6, + 81.3, + 81.0 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 72.8, + 73.0, + 73.9, + 73.4 + ], + "down_blocks.1.resnets.1.conv1": [ + 80.3, + 78.9, + 80.8, + 81.3 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 81.3, + 81.5, + 81.0, + 80.0 + ], + "down_blocks.1.resnets.1.conv2": [ + 79.7, + 79.9, + 81.8, + 81.4 + ], + "down_blocks.1.downsamplers.0.conv": [ + 74.5, + 73.6, + 72.0, + 76.9 + ], + "down_blocks.2.attentions.0.proj_in": [ + 79.4, + 80.6, + 82.1, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 85.5, + 84.3, + 86.4, + 85.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 85.5, + 84.0, + 85.9, + 85.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 80.0, + 81.0, + 82.1, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 79.8, + 80.7, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 69.7, + 71.4, + 76.0, + 75.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 66.5, + 70.4, + 73.7, + 72.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 74.0, + 75.0, + 76.8, + 74.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 84.6, + 85.3, + 86.9, + 86.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 78.5, + 79.4, + 80.3, + 79.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 80.3, + 79.1, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.0.proj_out": [ + 81.2, + 80.9, + 82.6, + 82.0 + ], + "down_blocks.2.attentions.1.proj_in": [ + 79.3, + 79.0, + 82.0, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 86.2, + 86.3, + 86.9, + 86.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 86.2, + 86.2, + 87.2, + 86.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 82.8, + 81.1, + 84.2, + 83.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 80.5, + 80.6, + 83.5, + 82.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 75.3, + 79.0, + 80.4, + 80.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 73.8, + 78.5, + 79.6, + 79.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 80.6, + 79.3, + 82.1, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 84.7, + 84.4, + 86.1, + 84.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 81.8, + 80.0, + 83.7, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 82.7, + 81.9, + 84.3, + 83.6 + ], + "down_blocks.2.attentions.1.proj_out": [ + 81.8, + 82.2, + 84.3, + 83.8 + ], + "down_blocks.2.resnets.0.conv1": [ + 80.7, + 81.7, + 83.7, + 82.7 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 80.3, + 81.0, + 83.3, + 82.4 + ], + 
"down_blocks.2.resnets.0.conv2": [ + 81.5, + 81.1, + 84.0, + 83.1 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 75.9, + 71.4, + 75.8, + 76.3 + ], + "down_blocks.2.resnets.1.conv1": [ + 83.6, + 82.3, + 83.3, + 83.3 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 84.4, + 84.3, + 85.4, + 85.2 + ], + "down_blocks.2.resnets.1.conv2": [ + 82.0, + 81.9, + 83.5, + 83.1 + ], + "down_blocks.2.downsamplers.0.conv": [ + 82.0, + 83.6, + 85.9, + 85.1 + ], + "down_blocks.3.resnets.0.conv1": [ + 84.6, + 86.3, + 87.2, + 87.2 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 86.5, + 88.1, + 88.0, + 88.2 + ], + "down_blocks.3.resnets.0.conv2": [ + 83.7, + 85.9, + 86.4, + 87.0 + ], + "down_blocks.3.resnets.1.conv1": [ + 82.8, + 85.4, + 86.9, + 86.4 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 87.2, + 87.6, + 87.8, + 87.9 + ], + "down_blocks.3.resnets.1.conv2": [ + 83.4, + 86.2, + 87.2, + 86.6 + ], + "up_blocks.0.resnets.0.conv1": [ + 82.9, + 84.6, + 86.8, + 85.6 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 87.8, + 88.2, + 88.3, + 88.4 + ], + "up_blocks.0.resnets.0.conv2": [ + 82.6, + 84.0, + 86.2, + 85.8 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 86.4, + 86.3, + 87.9, + 87.8 + ], + "up_blocks.0.resnets.1.conv1": [ + 82.5, + 84.6, + 86.3, + 85.4 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 87.9, + 88.0, + 88.3, + 88.2 + ], + "up_blocks.0.resnets.1.conv2": [ + 82.9, + 83.7, + 85.3, + 85.2 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 87.3, + 87.0, + 88.1, + 87.8 + ], + "up_blocks.0.resnets.2.conv1": [ + 82.4, + 84.9, + 86.3, + 86.1 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 88.4, + 88.3, + 88.4, + 88.5 + ], + "up_blocks.0.resnets.2.conv2": [ + 84.1, + 85.1, + 86.8, + 86.7 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 86.6, + 85.3, + 87.5, + 86.8 + ], + "up_blocks.0.upsamplers.0.conv": [ + 83.9, + 83.9, + 86.7, + 86.3 + ], + "up_blocks.1.attentions.0.proj_in": [ + 81.7, + 80.6, + 83.4, + 81.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 86.1, + 85.6, + 87.0, + 86.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 86.7, + 86.5, + 87.6, + 87.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 83.2, + 82.3, + 82.6, + 82.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 83.2, + 81.4, + 84.2, + 83.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 79.4, + 79.4, + 80.9, + 80.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 75.5, + 77.7, + 79.6, + 79.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 79.5, + 78.1, + 81.1, + 80.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.3, + 87.4, + 88.0, + 86.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 82.1, + 82.3, + 82.0, + 81.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 83.4, + 81.8, + 83.5, + 83.9 + ], + "up_blocks.1.attentions.0.proj_out": [ + 83.0, + 82.3, + 84.0, + 84.2 + ], + "up_blocks.1.attentions.1.proj_in": [ + 81.5, + 79.1, + 81.8, + 81.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 86.1, + 86.0, + 87.0, + 86.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 86.6, + 86.3, + 87.1, + 86.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 83.4, + 82.6, + 84.4, + 83.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 84.2, + 82.5, + 84.8, + 82.8 + ], + 
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 80.0, + 81.8, + 81.1, + 82.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 75.3, + 76.3, + 76.0, + 76.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 78.5, + 77.7, + 79.8, + 79.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 85.3, + 84.7, + 86.2, + 86.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 82.8, + 82.4, + 83.8, + 83.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 84.1, + 82.7, + 85.2, + 85.0 + ], + "up_blocks.1.attentions.1.proj_out": [ + 82.7, + 82.3, + 84.2, + 84.1 + ], + "up_blocks.1.attentions.2.proj_in": [ + 79.9, + 78.9, + 80.7, + 79.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 84.1, + 83.8, + 85.5, + 85.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 86.4, + 86.3, + 87.4, + 87.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 81.2, + 80.3, + 83.1, + 82.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 79.4, + 78.0, + 82.1, + 82.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 74.2, + 76.1, + 81.1, + 77.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 69.1, + 71.4, + 73.8, + 72.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 78.6, + 78.8, + 79.1, + 78.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 82.6, + 81.8, + 83.2, + 82.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 80.7, + 80.2, + 81.6, + 81.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 82.1, + 82.0, + 83.5, + 83.8 + ], + "up_blocks.1.attentions.2.proj_out": [ + 79.1, + 78.6, + 79.8, + 80.1 + ], + "up_blocks.1.resnets.0.conv1": [ + 84.1, + 82.5, + 85.9, + 85.2 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 86.2, + 84.8, + 87.3, + 87.0 + ], + "up_blocks.1.resnets.0.conv2": [ + 83.3, + 82.3, + 84.0, + 84.3 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 79.1, + 77.9, + 79.1, + 80.6 + ], + "up_blocks.1.resnets.1.conv1": [ + 84.0, + 83.1, + 85.5, + 84.5 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 86.7, + 86.6, + 87.1, + 87.0 + ], + "up_blocks.1.resnets.1.conv2": [ + 82.5, + 82.2, + 84.1, + 84.1 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 79.4, + 80.4, + 80.7, + 80.6 + ], + "up_blocks.1.resnets.2.conv1": [ + 78.6, + 77.7, + 80.7, + 78.5 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 84.9, + 85.1, + 86.4, + 85.8 + ], + "up_blocks.1.resnets.2.conv2": [ + 84.8, + 84.5, + 85.7, + 85.3 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 78.5, + 79.4, + 80.8, + 80.5 + ], + "up_blocks.1.upsamplers.0.conv": [ + 79.4, + 78.8, + 80.7, + 80.4 + ], + "up_blocks.2.attentions.0.proj_in": [ + 79.8, + 78.4, + 81.3, + 80.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 82.3, + 81.7, + 82.9, + 82.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 82.1, + 81.8, + 82.8, + 82.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 80.5, + 80.3, + 82.4, + 82.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 80.8, + 80.8, + 81.5, + 81.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 85.6, + 85.4, + 86.2, + 85.7 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 80.2, + 79.7, + 80.7, + 80.8 + ], + 
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 79.2, + 79.3, + 80.5, + 80.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 83.6, + 83.9, + 84.4, + 84.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 80.6, + 81.1, + 82.2, + 82.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 81.8, + 81.9, + 82.9, + 82.8 + ], + "up_blocks.2.attentions.0.proj_out": [ + 80.2, + 79.5, + 81.2, + 80.8 + ], + "up_blocks.2.attentions.1.proj_in": [ + 75.5, + 77.6, + 77.3, + 78.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 80.9, + 80.4, + 81.9, + 81.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 80.9, + 80.6, + 81.7, + 81.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 78.7, + 77.9, + 79.4, + 79.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 78.6, + 78.5, + 79.2, + 79.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 78.9, + 78.3, + 78.7, + 77.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 76.4, + 74.2, + 77.0, + 76.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 80.2, + 80.9, + 81.4, + 81.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 80.1, + 80.1, + 81.3, + 80.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 77.0, + 77.1, + 78.3, + 78.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 80.1, + 80.2, + 81.6, + 81.7 + ], + "up_blocks.2.attentions.1.proj_out": [ + 79.4, + 79.0, + 80.5, + 80.4 + ], + "up_blocks.2.attentions.2.proj_in": [ + 76.4, + 76.8, + 77.6, + 77.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 81.4, + 81.8, + 82.1, + 82.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 81.4, + 81.7, + 82.1, + 82.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 77.8, + 77.6, + 78.7, + 78.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 74.9, + 75.9, + 76.3, + 76.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 81.2, + 80.1, + 81.6, + 81.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 75.6, + 75.0, + 77.8, + 77.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 79.1, + 79.6, + 79.7, + 79.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 79.1, + 78.8, + 79.4, + 78.0 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 78.0, + 78.1, + 79.2, + 78.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 79.5, + 79.1, + 80.5, + 79.8 + ], + "up_blocks.2.attentions.2.proj_out": [ + 75.9, + 76.0, + 76.8, + 76.7 + ], + "up_blocks.2.resnets.0.conv1": [ + 81.3, + 81.2, + 82.8, + 82.2 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 86.5, + 86.6, + 87.0, + 86.9 + ], + "up_blocks.2.resnets.0.conv2": [ + 80.3, + 80.7, + 82.1, + 81.9 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 74.2, + 73.1, + 75.2, + 75.2 + ], + "up_blocks.2.resnets.1.conv1": [ + 78.7, + 78.9, + 79.8, + 79.9 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 80.4, + 80.2, + 81.4, + 81.4 + ], + "up_blocks.2.resnets.1.conv2": [ + 78.7, + 78.5, + 79.4, + 79.6 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 75.8, + 75.8, + 77.2, + 76.8 + ], + "up_blocks.2.resnets.2.conv1": [ + 79.2, + 79.9, + 81.0, + 80.8 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 75.3, + 75.2, + 76.2, + 75.9 + ], 
+ "up_blocks.2.resnets.2.conv2": [ + 78.2, + 78.7, + 79.3, + 79.4 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 73.2, + 74.1, + 74.4, + 74.5 + ], + "up_blocks.2.upsamplers.0.conv": [ + 72.8, + 73.4, + 74.7, + 74.7 + ], + "up_blocks.3.attentions.0.proj_in": [ + 71.4, + 71.4, + 72.5, + 72.8 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 74.7, + 75.2, + 76.9, + 76.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 73.0, + 73.2, + 75.6, + 75.0 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 73.2, + 73.3, + 75.1, + 75.0 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 72.3, + 71.8, + 73.6, + 74.0 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 78.8, + 76.0, + 76.3, + 77.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 76.5, + 77.1, + 77.1, + 77.8 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 79.7, + 80.4, + 80.7, + 80.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.7, + 80.6, + 81.2, + 81.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 74.4, + 74.4, + 75.6, + 75.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 75.1, + 75.1, + 76.9, + 76.8 + ], + "up_blocks.3.attentions.0.proj_out": [ + 75.0, + 75.2, + 76.1, + 76.1 + ], + "up_blocks.3.attentions.1.proj_in": [ + 66.1, + 66.8, + 67.1, + 67.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 69.0, + 69.8, + 69.7, + 69.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 69.7, + 69.9, + 70.6, + 71.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 68.0, + 68.6, + 69.0, + 69.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 66.0, + 66.3, + 66.8, + 66.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 77.2, + 75.5, + 74.6, + 74.6 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 72.4, + 72.2, + 74.3, + 74.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 80.3, + 80.6, + 80.8, + 80.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 81.7, + 82.3, + 81.6, + 81.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 70.2, + 71.2, + 71.6, + 71.7 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 71.4, + 72.6, + 72.7, + 72.8 + ], + "up_blocks.3.attentions.1.proj_out": [ + 70.3, + 71.1, + 71.1, + 71.2 + ], + "up_blocks.3.attentions.2.proj_in": [ + 63.0, + 63.3, + 62.1, + 62.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 75.2, + 75.8, + 75.9, + 76.1 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 73.0, + 73.6, + 73.9, + 74.0 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 69.4, + 69.4, + 70.0, + 69.9 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 66.7, + 67.1, + 67.1, + 67.2 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 80.1, + 79.1, + 70.9, + 71.7 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 78.1, + 74.4, + 73.1, + 74.0 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 76.7, + 76.9, + 79.1, + 80.2 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 78.0, + 78.1, + 77.7, + 77.6 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 64.4, + 64.5, + 64.3, + 64.3 + ], + 
"up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 70.6, + 71.1, + 71.0, + 70.8 + ], + "up_blocks.3.attentions.2.proj_out": [ + 69.3, + 69.9, + 69.2, + 69.2 + ], + "up_blocks.3.resnets.0.conv1": [ + 69.6, + 69.9, + 71.0, + 70.9 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 81.3, + 81.1, + 81.5, + 81.8 + ], + "up_blocks.3.resnets.0.conv2": [ + 71.1, + 71.9, + 73.1, + 72.7 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 63.8, + 64.5, + 65.0, + 64.8 + ], + "up_blocks.3.resnets.1.conv1": [ + 66.8, + 67.4, + 67.8, + 67.7 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 72.7, + 71.5, + 73.9, + 73.8 + ], + "up_blocks.3.resnets.1.conv2": [ + 64.6, + 64.9, + 65.1, + 65.1 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 63.4, + 63.6, + 63.5, + 63.7 + ], + "up_blocks.3.resnets.2.conv1": [ + 63.9, + 63.6, + 64.0, + 63.8 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 65.1, + 65.3, + 65.7, + 65.7 + ], + "up_blocks.3.resnets.2.conv2": [ + 62.2, + 62.3, + 62.1, + 62.2 + ], + "up_blocks.3.resnets.2.conv_shortcut": [ + 59.8, + 60.2, + 60.2, + 60.2 + ], + "mid_block.attentions.0.proj_in": [ + 86.3, + 85.2, + 87.5, + 86.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.5, + 87.9, + 88.1, + 87.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.0, + 88.1, + 88.4, + 88.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 85.8, + 85.0, + 87.6, + 86.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 85.0, + 83.5, + 87.5, + 86.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.8, + 83.3, + 84.7, + 83.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.3, + 82.5, + 84.3, + 83.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 82.7, + 81.6, + 85.2, + 84.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.4, + 85.3, + 88.0, + 88.2 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 85.4, + 83.9, + 87.1, + 86.8 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 87.0, + 85.1, + 87.7, + 87.6 + ], + "mid_block.attentions.0.proj_out": [ + 84.9, + 84.6, + 87.5, + 86.6 + ], + "mid_block.resnets.0.conv1": [ + 84.6, + 85.6, + 87.4, + 87.2 + ], + "mid_block.resnets.0.time_emb_proj": [ + 87.2, + 87.6, + 88.3, + 87.8 + ], + "mid_block.resnets.0.conv2": [ + 85.4, + 86.6, + 86.3, + 86.7 + ], + "mid_block.resnets.1.conv1": [ + 85.8, + 85.5, + 88.0, + 87.3 + ], + "mid_block.resnets.1.time_emb_proj": [ + 88.1, + 88.1, + 88.5, + 88.3 + ], + "mid_block.resnets.1.conv2": [ + 84.9, + 84.1, + 86.2, + 86.6 + ] + }, + "2": { + "time_embedding.linear_1": [ + 62.5, + 62.6, + 62.9, + 62.7 + ], + "time_embedding.linear_2": [ + 62.6, + 62.2, + 62.7, + 63.4 + ], + "down_blocks.0.attentions.0.proj_in": [ + 78.3, + 78.9, + 79.5, + 79.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 83.6, + 83.5, + 84.3, + 84.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 83.0, + 83.1, + 83.4, + 83.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 80.9, + 81.1, + 81.2, + 81.5 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 79.4, + 79.7, + 80.0, + 80.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 83.5, + 85.1, + 84.4, + 84.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 80.5, + 81.0, + 80.4, + 80.8 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 
80.6, + 80.7, + 82.3, + 81.9 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 83.6, + 83.8, + 83.6, + 84.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 78.1, + 78.8, + 78.8, + 79.1 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 79.1, + 80.3, + 80.1, + 80.1 + ], + "down_blocks.0.attentions.0.proj_out": [ + 79.0, + 79.4, + 79.5, + 79.6 + ], + "down_blocks.0.attentions.1.proj_in": [ + 77.1, + 77.4, + 78.1, + 78.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 82.7, + 82.5, + 83.3, + 83.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 80.9, + 81.2, + 81.9, + 81.8 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 78.7, + 78.9, + 79.9, + 79.4 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 77.4, + 77.7, + 77.6, + 77.5 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 84.9, + 85.1, + 85.2, + 84.8 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 80.3, + 79.6, + 79.0, + 78.8 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 83.5, + 84.1, + 85.0, + 84.0 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 83.1, + 83.7, + 84.3, + 84.4 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 78.1, + 78.2, + 78.8, + 78.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 79.0, + 79.6, + 79.9, + 79.8 + ], + "down_blocks.0.attentions.1.proj_out": [ + 79.3, + 79.9, + 80.4, + 80.6 + ], + "down_blocks.0.resnets.0.conv1": [ + 79.7, + 79.9, + 79.9, + 80.1 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 80.7, + 80.5, + 80.7, + 80.9 + ], + "down_blocks.0.resnets.0.conv2": [ + 76.6, + 77.0, + 76.5, + 76.8 + ], + "down_blocks.0.resnets.1.conv1": [ + 78.4, + 78.3, + 79.1, + 79.2 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 80.3, + 80.4, + 81.8, + 81.7 + ], + "down_blocks.0.resnets.1.conv2": [ + 78.4, + 78.9, + 79.2, + 78.8 + ], + "down_blocks.0.downsamplers.0.conv": [ + 76.6, + 77.4, + 78.1, + 78.1 + ], + "down_blocks.1.attentions.0.proj_in": [ + 81.1, + 81.0, + 81.9, + 81.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 85.8, + 85.7, + 86.0, + 86.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 85.1, + 85.1, + 84.7, + 84.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 82.2, + 82.5, + 82.7, + 83.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 79.9, + 81.1, + 81.8, + 81.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 87.1, + 87.0, + 87.4, + 87.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 68.4, + 69.0, + 69.3, + 69.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.8, + 83.3, + 82.8, + 83.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 84.8, + 85.1, + 85.3, + 85.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 80.5, + 80.8, + 82.6, + 81.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 80.9, + 81.8, + 81.4, + 81.3 + ], + "down_blocks.1.attentions.0.proj_out": [ + 81.2, + 82.3, + 83.6, + 83.1 + ], + "down_blocks.1.attentions.1.proj_in": [ + 79.7, + 80.7, + 81.7, + 81.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 83.8, + 81.9, + 83.5, + 83.7 + ], + 
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 84.0, + 81.8, + 83.6, + 82.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 82.3, + 80.9, + 82.4, + 82.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 81.5, + 80.0, + 82.1, + 82.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 84.7, + 84.6, + 85.4, + 85.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 79.6, + 78.4, + 81.3, + 81.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 85.3, + 85.9, + 86.6, + 86.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 84.9, + 85.0, + 86.1, + 85.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 79.6, + 81.1, + 81.1, + 81.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 80.9, + 81.2, + 83.8, + 82.9 + ], + "down_blocks.1.attentions.1.proj_out": [ + 82.0, + 82.1, + 83.3, + 83.3 + ], + "down_blocks.1.resnets.0.conv1": [ + 82.5, + 82.1, + 83.5, + 82.2 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 82.1, + 82.3, + 83.6, + 83.6 + ], + "down_blocks.1.resnets.0.conv2": [ + 83.5, + 84.4, + 84.7, + 84.3 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 75.8, + 75.0, + 76.3, + 76.4 + ], + "down_blocks.1.resnets.1.conv1": [ + 82.6, + 82.4, + 84.0, + 83.7 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 83.7, + 84.2, + 84.4, + 83.9 + ], + "down_blocks.1.resnets.1.conv2": [ + 82.7, + 82.3, + 84.6, + 84.3 + ], + "down_blocks.1.downsamplers.0.conv": [ + 80.0, + 80.0, + 80.0, + 79.4 + ], + "down_blocks.2.attentions.0.proj_in": [ + 81.4, + 82.9, + 84.6, + 83.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.3, + 87.2, + 87.9, + 87.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.5, + 87.3, + 87.8, + 87.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 81.1, + 82.7, + 84.3, + 83.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 81.8, + 83.8, + 84.6, + 84.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 80.1, + 81.6, + 83.5, + 83.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 69.4, + 72.4, + 74.7, + 75.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 79.7, + 80.4, + 81.4, + 80.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.0, + 87.3, + 87.9, + 87.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 80.5, + 81.6, + 81.9, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 82.7, + 82.9, + 84.7, + 83.1 + ], + "down_blocks.2.attentions.0.proj_out": [ + 83.5, + 83.2, + 85.5, + 84.5 + ], + "down_blocks.2.attentions.1.proj_in": [ + 84.4, + 83.8, + 85.7, + 85.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 87.1, + 87.2, + 87.8, + 88.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 87.5, + 87.6, + 88.2, + 88.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 84.4, + 84.1, + 85.2, + 85.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 84.2, + 84.3, + 85.7, + 85.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 81.6, + 83.3, + 84.7, + 84.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 75.0, + 78.9, + 81.5, + 80.5 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 82.3, + 82.1, + 84.7, + 84.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 85.5, + 86.3, + 87.0, + 85.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 83.6, + 82.9, + 85.3, + 85.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 85.6, + 85.3, + 86.8, + 86.7 + ], + "down_blocks.2.attentions.1.proj_out": [ + 84.7, + 84.5, + 86.0, + 85.9 + ], + "down_blocks.2.resnets.0.conv1": [ + 83.2, + 84.3, + 86.0, + 84.8 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 83.1, + 82.7, + 84.5, + 83.3 + ], + "down_blocks.2.resnets.0.conv2": [ + 84.4, + 84.0, + 86.0, + 85.7 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 77.9, + 76.3, + 80.5, + 79.7 + ], + "down_blocks.2.resnets.1.conv1": [ + 82.4, + 83.4, + 82.8, + 83.1 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 87.1, + 86.5, + 87.5, + 87.7 + ], + "down_blocks.2.resnets.1.conv2": [ + 84.2, + 84.2, + 84.7, + 85.1 + ], + "down_blocks.2.downsamplers.0.conv": [ + 83.7, + 85.2, + 87.2, + 85.9 + ], + "down_blocks.3.resnets.0.conv1": [ + 86.7, + 87.2, + 88.0, + 87.4 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 87.2, + 88.3, + 88.4, + 88.2 + ], + "down_blocks.3.resnets.0.conv2": [ + 85.7, + 87.0, + 87.7, + 87.3 + ], + "down_blocks.3.resnets.1.conv1": [ + 83.0, + 85.6, + 86.8, + 86.6 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 88.1, + 88.1, + 88.5, + 88.2 + ], + "down_blocks.3.resnets.1.conv2": [ + 84.3, + 86.0, + 87.8, + 87.1 + ], + "up_blocks.0.resnets.0.conv1": [ + 84.8, + 86.6, + 87.4, + 86.4 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 88.5, + 88.4, + 88.6, + 88.6 + ], + "up_blocks.0.resnets.0.conv2": [ + 83.5, + 85.7, + 87.0, + 87.0 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 87.2, + 87.6, + 88.2, + 88.2 + ], + "up_blocks.0.resnets.1.conv1": [ + 83.2, + 84.9, + 86.9, + 86.7 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 88.3, + 88.4, + 88.7, + 88.5 + ], + "up_blocks.0.resnets.1.conv2": [ + 83.9, + 85.6, + 86.7, + 86.5 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 87.9, + 87.9, + 88.0, + 88.2 + ], + "up_blocks.0.resnets.2.conv1": [ + 82.8, + 85.4, + 86.3, + 86.2 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 88.3, + 88.6, + 88.4, + 88.8 + ], + "up_blocks.0.resnets.2.conv2": [ + 86.4, + 86.6, + 87.9, + 87.7 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 87.6, + 87.4, + 88.0, + 87.8 + ], + "up_blocks.0.upsamplers.0.conv": [ + 86.0, + 86.3, + 87.5, + 87.3 + ], + "up_blocks.1.attentions.0.proj_in": [ + 84.9, + 84.3, + 85.5, + 84.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.1, + 87.0, + 87.9, + 88.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.4, + 87.7, + 88.2, + 88.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 86.0, + 85.2, + 86.3, + 85.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 84.6, + 84.3, + 85.5, + 84.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 84.4, + 83.7, + 85.7, + 85.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 77.6, + 79.2, + 80.6, + 79.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 82.8, + 82.6, + 83.8, + 83.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.9, + 87.6, + 88.2, + 88.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 85.2, + 83.5, + 84.9, + 84.8 + ], + 
"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 85.9, + 85.1, + 85.9, + 85.6 + ], + "up_blocks.1.attentions.0.proj_out": [ + 85.7, + 84.8, + 86.5, + 86.1 + ], + "up_blocks.1.attentions.1.proj_in": [ + 82.3, + 80.7, + 82.9, + 82.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 87.5, + 87.5, + 88.2, + 88.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 87.8, + 87.8, + 88.3, + 88.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 86.0, + 85.2, + 86.5, + 86.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 85.7, + 85.1, + 86.1, + 86.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 85.0, + 86.3, + 84.6, + 86.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 80.3, + 81.4, + 79.5, + 80.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 80.8, + 81.3, + 82.3, + 82.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 85.8, + 85.1, + 86.4, + 86.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 85.5, + 85.1, + 86.8, + 86.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 86.3, + 85.4, + 86.7, + 86.7 + ], + "up_blocks.1.attentions.1.proj_out": [ + 85.4, + 84.8, + 86.4, + 86.4 + ], + "up_blocks.1.attentions.2.proj_in": [ + 81.2, + 82.1, + 83.2, + 81.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 85.9, + 85.5, + 86.8, + 86.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 87.8, + 87.7, + 88.0, + 88.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 84.1, + 83.3, + 85.7, + 85.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 80.8, + 80.6, + 83.2, + 82.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 83.8, + 84.2, + 87.0, + 86.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 75.4, + 77.1, + 78.7, + 76.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 82.1, + 82.3, + 82.5, + 82.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 82.7, + 82.1, + 83.3, + 83.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 83.2, + 82.4, + 84.1, + 83.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 85.8, + 85.5, + 86.5, + 86.2 + ], + "up_blocks.1.attentions.2.proj_out": [ + 81.8, + 82.5, + 83.3, + 83.1 + ], + "up_blocks.1.resnets.0.conv1": [ + 85.6, + 84.3, + 87.2, + 86.6 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 87.1, + 86.2, + 87.4, + 87.6 + ], + "up_blocks.1.resnets.0.conv2": [ + 85.6, + 84.5, + 86.5, + 86.4 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 82.8, + 83.1, + 85.3, + 84.7 + ], + "up_blocks.1.resnets.1.conv1": [ + 85.6, + 85.4, + 87.2, + 86.4 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 87.2, + 87.4, + 87.6, + 87.7 + ], + "up_blocks.1.resnets.1.conv2": [ + 84.7, + 84.8, + 86.2, + 85.8 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 83.0, + 83.0, + 84.7, + 85.0 + ], + "up_blocks.1.resnets.2.conv1": [ + 81.7, + 80.9, + 83.3, + 82.3 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 85.9, + 86.4, + 87.0, + 86.9 + ], + "up_blocks.1.resnets.2.conv2": [ + 86.3, + 86.5, + 87.0, + 86.4 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 82.0, + 82.0, + 83.5, + 83.4 + ], + "up_blocks.1.upsamplers.0.conv": [ + 82.4, + 81.6, + 83.5, + 83.1 + ], + "up_blocks.2.attentions.0.proj_in": [ + 82.2, + 82.6, + 83.7, + 83.5 + ], + 
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 85.8, + 85.1, + 86.4, + 86.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 85.5, + 85.6, + 86.4, + 86.2 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 83.8, + 82.5, + 85.2, + 84.8 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 82.3, + 82.3, + 84.0, + 83.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 87.4, + 87.4, + 87.5, + 87.7 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 84.4, + 84.1, + 84.6, + 84.7 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.8, + 81.9, + 82.8, + 83.0 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 85.0, + 84.9, + 85.5, + 85.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 83.6, + 83.6, + 84.7, + 84.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 84.6, + 84.4, + 85.3, + 85.5 + ], + "up_blocks.2.attentions.0.proj_out": [ + 83.4, + 83.0, + 84.2, + 84.1 + ], + "up_blocks.2.attentions.1.proj_in": [ + 80.3, + 80.0, + 81.4, + 80.9 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 85.2, + 85.0, + 86.1, + 85.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 85.0, + 84.8, + 85.7, + 85.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 82.1, + 82.0, + 82.7, + 82.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 81.3, + 81.4, + 81.8, + 81.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 83.9, + 84.5, + 83.7, + 83.3 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 80.9, + 80.0, + 80.5, + 79.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 84.4, + 84.5, + 85.2, + 85.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 80.9, + 80.7, + 82.0, + 81.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 79.6, + 80.1, + 81.6, + 81.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 83.1, + 82.5, + 84.1, + 84.2 + ], + "up_blocks.2.attentions.1.proj_out": [ + 82.3, + 82.3, + 83.4, + 83.1 + ], + "up_blocks.2.attentions.2.proj_in": [ + 78.3, + 78.7, + 78.7, + 78.9 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 84.8, + 85.3, + 85.4, + 85.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 84.5, + 84.4, + 84.9, + 84.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 80.9, + 80.9, + 81.9, + 81.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 77.6, + 77.6, + 78.1, + 78.1 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 85.0, + 84.5, + 86.5, + 86.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 80.9, + 80.0, + 82.5, + 81.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 81.8, + 82.0, + 82.2, + 81.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 78.9, + 78.6, + 79.3, + 78.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 80.6, + 80.7, + 81.8, + 81.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 82.7, + 82.5, + 83.4, + 83.0 + ], + "up_blocks.2.attentions.2.proj_out": [ + 79.6, + 79.7, + 80.6, + 80.5 + ], + "up_blocks.2.resnets.0.conv1": [ + 83.9, + 83.6, + 84.9, + 85.0 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 87.5, + 87.5, + 87.8, + 88.1 + 
], + "up_blocks.2.resnets.0.conv2": [ + 83.8, + 83.9, + 85.0, + 85.2 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 78.2, + 78.0, + 80.1, + 79.5 + ], + "up_blocks.2.resnets.1.conv1": [ + 81.5, + 81.3, + 82.2, + 82.3 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 83.7, + 83.6, + 84.2, + 84.5 + ], + "up_blocks.2.resnets.1.conv2": [ + 81.6, + 82.0, + 82.9, + 83.2 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 77.9, + 78.0, + 79.5, + 79.5 + ], + "up_blocks.2.resnets.2.conv1": [ + 82.0, + 82.3, + 83.2, + 83.3 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 81.1, + 81.2, + 81.9, + 81.6 + ], + "up_blocks.2.resnets.2.conv2": [ + 80.7, + 80.9, + 81.1, + 80.9 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 78.2, + 78.1, + 78.2, + 77.8 + ], + "up_blocks.2.upsamplers.0.conv": [ + 77.6, + 78.1, + 79.0, + 78.9 + ], + "up_blocks.3.attentions.0.proj_in": [ + 74.8, + 74.4, + 75.9, + 76.1 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 80.1, + 80.4, + 81.5, + 81.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 78.8, + 79.2, + 80.1, + 80.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 78.8, + 78.4, + 80.1, + 80.1 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 77.3, + 75.9, + 77.5, + 78.1 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 85.7, + 85.8, + 86.6, + 86.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 80.1, + 81.4, + 81.6, + 82.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.4, + 78.7, + 82.3, + 81.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 84.1, + 82.9, + 83.3, + 83.8 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 77.5, + 77.4, + 78.4, + 78.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 78.6, + 78.8, + 79.8, + 80.0 + ], + "up_blocks.3.attentions.0.proj_out": [ + 78.6, + 78.8, + 79.7, + 79.8 + ], + "up_blocks.3.attentions.1.proj_in": [ + 71.6, + 71.5, + 71.8, + 71.9 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 77.6, + 78.5, + 79.3, + 79.0 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 77.5, + 77.9, + 78.3, + 78.6 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 76.4, + 76.8, + 77.4, + 77.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 71.7, + 73.3, + 72.8, + 72.9 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 84.2, + 83.5, + 83.2, + 84.0 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 76.6, + 77.3, + 77.5, + 77.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 81.4, + 81.3, + 82.7, + 82.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 83.2, + 83.7, + 83.0, + 83.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 74.6, + 75.5, + 76.0, + 76.0 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 76.5, + 77.8, + 77.6, + 77.7 + ], + "up_blocks.3.attentions.1.proj_out": [ + 74.7, + 75.6, + 76.0, + 76.1 + ], + "up_blocks.3.attentions.2.proj_in": [ + 69.1, + 68.3, + 67.4, + 67.7 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 80.4, + 80.9, + 81.2, + 81.3 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 79.7, + 80.2, + 80.1, + 80.3 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 76.1, + 76.1, + 76.4, + 76.3 + ], + 
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 76.2, + 76.7, + 76.2, + 76.2 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 84.4, + 82.9, + 80.8, + 81.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 81.6, + 79.3, + 76.7, + 76.3 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 80.3, + 78.9, + 81.0, + 81.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 80.2, + 79.7, + 79.8, + 79.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 72.1, + 72.4, + 72.3, + 72.3 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 78.2, + 78.8, + 78.4, + 78.2 + ], + "up_blocks.3.attentions.2.proj_out": [ + 75.5, + 75.7, + 75.6, + 75.5 + ], + "up_blocks.3.resnets.0.conv1": [ + 75.3, + 75.5, + 76.4, + 76.3 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 84.8, + 84.8, + 85.0, + 85.0 + ], + "up_blocks.3.resnets.0.conv2": [ + 77.1, + 77.5, + 78.0, + 78.0 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 69.9, + 70.5, + 71.7, + 71.6 + ], + "up_blocks.3.resnets.1.conv1": [ + 72.5, + 73.3, + 73.2, + 73.0 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 79.8, + 80.2, + 80.4, + 80.4 + ], + "up_blocks.3.resnets.1.conv2": [ + 70.8, + 71.4, + 71.4, + 71.4 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 68.0, + 68.4, + 68.5, + 68.6 + ], + "up_blocks.3.resnets.2.conv1": [ + 71.1, + 71.9, + 71.2, + 71.0 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 73.4, + 73.7, + 74.0, + 74.2 + ], + "up_blocks.3.resnets.2.conv2": [ + 68.1, + 68.5, + 68.5, + 68.6 + ], + "up_blocks.3.resnets.2.conv_shortcut": [ + 66.9, + 67.7, + 67.5, + 67.3 + ], + "mid_block.attentions.0.proj_in": [ + 86.8, + 86.1, + 87.5, + 88.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.0, + 88.1, + 88.4, + 88.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.1, + 88.7, + 88.6, + 88.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.9, + 88.1, + 88.1, + 88.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 86.7, + 87.2, + 87.6, + 87.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 85.7, + 85.5, + 86.4, + 86.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.8, + 83.4, + 85.2, + 84.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 85.5, + 84.3, + 86.5, + 86.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.8, + 87.6, + 88.3, + 88.2 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 85.8, + 86.9, + 87.7, + 87.9 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 87.6, + 88.1, + 88.2, + 87.9 + ], + "mid_block.attentions.0.proj_out": [ + 86.8, + 86.2, + 88.0, + 88.0 + ], + "mid_block.resnets.0.conv1": [ + 86.0, + 87.1, + 87.9, + 87.7 + ], + "mid_block.resnets.0.time_emb_proj": [ + 87.9, + 88.0, + 88.8, + 88.5 + ], + "mid_block.resnets.0.conv2": [ + 87.8, + 87.6, + 87.8, + 87.7 + ], + "mid_block.resnets.1.conv1": [ + 87.3, + 87.1, + 88.3, + 87.8 + ], + "mid_block.resnets.1.time_emb_proj": [ + 87.8, + 88.3, + 88.4, + 88.7 + ], + "mid_block.resnets.1.conv2": [ + 85.7, + 86.2, + 86.8, + 87.0 + ] + }, + "4": { + "time_embedding.linear_1": [ + 85.1, + 85.6, + 86.0, + 85.9 + ], + "time_embedding.linear_2": [ + 83.4, + 84.2, + 85.0, + 84.4 + ], + "down_blocks.0.attentions.0.proj_in": [ + 84.7, + 85.0, + 85.2, + 85.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.5, + 
87.4, + 87.6, + 87.7 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.5, + 87.4, + 87.7, + 87.7 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 84.8, + 85.1, + 85.1, + 85.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 83.5, + 84.4, + 84.6, + 84.6 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 87.6, + 87.4, + 87.6, + 87.9 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 86.8, + 87.4, + 87.2, + 87.0 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 86.3, + 87.1, + 86.6, + 86.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 86.9, + 86.5, + 86.6, + 87.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 84.5, + 84.7, + 84.8, + 84.9 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 85.3, + 85.4, + 85.7, + 85.9 + ], + "down_blocks.0.attentions.0.proj_out": [ + 84.3, + 84.7, + 84.8, + 85.0 + ], + "down_blocks.0.attentions.1.proj_in": [ + 82.9, + 82.8, + 83.4, + 83.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 86.9, + 86.9, + 87.2, + 86.8 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 86.0, + 86.6, + 86.9, + 87.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 83.7, + 83.4, + 83.9, + 83.8 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 83.9, + 83.9, + 84.6, + 84.5 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.0, + 88.1, + 87.9, + 87.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 86.6, + 86.5, + 87.4, + 87.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 86.5, + 86.8, + 87.2, + 87.4 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 86.9, + 87.1, + 87.3, + 87.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 83.8, + 84.1, + 84.3, + 84.5 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 84.8, + 85.0, + 85.2, + 85.3 + ], + "down_blocks.0.attentions.1.proj_out": [ + 84.9, + 85.3, + 85.7, + 85.4 + ], + "down_blocks.0.resnets.0.conv1": [ + 85.5, + 85.8, + 85.8, + 85.7 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 86.5, + 86.7, + 86.7, + 86.7 + ], + "down_blocks.0.resnets.0.conv2": [ + 83.6, + 83.4, + 84.1, + 84.1 + ], + "down_blocks.0.resnets.1.conv1": [ + 85.1, + 85.1, + 85.3, + 85.3 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 86.5, + 86.7, + 87.5, + 87.1 + ], + "down_blocks.0.resnets.1.conv2": [ + 84.5, + 84.7, + 85.0, + 85.3 + ], + "down_blocks.0.downsamplers.0.conv": [ + 83.1, + 83.7, + 84.6, + 83.8 + ], + "down_blocks.1.attentions.0.proj_in": [ + 86.3, + 86.4, + 87.1, + 86.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.7, + 87.9, + 88.0, + 88.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.7, + 87.7, + 87.9, + 87.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 86.7, + 86.7, + 87.2, + 87.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 86.0, + 86.2, + 87.0, + 86.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.4, + 88.6, + 88.8, + 88.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.4, + 88.2, + 88.2, + 88.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.3, + 87.6, + 87.7, + 87.5 + ], + 
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.6, + 87.7, + 87.8, + 87.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 85.9, + 86.1, + 86.8, + 86.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 86.7, + 86.8, + 87.2, + 87.4 + ], + "down_blocks.1.attentions.0.proj_out": [ + 85.6, + 86.4, + 87.1, + 86.8 + ], + "down_blocks.1.attentions.1.proj_in": [ + 86.3, + 86.3, + 86.5, + 86.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 87.8, + 87.7, + 88.1, + 87.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.2, + 87.8, + 88.2, + 88.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 86.3, + 86.5, + 87.0, + 87.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 86.1, + 86.5, + 87.0, + 86.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 87.5, + 87.9, + 87.5, + 87.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.2, + 88.4, + 88.4, + 88.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 87.3, + 87.8, + 88.0, + 87.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 87.5, + 87.9, + 88.0, + 87.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 86.5, + 86.1, + 86.7, + 86.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 86.4, + 86.6, + 87.4, + 87.3 + ], + "down_blocks.1.attentions.1.proj_out": [ + 86.8, + 86.5, + 87.3, + 86.8 + ], + "down_blocks.1.resnets.0.conv1": [ + 87.0, + 86.2, + 87.5, + 86.8 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 87.5, + 87.7, + 88.0, + 88.4 + ], + "down_blocks.1.resnets.0.conv2": [ + 87.8, + 87.5, + 87.9, + 87.7 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 82.4, + 82.2, + 83.0, + 83.2 + ], + "down_blocks.1.resnets.1.conv1": [ + 86.7, + 86.6, + 86.7, + 85.8 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 88.0, + 87.7, + 87.7, + 87.7 + ], + "down_blocks.1.resnets.1.conv2": [ + 85.1, + 85.8, + 86.8, + 86.3 + ], + "down_blocks.1.downsamplers.0.conv": [ + 83.1, + 82.6, + 83.8, + 83.1 + ], + "down_blocks.2.attentions.0.proj_in": [ + 85.9, + 86.4, + 87.5, + 86.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.0, + 88.4, + 88.5, + 88.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.6, + 88.5, + 88.7, + 88.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.7, + 87.0, + 88.0, + 87.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 86.4, + 86.9, + 87.4, + 87.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.0, + 87.8, + 88.5, + 88.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.8, + 84.1, + 87.1, + 87.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 86.6, + 86.2, + 87.6, + 87.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.6, + 88.5, + 88.4, + 88.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 85.8, + 85.5, + 87.0, + 86.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 87.5, + 87.0, + 87.7, + 87.7 + ], + "down_blocks.2.attentions.0.proj_out": [ + 87.1, + 87.4, + 87.5, + 87.7 + ], + "down_blocks.2.attentions.1.proj_in": [ + 87.1, + 87.2, + 88.2, + 88.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.0, + 87.8, + 88.3, + 88.3 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.5, + 88.4, + 88.7, + 88.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 87.8, + 87.2, + 87.7, + 87.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 86.7, + 87.3, + 87.6, + 86.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 87.8, + 87.8, + 87.7, + 87.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 83.1, + 83.4, + 85.2, + 84.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 86.2, + 86.8, + 87.6, + 86.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.1, + 88.0, + 88.4, + 88.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 87.1, + 87.4, + 87.7, + 86.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 87.5, + 87.6, + 88.0, + 87.7 + ], + "down_blocks.2.attentions.1.proj_out": [ + 88.0, + 87.2, + 87.5, + 87.2 + ], + "down_blocks.2.resnets.0.conv1": [ + 84.2, + 86.2, + 87.6, + 86.6 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 87.5, + 87.9, + 88.1, + 88.2 + ], + "down_blocks.2.resnets.0.conv2": [ + 87.6, + 87.9, + 88.1, + 88.2 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 85.0, + 84.3, + 85.7, + 85.6 + ], + "down_blocks.2.resnets.1.conv1": [ + 86.8, + 87.1, + 86.9, + 86.6 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 88.2, + 88.3, + 88.4, + 88.6 + ], + "down_blocks.2.resnets.1.conv2": [ + 87.3, + 87.5, + 88.1, + 87.8 + ], + "down_blocks.2.downsamplers.0.conv": [ + 87.9, + 87.7, + 88.3, + 88.3 + ], + "down_blocks.3.resnets.0.conv1": [ + 87.9, + 88.5, + 88.6, + 88.3 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 88.5, + 88.6, + 88.7, + 88.7 + ], + "down_blocks.3.resnets.0.conv2": [ + 88.0, + 88.3, + 88.1, + 88.3 + ], + "down_blocks.3.resnets.1.conv1": [ + 88.0, + 87.7, + 88.4, + 88.2 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 88.5, + 88.5, + 88.5, + 88.7 + ], + "down_blocks.3.resnets.1.conv2": [ + 87.9, + 88.5, + 88.4, + 88.4 + ], + "up_blocks.0.resnets.0.conv1": [ + 88.3, + 88.4, + 88.5, + 88.4 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 88.7, + 88.7, + 88.6, + 88.7 + ], + "up_blocks.0.resnets.0.conv2": [ + 87.3, + 87.7, + 88.2, + 88.0 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 88.5, + 88.3, + 88.5, + 88.6 + ], + "up_blocks.0.resnets.1.conv1": [ + 87.1, + 87.4, + 88.4, + 88.3 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 88.6, + 88.5, + 88.8, + 88.6 + ], + "up_blocks.0.resnets.1.conv2": [ + 87.8, + 88.1, + 88.6, + 88.2 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 88.6, + 88.3, + 88.7, + 88.2 + ], + "up_blocks.0.resnets.2.conv1": [ + 85.9, + 87.4, + 87.8, + 87.9 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 88.7, + 88.7, + 88.4, + 88.6 + ], + "up_blocks.0.resnets.2.conv2": [ + 88.4, + 88.3, + 88.5, + 88.2 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 88.2, + 88.5, + 88.3, + 88.3 + ], + "up_blocks.0.upsamplers.0.conv": [ + 88.1, + 87.7, + 88.2, + 88.2 + ], + "up_blocks.1.attentions.0.proj_in": [ + 87.4, + 87.0, + 87.6, + 87.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.6, + 87.7, + 88.2, + 88.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.3, + 88.5, + 88.4, + 88.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.9, + 87.8, + 88.0, + 87.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 86.9, + 86.1, + 86.4, + 86.5 + ], + 
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 87.7, + 87.2, + 88.2, + 88.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 86.2, + 85.8, + 87.0, + 86.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.7, + 87.3, + 87.7, + 87.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.3, + 88.4, + 88.4, + 88.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.0, + 87.7, + 87.8, + 88.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.0, + 87.8, + 87.8, + 88.3 + ], + "up_blocks.1.attentions.0.proj_out": [ + 87.6, + 87.1, + 88.2, + 88.0 + ], + "up_blocks.1.attentions.1.proj_in": [ + 86.8, + 86.3, + 87.1, + 86.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.4, + 88.6, + 88.5, + 88.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.7, + 88.6, + 88.6, + 88.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 87.9, + 87.8, + 88.1, + 88.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 86.5, + 85.7, + 86.7, + 86.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.1, + 88.3, + 87.9, + 88.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 87.1, + 88.3, + 86.6, + 87.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 87.2, + 86.5, + 87.6, + 87.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.1, + 88.1, + 88.1, + 88.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 87.9, + 87.7, + 88.1, + 88.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 87.7, + 87.1, + 87.7, + 87.8 + ], + "up_blocks.1.attentions.1.proj_out": [ + 87.9, + 87.4, + 87.9, + 87.7 + ], + "up_blocks.1.attentions.2.proj_in": [ + 85.7, + 85.4, + 86.2, + 85.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 87.4, + 87.3, + 87.8, + 87.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 88.5, + 88.4, + 88.4, + 88.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 87.8, + 87.1, + 88.0, + 87.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 85.7, + 85.3, + 86.6, + 86.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 88.1, + 88.4, + 88.5, + 88.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 87.7, + 88.4, + 88.3, + 87.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 87.0, + 87.4, + 87.2, + 87.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 87.9, + 87.5, + 88.0, + 87.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 86.6, + 87.2, + 87.4, + 87.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 87.8, + 87.6, + 87.8, + 88.0 + ], + "up_blocks.1.attentions.2.proj_out": [ + 86.8, + 86.8, + 87.5, + 87.4 + ], + "up_blocks.1.resnets.0.conv1": [ + 87.8, + 87.6, + 88.2, + 88.1 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 87.8, + 87.7, + 88.2, + 88.1 + ], + "up_blocks.1.resnets.0.conv2": [ + 87.6, + 87.7, + 87.5, + 87.8 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 87.4, + 87.0, + 87.8, + 87.5 + ], + "up_blocks.1.resnets.1.conv1": [ + 87.5, + 87.7, + 87.9, + 87.6 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 87.9, + 87.9, + 88.6, + 88.0 + ], + "up_blocks.1.resnets.1.conv2": [ + 87.7, + 87.7, + 88.0, + 88.1 + ], + 
"up_blocks.1.resnets.1.conv_shortcut": [ + 86.7, + 86.6, + 87.5, + 87.5 + ], + "up_blocks.1.resnets.2.conv1": [ + 87.6, + 87.7, + 87.9, + 88.1 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 87.9, + 87.9, + 88.0, + 88.0 + ], + "up_blocks.1.resnets.2.conv2": [ + 87.8, + 88.0, + 88.3, + 88.0 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 85.4, + 85.2, + 86.1, + 86.2 + ], + "up_blocks.1.upsamplers.0.conv": [ + 86.3, + 85.6, + 86.4, + 86.5 + ], + "up_blocks.2.attentions.0.proj_in": [ + 86.5, + 86.5, + 87.1, + 86.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.9, + 87.9, + 88.3, + 87.9 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.2, + 87.9, + 88.2, + 88.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.3, + 87.3, + 87.1, + 87.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 87.1, + 86.9, + 87.2, + 87.0 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.7, + 88.8, + 88.8, + 88.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.8, + 88.8, + 88.8, + 88.7 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.4, + 87.2, + 87.6, + 87.8 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.0, + 87.2, + 87.4, + 87.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 87.1, + 87.0, + 87.5, + 87.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 87.3, + 87.7, + 87.9, + 87.9 + ], + "up_blocks.2.attentions.0.proj_out": [ + 87.4, + 87.0, + 87.4, + 87.5 + ], + "up_blocks.2.attentions.1.proj_in": [ + 85.6, + 85.6, + 86.3, + 86.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 87.9, + 87.8, + 88.3, + 87.9 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 87.9, + 88.0, + 88.1, + 88.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 87.5, + 87.0, + 87.7, + 87.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 86.2, + 85.5, + 86.5, + 86.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.6, + 88.5, + 88.5, + 88.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.5, + 88.7, + 88.2, + 87.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.0, + 87.3, + 87.8, + 87.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 85.4, + 84.6, + 85.8, + 85.2 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 84.8, + 85.0, + 84.6, + 84.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 87.4, + 87.3, + 87.3, + 87.3 + ], + "up_blocks.2.attentions.1.proj_out": [ + 86.6, + 86.7, + 87.1, + 86.9 + ], + "up_blocks.2.attentions.2.proj_in": [ + 85.2, + 85.3, + 86.0, + 85.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 87.8, + 87.7, + 87.9, + 88.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 87.5, + 87.3, + 87.7, + 87.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 86.8, + 86.5, + 87.0, + 87.1 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 84.8, + 84.3, + 84.9, + 84.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 88.3, + 88.5, + 88.8, + 88.9 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 88.0, + 88.2, + 88.6, + 88.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 87.4, + 87.5, + 87.6, + 87.6 + ], 
+ "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 87.5, + 87.6, + 88.0, + 87.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 84.8, + 84.4, + 85.5, + 84.9 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 86.6, + 86.7, + 87.1, + 86.7 + ], + "up_blocks.2.attentions.2.proj_out": [ + 85.1, + 85.3, + 85.6, + 85.8 + ], + "up_blocks.2.resnets.0.conv1": [ + 87.4, + 87.1, + 87.3, + 87.9 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 88.1, + 88.0, + 88.2, + 88.2 + ], + "up_blocks.2.resnets.0.conv2": [ + 87.5, + 87.3, + 87.7, + 87.8 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 84.2, + 83.8, + 85.5, + 85.2 + ], + "up_blocks.2.resnets.1.conv1": [ + 86.3, + 86.2, + 86.7, + 86.9 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 87.9, + 87.5, + 88.1, + 87.9 + ], + "up_blocks.2.resnets.1.conv2": [ + 87.1, + 86.8, + 86.8, + 87.4 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 84.9, + 84.6, + 84.7, + 84.2 + ], + "up_blocks.2.resnets.2.conv1": [ + 86.1, + 86.7, + 86.9, + 86.8 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 87.6, + 87.0, + 88.1, + 87.8 + ], + "up_blocks.2.resnets.2.conv2": [ + 86.3, + 86.8, + 86.6, + 87.2 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 83.9, + 84.0, + 84.6, + 83.7 + ], + "up_blocks.2.upsamplers.0.conv": [ + 84.8, + 84.8, + 85.4, + 85.3 + ], + "up_blocks.3.attentions.0.proj_in": [ + 82.7, + 82.9, + 83.7, + 83.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 86.0, + 86.3, + 86.7, + 86.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 86.0, + 86.1, + 86.3, + 86.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 84.5, + 84.4, + 85.5, + 85.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 83.7, + 83.9, + 84.6, + 84.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.3, + 88.2, + 88.1, + 88.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 87.8, + 87.7, + 87.2, + 87.3 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 85.2, + 83.7, + 85.4, + 85.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 86.8, + 86.2, + 87.5, + 87.3 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 83.8, + 84.0, + 84.3, + 84.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 84.5, + 84.6, + 85.3, + 85.1 + ], + "up_blocks.3.attentions.0.proj_out": [ + 84.2, + 84.2, + 84.8, + 84.7 + ], + "up_blocks.3.attentions.1.proj_in": [ + 80.2, + 80.7, + 81.1, + 81.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 84.3, + 84.6, + 85.3, + 85.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 84.9, + 85.3, + 85.1, + 85.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 82.9, + 83.3, + 83.6, + 83.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 81.3, + 81.8, + 82.0, + 82.0 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 87.4, + 87.2, + 87.5, + 87.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 86.3, + 86.5, + 87.1, + 86.9 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 85.5, + 85.0, + 86.3, + 86.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 86.6, + 87.2, + 87.0, + 87.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 81.7, + 82.2, + 82.1, + 82.1 + ], + 
"up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 83.3, + 84.0, + 83.9, + 83.9 + ], + "up_blocks.3.attentions.1.proj_out": [ + 82.8, + 83.5, + 83.5, + 83.6 + ], + "up_blocks.3.attentions.2.proj_in": [ + 79.9, + 80.1, + 79.3, + 79.6 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 86.3, + 86.1, + 86.6, + 86.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 85.7, + 85.9, + 85.9, + 86.1 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 82.9, + 82.9, + 83.4, + 83.1 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 81.3, + 81.6, + 81.1, + 81.0 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 87.4, + 87.1, + 85.4, + 85.7 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 87.1, + 86.7, + 86.9, + 87.7 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 86.1, + 84.2, + 84.8, + 85.0 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 85.6, + 83.9, + 85.0, + 85.3 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 81.4, + 81.9, + 82.1, + 81.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 85.2, + 85.3, + 85.1, + 85.2 + ], + "up_blocks.3.attentions.2.proj_out": [ + 83.0, + 83.4, + 83.1, + 83.2 + ], + "up_blocks.3.resnets.0.conv1": [ + 82.9, + 83.0, + 83.7, + 83.7 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 87.8, + 87.6, + 88.0, + 87.9 + ], + "up_blocks.3.resnets.0.conv2": [ + 83.7, + 83.8, + 84.2, + 84.4 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 80.4, + 80.4, + 81.2, + 81.2 + ], + "up_blocks.3.resnets.1.conv1": [ + 81.9, + 82.1, + 82.7, + 82.5 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 87.1, + 87.4, + 87.7, + 87.1 + ], + "up_blocks.3.resnets.1.conv2": [ + 81.8, + 82.2, + 82.4, + 82.4 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 79.2, + 79.6, + 80.2, + 80.2 + ], + "up_blocks.3.resnets.2.conv1": [ + 80.9, + 81.2, + 81.0, + 81.2 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 85.3, + 85.8, + 85.8, + 86.0 + ], + "up_blocks.3.resnets.2.conv2": [ + 80.2, + 80.7, + 80.6, + 80.6 + ], + "up_blocks.3.resnets.2.conv_shortcut": [ + 79.3, + 79.4, + 79.5, + 79.5 + ], + "mid_block.attentions.0.proj_in": [ + 87.4, + 87.1, + 88.2, + 88.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.7, + 88.3, + 88.5, + 88.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.3, + 89.0, + 89.0, + 88.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.5, + 88.2, + 88.7, + 88.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.4, + 88.1, + 88.5, + 88.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.3, + 88.2, + 88.6, + 88.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 86.9, + 87.5, + 87.7, + 87.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.8, + 87.5, + 88.0, + 88.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.7, + 88.8, + 88.7, + 88.7 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.5, + 87.5, + 88.7, + 88.5 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.8, + 88.7, + 88.5, + 88.5 + ], + "mid_block.attentions.0.proj_out": [ + 88.5, + 88.2, + 88.7, + 88.6 + ], + "mid_block.resnets.0.conv1": [ + 88.6, + 88.4, + 88.6, + 88.8 + ], + "mid_block.resnets.0.time_emb_proj": [ + 88.6, + 88.6, + 88.5, + 88.5 + ], + "mid_block.resnets.0.conv2": [ + 
88.4, + 88.5, + 88.6, + 88.7 + ], + "mid_block.resnets.1.conv1": [ + 88.3, + 88.4, + 88.6, + 88.6 + ], + "mid_block.resnets.1.time_emb_proj": [ + 88.5, + 88.8, + 88.6, + 88.4 + ], + "mid_block.resnets.1.conv2": [ + 88.6, + 88.5, + 88.7, + 88.3 + ] + }, + "6": { + "time_embedding.linear_1": [ + 87.7, + 88.1, + 88.5, + 88.0 + ], + "time_embedding.linear_2": [ + 87.4, + 88.0, + 88.0, + 88.1 + ], + "down_blocks.0.attentions.0.proj_in": [ + 87.5, + 87.5, + 87.7, + 87.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.8, + 88.1, + 88.1, + 88.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.0, + 87.8, + 88.2, + 88.0 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.4, + 87.5, + 87.7, + 87.8 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 87.2, + 87.7, + 87.5, + 87.5 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.0, + 88.1, + 88.4, + 87.7 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.0, + 88.2, + 88.1, + 87.8 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.8, + 87.7, + 88.2, + 88.0 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.7, + 87.9, + 88.0, + 87.9 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 86.9, + 87.3, + 87.4, + 87.8 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 87.4, + 87.6, + 87.4, + 88.0 + ], + "down_blocks.0.attentions.0.proj_out": [ + 87.4, + 87.5, + 87.6, + 87.4 + ], + "down_blocks.0.attentions.1.proj_in": [ + 87.0, + 87.3, + 87.5, + 87.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 87.8, + 88.0, + 88.0, + 88.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 87.8, + 87.9, + 88.1, + 88.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 87.3, + 87.4, + 87.8, + 87.8 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 87.5, + 87.6, + 87.8, + 87.5 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.2, + 88.4, + 88.5, + 88.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.3, + 88.2, + 88.6, + 88.4 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 87.9, + 88.0, + 88.0, + 88.4 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 87.5, + 88.1, + 88.1, + 88.0 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 87.3, + 87.3, + 87.6, + 87.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 87.4, + 87.6, + 88.0, + 87.9 + ], + "down_blocks.0.attentions.1.proj_out": [ + 87.2, + 87.5, + 87.8, + 87.3 + ], + "down_blocks.0.resnets.0.conv1": [ + 87.4, + 87.7, + 87.6, + 88.0 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 87.8, + 87.6, + 88.0, + 87.9 + ], + "down_blocks.0.resnets.0.conv2": [ + 87.1, + 87.3, + 87.6, + 87.2 + ], + "down_blocks.0.resnets.1.conv1": [ + 87.3, + 87.7, + 87.8, + 87.6 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 88.1, + 87.9, + 88.3, + 88.1 + ], + "down_blocks.0.resnets.1.conv2": [ + 87.1, + 87.4, + 87.6, + 87.1 + ], + "down_blocks.0.downsamplers.0.conv": [ + 87.4, + 87.3, + 87.7, + 87.7 + ], + "down_blocks.1.attentions.0.proj_in": [ + 87.9, + 88.1, + 88.1, + 88.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.2, + 88.5, + 88.7, + 88.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.0, + 
88.4, + 88.3, + 88.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.9, + 87.7, + 88.2, + 88.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 87.4, + 87.9, + 87.9, + 87.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.4, + 88.5, + 88.5, + 88.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.4, + 88.7, + 88.6, + 88.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.9, + 88.1, + 88.1, + 88.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.5, + 88.3, + 88.4, + 88.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 87.8, + 88.1, + 88.1, + 88.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 87.7, + 88.0, + 88.3, + 88.7 + ], + "down_blocks.1.attentions.0.proj_out": [ + 87.6, + 87.8, + 88.2, + 88.3 + ], + "down_blocks.1.attentions.1.proj_in": [ + 87.5, + 87.9, + 88.4, + 88.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.3, + 88.3, + 88.6, + 88.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.0, + 88.3, + 88.5, + 88.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.1, + 88.2, + 88.3, + 88.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 87.6, + 88.0, + 88.3, + 88.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.4, + 88.6, + 88.7, + 88.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.4, + 88.5, + 88.6, + 88.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.1, + 88.1, + 88.6, + 88.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.4, + 88.2, + 88.4, + 88.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 88.0, + 88.1, + 88.2, + 88.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.1, + 88.0, + 88.3, + 88.3 + ], + "down_blocks.1.attentions.1.proj_out": [ + 88.0, + 88.0, + 88.2, + 88.4 + ], + "down_blocks.1.resnets.0.conv1": [ + 87.9, + 88.0, + 88.1, + 88.2 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 88.1, + 88.2, + 88.1, + 88.5 + ], + "down_blocks.1.resnets.0.conv2": [ + 87.9, + 88.4, + 88.1, + 88.7 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 87.2, + 87.3, + 87.9, + 87.5 + ], + "down_blocks.1.resnets.1.conv1": [ + 87.7, + 88.0, + 88.3, + 88.3 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 88.1, + 88.2, + 88.5, + 88.4 + ], + "down_blocks.1.resnets.1.conv2": [ + 87.8, + 87.6, + 88.2, + 88.2 + ], + "down_blocks.1.downsamplers.0.conv": [ + 87.9, + 88.0, + 88.2, + 88.2 + ], + "down_blocks.2.attentions.0.proj_in": [ + 87.6, + 87.7, + 88.2, + 88.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.3, + 88.4, + 88.7, + 88.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.3, + 88.6, + 88.7, + 88.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.5, + 87.9, + 88.4, + 87.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 87.7, + 87.9, + 88.4, + 88.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.4, + 88.4, + 88.7, + 88.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 87.8, + 88.4, + 88.7, + 88.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.9, + 88.2, + 88.1, + 88.1 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.4, + 88.4, + 88.6, + 88.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 87.9, + 87.9, + 87.7, + 88.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.0, + 87.7, + 88.3, + 88.1 + ], + "down_blocks.2.attentions.0.proj_out": [ + 87.8, + 88.3, + 88.5, + 88.3 + ], + "down_blocks.2.attentions.1.proj_in": [ + 87.7, + 88.0, + 88.1, + 88.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.3, + 88.2, + 88.5, + 88.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.2, + 88.7, + 88.5, + 88.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.1, + 88.3, + 88.4, + 88.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 88.3, + 88.3, + 88.0, + 88.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.2, + 88.3, + 88.6, + 88.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.3, + 88.4, + 88.5, + 88.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.0, + 88.4, + 88.5, + 88.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.3, + 88.5, + 88.6, + 88.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 87.9, + 88.2, + 88.4, + 88.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.2, + 88.1, + 88.5, + 88.3 + ], + "down_blocks.2.attentions.1.proj_out": [ + 87.9, + 88.2, + 88.5, + 88.3 + ], + "down_blocks.2.resnets.0.conv1": [ + 87.2, + 88.2, + 88.4, + 88.1 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 88.3, + 88.6, + 88.8, + 88.7 + ], + "down_blocks.2.resnets.0.conv2": [ + 88.1, + 88.2, + 88.7, + 88.5 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 87.4, + 87.7, + 88.2, + 88.2 + ], + "down_blocks.2.resnets.1.conv1": [ + 88.2, + 88.2, + 88.5, + 88.6 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 88.3, + 88.4, + 88.7, + 88.5 + ], + "down_blocks.2.resnets.1.conv2": [ + 88.3, + 88.3, + 88.4, + 88.3 + ], + "down_blocks.2.downsamplers.0.conv": [ + 88.4, + 88.4, + 88.4, + 88.7 + ], + "down_blocks.3.resnets.0.conv1": [ + 88.0, + 88.4, + 88.2, + 88.3 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 88.4, + 88.5, + 88.7, + 88.7 + ], + "down_blocks.3.resnets.0.conv2": [ + 88.5, + 88.6, + 88.9, + 88.5 + ], + "down_blocks.3.resnets.1.conv1": [ + 88.5, + 88.4, + 88.7, + 88.7 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 88.4, + 88.7, + 88.8, + 88.5 + ], + "down_blocks.3.resnets.1.conv2": [ + 88.3, + 88.6, + 88.7, + 88.8 + ], + "up_blocks.0.resnets.0.conv1": [ + 88.3, + 88.7, + 88.7, + 88.8 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 88.5, + 88.7, + 88.8, + 88.5 + ], + "up_blocks.0.resnets.0.conv2": [ + 88.4, + 88.6, + 89.0, + 88.7 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 88.4, + 88.7, + 88.7, + 88.5 + ], + "up_blocks.0.resnets.1.conv1": [ + 88.2, + 88.5, + 88.7, + 88.5 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 88.6, + 88.7, + 88.6, + 88.8 + ], + "up_blocks.0.resnets.1.conv2": [ + 88.3, + 88.6, + 88.6, + 88.8 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 88.5, + 88.5, + 89.0, + 88.6 + ], + "up_blocks.0.resnets.2.conv1": [ + 88.5, + 88.6, + 88.6, + 88.6 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 88.5, + 88.7, + 88.7, + 88.6 + ], + "up_blocks.0.resnets.2.conv2": [ + 88.4, + 88.8, + 88.7, + 88.6 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 88.3, + 88.3, + 88.6, + 88.8 + ], + 
"up_blocks.0.upsamplers.0.conv": [ + 87.9, + 88.1, + 88.4, + 88.4 + ], + "up_blocks.1.attentions.0.proj_in": [ + 88.1, + 88.3, + 88.3, + 88.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.4, + 88.6, + 88.6, + 88.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.5, + 88.5, + 88.5, + 88.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.5, + 88.5, + 88.3, + 88.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.0, + 88.3, + 88.4, + 88.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.3, + 88.3, + 89.2, + 88.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.2, + 88.2, + 88.7, + 88.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.9, + 88.3, + 88.5, + 88.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.3, + 88.5, + 88.6, + 88.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.2, + 88.4, + 88.4, + 88.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.2, + 88.4, + 88.6, + 88.4 + ], + "up_blocks.1.attentions.0.proj_out": [ + 87.9, + 88.3, + 88.3, + 88.4 + ], + "up_blocks.1.attentions.1.proj_in": [ + 87.8, + 88.1, + 88.2, + 88.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.3, + 88.5, + 88.7, + 88.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.6, + 88.7, + 88.5, + 88.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 87.9, + 88.3, + 88.5, + 88.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 88.3, + 88.1, + 88.3, + 88.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.4, + 88.7, + 88.3, + 88.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.0, + 88.6, + 88.4, + 88.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 87.9, + 88.0, + 88.4, + 88.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 87.8, + 88.0, + 88.5, + 88.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 88.1, + 88.4, + 88.5, + 88.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.0, + 88.8, + 88.3, + 88.6 + ], + "up_blocks.1.attentions.1.proj_out": [ + 87.8, + 88.5, + 88.2, + 88.6 + ], + "up_blocks.1.attentions.2.proj_in": [ + 87.9, + 87.5, + 88.1, + 88.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 88.0, + 88.0, + 88.3, + 88.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 88.3, + 88.5, + 88.6, + 88.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 88.0, + 88.1, + 88.1, + 88.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 87.8, + 87.8, + 88.2, + 88.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 88.7, + 88.4, + 88.6, + 88.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 88.3, + 88.8, + 88.8, + 88.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 87.6, + 87.8, + 88.4, + 88.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 88.4, + 88.3, + 88.7, + 88.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 88.1, + 88.0, + 88.3, + 88.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 88.1, + 88.3, + 88.6, + 88.3 + ], + "up_blocks.1.attentions.2.proj_out": [ + 87.5, + 87.8, + 88.0, + 88.3 + 
], + "up_blocks.1.resnets.0.conv1": [ + 88.2, + 88.3, + 88.2, + 88.7 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 88.7, + 88.5, + 88.8, + 88.6 + ], + "up_blocks.1.resnets.0.conv2": [ + 87.9, + 88.1, + 88.5, + 88.4 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 88.3, + 88.1, + 88.5, + 88.6 + ], + "up_blocks.1.resnets.1.conv1": [ + 88.2, + 88.5, + 88.3, + 88.5 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 88.3, + 88.6, + 88.4, + 88.6 + ], + "up_blocks.1.resnets.1.conv2": [ + 88.0, + 88.3, + 88.6, + 88.7 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 88.0, + 88.5, + 88.4, + 88.4 + ], + "up_blocks.1.resnets.2.conv1": [ + 87.9, + 88.0, + 88.4, + 88.6 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 88.2, + 88.6, + 88.6, + 88.8 + ], + "up_blocks.1.resnets.2.conv2": [ + 88.3, + 88.3, + 88.4, + 88.3 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 87.5, + 87.7, + 88.3, + 87.9 + ], + "up_blocks.1.upsamplers.0.conv": [ + 87.5, + 88.1, + 88.0, + 88.3 + ], + "up_blocks.2.attentions.0.proj_in": [ + 88.1, + 88.2, + 88.1, + 88.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.2, + 88.4, + 88.3, + 88.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.4, + 88.7, + 88.5, + 88.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.0, + 88.0, + 88.0, + 88.1 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 87.7, + 88.0, + 88.3, + 88.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.4, + 88.6, + 88.6, + 88.8 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.4, + 88.6, + 88.7, + 88.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.0, + 88.1, + 88.4, + 88.1 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.8, + 88.4, + 88.4, + 88.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 87.8, + 87.9, + 88.4, + 88.1 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.1, + 88.0, + 88.3, + 88.5 + ], + "up_blocks.2.attentions.0.proj_out": [ + 87.9, + 88.0, + 88.4, + 88.3 + ], + "up_blocks.2.attentions.1.proj_in": [ + 87.8, + 87.4, + 88.0, + 87.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 87.7, + 88.4, + 88.4, + 88.3 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.5, + 88.6, + 88.4, + 88.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 87.5, + 87.7, + 88.0, + 88.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 88.0, + 87.9, + 87.9, + 88.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.4, + 88.8, + 88.6, + 88.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.2, + 88.6, + 88.4, + 88.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.1, + 87.8, + 88.2, + 88.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.0, + 88.3, + 88.3, + 88.2 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 87.4, + 87.6, + 87.5, + 87.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.1, + 88.0, + 88.3, + 88.0 + ], + "up_blocks.2.attentions.1.proj_out": [ + 88.0, + 87.9, + 88.0, + 88.2 + ], + "up_blocks.2.attentions.2.proj_in": [ + 86.9, + 87.6, + 87.9, + 87.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 87.9, + 88.3, + 88.1, + 88.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 87.9, + 
88.1, + 88.2, + 88.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 87.8, + 88.0, + 88.0, + 87.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 87.4, + 87.4, + 87.9, + 87.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 88.3, + 88.6, + 88.8, + 88.9 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 88.4, + 88.6, + 88.7, + 89.0 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 88.1, + 88.1, + 88.3, + 88.0 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 88.0, + 88.3, + 88.4, + 88.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 87.2, + 87.2, + 87.7, + 87.1 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 87.8, + 87.7, + 88.3, + 88.3 + ], + "up_blocks.2.attentions.2.proj_out": [ + 87.5, + 87.9, + 87.8, + 88.1 + ], + "up_blocks.2.resnets.0.conv1": [ + 88.3, + 88.1, + 88.2, + 88.3 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 88.3, + 88.4, + 88.6, + 88.8 + ], + "up_blocks.2.resnets.0.conv2": [ + 88.2, + 88.1, + 88.2, + 88.1 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 87.5, + 87.6, + 88.2, + 88.0 + ], + "up_blocks.2.resnets.1.conv1": [ + 87.7, + 88.1, + 88.0, + 88.3 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 87.8, + 88.2, + 88.3, + 88.2 + ], + "up_blocks.2.resnets.1.conv2": [ + 87.6, + 88.2, + 88.0, + 88.2 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 87.4, + 87.7, + 88.0, + 88.1 + ], + "up_blocks.2.resnets.2.conv1": [ + 88.0, + 88.0, + 88.1, + 88.1 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 87.9, + 87.9, + 88.4, + 88.1 + ], + "up_blocks.2.resnets.2.conv2": [ + 87.7, + 87.8, + 88.2, + 88.0 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 87.3, + 87.6, + 87.7, + 87.9 + ], + "up_blocks.2.upsamplers.0.conv": [ + 87.1, + 87.3, + 87.6, + 87.7 + ], + "up_blocks.3.attentions.0.proj_in": [ + 86.8, + 87.1, + 87.2, + 87.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.6, + 88.1, + 88.3, + 87.9 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.6, + 88.0, + 88.2, + 88.1 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.6, + 87.9, + 88.0, + 88.0 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 87.6, + 87.6, + 87.6, + 87.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.3, + 88.3, + 88.4, + 88.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 87.8, + 88.4, + 88.5, + 88.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.6, + 87.2, + 87.8, + 88.1 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.7, + 88.0, + 88.4, + 88.1 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 86.8, + 87.4, + 87.8, + 87.8 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 87.5, + 87.6, + 87.8, + 87.9 + ], + "up_blocks.3.attentions.0.proj_out": [ + 87.2, + 87.5, + 88.0, + 88.0 + ], + "up_blocks.3.attentions.1.proj_in": [ + 85.9, + 86.4, + 86.2, + 86.6 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 87.5, + 87.8, + 87.8, + 88.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 87.6, + 87.4, + 87.8, + 87.9 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 86.9, + 86.9, + 87.2, + 87.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 86.6, + 86.7, + 87.0, + 86.7 + ], + 
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 87.7, + 88.2, + 88.2, + 88.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 87.8, + 88.3, + 88.2, + 88.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 87.4, + 87.7, + 88.2, + 88.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 87.8, + 88.0, + 88.2, + 88.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 86.2, + 86.8, + 86.9, + 87.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 87.1, + 87.6, + 87.6, + 87.8 + ], + "up_blocks.3.attentions.1.proj_out": [ + 87.2, + 87.4, + 87.6, + 87.5 + ], + "up_blocks.3.attentions.2.proj_in": [ + 86.1, + 86.3, + 86.2, + 86.1 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 87.9, + 88.1, + 87.9, + 88.3 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 87.7, + 88.3, + 88.3, + 88.1 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 87.3, + 87.7, + 87.5, + 87.3 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 86.9, + 87.2, + 87.0, + 87.1 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 88.2, + 88.2, + 88.5, + 88.3 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 88.1, + 88.6, + 88.4, + 88.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 88.1, + 88.0, + 88.2, + 88.3 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 87.9, + 87.9, + 88.0, + 87.9 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 86.7, + 87.2, + 87.2, + 87.2 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 87.7, + 87.9, + 88.1, + 87.8 + ], + "up_blocks.3.attentions.2.proj_out": [ + 87.3, + 87.6, + 87.6, + 87.7 + ], + "up_blocks.3.resnets.0.conv1": [ + 86.9, + 87.0, + 87.1, + 87.0 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 88.1, + 88.1, + 88.5, + 88.5 + ], + "up_blocks.3.resnets.0.conv2": [ + 87.1, + 87.1, + 87.4, + 87.5 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 85.8, + 86.1, + 86.8, + 86.4 + ], + "up_blocks.3.resnets.1.conv1": [ + 86.8, + 87.0, + 87.1, + 86.7 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 87.9, + 88.1, + 88.1, + 88.1 + ], + "up_blocks.3.resnets.1.conv2": [ + 86.4, + 86.6, + 86.8, + 87.0 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 85.4, + 85.9, + 86.2, + 86.1 + ], + "up_blocks.3.resnets.2.conv1": [ + 86.5, + 86.8, + 87.1, + 87.1 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 87.8, + 87.8, + 87.9, + 88.3 + ], + "up_blocks.3.resnets.2.conv2": [ + 86.6, + 86.8, + 87.1, + 87.1 + ], + "up_blocks.3.resnets.2.conv_shortcut": [ + 86.5, + 86.8, + 86.7, + 86.2 + ], + "mid_block.attentions.0.proj_in": [ + 88.3, + 88.5, + 88.4, + 88.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.5, + 88.6, + 88.5, + 88.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.7, + 88.6, + 88.6, + 88.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.4, + 88.6, + 88.5, + 88.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.4, + 88.6, + 88.6, + 88.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.7, + 88.5, + 88.6, + 88.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.5, + 88.5, + 88.7, + 88.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.2, + 88.4, + 88.5, + 88.6 + ], + 
"mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.6, + 88.6, + 88.4, + 88.6 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.4, + 88.6, + 88.5, + 88.7 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.5, + 88.6, + 88.8, + 88.6 + ], + "mid_block.attentions.0.proj_out": [ + 88.4, + 88.5, + 88.6, + 88.6 + ], + "mid_block.resnets.0.conv1": [ + 88.5, + 88.6, + 88.6, + 88.8 + ], + "mid_block.resnets.0.time_emb_proj": [ + 88.3, + 88.6, + 88.5, + 88.6 + ], + "mid_block.resnets.0.conv2": [ + 88.3, + 88.4, + 88.6, + 88.8 + ], + "mid_block.resnets.1.conv1": [ + 88.5, + 88.6, + 88.6, + 88.9 + ], + "mid_block.resnets.1.time_emb_proj": [ + 88.6, + 88.7, + 88.8, + 88.8 + ], + "mid_block.resnets.1.conv2": [ + 88.2, + 88.5, + 88.5, + 88.9 + ] + }, + "8": { + "time_embedding.linear_1": [ + 88.1, + 88.4, + 88.6, + 88.4 + ], + "time_embedding.linear_2": [ + 87.8, + 88.4, + 88.1, + 88.1 + ], + "down_blocks.0.attentions.0.proj_in": [ + 87.9, + 87.8, + 88.1, + 88.1 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.1, + 88.4, + 88.3, + 88.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.1, + 88.6, + 88.5, + 88.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.8, + 88.2, + 88.0, + 88.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 87.5, + 88.1, + 88.0, + 88.1 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.2, + 88.6, + 88.5, + 88.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.0, + 88.4, + 88.5, + 88.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.0, + 88.2, + 88.5, + 88.5 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.3, + 88.4, + 88.3, + 88.5 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.0, + 88.1, + 88.1, + 87.9 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.0, + 88.1, + 88.1, + 88.0 + ], + "down_blocks.0.attentions.0.proj_out": [ + 87.7, + 87.9, + 88.0, + 88.3 + ], + "down_blocks.0.attentions.1.proj_in": [ + 87.7, + 88.0, + 88.4, + 88.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.0, + 88.2, + 88.4, + 88.5 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.2, + 88.6, + 88.5, + 88.4 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 87.9, + 88.3, + 88.6, + 88.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 87.5, + 88.2, + 88.5, + 88.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.6, + 88.5, + 88.8, + 88.5 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.5, + 88.6, + 88.5, + 88.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.0, + 88.7, + 88.4, + 88.4 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.4, + 88.4, + 88.8, + 88.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 87.8, + 88.3, + 88.1, + 88.5 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 87.8, + 88.1, + 88.2, + 88.1 + ], + "down_blocks.0.attentions.1.proj_out": [ + 88.0, + 88.5, + 88.2, + 88.5 + ], + "down_blocks.0.resnets.0.conv1": [ + 88.1, + 88.2, + 88.0, + 88.4 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 87.9, + 88.1, + 88.2, + 88.7 + ], + "down_blocks.0.resnets.0.conv2": [ + 87.7, + 88.1, + 88.2, + 87.8 + ], + 
"down_blocks.0.resnets.1.conv1": [ + 87.9, + 88.1, + 88.2, + 88.2 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 88.0, + 88.5, + 88.6, + 88.2 + ], + "down_blocks.0.resnets.1.conv2": [ + 88.0, + 88.4, + 88.1, + 88.3 + ], + "down_blocks.0.downsamplers.0.conv": [ + 88.3, + 88.2, + 88.5, + 88.4 + ], + "down_blocks.1.attentions.0.proj_in": [ + 88.0, + 88.4, + 88.5, + 88.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.4, + 88.6, + 88.6, + 88.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.5, + 88.7, + 88.8, + 88.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.5, + 88.4, + 88.7, + 88.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.1, + 88.8, + 88.6, + 88.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.7, + 88.7, + 88.9, + 88.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.4, + 88.7, + 88.6, + 88.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.2, + 88.7, + 88.6, + 88.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.3, + 88.6, + 88.5, + 88.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.1, + 88.3, + 88.2, + 88.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.6, + 88.6, + 88.7, + 88.5 + ], + "down_blocks.1.attentions.0.proj_out": [ + 87.9, + 88.5, + 88.5, + 88.3 + ], + "down_blocks.1.attentions.1.proj_in": [ + 88.0, + 88.6, + 88.2, + 88.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.6, + 88.8, + 88.8, + 88.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.1, + 88.4, + 88.5, + 88.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.2, + 88.7, + 88.5, + 88.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 88.4, + 88.6, + 88.6, + 88.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.4, + 88.8, + 88.6, + 88.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.5, + 88.5, + 88.8, + 88.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.2, + 88.7, + 88.5, + 88.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.8, + 89.0, + 88.7, + 88.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 88.1, + 88.6, + 88.2, + 88.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.4, + 88.5, + 88.6, + 88.5 + ], + "down_blocks.1.attentions.1.proj_out": [ + 88.4, + 88.6, + 88.4, + 88.5 + ], + "down_blocks.1.resnets.0.conv1": [ + 88.4, + 88.6, + 88.7, + 88.7 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 88.5, + 88.8, + 88.8, + 88.6 + ], + "down_blocks.1.resnets.0.conv2": [ + 88.2, + 88.6, + 88.6, + 88.5 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 87.9, + 88.4, + 88.5, + 88.4 + ], + "down_blocks.1.resnets.1.conv1": [ + 88.5, + 88.6, + 88.4, + 88.7 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 88.3, + 88.5, + 88.7, + 88.6 + ], + "down_blocks.1.resnets.1.conv2": [ + 88.2, + 88.6, + 88.6, + 88.4 + ], + "down_blocks.1.downsamplers.0.conv": [ + 88.2, + 88.6, + 88.6, + 88.4 + ], + "down_blocks.2.attentions.0.proj_in": [ + 88.3, + 88.4, + 88.6, + 88.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.2, + 88.8, + 88.6, + 88.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.5, + 88.8, + 
88.5, + 88.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.4, + 88.5, + 88.6, + 88.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.4, + 88.4, + 88.6, + 88.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.6, + 88.9, + 88.5, + 88.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.4, + 88.9, + 88.7, + 88.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.1, + 88.4, + 88.5, + 88.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.2, + 88.6, + 88.6, + 88.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.3, + 88.7, + 88.6, + 88.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.4, + 88.4, + 88.6, + 88.8 + ], + "down_blocks.2.attentions.0.proj_out": [ + 88.4, + 88.9, + 88.5, + 88.3 + ], + "down_blocks.2.attentions.1.proj_in": [ + 88.4, + 88.5, + 88.8, + 88.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.5, + 88.7, + 88.7, + 88.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.5, + 88.7, + 88.7, + 88.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.2, + 88.9, + 88.8, + 88.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 88.5, + 88.3, + 88.6, + 88.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.4, + 88.9, + 88.6, + 88.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.4, + 88.7, + 88.6, + 88.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.4, + 88.7, + 88.5, + 88.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.1, + 88.9, + 88.7, + 88.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 88.4, + 88.8, + 88.8, + 88.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.2, + 88.7, + 88.5, + 88.6 + ], + "down_blocks.2.attentions.1.proj_out": [ + 88.3, + 88.7, + 88.8, + 88.7 + ], + "down_blocks.2.resnets.0.conv1": [ + 88.2, + 88.8, + 88.7, + 88.7 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 88.3, + 88.8, + 88.5, + 88.4 + ], + "down_blocks.2.resnets.0.conv2": [ + 88.1, + 88.5, + 88.6, + 88.8 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 88.1, + 88.4, + 88.7, + 88.4 + ], + "down_blocks.2.resnets.1.conv1": [ + 88.3, + 88.8, + 88.8, + 88.6 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 88.4, + 88.5, + 88.6, + 88.8 + ], + "down_blocks.2.resnets.1.conv2": [ + 88.5, + 88.7, + 88.6, + 88.7 + ], + "down_blocks.2.downsamplers.0.conv": [ + 88.3, + 88.7, + 88.8, + 88.5 + ], + "down_blocks.3.resnets.0.conv1": [ + 88.3, + 88.9, + 88.9, + 88.7 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 88.5, + 88.9, + 89.0, + 88.4 + ], + "down_blocks.3.resnets.0.conv2": [ + 88.6, + 88.9, + 88.7, + 88.7 + ], + "down_blocks.3.resnets.1.conv1": [ + 88.4, + 89.0, + 88.8, + 88.6 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 88.6, + 89.0, + 88.7, + 88.3 + ], + "down_blocks.3.resnets.1.conv2": [ + 88.4, + 89.1, + 88.8, + 88.8 + ], + "up_blocks.0.resnets.0.conv1": [ + 88.4, + 88.6, + 88.8, + 88.7 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 88.2, + 88.7, + 88.7, + 88.6 + ], + "up_blocks.0.resnets.0.conv2": [ + 88.6, + 88.7, + 88.6, + 88.7 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 88.6, + 89.0, + 88.3, + 88.7 + ], + "up_blocks.0.resnets.1.conv1": [ + 88.2, + 88.6, + 88.9, + 88.6 + ], + 
"up_blocks.0.resnets.1.time_emb_proj": [ + 88.4, + 88.7, + 88.7, + 88.6 + ], + "up_blocks.0.resnets.1.conv2": [ + 88.6, + 88.6, + 89.0, + 88.7 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 88.3, + 88.7, + 88.8, + 88.5 + ], + "up_blocks.0.resnets.2.conv1": [ + 88.4, + 88.9, + 88.9, + 88.5 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 88.5, + 88.6, + 88.5, + 88.6 + ], + "up_blocks.0.resnets.2.conv2": [ + 88.4, + 88.7, + 88.9, + 88.3 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 88.4, + 88.6, + 88.5, + 88.5 + ], + "up_blocks.0.upsamplers.0.conv": [ + 88.6, + 88.8, + 88.7, + 88.8 + ], + "up_blocks.1.attentions.0.proj_in": [ + 88.5, + 88.5, + 88.8, + 88.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.5, + 88.6, + 88.9, + 88.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.2, + 88.7, + 88.7, + 88.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.2, + 88.6, + 88.3, + 88.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.2, + 88.9, + 88.6, + 88.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.5, + 88.8, + 88.7, + 88.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.3, + 88.8, + 88.6, + 88.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.2, + 88.7, + 88.7, + 88.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.5, + 89.0, + 88.4, + 88.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.5, + 88.7, + 88.7, + 88.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.3, + 88.8, + 88.7, + 88.5 + ], + "up_blocks.1.attentions.0.proj_out": [ + 88.4, + 88.6, + 88.6, + 88.5 + ], + "up_blocks.1.attentions.1.proj_in": [ + 88.3, + 88.6, + 88.7, + 88.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.3, + 88.7, + 88.6, + 88.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.4, + 88.7, + 88.7, + 88.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.4, + 88.7, + 88.8, + 88.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 88.3, + 88.6, + 88.7, + 88.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.4, + 88.9, + 88.6, + 88.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.4, + 88.9, + 88.8, + 88.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.1, + 88.5, + 88.6, + 88.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.4, + 88.9, + 88.7, + 88.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 88.5, + 88.7, + 88.4, + 88.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.2, + 88.9, + 88.6, + 88.5 + ], + "up_blocks.1.attentions.1.proj_out": [ + 88.3, + 88.8, + 88.6, + 88.7 + ], + "up_blocks.1.attentions.2.proj_in": [ + 88.2, + 88.6, + 88.5, + 88.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 88.5, + 88.9, + 88.4, + 88.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 88.5, + 88.8, + 88.5, + 88.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 88.1, + 88.4, + 88.6, + 88.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 88.2, + 88.5, + 88.4, + 88.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 88.6, + 88.8, + 88.9, + 88.5 + ], + 
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 88.3, + 89.0, + 88.5, + 88.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 88.2, + 88.7, + 88.7, + 88.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 88.3, + 88.8, + 88.7, + 88.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 88.1, + 88.6, + 88.5, + 88.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 88.5, + 88.8, + 88.9, + 88.6 + ], + "up_blocks.1.attentions.2.proj_out": [ + 88.2, + 88.3, + 88.6, + 88.5 + ], + "up_blocks.1.resnets.0.conv1": [ + 88.4, + 88.8, + 88.6, + 88.5 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 88.3, + 88.7, + 88.8, + 88.7 + ], + "up_blocks.1.resnets.0.conv2": [ + 88.4, + 88.8, + 88.6, + 88.6 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 88.5, + 88.7, + 88.7, + 88.5 + ], + "up_blocks.1.resnets.1.conv1": [ + 88.6, + 88.6, + 88.6, + 88.4 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 88.3, + 88.8, + 88.6, + 88.5 + ], + "up_blocks.1.resnets.1.conv2": [ + 88.5, + 88.8, + 88.6, + 88.6 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 88.1, + 88.8, + 88.5, + 88.5 + ], + "up_blocks.1.resnets.2.conv1": [ + 88.1, + 88.8, + 89.0, + 88.6 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 88.2, + 88.8, + 88.9, + 88.6 + ], + "up_blocks.1.resnets.2.conv2": [ + 88.5, + 88.7, + 88.6, + 88.6 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 88.1, + 88.6, + 88.5, + 88.5 + ], + "up_blocks.1.upsamplers.0.conv": [ + 88.1, + 88.5, + 88.7, + 88.7 + ], + "up_blocks.2.attentions.0.proj_in": [ + 87.9, + 88.6, + 88.9, + 88.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.6, + 88.9, + 88.9, + 88.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.5, + 88.7, + 88.6, + 88.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.4, + 88.3, + 88.4, + 88.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.3, + 88.7, + 88.4, + 88.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.4, + 89.0, + 88.7, + 88.7 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.2, + 88.7, + 88.6, + 88.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.9, + 88.5, + 88.7, + 88.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.5, + 88.5, + 88.8, + 88.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.5, + 88.8, + 88.6, + 88.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.3, + 88.4, + 88.6, + 88.5 + ], + "up_blocks.2.attentions.0.proj_out": [ + 88.1, + 88.7, + 88.5, + 88.5 + ], + "up_blocks.2.attentions.1.proj_in": [ + 87.9, + 88.1, + 88.3, + 88.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.1, + 89.0, + 88.7, + 88.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.2, + 88.8, + 88.7, + 88.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.4, + 88.5, + 88.8, + 88.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 88.4, + 88.3, + 88.6, + 88.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.7, + 88.9, + 88.7, + 88.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.3, + 88.8, + 88.4, + 88.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.2, + 88.3, + 88.6, + 88.6 + ], + 
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.3, + 88.6, + 88.6, + 88.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 88.0, + 88.2, + 88.5, + 88.3 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.1, + 88.7, + 88.3, + 88.5 + ], + "up_blocks.2.attentions.1.proj_out": [ + 88.2, + 88.5, + 88.5, + 88.5 + ], + "up_blocks.2.attentions.2.proj_in": [ + 87.9, + 88.3, + 88.2, + 88.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 88.1, + 88.7, + 88.5, + 88.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 88.3, + 88.7, + 88.6, + 88.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 88.0, + 88.1, + 88.3, + 88.0 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 88.0, + 88.2, + 88.2, + 88.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 88.4, + 88.7, + 88.6, + 88.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 88.3, + 88.7, + 88.4, + 88.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 88.1, + 88.5, + 88.5, + 88.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 88.4, + 88.6, + 88.5, + 88.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 87.8, + 88.0, + 88.4, + 88.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 88.0, + 88.3, + 88.4, + 88.5 + ], + "up_blocks.2.attentions.2.proj_out": [ + 87.7, + 88.5, + 88.2, + 88.0 + ], + "up_blocks.2.resnets.0.conv1": [ + 88.4, + 88.7, + 88.4, + 88.8 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 88.7, + 88.8, + 88.7, + 88.5 + ], + "up_blocks.2.resnets.0.conv2": [ + 88.3, + 88.4, + 88.7, + 88.5 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 88.3, + 88.3, + 88.4, + 88.4 + ], + "up_blocks.2.resnets.1.conv1": [ + 87.9, + 88.5, + 88.7, + 88.3 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 88.3, + 88.7, + 88.6, + 88.4 + ], + "up_blocks.2.resnets.1.conv2": [ + 88.0, + 88.6, + 88.6, + 88.6 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 87.9, + 88.2, + 88.4, + 88.1 + ], + "up_blocks.2.resnets.2.conv1": [ + 88.2, + 88.3, + 88.4, + 88.5 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 88.3, + 88.4, + 88.7, + 88.6 + ], + "up_blocks.2.resnets.2.conv2": [ + 88.0, + 88.4, + 88.5, + 88.5 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 88.0, + 88.4, + 88.6, + 88.1 + ], + "up_blocks.2.upsamplers.0.conv": [ + 88.0, + 88.2, + 88.4, + 88.1 + ], + "up_blocks.3.attentions.0.proj_in": [ + 88.0, + 88.3, + 88.4, + 88.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.1, + 88.2, + 88.5, + 88.3 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.9, + 88.5, + 88.3, + 88.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.2, + 88.1, + 88.7, + 88.3 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 87.9, + 88.3, + 88.4, + 88.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.3, + 88.7, + 88.5, + 88.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.6, + 88.5, + 88.5, + 88.8 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.1, + 88.4, + 88.5, + 88.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.3, + 88.3, + 88.5, + 88.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 87.9, + 88.3, + 88.3, + 88.3 + ], + 
"up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 87.6, + 88.2, + 88.5, + 88.5 + ], + "up_blocks.3.attentions.0.proj_out": [ + 87.8, + 88.6, + 88.4, + 88.1 + ], + "up_blocks.3.attentions.1.proj_in": [ + 87.8, + 88.3, + 88.1, + 88.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.0, + 88.3, + 88.6, + 88.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.1, + 88.3, + 88.6, + 88.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.0, + 88.1, + 88.3, + 88.0 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 87.7, + 88.3, + 88.5, + 88.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.3, + 88.6, + 88.8, + 88.7 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.4, + 88.4, + 88.7, + 88.7 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.2, + 88.4, + 88.5, + 88.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.3, + 88.7, + 88.7, + 88.6 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 87.8, + 88.3, + 88.1, + 88.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.2, + 88.5, + 88.0, + 88.3 + ], + "up_blocks.3.attentions.1.proj_out": [ + 87.9, + 88.6, + 88.4, + 88.4 + ], + "up_blocks.3.attentions.2.proj_in": [ + 88.2, + 88.1, + 88.1, + 88.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 88.3, + 88.5, + 88.6, + 88.7 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 88.3, + 88.8, + 88.5, + 88.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 88.2, + 88.6, + 88.4, + 88.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 88.1, + 88.4, + 88.6, + 88.1 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 88.2, + 88.6, + 88.7, + 88.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 88.3, + 88.6, + 88.6, + 88.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 88.0, + 88.4, + 88.8, + 88.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 88.2, + 88.1, + 88.4, + 88.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 88.2, + 88.4, + 88.4, + 88.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 88.1, + 88.6, + 88.3, + 88.6 + ], + "up_blocks.3.attentions.2.proj_out": [ + 88.2, + 88.5, + 88.5, + 88.4 + ], + "up_blocks.3.resnets.0.conv1": [ + 88.0, + 88.1, + 88.4, + 88.1 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 88.3, + 88.4, + 88.4, + 88.4 + ], + "up_blocks.3.resnets.0.conv2": [ + 87.9, + 88.2, + 88.3, + 88.3 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 87.9, + 88.2, + 88.1, + 88.1 + ], + "up_blocks.3.resnets.1.conv1": [ + 88.0, + 88.0, + 88.2, + 88.0 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 88.3, + 88.5, + 88.3, + 88.7 + ], + "up_blocks.3.resnets.1.conv2": [ + 88.0, + 88.0, + 88.1, + 88.4 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 87.7, + 87.9, + 88.3, + 87.9 + ], + "up_blocks.3.resnets.2.conv1": [ + 87.8, + 88.0, + 88.2, + 88.0 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 88.1, + 88.6, + 88.3, + 88.4 + ], + "up_blocks.3.resnets.2.conv2": [ + 87.8, + 88.4, + 88.2, + 88.2 + ], + "up_blocks.3.resnets.2.conv_shortcut": [ + 88.2, + 88.3, + 88.2, + 88.5 + ], + "mid_block.attentions.0.proj_in": [ + 88.5, + 88.6, + 88.6, + 88.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.5, + 88.6, + 
88.8, + 88.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.5, + 89.0, + 88.7, + 88.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.3, + 88.7, + 88.6, + 88.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.4, + 88.6, + 88.4, + 88.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.3, + 88.4, + 88.8, + 88.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.7, + 88.8, + 88.5, + 88.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.7, + 89.1, + 88.9, + 88.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.6, + 88.7, + 88.8, + 88.4 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.3, + 88.8, + 88.7, + 88.8 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.4, + 88.7, + 88.7, + 88.5 + ], + "mid_block.attentions.0.proj_out": [ + 88.6, + 88.9, + 88.8, + 88.7 + ], + "mid_block.resnets.0.conv1": [ + 88.5, + 88.7, + 88.8, + 88.5 + ], + "mid_block.resnets.0.time_emb_proj": [ + 88.5, + 88.8, + 88.8, + 88.5 + ], + "mid_block.resnets.0.conv2": [ + 88.3, + 89.0, + 88.7, + 88.5 + ], + "mid_block.resnets.1.conv1": [ + 88.4, + 88.6, + 88.6, + 88.4 + ], + "mid_block.resnets.1.time_emb_proj": [ + 88.2, + 88.7, + 88.8, + 88.7 + ], + "mid_block.resnets.1.conv2": [ + 88.4, + 88.7, + 88.5, + 88.5 + ] + } + }, + "cumulative": { + "1": { + "metadata": { + "nbits": 1, + "out_ngroups": 1, + "in_ngroups": 1, + "cumulative": true, + "candidates": [ + "up_blocks.0.resnets.2.time_emb_proj", + "mid_block.resnets.1.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.0.resnets.0.time_emb_proj", + "up_blocks.0.resnets.1.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q", + "mid_block.resnets.0.time_emb_proj", + "down_blocks.3.resnets.0.time_emb_proj", + "down_blocks.3.resnets.1.time_emb_proj", + "up_blocks.0.resnets.1.conv_shortcut", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.resnets.0.conv_shortcut", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.resnets.1.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.2.resnets.0.time_emb_proj", + "mid_block.resnets.1.conv1", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.0.resnets.2.conv_shortcut", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "mid_block.attentions.0.proj_in", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.1.resnets.0.time_emb_proj", + "down_blocks.3.resnets.0.conv1", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v", + "mid_block.resnets.0.conv2", + "mid_block.resnets.0.conv1", + "mid_block.attentions.0.proj_out", + "down_blocks.3.resnets.1.conv2", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.3.resnets.0.conv2", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.0.resnets.2.conv2", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + 
"up_blocks.1.resnets.2.time_emb_proj", + "mid_block.resnets.1.conv2", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.3.resnets.1.conv1", + "up_blocks.0.upsamplers.0.conv", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.resnets.2.conv2", + "up_blocks.0.resnets.0.conv1", + "up_blocks.0.resnets.2.conv1", + "down_blocks.2.resnets.1.time_emb_proj", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.0.resnets.1.conv1", + "up_blocks.0.resnets.0.conv2", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.resnets.0.conv1", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.0.resnets.1.conv2", + "up_blocks.1.resnets.1.conv1", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.2.downsamplers.0.conv", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.1.resnets.0.conv2", + "up_blocks.1.attentions.0.proj_out", + "up_blocks.1.attentions.1.proj_out", + "up_blocks.1.resnets.1.conv2", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.2.resnets.1.conv1", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.2.attentions.1.proj_out", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.2.resnets.1.conv2", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.2.resnets.0.conv2", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.2.resnets.0.conv1", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.2.resnets.0.conv1", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.0.proj_in", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.2.resnets.0.time_emb_proj", + "down_blocks.2.attentions.0.proj_out", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.resnets.0.time_emb_proj", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q", + 
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.2.resnets.0.conv2", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.1.resnets.1.time_emb_proj", + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.2.resnets.1.time_emb_proj", + "up_blocks.1.attentions.1.proj_in", + "down_blocks.2.attentions.0.proj_in", + "down_blocks.1.resnets.0.conv2", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.1.resnets.0.time_emb_proj", + "down_blocks.1.resnets.1.conv2", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.1.proj_in", + "up_blocks.2.attentions.0.proj_out", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.1.resnets.1.conv1", + "up_blocks.1.resnets.1.conv_shortcut", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.2.resnets.2.conv1", + "down_blocks.1.attentions.1.proj_out", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.0.proj_in", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.1.proj_out", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.1.upsamplers.0.conv", + "up_blocks.1.resnets.2.conv_shortcut", + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.2.proj_in", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.0.proj_in", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.resnets.0.conv1", + "up_blocks.1.attentions.2.proj_out", + "up_blocks.2.resnets.1.conv1", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.1.resnets.0.conv_shortcut", + "up_blocks.2.resnets.1.conv2", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.2.resnets.2.conv2", + "down_blocks.1.attentions.1.proj_in", + "up_blocks.1.resnets.2.conv1", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q", + 
"up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.attentions.0.proj_out", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.2.attentions.1.proj_in", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.2.proj_in", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.resnets.1.conv_shortcut", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.2.proj_out", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.0.attentions.1.proj_out", + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.0.attentions.0.proj_out", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.resnets.2.time_emb_proj", + "down_blocks.0.resnets.0.time_emb_proj", + "down_blocks.0.resnets.1.time_emb_proj", + "up_blocks.3.attentions.0.proj_out", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.0.attentions.1.proj_in", + "down_blocks.0.attentions.0.proj_in", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k", + "down_blocks.2.resnets.0.conv_shortcut", + "down_blocks.0.resnets.1.conv1", + "up_blocks.2.resnets.0.conv_shortcut", + "down_blocks.0.resnets.0.conv1", + "down_blocks.0.resnets.1.conv2", + "down_blocks.1.downsamplers.0.conv", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k", + 
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.2.resnets.2.conv_shortcut", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.upsamplers.0.conv", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.1.resnets.0.conv_shortcut", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.3.resnets.1.time_emb_proj", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.0.downsamplers.0.conv", + "up_blocks.3.resnets.0.conv2", + "up_blocks.3.attentions.0.proj_in", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k", + "down_blocks.0.resnets.0.conv2", + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.1.proj_out", + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.3.resnets.0.conv1", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.3.attentions.2.proj_out", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.3.resnets.1.conv1", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.1.proj_in", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.3.resnets.2.time_emb_proj", + "up_blocks.3.resnets.1.conv2", + "up_blocks.3.resnets.0.conv_shortcut", + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.resnets.2.conv1", + "up_blocks.3.resnets.1.conv_shortcut", + "up_blocks.3.attentions.2.proj_in", + "up_blocks.3.resnets.2.conv2", + "up_blocks.3.resnets.2.conv_shortcut", + "time_embedding.linear_1", + "time_embedding.linear_2" + ], + "sizes": [ + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 3276800, + 1638400, + 1638400, + 3276800, + 1638400, + 1638400, + 6553600, + 1638400, + 819200, + 14745600, + 1638400, + 1638400, + 3276800, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 14745600, + 1638400, + 14745600, + 14745600, + 1638400, + 14745600, + 1638400, + 13107200, + 14745600, + 409600, + 14745600, + 1638400, + 1638400, + 1638400, + 14745600, + 1638400, + 14745600, + 14745600, + 1638400, + 14745600, + 29491200, + 29491200, + 1638400, + 1638400, + 1638400, + 29491200, + 14745600, + 409600, + 29491200, + 409600, + 14745600, + 29491200, + 6553600, + 14745600, + 409600, + 409600, + 1638400, + 1638400, + 1638400, + 983040, + 14745600, + 1638400, + 1638400, + 14745600, + 6553600, + 983040, + 6553600, + 14745600, + 13107200, + 1638400, + 1638400, + 6553600, + 1638400, + 14745600, + 1638400, + 1638400, + 14745600, + 245760, + 102400, + 1638400, + 409600, + 409600, + 7372800, + 13107200, + 409600, + 11059200, + 409600, + 102400, + 1638400, + 102400, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 409600, + 491520, + 3276800, + 409600, + 102400, + 409600, + 1638400, + 409600, + 102400, + 1638400, + 3686400, + 1638400, + 409600, + 409600, + 409600, + 13107200, + 409600, + 983040, + 491520, + 819200, + 1638400, + 819200, + 1638400, + 1638400, + 3686400, + 6553600, + 819200, + 
3686400, + 245760, + 409600, + 1638400, + 409600, + 1638400, + 491520, + 3686400, + 3276800, + 245760, + 5529600, + 409600, + 1638400, + 409600, + 409600, + 491520, + 102400, + 409600, + 983040, + 14745600, + 2457600, + 1638400, + 1638400, + 102400, + 491520, + 409600, + 13107200, + 1843200, + 1638400, + 7372800, + 1638400, + 3276800, + 3686400, + 409600, + 409600, + 3686400, + 409600, + 22118400, + 983040, + 409600, + 409600, + 1638400, + 983040, + 102400, + 102400, + 3276800, + 409600, + 409600, + 409600, + 245760, + 1638400, + 409600, + 409600, + 409600, + 983040, + 983040, + 102400, + 491520, + 3276800, + 409600, + 245760, + 3276800, + 1638400, + 102400, + 409600, + 245760, + 409600, + 3276800, + 102400, + 102400, + 102400, + 819200, + 491520, + 409600, + 409600, + 245760, + 102400, + 409600, + 409600, + 409600, + 983040, + 491520, + 102400, + 102400, + 102400, + 491520, + 819200, + 819200, + 409600, + 409600, + 102400, + 102400, + 102400, + 102400, + 245760, + 983040, + 102400, + 102400, + 819200, + 819200, + 245760, + 819200, + 921600, + 1228800, + 921600, + 921600, + 3686400, + 102400, + 102400, + 614400, + 102400, + 3686400, + 409600, + 102400, + 245760, + 204800, + 1638400, + 409600, + 102400, + 409600, + 921600, + 921600, + 102400, + 983040, + 921600, + 819200, + 102400, + 409600, + 983040, + 2764800, + 102400, + 102400, + 102400, + 102400, + 102400, + 1843200, + 102400, + 102400, + 102400, + 491520, + 409600, + 921600, + 307200, + 819200, + 1843200, + 204800, + 102400, + 921600, + 204800, + 409600, + 1638400 + ] + }, + "up_blocks.0.resnets.2.time_emb_proj": [ + 88.1, + 88.5, + 88.7, + 88.5 + ], + "mid_block.resnets.1.time_emb_proj": [ + 88.1, + 88.0, + 88.5, + 88.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.1, + 88.0, + 88.6, + 88.1 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 87.2, + 87.2, + 87.8, + 87.8 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 86.7, + 86.3, + 87.6, + 87.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 86.1, + 86.2, + 87.5, + 87.4 + ], + "mid_block.resnets.0.time_emb_proj": [ + 85.2, + 85.8, + 86.8, + 86.3 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 84.6, + 85.8, + 87.2, + 86.9 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 84.3, + 84.6, + 86.6, + 86.0 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 83.8, + 84.2, + 86.6, + 86.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 83.7, + 83.8, + 86.5, + 85.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 83.0, + 82.4, + 86.2, + 85.2 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 83.6, + 82.4, + 85.4, + 84.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 83.2, + 81.9, + 85.3, + 84.9 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 82.8, + 81.7, + 85.2, + 84.6 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 83.0, + 82.2, + 85.0, + 84.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 82.5, + 81.5, + 84.6, + 83.6 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 82.2, + 81.3, + 84.1, + 83.8 + ], + "mid_block.resnets.1.conv1": [ + 82.9, + 82.2, + 85.0, + 84.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 82.7, + 81.4, + 84.6, + 83.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 82.6, + 81.2, + 83.9, + 83.1 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 83.0, + 80.3, + 83.6, + 83.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 82.6, + 79.9, + 
83.1, + 82.5 + ], + "mid_block.attentions.0.proj_in": [ + 82.0, + 79.8, + 82.8, + 82.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 81.5, + 79.3, + 82.1, + 81.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 81.2, + 79.1, + 81.6, + 81.5 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 81.2, + 79.1, + 81.7, + 81.2 + ], + "down_blocks.3.resnets.0.conv1": [ + 81.1, + 79.4, + 81.6, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 81.2, + 78.9, + 81.9, + 81.5 + ], + "mid_block.resnets.0.conv2": [ + 80.8, + 78.5, + 82.0, + 81.5 + ], + "mid_block.resnets.0.conv1": [ + 80.7, + 78.5, + 81.9, + 81.6 + ], + "mid_block.attentions.0.proj_out": [ + 80.7, + 78.5, + 81.9, + 81.6 + ], + "down_blocks.3.resnets.1.conv2": [ + 80.5, + 78.6, + 81.9, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 80.2, + 78.3, + 81.7, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 80.2, + 78.2, + 81.6, + 81.4 + ], + "down_blocks.3.resnets.0.conv2": [ + 80.2, + 77.9, + 81.6, + 81.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 80.1, + 77.7, + 81.7, + 81.3 + ], + "up_blocks.0.resnets.2.conv2": [ + 80.1, + 77.6, + 81.2, + 80.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 80.1, + 77.7, + 81.2, + 80.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 80.0, + 77.9, + 80.7, + 80.1 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 80.1, + 77.7, + 80.4, + 80.0 + ], + "mid_block.resnets.1.conv2": [ + 80.2, + 77.4, + 80.8, + 79.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 79.9, + 77.1, + 80.4, + 79.5 + ], + "down_blocks.3.resnets.1.conv1": [ + 79.9, + 77.1, + 80.2, + 79.3 + ], + "up_blocks.0.upsamplers.0.conv": [ + 80.0, + 77.1, + 80.2, + 79.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 79.9, + 76.9, + 79.9, + 79.0 + ], + "up_blocks.1.resnets.2.conv2": [ + 79.7, + 77.0, + 80.0, + 79.1 + ], + "up_blocks.0.resnets.0.conv1": [ + 79.7, + 76.9, + 80.1, + 79.2 + ], + "up_blocks.0.resnets.2.conv1": [ + 79.7, + 76.9, + 80.0, + 79.1 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 79.4, + 77.3, + 79.9, + 79.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 79.3, + 78.0, + 79.9, + 78.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 79.1, + 77.7, + 79.7, + 78.2 + ], + "up_blocks.0.resnets.1.conv1": [ + 79.1, + 77.8, + 79.6, + 78.3 + ], + "up_blocks.0.resnets.0.conv2": [ + 79.2, + 77.8, + 79.8, + 78.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 78.9, + 77.6, + 80.4, + 79.2 + ], + "up_blocks.1.resnets.0.conv1": [ + 79.0, + 77.4, + 80.5, + 79.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 78.9, + 77.5, + 80.5, + 79.1 + ], + "up_blocks.0.resnets.1.conv2": [ + 78.9, + 77.6, + 80.5, + 79.2 + ], + "up_blocks.1.resnets.1.conv1": [ + 78.3, + 76.6, + 80.1, + 78.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 78.0, + 76.5, + 80.0, + 79.1 + ], + "down_blocks.2.downsamplers.0.conv": [ + 78.1, + 76.4, + 80.0, + 79.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 78.1, + 76.8, + 79.9, + 79.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 77.7, + 76.9, + 79.3, + 78.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 77.5, + 76.8, + 79.3, + 78.8 + ], + 
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 77.4, + 76.7, + 78.9, + 78.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 77.2, + 76.3, + 78.7, + 77.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 77.2, + 76.2, + 78.7, + 77.7 + ], + "up_blocks.1.resnets.0.conv2": [ + 77.1, + 76.1, + 79.0, + 77.9 + ], + "up_blocks.1.attentions.0.proj_out": [ + 77.2, + 76.0, + 78.8, + 78.9 + ], + "up_blocks.1.attentions.1.proj_out": [ + 77.5, + 75.9, + 78.9, + 78.9 + ], + "up_blocks.1.resnets.1.conv2": [ + 77.0, + 75.5, + 78.4, + 78.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 77.0, + 75.2, + 78.0, + 78.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 77.0, + 75.2, + 78.0, + 78.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 76.7, + 75.2, + 77.8, + 78.4 + ], + "down_blocks.2.resnets.1.conv1": [ + 76.6, + 75.2, + 77.5, + 78.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 76.6, + 75.4, + 77.2, + 77.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 76.3, + 75.0, + 77.1, + 77.6 + ], + "down_blocks.2.attentions.1.proj_out": [ + 76.2, + 75.1, + 77.0, + 77.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 76.6, + 75.5, + 78.0, + 78.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 76.5, + 75.4, + 77.8, + 77.9 + ], + "down_blocks.2.resnets.1.conv2": [ + 76.3, + 75.5, + 77.8, + 77.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 76.2, + 75.6, + 77.6, + 77.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 76.6, + 75.9, + 78.0, + 78.2 + ], + "down_blocks.2.resnets.0.conv2": [ + 76.7, + 76.1, + 78.0, + 77.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 76.5, + 76.0, + 77.5, + 77.5 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 76.2, + 75.7, + 77.2, + 77.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 76.2, + 75.6, + 77.2, + 77.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 75.8, + 75.1, + 76.8, + 76.7 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 75.4, + 74.8, + 76.3, + 76.2 + ], + "down_blocks.2.resnets.0.conv1": [ + 75.4, + 74.8, + 76.4, + 76.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 75.4, + 74.7, + 76.5, + 76.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 74.9, + 74.1, + 76.0, + 76.0 + ], + "up_blocks.2.resnets.0.conv1": [ + 74.4, + 73.6, + 75.5, + 75.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 74.1, + 73.4, + 75.2, + 75.0 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 73.8, + 72.9, + 74.4, + 74.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 73.4, + 72.4, + 74.0, + 74.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 72.8, + 72.1, + 73.4, + 73.4 + ], + "up_blocks.1.attentions.0.proj_in": [ + 73.0, + 72.2, + 73.5, + 73.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 72.9, + 72.2, + 73.6, + 73.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 72.9, + 72.2, + 73.5, + 73.8 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 72.9, + 72.1, + 73.4, + 73.5 + ], + "down_blocks.2.attentions.0.proj_out": [ + 72.8, + 72.0, + 73.4, + 73.3 + ], + 
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 72.6, + 72.0, + 73.4, + 73.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 72.7, + 71.8, + 73.3, + 73.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 72.7, + 72.0, + 73.4, + 73.5 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 72.2, + 71.6, + 73.1, + 73.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 72.0, + 71.6, + 72.9, + 73.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 72.0, + 71.5, + 72.9, + 73.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 72.0, + 71.4, + 72.8, + 73.0 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 71.5, + 71.0, + 72.4, + 72.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 71.2, + 70.4, + 71.9, + 72.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 71.1, + 70.2, + 72.0, + 72.0 + ], + "up_blocks.2.resnets.0.conv2": [ + 71.2, + 70.4, + 72.1, + 72.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 70.9, + 70.2, + 71.9, + 72.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 70.4, + 69.7, + 71.5, + 71.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 70.4, + 69.8, + 71.5, + 71.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 70.0, + 69.5, + 71.2, + 71.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 69.4, + 69.0, + 70.4, + 70.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 69.1, + 68.5, + 69.9, + 70.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 69.1, + 68.5, + 70.1, + 70.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 68.5, + 68.4, + 69.6, + 69.7 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 68.2, + 68.2, + 69.3, + 69.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 67.7, + 67.5, + 68.5, + 68.7 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 67.3, + 67.3, + 68.2, + 68.3 + ], + "up_blocks.1.attentions.1.proj_in": [ + 67.3, + 67.1, + 67.9, + 68.1 + ], + "down_blocks.2.attentions.0.proj_in": [ + 67.4, + 67.1, + 67.9, + 68.0 + ], + "down_blocks.1.resnets.0.conv2": [ + 67.3, + 67.2, + 68.0, + 68.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 67.3, + 67.1, + 67.9, + 68.1 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 67.2, + 67.1, + 67.6, + 68.0 + ], + "down_blocks.1.resnets.1.conv2": [ + 66.9, + 66.7, + 67.4, + 67.7 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 66.4, + 66.6, + 67.1, + 67.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 67.6, + 67.4, + 68.1, + 68.6 + ], + "down_blocks.2.attentions.1.proj_in": [ + 67.5, + 67.3, + 68.2, + 68.7 + ], + "up_blocks.2.attentions.0.proj_out": [ + 67.8, + 67.2, + 68.3, + 68.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 67.5, + 67.1, + 68.1, + 68.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 67.3, + 66.9, + 67.6, + 68.0 + ], + "down_blocks.1.resnets.1.conv1": [ + 67.0, + 66.6, + 67.4, + 67.9 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 67.2, + 66.7, + 67.7, + 68.1 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 67.3, + 66.8, + 67.8, + 68.1 + ], + "up_blocks.2.resnets.2.conv1": [ + 67.5, + 66.9, + 68.0, + 68.3 + ], + "down_blocks.1.attentions.1.proj_out": [ + 
67.1, + 67.0, + 67.8, + 67.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 67.0, + 66.6, + 67.8, + 67.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 67.4, + 66.7, + 67.9, + 68.0 + ], + "up_blocks.2.attentions.0.proj_in": [ + 67.2, + 66.4, + 67.8, + 68.1 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 67.0, + 66.5, + 67.8, + 68.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 66.8, + 66.3, + 67.8, + 68.2 + ], + "up_blocks.2.attentions.1.proj_out": [ + 66.8, + 66.3, + 68.1, + 68.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 67.0, + 66.2, + 68.1, + 68.5 + ], + "up_blocks.1.upsamplers.0.conv": [ + 67.7, + 67.2, + 68.5, + 68.6 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 67.5, + 67.2, + 68.4, + 68.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 66.6, + 66.7, + 67.4, + 67.6 + ], + "up_blocks.1.attentions.2.proj_in": [ + 66.6, + 67.3, + 67.2, + 67.5 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 66.6, + 67.3, + 67.3, + 67.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 66.6, + 67.4, + 67.2, + 67.4 + ], + "down_blocks.1.attentions.0.proj_in": [ + 66.4, + 67.3, + 67.1, + 67.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 66.5, + 67.4, + 67.2, + 67.5 + ], + "down_blocks.1.resnets.0.conv1": [ + 66.8, + 67.8, + 67.5, + 67.9 + ], + "up_blocks.1.attentions.2.proj_out": [ + 67.3, + 67.7, + 67.8, + 68.1 + ], + "up_blocks.2.resnets.1.conv1": [ + 66.8, + 67.2, + 67.2, + 67.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 66.9, + 67.0, + 67.1, + 67.1 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 66.9, + 66.7, + 66.9, + 67.2 + ], + "up_blocks.2.resnets.1.conv2": [ + 66.6, + 67.0, + 67.0, + 67.2 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 66.5, + 66.7, + 66.8, + 67.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 66.5, + 66.9, + 66.9, + 67.1 + ], + "up_blocks.2.resnets.2.conv2": [ + 65.3, + 65.4, + 65.7, + 65.9 + ], + "down_blocks.1.attentions.1.proj_in": [ + 65.4, + 65.5, + 65.7, + 66.0 + ], + "up_blocks.1.resnets.2.conv1": [ + 65.4, + 65.7, + 65.8, + 66.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 64.6, + 64.8, + 65.2, + 65.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 64.8, + 64.9, + 65.1, + 65.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 64.1, + 64.0, + 64.3, + 64.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 64.2, + 63.7, + 64.2, + 64.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 63.9, + 63.8, + 64.8, + 64.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 64.0, + 63.8, + 64.6, + 64.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 63.9, + 63.7, + 64.3, + 64.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 64.4, + 64.3, + 64.4, + 64.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 64.4, + 64.2, + 64.5, + 64.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 64.1, + 64.0, + 64.3, + 64.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 64.1, + 64.0, + 64.3, + 64.2 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 63.4, + 63.4, + 64.2, + 64.1 + ], + 
"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 63.2, + 63.3, + 63.9, + 63.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 63.3, + 63.3, + 63.9, + 63.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 63.3, + 63.3, + 63.9, + 63.9 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 62.8, + 62.7, + 63.1, + 62.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 62.2, + 62.6, + 62.8, + 62.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 62.3, + 62.4, + 62.8, + 62.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 62.0, + 62.4, + 62.1, + 61.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 62.0, + 62.5, + 61.9, + 61.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 61.6, + 62.4, + 61.7, + 61.7 + ], + "down_blocks.1.attentions.0.proj_out": [ + 61.4, + 62.2, + 61.7, + 61.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 61.4, + 62.3, + 61.7, + 61.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 61.4, + 62.2, + 61.5, + 61.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 61.2, + 62.2, + 61.6, + 61.3 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 61.2, + 62.1, + 61.6, + 61.3 + ], + "up_blocks.2.attentions.1.proj_in": [ + 61.8, + 62.3, + 62.0, + 61.8 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 61.7, + 62.2, + 61.9, + 61.8 + ], + "up_blocks.2.attentions.2.proj_in": [ + 61.1, + 61.3, + 60.8, + 61.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 60.9, + 61.1, + 60.7, + 60.9 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 60.6, + 61.0, + 60.5, + 60.4 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 60.2, + 60.7, + 60.1, + 59.9 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 59.9, + 60.6, + 59.9, + 59.7 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 59.4, + 59.6, + 59.2, + 59.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 59.2, + 59.7, + 59.3, + 59.4 + ], + "up_blocks.2.attentions.2.proj_out": [ + 58.8, + 59.4, + 59.3, + 59.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 58.3, + 58.8, + 59.0, + 58.9 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 58.5, + 59.0, + 59.1, + 58.9 + ], + "down_blocks.0.attentions.1.proj_out": [ + 58.6, + 59.1, + 59.1, + 58.8 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 58.6, + 58.7, + 59.2, + 59.0 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 58.2, + 58.6, + 58.8, + 58.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 58.5, + 58.8, + 59.2, + 58.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 57.8, + 58.5, + 58.7, + 58.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 57.9, + 58.4, + 58.6, + 58.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 57.8, + 58.2, + 58.5, + 58.7 + ], + "down_blocks.0.attentions.0.proj_out": [ + 58.2, + 58.6, + 58.8, + 59.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 58.2, + 58.4, + 58.8, + 59.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 58.1, + 58.3, + 58.9, + 59.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ 
+ 57.8, + 58.1, + 58.6, + 59.0 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 56.6, + 57.0, + 57.8, + 58.0 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 56.3, + 56.1, + 57.2, + 56.9 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 56.0, + 55.8, + 56.6, + 56.3 + ], + "up_blocks.3.attentions.0.proj_out": [ + 56.2, + 56.0, + 56.9, + 56.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 56.3, + 56.1, + 56.9, + 56.8 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 56.0, + 56.0, + 56.7, + 56.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 56.0, + 55.9, + 56.7, + 56.8 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 55.9, + 55.9, + 56.7, + 56.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 56.1, + 56.0, + 56.7, + 56.8 + ], + "down_blocks.0.attentions.1.proj_in": [ + 56.3, + 56.0, + 56.8, + 56.8 + ], + "down_blocks.0.attentions.0.proj_in": [ + 56.1, + 55.9, + 56.6, + 56.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 55.9, + 56.0, + 56.6, + 56.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 56.0, + 56.2, + 56.6, + 56.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 56.1, + 56.3, + 56.7, + 56.5 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 56.4, + 56.9, + 57.0, + 56.8 + ], + "down_blocks.0.resnets.1.conv1": [ + 56.2, + 56.8, + 56.7, + 56.5 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 56.3, + 58.2, + 57.9, + 58.2 + ], + "down_blocks.0.resnets.0.conv1": [ + 56.9, + 57.6, + 57.4, + 57.9 + ], + "down_blocks.0.resnets.1.conv2": [ + 57.3, + 57.7, + 57.2, + 57.8 + ], + "down_blocks.1.downsamplers.0.conv": [ + 56.9, + 57.6, + 57.4, + 57.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 56.8, + 57.5, + 57.3, + 57.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 56.8, + 57.5, + 57.4, + 57.5 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 56.9, + 57.6, + 57.3, + 57.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 57.1, + 57.2, + 57.2, + 57.5 + ], + "up_blocks.2.upsamplers.0.conv": [ + 57.2, + 57.7, + 57.4, + 57.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 57.2, + 57.6, + 57.4, + 57.6 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 56.6, + 57.4, + 56.9, + 57.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 56.6, + 57.4, + 56.9, + 57.1 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 56.6, + 57.1, + 56.9, + 57.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 56.6, + 57.6, + 57.0, + 57.0 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 57.1, + 57.8, + 57.2, + 57.0 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 56.8, + 57.6, + 57.2, + 57.0 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 56.5, + 57.4, + 57.0, + 56.8 + ], + "down_blocks.0.downsamplers.0.conv": [ + 56.5, + 57.8, + 57.1, + 56.9 + ], + "up_blocks.3.resnets.0.conv2": [ + 55.9, + 57.2, + 56.5, + 56.5 + ], + "up_blocks.3.attentions.0.proj_in": [ + 56.1, + 57.0, + 56.6, + 56.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 55.9, + 56.7, + 56.7, + 56.5 + ], + "down_blocks.0.resnets.0.conv2": [ + 56.3, + 57.0, + 57.1, + 56.9 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 56.1, + 56.5, + 56.7, + 56.4 + ], + "up_blocks.3.attentions.1.proj_out": [ + 56.2, + 
56.6, + 56.5, + 56.2 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 55.0, + 55.4, + 55.3, + 55.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 55.0, + 55.4, + 55.2, + 54.9 + ], + "up_blocks.3.resnets.0.conv1": [ + 54.4, + 54.6, + 55.0, + 54.6 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 54.4, + 54.5, + 54.8, + 54.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 53.3, + 54.0, + 54.7, + 54.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 53.3, + 54.0, + 54.4, + 54.1 + ], + "up_blocks.3.attentions.2.proj_out": [ + 52.5, + 53.2, + 53.4, + 53.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 52.9, + 53.4, + 53.8, + 53.6 + ], + "up_blocks.3.resnets.1.conv1": [ + 52.7, + 53.5, + 53.8, + 53.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 52.8, + 53.8, + 53.7, + 53.2 + ], + "up_blocks.3.attentions.1.proj_in": [ + 52.8, + 53.8, + 53.6, + 53.0 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 52.8, + 53.3, + 53.6, + 53.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 52.8, + 53.3, + 53.6, + 53.1 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 51.6, + 52.0, + 52.5, + 52.2 + ], + "up_blocks.3.resnets.1.conv2": [ + 51.3, + 52.0, + 52.5, + 52.3 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 51.4, + 51.6, + 52.2, + 51.9 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 50.6, + 50.8, + 51.4, + 51.2 + ], + "up_blocks.3.resnets.2.conv1": [ + 50.0, + 50.2, + 51.0, + 50.7 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 49.7, + 50.0, + 50.6, + 50.3 + ], + "up_blocks.3.attentions.2.proj_in": [ + 49.4, + 49.6, + 50.5, + 50.0 + ], + "up_blocks.3.resnets.2.conv2": [ + 48.5, + 48.7, + 49.5, + 49.2 + ], + "up_blocks.3.resnets.2.conv_shortcut": [ + 50.3, + 51.0, + 52.0, + 51.3 + ], + "time_embedding.linear_1": [ + 51.4, + 52.0, + 53.2, + 51.9 + ], + "time_embedding.linear_2": [ + 51.4, + 52.0, + 53.3, + 52.1 + ] + }, + "2": { + "metadata": { + "nbits": 2, + "out_ngroups": 1, + "in_ngroups": 1, + "cumulative": true, + "candidates": [ + "up_blocks.0.resnets.0.time_emb_proj", + "up_blocks.0.resnets.2.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.0.resnets.1.time_emb_proj", + "mid_block.resnets.0.time_emb_proj", + "mid_block.resnets.1.time_emb_proj", + "down_blocks.3.resnets.1.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.3.resnets.0.time_emb_proj", + "up_blocks.0.resnets.1.conv_shortcut", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.0.resnets.0.conv_shortcut", + "up_blocks.2.resnets.0.time_emb_proj", + "mid_block.resnets.0.conv2", + "up_blocks.0.resnets.2.conv_shortcut", + "mid_block.resnets.1.conv1", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + 
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.resnets.1.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.3.resnets.0.conv1", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0", + "mid_block.attentions.0.proj_out", + "down_blocks.2.resnets.1.time_emb_proj", + "mid_block.resnets.0.conv1", + "up_blocks.0.resnets.2.conv2", + "mid_block.attentions.0.proj_in", + "up_blocks.1.resnets.0.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.3.resnets.0.conv2", + "up_blocks.0.upsamplers.0.conv", + "up_blocks.1.resnets.2.time_emb_proj", + "up_blocks.1.resnets.2.conv2", + "mid_block.resnets.1.conv2", + "down_blocks.3.resnets.1.conv2", + "up_blocks.0.resnets.0.conv1", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.1.resnets.1.conv1", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.resnets.0.conv1", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.resnets.0.conv2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.0.proj_out", + "up_blocks.1.attentions.1.proj_out", + "up_blocks.1.resnets.0.conv2", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.0.resnets.1.conv2", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.2.downsamplers.0.conv", + "down_blocks.3.resnets.1.conv1", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.0.resnets.1.conv1", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.resnets.1.conv2", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.1.proj_out", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.resnets.2.conv1", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.2.resnets.0.conv2", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.3.resnets.0.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.0.proj_in", + 
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.2.attentions.1.proj_in", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.2.resnets.0.conv1", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v", + "down_blocks.2.resnets.1.conv2", + "up_blocks.2.resnets.0.conv2", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.2.resnets.0.conv1", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.resnets.0.conv2", + "down_blocks.2.attentions.0.proj_out", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.resnets.1.time_emb_proj", + "up_blocks.2.resnets.1.time_emb_proj", + "up_blocks.1.resnets.0.conv_shortcut", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.resnets.1.conv_shortcut", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.2.attentions.0.proj_out", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.resnets.1.conv2", + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.2.resnets.0.time_emb_proj", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.1.resnets.1.conv1", + "down_blocks.2.attentions.0.proj_in", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.0.proj_in", + "down_blocks.2.resnets.1.conv1", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2", + "down_blocks.1.resnets.0.time_emb_proj", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.1.proj_out", + "up_blocks.1.resnets.2.conv_shortcut", + "up_blocks.2.resnets.2.conv1", + "up_blocks.1.attentions.2.proj_out", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.1.attentions.1.proj_out", + "up_blocks.1.upsamplers.0.conv", + "down_blocks.1.resnets.0.conv1", + "down_blocks.1.attentions.0.proj_out", + 
"up_blocks.2.resnets.1.conv2", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.1.proj_in", + "up_blocks.1.resnets.2.conv1", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.2.proj_in", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.2.resnets.1.conv1", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.2.resnets.2.time_emb_proj", + "down_blocks.1.attentions.0.proj_in", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj", + "down_blocks.0.resnets.1.time_emb_proj", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.2.resnets.2.conv2", + "down_blocks.1.attentions.1.proj_in", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.0.resnets.0.time_emb_proj", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.1.proj_in", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.3.resnets.1.time_emb_proj", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.2.proj_out", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k", + "down_blocks.0.attentions.1.proj_out", + "down_blocks.0.resnets.0.conv1", + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.1.downsamplers.0.conv", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.0.attentions.0.proj_out", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + 
"up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.0.proj_out", + "down_blocks.0.attentions.0.proj_in", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.2.resnets.0.conv_shortcut", + "down_blocks.0.resnets.1.conv2", + "down_blocks.0.resnets.1.conv1", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.resnets.1.conv_shortcut", + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.2.proj_in", + "down_blocks.2.resnets.0.conv_shortcut", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.2.upsamplers.0.conv", + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.2.resnets.2.conv_shortcut", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0", + "down_blocks.0.attentions.1.proj_in", + "up_blocks.3.resnets.0.conv2", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.0.downsamplers.0.conv", + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k", + "down_blocks.0.resnets.0.conv2", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v", + "down_blocks.1.resnets.0.conv_shortcut", + "up_blocks.3.resnets.0.conv1", + "up_blocks.3.attentions.1.proj_out", + "up_blocks.3.attentions.2.proj_out", + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.0.proj_in", + "up_blocks.3.resnets.2.time_emb_proj", + "up_blocks.3.resnets.1.conv1", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.1.proj_in", + "up_blocks.3.resnets.2.conv1", + "up_blocks.3.resnets.1.conv2", + "up_blocks.3.resnets.0.conv_shortcut", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.3.resnets.2.conv2", + "up_blocks.3.resnets.1.conv_shortcut", + "up_blocks.3.attentions.2.proj_in", + "up_blocks.3.resnets.2.conv_shortcut", + "time_embedding.linear_2", + "time_embedding.linear_1" + ], + "sizes": [ + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 3276800, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 3276800, + 819200, + 14745600, + 3276800, + 14745600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 409600, + 1638400, + 409600, + 14745600, + 1638400, + 1638400, + 1638400, + 14745600, + 14745600, + 1638400, + 1638400, + 13107200, + 14745600, + 14745600, + 1638400, + 14745600, + 14745600, + 14745600, + 29491200, + 1638400, + 6553600, + 29491200, + 1638400, + 491520, + 6553600, + 102400, + 1638400, + 6553600, + 1638400, + 1638400, + 409600, + 409600, + 29491200, + 409600, + 13107200, + 14745600, + 1638400, + 1638400, + 1638400, + 14745600, + 1638400, + 
983040, + 14745600, + 6553600, + 409600, + 1638400, + 14745600, + 14745600, + 409600, + 29491200, + 1638400, + 409600, + 14745600, + 409600, + 409600, + 1638400, + 409600, + 29491200, + 409600, + 409600, + 14745600, + 102400, + 1638400, + 409600, + 409600, + 1638400, + 1638400, + 491520, + 1638400, + 1638400, + 1638400, + 1638400, + 409600, + 13107200, + 7372800, + 1638400, + 14745600, + 3686400, + 491520, + 102400, + 11059200, + 13107200, + 3686400, + 1638400, + 245760, + 409600, + 3276800, + 819200, + 819200, + 3276800, + 102400, + 3276800, + 983040, + 102400, + 409600, + 102400, + 102400, + 409600, + 1638400, + 102400, + 3686400, + 1638400, + 1638400, + 13107200, + 1638400, + 983040, + 6553600, + 983040, + 102400, + 102400, + 409600, + 3686400, + 1638400, + 409600, + 409600, + 409600, + 14745600, + 102400, + 1638400, + 819200, + 1638400, + 1638400, + 491520, + 409600, + 2457600, + 5529600, + 1638400, + 409600, + 409600, + 14745600, + 1843200, + 409600, + 3686400, + 409600, + 491520, + 102400, + 983040, + 1638400, + 1638400, + 1638400, + 22118400, + 409600, + 1638400, + 491520, + 245760, + 7372800, + 1638400, + 983040, + 409600, + 13107200, + 409600, + 102400, + 819200, + 409600, + 245760, + 245760, + 1638400, + 3276800, + 409600, + 491520, + 409600, + 102400, + 409600, + 3276800, + 409600, + 245760, + 102400, + 3686400, + 409600, + 102400, + 3276800, + 409600, + 245760, + 3276800, + 409600, + 983040, + 245760, + 983040, + 491520, + 409600, + 491520, + 409600, + 102400, + 102400, + 921600, + 409600, + 3686400, + 102400, + 102400, + 409600, + 102400, + 245760, + 102400, + 102400, + 983040, + 409600, + 102400, + 102400, + 102400, + 983040, + 1228800, + 921600, + 921600, + 409600, + 819200, + 819200, + 409600, + 819200, + 102400, + 819200, + 245760, + 3686400, + 409600, + 614400, + 102400, + 819200, + 409600, + 102400, + 921600, + 102400, + 921600, + 409600, + 245760, + 102400, + 102400, + 983040, + 921600, + 102400, + 102400, + 204800, + 2764800, + 102400, + 102400, + 819200, + 102400, + 409600, + 1843200, + 983040, + 102400, + 819200, + 102400, + 1843200, + 921600, + 307200, + 491520, + 921600, + 204800, + 102400, + 204800, + 1638400, + 409600 + ] + }, + "up_blocks.0.resnets.0.time_emb_proj": [ + 88.1, + 88.2, + 88.4, + 88.8 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 88.0, + 88.2, + 88.6, + 88.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.1, + 88.2, + 88.7, + 88.4 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 87.7, + 87.8, + 88.5, + 88.3 + ], + "mid_block.resnets.0.time_emb_proj": [ + 87.0, + 87.8, + 87.8, + 87.8 + ], + "mid_block.resnets.1.time_emb_proj": [ + 87.0, + 87.8, + 88.1, + 87.8 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 86.5, + 87.3, + 87.8, + 87.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 86.4, + 87.2, + 87.8, + 87.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 86.4, + 86.7, + 87.5, + 87.3 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 85.9, + 86.7, + 87.7, + 87.2 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 85.4, + 86.3, + 87.7, + 87.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 85.6, + 85.8, + 87.8, + 87.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 85.6, + 85.4, + 87.3, + 87.0 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 85.8, + 85.0, + 87.1, + 86.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 85.7, + 84.7, + 87.1, + 86.4 + ], + 
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 85.7, + 84.0, + 87.0, + 86.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 85.4, + 84.0, + 86.8, + 86.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 85.2, + 83.6, + 86.8, + 85.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 85.2, + 83.2, + 86.4, + 85.6 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 85.2, + 83.2, + 86.3, + 85.3 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 85.1, + 82.8, + 86.0, + 85.2 + ], + "mid_block.resnets.0.conv2": [ + 84.9, + 83.3, + 85.7, + 85.2 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 85.0, + 82.9, + 85.7, + 84.8 + ], + "mid_block.resnets.1.conv1": [ + 84.7, + 82.5, + 86.0, + 85.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 84.6, + 82.4, + 85.7, + 84.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 84.2, + 81.6, + 85.3, + 84.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 83.9, + 80.9, + 85.1, + 83.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 83.7, + 80.8, + 84.7, + 83.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 83.7, + 80.5, + 84.5, + 83.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 83.5, + 80.4, + 84.4, + 82.9 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 83.5, + 80.4, + 84.2, + 83.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 83.3, + 80.4, + 84.2, + 83.0 + ], + "down_blocks.3.resnets.0.conv1": [ + 83.3, + 80.3, + 84.2, + 83.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 83.2, + 80.1, + 84.1, + 83.4 + ], + "mid_block.attentions.0.proj_out": [ + 83.3, + 79.9, + 84.1, + 83.1 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 82.9, + 79.8, + 84.1, + 83.1 + ], + "mid_block.resnets.0.conv1": [ + 82.5, + 80.2, + 83.9, + 83.0 + ], + "up_blocks.0.resnets.2.conv2": [ + 82.2, + 80.0, + 83.9, + 83.1 + ], + "mid_block.attentions.0.proj_in": [ + 82.5, + 80.3, + 83.9, + 83.0 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 82.6, + 80.6, + 83.8, + 82.9 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 82.4, + 80.0, + 83.8, + 82.6 + ], + "down_blocks.3.resnets.0.conv2": [ + 81.9, + 79.9, + 83.6, + 82.5 + ], + "up_blocks.0.upsamplers.0.conv": [ + 81.7, + 79.5, + 83.4, + 82.1 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 81.5, + 79.4, + 83.2, + 81.8 + ], + "up_blocks.1.resnets.2.conv2": [ + 81.4, + 79.2, + 83.2, + 82.0 + ], + "mid_block.resnets.1.conv2": [ + 81.7, + 79.3, + 83.2, + 82.0 + ], + "down_blocks.3.resnets.1.conv2": [ + 81.2, + 79.3, + 83.1, + 81.8 + ], + "up_blocks.0.resnets.0.conv1": [ + 81.1, + 79.2, + 83.1, + 82.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 81.2, + 78.8, + 82.8, + 81.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 80.9, + 78.4, + 82.6, + 81.7 + ], + "up_blocks.1.resnets.1.conv1": [ + 80.3, + 77.8, + 82.7, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 80.6, + 77.7, + 82.4, + 81.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 80.7, + 77.9, + 81.9, + 80.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 80.8, + 78.1, + 82.4, + 81.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 80.6, + 78.1, + 82.4, + 81.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 80.4, 
+ 77.8, + 82.1, + 81.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 80.3, + 77.9, + 81.9, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 80.3, + 77.9, + 81.9, + 81.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 80.3, + 78.0, + 81.7, + 80.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 80.1, + 78.0, + 81.5, + 80.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 80.0, + 77.7, + 81.1, + 80.2 + ], + "up_blocks.1.resnets.0.conv1": [ + 79.6, + 77.1, + 81.0, + 80.0 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 79.3, + 76.7, + 80.6, + 79.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 79.2, + 76.9, + 80.5, + 79.8 + ], + "up_blocks.0.resnets.0.conv2": [ + 79.2, + 76.7, + 80.6, + 80.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 79.2, + 76.6, + 80.5, + 80.1 + ], + "up_blocks.1.attentions.0.proj_out": [ + 79.1, + 76.5, + 80.6, + 80.3 + ], + "up_blocks.1.attentions.1.proj_out": [ + 78.8, + 76.4, + 80.3, + 80.2 + ], + "up_blocks.1.resnets.0.conv2": [ + 78.8, + 76.3, + 80.2, + 80.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 78.9, + 76.4, + 80.3, + 79.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 78.8, + 76.3, + 80.4, + 79.8 + ], + "up_blocks.0.resnets.1.conv2": [ + 78.7, + 76.1, + 80.4, + 79.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 78.5, + 76.2, + 80.2, + 79.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 78.5, + 76.0, + 80.0, + 79.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 78.4, + 75.8, + 80.0, + 79.5 + ], + "down_blocks.2.downsamplers.0.conv": [ + 78.3, + 76.0, + 79.9, + 79.5 + ], + "down_blocks.3.resnets.1.conv1": [ + 78.4, + 76.0, + 80.0, + 79.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 78.2, + 75.9, + 79.7, + 79.2 + ], + "up_blocks.0.resnets.1.conv1": [ + 78.2, + 75.8, + 79.6, + 79.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 77.9, + 75.3, + 79.6, + 79.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 78.1, + 75.3, + 79.9, + 79.4 + ], + "up_blocks.1.resnets.1.conv2": [ + 78.1, + 75.3, + 79.8, + 79.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 77.7, + 75.0, + 79.1, + 79.0 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 77.4, + 74.8, + 78.9, + 78.7 + ], + "down_blocks.2.attentions.1.proj_out": [ + 77.3, + 74.7, + 78.7, + 78.7 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 77.3, + 75.0, + 78.7, + 78.7 + ], + "up_blocks.0.resnets.2.conv1": [ + 77.2, + 75.1, + 78.7, + 78.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 77.0, + 75.0, + 78.3, + 78.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 77.0, + 74.9, + 78.4, + 78.3 + ], + "down_blocks.2.resnets.0.conv2": [ + 76.7, + 74.9, + 78.2, + 78.0 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 76.6, + 74.7, + 77.9, + 77.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 76.8, + 74.6, + 77.9, + 77.6 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 76.8, + 74.7, + 77.8, + 77.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 76.6, + 74.8, + 77.7, + 77.6 + ], + "up_blocks.1.attentions.0.proj_in": [ + 76.8, + 75.0, + 77.9, + 
77.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 76.7, + 75.2, + 77.8, + 77.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 76.5, + 75.3, + 77.6, + 77.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 76.7, + 75.1, + 77.6, + 77.5 + ], + "down_blocks.2.attentions.1.proj_in": [ + 76.5, + 75.3, + 77.7, + 77.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 76.7, + 75.2, + 77.7, + 77.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 76.7, + 74.8, + 77.7, + 77.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 76.5, + 74.8, + 77.5, + 77.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 76.2, + 74.6, + 77.2, + 77.2 + ], + "down_blocks.2.resnets.0.conv1": [ + 76.2, + 74.4, + 77.1, + 77.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 76.4, + 74.0, + 77.4, + 77.4 + ], + "down_blocks.2.resnets.1.conv2": [ + 76.3, + 73.9, + 77.3, + 77.5 + ], + "up_blocks.2.resnets.0.conv2": [ + 76.3, + 73.9, + 77.4, + 77.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 75.6, + 73.3, + 76.9, + 76.5 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 75.5, + 73.3, + 76.8, + 76.4 + ], + "up_blocks.2.resnets.0.conv1": [ + 75.2, + 73.2, + 76.7, + 76.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 75.1, + 73.2, + 76.7, + 76.3 + ], + "down_blocks.1.resnets.0.conv2": [ + 75.0, + 73.4, + 76.4, + 76.1 + ], + "down_blocks.2.attentions.0.proj_out": [ + 74.7, + 72.9, + 76.3, + 76.0 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 74.7, + 72.8, + 76.2, + 75.7 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 74.5, + 72.5, + 75.9, + 75.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 74.6, + 72.8, + 76.0, + 75.6 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 74.3, + 72.8, + 75.8, + 75.4 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 73.9, + 72.3, + 75.5, + 75.2 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 73.7, + 72.6, + 75.4, + 75.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 73.6, + 72.5, + 75.2, + 75.2 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 73.6, + 72.7, + 74.8, + 75.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 73.6, + 72.6, + 74.8, + 75.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 73.6, + 72.6, + 75.0, + 75.3 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 73.7, + 72.6, + 74.9, + 75.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 73.6, + 72.3, + 74.7, + 75.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 73.5, + 72.3, + 74.2, + 75.1 + ], + "up_blocks.2.attentions.0.proj_out": [ + 73.4, + 72.1, + 74.3, + 74.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 73.0, + 71.9, + 74.2, + 74.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 72.8, + 71.7, + 73.8, + 74.3 + ], + "down_blocks.1.resnets.1.conv2": [ + 72.5, + 71.7, + 73.8, + 74.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 72.3, + 71.0, + 73.3, + 74.0 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 72.0, + 71.0, + 73.5, + 73.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 72.9, + 71.2, + 73.3, + 74.0 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 72.7, + 71.0, + 72.9, + 73.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 72.7, + 71.1, + 72.9, + 72.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 72.7, + 71.0, + 72.9, + 72.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 72.7, + 71.1, + 72.5, + 72.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 72.5, + 71.0, + 72.5, + 72.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 72.4, + 70.7, + 72.4, + 72.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 72.3, + 70.6, + 72.0, + 71.8 + ], + "down_blocks.1.resnets.1.conv1": [ + 72.4, + 70.8, + 72.2, + 72.1 + ], + "down_blocks.2.attentions.0.proj_in": [ + 72.2, + 70.5, + 72.1, + 71.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 72.0, + 70.2, + 71.5, + 71.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 71.8, + 69.8, + 71.3, + 71.1 + ], + "up_blocks.2.attentions.0.proj_in": [ + 72.1, + 69.7, + 71.5, + 71.0 + ], + "down_blocks.2.resnets.1.conv1": [ + 72.0, + 69.8, + 71.6, + 71.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 71.9, + 70.0, + 71.4, + 70.9 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 71.9, + 70.2, + 71.2, + 70.9 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 71.5, + 70.6, + 70.8, + 70.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 71.5, + 70.5, + 70.8, + 70.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 71.5, + 71.0, + 69.8, + 70.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 71.0, + 70.2, + 69.4, + 69.5 + ], + "up_blocks.2.attentions.1.proj_out": [ + 70.9, + 69.9, + 68.9, + 69.2 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 70.7, + 70.1, + 68.9, + 69.4 + ], + "up_blocks.2.resnets.2.conv1": [ + 70.9, + 70.4, + 69.2, + 69.9 + ], + "up_blocks.1.attentions.2.proj_out": [ + 70.9, + 69.8, + 69.6, + 70.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 70.4, + 69.6, + 68.9, + 69.4 + ], + "down_blocks.1.attentions.1.proj_out": [ + 70.2, + 69.6, + 68.9, + 69.2 + ], + "up_blocks.1.upsamplers.0.conv": [ + 70.4, + 69.7, + 69.7, + 70.1 + ], + "down_blocks.1.resnets.0.conv1": [ + 70.4, + 69.8, + 69.8, + 70.1 + ], + "down_blocks.1.attentions.0.proj_out": [ + 70.4, + 69.8, + 69.6, + 70.2 + ], + "up_blocks.2.resnets.1.conv2": [ + 71.0, + 70.4, + 70.2, + 70.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 70.5, + 70.4, + 70.0, + 70.0 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 70.0, + 70.4, + 69.8, + 70.2 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 69.8, + 70.3, + 69.1, + 69.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 69.6, + 70.5, + 68.6, + 69.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 69.4, + 69.9, + 68.1, + 69.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 69.6, + 69.5, + 68.3, + 69.2 + ], + "up_blocks.1.attentions.1.proj_in": [ + 69.1, + 68.4, + 67.8, + 68.6 + ], + "up_blocks.1.resnets.2.conv1": [ + 68.8, + 66.8, + 67.1, + 67.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 68.2, + 66.6, + 66.9, + 67.3 + ], + "up_blocks.1.attentions.2.proj_in": [ + 68.3, + 66.2, + 66.4, + 66.9 + ], + 
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 67.9, + 66.0, + 66.0, + 66.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 68.0, + 66.1, + 66.2, + 66.6 + ], + "up_blocks.2.resnets.1.conv1": [ + 67.8, + 65.6, + 66.3, + 66.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 67.8, + 65.6, + 65.7, + 66.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 68.5, + 68.9, + 67.4, + 67.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 69.2, + 69.1, + 67.7, + 67.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 69.2, + 69.1, + 67.9, + 68.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 69.2, + 69.5, + 68.4, + 68.5 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 68.9, + 69.4, + 68.2, + 68.2 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 67.6, + 68.1, + 66.8, + 66.6 + ], + "down_blocks.1.attentions.0.proj_in": [ + 68.0, + 68.2, + 67.1, + 66.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 67.9, + 68.2, + 67.0, + 66.7 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 67.7, + 68.1, + 66.8, + 66.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 67.4, + 68.0, + 66.9, + 66.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 67.5, + 68.0, + 66.9, + 66.9 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 68.3, + 68.3, + 66.9, + 67.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 67.8, + 67.8, + 66.7, + 67.2 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 67.6, + 67.8, + 67.6, + 67.7 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 67.9, + 67.9, + 67.6, + 67.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 67.6, + 67.8, + 67.7, + 68.1 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 67.4, + 67.3, + 67.8, + 68.1 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 67.0, + 67.0, + 67.3, + 67.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 67.0, + 66.8, + 67.5, + 67.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 66.8, + 66.6, + 67.3, + 67.2 + ], + "up_blocks.2.resnets.2.conv2": [ + 66.2, + 66.1, + 66.6, + 66.8 + ], + "down_blocks.1.attentions.1.proj_in": [ + 65.7, + 65.6, + 66.4, + 66.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 65.7, + 65.7, + 66.2, + 66.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 65.3, + 65.2, + 65.5, + 65.4 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 65.7, + 65.6, + 65.6, + 65.7 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 65.5, + 65.5, + 65.7, + 65.9 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 65.2, + 65.5, + 65.8, + 65.9 + ], + "up_blocks.2.attentions.1.proj_in": [ + 65.2, + 65.5, + 66.0, + 66.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 64.9, + 65.0, + 65.7, + 65.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 64.9, + 65.0, + 66.2, + 65.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 64.4, + 62.9, + 65.0, + 65.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 64.2, + 62.3, + 64.9, + 65.3 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 63.6, + 61.7, + 64.4, + 64.7 + ], + 
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 63.4, + 61.7, + 64.5, + 64.5 + ], + "up_blocks.2.attentions.2.proj_out": [ + 63.3, + 62.1, + 64.3, + 64.1 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 63.3, + 62.2, + 64.4, + 64.3 + ], + "down_blocks.0.attentions.1.proj_out": [ + 63.0, + 62.2, + 64.6, + 64.3 + ], + "down_blocks.0.resnets.0.conv1": [ + 62.9, + 62.2, + 64.8, + 64.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 62.8, + 62.1, + 64.4, + 63.9 + ], + "down_blocks.1.downsamplers.0.conv": [ + 62.6, + 62.5, + 64.2, + 63.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 62.4, + 62.2, + 64.0, + 63.2 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 62.3, + 62.1, + 63.5, + 62.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 62.5, + 61.8, + 63.9, + 62.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 62.3, + 61.5, + 63.7, + 62.4 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 62.5, + 61.8, + 63.7, + 62.6 + ], + "down_blocks.0.attentions.0.proj_out": [ + 62.5, + 62.3, + 63.7, + 62.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 62.0, + 61.7, + 63.4, + 62.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 61.5, + 60.6, + 62.7, + 61.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 61.2, + 60.1, + 62.2, + 61.0 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 61.1, + 60.1, + 62.0, + 61.3 + ], + "up_blocks.3.attentions.0.proj_out": [ + 60.9, + 60.1, + 62.0, + 61.1 + ], + "down_blocks.0.attentions.0.proj_in": [ + 60.9, + 60.2, + 61.8, + 60.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 60.9, + 60.1, + 61.4, + 60.7 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 60.5, + 59.8, + 61.0, + 61.0 + ], + "down_blocks.0.resnets.1.conv2": [ + 60.2, + 59.6, + 60.5, + 60.5 + ], + "down_blocks.0.resnets.1.conv1": [ + 60.4, + 59.7, + 60.5, + 60.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 60.2, + 59.3, + 60.2, + 60.5 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 60.2, + 59.6, + 60.3, + 60.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 59.7, + 59.3, + 60.1, + 60.2 + ], + "up_blocks.2.attentions.2.proj_in": [ + 59.2, + 58.9, + 60.2, + 60.1 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 59.4, + 59.2, + 59.6, + 59.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 59.2, + 59.2, + 59.4, + 59.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 59.2, + 59.5, + 59.7, + 59.6 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 59.2, + 59.3, + 59.4, + 59.4 + ], + "up_blocks.2.upsamplers.0.conv": [ + 59.2, + 59.5, + 59.5, + 60.0 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 58.9, + 59.0, + 59.0, + 59.5 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 59.2, + 59.0, + 58.8, + 59.6 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 59.1, + 58.9, + 58.7, + 59.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 59.1, + 58.7, + 58.7, + 59.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 58.8, + 59.1, + 59.1, + 59.5 + ], + "down_blocks.0.attentions.1.proj_in": [ + 58.5, + 59.4, + 59.6, + 59.6 + ], + "up_blocks.3.resnets.0.conv2": [ + 58.3, + 59.3, + 59.4, + 59.6 + ], + 
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 57.9, + 58.9, + 59.2, + 59.1 + ], + "down_blocks.0.downsamplers.0.conv": [ + 57.8, + 58.7, + 58.9, + 59.0 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 57.8, + 58.7, + 58.8, + 59.0 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 57.7, + 58.6, + 58.6, + 58.8 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 57.5, + 58.4, + 58.6, + 58.7 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 57.9, + 58.7, + 58.8, + 58.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 58.3, + 58.6, + 59.2, + 58.8 + ], + "down_blocks.0.resnets.0.conv2": [ + 57.8, + 58.3, + 59.3, + 58.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 57.7, + 58.5, + 59.4, + 58.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 57.6, + 58.2, + 58.8, + 58.0 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 56.9, + 57.9, + 58.3, + 57.2 + ], + "up_blocks.3.resnets.0.conv1": [ + 56.3, + 57.9, + 58.0, + 57.0 + ], + "up_blocks.3.attentions.1.proj_out": [ + 56.2, + 57.5, + 57.2, + 56.6 + ], + "up_blocks.3.attentions.2.proj_out": [ + 56.0, + 57.1, + 56.8, + 56.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 55.6, + 56.5, + 56.0, + 55.7 + ], + "up_blocks.3.attentions.0.proj_in": [ + 55.8, + 56.5, + 55.4, + 55.4 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 55.0, + 55.7, + 54.5, + 54.7 + ], + "up_blocks.3.resnets.1.conv1": [ + 55.3, + 56.0, + 54.6, + 54.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 55.0, + 56.0, + 55.3, + 55.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 54.6, + 55.4, + 55.1, + 54.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 54.4, + 54.8, + 54.6, + 54.7 + ], + "up_blocks.3.attentions.1.proj_in": [ + 54.2, + 54.7, + 54.7, + 54.7 + ], + "up_blocks.3.resnets.2.conv1": [ + 54.6, + 54.9, + 54.3, + 54.4 + ], + "up_blocks.3.resnets.1.conv2": [ + 54.8, + 55.1, + 54.4, + 54.4 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 54.9, + 55.0, + 55.3, + 54.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 54.7, + 55.1, + 55.3, + 54.6 + ], + "up_blocks.3.resnets.2.conv2": [ + 54.0, + 54.5, + 54.8, + 54.1 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 54.3, + 54.4, + 54.8, + 54.5 + ], + "up_blocks.3.attentions.2.proj_in": [ + 53.9, + 54.0, + 54.8, + 54.5 + ], + "up_blocks.3.resnets.2.conv_shortcut": [ + 55.0, + 54.7, + 55.8, + 55.4 + ], + "time_embedding.linear_2": [ + 52.0, + 51.3, + 51.7, + 51.6 + ], + "time_embedding.linear_1": [ + 52.4, + 50.4, + 51.1, + 51.6 + ] + }, + "4": { + "metadata": { + "nbits": 4, + "out_ngroups": 1, + "in_ngroups": 1, + "cumulative": true, + "candidates": [ + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.resnets.0.time_emb_proj", + "down_blocks.3.resnets.0.time_emb_proj", + "up_blocks.0.resnets.1.time_emb_proj", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.0.ff.net.2", + "mid_block.resnets.0.conv1", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.0.resnets.2.time_emb_proj", 
+ "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "mid_block.resnets.1.time_emb_proj", + "down_blocks.3.resnets.1.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q", + "mid_block.resnets.0.time_emb_proj", + "mid_block.resnets.0.conv2", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "mid_block.resnets.1.conv2", + "mid_block.attentions.0.proj_out", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.0.resnets.0.conv_shortcut", + "mid_block.resnets.1.conv1", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.0.resnets.1.conv_shortcut", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.0.resnets.0.conv1", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.2.resnets.1.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.0.resnets.2.conv2", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.3.resnets.0.conv1", + "up_blocks.0.resnets.2.conv_shortcut", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.3.resnets.1.conv2", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.3.resnets.0.conv2", + "up_blocks.0.resnets.1.conv2", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.2.resnets.0.time_emb_proj", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.resnets.1.time_emb_proj", + "down_blocks.3.resnets.1.conv1", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.2.downsamplers.0.conv", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.0.upsamplers.0.conv", + "up_blocks.1.resnets.2.conv2", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.2.resnets.0.conv2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.resnets.2.time_emb_proj", + "up_blocks.1.resnets.0.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.2.resnets.0.time_emb_proj", + "up_blocks.1.resnets.0.conv1", + "down_blocks.1.resnets.0.time_emb_proj", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q", + 
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.1.resnets.1.conv2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.2.resnets.1.time_emb_proj", + "up_blocks.1.resnets.2.conv1", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.3.resnets.0.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.0.resnets.0.conv2", + "up_blocks.0.resnets.1.conv1", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.resnets.1.time_emb_proj", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.resnets.0.conv2", + "up_blocks.1.attentions.1.proj_out", + "up_blocks.1.attentions.0.proj_out", + "mid_block.attentions.0.proj_in", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.resnets.1.conv1", + "down_blocks.2.resnets.1.conv2", + "up_blocks.1.resnets.0.conv2", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.1.proj_in", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.2.resnets.2.time_emb_proj", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.2.resnets.0.conv2", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.2.attentions.1.proj_out", + "up_blocks.2.resnets.0.conv1", + "down_blocks.2.attentions.0.proj_out", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.1.resnets.0.conv_shortcut", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.2.attentions.0.proj_out", + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.3.resnets.1.time_emb_proj", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.resnets.2.conv1", + "up_blocks.1.attentions.0.proj_in", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v", + 
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.2.proj_out", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.resnets.1.conv_shortcut", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.2.resnets.1.conv2", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.0.resnets.1.time_emb_proj", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.1.resnets.0.conv1", + "down_blocks.1.attentions.1.proj_out", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.2.resnets.1.conv1", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.1.proj_out", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.1.proj_in", + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.2.resnets.2.conv2", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.0.proj_in", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.0.resnets.0.time_emb_proj", + "up_blocks.2.resnets.2.conv1", + "down_blocks.2.attentions.0.proj_in", + "down_blocks.1.attentions.0.proj_in", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.resnets.1.conv1", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.0.proj_out", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.resnets.1.conv1", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.proj_in", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.upsamplers.0.conv", + "down_blocks.2.resnets.0.conv1", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.resnets.1.conv2", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.1.proj_in", + "up_blocks.1.attentions.2.proj_in", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v", + 
"up_blocks.1.resnets.2.conv_shortcut", + "up_blocks.3.resnets.2.time_emb_proj", + "down_blocks.0.resnets.0.conv1", + "time_embedding.linear_1", + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.2.proj_in", + "up_blocks.2.attentions.2.proj_out", + "down_blocks.0.attentions.1.proj_out", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.0.resnets.1.conv1", + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.2.resnets.0.conv_shortcut", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.2.upsamplers.0.conv", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.0.attentions.0.proj_in", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.0.resnets.1.conv2", + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.0.attentions.0.proj_out", + "up_blocks.2.resnets.0.conv_shortcut", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.resnets.1.conv_shortcut", + "up_blocks.3.attentions.0.proj_out", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0", + "time_embedding.linear_2", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.resnets.2.conv_shortcut", + "up_blocks.3.resnets.0.conv2", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.0.resnets.0.conv2", + "down_blocks.0.downsamplers.0.conv", + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.1.proj_out", + "up_blocks.3.resnets.0.conv1", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.0.proj_in", + "up_blocks.3.attentions.2.proj_out", + "down_blocks.1.downsamplers.0.conv", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v", + "down_blocks.0.attentions.1.proj_in", + "down_blocks.1.resnets.0.conv_shortcut", + "up_blocks.3.resnets.1.conv1", + "up_blocks.3.resnets.1.conv2", + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.resnets.2.conv1", + "up_blocks.3.attentions.1.proj_in", + "up_blocks.3.resnets.0.conv_shortcut", + "up_blocks.3.resnets.2.conv2", + "up_blocks.3.resnets.1.conv_shortcut", + "up_blocks.3.attentions.2.proj_in", + "up_blocks.3.resnets.2.conv_shortcut" + ], + "sizes": [ + 491520, + 1638400, + 409600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 409600, + 6553600, + 14745600, + 409600, + 1638400, + 1638400, + 1638400, 
+ 1638400, + 1638400, + 1638400, + 14745600, + 1638400, + 409600, + 14745600, + 1638400, + 1638400, + 3276800, + 14745600, + 1638400, + 1638400, + 3276800, + 1638400, + 29491200, + 1638400, + 1638400, + 1638400, + 14745600, + 1638400, + 491520, + 14745600, + 3276800, + 1638400, + 1638400, + 14745600, + 491520, + 1638400, + 491520, + 13107200, + 491520, + 102400, + 14745600, + 14745600, + 1638400, + 409600, + 819200, + 1638400, + 1638400, + 1638400, + 1638400, + 14745600, + 1638400, + 14745600, + 409600, + 14745600, + 14745600, + 409600, + 409600, + 6553600, + 102400, + 1638400, + 409600, + 14745600, + 1638400, + 1638400, + 1638400, + 409600, + 13107200, + 1638400, + 29491200, + 819200, + 13107200, + 983040, + 409600, + 1638400, + 983040, + 14745600, + 1638400, + 819200, + 22118400, + 409600, + 409600, + 409600, + 14745600, + 29491200, + 6553600, + 409600, + 819200, + 491520, + 1638400, + 1638400, + 409600, + 3686400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 29491200, + 14745600, + 14745600, + 491520, + 409600, + 1638400, + 102400, + 1638400, + 1638400, + 819200, + 1638400, + 102400, + 6553600, + 3686400, + 409600, + 102400, + 983040, + 409600, + 491520, + 491520, + 491520, + 1638400, + 245760, + 6553600, + 1638400, + 11059200, + 1638400, + 983040, + 3276800, + 409600, + 983040, + 102400, + 409600, + 1638400, + 409600, + 409600, + 13107200, + 29491200, + 1638400, + 3276800, + 983040, + 983040, + 409600, + 13107200, + 1638400, + 1638400, + 245760, + 245760, + 102400, + 3276800, + 409600, + 1638400, + 3686400, + 1638400, + 245760, + 102400, + 102400, + 102400, + 409600, + 983040, + 245760, + 1638400, + 409600, + 1843200, + 409600, + 983040, + 14745600, + 409600, + 409600, + 102400, + 1638400, + 1638400, + 3686400, + 409600, + 245760, + 409600, + 102400, + 409600, + 5529600, + 1638400, + 409600, + 245760, + 409600, + 7372800, + 409600, + 409600, + 1638400, + 3686400, + 1638400, + 102400, + 983040, + 102400, + 3276800, + 102400, + 409600, + 3276800, + 102400, + 14745600, + 7372800, + 13107200, + 409600, + 3686400, + 1638400, + 102400, + 409600, + 1638400, + 245760, + 2457600, + 409600, + 921600, + 409600, + 409600, + 409600, + 409600, + 102400, + 983040, + 409600, + 921600, + 409600, + 102400, + 819200, + 409600, + 3686400, + 102400, + 102400, + 245760, + 102400, + 245760, + 3276800, + 102400, + 921600, + 409600, + 102400, + 3276800, + 819200, + 102400, + 1228800, + 409600, + 819200, + 102400, + 102400, + 1638400, + 102400, + 102400, + 819200, + 819200, + 614400, + 921600, + 983040, + 921600, + 921600, + 409600, + 102400, + 102400, + 2764800, + 102400, + 102400, + 102400, + 3686400, + 102400, + 102400, + 204800, + 1843200, + 921600, + 819200, + 819200, + 102400, + 102400, + 1843200, + 102400, + 307200, + 921600, + 204800, + 102400, + 204800 + ] + }, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.0, + 88.5, + 88.5, + 88.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.4, + 88.6, + 88.6, + 88.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 87.8, + 88.2, + 88.0, + 88.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.0, + 88.1, + 88.4, + 88.4 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 88.0, + 88.2, + 88.3, + 88.3 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 88.1, + 88.1, + 88.0, + 88.3 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 87.9, + 88.1, + 88.2, + 88.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 87.6, + 87.8, + 88.0, + 88.2 + ], + 
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 88.0, + 88.2, + 88.2, + 88.2 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 87.9, + 87.9, + 87.9, + 88.4 + ], + "mid_block.resnets.0.conv1": [ + 87.9, + 87.8, + 88.1, + 88.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 87.8, + 87.9, + 88.0, + 88.3 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 88.0, + 87.8, + 88.1, + 88.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.7, + 87.9, + 88.1, + 88.4 + ], + "mid_block.resnets.1.time_emb_proj": [ + 87.5, + 87.7, + 87.9, + 88.1 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 87.6, + 87.8, + 87.9, + 88.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.5, + 88.0, + 87.8, + 88.4 + ], + "mid_block.resnets.0.time_emb_proj": [ + 87.8, + 87.7, + 87.9, + 88.3 + ], + "mid_block.resnets.0.conv2": [ + 87.5, + 87.9, + 88.0, + 88.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 87.1, + 87.6, + 87.9, + 88.2 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 87.5, + 87.8, + 87.8, + 88.3 + ], + "mid_block.resnets.1.conv2": [ + 87.5, + 87.4, + 87.8, + 88.1 + ], + "mid_block.attentions.0.proj_out": [ + 87.3, + 87.1, + 87.7, + 88.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 87.6, + 87.5, + 87.8, + 88.1 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 87.3, + 87.5, + 87.6, + 88.0 + ], + "mid_block.resnets.1.conv1": [ + 87.4, + 87.0, + 87.5, + 88.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.1, + 86.8, + 87.7, + 87.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 87.2, + 86.7, + 87.6, + 88.0 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 87.0, + 86.6, + 87.5, + 88.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.2, + 86.6, + 87.7, + 87.8 + ], + "up_blocks.0.resnets.0.conv1": [ + 87.0, + 86.7, + 87.5, + 88.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.1, + 86.8, + 87.3, + 88.1 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 86.6, + 86.8, + 87.6, + 87.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 87.0, + 86.0, + 87.6, + 87.8 + ], + "up_blocks.0.resnets.2.conv2": [ + 87.2, + 86.6, + 87.5, + 88.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 86.9, + 86.2, + 87.4, + 87.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 86.9, + 86.0, + 87.0, + 87.7 + ], + "down_blocks.3.resnets.0.conv1": [ + 86.5, + 85.7, + 87.4, + 87.6 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 86.8, + 86.0, + 87.3, + 87.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 86.8, + 85.8, + 87.3, + 87.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 86.7, + 85.7, + 87.3, + 87.6 + ], + "down_blocks.3.resnets.1.conv2": [ + 86.5, + 85.8, + 87.4, + 87.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 86.5, + 85.8, + 87.2, + 87.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 86.2, + 85.9, + 87.2, + 87.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 86.0, + 85.8, + 86.9, + 86.4 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 85.9, + 85.0, + 86.9, + 86.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 86.0, + 84.9, + 86.9, + 86.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 
85.8, + 84.8, + 86.7, + 86.5 + ], + "down_blocks.3.resnets.0.conv2": [ + 85.7, + 84.9, + 86.8, + 86.5 + ], + "up_blocks.0.resnets.1.conv2": [ + 84.9, + 85.0, + 86.7, + 86.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 85.2, + 84.9, + 86.6, + 86.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 85.0, + 85.0, + 86.5, + 86.3 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 84.9, + 85.0, + 86.6, + 86.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 84.9, + 84.8, + 86.5, + 86.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 85.0, + 84.8, + 86.5, + 86.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 84.9, + 84.8, + 86.5, + 86.2 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 84.5, + 84.7, + 86.6, + 86.3 + ], + "down_blocks.3.resnets.1.conv1": [ + 83.8, + 84.4, + 86.3, + 86.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 83.9, + 84.6, + 86.1, + 85.8 + ], + "down_blocks.2.downsamplers.0.conv": [ + 83.7, + 84.4, + 86.2, + 85.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 83.9, + 84.3, + 85.8, + 85.7 + ], + "up_blocks.0.upsamplers.0.conv": [ + 83.9, + 84.2, + 85.8, + 85.6 + ], + "up_blocks.1.resnets.2.conv2": [ + 83.9, + 84.1, + 85.8, + 85.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 83.9, + 83.9, + 85.7, + 85.2 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 83.7, + 83.9, + 85.5, + 85.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 83.7, + 83.6, + 85.4, + 85.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 83.6, + 83.5, + 85.2, + 85.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 83.6, + 83.5, + 85.5, + 84.9 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 83.6, + 83.4, + 85.3, + 84.7 + ], + "down_blocks.2.resnets.0.conv2": [ + 83.7, + 83.0, + 85.3, + 84.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 83.8, + 82.7, + 84.9, + 84.4 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 83.8, + 82.6, + 85.1, + 84.5 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 83.7, + 82.6, + 84.9, + 84.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 83.7, + 82.7, + 84.8, + 84.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 83.6, + 82.5, + 84.8, + 84.4 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 83.3, + 82.3, + 84.4, + 84.3 + ], + "up_blocks.1.resnets.0.conv1": [ + 83.5, + 82.4, + 84.5, + 84.4 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 83.2, + 82.2, + 84.6, + 84.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 83.2, + 82.6, + 84.5, + 84.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 83.2, + 82.3, + 84.6, + 84.1 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 83.1, + 82.2, + 84.4, + 84.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 83.1, + 82.1, + 84.7, + 84.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 83.0, + 81.8, + 84.3, + 83.3 + ], + "up_blocks.1.resnets.1.conv2": [ + 83.1, + 81.4, + 84.3, + 83.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.7, + 81.3, + 84.2, + 83.4 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 82.8, + 81.1, + 84.1, + 83.2 + ], + "up_blocks.1.resnets.2.conv1": [ + 82.4, + 81.1, + 84.0, + 82.8 + ], + 
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 82.6, + 80.9, + 83.8, + 82.7 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 82.4, + 80.7, + 83.7, + 82.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 82.4, + 81.0, + 83.8, + 82.7 + ], + "up_blocks.0.resnets.0.conv2": [ + 82.2, + 80.8, + 83.8, + 82.6 + ], + "up_blocks.0.resnets.1.conv1": [ + 82.1, + 80.4, + 84.0, + 82.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 82.0, + 80.0, + 83.6, + 82.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 81.8, + 80.2, + 83.6, + 82.5 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 81.7, + 80.4, + 83.4, + 82.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 81.4, + 80.5, + 83.7, + 82.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 81.6, + 80.6, + 83.5, + 82.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 81.4, + 80.5, + 83.3, + 82.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.6, + 80.4, + 83.4, + 82.2 + ], + "down_blocks.1.resnets.0.conv2": [ + 81.5, + 80.3, + 83.3, + 82.0 + ], + "up_blocks.1.attentions.1.proj_out": [ + 81.4, + 80.3, + 83.5, + 82.2 + ], + "up_blocks.1.attentions.0.proj_out": [ + 81.3, + 79.8, + 83.4, + 82.4 + ], + "mid_block.attentions.0.proj_in": [ + 81.1, + 80.4, + 83.3, + 82.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 81.3, + 80.5, + 83.3, + 82.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 81.1, + 80.3, + 83.5, + 82.5 + ], + "up_blocks.1.resnets.1.conv1": [ + 81.2, + 80.1, + 83.5, + 82.5 + ], + "down_blocks.2.resnets.1.conv2": [ + 81.0, + 80.1, + 83.2, + 82.3 + ], + "up_blocks.1.resnets.0.conv2": [ + 80.8, + 79.8, + 83.3, + 82.3 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 80.9, + 79.9, + 83.5, + 82.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 80.8, + 80.2, + 83.5, + 82.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 80.9, + 80.2, + 83.5, + 82.6 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 80.7, + 80.1, + 83.3, + 82.7 + ], + "down_blocks.2.attentions.1.proj_in": [ + 80.3, + 80.2, + 83.0, + 82.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 80.2, + 80.2, + 82.8, + 82.3 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 80.1, + 80.0, + 82.8, + 82.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 80.2, + 79.8, + 82.6, + 81.8 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 80.2, + 80.1, + 82.7, + 81.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 80.1, + 79.9, + 82.5, + 81.3 + ], + "up_blocks.2.resnets.0.conv2": [ + 80.1, + 79.7, + 82.4, + 81.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 79.9, + 79.4, + 82.2, + 81.1 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 79.8, + 79.5, + 82.1, + 81.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 79.8, + 79.5, + 81.9, + 80.9 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 79.8, + 79.5, + 81.9, + 80.9 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 79.8, + 79.3, + 81.6, + 80.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 79.6, + 79.4, + 81.6, + 80.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 79.6, + 
79.6, + 81.4, + 80.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 79.7, + 79.3, + 81.5, + 80.3 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 79.6, + 79.0, + 81.2, + 80.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 79.3, + 78.9, + 81.0, + 80.0 + ], + "down_blocks.2.attentions.1.proj_out": [ + 79.3, + 79.3, + 81.0, + 80.5 + ], + "up_blocks.2.resnets.0.conv1": [ + 79.3, + 79.0, + 81.1, + 80.1 + ], + "down_blocks.2.attentions.0.proj_out": [ + 79.4, + 78.9, + 81.1, + 80.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 78.7, + 78.7, + 80.6, + 79.9 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 78.8, + 78.9, + 80.6, + 80.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 78.7, + 78.6, + 80.6, + 79.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 78.6, + 78.3, + 80.6, + 79.7 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 78.6, + 78.6, + 80.7, + 79.8 + ], + "up_blocks.2.attentions.0.proj_out": [ + 78.7, + 78.6, + 80.7, + 80.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 78.6, + 78.7, + 80.4, + 79.8 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 78.5, + 78.6, + 80.6, + 79.8 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 78.7, + 78.8, + 80.8, + 79.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 78.7, + 78.5, + 80.5, + 80.1 + ], + "up_blocks.0.resnets.2.conv1": [ + 78.7, + 78.4, + 80.6, + 80.1 + ], + "up_blocks.1.attentions.0.proj_in": [ + 78.8, + 78.6, + 80.5, + 80.2 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 78.7, + 78.3, + 80.2, + 80.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 78.7, + 78.1, + 80.6, + 80.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 78.8, + 78.3, + 80.6, + 80.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 79.0, + 78.3, + 80.6, + 80.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 78.9, + 78.5, + 80.8, + 80.6 + ], + "up_blocks.1.attentions.2.proj_out": [ + 78.9, + 78.3, + 80.6, + 80.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 79.0, + 78.5, + 80.9, + 80.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 78.9, + 78.4, + 80.6, + 80.5 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 78.9, + 78.4, + 80.4, + 80.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 78.9, + 78.2, + 80.3, + 80.1 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 78.9, + 78.0, + 80.2, + 80.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 78.7, + 77.8, + 80.1, + 80.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 78.8, + 77.9, + 80.3, + 80.2 + ], + "up_blocks.2.resnets.1.conv2": [ + 78.7, + 77.7, + 80.1, + 80.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 78.6, + 77.5, + 80.0, + 79.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 78.6, + 77.4, + 80.1, + 80.0 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 78.6, + 77.3, + 79.9, + 79.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 78.6, + 77.3, + 80.1, + 80.0 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 78.5, + 77.4, + 80.0, + 80.0 + ], + 
"down_blocks.0.resnets.1.time_emb_proj": [ + 78.4, + 77.4, + 79.9, + 79.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 78.7, + 77.4, + 80.0, + 79.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 78.5, + 77.2, + 80.0, + 79.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 78.4, + 76.9, + 79.9, + 79.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 78.4, + 76.9, + 79.9, + 79.2 + ], + "down_blocks.1.resnets.0.conv1": [ + 78.5, + 77.0, + 79.9, + 79.5 + ], + "down_blocks.1.attentions.1.proj_out": [ + 78.4, + 76.7, + 79.9, + 79.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 78.3, + 76.8, + 79.7, + 79.3 + ], + "down_blocks.2.resnets.1.conv1": [ + 78.5, + 76.7, + 79.6, + 79.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 78.5, + 76.6, + 79.5, + 79.5 + ], + "up_blocks.2.attentions.1.proj_out": [ + 78.4, + 76.7, + 79.4, + 79.6 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 78.5, + 76.6, + 79.2, + 79.4 + ], + "up_blocks.1.attentions.1.proj_in": [ + 78.4, + 76.7, + 79.2, + 79.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 78.3, + 76.6, + 79.0, + 79.3 + ], + "up_blocks.2.resnets.2.conv2": [ + 78.2, + 76.4, + 78.9, + 79.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 78.1, + 76.2, + 78.8, + 78.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 77.9, + 76.1, + 78.8, + 78.6 + ], + "up_blocks.2.attentions.0.proj_in": [ + 77.8, + 75.9, + 78.8, + 78.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 77.7, + 75.9, + 78.8, + 78.7 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 77.5, + 75.7, + 78.5, + 78.4 + ], + "up_blocks.2.resnets.2.conv1": [ + 77.4, + 75.8, + 78.5, + 78.5 + ], + "down_blocks.2.attentions.0.proj_in": [ + 77.5, + 76.0, + 78.5, + 78.6 + ], + "down_blocks.1.attentions.0.proj_in": [ + 77.5, + 76.0, + 78.3, + 78.5 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 77.4, + 75.8, + 78.2, + 78.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 77.0, + 75.4, + 78.1, + 78.0 + ], + "up_blocks.2.resnets.1.conv1": [ + 76.9, + 75.2, + 77.9, + 77.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 77.1, + 75.4, + 78.2, + 78.0 + ], + "down_blocks.1.attentions.0.proj_out": [ + 77.2, + 75.3, + 78.1, + 78.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 77.0, + 75.2, + 77.9, + 77.7 + ], + "down_blocks.1.resnets.1.conv1": [ + 77.0, + 75.2, + 78.0, + 77.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 76.8, + 75.2, + 77.8, + 77.6 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 76.7, + 75.2, + 77.9, + 77.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 76.2, + 75.1, + 77.7, + 76.9 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 76.1, + 75.0, + 77.7, + 77.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 76.0, + 75.0, + 77.6, + 76.9 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 76.0, + 74.9, + 77.6, + 76.8 + ], + "down_blocks.1.attentions.1.proj_in": [ + 76.0, + 75.2, + 77.7, + 76.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 75.9, + 75.2, + 77.6, + 76.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 75.8, + 75.1, + 77.5, + 
76.8 + ], + "up_blocks.1.upsamplers.0.conv": [ + 75.8, + 75.2, + 77.4, + 76.7 + ], + "down_blocks.2.resnets.0.conv1": [ + 76.0, + 75.2, + 77.4, + 76.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 76.0, + 75.4, + 77.5, + 76.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 75.8, + 75.5, + 77.2, + 77.0 + ], + "down_blocks.1.resnets.1.conv2": [ + 75.7, + 75.4, + 77.2, + 77.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 75.3, + 75.0, + 77.0, + 76.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 75.3, + 74.9, + 76.8, + 76.6 + ], + "up_blocks.2.attentions.1.proj_in": [ + 75.2, + 74.8, + 76.9, + 76.5 + ], + "up_blocks.1.attentions.2.proj_in": [ + 75.0, + 74.4, + 76.8, + 76.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 74.9, + 74.3, + 76.7, + 76.2 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 74.9, + 74.0, + 76.5, + 76.1 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 74.9, + 73.8, + 76.2, + 75.8 + ], + "down_blocks.0.resnets.0.conv1": [ + 74.7, + 73.8, + 76.1, + 75.8 + ], + "time_embedding.linear_1": [ + 74.4, + 73.4, + 75.6, + 75.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 74.2, + 73.2, + 75.6, + 75.2 + ], + "up_blocks.2.attentions.2.proj_in": [ + 74.2, + 73.2, + 75.5, + 75.1 + ], + "up_blocks.2.attentions.2.proj_out": [ + 74.3, + 73.2, + 75.4, + 75.0 + ], + "down_blocks.0.attentions.1.proj_out": [ + 74.2, + 73.1, + 75.3, + 75.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 74.1, + 72.7, + 75.2, + 74.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 74.0, + 73.2, + 75.1, + 75.0 + ], + "down_blocks.0.resnets.1.conv1": [ + 74.2, + 73.1, + 75.1, + 75.1 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 74.2, + 73.1, + 75.0, + 75.0 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 74.1, + 72.9, + 74.8, + 74.8 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 74.2, + 72.3, + 74.8, + 74.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 74.3, + 72.4, + 74.6, + 74.5 + ], + "up_blocks.2.upsamplers.0.conv": [ + 74.3, + 72.3, + 74.6, + 74.5 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 74.1, + 72.3, + 74.5, + 74.3 + ], + "down_blocks.0.attentions.0.proj_in": [ + 74.1, + 72.2, + 74.6, + 74.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 74.1, + 72.2, + 74.4, + 74.1 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 74.0, + 72.0, + 74.1, + 73.9 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 73.9, + 72.1, + 74.1, + 73.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 73.9, + 72.3, + 74.3, + 74.1 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 73.8, + 72.3, + 74.2, + 74.1 + ], + "down_blocks.0.resnets.1.conv2": [ + 73.7, + 72.0, + 74.1, + 73.8 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 73.7, + 71.9, + 74.2, + 73.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 73.7, + 71.9, + 73.9, + 73.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 73.5, + 72.3, + 73.9, + 74.0 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 73.5, + 72.2, + 73.9, + 74.0 + ], + "down_blocks.0.attentions.0.proj_out": [ + 73.6, + 72.1, + 73.7, + 73.9 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 73.4, + 
71.5, + 73.6, + 73.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 73.3, + 71.1, + 73.4, + 73.3 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 73.2, + 71.1, + 73.7, + 73.3 + ], + "up_blocks.3.attentions.0.proj_out": [ + 73.2, + 71.0, + 73.5, + 73.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 73.4, + 71.0, + 73.5, + 73.2 + ], + "time_embedding.linear_2": [ + 72.8, + 70.3, + 72.9, + 72.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 72.7, + 70.1, + 72.6, + 72.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 72.7, + 69.9, + 72.6, + 72.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 72.6, + 69.9, + 72.5, + 72.1 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 72.4, + 69.9, + 72.6, + 72.1 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 72.1, + 69.8, + 72.5, + 72.0 + ], + "up_blocks.3.resnets.0.conv2": [ + 72.0, + 69.6, + 72.2, + 71.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 71.7, + 69.4, + 71.9, + 71.2 + ], + "down_blocks.0.resnets.0.conv2": [ + 71.7, + 69.3, + 71.6, + 71.2 + ], + "down_blocks.0.downsamplers.0.conv": [ + 71.1, + 69.0, + 70.7, + 70.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 71.0, + 68.9, + 70.7, + 70.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 70.3, + 68.4, + 70.1, + 69.7 + ], + "up_blocks.3.attentions.1.proj_out": [ + 70.2, + 68.4, + 70.0, + 69.7 + ], + "up_blocks.3.resnets.0.conv1": [ + 70.1, + 68.3, + 69.8, + 69.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 70.1, + 68.4, + 69.8, + 69.5 + ], + "up_blocks.3.attentions.0.proj_in": [ + 69.9, + 68.2, + 69.6, + 69.3 + ], + "up_blocks.3.attentions.2.proj_out": [ + 69.7, + 68.1, + 69.5, + 69.2 + ], + "down_blocks.1.downsamplers.0.conv": [ + 69.2, + 67.8, + 68.8, + 68.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 69.2, + 67.6, + 68.8, + 68.5 + ], + "down_blocks.0.attentions.1.proj_in": [ + 69.2, + 67.8, + 69.6, + 68.8 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 69.3, + 68.0, + 69.0, + 69.1 + ], + "up_blocks.3.resnets.1.conv1": [ + 69.1, + 67.8, + 68.8, + 68.8 + ], + "up_blocks.3.resnets.1.conv2": [ + 68.9, + 67.6, + 68.6, + 68.7 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 68.8, + 67.4, + 68.3, + 68.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 68.3, + 66.9, + 67.8, + 67.9 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 68.1, + 66.8, + 67.8, + 67.9 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 68.1, + 66.6, + 67.3, + 67.4 + ], + "up_blocks.3.resnets.2.conv1": [ + 67.9, + 66.3, + 66.9, + 67.1 + ], + "up_blocks.3.attentions.1.proj_in": [ + 67.4, + 65.9, + 66.4, + 66.6 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 67.3, + 66.0, + 66.2, + 66.5 + ], + "up_blocks.3.resnets.2.conv2": [ + 67.0, + 65.6, + 65.8, + 66.0 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 66.8, + 65.7, + 65.6, + 66.0 + ], + "up_blocks.3.attentions.2.proj_in": [ + 66.2, + 65.0, + 64.6, + 65.0 + ], + "up_blocks.3.resnets.2.conv_shortcut": [ + 66.7, + 65.3, + 65.1, + 65.5 + ] + }, + "6": { + "metadata": { + "nbits": 6, + "out_ngroups": 1, + "in_ngroups": 1, + "cumulative": true, + "candidates": [ + "mid_block.resnets.1.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k", + 
"up_blocks.0.resnets.0.conv2", + "up_blocks.0.resnets.1.time_emb_proj", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.0.resnets.1.conv_shortcut", + "up_blocks.1.resnets.0.time_emb_proj", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q", + "mid_block.resnets.1.conv1", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q", + "down_blocks.3.resnets.0.conv2", + "up_blocks.0.resnets.0.conv1", + "up_blocks.0.resnets.0.time_emb_proj", + "up_blocks.0.resnets.2.time_emb_proj", + "up_blocks.0.resnets.2.conv2", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.0.ff.net.2", + "mid_block.resnets.0.conv1", + "down_blocks.3.resnets.1.time_emb_proj", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.resnets.0.time_emb_proj", + "down_blocks.3.resnets.1.conv2", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.3.resnets.0.time_emb_proj", + "down_blocks.3.resnets.1.conv1", + "up_blocks.0.resnets.0.conv_shortcut", + "up_blocks.0.resnets.1.conv2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.0.resnets.2.conv1", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.resnets.2.time_emb_proj", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "mid_block.attentions.0.proj_out", + "mid_block.resnets.1.conv2", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.resnets.0.time_emb_proj", + "mid_block.resnets.0.conv2", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.0.resnets.2.conv_shortcut", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "mid_block.resnets.0.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.downsamplers.0.conv", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "mid_block.attentions.0.proj_in", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.2.resnets.1.time_emb_proj", + "up_blocks.0.resnets.1.conv1", + "up_blocks.1.resnets.1.time_emb_proj", + 
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.resnets.1.conv2", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.2.resnets.0.conv2", + "down_blocks.2.resnets.1.conv1", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.resnets.0.conv_shortcut", + "up_blocks.1.resnets.1.conv1", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.1.resnets.0.conv1", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.resnets.1.conv2", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.1.resnets.2.conv2", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.0.proj_in", + "up_blocks.1.resnets.1.conv_shortcut", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.1.resnets.1.time_emb_proj", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q", + "up_blocks.3.resnets.0.time_emb_proj", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.1.proj_out", + "down_blocks.1.resnets.0.conv2", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.2.attentions.0.proj_out", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.2.attentions.1.proj_out", + "down_blocks.3.resnets.0.conv1", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.resnets.0.time_emb_proj", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.0.proj_out", + "up_blocks.1.resnets.0.conv2", + "up_blocks.1.resnets.2.conv1", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.2.resnets.0.conv1", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + 
"up_blocks.0.upsamplers.0.conv", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.0.proj_in", + "down_blocks.1.attentions.1.proj_out", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.0.proj_out", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.2.resnets.0.conv2", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.2.resnets.1.time_emb_proj", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q", + "down_blocks.0.resnets.1.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.2.resnets.2.time_emb_proj", + "time_embedding.linear_1", + "down_blocks.1.attentions.0.proj_in", + "down_blocks.1.resnets.1.conv1", + "down_blocks.1.downsamplers.0.conv", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.resnets.2.conv1", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.3.resnets.1.time_emb_proj", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.1.resnets.0.conv1", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.1.proj_in", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.2.resnets.1.conv1", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.1.proj_in", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.1.proj_out", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.resnets.1.conv2", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.1.upsamplers.0.conv", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.1.attentions.0.proj_out", + "down_blocks.1.attentions.1.proj_in", + "down_blocks.2.resnets.0.conv1", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.2.proj_in", + 
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.resnets.2.time_emb_proj", + "down_blocks.1.resnets.1.conv2", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.2.resnets.2.conv2", + "down_blocks.2.attentions.0.proj_in", + "up_blocks.1.attentions.2.proj_out", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2", + "time_embedding.linear_2", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.2.resnets.0.conv_shortcut", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.1.resnets.2.conv_shortcut", + "up_blocks.2.resnets.0.conv_shortcut", + "down_blocks.0.resnets.0.time_emb_proj", + "up_blocks.2.attentions.2.proj_out", + "up_blocks.2.resnets.1.conv_shortcut", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.1.proj_in", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.0.resnets.0.conv1", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.3.attentions.0.proj_out", + "up_blocks.2.resnets.2.conv_shortcut", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0", + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.0.resnets.1.conv1", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.2.proj_out", + "down_blocks.0.downsamplers.0.conv", + "up_blocks.2.attentions.2.proj_in", + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.0.attentions.0.proj_in", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.0.attentions.0.proj_out", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.resnets.0.conv_shortcut", + "down_blocks.0.attentions.1.proj_out", + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.1.proj_out", + "up_blocks.2.upsamplers.0.conv", + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.0.resnets.1.conv2", + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj", + "down_blocks.0.resnets.0.conv2", + "up_blocks.3.resnets.0.conv2", + "down_blocks.0.attentions.1.proj_in", + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.0.proj_in", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.3.resnets.0.conv1", + "up_blocks.3.resnets.2.conv2", + "up_blocks.3.resnets.1.conv1", + 
"up_blocks.3.resnets.2.conv1", + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.resnets.1.conv2", + "up_blocks.3.resnets.2.conv_shortcut", + "up_blocks.3.attentions.1.proj_in", + "up_blocks.3.resnets.0.conv_shortcut", + "up_blocks.3.attentions.2.proj_in", + "up_blocks.3.resnets.1.conv_shortcut" + ], + "sizes": [ + 1638400, + 1638400, + 14745600, + 1638400, + 491520, + 3276800, + 1638400, + 409600, + 14745600, + 983040, + 1638400, + 14745600, + 29491200, + 1638400, + 1638400, + 14745600, + 1638400, + 6553600, + 14745600, + 1638400, + 409600, + 1638400, + 14745600, + 1638400, + 409600, + 1638400, + 983040, + 1638400, + 14745600, + 3276800, + 14745600, + 1638400, + 1638400, + 1638400, + 29491200, + 491520, + 491520, + 1638400, + 1638400, + 409600, + 491520, + 1638400, + 1638400, + 1638400, + 1638400, + 409600, + 13107200, + 1638400, + 1638400, + 1638400, + 14745600, + 409600, + 1638400, + 1638400, + 1638400, + 1638400, + 819200, + 14745600, + 409600, + 3276800, + 409600, + 1638400, + 409600, + 14745600, + 491520, + 1638400, + 1638400, + 1638400, + 1638400, + 29491200, + 1638400, + 1638400, + 983040, + 983040, + 1638400, + 6553600, + 983040, + 1638400, + 1638400, + 409600, + 983040, + 6553600, + 14745600, + 245760, + 409600, + 983040, + 14745600, + 14745600, + 13107200, + 3276800, + 29491200, + 409600, + 245760, + 102400, + 29491200, + 102400, + 14745600, + 6553600, + 14745600, + 409600, + 1638400, + 3276800, + 245760, + 409600, + 819200, + 409600, + 983040, + 13107200, + 102400, + 409600, + 491520, + 6553600, + 1638400, + 983040, + 1638400, + 1638400, + 1638400, + 3686400, + 409600, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 14745600, + 409600, + 819200, + 13107200, + 1638400, + 14745600, + 22118400, + 1638400, + 11059200, + 1638400, + 409600, + 409600, + 14745600, + 1638400, + 245760, + 1638400, + 1638400, + 409600, + 409600, + 491520, + 409600, + 409600, + 3686400, + 245760, + 409600, + 491520, + 819200, + 102400, + 409600, + 409600, + 3276800, + 3276800, + 983040, + 1638400, + 102400, + 983040, + 819200, + 409600, + 409600, + 3686400, + 3686400, + 245760, + 1638400, + 491520, + 3276800, + 5529600, + 102400, + 409600, + 102400, + 102400, + 1843200, + 102400, + 1638400, + 409600, + 6553600, + 409600, + 1638400, + 7372800, + 102400, + 245760, + 1638400, + 491520, + 409600, + 102400, + 102400, + 409600, + 1638400, + 1638400, + 3686400, + 983040, + 14745600, + 102400, + 409600, + 409600, + 7372800, + 102400, + 102400, + 1638400, + 409600, + 409600, + 3686400, + 1638400, + 102400, + 245760, + 102400, + 409600, + 3686400, + 1638400, + 1638400, + 409600, + 245760, + 409600, + 1638400, + 102400, + 13107200, + 819200, + 102400, + 2457600, + 1228800, + 409600, + 409600, + 819200, + 102400, + 409600, + 409600, + 409600, + 409600, + 921600, + 245760, + 102400, + 102400, + 614400, + 102400, + 102400, + 409600, + 409600, + 102400, + 921600, + 102400, + 3276800, + 102400, + 921600, + 409600, + 409600, + 102400, + 102400, + 102400, + 819200, + 204800, + 102400, + 819200, + 102400, + 102400, + 3686400, + 819200, + 921600, + 3276800, + 921600, + 921600, + 102400, + 819200, + 102400, + 102400, + 102400, + 2764800, + 921600, + 1843200, + 1843200, + 819200, + 102400, + 921600, + 204800, + 102400, + 307200, + 102400, + 204800 + ] + }, + "mid_block.resnets.1.time_emb_proj": [ + 88.6, + 88.7, + 89.0, + 88.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.7, + 88.6, + 88.9, + 88.6 
+ ], + "up_blocks.0.resnets.0.conv2": [ + 88.6, + 88.6, + 88.6, + 88.5 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 88.6, + 88.3, + 88.7, + 88.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 88.3, + 88.5, + 88.8, + 88.8 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 88.4, + 88.3, + 89.1, + 88.5 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 88.6, + 88.8, + 88.5, + 88.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 88.6, + 88.6, + 88.9, + 88.3 + ], + "mid_block.resnets.1.conv1": [ + 88.4, + 88.6, + 88.9, + 88.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 88.4, + 88.5, + 88.6, + 88.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 88.4, + 88.4, + 88.9, + 88.6 + ], + "down_blocks.3.resnets.0.conv2": [ + 88.6, + 88.6, + 88.5, + 88.5 + ], + "up_blocks.0.resnets.0.conv1": [ + 88.6, + 88.4, + 88.7, + 88.6 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 88.3, + 88.3, + 88.5, + 88.5 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 88.3, + 88.3, + 88.6, + 88.4 + ], + "up_blocks.0.resnets.2.conv2": [ + 88.3, + 88.3, + 88.4, + 88.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.4, + 88.3, + 88.5, + 88.2 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.0, + 88.4, + 88.3, + 88.3 + ], + "mid_block.resnets.0.conv1": [ + 88.0, + 88.0, + 88.3, + 88.5 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 88.4, + 88.6, + 88.5, + 88.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.1, + 88.3, + 88.5, + 88.3 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 88.2, + 88.4, + 88.2, + 88.2 + ], + "down_blocks.3.resnets.1.conv2": [ + 88.1, + 88.2, + 88.5, + 88.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.2, + 88.1, + 88.5, + 88.2 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.3, + 88.5, + 88.4, + 88.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.1, + 88.4, + 88.5, + 88.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.1, + 88.2, + 88.4, + 88.3 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 88.1, + 88.1, + 88.5, + 88.2 + ], + "down_blocks.3.resnets.1.conv1": [ + 88.0, + 88.2, + 88.4, + 88.4 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 88.2, + 88.4, + 88.5, + 88.2 + ], + "up_blocks.0.resnets.1.conv2": [ + 88.0, + 88.2, + 88.4, + 88.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.9, + 88.1, + 88.5, + 88.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.7, + 88.4, + 88.5, + 88.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 87.8, + 88.3, + 88.5, + 88.1 + ], + "up_blocks.0.resnets.2.conv1": [ + 87.9, + 88.2, + 88.4, + 88.2 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 87.7, + 88.3, + 88.5, + 88.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 87.9, + 88.3, + 88.5, + 88.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.9, + 87.9, + 88.3, + 88.0 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.0, + 88.2, + 88.1, + 88.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 87.8, + 88.1, + 88.3, + 87.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 87.6, + 88.2, + 88.4, + 88.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.4, + 88.1, + 88.2, + 88.0 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 87.7, + 88.3, + 88.2, + 88.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.0, + 88.3, + 88.4, + 88.1 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 87.8, + 88.1, + 88.2, + 88.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 87.6, + 88.3, + 88.5, + 88.1 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 87.7, + 88.2, + 88.4, + 88.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 87.9, + 88.2, + 88.2, + 88.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 87.7, + 88.1, + 88.2, + 88.1 + ], + "mid_block.attentions.0.proj_out": [ + 87.6, + 88.0, + 88.0, + 88.0 + ], + "mid_block.resnets.1.conv2": [ + 87.8, + 88.2, + 88.5, + 87.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 87.8, + 87.9, + 88.3, + 88.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.6, + 87.9, + 88.3, + 88.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.6, + 87.9, + 88.1, + 88.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 87.6, + 88.1, + 88.2, + 88.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 87.8, + 88.1, + 88.3, + 87.5 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 87.7, + 87.9, + 88.3, + 88.2 + ], + "mid_block.resnets.0.conv2": [ + 88.0, + 87.9, + 88.2, + 88.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.9, + 88.0, + 88.0, + 87.9 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 87.9, + 87.9, + 88.1, + 88.0 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.6, + 87.8, + 88.2, + 87.7 + ], + "mid_block.resnets.0.time_emb_proj": [ + 87.6, + 88.0, + 88.3, + 87.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.6, + 87.9, + 88.2, + 87.9 + ], + "down_blocks.2.downsamplers.0.conv": [ + 87.6, + 87.9, + 88.4, + 87.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 87.5, + 87.7, + 88.0, + 87.7 + ], + "mid_block.attentions.0.proj_in": [ + 87.7, + 88.0, + 88.2, + 87.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.6, + 87.9, + 88.3, + 87.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 87.8, + 87.8, + 88.3, + 87.9 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 87.7, + 88.0, + 88.4, + 87.6 + ], + "up_blocks.0.resnets.1.conv1": [ + 87.6, + 87.9, + 88.2, + 87.8 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 87.7, + 87.7, + 88.1, + 87.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.6, + 87.5, + 88.1, + 88.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 87.3, + 87.6, + 88.1, + 88.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 87.7, + 87.5, + 88.1, + 87.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 87.5, + 87.6, + 88.0, + 87.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 87.4, + 87.7, + 88.1, + 87.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.2, + 87.6, + 88.1, + 87.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 87.7, + 87.7, + 88.2, + 87.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 87.7, + 87.7, + 87.9, + 87.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 87.5, + 87.8, + 87.8, + 87.6 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 87.5, + 87.6, + 88.0, + 87.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 87.6, + 87.7, + 87.9, + 87.9 + ], + "up_blocks.1.resnets.1.conv2": [ + 87.6, + 87.4, + 88.0, + 87.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 87.4, + 87.4, + 88.0, + 87.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 87.2, + 87.2, + 87.9, + 87.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 86.8, + 87.0, + 87.9, + 87.5 + ], + "down_blocks.2.resnets.0.conv2": [ + 87.0, + 87.1, + 87.6, + 87.7 + ], + "down_blocks.2.resnets.1.conv1": [ + 86.7, + 87.0, + 87.7, + 87.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 86.7, + 86.8, + 87.9, + 87.5 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 86.7, + 86.6, + 87.7, + 87.3 + ], + "up_blocks.1.resnets.1.conv1": [ + 86.7, + 86.6, + 87.9, + 87.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 86.7, + 86.4, + 87.7, + 87.7 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 86.6, + 86.8, + 87.7, + 87.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 86.6, + 86.6, + 87.7, + 87.7 + ], + "up_blocks.1.resnets.0.conv1": [ + 86.5, + 86.7, + 87.6, + 87.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 86.6, + 86.7, + 87.6, + 87.4 + ], + "down_blocks.2.resnets.1.conv2": [ + 86.7, + 86.7, + 87.5, + 87.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 86.4, + 86.7, + 87.7, + 87.4 + ], + "up_blocks.1.resnets.2.conv2": [ + 86.5, + 86.7, + 87.5, + 87.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 86.7, + 86.5, + 87.7, + 87.4 + ], + "up_blocks.1.attentions.0.proj_in": [ + 86.6, + 86.4, + 87.6, + 87.4 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 86.8, + 86.7, + 87.7, + 87.3 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 86.6, + 86.8, + 87.7, + 87.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 86.6, + 86.7, + 87.4, + 87.4 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 86.7, + 86.7, + 87.3, + 87.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 86.6, + 86.7, + 87.7, + 87.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 86.6, + 86.7, + 87.5, + 87.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 86.3, + 86.7, + 87.6, + 87.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 86.3, + 86.6, + 87.5, + 87.4 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 86.5, + 86.7, + 87.5, + 87.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 86.0, + 86.3, + 87.4, + 87.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 86.0, + 86.5, + 87.2, + 87.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 86.1, + 86.4, + 87.5, + 87.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 86.4, + 86.6, + 87.4, + 87.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 86.3, + 86.3, + 87.3, + 86.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 86.1, + 86.4, + 87.5, + 87.1 + ], + "up_blocks.1.attentions.1.proj_out": [ + 86.2, + 86.3, + 87.4, + 87.2 + ], + "down_blocks.1.resnets.0.conv2": [ + 86.1, + 86.5, + 87.4, + 87.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 85.9, + 
86.4, + 87.4, + 86.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 85.9, + 86.2, + 87.0, + 86.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 85.6, + 86.3, + 87.2, + 86.8 + ], + "down_blocks.2.attentions.0.proj_out": [ + 85.7, + 86.2, + 87.3, + 86.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 85.5, + 86.1, + 87.2, + 86.9 + ], + "down_blocks.2.attentions.1.proj_out": [ + 85.1, + 86.0, + 87.2, + 86.8 + ], + "down_blocks.3.resnets.0.conv1": [ + 85.5, + 86.2, + 87.1, + 86.9 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 85.4, + 86.1, + 87.0, + 86.8 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 85.5, + 86.1, + 87.1, + 86.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 85.0, + 86.1, + 87.1, + 86.5 + ], + "up_blocks.1.attentions.0.proj_out": [ + 85.6, + 86.0, + 87.2, + 86.5 + ], + "up_blocks.1.resnets.0.conv2": [ + 84.9, + 86.0, + 86.9, + 86.6 + ], + "up_blocks.1.resnets.2.conv1": [ + 85.2, + 86.1, + 87.1, + 86.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 85.4, + 86.0, + 87.1, + 86.4 + ], + "up_blocks.2.resnets.0.conv1": [ + 85.2, + 86.1, + 86.9, + 86.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 85.3, + 86.0, + 87.1, + 86.2 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 85.4, + 85.9, + 86.9, + 85.9 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 85.4, + 86.0, + 87.1, + 86.3 + ], + "up_blocks.0.upsamplers.0.conv": [ + 85.5, + 85.9, + 86.9, + 86.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 85.7, + 85.9, + 87.0, + 85.9 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 85.5, + 85.8, + 86.9, + 85.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 85.5, + 85.9, + 87.0, + 85.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 85.6, + 85.7, + 87.0, + 86.0 + ], + "up_blocks.2.attentions.0.proj_in": [ + 85.6, + 85.9, + 86.7, + 86.0 + ], + "down_blocks.1.attentions.1.proj_out": [ + 85.5, + 85.7, + 86.9, + 85.7 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 85.4, + 85.7, + 86.8, + 86.0 + ], + "up_blocks.2.attentions.0.proj_out": [ + 85.4, + 85.7, + 86.6, + 85.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 85.2, + 85.7, + 86.6, + 85.9 + ], + "up_blocks.2.resnets.0.conv2": [ + 85.2, + 85.7, + 86.4, + 85.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 85.3, + 85.5, + 86.6, + 85.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 85.2, + 85.5, + 86.3, + 85.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 85.3, + 85.5, + 86.3, + 85.2 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 85.3, + 85.6, + 86.4, + 85.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 85.2, + 85.5, + 86.4, + 85.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 85.2, + 85.6, + 86.3, + 85.4 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 85.2, + 85.5, + 86.4, + 85.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 85.2, + 85.4, + 86.1, + 85.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 85.4, + 85.3, + 86.2, + 85.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 85.3, + 85.4, + 86.2, + 85.0 + ], + 
"up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 85.4, + 85.5, + 85.8, + 85.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 85.3, + 85.4, + 85.9, + 85.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 85.3, + 85.1, + 86.0, + 84.9 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 85.2, + 85.3, + 86.0, + 84.8 + ], + "time_embedding.linear_1": [ + 85.2, + 85.4, + 85.9, + 84.8 + ], + "down_blocks.1.attentions.0.proj_in": [ + 85.1, + 85.4, + 85.9, + 84.8 + ], + "down_blocks.1.resnets.1.conv1": [ + 85.2, + 85.3, + 85.9, + 84.8 + ], + "down_blocks.1.downsamplers.0.conv": [ + 85.1, + 85.0, + 85.9, + 84.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 85.0, + 84.8, + 85.7, + 84.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 85.0, + 85.0, + 86.0, + 84.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 84.9, + 85.0, + 85.8, + 84.8 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 85.1, + 85.1, + 85.8, + 84.6 + ], + "up_blocks.2.resnets.2.conv1": [ + 85.1, + 85.1, + 85.9, + 84.8 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 84.9, + 85.0, + 85.8, + 84.9 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 84.9, + 85.0, + 85.9, + 84.6 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 84.8, + 85.0, + 85.8, + 84.6 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 84.9, + 85.1, + 85.7, + 84.9 + ], + "down_blocks.1.resnets.0.conv1": [ + 85.0, + 84.9, + 85.6, + 84.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 84.9, + 84.8, + 85.3, + 84.6 + ], + "up_blocks.1.attentions.1.proj_in": [ + 84.9, + 84.9, + 85.4, + 84.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 84.8, + 84.7, + 85.2, + 84.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 84.6, + 84.8, + 85.5, + 84.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 84.5, + 84.7, + 85.4, + 84.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 84.5, + 84.7, + 85.4, + 84.4 + ], + "up_blocks.2.resnets.1.conv1": [ + 84.6, + 84.7, + 85.3, + 84.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 84.6, + 84.7, + 85.3, + 84.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 84.5, + 84.7, + 85.3, + 84.3 + ], + "down_blocks.2.attentions.1.proj_in": [ + 84.6, + 84.7, + 85.3, + 84.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 84.6, + 84.8, + 85.2, + 84.0 + ], + "up_blocks.2.attentions.1.proj_out": [ + 84.5, + 84.6, + 85.1, + 84.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 84.4, + 84.9, + 85.1, + 84.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 84.6, + 84.6, + 85.2, + 84.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 84.7, + 84.4, + 85.0, + 84.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 84.6, + 84.8, + 85.0, + 84.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 84.7, + 84.8, + 84.8, + 84.6 + ], + "up_blocks.2.resnets.1.conv2": [ + 84.5, + 84.7, + 85.2, + 84.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 84.5, + 84.6, + 85.1, + 84.5 + ], + "up_blocks.1.upsamplers.0.conv": [ + 84.6, + 84.5, + 85.3, + 84.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 
84.6, + 84.6, + 84.9, + 84.6 + ], + "down_blocks.1.attentions.0.proj_out": [ + 84.6, + 84.6, + 85.0, + 84.6 + ], + "down_blocks.1.attentions.1.proj_in": [ + 84.6, + 84.5, + 85.1, + 84.5 + ], + "down_blocks.2.resnets.0.conv1": [ + 84.3, + 84.6, + 84.9, + 84.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 84.1, + 84.7, + 84.8, + 84.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 84.1, + 84.5, + 84.8, + 84.4 + ], + "up_blocks.1.attentions.2.proj_in": [ + 84.0, + 84.5, + 85.0, + 84.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 83.9, + 84.5, + 85.0, + 84.5 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 84.1, + 84.4, + 84.7, + 84.1 + ], + "down_blocks.1.resnets.1.conv2": [ + 83.9, + 84.5, + 84.9, + 84.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 83.4, + 84.2, + 84.9, + 84.3 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 83.4, + 84.2, + 84.8, + 84.0 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 83.4, + 84.2, + 84.5, + 84.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 83.5, + 84.2, + 84.6, + 84.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 83.3, + 84.1, + 84.6, + 84.0 + ], + "up_blocks.2.resnets.2.conv2": [ + 83.3, + 84.2, + 84.6, + 84.2 + ], + "down_blocks.2.attentions.0.proj_in": [ + 83.1, + 84.2, + 84.3, + 84.0 + ], + "up_blocks.1.attentions.2.proj_out": [ + 83.2, + 84.1, + 84.3, + 83.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 83.0, + 84.0, + 84.4, + 84.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 83.0, + 84.0, + 84.4, + 84.2 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 83.0, + 83.9, + 84.5, + 83.9 + ], + "time_embedding.linear_2": [ + 83.0, + 84.0, + 84.2, + 83.6 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 82.9, + 83.8, + 84.2, + 83.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 82.8, + 83.9, + 84.1, + 83.6 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 83.0, + 83.8, + 84.1, + 83.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 83.0, + 83.9, + 84.0, + 83.6 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 82.9, + 83.7, + 84.0, + 83.3 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 82.8, + 83.5, + 83.8, + 82.9 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 82.9, + 83.6, + 83.6, + 82.9 + ], + "up_blocks.2.attentions.2.proj_out": [ + 83.0, + 83.4, + 83.2, + 82.6 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 82.9, + 83.2, + 83.3, + 82.6 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 82.9, + 83.3, + 83.1, + 82.5 + ], + "up_blocks.2.attentions.1.proj_in": [ + 82.8, + 83.2, + 83.0, + 82.4 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 82.8, + 83.1, + 83.0, + 82.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 82.8, + 83.1, + 83.1, + 82.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 82.7, + 83.1, + 83.2, + 82.6 + ], + "down_blocks.0.resnets.0.conv1": [ + 82.7, + 83.1, + 83.1, + 82.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 82.5, + 83.0, + 83.0, + 82.7 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 82.5, + 83.0, + 83.1, + 82.8 + ], + "up_blocks.3.attentions.0.proj_out": [ + 82.4, + 82.9, + 83.0, + 82.6 + ], + 
"up_blocks.2.resnets.2.conv_shortcut": [ + 82.3, + 82.9, + 83.0, + 82.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 82.3, + 82.7, + 82.9, + 82.6 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 82.1, + 82.7, + 82.8, + 82.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 82.2, + 82.4, + 83.0, + 82.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 82.2, + 82.3, + 82.7, + 82.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 82.3, + 82.4, + 82.6, + 81.9 + ], + "down_blocks.0.resnets.1.conv1": [ + 82.3, + 82.4, + 82.7, + 82.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 82.0, + 82.2, + 82.6, + 81.9 + ], + "up_blocks.3.attentions.2.proj_out": [ + 82.0, + 82.0, + 82.5, + 81.8 + ], + "down_blocks.0.downsamplers.0.conv": [ + 81.9, + 81.9, + 82.3, + 81.8 + ], + "up_blocks.2.attentions.2.proj_in": [ + 81.8, + 81.9, + 82.2, + 81.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 81.8, + 81.8, + 82.3, + 81.5 + ], + "down_blocks.0.attentions.0.proj_in": [ + 81.7, + 81.9, + 82.3, + 81.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 81.7, + 81.7, + 82.0, + 81.3 + ], + "down_blocks.0.attentions.0.proj_out": [ + 81.6, + 81.8, + 82.0, + 81.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 81.5, + 81.5, + 82.0, + 81.2 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 81.4, + 81.5, + 82.0, + 81.1 + ], + "down_blocks.0.attentions.1.proj_out": [ + 81.4, + 81.5, + 82.1, + 81.1 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 81.3, + 81.4, + 81.7, + 80.9 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 81.3, + 81.5, + 81.8, + 81.0 + ], + "up_blocks.3.attentions.1.proj_out": [ + 81.2, + 81.2, + 81.9, + 80.9 + ], + "up_blocks.2.upsamplers.0.conv": [ + 81.2, + 81.3, + 81.6, + 80.8 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 81.0, + 81.2, + 81.4, + 80.6 + ], + "down_blocks.0.resnets.1.conv2": [ + 81.1, + 81.1, + 81.4, + 80.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 81.1, + 81.2, + 81.4, + 80.8 + ], + "down_blocks.0.resnets.0.conv2": [ + 81.0, + 81.1, + 81.4, + 80.7 + ], + "up_blocks.3.resnets.0.conv2": [ + 81.0, + 81.1, + 81.4, + 80.7 + ], + "down_blocks.0.attentions.1.proj_in": [ + 80.9, + 81.1, + 81.5, + 80.7 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 80.7, + 80.9, + 81.3, + 80.6 + ], + "up_blocks.3.attentions.0.proj_in": [ + 80.6, + 80.7, + 80.9, + 80.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 80.4, + 80.5, + 80.8, + 80.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 80.6, + 80.7, + 80.9, + 80.3 + ], + "up_blocks.3.resnets.0.conv1": [ + 80.4, + 80.7, + 81.0, + 80.3 + ], + "up_blocks.3.resnets.2.conv2": [ + 80.3, + 80.6, + 80.8, + 80.1 + ], + "up_blocks.3.resnets.1.conv1": [ + 80.4, + 80.6, + 80.8, + 80.1 + ], + "up_blocks.3.resnets.2.conv1": [ + 80.2, + 80.4, + 80.6, + 80.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 80.0, + 80.1, + 80.4, + 79.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 79.8, + 80.0, + 80.3, + 79.8 + ], + "up_blocks.3.resnets.1.conv2": [ + 79.7, + 80.0, + 80.1, + 79.4 + ], + 
"up_blocks.3.resnets.2.conv_shortcut": [ + 79.7, + 79.9, + 80.1, + 79.7 + ], + "up_blocks.3.attentions.1.proj_in": [ + 79.5, + 79.8, + 80.0, + 79.6 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 79.5, + 79.7, + 79.9, + 79.5 + ], + "up_blocks.3.attentions.2.proj_in": [ + 79.4, + 79.6, + 79.8, + 79.5 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 79.2, + 79.6, + 80.1, + 79.6 + ] + }, + "8": { + "metadata": { + "nbits": 8, + "out_ngroups": 1, + "in_ngroups": 1, + "cumulative": true, + "candidates": [ + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.3.resnets.1.conv2", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "mid_block.attentions.0.proj_out", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.3.resnets.0.conv2", + "up_blocks.0.resnets.1.conv2", + "up_blocks.0.upsamplers.0.conv", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.3.resnets.0.conv1", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.3.resnets.0.time_emb_proj", + "down_blocks.3.resnets.1.conv1", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q", + "down_blocks.1.resnets.0.time_emb_proj", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.0.resnets.2.conv1", + "up_blocks.2.resnets.0.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.1.proj_in", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.3.resnets.1.time_emb_proj", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj", + "mid_block.resnets.0.time_emb_proj", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.0.resnets.0.conv2", + "up_blocks.0.resnets.0.conv_shortcut", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.1.proj_out", + "down_blocks.2.resnets.1.conv1", + "up_blocks.0.resnets.0.conv1", + "up_blocks.1.attentions.0.proj_in", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.1.resnets.0.time_emb_proj", + "up_blocks.1.resnets.1.conv2", + "up_blocks.1.resnets.2.conv1", + "up_blocks.1.resnets.2.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0", + "mid_block.resnets.0.conv1", + "mid_block.resnets.0.conv2", + "down_blocks.2.resnets.1.conv2", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k", + 
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.1.resnets.0.conv1", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.2.resnets.0.conv1", + "up_blocks.0.resnets.1.time_emb_proj", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.1.proj_out", + "up_blocks.1.resnets.0.conv2", + "up_blocks.1.resnets.0.conv_shortcut", + "up_blocks.1.resnets.2.conv2", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q", + "mid_block.resnets.1.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.0.resnets.1.conv1", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.2.resnets.1.time_emb_proj", + "down_blocks.2.downsamplers.0.conv", + "up_blocks.0.resnets.1.conv_shortcut", + "up_blocks.0.resnets.2.conv2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.resnets.0.conv1", + "mid_block.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.1.resnets.0.conv1", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0", + "mid_block.attentions.0.proj_in", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.0.resnets.0.time_emb_proj", + "up_blocks.0.resnets.2.time_emb_proj", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.resnets.1.conv1", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.resnets.1.time_emb_proj", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.1.proj_in", + 
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.resnets.1.conv1", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.resnets.1.time_emb_proj", + "down_blocks.2.attentions.0.proj_out", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.0.proj_out", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v", + "mid_block.resnets.1.conv2", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.2.resnets.0.time_emb_proj", + "down_blocks.2.resnets.0.conv2", + "up_blocks.0.resnets.2.conv_shortcut", + "up_blocks.1.upsamplers.0.conv", + "up_blocks.2.attentions.0.proj_in", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.2.resnets.1.time_emb_proj", + "up_blocks.2.resnets.2.time_emb_proj", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k", + "mid_block.resnets.1.conv1", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.1.proj_out", + "down_blocks.1.resnets.0.conv2", + "down_blocks.2.attentions.0.proj_in", + "up_blocks.1.resnets.1.conv_shortcut", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.2.resnets.0.conv2", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.0.proj_out", + "up_blocks.3.resnets.1.time_emb_proj", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.1.resnets.1.conv2", + "down_blocks.1.downsamplers.0.conv", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.2.resnets.1.conv2", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.2.proj_in", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.1.resnets.2.conv_shortcut", + "up_blocks.2.attentions.1.proj_out", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v", + "down_blocks.2.resnets.0.conv_shortcut", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.2.proj_out", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v", + 
"up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.2.proj_out", + "time_embedding.linear_1", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.3.resnets.0.time_emb_proj", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.resnets.1.conv1", + "down_blocks.0.downsamplers.0.conv", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.resnets.0.conv_shortcut", + "up_blocks.2.resnets.2.conv1", + "up_blocks.2.resnets.2.conv2", + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.resnets.2.time_emb_proj", + "down_blocks.0.resnets.1.time_emb_proj", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.1.attentions.0.proj_in", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.3.attentions.1.proj_out", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.1.resnets.0.conv_shortcut", + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.0.attentions.1.proj_out", + "down_blocks.1.attentions.0.proj_out", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.3.resnets.2.conv_shortcut", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.proj_in", + "up_blocks.2.resnets.2.conv_shortcut", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.0.proj_in", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.0.resnets.0.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.0.proj_out", + "down_blocks.0.resnets.1.conv2", + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.2.proj_in", + "down_blocks.0.resnets.0.conv1", + "up_blocks.2.attentions.1.proj_in", + "up_blocks.2.upsamplers.0.conv", + "up_blocks.3.resnets.0.conv2", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.2.proj_in", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.resnets.1.conv_shortcut", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.resnets.0.conv1", + "up_blocks.3.resnets.2.conv2", + 
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.resnets.1.conv2", + "time_embedding.linear_2", + "down_blocks.0.resnets.1.conv1", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.2.proj_out", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.resnets.0.conv_shortcut", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.0.attentions.1.proj_in", + "up_blocks.3.attentions.1.proj_in", + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.3.resnets.1.conv1", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.resnets.2.conv1", + "down_blocks.0.attentions.0.proj_out", + "down_blocks.0.attentions.0.proj_in", + "up_blocks.3.resnets.1.conv_shortcut", + "down_blocks.0.resnets.0.conv2", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0" + ], + "sizes": [ + 983040, + 14745600, + 409600, + 409600, + 1638400, + 409600, + 983040, + 14745600, + 14745600, + 14745600, + 6553600, + 409600, + 14745600, + 409600, + 1638400, + 14745600, + 1638400, + 819200, + 1638400, + 29491200, + 819200, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 983040, + 1638400, + 983040, + 13107200, + 1638400, + 1638400, + 14745600, + 3276800, + 245760, + 409600, + 409600, + 491520, + 1638400, + 1638400, + 14745600, + 29491200, + 1638400, + 1638400, + 1638400, + 1638400, + 14745600, + 22118400, + 1638400, + 1638400, + 14745600, + 14745600, + 14745600, + 409600, + 983040, + 409600, + 245760, + 102400, + 102400, + 1638400, + 1843200, + 13107200, + 7372800, + 1638400, + 1638400, + 1638400, + 14745600, + 3276800, + 14745600, + 3276800, + 409600, + 102400, + 1638400, + 491520, + 29491200, + 102400, + 1638400, + 1638400, + 1638400, + 14745600, + 3276800, + 14745600, + 983040, + 6553600, + 1638400, + 1638400, + 1638400, + 409600, + 409600, + 11059200, + 6553600, + 409600, + 1638400, + 1638400, + 29491200, + 409600, + 409600, + 409600, + 102400, + 1638400, + 1638400, + 13107200, + 983040, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 6553600, + 983040, + 1638400, + 3686400, + 6553600, + 13107200, + 1638400, + 102400, + 245760, + 1638400, + 1638400, + 1638400, + 1638400, + 29491200, + 409600, + 409600, + 409600, + 819200, + 1638400, + 983040, + 1638400, + 491520, + 1638400, + 14745600, + 409600, + 1638400, + 102400, + 1638400, + 6553600, + 1638400, + 14745600, + 3276800, + 14745600, + 409600, + 491520, + 409600, + 409600, + 491520, + 819200, + 819200, + 102400, + 245760, + 14745600, + 491520, + 409600, + 3686400, + 1638400, + 3276800, + 409600, + 409600, + 3686400, + 983040, + 409600, + 409600, + 409600, + 491520, + 3686400, + 3686400, + 1638400, + 13107200, + 1638400, + 3686400, + 102400, + 102400, + 1638400, + 491520, + 102400, + 1638400, + 983040, + 2457600, + 409600, + 245760, + 819200, + 1638400, + 1638400, + 409600, + 1638400, + 102400, + 245760, + 409600, + 102400, + 409600, + 102400, + 102400, + 245760, + 491520, + 491520, + 245760, + 409600, + 1638400, + 1638400, + 7372800, + 921600, + 409600, + 983040, + 409600, + 1228800, + 5529600, + 3686400, + 819200, + 409600, + 409600, + 102400, + 102400, + 409600, + 102400, + 102400, + 102400, + 204800, + 1638400, + 102400, + 102400, + 245760, + 245760, + 102400, + 409600, + 3276800, + 102400, + 204800, + 102400, + 409600, + 
614400, + 102400, + 3276800, + 102400, + 409600, + 102400, + 102400, + 409600, + 3276800, + 102400, + 921600, + 819200, + 409600, + 102400, + 921600, + 409600, + 3686400, + 921600, + 819200, + 409600, + 409600, + 3276800, + 819200, + 102400, + 2764800, + 921600, + 102400, + 921600, + 1638400, + 921600, + 409600, + 409600, + 102400, + 819200, + 307200, + 102400, + 102400, + 102400, + 409600, + 1843200, + 409600, + 819200, + 1843200, + 102400, + 102400, + 204800, + 921600, + 102400 + ] + }, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.5, + 88.7, + 88.8, + 88.5 + ], + "down_blocks.3.resnets.1.conv2": [ + 88.4, + 88.9, + 88.7, + 88.8 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.5, + 88.6, + 88.7, + 88.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.6, + 88.6, + 88.6, + 88.4 + ], + "mid_block.attentions.0.proj_out": [ + 88.4, + 88.9, + 88.6, + 88.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.6, + 88.7, + 88.9, + 88.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.6, + 88.6, + 88.6, + 88.4 + ], + "down_blocks.3.resnets.0.conv2": [ + 88.7, + 88.9, + 88.7, + 88.6 + ], + "up_blocks.0.resnets.1.conv2": [ + 88.5, + 88.7, + 88.6, + 88.8 + ], + "up_blocks.0.upsamplers.0.conv": [ + 88.7, + 88.4, + 88.5, + 88.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 88.5, + 88.8, + 88.6, + 88.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.4, + 88.4, + 88.5, + 88.4 + ], + "down_blocks.3.resnets.0.conv1": [ + 88.4, + 88.8, + 88.5, + 88.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.5, + 88.5, + 88.7, + 88.6 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 88.4, + 88.5, + 88.8, + 88.4 + ], + "down_blocks.3.resnets.1.conv1": [ + 88.7, + 88.6, + 88.7, + 88.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 88.4, + 88.7, + 88.7, + 88.5 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 88.2, + 88.6, + 88.6, + 88.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.3, + 88.4, + 88.6, + 88.6 + ], + "up_blocks.0.resnets.2.conv1": [ + 88.3, + 88.5, + 88.7, + 88.6 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 88.3, + 88.5, + 88.8, + 88.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.4, + 88.6, + 88.6, + 88.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.4, + 88.5, + 88.7, + 88.6 + ], + "down_blocks.2.attentions.1.proj_in": [ + 88.4, + 88.6, + 88.7, + 88.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.6, + 88.5, + 88.5, + 88.4 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 88.4, + 88.6, + 88.8, + 88.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.3, + 88.1, + 88.4, + 88.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.2, + 88.4, + 88.7, + 88.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.3, + 88.4, + 88.5, + 88.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.3, + 88.5, + 88.7, + 88.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.5, + 88.6, + 88.8, + 88.7 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.4, + 88.8, + 88.4, + 88.5 + ], + "mid_block.resnets.0.time_emb_proj": [ + 88.6, + 88.5, + 88.5, + 88.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.3, + 88.5, + 
88.6, + 88.4 + ], + "up_blocks.0.resnets.0.conv2": [ + 88.0, + 88.4, + 88.6, + 88.8 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 88.2, + 88.4, + 88.3, + 88.5 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.4, + 88.7, + 88.4, + 88.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.5, + 88.4, + 88.6, + 88.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.3, + 88.5, + 88.6, + 88.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.2, + 88.4, + 88.5, + 88.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.3, + 88.5, + 88.5, + 88.6 + ], + "down_blocks.2.attentions.1.proj_out": [ + 88.3, + 88.4, + 88.5, + 88.3 + ], + "down_blocks.2.resnets.1.conv1": [ + 88.4, + 88.5, + 88.5, + 88.5 + ], + "up_blocks.0.resnets.0.conv1": [ + 88.3, + 88.4, + 88.6, + 88.5 + ], + "up_blocks.1.attentions.0.proj_in": [ + 88.4, + 88.3, + 88.6, + 88.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.1, + 88.5, + 88.4, + 88.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.4, + 88.3, + 88.5, + 88.4 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 88.5, + 88.4, + 88.6, + 88.3 + ], + "up_blocks.1.resnets.1.conv2": [ + 88.2, + 88.2, + 88.6, + 88.4 + ], + "up_blocks.1.resnets.2.conv1": [ + 88.3, + 88.4, + 88.5, + 88.4 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 88.5, + 88.5, + 88.5, + 88.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.2, + 88.2, + 88.4, + 88.5 + ], + "mid_block.resnets.0.conv1": [ + 87.9, + 88.1, + 88.4, + 88.3 + ], + "mid_block.resnets.0.conv2": [ + 88.1, + 88.2, + 88.5, + 88.3 + ], + "down_blocks.2.resnets.1.conv2": [ + 88.1, + 88.0, + 88.4, + 88.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 88.2, + 88.3, + 88.5, + 88.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.4, + 88.4, + 88.5, + 88.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 88.4, + 88.5, + 88.6, + 88.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.5, + 87.8, + 88.5, + 88.2 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 88.1, + 88.1, + 88.2, + 88.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.1, + 88.3, + 88.2, + 88.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.0, + 88.3, + 88.3, + 88.1 + ], + "down_blocks.1.resnets.0.conv1": [ + 87.8, + 88.0, + 88.2, + 88.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 88.0, + 88.4, + 88.2, + 88.1 + ], + "down_blocks.2.resnets.0.conv1": [ + 88.2, + 88.1, + 88.3, + 88.2 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 88.0, + 88.3, + 88.4, + 88.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.2, + 88.3, + 88.4, + 88.1 + ], + "up_blocks.1.attentions.1.proj_out": [ + 88.0, + 88.2, + 88.2, + 88.0 + ], + "up_blocks.1.resnets.0.conv2": [ + 88.0, + 88.1, + 88.4, + 88.1 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 88.0, + 88.3, + 88.3, + 88.4 + ], + "up_blocks.1.resnets.2.conv2": [ + 88.2, + 88.3, + 88.3, + 88.1 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.2, + 88.3, + 88.4, + 88.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.1, + 88.2, + 88.5, + 88.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.1, + 88.2, + 88.4, + 88.3 + ], + 
"mid_block.resnets.1.time_emb_proj": [ + 88.2, + 88.2, + 88.3, + 88.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 87.8, + 88.1, + 88.2, + 88.2 + ], + "up_blocks.0.resnets.1.conv1": [ + 88.1, + 88.1, + 88.5, + 88.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.2, + 88.1, + 88.1, + 88.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.0, + 88.0, + 88.4, + 88.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.1, + 88.2, + 88.3, + 88.0 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 88.1, + 88.0, + 88.5, + 88.1 + ], + "down_blocks.2.downsamplers.0.conv": [ + 88.0, + 88.0, + 88.2, + 88.4 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 88.4, + 88.0, + 88.2, + 88.2 + ], + "up_blocks.0.resnets.2.conv2": [ + 88.3, + 87.9, + 88.2, + 88.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.1, + 88.2, + 88.3, + 88.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.3, + 88.0, + 88.1, + 88.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.0, + 88.2, + 88.3, + 88.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 88.2, + 88.1, + 88.4, + 88.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 88.1, + 88.2, + 88.2, + 88.2 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.3, + 87.9, + 88.4, + 88.1 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 88.1, + 88.0, + 88.1, + 88.2 + ], + "up_blocks.2.resnets.0.conv1": [ + 88.0, + 88.2, + 88.3, + 88.1 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.0, + 88.0, + 88.4, + 88.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 87.9, + 88.0, + 88.3, + 88.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.1, + 87.9, + 88.3, + 88.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.2, + 88.0, + 88.3, + 88.2 + ], + "up_blocks.1.resnets.0.conv1": [ + 88.1, + 88.0, + 87.9, + 88.2 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.2, + 87.9, + 88.5, + 88.3 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.1, + 87.8, + 88.2, + 88.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.1, + 88.0, + 88.3, + 88.0 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.3, + 88.1, + 88.3, + 88.0 + ], + "mid_block.attentions.0.proj_in": [ + 87.9, + 87.9, + 88.2, + 88.0 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.9, + 87.9, + 88.3, + 88.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 87.9, + 88.2, + 88.3, + 88.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.0, + 88.1, + 88.1, + 88.0 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 88.1, + 88.0, + 88.4, + 88.4 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 87.9, + 88.2, + 88.2, + 88.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.8, + 88.2, + 88.2, + 87.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.0, + 88.2, + 88.4, + 88.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.1, + 88.0, + 88.4, + 88.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 87.9, + 88.1, + 88.1, + 88.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 87.9, + 87.9, + 88.0, + 
88.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 88.2, + 88.1, + 88.3, + 88.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 87.9, + 88.0, + 88.4, + 88.0 + ], + "down_blocks.1.resnets.1.conv1": [ + 88.1, + 88.0, + 88.1, + 88.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.1, + 88.0, + 88.0, + 88.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 87.8, + 88.0, + 88.1, + 87.9 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 88.0, + 88.0, + 88.3, + 87.9 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.1, + 88.1, + 88.1, + 87.9 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 87.8, + 87.9, + 88.2, + 88.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 87.7, + 88.1, + 88.1, + 88.0 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.0, + 87.9, + 88.2, + 88.4 + ], + "up_blocks.1.attentions.1.proj_in": [ + 87.7, + 88.1, + 88.4, + 88.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 87.7, + 88.1, + 88.0, + 87.8 + ], + "up_blocks.1.resnets.1.conv1": [ + 87.9, + 87.9, + 88.0, + 87.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.9, + 87.9, + 88.1, + 88.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.8, + 87.7, + 88.0, + 87.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 87.8, + 87.9, + 88.0, + 88.0 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 87.8, + 87.9, + 88.0, + 88.0 + ], + "down_blocks.2.attentions.0.proj_out": [ + 88.0, + 87.9, + 87.9, + 88.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.1, + 88.2, + 88.2, + 88.2 + ], + "up_blocks.1.attentions.0.proj_out": [ + 88.1, + 88.0, + 88.0, + 88.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 87.8, + 87.9, + 88.0, + 87.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.9, + 87.9, + 88.0, + 88.3 + ], + "mid_block.resnets.1.conv2": [ + 88.0, + 88.3, + 87.9, + 88.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.7, + 88.0, + 87.8, + 87.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.0, + 87.9, + 87.8, + 87.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 87.9, + 87.9, + 88.1, + 87.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 87.5, + 87.9, + 87.9, + 87.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 87.7, + 87.7, + 88.0, + 88.0 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 87.6, + 87.8, + 87.8, + 88.0 + ], + "down_blocks.2.resnets.0.conv2": [ + 88.0, + 87.8, + 87.9, + 88.0 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 87.7, + 87.9, + 87.9, + 88.3 + ], + "up_blocks.1.upsamplers.0.conv": [ + 87.8, + 88.1, + 88.0, + 87.9 + ], + "up_blocks.2.attentions.0.proj_in": [ + 87.8, + 87.7, + 88.1, + 87.9 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 87.7, + 87.9, + 88.0, + 87.9 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 87.6, + 87.9, + 88.2, + 87.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 88.0, + 87.7, + 87.8, + 88.1 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 87.8, + 87.8, + 87.8, + 87.9 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 87.8, + 87.8, + 87.7, + 87.7 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ 
+ 87.8, + 87.9, + 87.9, + 87.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 88.0, + 87.7, + 87.9, + 87.9 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 87.9, + 87.9, + 88.2, + 87.8 + ], + "mid_block.resnets.1.conv1": [ + 88.0, + 88.1, + 88.2, + 87.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.9, + 88.0, + 88.0, + 88.1 + ], + "down_blocks.1.attentions.1.proj_out": [ + 87.9, + 87.8, + 88.1, + 87.8 + ], + "down_blocks.1.resnets.0.conv2": [ + 87.8, + 87.7, + 88.0, + 88.1 + ], + "down_blocks.2.attentions.0.proj_in": [ + 87.7, + 87.9, + 88.0, + 87.9 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 87.9, + 87.8, + 88.2, + 87.9 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 87.7, + 87.8, + 88.3, + 87.9 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 87.9, + 87.7, + 88.1, + 87.8 + ], + "up_blocks.2.resnets.0.conv2": [ + 87.7, + 87.8, + 88.3, + 87.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 87.5, + 87.9, + 87.8, + 88.0 + ], + "up_blocks.2.attentions.0.proj_out": [ + 87.8, + 87.6, + 88.0, + 87.6 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 87.8, + 87.9, + 88.1, + 87.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 87.9, + 87.8, + 88.0, + 87.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.0, + 87.8, + 87.9, + 87.7 + ], + "down_blocks.1.resnets.1.conv2": [ + 87.9, + 87.7, + 87.8, + 87.8 + ], + "down_blocks.1.downsamplers.0.conv": [ + 88.0, + 87.8, + 87.9, + 87.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 87.6, + 87.8, + 87.9, + 87.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 87.8, + 87.8, + 87.7, + 88.0 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 87.7, + 88.0, + 88.0, + 88.0 + ], + "up_blocks.2.resnets.1.conv2": [ + 87.8, + 87.6, + 88.0, + 87.9 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 87.6, + 87.9, + 87.7, + 87.8 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 87.6, + 87.7, + 87.7, + 87.8 + ], + "up_blocks.1.attentions.2.proj_in": [ + 87.6, + 87.5, + 87.8, + 87.9 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 87.4, + 87.5, + 87.7, + 87.7 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 87.4, + 87.4, + 87.6, + 87.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 87.2, + 87.5, + 87.4, + 87.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 87.5, + 87.6, + 87.8, + 87.1 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 87.5, + 87.7, + 87.6, + 87.5 + ], + "up_blocks.2.attentions.1.proj_out": [ + 87.2, + 87.5, + 88.0, + 87.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 87.7, + 87.4, + 87.7, + 87.8 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 87.4, + 87.3, + 87.8, + 87.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.3, + 87.7, + 87.6, + 87.6 + ], + "up_blocks.1.attentions.2.proj_out": [ + 87.4, + 87.5, + 87.9, + 87.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.3, + 87.6, + 87.5, + 87.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 87.5, + 87.3, + 87.9, + 87.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.4, + 87.5, + 87.9, + 87.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 87.3, + 
87.5, + 87.9, + 87.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 87.4, + 87.3, + 87.9, + 87.9 + ], + "up_blocks.3.attentions.2.proj_out": [ + 87.4, + 87.2, + 87.7, + 87.5 + ], + "time_embedding.linear_1": [ + 87.5, + 87.4, + 87.4, + 87.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.5, + 87.4, + 87.5, + 87.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.5, + 87.2, + 87.6, + 87.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 87.4, + 87.5, + 87.7, + 87.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.3, + 87.1, + 87.8, + 87.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 87.4, + 87.4, + 87.5, + 87.8 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.2, + 87.1, + 87.7, + 87.7 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 87.3, + 87.1, + 87.6, + 87.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 87.4, + 87.4, + 87.8, + 87.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 87.3, + 87.3, + 87.6, + 87.5 + ], + "up_blocks.2.resnets.1.conv1": [ + 87.3, + 87.2, + 87.7, + 87.6 + ], + "down_blocks.0.downsamplers.0.conv": [ + 87.1, + 87.2, + 87.6, + 87.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 87.3, + 87.5, + 87.6, + 87.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.5, + 87.5, + 87.5, + 87.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 87.2, + 87.4, + 87.6, + 87.4 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 87.5, + 87.2, + 87.6, + 87.6 + ], + "up_blocks.2.resnets.2.conv1": [ + 87.5, + 87.4, + 87.7, + 87.6 + ], + "up_blocks.2.resnets.2.conv2": [ + 87.4, + 87.5, + 87.6, + 87.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 87.0, + 87.2, + 87.6, + 87.4 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 87.1, + 87.0, + 87.7, + 87.5 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 87.2, + 87.2, + 87.5, + 87.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.1, + 86.9, + 87.5, + 87.7 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 87.2, + 87.2, + 87.5, + 87.2 + ], + "down_blocks.1.attentions.0.proj_in": [ + 86.9, + 87.3, + 87.5, + 87.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 87.2, + 87.2, + 87.8, + 87.3 + ], + "up_blocks.3.attentions.1.proj_out": [ + 87.1, + 87.2, + 87.6, + 87.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.0, + 87.2, + 87.5, + 87.4 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 87.1, + 87.3, + 87.5, + 87.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 87.0, + 87.3, + 87.5, + 87.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 86.8, + 86.8, + 87.3, + 87.1 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 86.9, + 87.0, + 87.5, + 87.0 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 86.8, + 87.0, + 87.2, + 87.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 86.7, + 87.1, + 87.5, + 87.1 + ], + "down_blocks.0.attentions.1.proj_out": [ + 87.0, + 87.1, + 87.6, + 87.2 + ], + "down_blocks.1.attentions.0.proj_out": [ + 86.9, + 86.9, + 87.5, + 87.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 86.8, + 87.2, + 87.3, + 87.1 + ], + 
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 86.9, + 87.2, + 87.3, + 87.2 + ], + "up_blocks.3.resnets.2.conv_shortcut": [ + 86.9, + 87.0, + 87.2, + 87.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 86.8, + 87.1, + 87.1, + 86.9 + ], + "down_blocks.1.attentions.1.proj_in": [ + 86.7, + 86.8, + 87.1, + 87.1 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 86.9, + 87.1, + 87.4, + 87.1 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 86.9, + 86.9, + 87.3, + 87.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 86.8, + 86.9, + 87.1, + 86.8 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 86.9, + 86.9, + 87.2, + 86.9 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 86.7, + 86.9, + 87.0, + 86.8 + ], + "up_blocks.3.attentions.0.proj_in": [ + 86.5, + 87.0, + 87.0, + 86.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 86.7, + 86.7, + 86.8, + 87.1 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 86.9, + 86.7, + 86.9, + 86.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 86.7, + 86.7, + 87.0, + 86.9 + ], + "up_blocks.3.attentions.0.proj_out": [ + 86.7, + 86.7, + 87.1, + 86.9 + ], + "down_blocks.0.resnets.1.conv2": [ + 86.9, + 87.0, + 87.0, + 86.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 86.5, + 86.8, + 87.1, + 86.9 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 86.5, + 86.8, + 87.2, + 86.7 + ], + "up_blocks.3.attentions.2.proj_in": [ + 86.6, + 86.6, + 86.8, + 86.7 + ], + "down_blocks.0.resnets.0.conv1": [ + 86.6, + 86.5, + 86.7, + 86.5 + ], + "up_blocks.2.attentions.1.proj_in": [ + 86.3, + 86.4, + 86.9, + 86.6 + ], + "up_blocks.2.upsamplers.0.conv": [ + 86.5, + 86.3, + 86.9, + 86.6 + ], + "up_blocks.3.resnets.0.conv2": [ + 86.5, + 86.5, + 86.8, + 86.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 86.3, + 86.5, + 86.5, + 86.6 + ], + "up_blocks.2.attentions.2.proj_in": [ + 86.6, + 86.7, + 86.8, + 86.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 86.3, + 86.5, + 87.0, + 86.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 86.5, + 86.6, + 86.7, + 86.8 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 86.4, + 86.4, + 86.8, + 86.7 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 86.3, + 86.5, + 86.8, + 86.4 + ], + "up_blocks.3.resnets.0.conv1": [ + 86.3, + 86.4, + 86.5, + 86.5 + ], + "up_blocks.3.resnets.2.conv2": [ + 86.3, + 86.3, + 86.6, + 86.4 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 86.2, + 86.3, + 86.6, + 86.7 + ], + "up_blocks.3.resnets.1.conv2": [ + 86.3, + 86.1, + 86.9, + 86.6 + ], + "time_embedding.linear_2": [ + 86.1, + 86.2, + 86.5, + 86.5 + ], + "down_blocks.0.resnets.1.conv1": [ + 86.3, + 86.4, + 86.6, + 86.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 86.1, + 86.2, + 86.6, + 86.7 + ], + "up_blocks.2.attentions.2.proj_out": [ + 86.1, + 86.4, + 86.8, + 86.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 86.2, + 86.3, + 86.4, + 86.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 85.9, + 86.2, + 86.5, + 86.5 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 85.9, + 86.0, + 86.3, + 86.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 85.9, + 86.1, + 86.3, + 86.2 + ], + 
"down_blocks.0.attentions.1.proj_in": [ + 85.9, + 86.1, + 86.3, + 86.4 + ], + "up_blocks.3.attentions.1.proj_in": [ + 85.7, + 85.8, + 86.0, + 86.5 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 85.9, + 86.0, + 86.2, + 86.2 + ], + "up_blocks.3.resnets.1.conv1": [ + 85.8, + 85.8, + 86.3, + 86.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 85.8, + 85.9, + 86.0, + 86.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 85.5, + 85.8, + 86.4, + 86.2 + ], + "up_blocks.3.resnets.2.conv1": [ + 85.5, + 85.8, + 86.2, + 86.1 + ], + "down_blocks.0.attentions.0.proj_out": [ + 85.8, + 85.8, + 86.1, + 86.0 + ], + "down_blocks.0.attentions.0.proj_in": [ + 85.6, + 85.8, + 86.1, + 86.3 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 85.4, + 85.6, + 85.9, + 86.0 + ], + "down_blocks.0.resnets.0.conv2": [ + 85.3, + 85.7, + 86.1, + 86.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 85.5, + 85.5, + 86.0, + 85.9 + ] + } + }, + "baselines": { + "original": 88.7, + "linear_8bit": 80.8, + "recipe_7.75_bit_mixedpalette": 86.8, + "recipe_6.40_bit_mixedpalette": 84.5, + "recipe_5.74_bit_mixedpalette": 83.3, + "recipe_5.34_bit_mixedpalette": 82.1, + "recipe_5.10_bit_mixedpalette": 80.7, + "recipe_4.85_bit_mixedpalette": 78.8, + "recipe_4.35_bit_mixedpalette": 77.7 + }, + "recipes": { + "recipe_7.75_bit_mixedpalette": { + "up_blocks.0.resnets.2.time_emb_proj": 1, + "mid_block.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 1, + "up_blocks.0.resnets.0.time_emb_proj": 2, + "up_blocks.0.resnets.1.time_emb_proj": 2, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "mid_block.resnets.0.time_emb_proj": 4, + "down_blocks.3.resnets.0.time_emb_proj": 4, + "down_blocks.3.resnets.1.time_emb_proj": 4, + "up_blocks.0.resnets.1.conv_shortcut": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.0.resnets.0.conv_shortcut": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.1.resnets.1.time_emb_proj": 6, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.2.resnets.0.time_emb_proj": 6, + "mid_block.resnets.1.conv1": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.0.resnets.2.conv_shortcut": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "mid_block.attentions.0.proj_in": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.resnets.0.time_emb_proj": 6, + "down_blocks.3.resnets.0.conv1": 8, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "mid_block.resnets.0.conv2": 4, + "mid_block.resnets.0.conv1": 4, + "mid_block.attentions.0.proj_out": 6, + "down_blocks.3.resnets.1.conv2": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.3.resnets.0.conv2": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.0.resnets.2.conv2": 6, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.1.resnets.2.time_emb_proj": 6, + 
"mid_block.resnets.1.conv2": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.3.resnets.1.conv1": 6, + "up_blocks.0.upsamplers.0.conv": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.resnets.2.conv2": 8, + "up_blocks.0.resnets.0.conv1": 6, + "up_blocks.0.resnets.2.conv1": 6, + "down_blocks.2.resnets.1.time_emb_proj": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.0.resnets.1.conv1": 6, + "up_blocks.0.resnets.0.conv2": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.resnets.0.conv1": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.0.resnets.1.conv2": 6, + "up_blocks.1.resnets.1.conv1": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.downsamplers.0.conv": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.1.resnets.0.conv2": 8, + "up_blocks.1.attentions.0.proj_out": 8, + "up_blocks.1.attentions.1.proj_out": 8, + "up_blocks.1.resnets.1.conv2": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 6, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 8, + "down_blocks.2.resnets.1.conv1": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.2.attentions.1.proj_out": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "down_blocks.2.resnets.1.conv2": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.2.resnets.0.conv2": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.2.resnets.0.conv1": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.2.resnets.0.conv1": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 16, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.1.attentions.0.proj_in": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.2.resnets.0.time_emb_proj": 6, + "down_blocks.2.attentions.0.proj_out": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + 
"up_blocks.3.resnets.0.time_emb_proj": 16, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": 16, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.resnets.0.conv2": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 16, + "down_blocks.1.resnets.1.time_emb_proj": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 16, + "up_blocks.2.resnets.1.time_emb_proj": 8, + "up_blocks.1.attentions.1.proj_in": 8, + "down_blocks.2.attentions.0.proj_in": 8, + "down_blocks.1.resnets.0.conv2": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 8, + "down_blocks.1.resnets.0.time_emb_proj": 8, + "down_blocks.1.resnets.1.conv2": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": 16, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 8, + "down_blocks.2.attentions.1.proj_in": 8, + "up_blocks.2.attentions.0.proj_out": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "down_blocks.1.resnets.1.conv1": 8, + "up_blocks.1.resnets.1.conv_shortcut": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": 16, + "up_blocks.2.resnets.2.conv1": 16, + "down_blocks.1.attentions.1.proj_out": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.0.proj_in": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 16, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 16, + "up_blocks.2.attentions.1.proj_out": 16, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.1.upsamplers.0.conv": 8, + "up_blocks.1.resnets.2.conv_shortcut": 16, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": 16, + "up_blocks.1.attentions.2.proj_in": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": 16, + "down_blocks.1.attentions.0.proj_in": 16, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.resnets.0.conv1": 8, + "up_blocks.1.attentions.2.proj_out": 16, + "up_blocks.2.resnets.1.conv1": 16, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.1.resnets.0.conv_shortcut": 8, + "up_blocks.2.resnets.1.conv2": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.resnets.2.conv2": 16, + "down_blocks.1.attentions.1.proj_in": 16, + "up_blocks.1.resnets.2.conv1": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + 
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 16, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 16, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": 16, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 16, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": 16, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 16, + "down_blocks.1.attentions.0.proj_out": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": 8, + "up_blocks.2.attentions.1.proj_in": 16, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": 8, + "up_blocks.2.attentions.2.proj_in": 16, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 16, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.2.resnets.1.conv_shortcut": 16, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.2.attentions.2.proj_out": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 16, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 16, + "down_blocks.0.attentions.1.proj_out": 16, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 16, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": 16, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": 16, + "down_blocks.0.attentions.0.proj_out": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.2.resnets.2.time_emb_proj": 8, + "down_blocks.0.resnets.0.time_emb_proj": 16, + "down_blocks.0.resnets.1.time_emb_proj": 16, + "up_blocks.3.attentions.0.proj_out": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 16, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 16, + "down_blocks.0.attentions.1.proj_in": 16, + 
"down_blocks.0.attentions.0.proj_in": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": 8, + "down_blocks.2.resnets.0.conv_shortcut": 16, + "down_blocks.0.resnets.1.conv1": 16, + "up_blocks.2.resnets.0.conv_shortcut": 16, + "down_blocks.0.resnets.0.conv1": 16, + "down_blocks.0.resnets.1.conv2": 16, + "down_blocks.1.downsamplers.0.conv": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": 16, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.2.resnets.2.conv_shortcut": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.2.upsamplers.0.conv": 16, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": 8, + "down_blocks.1.resnets.0.conv_shortcut": 16, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.3.resnets.1.time_emb_proj": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": 16, + "down_blocks.0.downsamplers.0.conv": 16, + "up_blocks.3.resnets.0.conv2": 16, + "up_blocks.3.attentions.0.proj_in": 16, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.resnets.0.conv2": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.3.attentions.1.proj_out": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": 16, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 8, + "up_blocks.3.resnets.0.conv1": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": 16, + "up_blocks.3.attentions.2.proj_out": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.3.resnets.1.conv1": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.3.attentions.1.proj_in": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": 16, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.resnets.2.time_emb_proj": 16, + "up_blocks.3.resnets.1.conv2": 16, + "up_blocks.3.resnets.0.conv_shortcut": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.3.resnets.2.conv1": 16, + "up_blocks.3.resnets.1.conv_shortcut": 16, + "up_blocks.3.attentions.2.proj_in": 16, + "up_blocks.3.resnets.2.conv2": 16, + "up_blocks.3.resnets.2.conv_shortcut": 16, + "time_embedding.linear_1": 16, + "time_embedding.linear_2": 16 + }, + "recipe_6.40_bit_mixedpalette": { + "up_blocks.0.resnets.2.time_emb_proj": 1, + "mid_block.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 1, + "up_blocks.0.resnets.0.time_emb_proj": 1, + "up_blocks.0.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 1, + "mid_block.resnets.0.time_emb_proj": 2, + "down_blocks.3.resnets.0.time_emb_proj": 2, + "down_blocks.3.resnets.1.time_emb_proj": 2, + "up_blocks.0.resnets.1.conv_shortcut": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.0.resnets.0.conv_shortcut": 4, + 
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.resnets.1.time_emb_proj": 6, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.2.resnets.0.time_emb_proj": 6, + "mid_block.resnets.1.conv1": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.0.resnets.2.conv_shortcut": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "mid_block.attentions.0.proj_in": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.resnets.0.time_emb_proj": 6, + "down_blocks.3.resnets.0.conv1": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 2, + "mid_block.resnets.0.conv2": 4, + "mid_block.resnets.0.conv1": 4, + "mid_block.attentions.0.proj_out": 4, + "down_blocks.3.resnets.1.conv2": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.3.resnets.0.conv2": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.0.resnets.2.conv2": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.1.resnets.2.time_emb_proj": 6, + "mid_block.resnets.1.conv2": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.3.resnets.1.conv1": 6, + "up_blocks.0.upsamplers.0.conv": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.resnets.2.conv2": 6, + "up_blocks.0.resnets.0.conv1": 4, + "up_blocks.0.resnets.2.conv1": 6, + "down_blocks.2.resnets.1.time_emb_proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.0.resnets.1.conv1": 6, + "up_blocks.0.resnets.0.conv2": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.resnets.0.conv1": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.0.resnets.1.conv2": 6, + "up_blocks.1.resnets.1.conv1": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.downsamplers.0.conv": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.1.resnets.0.conv2": 8, + "up_blocks.1.attentions.0.proj_out": 8, + "up_blocks.1.attentions.1.proj_out": 6, + "up_blocks.1.resnets.1.conv2": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 6, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.resnets.1.conv1": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.attentions.1.proj_out": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 8, + 
"down_blocks.2.resnets.1.conv2": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.2.resnets.0.conv2": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.2.resnets.0.conv1": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.2.resnets.0.conv1": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.1.attentions.0.proj_in": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.2.resnets.0.time_emb_proj": 6, + "down_blocks.2.attentions.0.proj_out": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.resnets.0.time_emb_proj": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.resnets.0.conv2": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.1.resnets.1.time_emb_proj": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.resnets.1.time_emb_proj": 8, + "up_blocks.1.attentions.1.proj_in": 8, + "down_blocks.2.attentions.0.proj_in": 8, + "down_blocks.1.resnets.0.conv2": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 8, + "down_blocks.1.resnets.0.time_emb_proj": 8, + "down_blocks.1.resnets.1.conv2": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 8, + "down_blocks.2.attentions.1.proj_in": 8, + "up_blocks.2.attentions.0.proj_out": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "down_blocks.1.resnets.1.conv1": 8, + "up_blocks.1.resnets.1.conv_shortcut": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.2.resnets.2.conv1": 8, + "down_blocks.1.attentions.1.proj_out": 8, + 
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.0.proj_in": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.2.attentions.1.proj_out": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.1.upsamplers.0.conv": 8, + "up_blocks.1.resnets.2.conv_shortcut": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": 8, + "up_blocks.1.attentions.2.proj_in": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.1.attentions.0.proj_in": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.resnets.0.conv1": 8, + "up_blocks.1.attentions.2.proj_out": 8, + "up_blocks.2.resnets.1.conv1": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.1.resnets.0.conv_shortcut": 6, + "up_blocks.2.resnets.1.conv2": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.resnets.2.conv2": 8, + "down_blocks.1.attentions.1.proj_in": 8, + "up_blocks.1.resnets.2.conv1": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.attentions.0.proj_out": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.attentions.1.proj_in": 16, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.2.attentions.2.proj_in": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.2.resnets.1.conv_shortcut": 16, + 
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.2.attentions.2.proj_out": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 16, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 8, + "down_blocks.0.attentions.1.proj_out": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 16, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": 8, + "down_blocks.0.attentions.0.proj_out": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.2.resnets.2.time_emb_proj": 8, + "down_blocks.0.resnets.0.time_emb_proj": 8, + "down_blocks.0.resnets.1.time_emb_proj": 8, + "up_blocks.3.attentions.0.proj_out": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.0.attentions.1.proj_in": 16, + "down_blocks.0.attentions.0.proj_in": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.resnets.0.conv_shortcut": 8, + "down_blocks.0.resnets.1.conv1": 16, + "up_blocks.2.resnets.0.conv_shortcut": 8, + "down_blocks.0.resnets.0.conv1": 16, + "down_blocks.0.resnets.1.conv2": 8, + "down_blocks.1.downsamplers.0.conv": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.resnets.2.conv_shortcut": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.2.upsamplers.0.conv": 16, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": 8, + "down_blocks.1.resnets.0.conv_shortcut": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.3.resnets.1.time_emb_proj": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.downsamplers.0.conv": 8, + "up_blocks.3.resnets.0.conv2": 16, + "up_blocks.3.attentions.0.proj_in": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.resnets.0.conv2": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.3.attentions.1.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.resnets.0.conv1": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.3.attentions.2.proj_out": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": 16, + 
"up_blocks.3.resnets.1.conv1": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.1.proj_in": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": 16, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.3.resnets.2.time_emb_proj": 8, + "up_blocks.3.resnets.1.conv2": 16, + "up_blocks.3.resnets.0.conv_shortcut": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.resnets.2.conv1": 16, + "up_blocks.3.resnets.1.conv_shortcut": 16, + "up_blocks.3.attentions.2.proj_in": 16, + "up_blocks.3.resnets.2.conv2": 16, + "up_blocks.3.resnets.2.conv_shortcut": 8, + "time_embedding.linear_1": 8, + "time_embedding.linear_2": 16 + }, + "recipe_5.74_bit_mixedpalette": { + "up_blocks.0.resnets.2.time_emb_proj": 1, + "mid_block.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 1, + "up_blocks.0.resnets.0.time_emb_proj": 1, + "up_blocks.0.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 1, + "mid_block.resnets.0.time_emb_proj": 1, + "down_blocks.3.resnets.0.time_emb_proj": 1, + "down_blocks.3.resnets.1.time_emb_proj": 2, + "up_blocks.0.resnets.1.conv_shortcut": 2, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.0.resnets.0.conv_shortcut": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 2, + "up_blocks.1.resnets.1.time_emb_proj": 6, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 2, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.2.resnets.0.time_emb_proj": 6, + "mid_block.resnets.1.conv1": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.0.resnets.2.conv_shortcut": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 2, + "mid_block.attentions.0.proj_in": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.resnets.0.time_emb_proj": 6, + "down_blocks.3.resnets.0.conv1": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 2, + "mid_block.resnets.0.conv2": 4, + "mid_block.resnets.0.conv1": 4, + "mid_block.attentions.0.proj_out": 4, + "down_blocks.3.resnets.1.conv2": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "down_blocks.3.resnets.0.conv2": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.0.resnets.2.conv2": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.resnets.2.time_emb_proj": 6, + "mid_block.resnets.1.conv2": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.3.resnets.1.conv1": 6, + "up_blocks.0.upsamplers.0.conv": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.resnets.2.conv2": 6, + "up_blocks.0.resnets.0.conv1": 4, + "up_blocks.0.resnets.2.conv1": 6, + "down_blocks.2.resnets.1.time_emb_proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.0.resnets.1.conv1": 6, + "up_blocks.0.resnets.0.conv2": 6, + 
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.resnets.0.conv1": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.0.resnets.1.conv2": 4, + "up_blocks.1.resnets.1.conv1": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.downsamplers.0.conv": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.1.resnets.0.conv2": 6, + "up_blocks.1.attentions.0.proj_out": 6, + "up_blocks.1.attentions.1.proj_out": 6, + "up_blocks.1.resnets.1.conv2": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 6, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.resnets.1.conv1": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.attentions.1.proj_out": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.2.resnets.1.conv2": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.2.resnets.0.conv2": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.2.resnets.0.conv1": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.2.resnets.0.conv1": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.1.attentions.0.proj_in": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.resnets.0.time_emb_proj": 6, + "down_blocks.2.attentions.0.proj_out": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.resnets.0.time_emb_proj": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.resnets.0.conv2": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 6, + 
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.1.resnets.1.time_emb_proj": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.resnets.1.time_emb_proj": 8, + "up_blocks.1.attentions.1.proj_in": 8, + "down_blocks.2.attentions.0.proj_in": 8, + "down_blocks.1.resnets.0.conv2": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 8, + "down_blocks.1.resnets.0.time_emb_proj": 6, + "down_blocks.1.resnets.1.conv2": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.1.proj_in": 8, + "up_blocks.2.attentions.0.proj_out": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "down_blocks.1.resnets.1.conv1": 8, + "up_blocks.1.resnets.1.conv_shortcut": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.2.resnets.2.conv1": 8, + "down_blocks.1.attentions.1.proj_out": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.0.proj_in": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.2.attentions.1.proj_out": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.1.upsamplers.0.conv": 8, + "up_blocks.1.resnets.2.conv_shortcut": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": 8, + "up_blocks.1.attentions.2.proj_in": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.1.attentions.0.proj_in": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.resnets.0.conv1": 8, + "up_blocks.1.attentions.2.proj_out": 8, + "up_blocks.2.resnets.1.conv1": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.resnets.0.conv_shortcut": 6, + "up_blocks.2.resnets.1.conv2": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.resnets.2.conv2": 8, + "down_blocks.1.attentions.1.proj_in": 8, + "up_blocks.1.resnets.2.conv1": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 6, + 
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.attentions.0.proj_out": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.attentions.1.proj_in": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.2.attentions.2.proj_in": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.resnets.1.conv_shortcut": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.2.attentions.2.proj_out": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 8, + "down_blocks.0.attentions.1.proj_out": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": 8, + "down_blocks.0.attentions.0.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.2.resnets.2.time_emb_proj": 8, + "down_blocks.0.resnets.0.time_emb_proj": 8, + "down_blocks.0.resnets.1.time_emb_proj": 8, + "up_blocks.3.attentions.0.proj_out": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.0.attentions.1.proj_in": 8, + "down_blocks.0.attentions.0.proj_in": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.resnets.0.conv_shortcut": 8, + "down_blocks.0.resnets.1.conv1": 8, + "up_blocks.2.resnets.0.conv_shortcut": 8, + "down_blocks.0.resnets.0.conv1": 8, + "down_blocks.0.resnets.1.conv2": 8, + "down_blocks.1.downsamplers.0.conv": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": 8, + 
"up_blocks.2.resnets.2.conv_shortcut": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.upsamplers.0.conv": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.resnets.0.conv_shortcut": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.3.resnets.1.time_emb_proj": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.downsamplers.0.conv": 8, + "up_blocks.3.resnets.0.conv2": 8, + "up_blocks.3.attentions.0.proj_in": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.resnets.0.conv2": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.1.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.resnets.0.conv1": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.3.attentions.2.proj_out": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.resnets.1.conv1": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.1.proj_in": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.3.resnets.2.time_emb_proj": 8, + "up_blocks.3.resnets.1.conv2": 8, + "up_blocks.3.resnets.0.conv_shortcut": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.resnets.2.conv1": 8, + "up_blocks.3.resnets.1.conv_shortcut": 8, + "up_blocks.3.attentions.2.proj_in": 8, + "up_blocks.3.resnets.2.conv2": 8, + "up_blocks.3.resnets.2.conv_shortcut": 8, + "time_embedding.linear_1": 8, + "time_embedding.linear_2": 8 + }, + "recipe_5.34_bit_mixedpalette": { + "up_blocks.0.resnets.2.time_emb_proj": 1, + "mid_block.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 1, + "up_blocks.0.resnets.0.time_emb_proj": 1, + "up_blocks.0.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 1, + "mid_block.resnets.0.time_emb_proj": 1, + "down_blocks.3.resnets.0.time_emb_proj": 1, + "down_blocks.3.resnets.1.time_emb_proj": 1, + "up_blocks.0.resnets.1.conv_shortcut": 1, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.0.resnets.0.conv_shortcut": 2, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 2, + "up_blocks.1.resnets.1.time_emb_proj": 4, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 2, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 2, + "up_blocks.2.resnets.0.time_emb_proj": 2, + "mid_block.resnets.1.conv1": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.0.resnets.2.conv_shortcut": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 2, + "mid_block.attentions.0.proj_in": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 6, + 
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 2, + "up_blocks.1.resnets.0.time_emb_proj": 6, + "down_blocks.3.resnets.0.conv1": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 2, + "mid_block.resnets.0.conv2": 2, + "mid_block.resnets.0.conv1": 4, + "mid_block.attentions.0.proj_out": 4, + "down_blocks.3.resnets.1.conv2": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "down_blocks.3.resnets.0.conv2": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.0.resnets.2.conv2": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.resnets.2.time_emb_proj": 6, + "mid_block.resnets.1.conv2": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.3.resnets.1.conv1": 4, + "up_blocks.0.upsamplers.0.conv": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.resnets.2.conv2": 4, + "up_blocks.0.resnets.0.conv1": 4, + "up_blocks.0.resnets.2.conv1": 6, + "down_blocks.2.resnets.1.time_emb_proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.0.resnets.1.conv1": 6, + "up_blocks.0.resnets.0.conv2": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.resnets.0.conv1": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.0.resnets.1.conv2": 4, + "up_blocks.1.resnets.1.conv1": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.downsamplers.0.conv": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.1.resnets.0.conv2": 6, + "up_blocks.1.attentions.0.proj_out": 6, + "up_blocks.1.attentions.1.proj_out": 6, + "up_blocks.1.resnets.1.conv2": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 6, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.resnets.1.conv1": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.attentions.1.proj_out": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.2.resnets.1.conv2": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.2.resnets.0.conv2": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.resnets.0.conv1": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + 
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.2.resnets.0.conv1": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.1.attentions.0.proj_in": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.resnets.0.time_emb_proj": 6, + "down_blocks.2.attentions.0.proj_out": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.3.resnets.0.time_emb_proj": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.resnets.0.conv2": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.1.resnets.1.time_emb_proj": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 6, + "up_blocks.2.resnets.1.time_emb_proj": 6, + "up_blocks.1.attentions.1.proj_in": 6, + "down_blocks.2.attentions.0.proj_in": 8, + "down_blocks.1.resnets.0.conv2": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.resnets.0.time_emb_proj": 6, + "down_blocks.1.resnets.1.conv2": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.1.proj_in": 6, + "up_blocks.2.attentions.0.proj_out": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "down_blocks.1.resnets.1.conv1": 6, + "up_blocks.1.resnets.1.conv_shortcut": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.2.resnets.2.conv1": 6, + "down_blocks.1.attentions.1.proj_out": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.0.proj_in": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.2.attentions.1.proj_out": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.1.upsamplers.0.conv": 6, + "up_blocks.1.resnets.2.conv_shortcut": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.attentions.2.proj_in": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 6, + 
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.attentions.0.proj_in": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.resnets.0.conv1": 6, + "up_blocks.1.attentions.2.proj_out": 8, + "up_blocks.2.resnets.1.conv1": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.resnets.0.conv_shortcut": 6, + "up_blocks.2.resnets.1.conv2": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.resnets.2.conv2": 8, + "down_blocks.1.attentions.1.proj_in": 8, + "up_blocks.1.resnets.2.conv1": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.attentions.0.proj_out": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.attentions.1.proj_in": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.2.attentions.2.proj_in": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.resnets.1.conv_shortcut": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.2.attentions.2.proj_out": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 8, + "down_blocks.0.attentions.1.proj_out": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": 8, + 
"down_blocks.0.attentions.0.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.2.resnets.2.time_emb_proj": 6, + "down_blocks.0.resnets.0.time_emb_proj": 8, + "down_blocks.0.resnets.1.time_emb_proj": 6, + "up_blocks.3.attentions.0.proj_out": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.0.attentions.1.proj_in": 8, + "down_blocks.0.attentions.0.proj_in": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.resnets.0.conv_shortcut": 8, + "down_blocks.0.resnets.1.conv1": 8, + "up_blocks.2.resnets.0.conv_shortcut": 8, + "down_blocks.0.resnets.0.conv1": 8, + "down_blocks.0.resnets.1.conv2": 8, + "down_blocks.1.downsamplers.0.conv": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.resnets.2.conv_shortcut": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.upsamplers.0.conv": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.resnets.0.conv_shortcut": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.3.resnets.1.time_emb_proj": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.downsamplers.0.conv": 8, + "up_blocks.3.resnets.0.conv2": 8, + "up_blocks.3.attentions.0.proj_in": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.resnets.0.conv2": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.1.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.resnets.0.conv1": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.3.attentions.2.proj_out": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.resnets.1.conv1": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.1.proj_in": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.3.resnets.2.time_emb_proj": 8, + "up_blocks.3.resnets.1.conv2": 8, + "up_blocks.3.resnets.0.conv_shortcut": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.resnets.2.conv1": 8, + "up_blocks.3.resnets.1.conv_shortcut": 8, + "up_blocks.3.attentions.2.proj_in": 8, + "up_blocks.3.resnets.2.conv2": 8, + "up_blocks.3.resnets.2.conv_shortcut": 8, + 
"time_embedding.linear_1": 6, + "time_embedding.linear_2": 8 + }, + "recipe_5.10_bit_mixedpalette": { + "up_blocks.0.resnets.2.time_emb_proj": 1, + "mid_block.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 1, + "up_blocks.0.resnets.0.time_emb_proj": 1, + "up_blocks.0.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 1, + "mid_block.resnets.0.time_emb_proj": 1, + "down_blocks.3.resnets.0.time_emb_proj": 1, + "down_blocks.3.resnets.1.time_emb_proj": 1, + "up_blocks.0.resnets.1.conv_shortcut": 1, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "up_blocks.0.resnets.0.conv_shortcut": 1, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 1, + "up_blocks.1.resnets.1.time_emb_proj": 4, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 2, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 2, + "up_blocks.2.resnets.0.time_emb_proj": 2, + "mid_block.resnets.1.conv1": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 2, + "up_blocks.0.resnets.2.conv_shortcut": 2, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 2, + "mid_block.attentions.0.proj_in": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 2, + "up_blocks.1.resnets.0.time_emb_proj": 4, + "down_blocks.3.resnets.0.conv1": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 2, + "mid_block.resnets.0.conv2": 2, + "mid_block.resnets.0.conv1": 4, + "mid_block.attentions.0.proj_out": 4, + "down_blocks.3.resnets.1.conv2": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "down_blocks.3.resnets.0.conv2": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.0.resnets.2.conv2": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.resnets.2.time_emb_proj": 4, + "mid_block.resnets.1.conv2": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 2, + "down_blocks.3.resnets.1.conv1": 4, + "up_blocks.0.upsamplers.0.conv": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.resnets.2.conv2": 4, + "up_blocks.0.resnets.0.conv1": 4, + "up_blocks.0.resnets.2.conv1": 6, + "down_blocks.2.resnets.1.time_emb_proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.0.resnets.1.conv1": 6, + "up_blocks.0.resnets.0.conv2": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.resnets.0.conv1": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.0.resnets.1.conv2": 4, + "up_blocks.1.resnets.1.conv1": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.downsamplers.0.conv": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + 
"mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.1.resnets.0.conv2": 6, + "up_blocks.1.attentions.0.proj_out": 6, + "up_blocks.1.attentions.1.proj_out": 6, + "up_blocks.1.resnets.1.conv2": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.resnets.1.conv1": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.attentions.1.proj_out": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.2.resnets.1.conv2": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.2.resnets.0.conv2": 4, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.resnets.0.conv1": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.2.resnets.0.conv1": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.proj_in": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.resnets.0.time_emb_proj": 6, + "down_blocks.2.attentions.0.proj_out": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.3.resnets.0.time_emb_proj": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.2.resnets.0.conv2": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.resnets.1.time_emb_proj": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 6, + "up_blocks.2.resnets.1.time_emb_proj": 6, + "up_blocks.1.attentions.1.proj_in": 6, + "down_blocks.2.attentions.0.proj_in": 6, + "down_blocks.1.resnets.0.conv2": 6, + 
"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.resnets.0.time_emb_proj": 6, + "down_blocks.1.resnets.1.conv2": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.1.proj_in": 6, + "up_blocks.2.attentions.0.proj_out": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "down_blocks.1.resnets.1.conv1": 6, + "up_blocks.1.resnets.1.conv_shortcut": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.2.resnets.2.conv1": 6, + "down_blocks.1.attentions.1.proj_out": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.2.attentions.0.proj_in": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.2.attentions.1.proj_out": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.1.upsamplers.0.conv": 6, + "up_blocks.1.resnets.2.conv_shortcut": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.attentions.2.proj_in": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.attentions.0.proj_in": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.resnets.0.conv1": 6, + "up_blocks.1.attentions.2.proj_out": 6, + "up_blocks.2.resnets.1.conv1": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.resnets.0.conv_shortcut": 6, + "up_blocks.2.resnets.1.conv2": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.2.resnets.2.conv2": 6, + "down_blocks.1.attentions.1.proj_in": 6, + "up_blocks.1.resnets.2.conv1": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.attentions.0.proj_out": 6, + 
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.attentions.1.proj_in": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.2.attentions.2.proj_in": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.resnets.1.conv_shortcut": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.2.attentions.2.proj_out": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.proj_out": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.0.attentions.0.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.2.resnets.2.time_emb_proj": 6, + "down_blocks.0.resnets.0.time_emb_proj": 8, + "down_blocks.0.resnets.1.time_emb_proj": 6, + "up_blocks.3.attentions.0.proj_out": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.0.attentions.1.proj_in": 8, + "down_blocks.0.attentions.0.proj_in": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.resnets.0.conv_shortcut": 8, + "down_blocks.0.resnets.1.conv1": 8, + "up_blocks.2.resnets.0.conv_shortcut": 8, + "down_blocks.0.resnets.0.conv1": 8, + "down_blocks.0.resnets.1.conv2": 8, + "down_blocks.1.downsamplers.0.conv": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.resnets.2.conv_shortcut": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.upsamplers.0.conv": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.resnets.0.conv_shortcut": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.3.resnets.1.time_emb_proj": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.downsamplers.0.conv": 8, + "up_blocks.3.resnets.0.conv2": 8, + 
"up_blocks.3.attentions.0.proj_in": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.resnets.0.conv2": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.1.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.resnets.0.conv1": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.3.attentions.2.proj_out": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.resnets.1.conv1": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.1.proj_in": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.3.resnets.2.time_emb_proj": 6, + "up_blocks.3.resnets.1.conv2": 8, + "up_blocks.3.resnets.0.conv_shortcut": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.resnets.2.conv1": 8, + "up_blocks.3.resnets.1.conv_shortcut": 8, + "up_blocks.3.attentions.2.proj_in": 8, + "up_blocks.3.resnets.2.conv2": 8, + "up_blocks.3.resnets.2.conv_shortcut": 8, + "time_embedding.linear_1": 6, + "time_embedding.linear_2": 8 + }, + "recipe_4.85_bit_mixedpalette": { + "up_blocks.0.resnets.2.time_emb_proj": 1, + "mid_block.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 1, + "up_blocks.0.resnets.0.time_emb_proj": 1, + "up_blocks.0.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 1, + "mid_block.resnets.0.time_emb_proj": 1, + "down_blocks.3.resnets.0.time_emb_proj": 1, + "down_blocks.3.resnets.1.time_emb_proj": 1, + "up_blocks.0.resnets.1.conv_shortcut": 1, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "up_blocks.0.resnets.0.conv_shortcut": 1, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 1, + "up_blocks.1.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 1, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 1, + "up_blocks.2.resnets.0.time_emb_proj": 1, + "mid_block.resnets.1.conv1": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 2, + "up_blocks.0.resnets.2.conv_shortcut": 2, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 2, + "mid_block.attentions.0.proj_in": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 2, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 2, + "up_blocks.1.resnets.0.time_emb_proj": 4, + "down_blocks.3.resnets.0.conv1": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 2, + "mid_block.resnets.0.conv2": 2, + "mid_block.resnets.0.conv1": 4, + "mid_block.attentions.0.proj_out": 4, + "down_blocks.3.resnets.1.conv2": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "down_blocks.3.resnets.0.conv2": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 2, + "up_blocks.0.resnets.2.conv2": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 4, + 
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.resnets.2.time_emb_proj": 4, + "mid_block.resnets.1.conv2": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 2, + "down_blocks.3.resnets.1.conv1": 4, + "up_blocks.0.upsamplers.0.conv": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 2, + "up_blocks.1.resnets.2.conv2": 4, + "up_blocks.0.resnets.0.conv1": 4, + "up_blocks.0.resnets.2.conv1": 6, + "down_blocks.2.resnets.1.time_emb_proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.0.resnets.1.conv1": 6, + "up_blocks.0.resnets.0.conv2": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.resnets.0.conv1": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.0.resnets.1.conv2": 4, + "up_blocks.1.resnets.1.conv1": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.downsamplers.0.conv": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.1.resnets.0.conv2": 6, + "up_blocks.1.attentions.0.proj_out": 6, + "up_blocks.1.attentions.1.proj_out": 6, + "up_blocks.1.resnets.1.conv2": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.resnets.1.conv1": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.attentions.1.proj_out": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.2.resnets.1.conv2": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "down_blocks.2.resnets.0.conv2": 4, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.resnets.0.conv1": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.2.resnets.0.conv1": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.proj_in": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.resnets.0.time_emb_proj": 4, + "down_blocks.2.attentions.0.proj_out": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 4, + 
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.3.resnets.0.time_emb_proj": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.2.resnets.0.conv2": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.resnets.1.time_emb_proj": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 6, + "up_blocks.2.resnets.1.time_emb_proj": 4, + "up_blocks.1.attentions.1.proj_in": 6, + "down_blocks.2.attentions.0.proj_in": 6, + "down_blocks.1.resnets.0.conv2": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.resnets.0.time_emb_proj": 4, + "down_blocks.1.resnets.1.conv2": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.1.proj_in": 6, + "up_blocks.2.attentions.0.proj_out": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "down_blocks.1.resnets.1.conv1": 6, + "up_blocks.1.resnets.1.conv_shortcut": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.resnets.2.conv1": 6, + "down_blocks.1.attentions.1.proj_out": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.2.attentions.0.proj_in": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.2.attentions.1.proj_out": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.1.upsamplers.0.conv": 6, + "up_blocks.1.resnets.2.conv_shortcut": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.attentions.2.proj_in": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.attentions.0.proj_in": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.1.resnets.0.conv1": 6, + "up_blocks.1.attentions.2.proj_out": 6, + "up_blocks.2.resnets.1.conv1": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.resnets.0.conv_shortcut": 6, + "up_blocks.2.resnets.1.conv2": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.2.resnets.2.conv2": 6, + "down_blocks.1.attentions.1.proj_in": 6, + "up_blocks.1.resnets.2.conv1": 6, + 
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.attentions.0.proj_out": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.attentions.1.proj_in": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.2.attentions.2.proj_in": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.resnets.1.conv_shortcut": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.2.attentions.2.proj_out": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.proj_out": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.0.attentions.0.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.2.resnets.2.time_emb_proj": 6, + "down_blocks.0.resnets.0.time_emb_proj": 6, + "down_blocks.0.resnets.1.time_emb_proj": 6, + "up_blocks.3.attentions.0.proj_out": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 6, + 
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.0.attentions.1.proj_in": 8, + "down_blocks.0.attentions.0.proj_in": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.resnets.0.conv_shortcut": 6, + "down_blocks.0.resnets.1.conv1": 8, + "up_blocks.2.resnets.0.conv_shortcut": 6, + "down_blocks.0.resnets.0.conv1": 6, + "down_blocks.0.resnets.1.conv2": 8, + "down_blocks.1.downsamplers.0.conv": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.2.resnets.2.conv_shortcut": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.upsamplers.0.conv": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.resnets.0.conv_shortcut": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.3.resnets.1.time_emb_proj": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.downsamplers.0.conv": 8, + "up_blocks.3.resnets.0.conv2": 8, + "up_blocks.3.attentions.0.proj_in": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 4, + "down_blocks.0.resnets.0.conv2": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.1.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.resnets.0.conv1": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.3.attentions.2.proj_out": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.resnets.1.conv1": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.1.proj_in": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.3.resnets.2.time_emb_proj": 6, + "up_blocks.3.resnets.1.conv2": 8, + "up_blocks.3.resnets.0.conv_shortcut": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.resnets.2.conv1": 8, + "up_blocks.3.resnets.1.conv_shortcut": 8, + "up_blocks.3.attentions.2.proj_in": 8, + "up_blocks.3.resnets.2.conv2": 8, + "up_blocks.3.resnets.2.conv_shortcut": 8, + "time_embedding.linear_1": 6, + "time_embedding.linear_2": 6 + }, + "recipe_4.35_bit_mixedpalette": { + "up_blocks.0.resnets.2.time_emb_proj": 1, + "mid_block.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 1, + "up_blocks.0.resnets.0.time_emb_proj": 1, + "up_blocks.0.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 1, + "mid_block.resnets.0.time_emb_proj": 1, + "down_blocks.3.resnets.0.time_emb_proj": 1, + "down_blocks.3.resnets.1.time_emb_proj": 1, + "up_blocks.0.resnets.1.conv_shortcut": 1, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + 
"mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "up_blocks.0.resnets.0.conv_shortcut": 1, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 1, + "up_blocks.1.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 1, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 1, + "up_blocks.2.resnets.0.time_emb_proj": 1, + "mid_block.resnets.1.conv1": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 1, + "up_blocks.0.resnets.2.conv_shortcut": 1, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 1, + "mid_block.attentions.0.proj_in": 2, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 2, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 2, + "up_blocks.1.resnets.0.time_emb_proj": 2, + "down_blocks.3.resnets.0.conv1": 2, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 2, + "mid_block.resnets.0.conv2": 2, + "mid_block.resnets.0.conv1": 2, + "mid_block.attentions.0.proj_out": 2, + "down_blocks.3.resnets.1.conv2": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 2, + "down_blocks.3.resnets.0.conv2": 2, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 2, + "up_blocks.0.resnets.2.conv2": 2, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 2, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.resnets.2.time_emb_proj": 4, + "mid_block.resnets.1.conv2": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 2, + "down_blocks.3.resnets.1.conv1": 4, + "up_blocks.0.upsamplers.0.conv": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 2, + "up_blocks.1.resnets.2.conv2": 4, + "up_blocks.0.resnets.0.conv1": 4, + "up_blocks.0.resnets.2.conv1": 6, + "down_blocks.2.resnets.1.time_emb_proj": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.0.resnets.1.conv1": 4, + "up_blocks.0.resnets.0.conv2": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.resnets.0.conv1": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 2, + "up_blocks.0.resnets.1.conv2": 4, + "up_blocks.1.resnets.1.conv1": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.downsamplers.0.conv": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.1.resnets.0.conv2": 6, + "up_blocks.1.attentions.0.proj_out": 6, + "up_blocks.1.attentions.1.proj_out": 4, + "up_blocks.1.resnets.1.conv2": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 4, + "down_blocks.2.resnets.1.conv1": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.attentions.1.proj_out": 6, + 
"up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.2.resnets.1.conv2": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "down_blocks.2.resnets.0.conv2": 4, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.resnets.0.conv1": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.2.resnets.0.conv1": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.proj_in": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.resnets.0.time_emb_proj": 4, + "down_blocks.2.attentions.0.proj_out": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.3.resnets.0.time_emb_proj": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.2.resnets.0.conv2": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.resnets.1.time_emb_proj": 4, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 6, + "up_blocks.2.resnets.1.time_emb_proj": 4, + "up_blocks.1.attentions.1.proj_in": 6, + "down_blocks.2.attentions.0.proj_in": 6, + "down_blocks.1.resnets.0.conv2": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.resnets.0.time_emb_proj": 4, + "down_blocks.1.resnets.1.conv2": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.1.proj_in": 6, + "up_blocks.2.attentions.0.proj_out": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "down_blocks.1.resnets.1.conv1": 6, + "up_blocks.1.resnets.1.conv_shortcut": 6, + 
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.resnets.2.conv1": 6, + "down_blocks.1.attentions.1.proj_out": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.2.attentions.0.proj_in": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.2.attentions.1.proj_out": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.1.upsamplers.0.conv": 6, + "up_blocks.1.resnets.2.conv_shortcut": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.attentions.2.proj_in": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.attentions.0.proj_in": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.1.resnets.0.conv1": 6, + "up_blocks.1.attentions.2.proj_out": 6, + "up_blocks.2.resnets.1.conv1": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.resnets.0.conv_shortcut": 6, + "up_blocks.2.resnets.1.conv2": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.2.resnets.2.conv2": 6, + "down_blocks.1.attentions.1.proj_in": 6, + "up_blocks.1.resnets.2.conv1": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.1.attentions.0.proj_out": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.attentions.1.proj_in": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.2.attentions.2.proj_in": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 6, + 
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.resnets.1.conv_shortcut": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.2.attentions.2.proj_out": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.proj_out": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.0.attentions.0.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.2.resnets.2.time_emb_proj": 6, + "down_blocks.0.resnets.0.time_emb_proj": 6, + "down_blocks.0.resnets.1.time_emb_proj": 6, + "up_blocks.3.attentions.0.proj_out": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.0.attentions.1.proj_in": 8, + "down_blocks.0.attentions.0.proj_in": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.2.resnets.0.conv_shortcut": 6, + "down_blocks.0.resnets.1.conv1": 6, + "up_blocks.2.resnets.0.conv_shortcut": 6, + "down_blocks.0.resnets.0.conv1": 6, + "down_blocks.0.resnets.1.conv2": 8, + "down_blocks.1.downsamplers.0.conv": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.2.resnets.2.conv_shortcut": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.upsamplers.0.conv": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.resnets.0.conv_shortcut": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.3.resnets.1.time_emb_proj": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.0.downsamplers.0.conv": 6, + "up_blocks.3.resnets.0.conv2": 8, + "up_blocks.3.attentions.0.proj_in": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 4, + "down_blocks.0.resnets.0.conv2": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.1.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.resnets.0.conv1": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": 8, + 
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.3.attentions.2.proj_out": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.resnets.1.conv1": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.1.proj_in": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.3.resnets.2.time_emb_proj": 6, + "up_blocks.3.resnets.1.conv2": 8, + "up_blocks.3.resnets.0.conv_shortcut": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.resnets.2.conv1": 8, + "up_blocks.3.resnets.1.conv_shortcut": 8, + "up_blocks.3.attentions.2.proj_in": 8, + "up_blocks.3.resnets.2.conv2": 8, + "up_blocks.3.resnets.2.conv_shortcut": 8, + "time_embedding.linear_1": 6, + "time_embedding.linear_2": 6 + } + } +} \ No newline at end of file diff --git a/recipes/stabilityai-stable-diffusion-2-1-base_palettization_recipe.json b/recipes/stabilityai-stable-diffusion-2-1-base_palettization_recipe.json new file mode 100644 index 0000000000000000000000000000000000000000..22d091061a24bdffd3f77f8c476043103ba9dea7 --- /dev/null +++ b/recipes/stabilityai-stable-diffusion-2-1-base_palettization_recipe.json @@ -0,0 +1,21664 @@ +{ + "model_version": "stabilityai/stable-diffusion-2-1-base", + "single_layer": { + "1": { + "time_embedding.linear_1": [ + 60.6, + 60.5, + 60.4, + 60.4 + ], + "time_embedding.linear_2": [ + 60.0, + 60.2, + 59.9, + 59.9 + ], + "down_blocks.0.attentions.0.proj_in": [ + 73.0, + 74.4, + 74.5, + 72.7 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 79.6, + 79.6, + 76.6, + 78.0 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 80.8, + 80.7, + 78.5, + 79.9 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 73.9, + 75.9, + 74.8, + 73.5 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 76.2, + 76.0, + 76.6, + 74.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 83.2, + 81.8, + 84.0, + 83.6 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 77.3, + 76.8, + 77.5, + 75.6 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 83.9, + 83.6, + 84.4, + 83.8 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 84.2, + 84.2, + 83.2, + 82.8 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 76.1, + 76.2, + 76.2, + 71.5 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 74.4, + 77.8, + 77.4, + 74.8 + ], + "down_blocks.0.attentions.0.proj_out": [ + 74.9, + 75.6, + 75.5, + 74.3 + ], + "down_blocks.0.attentions.1.proj_in": [ + 73.5, + 73.5, + 74.3, + 72.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 77.0, + 75.1, + 76.8, + 75.4 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 76.8, + 76.3, + 77.8, + 74.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 76.3, + 75.1, + 77.1, + 75.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 75.8, + 76.3, + 75.8, + 74.4 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 80.2, + 79.6, + 78.4, + 73.4 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 79.3, + 78.2, + 76.9, + 75.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 82.6, + 81.9, + 77.4, + 74.9 + ], + 
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 82.8, + 82.4, + 77.2, + 79.8 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 72.6, + 73.9, + 73.1, + 73.0 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 76.6, + 76.8, + 76.1, + 75.6 + ], + "down_blocks.0.attentions.1.proj_out": [ + 75.2, + 75.5, + 76.0, + 72.5 + ], + "down_blocks.0.resnets.0.conv1": [ + 74.5, + 75.7, + 75.5, + 74.6 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 73.5, + 73.5, + 73.7, + 70.4 + ], + "down_blocks.0.resnets.0.conv2": [ + 70.5, + 70.3, + 70.3, + 69.5 + ], + "down_blocks.0.resnets.1.conv1": [ + 73.2, + 73.2, + 73.3, + 72.4 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 76.3, + 76.6, + 77.0, + 75.9 + ], + "down_blocks.0.resnets.1.conv2": [ + 75.7, + 73.9, + 75.7, + 74.7 + ], + "down_blocks.0.downsamplers.0.conv": [ + 70.5, + 73.0, + 72.6, + 71.2 + ], + "down_blocks.1.attentions.0.proj_in": [ + 75.0, + 76.2, + 76.6, + 75.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 81.5, + 81.2, + 77.2, + 78.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 82.2, + 82.0, + 82.7, + 78.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 77.0, + 78.5, + 75.2, + 71.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 75.7, + 75.2, + 74.9, + 73.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 78.1, + 80.2, + 80.7, + 75.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 70.3, + 74.3, + 73.6, + 73.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 83.2, + 82.8, + 76.9, + 78.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 83.9, + 84.1, + 86.5, + 74.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 74.0, + 76.3, + 76.4, + 73.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 77.5, + 78.7, + 78.6, + 75.4 + ], + "down_blocks.1.attentions.0.proj_out": [ + 75.6, + 77.3, + 76.7, + 75.7 + ], + "down_blocks.1.attentions.1.proj_in": [ + 77.7, + 80.3, + 70.4, + 71.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 82.5, + 82.1, + 78.1, + 75.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 83.0, + 82.0, + 77.4, + 75.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 80.3, + 80.8, + 80.1, + 77.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 80.3, + 80.2, + 77.1, + 76.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 82.8, + 82.9, + 76.2, + 74.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 75.9, + 76.2, + 72.1, + 75.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 85.7, + 86.4, + 84.7, + 84.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 87.1, + 87.2, + 87.1, + 79.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 77.6, + 80.4, + 79.8, + 76.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 77.8, + 83.4, + 83.7, + 81.5 + ], + "down_blocks.1.attentions.1.proj_out": [ + 80.8, + 82.8, + 82.7, + 79.9 + ], + "down_blocks.1.resnets.0.conv1": [ + 80.0, + 81.1, + 76.5, + 77.0 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 78.5, + 79.3, + 79.6, + 76.7 + ], + "down_blocks.1.resnets.0.conv2": [ + 76.9, + 79.7, + 81.1, + 77.6 + ], + 
"down_blocks.1.resnets.0.conv_shortcut": [ + 72.9, + 72.5, + 74.4, + 72.7 + ], + "down_blocks.1.resnets.1.conv1": [ + 81.2, + 82.1, + 77.1, + 77.3 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 82.0, + 82.5, + 83.8, + 78.2 + ], + "down_blocks.1.resnets.1.conv2": [ + 81.4, + 83.1, + 78.8, + 76.2 + ], + "down_blocks.1.downsamplers.0.conv": [ + 78.5, + 78.9, + 81.8, + 76.3 + ], + "down_blocks.2.attentions.0.proj_in": [ + 84.3, + 84.7, + 78.8, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 86.5, + 87.2, + 80.1, + 73.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.0, + 87.3, + 76.5, + 73.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 80.6, + 84.2, + 83.1, + 77.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 82.1, + 82.9, + 81.6, + 77.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 80.1, + 80.8, + 82.2, + 75.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 75.1, + 76.5, + 78.0, + 75.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 83.3, + 83.9, + 78.0, + 74.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 85.6, + 85.4, + 86.4, + 74.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 80.8, + 81.8, + 82.0, + 75.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 83.4, + 84.2, + 81.4, + 80.7 + ], + "down_blocks.2.attentions.0.proj_out": [ + 84.0, + 83.4, + 82.2, + 73.2 + ], + "down_blocks.2.attentions.1.proj_in": [ + 83.1, + 84.6, + 80.3, + 78.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 87.9, + 87.7, + 79.0, + 86.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 87.8, + 87.9, + 77.0, + 86.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 81.5, + 81.9, + 79.0, + 83.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 83.9, + 84.1, + 78.3, + 78.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 81.1, + 79.9, + 82.0, + 76.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 79.8, + 79.8, + 76.9, + 80.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 84.5, + 83.7, + 84.6, + 73.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 87.9, + 88.1, + 87.4, + 86.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 81.4, + 82.3, + 76.5, + 77.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 84.9, + 84.8, + 79.4, + 84.8 + ], + "down_blocks.2.attentions.1.proj_out": [ + 83.1, + 83.1, + 79.0, + 78.4 + ], + "down_blocks.2.resnets.0.conv1": [ + 83.0, + 81.7, + 84.1, + 77.8 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 84.8, + 83.7, + 86.5, + 78.1 + ], + "down_blocks.2.resnets.0.conv2": [ + 77.8, + 76.8, + 84.8, + 78.1 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 78.9, + 81.1, + 82.0, + 75.7 + ], + "down_blocks.2.resnets.1.conv1": [ + 85.8, + 87.1, + 82.0, + 78.3 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 85.9, + 85.9, + 87.1, + 74.1 + ], + "down_blocks.2.resnets.1.conv2": [ + 84.6, + 83.8, + 86.5, + 83.3 + ], + "down_blocks.2.downsamplers.0.conv": [ + 86.5, + 86.7, + 87.5, + 78.9 + ], + "down_blocks.3.resnets.0.conv1": [ + 87.2, + 87.4, + 87.5, + 83.4 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 88.3, + 88.4, + 88.5, + 88.2 + ], + 
"down_blocks.3.resnets.0.conv2": [ + 86.6, + 86.5, + 86.7, + 86.0 + ], + "down_blocks.3.resnets.1.conv1": [ + 88.1, + 88.1, + 87.5, + 86.9 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 88.6, + 89.0, + 88.5, + 88.5 + ], + "down_blocks.3.resnets.1.conv2": [ + 87.5, + 88.0, + 87.8, + 84.8 + ], + "up_blocks.0.resnets.0.conv1": [ + 86.5, + 86.6, + 87.4, + 87.2 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 88.6, + 88.4, + 88.9, + 88.5 + ], + "up_blocks.0.resnets.0.conv2": [ + 84.8, + 86.4, + 86.2, + 85.8 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 88.0, + 88.3, + 88.2, + 83.7 + ], + "up_blocks.0.resnets.1.conv1": [ + 86.1, + 86.5, + 87.4, + 85.5 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 88.6, + 88.7, + 88.6, + 88.8 + ], + "up_blocks.0.resnets.1.conv2": [ + 84.5, + 85.4, + 85.4, + 85.1 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 87.7, + 88.3, + 87.7, + 82.3 + ], + "up_blocks.0.resnets.2.conv1": [ + 85.3, + 86.2, + 85.8, + 85.4 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 88.3, + 88.4, + 88.4, + 88.3 + ], + "up_blocks.0.resnets.2.conv2": [ + 81.3, + 83.1, + 83.6, + 80.2 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 87.5, + 88.1, + 88.2, + 87.6 + ], + "up_blocks.0.upsamplers.0.conv": [ + 81.4, + 81.9, + 80.0, + 79.8 + ], + "up_blocks.1.attentions.0.proj_in": [ + 83.0, + 82.6, + 79.5, + 77.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 85.6, + 86.7, + 81.4, + 81.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 85.8, + 86.9, + 78.1, + 82.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 83.0, + 84.2, + 77.0, + 77.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 83.6, + 84.5, + 81.2, + 77.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 83.3, + 82.5, + 85.0, + 83.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 81.5, + 81.4, + 83.8, + 79.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 85.2, + 85.6, + 86.7, + 81.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 85.4, + 84.9, + 87.4, + 87.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 82.0, + 83.3, + 82.7, + 79.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 84.0, + 83.9, + 79.6, + 77.3 + ], + "up_blocks.1.attentions.0.proj_out": [ + 83.6, + 84.1, + 82.2, + 83.0 + ], + "up_blocks.1.attentions.1.proj_in": [ + 83.2, + 82.5, + 84.6, + 73.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 87.3, + 87.3, + 82.3, + 78.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 87.4, + 87.7, + 80.2, + 79.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 84.1, + 84.3, + 81.7, + 77.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 84.6, + 84.1, + 80.6, + 77.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 82.1, + 82.2, + 83.6, + 80.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 77.9, + 80.0, + 80.7, + 79.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 83.7, + 84.4, + 84.8, + 82.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 87.4, + 88.0, + 88.4, + 86.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 82.9, + 82.9, + 84.2, + 81.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 84.7, + 84.8, + 85.9, + 79.9 + ], + 
"up_blocks.1.attentions.1.proj_out": [ + 84.1, + 84.6, + 85.2, + 83.5 + ], + "up_blocks.1.attentions.2.proj_in": [ + 80.7, + 81.3, + 82.2, + 75.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 85.8, + 86.0, + 86.9, + 80.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 86.5, + 86.6, + 87.2, + 84.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 83.6, + 84.2, + 85.3, + 83.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 83.1, + 83.1, + 84.0, + 81.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 81.2, + 81.3, + 82.5, + 81.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 78.3, + 78.7, + 80.5, + 79.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 84.2, + 83.6, + 84.6, + 82.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 85.5, + 85.6, + 86.1, + 85.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 82.7, + 82.4, + 83.5, + 79.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 84.5, + 84.6, + 85.2, + 82.0 + ], + "up_blocks.1.attentions.2.proj_out": [ + 80.6, + 80.7, + 81.7, + 77.3 + ], + "up_blocks.1.resnets.0.conv1": [ + 83.4, + 84.1, + 84.7, + 81.3 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 84.2, + 84.4, + 85.5, + 84.8 + ], + "up_blocks.1.resnets.0.conv2": [ + 82.9, + 83.3, + 84.6, + 78.3 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 82.9, + 79.6, + 84.0, + 77.6 + ], + "up_blocks.1.resnets.1.conv1": [ + 85.0, + 85.1, + 81.9, + 79.4 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 86.2, + 86.6, + 86.9, + 85.7 + ], + "up_blocks.1.resnets.1.conv2": [ + 85.1, + 85.4, + 81.5, + 77.4 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 82.4, + 82.2, + 83.1, + 77.9 + ], + "up_blocks.1.resnets.2.conv1": [ + 83.4, + 83.1, + 84.2, + 81.0 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 86.3, + 86.3, + 86.9, + 86.5 + ], + "up_blocks.1.resnets.2.conv2": [ + 84.0, + 85.0, + 85.1, + 80.8 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 81.1, + 82.4, + 81.6, + 79.0 + ], + "up_blocks.1.upsamplers.0.conv": [ + 81.1, + 82.2, + 81.8, + 79.3 + ], + "up_blocks.2.attentions.0.proj_in": [ + 78.0, + 78.4, + 76.0, + 68.1 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 84.7, + 84.4, + 85.0, + 82.8 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 84.6, + 84.0, + 85.0, + 83.0 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 81.5, + 81.2, + 82.0, + 80.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 81.4, + 81.0, + 83.0, + 80.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 80.7, + 80.5, + 82.3, + 81.2 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 78.1, + 77.7, + 78.0, + 78.0 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 85.2, + 85.3, + 85.4, + 84.2 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 83.8, + 84.1, + 84.4, + 83.0 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 81.0, + 80.9, + 81.7, + 78.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 82.8, + 83.0, + 83.6, + 79.5 + ], + "up_blocks.2.attentions.0.proj_out": [ + 75.9, + 76.1, + 76.6, + 69.0 + ], + "up_blocks.2.attentions.1.proj_in": [ + 77.0, + 77.0, + 77.7, + 71.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 81.5, + 81.4, + 82.3, + 79.1 + ], + 
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 81.1, + 81.3, + 81.8, + 79.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 77.4, + 77.3, + 77.6, + 76.2 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 77.8, + 78.1, + 78.3, + 76.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 80.7, + 80.7, + 81.1, + 79.9 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 74.3, + 74.7, + 76.7, + 75.9 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 83.5, + 84.9, + 84.5, + 83.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 87.8, + 87.8, + 88.3, + 88.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 76.7, + 75.7, + 78.6, + 77.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 78.4, + 78.0, + 79.9, + 78.6 + ], + "up_blocks.2.attentions.1.proj_out": [ + 77.4, + 76.9, + 77.9, + 72.2 + ], + "up_blocks.2.attentions.2.proj_in": [ + 72.6, + 74.1, + 73.3, + 71.9 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 80.1, + 80.3, + 80.8, + 79.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 80.5, + 80.6, + 81.2, + 79.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 75.0, + 75.7, + 75.9, + 74.1 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 74.2, + 75.4, + 74.8, + 73.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 78.1, + 78.6, + 79.0, + 78.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 74.9, + 75.5, + 76.7, + 76.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 79.4, + 79.5, + 80.6, + 80.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 79.0, + 80.7, + 80.3, + 79.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 75.2, + 75.8, + 76.6, + 74.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 77.8, + 78.3, + 78.3, + 76.4 + ], + "up_blocks.2.attentions.2.proj_out": [ + 74.4, + 75.3, + 75.3, + 69.6 + ], + "up_blocks.2.resnets.0.conv1": [ + 82.5, + 83.0, + 83.9, + 79.3 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 86.1, + 86.2, + 86.9, + 85.2 + ], + "up_blocks.2.resnets.0.conv2": [ + 81.0, + 81.7, + 81.9, + 77.4 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 70.7, + 71.0, + 71.5, + 61.3 + ], + "up_blocks.2.resnets.1.conv1": [ + 81.4, + 82.1, + 82.4, + 78.4 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 81.0, + 81.2, + 81.1, + 79.8 + ], + "up_blocks.2.resnets.1.conv2": [ + 80.4, + 81.2, + 82.0, + 79.0 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 74.7, + 75.3, + 75.5, + 69.3 + ], + "up_blocks.2.resnets.2.conv1": [ + 79.4, + 79.9, + 80.2, + 76.2 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 75.6, + 74.7, + 76.1, + 74.3 + ], + "up_blocks.2.resnets.2.conv2": [ + 77.9, + 77.6, + 78.7, + 77.4 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 72.6, + 73.1, + 72.9, + 69.4 + ], + "up_blocks.2.upsamplers.0.conv": [ + 72.9, + 74.1, + 73.5, + 72.0 + ], + "up_blocks.3.attentions.0.proj_in": [ + 71.7, + 72.6, + 72.1, + 70.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 73.8, + 74.5, + 74.6, + 73.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 75.1, + 75.5, + 75.9, + 74.9 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 74.4, + 74.1, + 75.3, + 74.8 + ], + 
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 72.0, + 72.7, + 73.2, + 71.8 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 80.1, + 79.1, + 80.9, + 80.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 74.1, + 74.6, + 72.7, + 72.9 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.8, + 81.4, + 82.2, + 81.3 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 77.4, + 77.0, + 78.0, + 77.0 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 74.1, + 74.7, + 74.4, + 73.3 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 75.8, + 76.2, + 76.1, + 75.6 + ], + "up_blocks.3.attentions.0.proj_out": [ + 74.1, + 74.2, + 74.4, + 73.3 + ], + "up_blocks.3.attentions.1.proj_in": [ + 67.9, + 68.0, + 68.4, + 67.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 73.0, + 73.2, + 74.0, + 74.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 71.2, + 71.3, + 72.3, + 72.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 69.5, + 69.5, + 70.5, + 70.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 65.2, + 64.9, + 66.0, + 65.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 76.7, + 76.1, + 76.2, + 76.7 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 75.9, + 74.7, + 75.7, + 76.0 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 81.8, + 81.4, + 82.1, + 82.0 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 80.1, + 79.3, + 80.2, + 80.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 69.5, + 69.5, + 70.2, + 69.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 72.0, + 71.9, + 72.5, + 71.9 + ], + "up_blocks.3.attentions.1.proj_out": [ + 70.7, + 70.9, + 71.3, + 70.6 + ], + "up_blocks.3.attentions.2.proj_in": [ + 63.3, + 63.6, + 63.4, + 63.3 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 74.5, + 74.5, + 75.3, + 74.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 72.9, + 72.9, + 73.3, + 72.6 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 68.2, + 67.7, + 68.9, + 68.2 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 69.5, + 69.2, + 69.4, + 69.1 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 74.2, + 74.1, + 75.8, + 75.2 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 71.2, + 73.6, + 70.2, + 70.6 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 75.0, + 75.5, + 75.4, + 74.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 75.4, + 75.5, + 75.9, + 74.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 65.9, + 65.6, + 65.8, + 65.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 70.5, + 70.4, + 70.7, + 70.5 + ], + "up_blocks.3.attentions.2.proj_out": [ + 69.2, + 68.9, + 69.3, + 69.2 + ], + "up_blocks.3.resnets.0.conv1": [ + 71.4, + 71.9, + 72.5, + 71.7 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 79.4, + 80.0, + 79.8, + 79.6 + ], + "up_blocks.3.resnets.0.conv2": [ + 72.3, + 72.7, + 73.2, + 72.3 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 60.6, + 60.9, + 60.7, + 53.1 + ], + "up_blocks.3.resnets.1.conv1": [ + 68.0, + 68.2, + 68.1, + 66.9 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 71.5, + 71.9, + 71.8, + 71.5 + 
], + "up_blocks.3.resnets.1.conv2": [ + 65.9, + 66.0, + 66.1, + 65.6 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 65.3, + 66.0, + 65.6, + 65.2 + ], + "up_blocks.3.resnets.2.conv1": [ + 66.4, + 66.8, + 66.8, + 66.3 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 63.5, + 63.6, + 63.4, + 63.4 + ], + "up_blocks.3.resnets.2.conv2": [ + 62.8, + 62.8, + 62.7, + 62.6 + ], + "up_blocks.3.resnets.2.conv_shortcut": [ + 59.6, + 59.9, + 60.0, + 59.9 + ], + "mid_block.attentions.0.proj_in": [ + 87.9, + 88.2, + 88.4, + 87.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.5, + 88.5, + 88.8, + 88.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.4, + 88.7, + 88.8, + 88.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.8, + 88.4, + 88.3, + 88.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.0, + 88.3, + 88.3, + 86.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.2, + 88.0, + 88.5, + 87.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 86.9, + 87.1, + 87.2, + 85.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.3, + 88.0, + 88.1, + 88.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.8, + 88.6, + 88.9, + 88.6 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 87.1, + 87.0, + 87.5, + 86.9 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 87.6, + 87.7, + 87.8, + 87.9 + ], + "mid_block.attentions.0.proj_out": [ + 88.4, + 88.4, + 88.2, + 86.4 + ], + "mid_block.resnets.0.conv1": [ + 87.9, + 88.1, + 87.6, + 87.0 + ], + "mid_block.resnets.0.time_emb_proj": [ + 88.6, + 88.4, + 88.7, + 87.7 + ], + "mid_block.resnets.0.conv2": [ + 87.7, + 87.8, + 88.1, + 81.7 + ], + "mid_block.resnets.1.conv1": [ + 87.9, + 87.9, + 88.3, + 80.7 + ], + "mid_block.resnets.1.time_emb_proj": [ + 88.7, + 88.6, + 88.7, + 88.9 + ], + "mid_block.resnets.1.conv2": [ + 88.0, + 88.1, + 87.3, + 79.7 + ] + }, + "2": { + "time_embedding.linear_1": [ + 72.6, + 72.8, + 72.8, + 72.6 + ], + "time_embedding.linear_2": [ + 80.7, + 80.1, + 80.6, + 80.2 + ], + "down_blocks.0.attentions.0.proj_in": [ + 79.0, + 78.7, + 75.9, + 76.1 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 84.7, + 84.6, + 77.4, + 79.0 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 83.3, + 83.4, + 83.8, + 73.6 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 79.3, + 80.0, + 79.8, + 76.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 79.7, + 80.0, + 77.0, + 79.6 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 85.0, + 84.7, + 85.7, + 84.8 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 75.6, + 76.7, + 78.1, + 77.0 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 85.2, + 85.1, + 85.0, + 76.1 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 85.9, + 85.6, + 84.9, + 84.1 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 79.1, + 79.2, + 79.4, + 77.1 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 81.0, + 80.9, + 80.6, + 80.5 + ], + "down_blocks.0.attentions.0.proj_out": [ + 79.1, + 79.3, + 79.2, + 76.1 + ], + "down_blocks.0.attentions.1.proj_in": [ + 77.2, + 77.1, + 77.5, + 72.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 82.2, + 82.2, + 82.9, + 81.6 + ], + 
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 80.8, + 81.4, + 82.2, + 80.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 80.7, + 80.7, + 76.5, + 75.5 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 78.9, + 79.2, + 74.5, + 76.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 84.6, + 84.4, + 83.7, + 78.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 78.3, + 77.3, + 78.0, + 75.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 83.6, + 82.5, + 83.6, + 74.0 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 83.7, + 83.9, + 84.5, + 83.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 78.2, + 78.4, + 76.4, + 76.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 80.2, + 80.3, + 79.8, + 78.5 + ], + "down_blocks.0.attentions.1.proj_out": [ + 79.2, + 79.3, + 79.8, + 73.5 + ], + "down_blocks.0.resnets.0.conv1": [ + 80.0, + 80.1, + 80.2, + 79.3 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 81.7, + 81.9, + 79.2, + 80.0 + ], + "down_blocks.0.resnets.0.conv2": [ + 75.7, + 75.9, + 76.4, + 75.1 + ], + "down_blocks.0.resnets.1.conv1": [ + 77.7, + 78.3, + 78.6, + 76.0 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 77.9, + 81.1, + 76.7, + 73.5 + ], + "down_blocks.0.resnets.1.conv2": [ + 79.1, + 79.5, + 79.8, + 73.7 + ], + "down_blocks.0.downsamplers.0.conv": [ + 76.8, + 76.8, + 77.4, + 76.0 + ], + "down_blocks.1.attentions.0.proj_in": [ + 80.7, + 81.8, + 82.2, + 80.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 85.5, + 85.4, + 85.7, + 75.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 85.8, + 85.5, + 85.8, + 73.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 82.4, + 83.0, + 81.7, + 81.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 80.4, + 79.4, + 78.5, + 78.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 85.3, + 85.4, + 86.0, + 73.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 73.3, + 75.5, + 73.6, + 73.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 84.4, + 84.9, + 85.2, + 80.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 86.1, + 86.5, + 88.1, + 84.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 81.0, + 81.2, + 75.9, + 78.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 81.4, + 81.9, + 82.3, + 75.5 + ], + "down_blocks.1.attentions.0.proj_out": [ + 77.0, + 80.4, + 82.6, + 77.1 + ], + "down_blocks.1.attentions.1.proj_in": [ + 82.1, + 82.4, + 77.3, + 77.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 86.6, + 85.6, + 78.6, + 76.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 86.8, + 85.9, + 77.0, + 73.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 82.4, + 83.1, + 77.9, + 73.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 82.2, + 83.0, + 83.5, + 78.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 87.2, + 87.3, + 87.2, + 80.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 75.3, + 79.6, + 76.6, + 77.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 87.5, + 87.4, + 77.1, + 85.5 + ], + 
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.1, + 88.0, + 88.1, + 87.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 80.4, + 83.4, + 79.1, + 77.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 83.9, + 85.6, + 78.1, + 83.9 + ], + "down_blocks.1.attentions.1.proj_out": [ + 83.9, + 84.6, + 85.2, + 78.2 + ], + "down_blocks.1.resnets.0.conv1": [ + 82.9, + 83.5, + 84.6, + 82.9 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 78.9, + 79.7, + 79.9, + 75.0 + ], + "down_blocks.1.resnets.0.conv2": [ + 80.9, + 83.0, + 84.3, + 78.3 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 75.0, + 73.4, + 74.7, + 74.9 + ], + "down_blocks.1.resnets.1.conv1": [ + 84.5, + 85.0, + 85.4, + 78.1 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 83.7, + 84.1, + 85.0, + 78.4 + ], + "down_blocks.1.resnets.1.conv2": [ + 85.0, + 85.5, + 77.8, + 77.6 + ], + "down_blocks.1.downsamplers.0.conv": [ + 83.0, + 82.4, + 83.8, + 78.0 + ], + "down_blocks.2.attentions.0.proj_in": [ + 86.6, + 86.4, + 79.5, + 78.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.9, + 88.0, + 77.4, + 76.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.2, + 88.3, + 87.5, + 77.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 86.5, + 85.6, + 86.8, + 75.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 83.6, + 85.1, + 85.7, + 76.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 84.9, + 84.9, + 86.0, + 73.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 75.7, + 77.9, + 79.1, + 75.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 85.6, + 86.0, + 85.5, + 85.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.3, + 87.0, + 85.6, + 79.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 84.5, + 84.8, + 77.7, + 73.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 86.2, + 86.2, + 86.6, + 85.2 + ], + "down_blocks.2.attentions.0.proj_out": [ + 85.8, + 86.9, + 78.0, + 72.8 + ], + "down_blocks.2.attentions.1.proj_in": [ + 85.6, + 86.6, + 78.0, + 78.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.2, + 88.3, + 87.5, + 87.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.5, + 88.3, + 78.7, + 87.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 85.7, + 86.4, + 86.6, + 85.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 87.0, + 86.3, + 86.9, + 85.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 86.1, + 85.3, + 77.1, + 78.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 80.6, + 79.2, + 76.4, + 76.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 86.8, + 86.6, + 87.4, + 73.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.3, + 88.3, + 88.7, + 85.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 85.1, + 85.5, + 87.0, + 78.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 87.3, + 87.3, + 87.3, + 74.4 + ], + "down_blocks.2.attentions.1.proj_out": [ + 86.0, + 86.4, + 78.0, + 78.3 + ], + "down_blocks.2.resnets.0.conv1": [ + 84.4, + 83.9, + 84.4, + 78.1 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 86.8, + 86.2, + 87.7, + 74.5 + ], + 
"down_blocks.2.resnets.0.conv2": [ + 86.3, + 86.5, + 87.4, + 78.4 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 82.4, + 82.5, + 82.1, + 78.0 + ], + "down_blocks.2.resnets.1.conv1": [ + 87.6, + 87.7, + 87.6, + 85.5 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 86.9, + 87.1, + 87.5, + 74.1 + ], + "down_blocks.2.resnets.1.conv2": [ + 86.5, + 87.1, + 87.4, + 85.7 + ], + "down_blocks.2.downsamplers.0.conv": [ + 87.6, + 87.6, + 87.6, + 81.9 + ], + "down_blocks.3.resnets.0.conv1": [ + 87.5, + 87.9, + 87.8, + 83.3 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 88.8, + 88.8, + 88.7, + 89.0 + ], + "down_blocks.3.resnets.0.conv2": [ + 87.8, + 88.2, + 88.3, + 87.8 + ], + "down_blocks.3.resnets.1.conv1": [ + 88.6, + 88.8, + 87.9, + 88.4 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 88.9, + 88.8, + 88.9, + 88.7 + ], + "down_blocks.3.resnets.1.conv2": [ + 88.3, + 88.6, + 88.2, + 87.9 + ], + "up_blocks.0.resnets.0.conv1": [ + 87.1, + 87.8, + 88.0, + 87.9 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 88.7, + 88.9, + 88.9, + 88.9 + ], + "up_blocks.0.resnets.0.conv2": [ + 87.3, + 88.2, + 87.9, + 87.0 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 88.5, + 88.7, + 88.5, + 84.8 + ], + "up_blocks.0.resnets.1.conv1": [ + 88.1, + 88.2, + 88.2, + 88.2 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 89.0, + 88.7, + 88.7, + 88.9 + ], + "up_blocks.0.resnets.1.conv2": [ + 88.4, + 88.3, + 87.8, + 88.0 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 88.3, + 88.6, + 88.3, + 85.4 + ], + "up_blocks.0.resnets.2.conv1": [ + 87.1, + 87.4, + 86.7, + 77.8 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 88.9, + 88.9, + 88.8, + 88.8 + ], + "up_blocks.0.resnets.2.conv2": [ + 82.8, + 84.5, + 84.6, + 78.3 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 87.8, + 88.4, + 87.9, + 86.6 + ], + "up_blocks.0.upsamplers.0.conv": [ + 83.1, + 83.5, + 78.5, + 84.6 + ], + "up_blocks.1.attentions.0.proj_in": [ + 83.4, + 84.4, + 82.0, + 79.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.1, + 87.7, + 87.1, + 86.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.8, + 87.9, + 86.3, + 87.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 86.5, + 86.4, + 86.8, + 81.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 85.5, + 86.5, + 80.3, + 78.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 86.8, + 86.4, + 87.1, + 86.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.7, + 81.3, + 78.0, + 73.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.6, + 87.5, + 88.1, + 86.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.9, + 87.8, + 88.6, + 87.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 86.2, + 85.5, + 81.0, + 73.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 86.8, + 85.9, + 80.9, + 85.4 + ], + "up_blocks.1.attentions.0.proj_out": [ + 86.3, + 86.4, + 86.3, + 85.9 + ], + "up_blocks.1.attentions.1.proj_in": [ + 84.9, + 84.6, + 85.0, + 78.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.2, + 87.8, + 78.9, + 78.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.4, + 88.4, + 81.7, + 79.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 86.3, + 86.3, + 85.0, + 80.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 86.5, + 85.9, + 85.4, + 81.0 + ], + 
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 86.3, + 86.1, + 86.8, + 85.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 79.5, + 80.4, + 81.8, + 79.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 87.1, + 87.3, + 87.6, + 86.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.4, + 88.7, + 88.5, + 88.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 86.0, + 86.0, + 86.2, + 84.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 86.9, + 86.9, + 87.1, + 83.3 + ], + "up_blocks.1.attentions.1.proj_out": [ + 86.0, + 86.4, + 87.0, + 85.4 + ], + "up_blocks.1.attentions.2.proj_in": [ + 82.2, + 83.1, + 83.7, + 76.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 86.4, + 87.0, + 87.3, + 82.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 88.2, + 87.9, + 88.2, + 87.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 85.8, + 85.9, + 86.9, + 85.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 84.0, + 83.4, + 85.4, + 82.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 85.1, + 85.6, + 86.0, + 85.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 79.4, + 79.8, + 80.9, + 79.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 86.7, + 87.0, + 87.1, + 86.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 85.2, + 85.5, + 86.2, + 85.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 85.9, + 86.2, + 86.2, + 83.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 86.4, + 86.8, + 86.9, + 85.5 + ], + "up_blocks.1.attentions.2.proj_out": [ + 83.7, + 84.0, + 85.1, + 81.7 + ], + "up_blocks.1.resnets.0.conv1": [ + 86.7, + 86.6, + 84.0, + 78.3 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 85.7, + 85.9, + 86.6, + 86.5 + ], + "up_blocks.1.resnets.0.conv2": [ + 85.8, + 85.8, + 86.0, + 80.0 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 85.6, + 84.0, + 78.2, + 84.8 + ], + "up_blocks.1.resnets.1.conv1": [ + 86.6, + 86.4, + 80.6, + 77.2 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 87.3, + 87.7, + 87.8, + 87.0 + ], + "up_blocks.1.resnets.1.conv2": [ + 86.8, + 86.9, + 78.2, + 78.1 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 84.2, + 84.4, + 84.7, + 79.9 + ], + "up_blocks.1.resnets.2.conv1": [ + 84.2, + 84.6, + 85.6, + 82.7 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 87.3, + 87.3, + 87.6, + 87.3 + ], + "up_blocks.1.resnets.2.conv2": [ + 87.0, + 87.0, + 86.8, + 83.7 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 83.7, + 84.2, + 83.7, + 82.9 + ], + "up_blocks.1.upsamplers.0.conv": [ + 83.9, + 84.3, + 84.1, + 82.3 + ], + "up_blocks.2.attentions.0.proj_in": [ + 81.5, + 81.2, + 82.7, + 78.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.1, + 87.4, + 87.5, + 85.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.0, + 87.0, + 87.6, + 86.1 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 85.3, + 85.5, + 85.8, + 83.8 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 82.1, + 82.4, + 82.8, + 81.2 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 86.1, + 85.9, + 86.6, + 85.8 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 79.1, + 79.9, + 81.1, + 80.5 + ], + 
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.0, + 86.7, + 87.0, + 86.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 85.5, + 85.1, + 85.8, + 84.9 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 84.4, + 84.6, + 85.0, + 81.8 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 85.5, + 85.6, + 85.8, + 83.0 + ], + "up_blocks.2.attentions.0.proj_out": [ + 81.1, + 82.2, + 82.2, + 75.4 + ], + "up_blocks.2.attentions.1.proj_in": [ + 78.5, + 77.9, + 78.6, + 76.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 84.8, + 84.2, + 85.4, + 82.3 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 84.4, + 84.7, + 84.7, + 82.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 80.3, + 81.4, + 80.7, + 79.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 79.5, + 79.4, + 80.2, + 78.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 85.2, + 84.1, + 84.9, + 83.3 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 75.9, + 77.1, + 77.5, + 76.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 87.1, + 87.1, + 87.3, + 87.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.3, + 88.3, + 88.4, + 88.3 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 81.7, + 80.3, + 82.6, + 80.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 80.8, + 80.7, + 82.3, + 80.8 + ], + "up_blocks.2.attentions.1.proj_out": [ + 81.3, + 79.9, + 81.3, + 77.7 + ], + "up_blocks.2.attentions.2.proj_in": [ + 76.7, + 73.7, + 77.4, + 74.9 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 83.5, + 83.7, + 84.3, + 82.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 84.5, + 84.7, + 84.9, + 83.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 78.0, + 78.7, + 78.5, + 77.1 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 79.0, + 79.7, + 79.5, + 78.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 79.9, + 81.2, + 80.8, + 80.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 74.8, + 76.6, + 76.9, + 77.0 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 81.6, + 79.8, + 82.3, + 82.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 80.7, + 82.5, + 81.3, + 80.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 79.1, + 79.5, + 80.3, + 78.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 81.3, + 81.9, + 82.1, + 80.6 + ], + "up_blocks.2.attentions.2.proj_out": [ + 79.4, + 79.4, + 79.8, + 74.8 + ], + "up_blocks.2.resnets.0.conv1": [ + 85.4, + 86.1, + 86.4, + 83.8 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 86.7, + 86.8, + 87.4, + 86.0 + ], + "up_blocks.2.resnets.0.conv2": [ + 84.9, + 85.5, + 85.8, + 81.4 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 78.3, + 79.3, + 79.3, + 73.9 + ], + "up_blocks.2.resnets.1.conv1": [ + 83.6, + 84.3, + 84.4, + 81.3 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 82.3, + 82.7, + 82.9, + 81.2 + ], + "up_blocks.2.resnets.1.conv2": [ + 82.5, + 84.1, + 83.9, + 81.9 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 79.7, + 80.2, + 80.3, + 74.9 + ], + "up_blocks.2.resnets.2.conv1": [ + 82.6, + 82.7, + 83.2, + 79.9 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 77.9, + 77.1, + 78.6, + 76.6 + ], 
+ "up_blocks.2.resnets.2.conv2": [ + 81.5, + 82.0, + 82.1, + 81.0 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 78.0, + 78.2, + 78.6, + 73.9 + ], + "up_blocks.2.upsamplers.0.conv": [ + 77.7, + 77.9, + 78.0, + 76.2 + ], + "up_blocks.3.attentions.0.proj_in": [ + 75.7, + 76.3, + 76.1, + 75.1 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 79.5, + 80.2, + 80.2, + 79.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 79.6, + 79.9, + 80.2, + 79.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 78.4, + 78.7, + 79.2, + 78.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 77.0, + 77.4, + 77.8, + 76.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 84.7, + 84.6, + 84.8, + 84.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 78.7, + 79.0, + 77.9, + 77.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 84.1, + 84.3, + 84.6, + 84.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 79.8, + 79.8, + 80.2, + 79.1 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 77.9, + 78.2, + 78.3, + 77.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 79.6, + 80.1, + 80.1, + 79.4 + ], + "up_blocks.3.attentions.0.proj_out": [ + 78.4, + 78.4, + 78.5, + 77.7 + ], + "up_blocks.3.attentions.1.proj_in": [ + 72.9, + 72.8, + 72.9, + 72.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 79.9, + 79.9, + 80.5, + 80.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 78.4, + 78.7, + 79.9, + 79.7 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 75.7, + 75.6, + 76.4, + 76.0 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 74.0, + 73.8, + 74.8, + 74.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 83.7, + 83.4, + 84.7, + 84.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 79.3, + 76.9, + 78.9, + 78.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 83.1, + 82.8, + 83.7, + 83.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 81.6, + 81.1, + 81.3, + 81.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 74.7, + 74.9, + 75.6, + 74.9 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 77.1, + 77.4, + 77.6, + 76.7 + ], + "up_blocks.3.attentions.1.proj_out": [ + 75.5, + 75.7, + 76.0, + 75.3 + ], + "up_blocks.3.attentions.2.proj_in": [ + 67.7, + 67.6, + 68.0, + 67.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 78.8, + 78.9, + 79.6, + 78.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 77.2, + 77.4, + 77.9, + 77.0 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 74.6, + 74.1, + 75.2, + 74.6 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 73.3, + 72.8, + 73.7, + 73.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 80.3, + 79.7, + 80.7, + 80.0 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 75.5, + 76.2, + 76.0, + 75.9 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 77.9, + 78.0, + 78.7, + 78.6 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 77.5, + 76.9, + 77.1, + 76.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 72.3, + 72.3, + 72.7, + 72.3 + ], + 
"up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 76.2, + 75.9, + 76.0, + 75.8 + ], + "up_blocks.3.attentions.2.proj_out": [ + 75.3, + 75.1, + 75.4, + 75.1 + ], + "up_blocks.3.resnets.0.conv1": [ + 76.4, + 76.4, + 77.1, + 76.1 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 83.5, + 84.0, + 83.6, + 83.6 + ], + "up_blocks.3.resnets.0.conv2": [ + 76.9, + 77.6, + 77.7, + 76.9 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 72.2, + 72.3, + 72.7, + 68.6 + ], + "up_blocks.3.resnets.1.conv1": [ + 73.5, + 73.6, + 73.5, + 72.1 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 78.9, + 79.3, + 79.1, + 78.9 + ], + "up_blocks.3.resnets.1.conv2": [ + 72.0, + 71.9, + 72.0, + 71.5 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 69.5, + 70.3, + 69.4, + 68.6 + ], + "up_blocks.3.resnets.2.conv1": [ + 73.7, + 73.5, + 74.4, + 73.6 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 73.1, + 73.3, + 73.0, + 73.0 + ], + "up_blocks.3.resnets.2.conv2": [ + 69.7, + 69.5, + 70.0, + 69.7 + ], + "up_blocks.3.resnets.2.conv_shortcut": [ + 66.1, + 66.4, + 66.4, + 66.1 + ], + "mid_block.attentions.0.proj_in": [ + 88.5, + 88.6, + 88.5, + 88.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.8, + 88.9, + 88.9, + 88.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.7, + 89.0, + 88.7, + 88.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.6, + 88.9, + 88.7, + 89.0 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.9, + 88.7, + 88.9, + 88.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.6, + 89.2, + 89.0, + 88.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 87.2, + 87.8, + 87.9, + 87.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.5, + 88.5, + 88.5, + 88.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 89.0, + 89.0, + 88.8, + 88.7 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.2, + 88.3, + 88.2, + 87.9 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.6, + 88.7, + 88.8, + 88.9 + ], + "mid_block.attentions.0.proj_out": [ + 88.8, + 88.7, + 89.0, + 88.3 + ], + "mid_block.resnets.0.conv1": [ + 88.5, + 88.6, + 88.6, + 88.5 + ], + "mid_block.resnets.0.time_emb_proj": [ + 88.8, + 88.7, + 88.9, + 89.0 + ], + "mid_block.resnets.0.conv2": [ + 88.3, + 88.5, + 88.3, + 88.8 + ], + "mid_block.resnets.1.conv1": [ + 88.6, + 88.5, + 89.0, + 84.6 + ], + "mid_block.resnets.1.time_emb_proj": [ + 88.8, + 88.9, + 88.6, + 88.8 + ], + "mid_block.resnets.1.conv2": [ + 88.8, + 88.3, + 88.7, + 87.7 + ] + }, + "4": { + "time_embedding.linear_1": [ + 87.3, + 87.5, + 87.3, + 86.4 + ], + "time_embedding.linear_2": [ + 82.3, + 82.5, + 82.4, + 81.9 + ], + "down_blocks.0.attentions.0.proj_in": [ + 83.7, + 83.6, + 84.1, + 73.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.7, + 87.8, + 87.6, + 87.1 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.3, + 87.5, + 87.6, + 86.6 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 85.7, + 85.9, + 85.8, + 85.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 85.4, + 85.5, + 77.0, + 85.0 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 87.8, + 88.1, + 88.3, + 88.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.6, + 83.5, + 83.1, + 82.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 
87.4, + 87.7, + 87.2, + 86.7 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.9, + 87.9, + 87.6, + 86.9 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 84.9, + 85.1, + 77.9, + 82.9 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 86.0, + 85.8, + 86.1, + 78.3 + ], + "down_blocks.0.attentions.0.proj_out": [ + 84.8, + 85.0, + 84.9, + 74.0 + ], + "down_blocks.0.attentions.1.proj_in": [ + 83.3, + 83.3, + 82.5, + 82.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 87.3, + 87.2, + 84.4, + 87.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 86.6, + 86.5, + 86.8, + 80.8 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 85.8, + 86.3, + 86.2, + 73.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 84.6, + 84.5, + 77.3, + 73.8 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 87.2, + 87.6, + 86.8, + 86.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 85.6, + 85.1, + 84.7, + 78.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 85.7, + 85.2, + 86.6, + 78.5 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 87.1, + 87.0, + 87.0, + 87.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 84.6, + 85.0, + 78.3, + 81.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 85.5, + 85.5, + 85.7, + 78.4 + ], + "down_blocks.0.attentions.1.proj_out": [ + 85.6, + 85.6, + 84.7, + 74.6 + ], + "down_blocks.0.resnets.0.conv1": [ + 86.1, + 86.1, + 84.9, + 85.6 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 85.0, + 85.9, + 86.0, + 74.6 + ], + "down_blocks.0.resnets.0.conv2": [ + 83.2, + 83.1, + 77.8, + 73.7 + ], + "down_blocks.0.resnets.1.conv1": [ + 84.5, + 84.9, + 84.6, + 75.4 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 87.3, + 87.1, + 87.1, + 86.9 + ], + "down_blocks.0.resnets.1.conv2": [ + 84.5, + 84.7, + 84.9, + 83.5 + ], + "down_blocks.0.downsamplers.0.conv": [ + 82.6, + 82.8, + 83.0, + 81.9 + ], + "down_blocks.1.attentions.0.proj_in": [ + 86.5, + 85.6, + 86.5, + 78.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.2, + 88.0, + 87.8, + 87.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.2, + 88.2, + 88.1, + 83.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 86.5, + 86.1, + 80.2, + 73.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 86.3, + 86.4, + 77.8, + 78.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 87.9, + 87.8, + 88.1, + 87.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 78.8, + 79.3, + 80.3, + 70.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.7, + 87.9, + 87.8, + 86.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.6, + 88.3, + 88.3, + 88.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 86.2, + 86.3, + 86.2, + 84.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 86.4, + 86.0, + 86.5, + 83.0 + ], + "down_blocks.1.attentions.0.proj_out": [ + 86.6, + 85.9, + 86.6, + 79.1 + ], + "down_blocks.1.attentions.1.proj_in": [ + 86.5, + 87.2, + 77.1, + 75.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.6, + 88.3, + 86.5, + 86.4 + ], + 
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.9, + 88.3, + 88.3, + 88.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 87.3, + 87.2, + 87.9, + 85.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 86.1, + 86.9, + 86.9, + 86.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 89.0, + 88.4, + 88.1, + 88.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 81.8, + 82.8, + 76.7, + 77.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.3, + 88.6, + 87.3, + 83.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.7, + 88.5, + 88.8, + 87.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 87.3, + 87.9, + 87.6, + 86.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 87.1, + 87.7, + 87.5, + 86.7 + ], + "down_blocks.1.attentions.1.proj_out": [ + 87.6, + 88.0, + 87.9, + 76.1 + ], + "down_blocks.1.resnets.0.conv1": [ + 87.4, + 87.7, + 87.6, + 86.2 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 87.4, + 87.0, + 87.4, + 85.8 + ], + "down_blocks.1.resnets.0.conv2": [ + 87.5, + 87.2, + 80.2, + 73.6 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 82.7, + 83.2, + 83.7, + 77.5 + ], + "down_blocks.1.resnets.1.conv1": [ + 87.0, + 87.0, + 86.6, + 86.6 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 87.5, + 87.9, + 87.5, + 78.0 + ], + "down_blocks.1.resnets.1.conv2": [ + 88.3, + 88.0, + 88.1, + 74.4 + ], + "down_blocks.1.downsamplers.0.conv": [ + 84.5, + 84.8, + 84.2, + 78.4 + ], + "down_blocks.2.attentions.0.proj_in": [ + 87.8, + 87.7, + 88.2, + 86.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 89.0, + 88.8, + 88.8, + 88.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.8, + 88.9, + 88.7, + 88.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.4, + 88.2, + 87.7, + 88.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 86.3, + 87.3, + 86.4, + 87.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.2, + 88.5, + 88.2, + 83.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 80.7, + 81.0, + 83.3, + 79.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.6, + 88.2, + 87.0, + 87.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.5, + 88.6, + 88.3, + 87.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.3, + 87.8, + 87.3, + 73.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.3, + 88.3, + 82.5, + 77.6 + ], + "down_blocks.2.attentions.0.proj_out": [ + 88.3, + 87.9, + 87.5, + 87.5 + ], + "down_blocks.2.attentions.1.proj_in": [ + 87.6, + 87.6, + 87.8, + 86.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.9, + 88.6, + 88.1, + 88.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 89.1, + 89.2, + 88.9, + 88.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.1, + 87.9, + 88.2, + 75.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 87.5, + 86.9, + 88.4, + 76.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.6, + 88.5, + 87.9, + 79.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 77.1, + 81.4, + 79.5, + 74.5 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 89.0, + 88.6, + 87.7, + 87.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.9, + 88.8, + 88.5, + 88.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 88.4, + 88.1, + 85.3, + 86.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.6, + 88.7, + 88.4, + 88.1 + ], + "down_blocks.2.attentions.1.proj_out": [ + 88.3, + 88.3, + 88.3, + 87.4 + ], + "down_blocks.2.resnets.0.conv1": [ + 88.0, + 87.7, + 88.0, + 73.9 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 88.3, + 88.5, + 88.2, + 87.8 + ], + "down_blocks.2.resnets.0.conv2": [ + 88.5, + 88.3, + 87.9, + 73.7 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 85.8, + 86.0, + 83.7, + 85.3 + ], + "down_blocks.2.resnets.1.conv1": [ + 88.8, + 88.6, + 88.9, + 86.3 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 88.6, + 88.7, + 88.8, + 88.3 + ], + "down_blocks.2.resnets.1.conv2": [ + 88.5, + 88.7, + 88.3, + 87.7 + ], + "down_blocks.2.downsamplers.0.conv": [ + 88.8, + 88.8, + 88.9, + 87.6 + ], + "down_blocks.3.resnets.0.conv1": [ + 88.9, + 88.9, + 88.5, + 87.5 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 89.3, + 88.6, + 89.2, + 88.7 + ], + "down_blocks.3.resnets.0.conv2": [ + 88.9, + 88.7, + 88.5, + 88.8 + ], + "down_blocks.3.resnets.1.conv1": [ + 88.7, + 89.1, + 88.8, + 88.5 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 89.0, + 89.0, + 88.6, + 87.8 + ], + "down_blocks.3.resnets.1.conv2": [ + 89.1, + 89.0, + 88.8, + 88.4 + ], + "up_blocks.0.resnets.0.conv1": [ + 88.8, + 88.9, + 88.7, + 88.7 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 89.1, + 89.0, + 88.6, + 88.5 + ], + "up_blocks.0.resnets.0.conv2": [ + 89.2, + 89.0, + 89.1, + 88.8 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 89.2, + 88.8, + 88.7, + 88.5 + ], + "up_blocks.0.resnets.1.conv1": [ + 88.9, + 88.8, + 88.5, + 88.2 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 88.9, + 89.1, + 89.0, + 88.6 + ], + "up_blocks.0.resnets.1.conv2": [ + 89.1, + 88.8, + 88.7, + 88.7 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 88.9, + 88.9, + 88.8, + 88.4 + ], + "up_blocks.0.resnets.2.conv1": [ + 88.5, + 88.2, + 87.3, + 84.3 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 88.9, + 89.0, + 88.8, + 88.7 + ], + "up_blocks.0.resnets.2.conv2": [ + 88.6, + 88.6, + 88.5, + 87.9 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 88.8, + 88.6, + 88.3, + 88.7 + ], + "up_blocks.0.upsamplers.0.conv": [ + 87.6, + 87.9, + 88.0, + 85.5 + ], + "up_blocks.1.attentions.0.proj_in": [ + 86.0, + 87.2, + 87.4, + 86.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.3, + 88.4, + 88.6, + 87.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 89.1, + 88.8, + 88.7, + 88.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.5, + 88.4, + 88.4, + 87.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.1, + 88.1, + 86.6, + 81.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.3, + 88.9, + 88.8, + 88.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 84.0, + 82.8, + 86.1, + 85.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.7, + 88.8, + 88.6, + 88.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.6, + 88.8, + 88.6, + 88.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 87.5, + 87.8, + 78.7, + 78.2 + ], + 
"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.6, + 88.4, + 88.6, + 85.7 + ], + "up_blocks.1.attentions.0.proj_out": [ + 87.8, + 88.3, + 88.0, + 88.0 + ], + "up_blocks.1.attentions.1.proj_in": [ + 87.6, + 87.4, + 87.8, + 84.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.9, + 88.3, + 87.5, + 86.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.9, + 89.3, + 88.6, + 88.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.6, + 88.0, + 88.3, + 88.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 87.7, + 87.4, + 87.8, + 86.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.3, + 88.2, + 88.4, + 88.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 82.4, + 82.8, + 83.8, + 81.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.7, + 88.2, + 88.5, + 88.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.9, + 88.9, + 88.6, + 88.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 88.1, + 87.6, + 87.2, + 85.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 87.8, + 88.3, + 88.4, + 87.9 + ], + "up_blocks.1.attentions.1.proj_out": [ + 88.2, + 88.3, + 88.4, + 88.1 + ], + "up_blocks.1.attentions.2.proj_in": [ + 86.9, + 87.4, + 87.4, + 85.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 87.6, + 87.9, + 87.5, + 86.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 88.8, + 88.6, + 88.5, + 88.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 88.3, + 88.4, + 88.0, + 88.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 87.7, + 87.5, + 87.8, + 87.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 87.9, + 87.9, + 88.1, + 87.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 81.4, + 82.0, + 83.1, + 81.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 88.5, + 88.1, + 88.2, + 88.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 88.2, + 87.9, + 88.0, + 88.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 88.1, + 88.1, + 88.4, + 87.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 88.3, + 88.2, + 88.4, + 88.1 + ], + "up_blocks.1.attentions.2.proj_out": [ + 87.5, + 87.6, + 87.9, + 86.4 + ], + "up_blocks.1.resnets.0.conv1": [ + 88.7, + 88.3, + 86.3, + 80.1 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 89.0, + 88.6, + 88.6, + 88.1 + ], + "up_blocks.1.resnets.0.conv2": [ + 88.2, + 88.2, + 78.6, + 73.5 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 88.1, + 87.6, + 86.7, + 84.1 + ], + "up_blocks.1.resnets.1.conv1": [ + 88.0, + 88.2, + 88.5, + 87.7 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 88.3, + 88.3, + 88.6, + 88.1 + ], + "up_blocks.1.resnets.1.conv2": [ + 88.4, + 88.5, + 88.4, + 88.1 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 86.5, + 86.2, + 86.3, + 84.3 + ], + "up_blocks.1.resnets.2.conv1": [ + 88.5, + 88.3, + 88.4, + 88.3 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 88.4, + 88.6, + 88.6, + 88.2 + ], + "up_blocks.1.resnets.2.conv2": [ + 88.4, + 88.3, + 88.3, + 87.9 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 85.9, + 86.3, + 86.2, + 85.4 + ], + "up_blocks.1.upsamplers.0.conv": [ + 87.0, + 86.9, + 86.8, + 86.1 + ], + "up_blocks.2.attentions.0.proj_in": [ + 86.2, + 86.7, + 86.7, + 85.1 + ], + 
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.5, + 88.5, + 88.2, + 87.8 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.6, + 88.4, + 88.2, + 88.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.2, + 87.9, + 88.2, + 87.7 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 86.8, + 87.1, + 86.6, + 85.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 87.8, + 88.3, + 88.6, + 87.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.8, + 83.2, + 84.2, + 83.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.6, + 88.2, + 88.6, + 87.9 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.3, + 87.0, + 87.5, + 86.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 87.8, + 87.7, + 87.8, + 87.2 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.2, + 87.8, + 88.2, + 86.6 + ], + "up_blocks.2.attentions.0.proj_out": [ + 86.5, + 86.2, + 86.7, + 84.8 + ], + "up_blocks.2.attentions.1.proj_in": [ + 84.9, + 85.0, + 85.1, + 83.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.0, + 87.4, + 88.0, + 87.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.2, + 88.2, + 88.1, + 87.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 86.8, + 86.3, + 87.0, + 86.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 83.3, + 82.9, + 84.4, + 82.3 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.0, + 87.6, + 88.2, + 88.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 82.2, + 81.6, + 82.9, + 82.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.4, + 88.1, + 88.3, + 88.2 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.9, + 88.6, + 88.8, + 88.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 85.8, + 86.0, + 86.0, + 84.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 86.4, + 86.6, + 87.1, + 86.2 + ], + "up_blocks.2.attentions.1.proj_out": [ + 86.2, + 86.0, + 86.7, + 85.0 + ], + "up_blocks.2.attentions.2.proj_in": [ + 82.2, + 82.4, + 83.3, + 82.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 87.8, + 87.8, + 87.9, + 87.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 87.9, + 87.9, + 88.0, + 87.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 85.1, + 85.9, + 85.5, + 84.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 80.9, + 82.2, + 82.6, + 81.9 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 86.4, + 86.8, + 86.7, + 86.9 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 79.1, + 80.7, + 80.9, + 81.1 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 85.2, + 86.8, + 86.2, + 86.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 85.5, + 85.5, + 85.9, + 85.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 83.7, + 82.8, + 84.1, + 82.9 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 86.4, + 86.5, + 86.6, + 85.4 + ], + "up_blocks.2.attentions.2.proj_out": [ + 84.8, + 85.1, + 85.1, + 82.8 + ], + "up_blocks.2.resnets.0.conv1": [ + 88.0, + 87.9, + 88.0, + 87.1 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 88.9, + 88.5, + 88.6, + 88.3 + 
], + "up_blocks.2.resnets.0.conv2": [ + 87.6, + 87.8, + 87.9, + 86.7 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 85.7, + 86.2, + 86.2, + 84.6 + ], + "up_blocks.2.resnets.1.conv1": [ + 87.4, + 87.3, + 87.5, + 85.9 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 88.0, + 88.0, + 88.1, + 87.6 + ], + "up_blocks.2.resnets.1.conv2": [ + 87.5, + 87.5, + 87.2, + 86.7 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 83.9, + 84.4, + 83.9, + 82.4 + ], + "up_blocks.2.resnets.2.conv1": [ + 86.8, + 86.6, + 86.7, + 85.8 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 87.6, + 87.4, + 87.8, + 87.0 + ], + "up_blocks.2.resnets.2.conv2": [ + 86.9, + 86.0, + 86.8, + 86.0 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 82.7, + 82.9, + 83.3, + 79.6 + ], + "up_blocks.2.upsamplers.0.conv": [ + 83.9, + 84.6, + 84.2, + 82.2 + ], + "up_blocks.3.attentions.0.proj_in": [ + 82.6, + 83.0, + 82.9, + 82.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 86.2, + 86.1, + 86.4, + 85.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 86.0, + 86.0, + 86.0, + 85.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 85.1, + 85.5, + 85.3, + 84.9 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 83.5, + 83.8, + 83.9, + 83.0 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.0, + 88.2, + 88.3, + 88.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 84.9, + 83.9, + 84.3, + 83.8 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 86.6, + 86.1, + 87.1, + 86.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 86.5, + 86.2, + 86.7, + 86.3 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 84.4, + 84.7, + 84.6, + 83.9 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 84.5, + 85.0, + 85.2, + 84.5 + ], + "up_blocks.3.attentions.0.proj_out": [ + 84.2, + 84.5, + 84.8, + 84.1 + ], + "up_blocks.3.attentions.1.proj_in": [ + 80.1, + 80.4, + 80.6, + 79.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 86.8, + 86.2, + 86.6, + 86.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 86.3, + 86.0, + 86.5, + 86.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 83.0, + 82.9, + 83.4, + 83.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 81.5, + 81.7, + 82.0, + 81.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 87.7, + 87.7, + 88.0, + 87.7 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 85.2, + 85.1, + 84.8, + 85.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 87.1, + 87.2, + 87.6, + 87.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 85.7, + 85.8, + 85.9, + 85.7 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 81.8, + 81.9, + 82.1, + 81.7 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 83.7, + 83.9, + 84.1, + 83.6 + ], + "up_blocks.3.attentions.1.proj_out": [ + 83.1, + 83.1, + 83.3, + 83.0 + ], + "up_blocks.3.attentions.2.proj_in": [ + 78.9, + 78.7, + 79.3, + 78.9 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 84.7, + 84.5, + 85.2, + 84.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 84.0, + 84.0, + 84.5, + 84.1 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 82.7, + 82.8, + 83.5, + 83.2 + ], + 
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 82.1, + 81.9, + 82.6, + 82.2 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 87.0, + 86.9, + 87.1, + 86.7 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 84.1, + 84.2, + 84.5, + 84.7 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 83.8, + 84.0, + 84.4, + 84.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 82.7, + 81.9, + 82.3, + 81.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 80.9, + 80.8, + 81.4, + 81.0 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 84.6, + 84.7, + 84.6, + 84.6 + ], + "up_blocks.3.attentions.2.proj_out": [ + 81.6, + 81.5, + 82.1, + 81.8 + ], + "up_blocks.3.resnets.0.conv1": [ + 83.5, + 83.5, + 83.8, + 82.9 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 87.6, + 87.7, + 87.4, + 86.9 + ], + "up_blocks.3.resnets.0.conv2": [ + 83.5, + 83.7, + 84.0, + 83.5 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 80.8, + 81.0, + 81.0, + 77.9 + ], + "up_blocks.3.resnets.1.conv1": [ + 81.8, + 81.8, + 81.8, + 81.3 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 84.5, + 84.7, + 85.1, + 84.8 + ], + "up_blocks.3.resnets.1.conv2": [ + 81.7, + 81.7, + 81.6, + 81.3 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 79.1, + 79.4, + 79.7, + 79.2 + ], + "up_blocks.3.resnets.2.conv1": [ + 81.5, + 81.5, + 81.9, + 81.3 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 82.4, + 82.3, + 82.3, + 82.3 + ], + "up_blocks.3.resnets.2.conv2": [ + 80.3, + 80.6, + 80.9, + 80.3 + ], + "up_blocks.3.resnets.2.conv_shortcut": [ + 80.1, + 80.3, + 80.4, + 80.1 + ], + "mid_block.attentions.0.proj_in": [ + 88.9, + 88.6, + 88.8, + 87.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 89.0, + 89.0, + 89.1, + 88.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.8, + 88.7, + 88.9, + 88.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.8, + 89.2, + 88.9, + 88.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 89.1, + 89.0, + 88.7, + 88.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 89.1, + 89.0, + 88.9, + 89.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 89.1, + 88.9, + 89.1, + 88.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 89.1, + 89.1, + 88.5, + 88.0 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.8, + 88.8, + 88.7, + 88.7 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.8, + 89.0, + 89.0, + 88.8 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 89.2, + 89.1, + 88.9, + 88.4 + ], + "mid_block.attentions.0.proj_out": [ + 89.1, + 88.9, + 88.9, + 88.6 + ], + "mid_block.resnets.0.conv1": [ + 89.0, + 89.0, + 88.7, + 88.8 + ], + "mid_block.resnets.0.time_emb_proj": [ + 88.8, + 89.1, + 88.6, + 88.3 + ], + "mid_block.resnets.0.conv2": [ + 89.0, + 88.8, + 88.7, + 89.3 + ], + "mid_block.resnets.1.conv1": [ + 89.0, + 88.7, + 88.6, + 88.6 + ], + "mid_block.resnets.1.time_emb_proj": [ + 89.1, + 89.2, + 88.8, + 88.9 + ], + "mid_block.resnets.1.conv2": [ + 88.8, + 89.0, + 88.7, + 88.8 + ] + }, + "6": { + "time_embedding.linear_1": [ + 88.5, + 88.8, + 88.5, + 88.6 + ], + "time_embedding.linear_2": [ + 87.7, + 87.9, + 87.8, + 87.0 + ], + "down_blocks.0.attentions.0.proj_in": [ + 87.6, + 87.7, + 87.9, + 86.6 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.5, + 
88.4, + 88.5, + 87.7 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.4, + 88.7, + 88.3, + 88.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.7, + 88.0, + 87.6, + 86.9 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 87.3, + 87.6, + 86.2, + 86.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.4, + 88.5, + 88.5, + 88.1 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.0, + 88.2, + 88.0, + 87.6 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.2, + 88.4, + 87.8, + 88.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.4, + 88.5, + 88.7, + 88.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 87.4, + 87.9, + 85.9, + 88.0 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 87.5, + 88.1, + 87.9, + 87.9 + ], + "down_blocks.0.attentions.0.proj_out": [ + 87.7, + 87.7, + 87.7, + 87.2 + ], + "down_blocks.0.attentions.1.proj_in": [ + 87.6, + 87.5, + 87.4, + 85.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.6, + 88.5, + 87.5, + 87.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.2, + 88.3, + 87.1, + 88.4 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.0, + 88.1, + 87.4, + 87.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 87.7, + 87.9, + 85.7, + 86.0 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.9, + 88.7, + 88.7, + 88.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.3, + 88.3, + 88.4, + 88.0 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.1, + 88.3, + 88.3, + 88.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.6, + 88.6, + 88.5, + 88.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 87.9, + 87.7, + 87.8, + 86.0 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 87.9, + 88.0, + 87.7, + 87.6 + ], + "down_blocks.0.attentions.1.proj_out": [ + 87.4, + 88.0, + 86.8, + 79.7 + ], + "down_blocks.0.resnets.0.conv1": [ + 87.9, + 88.3, + 87.5, + 83.7 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 87.9, + 88.2, + 88.2, + 87.7 + ], + "down_blocks.0.resnets.0.conv2": [ + 87.3, + 87.4, + 86.2, + 86.5 + ], + "down_blocks.0.resnets.1.conv1": [ + 87.6, + 88.0, + 86.3, + 86.4 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 88.2, + 88.5, + 88.3, + 87.5 + ], + "down_blocks.0.resnets.1.conv2": [ + 87.7, + 87.7, + 88.0, + 86.8 + ], + "down_blocks.0.downsamplers.0.conv": [ + 87.9, + 88.1, + 88.1, + 85.9 + ], + "down_blocks.1.attentions.0.proj_in": [ + 87.9, + 88.3, + 87.7, + 84.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.6, + 88.5, + 88.3, + 87.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.7, + 88.9, + 88.9, + 88.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.3, + 88.7, + 86.4, + 88.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.0, + 88.0, + 86.8, + 85.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.9, + 88.9, + 89.0, + 88.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.6, + 88.4, + 88.3, + 88.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.4, + 88.8, + 88.4, + 86.5 + ], + 
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 89.0, + 88.8, + 88.7, + 88.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.0, + 88.0, + 88.4, + 87.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.2, + 88.5, + 87.3, + 87.1 + ], + "down_blocks.1.attentions.0.proj_out": [ + 87.3, + 88.2, + 87.6, + 87.2 + ], + "down_blocks.1.attentions.1.proj_in": [ + 88.4, + 88.7, + 88.5, + 87.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.9, + 88.9, + 88.7, + 88.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.7, + 89.1, + 88.5, + 88.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.6, + 88.3, + 88.3, + 87.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 88.3, + 88.5, + 88.4, + 88.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.8, + 89.0, + 88.6, + 88.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.7, + 89.2, + 88.6, + 87.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.7, + 89.0, + 89.0, + 88.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.9, + 89.1, + 88.7, + 88.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 88.0, + 88.5, + 88.6, + 75.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.9, + 88.5, + 87.3, + 79.9 + ], + "down_blocks.1.attentions.1.proj_out": [ + 88.4, + 88.7, + 88.1, + 86.8 + ], + "down_blocks.1.resnets.0.conv1": [ + 88.5, + 88.5, + 87.8, + 88.3 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 88.5, + 88.3, + 88.2, + 87.8 + ], + "down_blocks.1.resnets.0.conv2": [ + 88.3, + 88.7, + 88.4, + 88.1 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 87.8, + 88.1, + 87.4, + 77.6 + ], + "down_blocks.1.resnets.1.conv1": [ + 88.2, + 88.6, + 88.2, + 87.6 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 88.6, + 88.7, + 88.5, + 87.7 + ], + "down_blocks.1.resnets.1.conv2": [ + 88.6, + 88.6, + 88.6, + 88.0 + ], + "down_blocks.1.downsamplers.0.conv": [ + 88.8, + 88.4, + 87.9, + 86.3 + ], + "down_blocks.2.attentions.0.proj_in": [ + 88.6, + 89.0, + 88.6, + 87.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.9, + 89.0, + 89.0, + 87.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.9, + 88.9, + 89.1, + 88.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.8, + 88.8, + 88.9, + 88.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 89.0, + 88.8, + 88.7, + 86.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 89.0, + 88.9, + 88.6, + 88.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.7, + 88.5, + 88.3, + 88.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.9, + 88.8, + 88.7, + 88.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 89.0, + 88.9, + 88.7, + 88.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.7, + 88.5, + 87.4, + 87.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.9, + 88.6, + 88.7, + 88.1 + ], + "down_blocks.2.attentions.0.proj_out": [ + 88.6, + 88.6, + 88.8, + 88.7 + ], + "down_blocks.2.attentions.1.proj_in": [ + 88.7, + 88.8, + 88.6, + 88.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.9, + 89.1, + 88.9, + 88.6 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.8, + 89.1, + 89.0, + 88.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.9, + 88.9, + 89.0, + 88.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 88.9, + 89.1, + 88.6, + 88.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 89.0, + 89.0, + 88.9, + 88.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.8, + 88.8, + 87.9, + 87.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.8, + 88.8, + 88.7, + 89.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 89.0, + 89.0, + 88.6, + 88.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 88.5, + 89.0, + 86.9, + 88.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 89.0, + 89.0, + 88.6, + 88.4 + ], + "down_blocks.2.attentions.1.proj_out": [ + 88.9, + 88.8, + 88.2, + 88.0 + ], + "down_blocks.2.resnets.0.conv1": [ + 88.7, + 89.1, + 88.0, + 87.9 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 89.0, + 89.3, + 89.1, + 88.8 + ], + "down_blocks.2.resnets.0.conv2": [ + 88.6, + 88.8, + 88.9, + 82.7 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 88.2, + 88.9, + 88.4, + 85.3 + ], + "down_blocks.2.resnets.1.conv1": [ + 89.1, + 88.9, + 88.8, + 88.1 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 89.0, + 89.1, + 88.9, + 89.1 + ], + "down_blocks.2.resnets.1.conv2": [ + 89.0, + 88.8, + 88.8, + 89.1 + ], + "down_blocks.2.downsamplers.0.conv": [ + 88.9, + 88.9, + 88.7, + 88.5 + ], + "down_blocks.3.resnets.0.conv1": [ + 89.1, + 89.3, + 88.7, + 89.0 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 89.1, + 88.9, + 88.7, + 89.0 + ], + "down_blocks.3.resnets.0.conv2": [ + 89.2, + 89.0, + 88.7, + 88.7 + ], + "down_blocks.3.resnets.1.conv1": [ + 89.0, + 89.0, + 89.2, + 88.6 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 89.1, + 88.9, + 88.7, + 88.0 + ], + "down_blocks.3.resnets.1.conv2": [ + 89.2, + 89.0, + 88.7, + 89.2 + ], + "up_blocks.0.resnets.0.conv1": [ + 89.0, + 88.8, + 88.9, + 89.1 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 88.8, + 89.0, + 88.9, + 89.0 + ], + "up_blocks.0.resnets.0.conv2": [ + 88.7, + 88.9, + 89.3, + 88.8 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 89.0, + 89.3, + 89.0, + 89.2 + ], + "up_blocks.0.resnets.1.conv1": [ + 89.1, + 89.0, + 88.9, + 89.0 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 89.2, + 89.0, + 89.1, + 88.7 + ], + "up_blocks.0.resnets.1.conv2": [ + 88.9, + 89.0, + 88.9, + 89.1 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 89.1, + 89.0, + 88.8, + 88.9 + ], + "up_blocks.0.resnets.2.conv1": [ + 89.0, + 89.1, + 89.1, + 89.1 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 89.2, + 89.1, + 89.0, + 88.8 + ], + "up_blocks.0.resnets.2.conv2": [ + 88.7, + 89.1, + 88.9, + 88.7 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 89.0, + 89.0, + 89.0, + 88.8 + ], + "up_blocks.0.upsamplers.0.conv": [ + 88.8, + 89.0, + 88.8, + 88.5 + ], + "up_blocks.1.attentions.0.proj_in": [ + 88.7, + 88.6, + 88.5, + 87.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 89.1, + 88.9, + 89.0, + 89.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 89.0, + 88.9, + 88.9, + 88.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.6, + 88.9, + 88.9, + 88.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.6, + 88.8, + 88.5, + 88.4 + ], + 
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 89.1, + 88.9, + 89.0, + 89.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.4, + 88.5, + 88.7, + 88.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.8, + 89.4, + 88.8, + 88.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 89.1, + 89.1, + 88.9, + 88.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.5, + 89.0, + 88.7, + 88.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.7, + 89.0, + 88.7, + 88.8 + ], + "up_blocks.1.attentions.0.proj_out": [ + 88.9, + 88.8, + 88.6, + 88.5 + ], + "up_blocks.1.attentions.1.proj_in": [ + 88.7, + 88.4, + 88.4, + 87.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 89.0, + 89.4, + 88.9, + 89.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 89.2, + 89.0, + 88.7, + 88.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.6, + 88.8, + 89.0, + 88.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 88.6, + 88.8, + 88.9, + 88.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.7, + 88.9, + 88.9, + 89.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.8, + 88.7, + 88.8, + 88.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 89.0, + 88.9, + 88.8, + 88.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.7, + 89.1, + 89.1, + 89.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 88.9, + 88.7, + 88.6, + 87.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.9, + 88.8, + 88.9, + 88.5 + ], + "up_blocks.1.attentions.1.proj_out": [ + 88.8, + 88.6, + 88.8, + 88.5 + ], + "up_blocks.1.attentions.2.proj_in": [ + 88.4, + 88.5, + 88.5, + 88.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 88.5, + 88.8, + 88.6, + 88.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 88.9, + 89.1, + 88.9, + 88.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 88.6, + 88.9, + 88.8, + 88.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 88.4, + 88.6, + 88.6, + 88.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 88.6, + 88.7, + 88.8, + 88.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 88.3, + 88.7, + 88.4, + 88.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 88.7, + 88.8, + 88.6, + 88.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 88.8, + 89.2, + 88.9, + 88.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 88.5, + 88.6, + 89.0, + 88.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 88.8, + 88.7, + 88.7, + 88.7 + ], + "up_blocks.1.attentions.2.proj_out": [ + 88.2, + 88.8, + 88.8, + 88.7 + ], + "up_blocks.1.resnets.0.conv1": [ + 88.8, + 89.1, + 87.0, + 88.2 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 88.9, + 89.2, + 88.6, + 88.8 + ], + "up_blocks.1.resnets.0.conv2": [ + 88.9, + 88.8, + 88.7, + 88.6 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 88.8, + 88.7, + 88.7, + 88.5 + ], + "up_blocks.1.resnets.1.conv1": [ + 88.9, + 88.6, + 88.8, + 89.0 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 89.0, + 88.9, + 88.8, + 88.9 + ], + "up_blocks.1.resnets.1.conv2": [ + 88.9, + 89.2, + 88.8, + 88.2 + ], + 
"up_blocks.1.resnets.1.conv_shortcut": [ + 88.4, + 88.7, + 88.6, + 88.8 + ], + "up_blocks.1.resnets.2.conv1": [ + 88.9, + 89.1, + 88.7, + 88.7 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 89.0, + 88.8, + 89.1, + 88.9 + ], + "up_blocks.1.resnets.2.conv2": [ + 88.9, + 88.8, + 88.8, + 88.9 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 88.8, + 88.4, + 88.6, + 88.5 + ], + "up_blocks.1.upsamplers.0.conv": [ + 88.3, + 88.7, + 88.7, + 88.3 + ], + "up_blocks.2.attentions.0.proj_in": [ + 88.2, + 88.3, + 88.4, + 88.2 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.6, + 88.6, + 88.6, + 88.8 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.6, + 89.2, + 88.7, + 88.9 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.2, + 88.7, + 88.6, + 87.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.3, + 88.1, + 88.6, + 88.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 89.1, + 89.3, + 89.2, + 88.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.9, + 89.0, + 88.9, + 88.8 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.8, + 89.0, + 88.8, + 88.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.6, + 88.7, + 88.4, + 88.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.8, + 88.9, + 88.5, + 88.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.6, + 88.5, + 88.7, + 88.8 + ], + "up_blocks.2.attentions.0.proj_out": [ + 88.4, + 88.3, + 88.4, + 88.3 + ], + "up_blocks.2.attentions.1.proj_in": [ + 87.9, + 88.1, + 88.1, + 88.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.4, + 88.7, + 88.6, + 88.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.4, + 88.5, + 88.7, + 88.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.1, + 88.3, + 88.6, + 88.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 88.1, + 88.3, + 88.2, + 87.9 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.8, + 88.7, + 88.9, + 88.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.8, + 89.0, + 88.9, + 88.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.4, + 88.6, + 88.7, + 88.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 89.0, + 89.1, + 88.9, + 88.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 88.1, + 88.5, + 88.4, + 88.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.4, + 88.4, + 88.3, + 88.4 + ], + "up_blocks.2.attentions.1.proj_out": [ + 87.9, + 88.0, + 88.1, + 87.9 + ], + "up_blocks.2.attentions.2.proj_in": [ + 87.8, + 87.9, + 87.9, + 87.1 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 88.7, + 88.7, + 88.6, + 88.0 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 88.4, + 88.5, + 88.4, + 88.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 88.3, + 88.1, + 88.1, + 87.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 87.0, + 87.7, + 87.5, + 87.0 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 89.0, + 88.5, + 88.7, + 88.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 88.0, + 88.1, + 88.3, + 87.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 88.1, + 88.3, + 88.5, + 88.3 + ], 
+ "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 88.6, + 88.4, + 88.3, + 88.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 86.8, + 87.3, + 87.6, + 86.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 88.0, + 88.6, + 88.3, + 87.9 + ], + "up_blocks.2.attentions.2.proj_out": [ + 87.8, + 87.6, + 87.9, + 87.5 + ], + "up_blocks.2.resnets.0.conv1": [ + 88.4, + 88.6, + 89.0, + 88.1 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 89.0, + 88.9, + 88.9, + 88.9 + ], + "up_blocks.2.resnets.0.conv2": [ + 88.4, + 88.6, + 88.7, + 88.0 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 88.1, + 88.2, + 88.2, + 87.7 + ], + "up_blocks.2.resnets.1.conv1": [ + 88.3, + 88.2, + 88.6, + 88.3 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 88.5, + 88.4, + 88.6, + 88.8 + ], + "up_blocks.2.resnets.1.conv2": [ + 88.4, + 88.3, + 88.6, + 88.5 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 88.0, + 88.1, + 88.1, + 87.5 + ], + "up_blocks.2.resnets.2.conv1": [ + 88.4, + 88.3, + 88.3, + 87.3 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 88.3, + 88.5, + 88.2, + 88.1 + ], + "up_blocks.2.resnets.2.conv2": [ + 88.2, + 88.1, + 88.2, + 88.3 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 87.6, + 87.8, + 87.9, + 87.0 + ], + "up_blocks.2.upsamplers.0.conv": [ + 88.1, + 87.8, + 88.2, + 87.6 + ], + "up_blocks.3.attentions.0.proj_in": [ + 86.9, + 87.4, + 87.4, + 86.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.2, + 88.4, + 88.2, + 88.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.2, + 87.7, + 87.9, + 88.0 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.6, + 87.8, + 87.5, + 87.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 87.5, + 87.4, + 87.7, + 87.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.7, + 89.0, + 88.5, + 88.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.2, + 88.4, + 88.3, + 87.9 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.4, + 88.6, + 88.6, + 88.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.5, + 88.2, + 88.2, + 88.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 87.9, + 87.9, + 87.8, + 87.3 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 87.9, + 88.1, + 88.0, + 87.9 + ], + "up_blocks.3.attentions.0.proj_out": [ + 87.3, + 87.9, + 87.9, + 87.5 + ], + "up_blocks.3.attentions.1.proj_in": [ + 86.0, + 86.5, + 86.4, + 86.0 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.2, + 88.4, + 88.4, + 88.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.5, + 88.7, + 88.3, + 88.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.2, + 87.9, + 88.1, + 87.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 87.2, + 87.2, + 87.2, + 87.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.7, + 88.7, + 88.7, + 88.9 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.1, + 88.6, + 88.8, + 88.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.3, + 88.2, + 88.4, + 88.6 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.5, + 88.6, + 88.6, + 88.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 86.9, + 86.7, + 87.3, + 86.6 + ], + 
"up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 87.7, + 87.9, + 87.9, + 87.7 + ], + "up_blocks.3.attentions.1.proj_out": [ + 87.1, + 87.5, + 87.4, + 87.4 + ], + "up_blocks.3.attentions.2.proj_in": [ + 85.4, + 85.1, + 85.9, + 85.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 87.9, + 88.2, + 88.1, + 87.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 87.7, + 87.9, + 87.5, + 87.7 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 87.0, + 86.8, + 87.3, + 86.9 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 86.6, + 86.6, + 87.1, + 86.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 88.7, + 88.7, + 88.7, + 88.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 88.2, + 87.9, + 88.6, + 88.1 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 87.7, + 87.9, + 87.9, + 87.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 87.6, + 87.5, + 87.5, + 87.1 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 85.8, + 85.8, + 86.1, + 85.9 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 88.0, + 88.5, + 88.0, + 87.6 + ], + "up_blocks.3.attentions.2.proj_out": [ + 87.5, + 87.4, + 87.4, + 87.4 + ], + "up_blocks.3.resnets.0.conv1": [ + 87.7, + 87.7, + 87.8, + 87.7 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 88.5, + 88.7, + 88.4, + 88.6 + ], + "up_blocks.3.resnets.0.conv2": [ + 87.4, + 87.4, + 87.8, + 87.0 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 86.5, + 86.5, + 86.5, + 85.0 + ], + "up_blocks.3.resnets.1.conv1": [ + 86.5, + 87.0, + 86.9, + 86.4 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 88.1, + 87.9, + 88.3, + 88.2 + ], + "up_blocks.3.resnets.1.conv2": [ + 86.9, + 86.7, + 87.2, + 86.6 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 85.9, + 85.6, + 86.2, + 86.0 + ], + "up_blocks.3.resnets.2.conv1": [ + 87.2, + 87.3, + 87.1, + 86.7 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 88.0, + 88.3, + 88.3, + 88.3 + ], + "up_blocks.3.resnets.2.conv2": [ + 87.0, + 86.7, + 87.1, + 86.6 + ], + "up_blocks.3.resnets.2.conv_shortcut": [ + 85.9, + 86.1, + 85.7, + 85.8 + ], + "mid_block.attentions.0.proj_in": [ + 88.9, + 89.2, + 88.8, + 89.0 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 89.1, + 89.2, + 88.6, + 89.0 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.9, + 89.0, + 89.1, + 88.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.8, + 89.1, + 88.9, + 88.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 89.1, + 89.2, + 88.9, + 88.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.9, + 89.2, + 88.9, + 88.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 89.1, + 89.3, + 89.0, + 89.0 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 89.0, + 89.4, + 89.0, + 89.0 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.9, + 89.2, + 89.0, + 88.6 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.8, + 89.2, + 88.8, + 89.0 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.9, + 89.0, + 89.0, + 89.1 + ], + "mid_block.attentions.0.proj_out": [ + 88.9, + 89.1, + 89.2, + 89.1 + ], + "mid_block.resnets.0.conv1": [ + 89.1, + 89.3, + 89.1, + 89.0 + ], + "mid_block.resnets.0.time_emb_proj": [ + 89.0, + 89.0, + 89.0, + 89.1 + ], + "mid_block.resnets.0.conv2": [ + 
88.8, + 89.0, + 88.7, + 88.9 + ], + "mid_block.resnets.1.conv1": [ + 88.9, + 89.1, + 88.9, + 88.9 + ], + "mid_block.resnets.1.time_emb_proj": [ + 89.0, + 89.3, + 89.0, + 88.8 + ], + "mid_block.resnets.1.conv2": [ + 89.0, + 88.9, + 89.0, + 88.9 + ] + }, + "8": { + "time_embedding.linear_1": [ + 89.1, + 88.8, + 88.9, + 88.5 + ], + "time_embedding.linear_2": [ + 88.8, + 88.6, + 88.8, + 87.6 + ], + "down_blocks.0.attentions.0.proj_in": [ + 88.3, + 88.3, + 88.2, + 88.0 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.7, + 88.9, + 88.6, + 88.5 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.8, + 88.7, + 88.3, + 88.7 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.6, + 88.6, + 88.5, + 88.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.5, + 88.5, + 88.6, + 88.1 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.9, + 88.8, + 88.4, + 88.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.8, + 88.4, + 88.4, + 88.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.8, + 88.6, + 88.8, + 87.9 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.8, + 88.9, + 88.6, + 88.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.4, + 88.4, + 88.4, + 88.5 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.4, + 88.2, + 88.5, + 87.9 + ], + "down_blocks.0.attentions.0.proj_out": [ + 88.4, + 87.9, + 88.4, + 88.5 + ], + "down_blocks.0.attentions.1.proj_in": [ + 88.2, + 88.3, + 87.5, + 87.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.8, + 89.0, + 88.7, + 88.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 89.0, + 88.7, + 88.7, + 88.8 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.8, + 88.5, + 88.4, + 88.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 88.8, + 88.7, + 88.1, + 88.4 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.8, + 89.2, + 88.8, + 88.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.9, + 88.6, + 88.6, + 87.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 89.1, + 88.9, + 88.8, + 88.4 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 89.1, + 88.8, + 89.2, + 88.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 88.6, + 88.4, + 88.5, + 87.0 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.8, + 88.7, + 88.5, + 88.5 + ], + "down_blocks.0.attentions.1.proj_out": [ + 88.9, + 88.3, + 88.2, + 87.8 + ], + "down_blocks.0.resnets.0.conv1": [ + 88.6, + 88.6, + 88.5, + 88.3 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 88.7, + 88.7, + 88.5, + 88.1 + ], + "down_blocks.0.resnets.0.conv2": [ + 88.4, + 88.5, + 88.1, + 87.8 + ], + "down_blocks.0.resnets.1.conv1": [ + 88.7, + 88.3, + 88.2, + 86.9 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 88.4, + 88.7, + 88.7, + 88.8 + ], + "down_blocks.0.resnets.1.conv2": [ + 88.5, + 88.3, + 87.9, + 88.7 + ], + "down_blocks.0.downsamplers.0.conv": [ + 88.8, + 88.4, + 88.5, + 86.1 + ], + "down_blocks.1.attentions.0.proj_in": [ + 89.1, + 88.8, + 88.8, + 87.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 89.0, + 88.9, + 89.0, + 88.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 89.0, + 
89.1, + 89.0, + 88.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 89.1, + 88.4, + 88.6, + 88.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.7, + 88.5, + 88.0, + 88.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 89.2, + 88.8, + 89.1, + 88.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 89.0, + 89.1, + 89.0, + 88.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 89.0, + 89.0, + 89.1, + 88.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 89.1, + 89.0, + 88.8, + 88.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.8, + 88.4, + 88.6, + 87.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.8, + 88.3, + 88.8, + 88.5 + ], + "down_blocks.1.attentions.0.proj_out": [ + 88.9, + 88.8, + 89.0, + 87.1 + ], + "down_blocks.1.attentions.1.proj_in": [ + 88.9, + 88.6, + 89.2, + 87.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 89.3, + 89.0, + 88.6, + 88.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 89.3, + 89.1, + 88.9, + 89.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 89.2, + 88.8, + 88.7, + 88.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 88.9, + 88.8, + 88.6, + 88.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 89.5, + 88.9, + 88.9, + 88.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 89.2, + 88.9, + 88.9, + 88.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 89.0, + 88.9, + 88.9, + 87.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 89.4, + 89.0, + 88.7, + 88.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 89.1, + 88.6, + 88.7, + 87.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 89.1, + 89.2, + 89.0, + 88.8 + ], + "down_blocks.1.attentions.1.proj_out": [ + 89.0, + 88.7, + 89.0, + 88.3 + ], + "down_blocks.1.resnets.0.conv1": [ + 89.1, + 88.7, + 88.9, + 88.4 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 89.0, + 88.9, + 88.8, + 88.3 + ], + "down_blocks.1.resnets.0.conv2": [ + 89.0, + 89.0, + 88.8, + 88.4 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 88.9, + 88.4, + 88.6, + 86.8 + ], + "down_blocks.1.resnets.1.conv1": [ + 88.9, + 88.9, + 88.6, + 88.2 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 88.8, + 89.1, + 88.5, + 88.3 + ], + "down_blocks.1.resnets.1.conv2": [ + 88.9, + 88.8, + 88.4, + 88.7 + ], + "down_blocks.1.downsamplers.0.conv": [ + 89.0, + 88.7, + 88.7, + 88.5 + ], + "down_blocks.2.attentions.0.proj_in": [ + 88.9, + 89.0, + 88.7, + 88.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 89.2, + 88.9, + 89.0, + 88.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 89.0, + 89.1, + 88.4, + 88.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 89.1, + 88.8, + 89.4, + 88.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 89.2, + 88.9, + 88.8, + 88.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 89.1, + 88.8, + 89.1, + 88.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 89.1, + 88.9, + 88.6, + 89.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 89.3, + 89.0, + 88.8, + 88.9 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 89.1, + 88.8, + 89.0, + 89.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 89.1, + 89.1, + 89.0, + 88.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 89.2, + 89.1, + 89.1, + 88.7 + ], + "down_blocks.2.attentions.0.proj_out": [ + 89.1, + 89.0, + 88.8, + 88.6 + ], + "down_blocks.2.attentions.1.proj_in": [ + 88.9, + 89.1, + 88.8, + 87.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 89.0, + 88.7, + 89.0, + 88.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 89.2, + 89.1, + 88.7, + 87.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 89.2, + 88.9, + 88.8, + 88.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 89.2, + 89.0, + 88.6, + 88.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 89.2, + 88.9, + 88.7, + 88.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 89.1, + 89.0, + 89.0, + 88.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 89.3, + 89.0, + 88.9, + 89.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 89.3, + 89.2, + 88.6, + 88.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 89.2, + 89.0, + 89.0, + 89.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.9, + 89.1, + 89.0, + 88.8 + ], + "down_blocks.2.attentions.1.proj_out": [ + 89.1, + 88.6, + 88.9, + 89.0 + ], + "down_blocks.2.resnets.0.conv1": [ + 89.1, + 89.3, + 88.8, + 88.2 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 88.9, + 89.0, + 88.8, + 88.0 + ], + "down_blocks.2.resnets.0.conv2": [ + 89.3, + 89.0, + 88.7, + 88.5 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 89.1, + 88.8, + 88.7, + 89.1 + ], + "down_blocks.2.resnets.1.conv1": [ + 89.2, + 89.1, + 88.6, + 88.9 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 89.1, + 89.0, + 89.0, + 89.2 + ], + "down_blocks.2.resnets.1.conv2": [ + 89.2, + 89.3, + 88.9, + 88.4 + ], + "down_blocks.2.downsamplers.0.conv": [ + 88.9, + 88.8, + 88.9, + 88.1 + ], + "down_blocks.3.resnets.0.conv1": [ + 89.3, + 89.2, + 88.8, + 89.0 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 89.0, + 88.8, + 89.1, + 87.9 + ], + "down_blocks.3.resnets.0.conv2": [ + 88.9, + 89.1, + 89.2, + 88.6 + ], + "down_blocks.3.resnets.1.conv1": [ + 89.4, + 89.1, + 88.9, + 88.8 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 89.1, + 89.0, + 89.1, + 88.9 + ], + "down_blocks.3.resnets.1.conv2": [ + 89.1, + 88.9, + 89.0, + 88.8 + ], + "up_blocks.0.resnets.0.conv1": [ + 89.3, + 89.2, + 89.0, + 88.4 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 89.0, + 88.8, + 88.7, + 87.9 + ], + "up_blocks.0.resnets.0.conv2": [ + 89.3, + 88.9, + 89.0, + 88.8 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 89.2, + 89.0, + 88.9, + 89.1 + ], + "up_blocks.0.resnets.1.conv1": [ + 89.3, + 88.9, + 88.8, + 89.2 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 89.1, + 88.9, + 89.0, + 88.6 + ], + "up_blocks.0.resnets.1.conv2": [ + 89.3, + 88.9, + 88.8, + 88.5 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 89.2, + 88.9, + 89.1, + 89.0 + ], + "up_blocks.0.resnets.2.conv1": [ + 89.3, + 89.0, + 88.7, + 88.7 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 89.0, + 89.2, + 88.9, + 89.1 + ], + "up_blocks.0.resnets.2.conv2": [ + 89.1, + 89.0, + 89.1, + 88.9 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 89.2, + 89.3, + 88.9, + 88.3 + ], + 
"up_blocks.0.upsamplers.0.conv": [ + 89.1, + 88.9, + 88.8, + 89.0 + ], + "up_blocks.1.attentions.0.proj_in": [ + 89.0, + 88.8, + 88.8, + 88.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 89.0, + 89.0, + 88.8, + 87.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 89.1, + 89.0, + 88.8, + 88.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 89.3, + 88.8, + 88.7, + 88.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.9, + 89.1, + 88.7, + 88.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 89.2, + 89.0, + 88.8, + 87.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 89.1, + 89.2, + 88.6, + 89.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 89.0, + 88.8, + 88.9, + 88.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.9, + 89.1, + 88.8, + 88.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 89.4, + 89.2, + 88.8, + 88.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.9, + 88.9, + 89.1, + 88.1 + ], + "up_blocks.1.attentions.0.proj_out": [ + 88.8, + 89.1, + 88.7, + 89.1 + ], + "up_blocks.1.attentions.1.proj_in": [ + 88.8, + 89.2, + 88.8, + 88.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 89.2, + 88.8, + 88.8, + 88.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 89.3, + 89.1, + 88.9, + 88.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.8, + 89.0, + 88.9, + 87.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 89.3, + 88.9, + 88.7, + 88.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 89.2, + 89.0, + 89.2, + 89.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 89.0, + 89.0, + 89.1, + 88.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.9, + 88.8, + 89.3, + 88.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 89.0, + 88.9, + 88.5, + 89.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 89.2, + 88.9, + 88.6, + 88.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.9, + 89.1, + 88.7, + 88.7 + ], + "up_blocks.1.attentions.1.proj_out": [ + 89.2, + 88.9, + 88.9, + 88.7 + ], + "up_blocks.1.attentions.2.proj_in": [ + 89.1, + 89.0, + 88.6, + 88.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 89.1, + 89.2, + 89.1, + 89.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 89.1, + 88.9, + 89.2, + 89.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 89.2, + 88.8, + 88.9, + 89.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 89.0, + 88.9, + 89.1, + 87.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 89.0, + 89.0, + 88.7, + 88.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 89.0, + 89.2, + 88.9, + 88.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 89.1, + 88.9, + 89.3, + 88.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 89.1, + 88.9, + 88.7, + 88.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 88.9, + 89.2, + 89.3, + 88.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 88.8, + 88.9, + 89.1, + 88.9 + ], + "up_blocks.1.attentions.2.proj_out": [ + 89.2, + 88.7, + 88.9, + 88.5 + 
], + "up_blocks.1.resnets.0.conv1": [ + 89.1, + 88.8, + 88.7, + 88.6 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 89.0, + 89.1, + 89.1, + 88.6 + ], + "up_blocks.1.resnets.0.conv2": [ + 89.2, + 89.1, + 88.7, + 87.9 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 89.2, + 88.9, + 89.0, + 88.9 + ], + "up_blocks.1.resnets.1.conv1": [ + 88.9, + 88.9, + 88.9, + 89.0 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 89.1, + 88.8, + 88.8, + 88.2 + ], + "up_blocks.1.resnets.1.conv2": [ + 89.0, + 89.2, + 89.0, + 88.4 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 89.4, + 89.1, + 89.3, + 88.1 + ], + "up_blocks.1.resnets.2.conv1": [ + 89.5, + 88.8, + 88.8, + 88.5 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 89.0, + 89.1, + 89.0, + 88.9 + ], + "up_blocks.1.resnets.2.conv2": [ + 89.3, + 88.8, + 88.6, + 88.0 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 88.9, + 88.9, + 88.6, + 87.9 + ], + "up_blocks.1.upsamplers.0.conv": [ + 89.1, + 88.8, + 88.8, + 89.0 + ], + "up_blocks.2.attentions.0.proj_in": [ + 88.8, + 88.7, + 89.0, + 88.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 89.2, + 89.1, + 88.9, + 88.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 89.2, + 89.0, + 88.9, + 88.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.9, + 88.9, + 88.8, + 88.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 89.2, + 88.9, + 88.9, + 88.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 89.2, + 88.9, + 88.6, + 88.9 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 89.3, + 89.0, + 89.2, + 89.2 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 89.1, + 88.7, + 88.9, + 89.1 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 89.1, + 89.0, + 88.8, + 88.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 89.1, + 88.9, + 88.8, + 88.1 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 89.2, + 88.7, + 88.9, + 89.0 + ], + "up_blocks.2.attentions.0.proj_out": [ + 89.1, + 88.8, + 88.6, + 88.9 + ], + "up_blocks.2.attentions.1.proj_in": [ + 88.6, + 88.6, + 88.8, + 87.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 89.0, + 88.8, + 88.8, + 88.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.9, + 88.9, + 89.0, + 88.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 89.0, + 88.5, + 88.7, + 89.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 88.6, + 88.6, + 88.5, + 88.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 89.2, + 89.2, + 88.8, + 88.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 89.2, + 89.1, + 89.2, + 88.9 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 89.1, + 88.9, + 89.0, + 88.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 89.2, + 88.8, + 89.3, + 88.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 88.6, + 88.5, + 89.0, + 88.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.9, + 88.6, + 88.7, + 88.4 + ], + "up_blocks.2.attentions.1.proj_out": [ + 89.0, + 88.5, + 88.7, + 88.4 + ], + "up_blocks.2.attentions.2.proj_in": [ + 88.6, + 88.1, + 88.3, + 88.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 88.8, + 88.6, + 88.8, + 88.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 89.0, + 
88.9, + 88.9, + 88.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 88.8, + 88.3, + 88.8, + 88.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 88.4, + 88.5, + 88.5, + 88.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 89.0, + 89.1, + 88.9, + 89.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 88.9, + 88.7, + 88.9, + 88.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 89.2, + 88.9, + 88.6, + 88.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 88.8, + 89.0, + 88.7, + 88.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 88.7, + 88.4, + 88.7, + 88.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 88.9, + 88.4, + 88.7, + 88.7 + ], + "up_blocks.2.attentions.2.proj_out": [ + 88.7, + 88.6, + 88.7, + 88.0 + ], + "up_blocks.2.resnets.0.conv1": [ + 89.0, + 88.8, + 89.1, + 89.1 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 89.2, + 89.2, + 88.9, + 88.6 + ], + "up_blocks.2.resnets.0.conv2": [ + 89.1, + 89.0, + 88.7, + 88.3 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 88.8, + 88.5, + 88.6, + 88.3 + ], + "up_blocks.2.resnets.1.conv1": [ + 89.0, + 88.7, + 89.0, + 88.5 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 89.2, + 89.0, + 89.3, + 88.8 + ], + "up_blocks.2.resnets.1.conv2": [ + 89.2, + 88.9, + 88.8, + 87.9 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 88.5, + 89.0, + 88.4, + 88.6 + ], + "up_blocks.2.resnets.2.conv1": [ + 89.0, + 88.9, + 88.9, + 88.7 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 88.9, + 88.7, + 88.6, + 88.8 + ], + "up_blocks.2.resnets.2.conv2": [ + 88.8, + 88.3, + 88.8, + 89.1 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 88.7, + 88.6, + 88.5, + 88.6 + ], + "up_blocks.2.upsamplers.0.conv": [ + 88.7, + 88.7, + 88.5, + 88.1 + ], + "up_blocks.3.attentions.0.proj_in": [ + 88.3, + 88.3, + 88.4, + 88.0 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.6, + 88.8, + 88.7, + 88.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.8, + 88.6, + 88.9, + 88.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.6, + 88.6, + 88.7, + 88.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.7, + 88.4, + 88.7, + 88.8 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 89.4, + 88.9, + 88.8, + 88.8 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.8, + 88.5, + 88.8, + 87.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 89.1, + 88.7, + 88.9, + 88.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.8, + 88.8, + 88.8, + 87.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.6, + 88.4, + 88.6, + 88.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.6, + 88.6, + 88.5, + 88.4 + ], + "up_blocks.3.attentions.0.proj_out": [ + 88.4, + 88.3, + 88.5, + 88.1 + ], + "up_blocks.3.attentions.1.proj_in": [ + 88.1, + 88.0, + 88.3, + 88.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.9, + 88.8, + 88.5, + 88.7 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 89.0, + 88.5, + 88.9, + 88.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.6, + 88.4, + 88.7, + 88.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 88.6, + 88.3, + 88.6, + 88.6 + ], + 
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 89.0, + 89.0, + 88.9, + 89.0 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.9, + 89.0, + 88.7, + 88.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 89.3, + 88.8, + 89.2, + 88.7 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.9, + 88.7, + 88.9, + 88.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 88.8, + 88.5, + 88.3, + 88.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.3, + 88.3, + 88.5, + 88.1 + ], + "up_blocks.3.attentions.1.proj_out": [ + 88.5, + 88.4, + 88.7, + 88.6 + ], + "up_blocks.3.attentions.2.proj_in": [ + 88.3, + 88.3, + 88.4, + 88.2 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 88.7, + 88.9, + 88.5, + 88.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 89.0, + 88.7, + 88.8, + 88.2 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 88.8, + 88.8, + 88.5, + 88.1 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 88.4, + 88.6, + 88.5, + 88.0 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 89.0, + 88.6, + 89.0, + 88.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 89.2, + 89.0, + 89.0, + 88.9 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 89.2, + 88.5, + 88.9, + 87.9 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 88.8, + 89.0, + 88.8, + 88.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 88.7, + 88.3, + 88.8, + 88.3 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 88.7, + 88.7, + 88.8, + 88.4 + ], + "up_blocks.3.attentions.2.proj_out": [ + 88.8, + 88.8, + 89.0, + 89.0 + ], + "up_blocks.3.resnets.0.conv1": [ + 88.6, + 88.3, + 88.6, + 87.9 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 88.8, + 89.0, + 88.8, + 88.9 + ], + "up_blocks.3.resnets.0.conv2": [ + 88.6, + 88.5, + 88.7, + 88.7 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 88.5, + 88.2, + 88.4, + 87.8 + ], + "up_blocks.3.resnets.1.conv1": [ + 88.2, + 88.3, + 88.6, + 88.6 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 88.5, + 88.6, + 88.8, + 88.5 + ], + "up_blocks.3.resnets.1.conv2": [ + 88.6, + 88.2, + 88.6, + 87.9 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 88.0, + 87.9, + 88.1, + 88.3 + ], + "up_blocks.3.resnets.2.conv1": [ + 88.8, + 88.2, + 88.5, + 88.7 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 88.7, + 88.2, + 88.7, + 88.7 + ], + "up_blocks.3.resnets.2.conv2": [ + 88.6, + 88.5, + 88.7, + 88.8 + ], + "up_blocks.3.resnets.2.conv_shortcut": [ + 88.4, + 88.3, + 88.6, + 87.3 + ], + "mid_block.attentions.0.proj_in": [ + 89.2, + 89.1, + 89.2, + 88.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 89.4, + 89.0, + 89.1, + 87.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 89.3, + 89.2, + 89.1, + 88.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 89.0, + 89.0, + 89.0, + 88.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 89.0, + 89.1, + 88.8, + 88.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 89.1, + 88.8, + 88.9, + 88.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 89.1, + 89.0, + 88.8, + 88.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 89.2, + 88.7, + 88.8, + 89.0 + ], + 
"mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 89.2, + 89.1, + 88.9, + 89.0 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.9, + 89.0, + 88.9, + 89.0 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 89.4, + 88.9, + 89.0, + 89.3 + ], + "mid_block.attentions.0.proj_out": [ + 89.0, + 88.7, + 89.0, + 89.0 + ], + "mid_block.resnets.0.conv1": [ + 89.2, + 88.8, + 88.9, + 88.8 + ], + "mid_block.resnets.0.time_emb_proj": [ + 89.0, + 89.2, + 89.0, + 89.0 + ], + "mid_block.resnets.0.conv2": [ + 89.2, + 89.0, + 89.0, + 89.2 + ], + "mid_block.resnets.1.conv1": [ + 89.1, + 88.8, + 88.9, + 88.9 + ], + "mid_block.resnets.1.time_emb_proj": [ + 89.2, + 88.9, + 88.7, + 88.7 + ], + "mid_block.resnets.1.conv2": [ + 89.1, + 88.9, + 88.7, + 87.8 + ] + } + }, + "cumulative": { + "1": { + "metadata": { + "nbits": 1, + "out_ngroups": 1, + "in_ngroups": 1, + "cumulative": true, + "candidates": [ + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0", + "mid_block.resnets.1.time_emb_proj", + "up_blocks.0.resnets.1.time_emb_proj", + "down_blocks.3.resnets.1.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.0.resnets.0.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.0.resnets.2.time_emb_proj", + "down_blocks.3.resnets.0.time_emb_proj", + "mid_block.resnets.0.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "mid_block.attentions.0.proj_in", + "up_blocks.0.resnets.2.conv_shortcut", + "mid_block.attentions.0.proj_out", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.3.resnets.1.conv1", + "mid_block.resnets.0.conv1", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.resnets.0.conv_shortcut", + "down_blocks.3.resnets.1.conv2", + "up_blocks.0.resnets.0.conv1", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.0.resnets.1.conv_shortcut", + "up_blocks.1.resnets.2.time_emb_proj", + "down_blocks.3.resnets.0.conv2", + "down_blocks.3.resnets.0.conv1", + "up_blocks.0.resnets.1.conv1", + "up_blocks.1.resnets.1.time_emb_proj", + "mid_block.resnets.0.conv2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k", + "mid_block.resnets.1.conv1", + "up_blocks.2.resnets.0.time_emb_proj", + "up_blocks.0.resnets.0.conv2", + "mid_block.resnets.1.conv2", + "up_blocks.0.resnets.2.conv1", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.resnets.1.conv2", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.2.downsamplers.0.conv", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.1.resnets.0.time_emb_proj", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.2.resnets.1.conv2", + 
"up_blocks.1.attentions.1.proj_out", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.1.resnets.2.conv2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.1.resnets.0.conv1", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.2.resnets.1.conv1", + "down_blocks.2.resnets.0.time_emb_proj", + "down_blocks.2.resnets.1.time_emb_proj", + "up_blocks.1.attentions.0.proj_out", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.resnets.2.conv1", + "up_blocks.1.resnets.1.conv1", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.resnets.1.conv2", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.resnets.0.conv2", + "down_blocks.2.attentions.0.proj_in", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.2.resnets.0.conv1", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.resnets.2.conv2", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.2.resnets.0.conv1", + "down_blocks.1.resnets.1.time_emb_proj", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q", + "down_blocks.1.attentions.1.proj_out", + "down_blocks.2.attentions.1.proj_in", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.1.resnets.1.conv_shortcut", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + 
"up_blocks.1.upsamplers.0.conv", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.resnets.1.conv1", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.1.resnets.0.conv_shortcut", + "up_blocks.1.resnets.2.conv_shortcut", + "down_blocks.2.attentions.1.proj_out", + "up_blocks.1.attentions.1.proj_in", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.0.upsamplers.0.conv", + "up_blocks.2.resnets.1.time_emb_proj", + "down_blocks.2.attentions.0.proj_out", + "up_blocks.1.attentions.0.proj_in", + "up_blocks.2.resnets.1.conv2", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.resnets.0.conv2", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.2.proj_out", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.2.proj_in", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.resnets.1.conv2", + "up_blocks.3.resnets.0.time_emb_proj", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.1.resnets.1.conv1", + "down_blocks.2.resnets.0.conv_shortcut", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.2.resnets.0.conv2", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.2.resnets.2.conv1", + "down_blocks.1.downsamplers.0.conv", + "down_blocks.1.resnets.0.conv2", + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.1.resnets.0.conv1", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.resnets.0.time_emb_proj", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.2.resnets.2.conv2", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", 
+ "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.0.resnets.1.time_emb_proj", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.1.attentions.0.proj_out", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.1.proj_out", + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.1.proj_in", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.0.proj_in", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.2.resnets.2.time_emb_proj", + "up_blocks.2.attentions.0.proj_in", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.0.attentions.0.proj_out", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v", + "down_blocks.0.resnets.0.conv1", + "down_blocks.1.attentions.1.proj_in", + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.0.resnets.1.conv2", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q", + "down_blocks.0.attentions.1.proj_out", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.0.proj_out", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.0.proj_out", + "up_blocks.2.resnets.1.conv_shortcut", + "down_blocks.0.attentions.0.proj_in", + "up_blocks.2.attentions.2.proj_out", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.0.attentions.1.proj_in", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.resnets.0.conv_shortcut", + "up_blocks.2.upsamplers.0.conv", + "down_blocks.0.resnets.1.conv1", + "up_blocks.2.attentions.2.proj_in", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.0.resnets.0.time_emb_proj", + "up_blocks.3.resnets.0.conv2", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0", 
+ "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.2.resnets.2.conv_shortcut", + "up_blocks.3.resnets.0.conv1", + "down_blocks.0.downsamplers.0.conv", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.3.attentions.0.proj_in", + "up_blocks.3.resnets.1.time_emb_proj", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.1.proj_out", + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2", + "down_blocks.0.resnets.0.conv2", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.2.proj_out", + "up_blocks.2.resnets.0.conv_shortcut", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.1.proj_in", + "up_blocks.3.resnets.1.conv1", + "up_blocks.3.resnets.2.conv1", + "up_blocks.3.resnets.1.conv2", + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.resnets.1.conv_shortcut", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.resnets.2.time_emb_proj", + "up_blocks.3.attentions.2.proj_in", + "up_blocks.3.resnets.2.conv2", + "time_embedding.linear_1", + "time_embedding.linear_2", + "up_blocks.3.resnets.2.conv_shortcut", + "up_blocks.3.resnets.0.conv_shortcut" + ], + "sizes": [ + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1310720, + 1638400, + 409600, + 1638400, + 3276800, + 1638400, + 1638400, + 6553600, + 14745600, + 14745600, + 1638400, + 1638400, + 13107200, + 3276800, + 14745600, + 29491200, + 1310720, + 3276800, + 1638400, + 14745600, + 14745600, + 29491200, + 1638400, + 14745600, + 1638400, + 1638400, + 14745600, + 819200, + 14745600, + 14745600, + 29491200, + 1638400, + 655360, + 1638400, + 409600, + 14745600, + 655360, + 14745600, + 1638400, + 1638400, + 1638400, + 1310720, + 14745600, + 1638400, + 409600, + 409600, + 655360, + 1638400, + 6553600, + 327680, + 1638400, + 6553600, + 409600, + 1638400, + 1310720, + 1310720, + 14745600, + 1638400, + 1638400, + 102400, + 6553600, + 29491200, + 1638400, + 14745600, + 1638400, + 1638400, + 1638400, + 102400, + 1638400, + 1638400, + 22118400, + 29491200, + 13107200, + 6553600, + 14745600, + 409600, + 14745600, + 1638400, + 1638400, + 1638400, + 11059200, + 13107200, + 14745600, + 1638400, + 1638400, + 327680, + 13107200, + 1638400, + 327680, + 7372800, + 819200, + 1638400, + 1310720, + 1638400, + 1638400, + 409600, + 1638400, + 1310720, + 409600, + 1638400, + 1638400, + 3276800, + 409600, + 409600, + 6553600, + 409600, + 1638400, + 14745600, + 1638400, + 1638400, + 7372800, + 409600, + 3276800, + 2457600, + 1638400, + 1638400, + 409600, + 14745600, + 819200, + 1638400, + 1638400, + 3686400, + 409600, + 102400, + 3686400, + 3276800, + 409600, + 655360, + 1638400, + 102400, + 409600, + 13107200, + 1638400, + 102400, + 1310720, + 1638400, + 409600, + 1638400, + 655360, + 102400, + 3686400, + 409600, + 1638400, + 1310720, + 409600, + 409600, + 13107200, + 409600, + 3686400, + 819200, + 409600, + 14745600, + 1310720, + 1310720, + 327680, + 409600, + 5529600, + 3686400, + 3686400, + 1638400, + 1843200, + 409600, + 409600, + 3276800, + 819200, + 409600, + 102400, + 655360, + 102400, + 3686400, + 409600, + 1638400, + 1638400, + 327680, + 102400, + 409600, + 3276800, + 327680, + 102400, + 409600, + 102400, + 409600, + 1310720, + 409600, + 409600, + 
409600, + 102400, + 102400, + 655360, + 409600, + 409600, + 102400, + 409600, + 409600, + 327680, + 102400, + 3276800, + 655360, + 102400, + 102400, + 409600, + 819200, + 409600, + 3276800, + 102400, + 327680, + 921600, + 409600, + 819200, + 921600, + 655360, + 409600, + 102400, + 102400, + 102400, + 102400, + 409600, + 102400, + 409600, + 102400, + 819200, + 102400, + 819200, + 102400, + 409600, + 327680, + 102400, + 102400, + 819200, + 204800, + 3686400, + 921600, + 409600, + 102400, + 655360, + 409600, + 921600, + 102400, + 409600, + 614400, + 2764800, + 921600, + 102400, + 102400, + 409600, + 327680, + 102400, + 409600, + 921600, + 102400, + 819200, + 102400, + 102400, + 1228800, + 102400, + 102400, + 1843200, + 1843200, + 921600, + 819200, + 204800, + 102400, + 409600, + 102400, + 921600, + 409600, + 1638400, + 204800, + 307200 + ] + }, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.7, + 88.7, + 88.5, + 88.5 + ], + "mid_block.resnets.1.time_emb_proj": [ + 88.5, + 88.4, + 88.7, + 88.0 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 88.5, + 88.3, + 88.4, + 88.2 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 88.3, + 87.9, + 88.3, + 87.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.1, + 87.9, + 88.4, + 88.1 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 88.1, + 88.3, + 88.4, + 88.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.9, + 88.0, + 88.3, + 88.0 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 87.8, + 87.9, + 87.8, + 88.0 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 87.4, + 87.9, + 88.1, + 87.5 + ], + "mid_block.resnets.0.time_emb_proj": [ + 87.4, + 87.6, + 88.1, + 86.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.4, + 87.7, + 88.0, + 87.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.3, + 87.6, + 88.0, + 87.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 87.6, + 87.4, + 87.8, + 87.3 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 87.2, + 86.9, + 87.6, + 86.7 + ], + "mid_block.attentions.0.proj_in": [ + 86.8, + 87.2, + 87.4, + 85.9 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 86.3, + 86.7, + 87.2, + 85.5 + ], + "mid_block.attentions.0.proj_out": [ + 86.1, + 86.8, + 87.1, + 85.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 86.3, + 86.7, + 87.0, + 82.7 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 86.1, + 86.5, + 86.8, + 85.2 + ], + "down_blocks.3.resnets.1.conv1": [ + 86.6, + 86.8, + 86.7, + 75.5 + ], + "mid_block.resnets.0.conv1": [ + 86.4, + 86.9, + 87.1, + 78.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 86.3, + 86.3, + 86.8, + 78.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 86.3, + 86.2, + 86.8, + 77.8 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 86.2, + 86.2, + 86.9, + 77.9 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 86.0, + 86.3, + 87.0, + 76.2 + ], + "down_blocks.3.resnets.1.conv2": [ + 86.2, + 86.3, + 86.8, + 76.9 + ], + "up_blocks.0.resnets.0.conv1": [ + 85.3, + 85.6, + 86.3, + 73.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 85.3, + 85.6, + 86.4, + 73.6 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 85.2, + 85.4, + 86.4, + 73.5 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 84.9, + 84.6, + 86.1, + 73.7 + ], + "down_blocks.3.resnets.0.conv2": [ + 84.1, + 84.5, + 84.7, + 73.5 + ], + 
"down_blocks.3.resnets.0.conv1": [ + 83.9, + 84.2, + 79.1, + 73.5 + ], + "up_blocks.0.resnets.1.conv1": [ + 83.6, + 83.8, + 85.1, + 83.0 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 82.6, + 82.8, + 84.3, + 81.9 + ], + "mid_block.resnets.0.conv2": [ + 82.3, + 82.8, + 84.0, + 81.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 83.3, + 83.5, + 84.3, + 82.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 83.3, + 83.2, + 84.3, + 81.4 + ], + "mid_block.resnets.1.conv1": [ + 83.3, + 83.2, + 84.2, + 81.5 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 82.8, + 82.5, + 83.8, + 80.5 + ], + "up_blocks.0.resnets.0.conv2": [ + 80.4, + 82.2, + 82.7, + 76.8 + ], + "mid_block.resnets.1.conv2": [ + 80.7, + 82.0, + 82.5, + 80.0 + ], + "up_blocks.0.resnets.2.conv1": [ + 80.7, + 81.7, + 76.4, + 79.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 80.2, + 80.9, + 76.1, + 78.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 79.8, + 81.2, + 75.1, + 77.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 80.2, + 81.2, + 76.3, + 77.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 80.4, + 81.0, + 76.8, + 78.2 + ], + "up_blocks.0.resnets.1.conv2": [ + 79.0, + 79.7, + 78.4, + 78.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 78.7, + 79.7, + 78.2, + 78.1 + ], + "down_blocks.2.downsamplers.0.conv": [ + 78.7, + 79.8, + 77.9, + 77.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 78.7, + 79.6, + 76.3, + 76.8 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 77.7, + 78.3, + 75.8, + 76.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 77.8, + 78.3, + 76.8, + 75.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 78.2, + 78.9, + 74.2, + 76.1 + ], + "down_blocks.2.resnets.1.conv2": [ + 77.2, + 77.9, + 75.5, + 76.0 + ], + "up_blocks.1.attentions.1.proj_out": [ + 76.7, + 78.4, + 75.7, + 75.9 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 76.6, + 78.5, + 75.0, + 75.7 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 76.5, + 78.4, + 73.8, + 75.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 76.7, + 78.5, + 73.9, + 75.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 77.1, + 78.3, + 75.0, + 75.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 77.1, + 78.6, + 75.1, + 75.7 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 77.0, + 78.4, + 78.2, + 75.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 77.4, + 78.0, + 68.6, + 70.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 77.6, + 78.2, + 69.6, + 70.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 77.9, + 78.3, + 68.7, + 70.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 78.0, + 77.9, + 73.9, + 74.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 78.1, + 78.0, + 74.6, + 74.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 78.1, + 78.2, + 74.9, + 74.8 + ], + "up_blocks.1.resnets.2.conv2": [ + 77.6, + 77.6, + 74.9, + 74.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 78.1, + 77.1, + 72.9, + 74.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 77.5, + 76.4, + 77.4, + 73.5 + ], + 
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 77.2, + 76.3, + 76.9, + 73.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 76.6, + 76.0, + 76.6, + 73.0 + ], + "up_blocks.1.resnets.0.conv1": [ + 72.8, + 75.5, + 77.1, + 73.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 76.2, + 76.6, + 76.6, + 72.8 + ], + "down_blocks.2.resnets.1.conv1": [ + 76.5, + 76.2, + 76.4, + 73.4 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 75.5, + 75.8, + 76.3, + 73.9 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 73.8, + 75.9, + 74.1, + 74.2 + ], + "up_blocks.1.attentions.0.proj_out": [ + 74.1, + 75.1, + 76.4, + 72.8 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 74.0, + 74.9, + 76.5, + 72.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 73.9, + 75.0, + 76.6, + 74.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 73.0, + 74.1, + 75.3, + 74.2 + ], + "up_blocks.1.resnets.2.conv1": [ + 72.2, + 74.9, + 75.2, + 74.4 + ], + "up_blocks.1.resnets.1.conv1": [ + 71.6, + 74.5, + 74.8, + 74.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 76.5, + 76.2, + 77.4, + 75.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 76.6, + 75.5, + 77.1, + 75.2 + ], + "up_blocks.1.resnets.1.conv2": [ + 76.2, + 75.4, + 77.1, + 75.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 76.1, + 75.4, + 77.1, + 75.2 + ], + "up_blocks.1.resnets.0.conv2": [ + 76.0, + 75.6, + 76.7, + 75.0 + ], + "down_blocks.2.attentions.0.proj_in": [ + 76.2, + 76.1, + 76.8, + 74.9 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 76.6, + 76.5, + 77.0, + 75.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 75.7, + 75.6, + 76.5, + 74.8 + ], + "up_blocks.2.resnets.0.conv1": [ + 75.8, + 75.4, + 76.3, + 74.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 76.0, + 75.6, + 76.2, + 74.7 + ], + "up_blocks.0.resnets.2.conv2": [ + 76.3, + 75.7, + 76.6, + 74.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 76.3, + 75.6, + 76.5, + 74.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 76.2, + 75.5, + 76.4, + 74.7 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 75.5, + 75.0, + 75.6, + 74.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 75.8, + 75.0, + 75.8, + 74.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 76.1, + 75.4, + 75.8, + 74.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 75.9, + 75.0, + 75.5, + 74.3 + ], + "down_blocks.2.resnets.0.conv1": [ + 75.8, + 74.8, + 75.3, + 74.4 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 75.3, + 74.3, + 75.0, + 74.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 74.8, + 73.8, + 74.5, + 73.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 74.8, + 73.4, + 74.6, + 73.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 74.6, + 73.6, + 74.5, + 73.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 74.3, + 72.9, + 74.5, + 73.6 + ], + "down_blocks.1.attentions.1.proj_out": [ + 74.9, + 73.6, + 75.2, + 73.9 + ], + "down_blocks.2.attentions.1.proj_in": [ + 74.7, + 73.2, + 75.4, + 73.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 74.7, + 73.3, + 75.4, + 73.9 + ], + 
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 74.2, + 72.8, + 74.7, + 73.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 74.1, + 72.8, + 74.7, + 73.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 74.1, + 73.1, + 74.9, + 73.2 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 73.9, + 73.2, + 74.7, + 73.0 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 73.2, + 72.4, + 74.7, + 72.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 72.8, + 71.7, + 74.0, + 72.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 72.8, + 71.6, + 73.9, + 72.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 72.6, + 71.8, + 74.0, + 72.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 72.6, + 71.9, + 74.0, + 72.4 + ], + "up_blocks.1.upsamplers.0.conv": [ + 73.4, + 72.3, + 74.3, + 72.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 73.4, + 72.3, + 74.2, + 72.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 73.3, + 72.1, + 74.2, + 72.9 + ], + "up_blocks.2.resnets.1.conv1": [ + 73.2, + 71.9, + 74.2, + 73.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 73.2, + 72.1, + 74.3, + 73.1 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 73.1, + 72.7, + 73.9, + 72.9 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 73.3, + 72.6, + 74.0, + 72.8 + ], + "down_blocks.2.attentions.1.proj_out": [ + 73.3, + 72.4, + 74.0, + 72.8 + ], + "up_blocks.1.attentions.1.proj_in": [ + 73.5, + 72.5, + 74.2, + 72.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 73.5, + 72.9, + 74.2, + 72.7 + ], + "up_blocks.0.upsamplers.0.conv": [ + 73.3, + 72.6, + 73.9, + 72.5 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 71.7, + 71.5, + 72.3, + 70.9 + ], + "down_blocks.2.attentions.0.proj_out": [ + 71.6, + 71.5, + 72.2, + 70.9 + ], + "up_blocks.1.attentions.0.proj_in": [ + 71.8, + 71.7, + 72.4, + 71.0 + ], + "up_blocks.2.resnets.1.conv2": [ + 71.2, + 71.7, + 72.3, + 71.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 71.4, + 71.7, + 72.5, + 71.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 71.1, + 71.4, + 72.1, + 71.1 + ], + "up_blocks.2.resnets.0.conv2": [ + 71.3, + 71.6, + 71.8, + 70.9 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 69.8, + 70.9, + 71.1, + 70.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 69.9, + 71.0, + 70.9, + 70.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 69.9, + 70.8, + 70.8, + 70.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 69.6, + 70.7, + 70.7, + 70.0 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 69.6, + 70.6, + 70.6, + 70.0 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 69.1, + 70.0, + 70.2, + 69.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 69.1, + 70.0, + 70.2, + 69.6 + ], + "up_blocks.1.attentions.2.proj_out": [ + 68.8, + 70.2, + 69.9, + 69.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 68.4, + 69.8, + 69.6, + 69.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 68.4, + 69.9, + 69.6, + 69.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 68.5, + 69.9, + 69.6, + 69.0 + ], + 
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 68.1, + 69.7, + 69.6, + 69.0 + ], + "up_blocks.1.attentions.2.proj_in": [ + 68.1, + 69.3, + 69.9, + 69.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 68.2, + 69.3, + 70.1, + 69.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 68.3, + 69.6, + 70.4, + 69.5 + ], + "down_blocks.1.resnets.1.conv2": [ + 68.2, + 69.4, + 70.2, + 69.6 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 68.2, + 69.4, + 70.1, + 69.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 68.2, + 69.4, + 69.9, + 69.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 67.7, + 68.3, + 70.1, + 69.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 67.6, + 68.3, + 69.8, + 69.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 67.7, + 68.2, + 69.7, + 69.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 67.7, + 68.3, + 69.8, + 69.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 67.7, + 68.2, + 69.7, + 69.2 + ], + "down_blocks.1.resnets.1.conv1": [ + 67.7, + 67.9, + 69.5, + 68.9 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 67.6, + 67.8, + 69.2, + 68.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 67.6, + 67.8, + 69.3, + 68.7 + ], + "down_blocks.2.resnets.0.conv2": [ + 67.7, + 67.7, + 69.1, + 68.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 67.5, + 67.9, + 69.2, + 68.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 67.4, + 68.5, + 69.6, + 69.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 67.3, + 68.5, + 69.6, + 69.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 67.3, + 68.5, + 69.5, + 69.0 + ], + "up_blocks.2.resnets.2.conv1": [ + 67.3, + 68.3, + 69.4, + 69.1 + ], + "down_blocks.1.downsamplers.0.conv": [ + 67.1, + 68.1, + 69.5, + 69.2 + ], + "down_blocks.1.resnets.0.conv2": [ + 67.2, + 67.4, + 69.5, + 69.2 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 68.5, + 68.6, + 70.3, + 69.8 + ], + "down_blocks.1.resnets.0.conv1": [ + 68.7, + 68.9, + 70.4, + 69.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 68.4, + 69.0, + 70.3, + 69.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 68.6, + 69.1, + 70.1, + 69.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 68.7, + 69.1, + 70.0, + 69.7 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 68.3, + 67.7, + 69.4, + 68.9 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 68.0, + 67.4, + 69.4, + 68.9 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 67.8, + 67.3, + 69.2, + 68.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 67.8, + 67.7, + 68.7, + 68.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 67.8, + 67.7, + 68.7, + 68.2 + ], + "up_blocks.2.resnets.2.conv2": [ + 67.1, + 66.8, + 68.0, + 67.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 66.4, + 65.9, + 68.3, + 67.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 66.2, + 65.5, + 67.4, + 66.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 67.2, + 65.4, + 67.7, + 67.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 67.1, + 65.4, + 67.7, + 67.1 + ], + 
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 66.8, + 65.0, + 67.3, + 66.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 66.2, + 64.4, + 66.5, + 65.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 66.1, + 64.1, + 66.0, + 65.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 66.1, + 64.0, + 66.0, + 65.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 66.4, + 64.6, + 66.4, + 65.5 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 65.4, + 63.9, + 65.3, + 64.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 65.2, + 63.8, + 65.2, + 64.5 + ], + "down_blocks.1.attentions.0.proj_out": [ + 65.4, + 64.1, + 65.4, + 64.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 65.3, + 64.0, + 65.5, + 64.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 65.0, + 63.6, + 65.0, + 64.2 + ], + "up_blocks.2.attentions.1.proj_out": [ + 65.0, + 63.9, + 65.1, + 64.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 64.8, + 63.8, + 64.9, + 64.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 64.8, + 63.8, + 64.8, + 64.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 64.6, + 63.7, + 64.3, + 63.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 64.4, + 63.6, + 64.3, + 63.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 64.1, + 63.5, + 64.3, + 63.7 + ], + "up_blocks.2.attentions.1.proj_in": [ + 64.1, + 63.6, + 64.3, + 63.7 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 64.0, + 63.6, + 64.1, + 63.5 + ], + "down_blocks.1.attentions.0.proj_in": [ + 63.7, + 63.4, + 64.1, + 63.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 63.5, + 63.4, + 63.7, + 63.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 63.5, + 63.3, + 63.8, + 63.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 63.7, + 63.3, + 64.4, + 63.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 62.8, + 62.4, + 62.9, + 62.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 63.0, + 62.5, + 63.0, + 62.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 63.1, + 62.6, + 62.8, + 62.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 62.9, + 62.1, + 62.4, + 62.0 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 62.1, + 60.9, + 61.4, + 61.3 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 61.3, + 60.3, + 60.9, + 60.8 + ], + "up_blocks.2.attentions.0.proj_in": [ + 61.1, + 60.7, + 61.3, + 61.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 61.1, + 60.5, + 61.2, + 60.9 + ], + "down_blocks.0.attentions.0.proj_out": [ + 61.1, + 60.4, + 61.2, + 61.0 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 61.3, + 60.5, + 61.3, + 61.0 + ], + "down_blocks.0.resnets.0.conv1": [ + 61.0, + 60.3, + 61.1, + 60.9 + ], + "down_blocks.1.attentions.1.proj_in": [ + 61.0, + 60.4, + 61.2, + 60.9 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 60.8, + 60.2, + 60.9, + 60.7 + ], + "down_blocks.0.resnets.1.conv2": [ + 60.3, + 60.0, + 60.8, + 60.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 60.4, + 60.1, + 60.8, + 60.5 + ], + 
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 60.2, + 59.9, + 60.7, + 60.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 60.2, + 59.9, + 60.8, + 60.6 + ], + "down_blocks.0.attentions.1.proj_out": [ + 60.3, + 60.3, + 61.0, + 60.7 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 59.7, + 59.9, + 60.4, + 60.1 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 60.0, + 59.8, + 60.3, + 60.1 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 59.3, + 59.1, + 60.0, + 59.9 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 59.3, + 59.1, + 60.1, + 59.9 + ], + "up_blocks.2.attentions.0.proj_out": [ + 60.1, + 59.3, + 60.2, + 60.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 59.8, + 59.1, + 59.7, + 59.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 59.6, + 58.9, + 59.6, + 59.6 + ], + "up_blocks.3.attentions.0.proj_out": [ + 59.7, + 59.5, + 60.2, + 60.1 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 59.4, + 58.4, + 59.9, + 59.8 + ], + "down_blocks.0.attentions.0.proj_in": [ + 59.2, + 58.4, + 59.8, + 59.8 + ], + "up_blocks.2.attentions.2.proj_out": [ + 59.5, + 59.1, + 60.0, + 60.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 59.4, + 59.0, + 60.1, + 60.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 59.0, + 58.8, + 59.6, + 59.8 + ], + "down_blocks.0.attentions.1.proj_in": [ + 59.1, + 58.9, + 59.6, + 59.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 58.8, + 58.5, + 59.4, + 59.5 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 58.7, + 58.2, + 59.4, + 59.5 + ], + "up_blocks.2.upsamplers.0.conv": [ + 58.8, + 59.3, + 60.1, + 60.1 + ], + "down_blocks.0.resnets.1.conv1": [ + 58.6, + 59.2, + 60.0, + 59.9 + ], + "up_blocks.2.attentions.2.proj_in": [ + 57.9, + 59.1, + 59.4, + 59.3 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 57.8, + 58.8, + 59.2, + 59.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 58.0, + 58.8, + 59.4, + 59.3 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 57.9, + 58.2, + 58.7, + 58.8 + ], + "up_blocks.3.resnets.0.conv2": [ + 57.9, + 58.1, + 58.4, + 58.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 57.9, + 58.0, + 58.4, + 58.6 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 57.6, + 57.5, + 58.2, + 58.2 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 58.0, + 58.5, + 58.7, + 58.6 + ], + "up_blocks.3.resnets.0.conv1": [ + 57.9, + 57.9, + 58.3, + 58.3 + ], + "down_blocks.0.downsamplers.0.conv": [ + 58.1, + 58.0, + 58.7, + 58.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 58.2, + 58.2, + 58.7, + 58.5 + ], + "up_blocks.3.attentions.0.proj_in": [ + 58.3, + 58.4, + 58.9, + 58.6 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 57.9, + 57.9, + 58.4, + 58.0 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 57.8, + 57.8, + 58.3, + 57.9 + ], + "up_blocks.3.attentions.1.proj_out": [ + 57.6, + 57.8, + 57.9, + 57.6 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 56.5, + 56.9, + 56.8, + 56.5 + ], + "down_blocks.0.resnets.0.conv2": [ + 56.2, + 56.9, + 56.8, + 56.6 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 55.6, + 56.3, + 56.4, + 56.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 55.4, + 55.9, + 56.2, + 56.0 + ], 
+ "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 54.9, + 55.7, + 55.8, + 55.5 + ], + "up_blocks.3.attentions.2.proj_out": [ + 54.2, + 55.0, + 54.9, + 54.7 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 54.6, + 54.7, + 53.6, + 53.7 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 54.4, + 54.4, + 53.3, + 53.5 + ], + "up_blocks.3.attentions.1.proj_in": [ + 54.2, + 53.9, + 53.2, + 53.4 + ], + "up_blocks.3.resnets.1.conv1": [ + 53.8, + 53.0, + 52.6, + 52.8 + ], + "up_blocks.3.resnets.2.conv1": [ + 53.7, + 53.5, + 53.4, + 53.5 + ], + "up_blocks.3.resnets.1.conv2": [ + 53.7, + 54.0, + 53.8, + 54.0 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 52.4, + 52.4, + 52.5, + 52.6 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 52.4, + 52.1, + 52.5, + 52.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 52.2, + 52.1, + 52.2, + 52.3 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 51.1, + 51.3, + 51.2, + 51.3 + ], + "up_blocks.3.attentions.2.proj_in": [ + 51.1, + 51.4, + 51.4, + 51.5 + ], + "up_blocks.3.resnets.2.conv2": [ + 50.9, + 51.2, + 51.3, + 51.3 + ], + "time_embedding.linear_1": [ + 51.0, + 51.2, + 51.2, + 51.1 + ], + "time_embedding.linear_2": [ + 51.0, + 51.2, + 51.1, + 51.1 + ], + "up_blocks.3.resnets.2.conv_shortcut": [ + 54.8, + 54.8, + 54.5, + 54.5 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 54.3, + 53.5, + 55.8, + 55.3 + ] + }, + "2": { + "metadata": { + "nbits": 2, + "out_ngroups": 1, + "in_ngroups": 1, + "cumulative": true, + "candidates": [ + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.resnets.2.time_emb_proj", + "up_blocks.0.resnets.0.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0", + "mid_block.resnets.0.time_emb_proj", + "down_blocks.3.resnets.0.time_emb_proj", + "down_blocks.3.resnets.1.time_emb_proj", + "up_blocks.0.resnets.1.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v", + "mid_block.resnets.1.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.ff.net.2", + "mid_block.attentions.0.proj_out", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q", + "mid_block.attentions.0.proj_in", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v", + "mid_block.resnets.0.conv1", + "mid_block.resnets.0.conv2", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.3.resnets.1.conv1", + "mid_block.resnets.1.conv2", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.3.resnets.1.conv2", + "up_blocks.0.resnets.1.conv1", + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.resnets.1.conv2", + "down_blocks.3.resnets.0.conv2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.0.resnets.0.conv1", + "up_blocks.0.resnets.2.conv_shortcut", + "mid_block.resnets.1.conv1", + "up_blocks.0.resnets.1.conv_shortcut", + "up_blocks.0.resnets.0.conv_shortcut", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.resnets.0.conv2", + "up_blocks.1.resnets.1.time_emb_proj", + 
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.resnets.2.time_emb_proj", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.2.resnets.1.conv1", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.2.resnets.0.time_emb_proj", + "down_blocks.2.resnets.1.conv2", + "down_blocks.3.resnets.0.conv1", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.0.proj_out", + "up_blocks.1.attentions.1.proj_out", + "up_blocks.1.resnets.0.time_emb_proj", + "down_blocks.2.downsamplers.0.conv", + "up_blocks.1.resnets.2.conv2", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q", + "up_blocks.2.resnets.0.conv1", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.resnets.2.conv1", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.resnets.0.conv2", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.1.resnets.0.conv2", + "up_blocks.2.resnets.0.conv2", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.1.resnets.2.conv1", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + 
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.2.resnets.1.time_emb_proj", + "up_blocks.1.resnets.0.conv1", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0", + "down_blocks.2.resnets.0.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.3.resnets.0.time_emb_proj", + "up_blocks.1.upsamplers.0.conv", + "up_blocks.1.attentions.2.proj_out", + "up_blocks.1.resnets.2.conv_shortcut", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.1.resnets.0.conv1", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.2.resnets.1.conv1", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.1.resnets.1.conv_shortcut", + "down_blocks.1.resnets.1.conv1", + "up_blocks.1.attentions.1.proj_in", + "up_blocks.1.resnets.0.conv_shortcut", + "up_blocks.2.resnets.1.conv2", + "down_blocks.1.attentions.1.proj_out", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.1.resnets.1.time_emb_proj", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.proj_in", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.2.resnets.0.conv1", + "up_blocks.1.resnets.1.conv1", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.0.resnets.2.conv2", + "up_blocks.1.resnets.1.conv2", + "up_blocks.0.upsamplers.0.conv", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.2.resnets.1.time_emb_proj", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.0.proj_in", + "down_blocks.2.attentions.1.proj_out", + "down_blocks.2.attentions.1.proj_in", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.resnets.2.conv1", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.downsamplers.0.conv", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.2.resnets.2.conv2", + "down_blocks.1.resnets.0.conv2", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v", + "down_blocks.1.resnets.1.conv2", + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.2.proj_in", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.2.resnets.0.conv_shortcut", + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.attentions.0.proj_in", + 
"up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.0.proj_in", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.0.proj_out", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.0.resnets.0.time_emb_proj", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q", + "time_embedding.linear_2", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.0.proj_out", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.1.proj_out", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k", + "down_blocks.0.resnets.0.conv1", + "down_blocks.1.attentions.1.proj_in", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.1.attentions.0.proj_out", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.resnets.1.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.2.resnets.1.conv_shortcut", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.0.attentions.0.proj_out", + "down_blocks.1.resnets.0.time_emb_proj", + "up_blocks.2.attentions.2.proj_out", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.0.proj_out", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v", + "down_blocks.0.resnets.1.conv2", + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.0.attentions.1.proj_out", + "up_blocks.2.attentions.1.proj_in", + "up_blocks.2.resnets.0.conv_shortcut", + "down_blocks.0.resnets.1.conv1", + "up_blocks.2.resnets.2.time_emb_proj", + "up_blocks.2.upsamplers.0.conv", + "down_blocks.0.attentions.0.proj_in", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k", + 
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.0.resnets.1.time_emb_proj", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.resnets.0.conv2", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.2.resnets.2.conv_shortcut", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.0.downsamplers.0.conv", + "up_blocks.3.resnets.0.conv1", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2", + "down_blocks.0.attentions.1.proj_in", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.0.proj_in", + "down_blocks.0.resnets.0.conv2", + "up_blocks.2.attentions.2.proj_in", + "up_blocks.3.attentions.1.proj_out", + "up_blocks.3.attentions.2.proj_out", + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v", + "down_blocks.1.resnets.0.conv_shortcut", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.3.resnets.2.conv1", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.resnets.1.conv1", + "up_blocks.3.resnets.2.time_emb_proj", + "up_blocks.3.attentions.1.proj_in", + "time_embedding.linear_1", + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.resnets.1.conv2", + "up_blocks.3.resnets.0.conv_shortcut", + "up_blocks.3.resnets.2.conv2", + "up_blocks.3.resnets.1.conv_shortcut", + "up_blocks.3.attentions.2.proj_in", + "up_blocks.3.resnets.2.conv_shortcut" + ], + "sizes": [ + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 1310720, + 14745600, + 14745600, + 1638400, + 14745600, + 14745600, + 409600, + 14745600, + 29491200, + 13107200, + 14745600, + 14745600, + 1638400, + 1638400, + 409600, + 1638400, + 29491200, + 3276800, + 14745600, + 3276800, + 3276800, + 1310720, + 1638400, + 14745600, + 1638400, + 1310720, + 1638400, + 1638400, + 1638400, + 655360, + 14745600, + 1310720, + 409600, + 409600, + 1310720, + 655360, + 819200, + 14745600, + 14745600, + 1638400, + 1638400, + 409600, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 14745600, + 14745600, + 409600, + 6553600, + 6553600, + 1638400, + 1638400, + 1638400, + 1310720, + 1638400, + 13107200, + 1638400, + 409600, + 1638400, + 1638400, + 11059200, + 1638400, + 13107200, + 409600, + 102400, + 409600, + 102400, + 1638400, + 1638400, + 29491200, + 6553600, + 1638400, + 102400, + 14745600, + 1638400, + 14745600, + 3686400, + 655360, + 1638400, + 409600, + 409600, + 327680, + 22118400, + 409600, + 409600, + 6553600, + 102400, + 13107200, + 102400, + 3276800, + 1638400, + 29491200, + 1638400, + 1638400, + 655360, + 409600, + 14745600, + 1638400, + 2457600, + 409600, + 1310720, + 1638400, + 1843200, + 1638400, + 7372800, + 327680, + 3276800, + 3686400, + 1638400, + 
3276800, + 3686400, + 409600, + 409600, + 1638400, + 327680, + 819200, + 1638400, + 102400, + 1638400, + 409600, + 1638400, + 7372800, + 29491200, + 409600, + 1638400, + 14745600, + 14745600, + 14745600, + 1638400, + 819200, + 102400, + 1638400, + 1638400, + 1638400, + 409600, + 409600, + 5529600, + 409600, + 409600, + 3686400, + 1638400, + 3686400, + 3686400, + 13107200, + 655360, + 3686400, + 1638400, + 102400, + 1638400, + 102400, + 409600, + 102400, + 819200, + 3276800, + 409600, + 1638400, + 102400, + 409600, + 327680, + 1638400, + 409600, + 409600, + 409600, + 409600, + 1638400, + 409600, + 1638400, + 409600, + 102400, + 1310720, + 3276800, + 102400, + 655360, + 13107200, + 409600, + 1310720, + 921600, + 409600, + 102400, + 409600, + 102400, + 102400, + 409600, + 3276800, + 409600, + 409600, + 409600, + 409600, + 102400, + 409600, + 102400, + 409600, + 3276800, + 102400, + 1310720, + 102400, + 819200, + 102400, + 819200, + 327680, + 102400, + 819200, + 409600, + 102400, + 327680, + 327680, + 102400, + 1310720, + 409600, + 921600, + 819200, + 102400, + 409600, + 1228800, + 921600, + 819200, + 3686400, + 102400, + 102400, + 102400, + 327680, + 409600, + 819200, + 921600, + 655360, + 102400, + 409600, + 614400, + 1310720, + 102400, + 327680, + 655360, + 921600, + 2764800, + 655360, + 409600, + 102400, + 102400, + 327680, + 102400, + 921600, + 409600, + 102400, + 102400, + 819200, + 102400, + 204800, + 102400, + 655360, + 1843200, + 102400, + 1843200, + 409600, + 102400, + 409600, + 819200, + 921600, + 307200, + 921600, + 204800, + 102400, + 204800 + ] + }, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 89.1, + 88.7, + 88.9, + 88.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 89.1, + 88.6, + 88.7, + 88.9 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 89.0, + 88.8, + 89.0, + 88.4 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 88.7, + 88.7, + 88.8, + 88.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.6, + 88.4, + 88.4, + 88.4 + ], + "mid_block.resnets.0.time_emb_proj": [ + 88.3, + 88.5, + 88.8, + 88.1 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 88.4, + 88.3, + 88.5, + 88.2 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 88.5, + 88.5, + 88.8, + 88.1 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 88.3, + 88.4, + 88.4, + 87.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.2, + 88.3, + 88.1, + 88.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.1, + 88.2, + 88.4, + 87.5 + ], + "mid_block.resnets.1.time_emb_proj": [ + 88.1, + 88.2, + 88.3, + 87.8 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.1, + 88.2, + 88.2, + 87.9 + ], + "mid_block.attentions.0.proj_out": [ + 87.7, + 88.1, + 88.0, + 87.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.7, + 87.9, + 88.1, + 87.0 + ], + "mid_block.attentions.0.proj_in": [ + 87.9, + 88.1, + 88.2, + 86.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.8, + 88.2, + 88.0, + 86.3 + ], + "mid_block.resnets.0.conv1": [ + 88.0, + 88.2, + 88.3, + 83.7 + ], + "mid_block.resnets.0.conv2": [ + 87.6, + 88.1, + 88.0, + 82.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 87.7, + 88.0, + 87.8, + 82.7 + ], + "down_blocks.3.resnets.1.conv1": [ + 87.4, + 87.7, + 87.9, + 80.3 + ], + "mid_block.resnets.1.conv2": [ + 87.4, + 87.6, + 87.8, + 79.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 87.1, + 87.5, + 87.5, 
+ 78.7 + ], + "down_blocks.3.resnets.1.conv2": [ + 87.0, + 87.0, + 87.4, + 77.8 + ], + "up_blocks.0.resnets.1.conv1": [ + 86.8, + 87.6, + 87.5, + 76.5 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 86.6, + 87.3, + 87.5, + 77.0 + ], + "up_blocks.0.resnets.1.conv2": [ + 86.4, + 87.0, + 87.2, + 85.7 + ], + "down_blocks.3.resnets.0.conv2": [ + 86.5, + 86.8, + 87.3, + 86.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 86.6, + 87.1, + 87.3, + 85.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 86.2, + 86.6, + 87.3, + 85.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 85.7, + 86.1, + 87.2, + 85.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 85.5, + 85.9, + 87.0, + 85.0 + ], + "up_blocks.0.resnets.0.conv1": [ + 85.7, + 85.8, + 87.1, + 85.0 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 85.6, + 85.8, + 87.0, + 81.8 + ], + "mid_block.resnets.1.conv1": [ + 85.7, + 85.9, + 86.9, + 83.9 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 85.5, + 85.8, + 87.0, + 84.8 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 85.8, + 86.0, + 86.9, + 84.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 85.6, + 86.2, + 87.0, + 85.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 85.6, + 86.2, + 87.0, + 81.5 + ], + "up_blocks.0.resnets.0.conv2": [ + 85.4, + 85.9, + 86.8, + 83.9 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 84.8, + 85.4, + 86.6, + 83.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 85.5, + 85.5, + 86.7, + 81.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 85.1, + 85.5, + 78.3, + 83.7 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 84.6, + 84.8, + 78.6, + 83.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 83.9, + 84.5, + 77.3, + 83.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 84.0, + 84.2, + 77.4, + 83.2 + ], + "down_blocks.2.resnets.1.conv1": [ + 84.2, + 84.0, + 80.6, + 82.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 84.0, + 84.0, + 80.5, + 82.2 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 83.9, + 83.8, + 80.4, + 82.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 83.3, + 83.4, + 80.3, + 81.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 83.4, + 83.5, + 80.3, + 81.7 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 83.3, + 83.2, + 80.2, + 81.7 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 82.8, + 82.9, + 80.0, + 81.1 + ], + "down_blocks.2.resnets.1.conv2": [ + 82.6, + 82.9, + 77.2, + 77.7 + ], + "down_blocks.3.resnets.0.conv1": [ + 82.7, + 82.9, + 76.3, + 77.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.6, + 82.6, + 78.0, + 77.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 82.7, + 82.5, + 77.2, + 77.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 82.3, + 82.3, + 78.5, + 77.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 82.2, + 82.2, + 78.4, + 77.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 81.6, + 82.2, + 78.3, + 77.6 + ], + "up_blocks.1.attentions.0.proj_out": [ + 81.4, + 82.0, + 78.2, + 77.5 + ], + "up_blocks.1.attentions.1.proj_out": [ + 81.5, + 82.0, + 78.9, + 77.5 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 81.1, + 81.7, + 79.3, + 
77.5 + ], + "down_blocks.2.downsamplers.0.conv": [ + 81.0, + 81.6, + 82.9, + 77.4 + ], + "up_blocks.1.resnets.2.conv2": [ + 81.0, + 81.6, + 82.8, + 77.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 80.9, + 81.5, + 82.6, + 77.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 81.1, + 81.5, + 82.0, + 77.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 80.8, + 81.2, + 81.7, + 77.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 76.7, + 81.0, + 76.1, + 77.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 76.5, + 80.9, + 76.5, + 77.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 75.9, + 80.9, + 75.8, + 77.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 79.7, + 80.7, + 75.4, + 77.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 80.3, + 80.7, + 75.2, + 77.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 79.8, + 80.3, + 77.6, + 76.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 78.7, + 79.5, + 76.8, + 76.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 78.8, + 79.7, + 79.8, + 76.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 78.9, + 79.6, + 80.7, + 76.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 77.8, + 78.7, + 80.1, + 76.2 + ], + "up_blocks.2.resnets.0.conv1": [ + 77.3, + 78.3, + 79.9, + 76.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 77.5, + 78.6, + 80.1, + 76.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 77.9, + 78.9, + 80.2, + 76.2 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 77.6, + 78.4, + 79.8, + 76.0 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 76.8, + 78.4, + 79.7, + 76.1 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 76.3, + 77.7, + 79.4, + 76.0 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 76.7, + 77.5, + 79.1, + 75.9 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 76.9, + 77.9, + 79.1, + 75.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 76.9, + 78.0, + 79.1, + 75.8 + ], + "up_blocks.0.resnets.2.conv1": [ + 77.2, + 77.8, + 79.0, + 75.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 77.3, + 77.7, + 79.0, + 75.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 76.9, + 77.4, + 78.0, + 75.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 76.7, + 77.2, + 78.1, + 75.6 + ], + "down_blocks.2.resnets.0.conv2": [ + 76.9, + 77.2, + 78.7, + 76.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 77.1, + 77.3, + 77.1, + 75.8 + ], + "up_blocks.1.resnets.0.conv2": [ + 77.2, + 77.4, + 78.8, + 75.3 + ], + "up_blocks.2.resnets.0.conv2": [ + 77.3, + 77.4, + 78.8, + 75.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 77.2, + 77.4, + 78.9, + 75.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 77.3, + 77.4, + 78.9, + 74.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 77.3, + 77.4, + 78.5, + 74.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 77.2, + 77.4, + 78.6, + 74.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 77.1, + 77.2, + 78.5, + 74.4 + ], + 
"up_blocks.1.resnets.2.conv1": [ + 77.1, + 76.9, + 77.9, + 74.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 77.1, + 76.9, + 78.0, + 74.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 77.0, + 76.9, + 77.9, + 74.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 77.1, + 77.1, + 77.9, + 74.7 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 76.7, + 76.8, + 77.6, + 74.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 76.4, + 76.4, + 77.6, + 73.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 76.4, + 76.2, + 77.4, + 73.9 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 76.4, + 76.5, + 77.5, + 73.9 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 76.1, + 76.3, + 77.3, + 73.9 + ], + "up_blocks.1.resnets.0.conv1": [ + 76.3, + 76.2, + 76.9, + 73.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 75.6, + 75.2, + 76.9, + 73.8 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 75.7, + 75.2, + 76.8, + 73.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 75.8, + 75.5, + 76.9, + 73.9 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 75.5, + 75.2, + 76.2, + 73.6 + ], + "up_blocks.1.upsamplers.0.conv": [ + 75.3, + 75.2, + 76.3, + 74.0 + ], + "up_blocks.1.attentions.2.proj_out": [ + 75.2, + 75.1, + 75.9, + 73.9 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 75.2, + 75.2, + 76.1, + 74.1 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 75.2, + 75.1, + 76.0, + 74.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 75.2, + 75.1, + 75.8, + 74.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 75.2, + 75.0, + 75.5, + 73.8 + ], + "down_blocks.1.resnets.0.conv1": [ + 74.7, + 74.8, + 74.9, + 73.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 74.7, + 74.8, + 75.1, + 73.6 + ], + "up_blocks.2.resnets.1.conv1": [ + 74.7, + 74.6, + 74.8, + 73.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 74.6, + 74.5, + 74.8, + 73.3 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 74.6, + 74.5, + 74.4, + 73.2 + ], + "down_blocks.1.resnets.1.conv1": [ + 74.1, + 74.6, + 74.3, + 73.0 + ], + "up_blocks.1.attentions.1.proj_in": [ + 73.6, + 74.2, + 73.8, + 72.6 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 73.8, + 74.5, + 74.1, + 72.6 + ], + "up_blocks.2.resnets.1.conv2": [ + 73.6, + 74.2, + 74.0, + 72.5 + ], + "down_blocks.1.attentions.1.proj_out": [ + 73.4, + 73.6, + 73.3, + 72.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 73.1, + 73.4, + 72.9, + 72.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 73.0, + 73.4, + 73.0, + 72.5 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 73.0, + 73.4, + 73.0, + 72.5 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 72.7, + 72.0, + 72.4, + 72.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 73.1, + 72.7, + 73.0, + 72.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 73.1, + 72.8, + 73.3, + 72.3 + ], + "down_blocks.2.attentions.0.proj_in": [ + 73.6, + 73.4, + 73.1, + 72.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 73.4, + 73.2, + 72.9, + 72.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 73.7, + 73.6, + 73.0, + 72.3 + ], + "down_blocks.2.resnets.0.conv1": [ + 
73.3, + 73.5, + 73.2, + 72.3 + ], + "up_blocks.1.resnets.1.conv1": [ + 73.4, + 73.3, + 73.6, + 72.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 73.3, + 73.1, + 73.7, + 72.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 73.3, + 73.1, + 73.8, + 72.5 + ], + "up_blocks.0.resnets.2.conv2": [ + 72.8, + 73.3, + 73.1, + 72.1 + ], + "up_blocks.1.resnets.1.conv2": [ + 72.5, + 73.1, + 73.2, + 71.7 + ], + "up_blocks.0.upsamplers.0.conv": [ + 72.8, + 73.3, + 73.3, + 71.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 72.2, + 73.2, + 73.1, + 71.6 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 71.5, + 72.5, + 72.4, + 70.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 71.2, + 72.2, + 72.2, + 70.9 + ], + "up_blocks.1.attentions.0.proj_in": [ + 71.3, + 72.3, + 72.4, + 71.2 + ], + "down_blocks.2.attentions.1.proj_out": [ + 71.4, + 72.3, + 72.3, + 71.1 + ], + "down_blocks.2.attentions.1.proj_in": [ + 71.6, + 72.4, + 72.3, + 71.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 71.7, + 72.2, + 72.5, + 71.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 71.6, + 72.0, + 72.7, + 71.5 + ], + "up_blocks.2.resnets.2.conv1": [ + 71.8, + 72.0, + 72.8, + 71.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 71.9, + 72.2, + 72.9, + 71.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 71.9, + 72.2, + 72.9, + 71.4 + ], + "down_blocks.1.downsamplers.0.conv": [ + 71.8, + 72.1, + 72.9, + 71.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 71.5, + 71.9, + 72.4, + 70.9 + ], + "up_blocks.2.resnets.2.conv2": [ + 70.9, + 71.6, + 72.2, + 70.8 + ], + "down_blocks.1.resnets.0.conv2": [ + 70.7, + 71.5, + 72.2, + 70.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 70.6, + 71.5, + 72.2, + 70.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 70.6, + 71.4, + 72.0, + 70.3 + ], + "down_blocks.1.resnets.1.conv2": [ + 70.2, + 71.3, + 72.0, + 70.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 70.1, + 71.2, + 72.0, + 70.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 70.0, + 71.2, + 71.8, + 70.3 + ], + "up_blocks.1.attentions.2.proj_in": [ + 69.5, + 70.5, + 71.2, + 69.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 69.5, + 70.5, + 71.1, + 69.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 69.3, + 70.4, + 71.1, + 69.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 69.1, + 70.1, + 70.6, + 69.3 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 68.7, + 70.3, + 69.8, + 68.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 68.8, + 70.2, + 69.9, + 68.5 + ], + "down_blocks.1.attentions.0.proj_in": [ + 68.8, + 69.9, + 69.5, + 68.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 68.8, + 69.5, + 69.4, + 68.1 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 69.0, + 69.6, + 69.5, + 68.2 + ], + "up_blocks.2.attentions.0.proj_in": [ + 68.9, + 69.5, + 69.3, + 67.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 68.9, + 69.5, + 69.4, + 68.0 + ], + "down_blocks.2.attentions.0.proj_out": [ + 69.0, + 69.6, + 69.7, + 68.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 69.0, + 69.4, + 69.4, + 68.5 + ], + 
"down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 68.5, + 69.4, + 69.4, + 68.5 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 68.4, + 69.1, + 69.4, + 68.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 68.4, + 68.9, + 69.3, + 68.3 + ], + "time_embedding.linear_2": [ + 67.6, + 67.9, + 68.3, + 67.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 67.7, + 68.7, + 68.7, + 67.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 67.5, + 68.0, + 68.6, + 67.8 + ], + "up_blocks.2.attentions.0.proj_out": [ + 68.1, + 68.1, + 68.8, + 68.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 67.8, + 67.8, + 68.6, + 67.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 67.1, + 67.3, + 68.0, + 67.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 66.9, + 66.8, + 67.7, + 66.9 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 66.7, + 66.5, + 67.5, + 66.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 66.3, + 66.2, + 67.4, + 66.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 66.4, + 66.2, + 67.6, + 66.9 + ], + "up_blocks.2.attentions.1.proj_out": [ + 66.2, + 66.0, + 67.5, + 66.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 66.9, + 64.7, + 66.2, + 65.9 + ], + "down_blocks.0.resnets.0.conv1": [ + 66.4, + 64.4, + 66.0, + 65.7 + ], + "down_blocks.1.attentions.1.proj_in": [ + 66.9, + 64.5, + 65.7, + 65.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 66.7, + 64.3, + 65.5, + 65.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 66.5, + 64.2, + 65.4, + 65.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 66.3, + 64.1, + 65.2, + 65.1 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 65.8, + 63.6, + 64.8, + 64.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 65.9, + 63.7, + 65.2, + 64.9 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 65.2, + 63.5, + 64.8, + 64.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 65.1, + 63.4, + 64.6, + 64.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 65.6, + 63.1, + 64.4, + 64.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 65.2, + 63.0, + 64.4, + 64.8 + ], + "down_blocks.1.attentions.0.proj_out": [ + 65.6, + 63.0, + 64.3, + 64.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 65.2, + 62.7, + 64.0, + 64.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 64.9, + 62.3, + 63.9, + 64.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 65.1, + 62.3, + 63.9, + 64.4 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 64.4, + 61.7, + 63.2, + 63.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 64.1, + 61.6, + 63.1, + 63.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 63.8, + 61.5, + 62.9, + 63.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 63.7, + 61.8, + 62.7, + 63.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 63.4, + 61.8, + 62.5, + 62.9 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 62.8, + 60.4, + 62.2, + 62.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 62.7, + 60.7, + 61.9, + 62.0 + ], + 
"down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 62.7, + 60.6, + 61.7, + 61.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 62.8, + 60.8, + 61.7, + 61.8 + ], + "down_blocks.0.attentions.0.proj_out": [ + 62.9, + 60.8, + 62.0, + 62.1 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 62.8, + 60.2, + 61.6, + 62.2 + ], + "up_blocks.2.attentions.2.proj_out": [ + 62.6, + 60.6, + 61.5, + 62.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 62.6, + 60.0, + 61.0, + 62.2 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 62.6, + 60.2, + 61.0, + 62.1 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 62.6, + 60.4, + 61.0, + 62.1 + ], + "up_blocks.3.attentions.0.proj_out": [ + 62.6, + 60.7, + 61.0, + 62.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 61.1, + 61.2, + 60.3, + 60.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 61.2, + 61.0, + 60.4, + 61.1 + ], + "down_blocks.0.resnets.1.conv2": [ + 61.4, + 61.3, + 60.5, + 61.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 61.5, + 61.4, + 60.7, + 61.3 + ], + "down_blocks.0.attentions.1.proj_out": [ + 62.0, + 61.8, + 60.8, + 61.3 + ], + "up_blocks.2.attentions.1.proj_in": [ + 61.6, + 61.4, + 60.7, + 61.3 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 61.2, + 61.7, + 62.0, + 62.5 + ], + "down_blocks.0.resnets.1.conv1": [ + 61.0, + 61.9, + 61.9, + 62.5 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 60.3, + 61.2, + 61.0, + 62.0 + ], + "up_blocks.2.upsamplers.0.conv": [ + 60.8, + 61.2, + 61.5, + 62.4 + ], + "down_blocks.0.attentions.0.proj_in": [ + 60.9, + 61.8, + 61.6, + 62.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 61.0, + 61.7, + 61.5, + 62.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 60.9, + 61.5, + 61.4, + 62.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 61.0, + 61.8, + 61.4, + 62.1 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 60.7, + 61.6, + 61.2, + 62.0 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 60.1, + 61.2, + 60.8, + 61.4 + ], + "up_blocks.3.resnets.0.conv2": [ + 60.3, + 61.0, + 60.5, + 61.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 60.4, + 61.3, + 60.7, + 61.1 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 59.8, + 61.0, + 60.2, + 60.6 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 59.6, + 60.8, + 60.1, + 60.5 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 59.1, + 60.3, + 59.6, + 60.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 58.4, + 60.0, + 59.6, + 59.9 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 58.3, + 59.8, + 59.5, + 59.7 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 58.3, + 59.7, + 59.4, + 59.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 58.2, + 59.7, + 59.5, + 59.7 + ], + "down_blocks.0.downsamplers.0.conv": [ + 56.9, + 58.7, + 59.2, + 59.3 + ], + "up_blocks.3.resnets.0.conv1": [ + 56.1, + 58.2, + 58.5, + 58.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 55.6, + 57.8, + 58.4, + 58.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 55.2, + 57.4, + 57.9, + 58.0 + ], + "down_blocks.0.attentions.1.proj_in": [ + 55.6, + 57.4, + 58.4, + 58.4 + ], + 
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 55.4, + 57.1, + 57.8, + 58.0 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 55.4, + 57.1, + 57.7, + 57.9 + ], + "up_blocks.3.attentions.0.proj_in": [ + 55.3, + 56.9, + 57.0, + 57.2 + ], + "down_blocks.0.resnets.0.conv2": [ + 54.5, + 56.9, + 56.5, + 56.9 + ], + "up_blocks.2.attentions.2.proj_in": [ + 54.6, + 56.6, + 56.8, + 56.8 + ], + "up_blocks.3.attentions.1.proj_out": [ + 54.1, + 56.1, + 56.3, + 56.4 + ], + "up_blocks.3.attentions.2.proj_out": [ + 53.8, + 55.7, + 55.9, + 56.0 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 53.0, + 55.1, + 55.3, + 55.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 52.8, + 54.9, + 55.2, + 55.3 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 52.9, + 54.6, + 54.1, + 54.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 52.4, + 54.2, + 53.9, + 54.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 52.6, + 54.4, + 53.5, + 53.7 + ], + "up_blocks.3.resnets.2.conv1": [ + 53.0, + 54.8, + 54.0, + 54.3 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 52.7, + 54.5, + 53.9, + 54.6 + ], + "up_blocks.3.resnets.1.conv1": [ + 52.2, + 54.2, + 53.5, + 54.4 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 51.7, + 53.6, + 52.8, + 53.6 + ], + "up_blocks.3.attentions.1.proj_in": [ + 51.4, + 53.4, + 52.7, + 53.4 + ], + "time_embedding.linear_1": [ + 51.0, + 52.9, + 52.3, + 52.9 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 50.7, + 52.5, + 52.0, + 52.6 + ], + "up_blocks.3.resnets.1.conv2": [ + 50.7, + 52.7, + 52.1, + 52.7 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 50.3, + 52.2, + 51.9, + 52.3 + ], + "up_blocks.3.resnets.2.conv2": [ + 50.6, + 52.4, + 52.1, + 52.6 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 50.1, + 52.0, + 51.9, + 52.5 + ], + "up_blocks.3.attentions.2.proj_in": [ + 49.6, + 51.6, + 51.5, + 52.2 + ], + "up_blocks.3.resnets.2.conv_shortcut": [ + 49.4, + 51.5, + 51.6, + 52.3 + ] + }, + "4": { + "metadata": { + "nbits": 4, + "out_ngroups": 1, + "in_ngroups": 1, + "cumulative": true, + "candidates": [ + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.0.resnets.0.conv2", + "mid_block.resnets.1.time_emb_proj", + "mid_block.resnets.0.conv2", + "down_blocks.3.resnets.0.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.0.resnets.1.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "mid_block.attentions.0.proj_out", + "mid_block.resnets.0.conv1", + "up_blocks.0.resnets.2.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k", + "mid_block.resnets.1.conv2", + "down_blocks.3.resnets.1.conv2", + "up_blocks.0.resnets.1.conv2", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.0.resnets.0.time_emb_proj", + "up_blocks.0.resnets.0.conv_shortcut", + "down_blocks.3.resnets.1.conv1", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + 
"up_blocks.0.resnets.0.conv1", + "up_blocks.0.resnets.1.conv_shortcut", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.3.resnets.0.conv2", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "mid_block.resnets.1.conv1", + "mid_block.resnets.0.time_emb_proj", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.2.resnets.1.time_emb_proj", + "down_blocks.3.resnets.1.time_emb_proj", + "up_blocks.0.resnets.1.conv1", + "up_blocks.0.resnets.2.conv_shortcut", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.resnets.0.time_emb_proj", + "up_blocks.1.resnets.0.time_emb_proj", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.2.downsamplers.0.conv", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "mid_block.attentions.0.proj_in", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.3.resnets.0.conv1", + "up_blocks.1.resnets.2.time_emb_proj", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.resnets.2.conv2", + "up_blocks.1.resnets.2.conv1", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.1.resnets.1.conv2", + "up_blocks.1.resnets.1.time_emb_proj", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v", + "down_blocks.2.resnets.1.conv2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.1.proj_out", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.1.resnets.2.conv2", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.2.resnets.0.time_emb_proj", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.resnets.1.conv1", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.1.resnets.1.conv1", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.1.proj_out", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.0.proj_out", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.2.resnets.1.time_emb_proj", + 
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.2.attentions.0.proj_out", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.2.resnets.0.conv1", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.0.proj_in", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.resnets.0.conv2", + "down_blocks.2.attentions.1.proj_in", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.2.resnets.2.time_emb_proj", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.resnets.0.time_emb_proj", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.2.proj_out", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.0.upsamplers.0.conv", + "down_blocks.1.resnets.0.conv1", + "up_blocks.2.resnets.1.conv2", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "time_embedding.linear_1", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.0.resnets.1.time_emb_proj", + "up_blocks.0.resnets.2.conv1", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.2.resnets.1.conv1", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q", + "down_blocks.1.resnets.0.time_emb_proj", + "up_blocks.1.attentions.2.proj_in", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.0.proj_in", + "down_blocks.1.resnets.1.conv1", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.1.proj_in", + "up_blocks.1.upsamplers.0.conv", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q", + "up_blocks.1.resnets.0.conv_shortcut", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.resnets.2.conv1", + 
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.2.resnets.2.conv2", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.0.proj_in", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.0.proj_out", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.1.proj_out", + "up_blocks.1.resnets.2.conv_shortcut", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.resnets.0.conv1", + "up_blocks.1.resnets.1.conv_shortcut", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.0.resnets.0.conv1", + "up_blocks.2.resnets.0.conv_shortcut", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.1.resnets.1.time_emb_proj", + "down_blocks.2.resnets.0.conv_shortcut", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.1.proj_out", + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.3.resnets.1.time_emb_proj", + "down_blocks.1.resnets.1.conv2", + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2", + "down_blocks.2.resnets.0.conv2", + "down_blocks.1.attentions.0.proj_out", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.1.proj_in", + "up_blocks.2.attentions.2.proj_out", + "down_blocks.0.resnets.1.conv2", + "down_blocks.2.resnets.0.conv1", + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.0.proj_out", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.0.proj_in", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v", + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.2.upsamplers.0.conv", + "up_blocks.3.resnets.0.conv2", + "up_blocks.2.resnets.1.conv_shortcut", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.resnets.0.conv1", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj", + 
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.1.proj_out", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v", + "down_blocks.1.downsamplers.0.conv", + "down_blocks.0.attentions.1.proj_in", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.0.resnets.0.time_emb_proj", + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.0.proj_in", + "down_blocks.0.attentions.1.proj_out", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.2.proj_in", + "down_blocks.0.downsamplers.0.conv", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.0.resnets.1.conv1", + "up_blocks.3.resnets.2.time_emb_proj", + "time_embedding.linear_2", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0", + "down_blocks.0.attentions.0.proj_out", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.resnets.0.conv2", + "up_blocks.1.resnets.0.conv2", + "up_blocks.2.resnets.2.conv_shortcut", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.resnets.0.conv_shortcut", + "up_blocks.3.attentions.2.proj_out", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.3.resnets.1.conv1", + "down_blocks.1.attentions.1.proj_in", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.resnets.1.conv2", + "up_blocks.3.resnets.2.conv1", + "down_blocks.0.attentions.0.proj_in", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.resnets.2.conv2", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.1.proj_in", + "up_blocks.3.resnets.2.conv_shortcut", + "up_blocks.3.resnets.0.conv_shortcut", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.0.resnets.0.conv2", + "up_blocks.3.resnets.1.conv_shortcut", + "up_blocks.3.attentions.2.proj_in", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k" + ], + "sizes": [ + 1638400, + 1638400, + 14745600, + 1638400, + 14745600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 6553600, + 1638400, + 1638400, + 14745600, + 1638400, + 1638400, + 1310720, + 14745600, + 14745600, + 14745600, + 1638400, + 1638400, + 3276800, + 14745600, + 409600, + 1638400, + 29491200, + 3276800, + 1638400, + 14745600, + 1638400, + 14745600, + 1638400, + 1638400, + 1310720, + 1638400, + 1638400, + 1310720, + 1638400, + 1638400, + 1638400, + 29491200, + 3276800, + 1638400, + 819200, + 1638400, + 1638400, + 14745600, + 409600, + 1638400, + 409600, + 1310720, + 6553600, + 14745600, + 1638400, + 409600, + 409600, + 14745600, + 22118400, + 1638400, + 14745600, + 1638400, + 655360, + 1310720, + 14745600, + 1638400, + 
1310720, + 1638400, + 409600, + 1638400, + 1638400, + 6553600, + 409600, + 655360, + 1638400, + 1638400, + 14745600, + 1638400, + 1638400, + 102400, + 102400, + 14745600, + 13107200, + 6553600, + 29491200, + 409600, + 1638400, + 409600, + 1638400, + 409600, + 1638400, + 409600, + 819200, + 1638400, + 1310720, + 409600, + 6553600, + 1638400, + 409600, + 102400, + 11059200, + 409600, + 1638400, + 1638400, + 409600, + 3276800, + 409600, + 102400, + 1638400, + 102400, + 1638400, + 3686400, + 1638400, + 1638400, + 819200, + 409600, + 1638400, + 409600, + 3276800, + 655360, + 1638400, + 327680, + 102400, + 327680, + 1638400, + 14745600, + 1843200, + 3686400, + 13107200, + 409600, + 102400, + 1638400, + 13107200, + 409600, + 29491200, + 102400, + 409600, + 409600, + 7372800, + 655360, + 102400, + 819200, + 1638400, + 409600, + 1638400, + 3686400, + 1638400, + 1638400, + 14745600, + 409600, + 3276800, + 327680, + 1638400, + 409600, + 102400, + 409600, + 409600, + 5529600, + 102400, + 3686400, + 102400, + 102400, + 1638400, + 409600, + 1638400, + 655360, + 102400, + 409600, + 1638400, + 409600, + 2457600, + 3276800, + 102400, + 29491200, + 3276800, + 102400, + 102400, + 921600, + 1228800, + 409600, + 3276800, + 1638400, + 819200, + 819200, + 102400, + 102400, + 409600, + 327680, + 1638400, + 1638400, + 409600, + 409600, + 102400, + 409600, + 3686400, + 409600, + 14745600, + 409600, + 1310720, + 409600, + 409600, + 921600, + 7372800, + 819200, + 102400, + 327680, + 409600, + 13107200, + 327680, + 6553600, + 102400, + 327680, + 409600, + 327680, + 409600, + 409600, + 3686400, + 921600, + 819200, + 102400, + 2764800, + 327680, + 655360, + 3276800, + 102400, + 409600, + 102400, + 102400, + 102400, + 13107200, + 102400, + 3686400, + 102400, + 327680, + 409600, + 819200, + 102400, + 102400, + 1310720, + 409600, + 921600, + 819200, + 655360, + 921600, + 409600, + 1638400, + 102400, + 102400, + 102400, + 409600, + 3686400, + 14745600, + 614400, + 1310720, + 409600, + 819200, + 204800, + 102400, + 409600, + 1843200, + 409600, + 102400, + 921600, + 1843200, + 102400, + 1310720, + 819200, + 921600, + 655360, + 102400, + 204800, + 307200, + 102400, + 655360, + 921600, + 204800, + 102400, + 1310720, + 655360 + ] + }, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 89.0, + 89.0, + 89.0, + 88.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.8, + 88.9, + 88.8, + 88.3 + ], + "up_blocks.0.resnets.0.conv2": [ + 88.7, + 88.9, + 88.7, + 88.2 + ], + "mid_block.resnets.1.time_emb_proj": [ + 89.0, + 88.9, + 88.7, + 88.0 + ], + "mid_block.resnets.0.conv2": [ + 88.5, + 88.8, + 88.7, + 88.5 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 88.8, + 88.7, + 88.6, + 87.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.6, + 88.8, + 88.8, + 87.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.5, + 89.0, + 88.9, + 88.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.6, + 88.6, + 88.8, + 87.6 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 88.5, + 88.7, + 88.2, + 87.7 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.9, + 88.7, + 87.9, + 86.9 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.9, + 88.6, + 88.8, + 86.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.8, + 88.5, + 87.6, + 86.7 + ], + "mid_block.attentions.0.proj_out": [ + 88.6, + 88.3, + 87.7, + 86.9 + ], + "mid_block.resnets.0.conv1": [ + 88.7, + 88.6, + 87.5, + 86.6 + ], + 
"up_blocks.0.resnets.2.time_emb_proj": [ + 88.5, + 88.3, + 88.0, + 86.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.7, + 88.4, + 87.8, + 86.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.7, + 88.6, + 87.8, + 86.5 + ], + "mid_block.resnets.1.conv2": [ + 88.6, + 88.6, + 87.8, + 86.4 + ], + "down_blocks.3.resnets.1.conv2": [ + 88.6, + 88.6, + 87.4, + 86.4 + ], + "up_blocks.0.resnets.1.conv2": [ + 88.6, + 88.4, + 88.1, + 87.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.5, + 88.2, + 88.3, + 86.3 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 88.5, + 88.5, + 88.2, + 86.1 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 88.3, + 88.3, + 87.4, + 86.1 + ], + "down_blocks.3.resnets.1.conv1": [ + 88.5, + 88.4, + 87.8, + 86.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.2, + 88.4, + 87.4, + 86.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.7, + 88.3, + 87.3, + 86.3 + ], + "up_blocks.0.resnets.0.conv1": [ + 88.2, + 88.3, + 87.4, + 85.8 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 87.9, + 88.3, + 88.2, + 86.0 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.8, + 88.0, + 88.2, + 85.9 + ], + "down_blocks.3.resnets.0.conv2": [ + 88.0, + 88.1, + 88.2, + 86.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.1, + 88.1, + 88.3, + 86.2 + ], + "mid_block.resnets.1.conv1": [ + 87.8, + 88.2, + 87.8, + 86.0 + ], + "mid_block.resnets.0.time_emb_proj": [ + 88.1, + 88.5, + 88.1, + 85.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.1, + 88.3, + 87.5, + 85.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.8, + 88.2, + 87.4, + 85.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 88.1, + 88.3, + 87.3, + 85.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.5, + 88.1, + 87.6, + 85.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.8, + 88.1, + 88.3, + 86.0 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.1, + 88.2, + 88.2, + 85.9 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 87.9, + 88.1, + 87.9, + 86.4 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 87.8, + 88.1, + 88.2, + 86.3 + ], + "up_blocks.0.resnets.1.conv1": [ + 87.7, + 88.2, + 88.0, + 86.3 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 87.7, + 87.9, + 88.0, + 86.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.9, + 88.0, + 88.1, + 86.3 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 87.8, + 87.8, + 87.8, + 86.1 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 87.5, + 87.9, + 87.9, + 86.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 87.5, + 87.6, + 87.3, + 85.6 + ], + "down_blocks.2.downsamplers.0.conv": [ + 87.5, + 87.7, + 87.2, + 85.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 87.5, + 87.5, + 86.9, + 85.0 + ], + "mid_block.attentions.0.proj_in": [ + 87.6, + 87.4, + 86.7, + 85.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 87.3, + 87.2, + 87.2, + 85.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 87.2, + 86.6, + 88.1, + 85.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 87.1, + 87.2, + 87.3, + 85.1 + ], + "down_blocks.3.resnets.0.conv1": [ + 87.0, + 87.1, + 87.7, + 85.3 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 
87.0, + 87.3, + 87.3, + 85.2 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 86.7, + 86.9, + 87.2, + 85.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 86.2, + 86.4, + 87.1, + 85.5 + ], + "up_blocks.0.resnets.2.conv2": [ + 86.2, + 86.5, + 87.6, + 85.3 + ], + "up_blocks.1.resnets.2.conv1": [ + 86.3, + 86.2, + 87.2, + 84.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 86.5, + 86.4, + 86.9, + 85.0 + ], + "up_blocks.1.resnets.1.conv2": [ + 86.5, + 86.4, + 86.7, + 84.8 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 86.4, + 86.0, + 86.7, + 84.8 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 86.5, + 86.0, + 86.7, + 84.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 86.4, + 86.1, + 87.0, + 84.7 + ], + "down_blocks.2.resnets.1.conv2": [ + 86.4, + 86.2, + 86.2, + 84.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 86.4, + 86.1, + 86.5, + 84.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 86.3, + 85.8, + 81.9, + 84.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 86.1, + 85.7, + 81.1, + 84.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 85.8, + 85.7, + 84.0, + 84.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 85.9, + 85.8, + 84.8, + 83.7 + ], + "up_blocks.1.attentions.1.proj_out": [ + 85.8, + 85.9, + 84.9, + 83.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 85.8, + 85.7, + 85.0, + 83.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 85.6, + 85.6, + 83.8, + 84.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 85.6, + 85.6, + 84.3, + 82.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 85.4, + 85.7, + 84.4, + 83.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 85.4, + 85.0, + 84.2, + 83.8 + ], + "up_blocks.1.resnets.2.conv2": [ + 85.1, + 85.4, + 83.8, + 83.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 84.7, + 85.1, + 81.8, + 83.3 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 84.6, + 84.7, + 78.7, + 82.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 84.8, + 85.2, + 79.6, + 81.1 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 84.7, + 85.0, + 79.1, + 81.0 + ], + "down_blocks.2.resnets.1.conv1": [ + 84.7, + 85.1, + 79.7, + 83.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 84.8, + 84.5, + 79.6, + 83.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 84.7, + 84.4, + 79.9, + 83.5 + ], + "up_blocks.1.resnets.1.conv1": [ + 84.9, + 84.6, + 78.9, + 83.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 84.8, + 84.6, + 78.3, + 83.6 + ], + "down_blocks.2.attentions.1.proj_out": [ + 84.5, + 84.6, + 79.9, + 83.8 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 84.5, + 84.8, + 80.4, + 83.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 84.4, + 84.5, + 81.2, + 83.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 84.4, + 84.8, + 80.4, + 83.6 + ], + "up_blocks.1.attentions.0.proj_out": [ + 84.4, + 84.6, + 81.5, + 83.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 84.4, + 84.4, + 81.3, + 83.4 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 84.4, + 84.5, + 81.2, + 83.3 + ], + 
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 84.2, + 84.7, + 81.2, + 83.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 84.1, + 84.3, + 81.6, + 83.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 84.1, + 84.3, + 81.8, + 83.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 84.1, + 84.6, + 81.7, + 83.2 + ], + "down_blocks.2.attentions.0.proj_out": [ + 83.9, + 84.4, + 79.2, + 83.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 83.7, + 84.0, + 79.1, + 82.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 83.7, + 84.0, + 78.7, + 82.9 + ], + "up_blocks.2.resnets.0.conv1": [ + 83.7, + 83.9, + 79.3, + 82.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 83.8, + 83.6, + 82.6, + 82.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 83.8, + 83.7, + 80.9, + 82.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 83.7, + 83.5, + 80.7, + 82.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 83.6, + 83.4, + 80.4, + 82.1 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 83.4, + 83.2, + 81.1, + 82.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 83.4, + 83.2, + 80.8, + 82.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 83.3, + 83.2, + 81.1, + 82.2 + ], + "down_blocks.2.attentions.0.proj_in": [ + 83.0, + 83.2, + 80.6, + 81.8 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 83.1, + 83.2, + 83.7, + 80.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 82.8, + 82.9, + 83.4, + 79.8 + ], + "up_blocks.2.resnets.0.conv2": [ + 82.5, + 82.7, + 83.6, + 80.1 + ], + "down_blocks.2.attentions.1.proj_in": [ + 82.2, + 82.7, + 83.6, + 78.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 82.1, + 82.4, + 83.6, + 78.9 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 82.2, + 82.6, + 83.7, + 80.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 82.3, + 82.4, + 83.6, + 80.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 82.1, + 82.5, + 83.8, + 81.2 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 82.1, + 82.3, + 83.8, + 81.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 82.0, + 82.3, + 83.6, + 80.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 82.0, + 82.4, + 83.6, + 81.0 + ], + "up_blocks.1.attentions.2.proj_out": [ + 81.8, + 82.2, + 83.6, + 80.9 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 81.7, + 82.2, + 83.3, + 80.8 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 81.6, + 82.1, + 80.9, + 78.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.7, + 82.1, + 81.9, + 78.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 81.7, + 82.1, + 78.5, + 78.7 + ], + "up_blocks.0.upsamplers.0.conv": [ + 81.8, + 82.1, + 76.6, + 78.3 + ], + "down_blocks.1.resnets.0.conv1": [ + 81.7, + 82.1, + 81.7, + 78.2 + ], + "up_blocks.2.resnets.1.conv2": [ + 81.7, + 82.1, + 81.2, + 78.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 81.7, + 82.0, + 82.5, + 78.1 + ], + "time_embedding.linear_1": [ + 81.5, + 81.7, + 82.6, + 78.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 81.3, + 81.7, + 82.6, + 78.1 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 81.3, + 81.7, + 82.6, + 78.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 81.3, + 81.8, + 80.2, + 78.0 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 81.2, + 81.7, + 77.5, + 77.9 + ], + "up_blocks.0.resnets.2.conv1": [ + 81.2, + 81.7, + 77.1, + 77.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 81.1, + 81.6, + 77.7, + 77.9 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 80.9, + 81.4, + 77.4, + 77.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 80.8, + 81.3, + 82.1, + 77.9 + ], + "up_blocks.2.resnets.1.conv1": [ + 80.8, + 81.4, + 82.0, + 77.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 80.8, + 81.4, + 82.1, + 77.9 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 80.7, + 81.3, + 81.9, + 77.8 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 80.5, + 81.1, + 81.6, + 77.7 + ], + "up_blocks.1.attentions.2.proj_in": [ + 80.1, + 81.0, + 81.5, + 77.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 80.1, + 81.0, + 81.4, + 77.5 + ], + "up_blocks.1.attentions.0.proj_in": [ + 79.9, + 81.0, + 80.7, + 77.6 + ], + "down_blocks.1.resnets.1.conv1": [ + 79.8, + 80.8, + 81.6, + 77.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 79.8, + 80.7, + 81.5, + 77.5 + ], + "up_blocks.1.attentions.1.proj_in": [ + 79.6, + 80.5, + 81.5, + 77.5 + ], + "up_blocks.1.upsamplers.0.conv": [ + 79.2, + 80.2, + 81.3, + 77.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 79.4, + 80.3, + 81.2, + 77.5 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 79.2, + 80.2, + 81.0, + 77.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 79.2, + 80.1, + 81.1, + 77.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 79.0, + 79.8, + 80.8, + 77.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 78.9, + 80.0, + 80.7, + 77.5 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 78.9, + 79.9, + 77.5, + 77.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 78.4, + 79.7, + 76.8, + 77.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 78.3, + 79.4, + 76.6, + 77.2 + ], + "up_blocks.2.resnets.2.conv1": [ + 78.3, + 79.3, + 76.6, + 77.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 78.3, + 79.3, + 76.4, + 77.0 + ], + "up_blocks.2.resnets.2.conv2": [ + 78.2, + 79.1, + 76.4, + 76.9 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 78.2, + 79.2, + 76.3, + 76.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 78.2, + 79.1, + 76.2, + 76.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 78.0, + 79.0, + 76.1, + 76.5 + ], + "up_blocks.2.attentions.0.proj_in": [ + 77.8, + 78.7, + 76.2, + 76.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 77.8, + 78.8, + 75.9, + 76.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 77.8, + 78.7, + 76.1, + 76.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 77.8, + 78.6, + 76.0, + 76.5 + ], + "up_blocks.2.attentions.0.proj_out": [ + 77.8, + 78.6, + 75.7, + 76.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 77.6, + 78.5, + 75.5, + 76.5 + ], + "up_blocks.2.attentions.1.proj_out": [ + 77.5, + 78.4, 
+ 75.4, + 76.3 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 77.4, + 78.2, + 75.6, + 76.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 77.3, + 78.1, + 77.1, + 76.3 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 77.2, + 77.9, + 77.1, + 76.1 + ], + "up_blocks.1.resnets.0.conv1": [ + 77.2, + 77.9, + 77.1, + 76.1 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 77.2, + 77.8, + 77.0, + 76.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 77.1, + 77.7, + 77.1, + 76.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 77.0, + 77.6, + 76.9, + 76.1 + ], + "down_blocks.0.resnets.0.conv1": [ + 76.9, + 77.4, + 74.8, + 76.1 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 76.6, + 77.3, + 75.4, + 76.0 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 76.6, + 77.1, + 74.7, + 75.9 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 76.4, + 76.8, + 75.3, + 76.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 76.5, + 76.9, + 77.0, + 76.0 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 76.5, + 77.0, + 76.5, + 76.0 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 76.6, + 77.1, + 75.7, + 75.9 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 76.5, + 76.9, + 75.6, + 75.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 76.5, + 76.6, + 75.5, + 75.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 76.7, + 76.7, + 75.5, + 75.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 76.6, + 76.8, + 75.4, + 75.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 76.6, + 76.7, + 75.4, + 75.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 76.5, + 76.6, + 75.6, + 75.5 + ], + "down_blocks.1.attentions.1.proj_out": [ + 76.7, + 76.7, + 74.4, + 75.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 76.5, + 76.4, + 74.3, + 75.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 76.2, + 76.1, + 74.1, + 75.3 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 75.9, + 75.6, + 74.0, + 75.1 + ], + "down_blocks.1.resnets.1.conv2": [ + 75.9, + 75.5, + 73.9, + 75.1 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 75.7, + 75.5, + 73.9, + 75.1 + ], + "down_blocks.2.resnets.0.conv2": [ + 75.8, + 75.6, + 75.1, + 75.0 + ], + "down_blocks.1.attentions.0.proj_out": [ + 75.7, + 75.5, + 75.7, + 74.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 75.3, + 75.1, + 76.6, + 74.8 + ], + "up_blocks.2.attentions.1.proj_in": [ + 75.1, + 74.5, + 76.2, + 74.5 + ], + "up_blocks.2.attentions.2.proj_out": [ + 75.0, + 74.5, + 76.1, + 74.3 + ], + "down_blocks.0.resnets.1.conv2": [ + 74.8, + 74.4, + 75.8, + 74.2 + ], + "down_blocks.2.resnets.0.conv1": [ + 74.8, + 74.5, + 74.5, + 74.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 74.7, + 74.3, + 74.4, + 74.0 + ], + "up_blocks.3.attentions.0.proj_out": [ + 74.6, + 74.2, + 74.3, + 74.0 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 74.5, + 74.1, + 74.2, + 74.0 + ], + "down_blocks.1.attentions.0.proj_in": [ + 74.4, + 74.3, + 73.3, + 74.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 74.4, + 74.3, + 73.8, + 74.0 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 74.4, + 74.4, + 73.5, + 73.8 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 74.4, + 74.5, + 74.9, + 73.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 74.1, + 74.2, + 74.7, + 73.6 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 74.0, + 74.1, + 74.5, + 73.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 73.9, + 73.9, + 74.8, + 73.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 73.8, + 73.8, + 74.5, + 72.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 73.7, + 73.7, + 74.3, + 72.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 73.7, + 73.6, + 74.5, + 72.9 + ], + "up_blocks.2.upsamplers.0.conv": [ + 73.6, + 73.6, + 74.3, + 72.8 + ], + "up_blocks.3.resnets.0.conv2": [ + 73.6, + 73.5, + 74.2, + 72.7 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 73.4, + 73.5, + 73.8, + 72.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 73.0, + 73.1, + 73.5, + 72.0 + ], + "up_blocks.3.resnets.0.conv1": [ + 72.9, + 73.0, + 73.3, + 71.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 72.7, + 72.7, + 73.3, + 71.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 72.7, + 72.9, + 73.0, + 71.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 72.6, + 72.8, + 72.8, + 71.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 72.3, + 72.7, + 72.0, + 71.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 72.1, + 72.7, + 72.0, + 71.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 72.0, + 72.4, + 71.6, + 71.3 + ], + "up_blocks.3.attentions.1.proj_out": [ + 71.9, + 72.2, + 71.5, + 71.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 71.8, + 72.1, + 71.0, + 71.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 71.8, + 72.1, + 71.7, + 71.0 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 71.8, + 72.2, + 71.7, + 71.1 + ], + "down_blocks.1.downsamplers.0.conv": [ + 71.7, + 72.0, + 71.3, + 70.4 + ], + "down_blocks.0.attentions.1.proj_in": [ + 71.2, + 70.3, + 71.3, + 70.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 70.5, + 70.0, + 68.8, + 70.5 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 70.5, + 71.2, + 69.6, + 70.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 70.6, + 71.6, + 70.7, + 70.7 + ], + "up_blocks.3.attentions.0.proj_in": [ + 70.5, + 71.4, + 70.4, + 70.5 + ], + "down_blocks.0.attentions.1.proj_out": [ + 70.3, + 71.3, + 71.1, + 70.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 69.4, + 70.2, + 69.4, + 70.5 + ], + "up_blocks.2.attentions.2.proj_in": [ + 69.3, + 70.0, + 69.4, + 70.1 + ], + "down_blocks.0.downsamplers.0.conv": [ + 69.4, + 70.1, + 67.7, + 69.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 69.3, + 70.0, + 69.7, + 69.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 68.3, + 68.9, + 69.5, + 69.5 + ], + "down_blocks.0.resnets.1.conv1": [ + 68.5, + 69.1, + 70.5, + 69.0 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 68.2, + 68.7, + 70.1, + 68.6 + ], + "time_embedding.linear_2": [ + 67.2, + 67.8, + 68.6, + 67.6 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 67.1, + 67.6, + 68.5, + 67.5 + ], + "down_blocks.0.attentions.0.proj_out": [ + 67.0, + 67.5, + 68.1, + 67.7 + ], + 
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 67.0, + 67.3, + 68.1, + 67.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 67.1, + 67.4, + 67.8, + 67.5 + ], + "down_blocks.1.resnets.0.conv2": [ + 67.0, + 67.6, + 66.9, + 67.1 + ], + "up_blocks.1.resnets.0.conv2": [ + 67.0, + 67.5, + 66.8, + 67.7 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 66.6, + 67.2, + 66.3, + 67.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 67.3, + 66.9, + 67.2, + 66.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 66.8, + 66.5, + 66.9, + 66.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 66.7, + 66.4, + 66.8, + 66.1 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 66.0, + 65.9, + 67.2, + 65.9 + ], + "up_blocks.3.attentions.2.proj_out": [ + 65.9, + 65.8, + 67.1, + 65.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 65.9, + 65.9, + 67.0, + 65.6 + ], + "up_blocks.3.resnets.1.conv1": [ + 65.9, + 65.8, + 66.8, + 65.5 + ], + "down_blocks.1.attentions.1.proj_in": [ + 65.3, + 65.9, + 66.6, + 65.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 65.2, + 65.8, + 66.5, + 65.3 + ], + "up_blocks.3.resnets.1.conv2": [ + 65.0, + 65.6, + 66.4, + 65.1 + ], + "up_blocks.3.resnets.2.conv1": [ + 65.0, + 65.6, + 66.4, + 65.2 + ], + "down_blocks.0.attentions.0.proj_in": [ + 65.3, + 65.7, + 66.8, + 65.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 65.8, + 66.3, + 66.3, + 65.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 65.7, + 66.1, + 66.2, + 65.6 + ], + "up_blocks.3.resnets.2.conv2": [ + 65.4, + 65.9, + 66.0, + 65.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 64.6, + 65.2, + 65.3, + 64.6 + ], + "up_blocks.3.attentions.1.proj_in": [ + 64.7, + 65.4, + 65.4, + 64.7 + ], + "up_blocks.3.resnets.2.conv_shortcut": [ + 64.9, + 65.6, + 65.7, + 64.9 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 64.8, + 65.6, + 65.1, + 64.8 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 64.8, + 65.4, + 65.2, + 65.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 64.3, + 64.2, + 65.9, + 65.0 + ], + "down_blocks.0.resnets.0.conv2": [ + 64.7, + 64.0, + 65.7, + 64.5 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 64.7, + 63.9, + 65.5, + 64.4 + ], + "up_blocks.3.attentions.2.proj_in": [ + 64.2, + 63.6, + 65.1, + 64.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 63.1, + 63.5, + 65.9, + 64.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 63.3, + 62.8, + 65.7, + 64.4 + ] + }, + "6": { + "metadata": { + "nbits": 6, + "out_ngroups": 1, + "in_ngroups": 1, + "cumulative": true, + "candidates": [ + "up_blocks.0.resnets.0.conv_shortcut", + "mid_block.resnets.0.conv1", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.0.resnets.2.conv1", + "mid_block.attentions.0.proj_out", + "down_blocks.2.resnets.0.time_emb_proj", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.resnets.1.time_emb_proj", + "up_blocks.0.resnets.2.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0", + "mid_block.resnets.0.time_emb_proj", + "mid_block.resnets.1.time_emb_proj", + "down_blocks.3.resnets.0.conv1", + 
"down_blocks.3.resnets.1.conv2", + "up_blocks.0.resnets.1.conv1", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.0.resnets.1.time_emb_proj", + "mid_block.attentions.0.proj_in", + "up_blocks.0.resnets.1.conv2", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.0.resnets.0.conv1", + "up_blocks.0.resnets.2.conv_shortcut", + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.3.resnets.1.conv1", + "up_blocks.0.resnets.1.conv_shortcut", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.1.resnets.2.time_emb_proj", + "mid_block.resnets.1.conv1", + "mid_block.resnets.1.conv2", + "down_blocks.2.resnets.1.conv2", + "up_blocks.0.resnets.0.time_emb_proj", + "up_blocks.0.resnets.0.conv2", + "up_blocks.2.resnets.0.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.3.resnets.0.time_emb_proj", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.1.resnets.1.time_emb_proj", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.3.resnets.0.conv2", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.1.resnets.0.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.resnets.2.conv2", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.1.resnets.2.conv1", + "up_blocks.1.resnets.2.conv2", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "mid_block.resnets.0.conv2", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.1.resnets.1.conv1", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.0.upsamplers.0.conv", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.1.resnets.1.conv2", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + 
"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.2.downsamplers.0.conv", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.1.resnets.0.conv2", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj", + "down_blocks.2.resnets.1.conv1", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.0.proj_out", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.0.proj_out", + "down_blocks.3.resnets.1.time_emb_proj", + "up_blocks.1.attentions.1.proj_out", + "up_blocks.1.resnets.0.conv_shortcut", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.1.proj_in", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.resnets.1.conv_shortcut", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.2.proj_out", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q", + "time_embedding.linear_1", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.2.resnets.1.time_emb_proj", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.resnets.2.conv_shortcut", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.3.resnets.0.time_emb_proj", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.0.proj_in", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.2.resnets.0.conv1", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.1.upsamplers.0.conv", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.1.proj_out", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + 
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.resnets.1.conv2", + "up_blocks.1.attentions.2.proj_in", + "up_blocks.2.resnets.1.conv2", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.2.resnets.0.conv1", + "up_blocks.2.resnets.0.conv2", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.1.resnets.0.conv2", + "down_blocks.1.resnets.1.time_emb_proj", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.0.proj_out", + "up_blocks.2.resnets.1.conv1", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.1.proj_in", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.0.proj_in", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.1.resnets.0.conv1", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.2.resnets.2.time_emb_proj", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.resnets.0.conv1", + "up_blocks.2.attentions.0.proj_in", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.3.resnets.2.time_emb_proj", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k", + "down_blocks.1.resnets.0.time_emb_proj", + "up_blocks.2.resnets.2.conv2", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.1.proj_in", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.1.resnets.1.conv1", + "down_blocks.0.resnets.1.time_emb_proj", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.resnets.1.time_emb_proj", + "up_blocks.2.resnets.2.conv1", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.2.resnets.0.conv_shortcut", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.2.attentions.1.proj_in", + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.1.proj_out", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.0.resnets.0.time_emb_proj", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.1.proj_out", + 
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.resnets.1.conv_shortcut", + "up_blocks.2.upsamplers.0.conv", + "down_blocks.1.downsamplers.0.conv", + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.resnets.0.conv1", + "down_blocks.2.resnets.0.conv_shortcut", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.2.proj_out", + "up_blocks.2.attentions.2.proj_in", + "up_blocks.3.attentions.0.proj_out", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v", + "time_embedding.linear_2", + "down_blocks.0.attentions.0.proj_out", + "down_blocks.1.attentions.0.proj_out", + "up_blocks.2.resnets.2.conv_shortcut", + "down_blocks.0.resnets.1.conv2", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.0.downsamplers.0.conv", + "down_blocks.0.attentions.0.proj_in", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.3.attentions.2.proj_out", + "up_blocks.3.resnets.0.conv2", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.1.proj_out", + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0", + "down_blocks.2.resnets.0.conv2", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.0.proj_in", + "up_blocks.3.attentions.0.proj_in", + "down_blocks.0.attentions.1.proj_in", + "up_blocks.3.resnets.2.conv1", + "down_blocks.0.resnets.1.conv1", + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.0.resnets.0.conv1", + "down_blocks.0.resnets.0.conv2", + "up_blocks.3.resnets.1.conv2", + "up_blocks.3.resnets.2.conv2", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.resnets.1.conv1", + "up_blocks.3.attentions.1.proj_in", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.3.resnets.0.conv_shortcut", + "up_blocks.3.resnets.1.conv_shortcut", + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.resnets.2.conv_shortcut", + "up_blocks.3.attentions.2.proj_in", + "down_blocks.0.attentions.1.proj_out", + "down_blocks.1.resnets.0.conv_shortcut", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj" + ], + "sizes": [ + 3276800, + 14745600, + 1310720, + 1310720, + 1638400, + 29491200, + 1638400, + 1638400, + 409600, + 1638400, + 1638400, + 
1638400, + 1638400, + 1638400, + 14745600, + 14745600, + 29491200, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 14745600, + 1638400, + 1638400, + 29491200, + 3276800, + 13107200, + 14745600, + 3276800, + 1638400, + 1638400, + 14745600, + 14745600, + 14745600, + 1638400, + 14745600, + 819200, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1310720, + 1638400, + 655360, + 1638400, + 14745600, + 1638400, + 1638400, + 1638400, + 409600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 14745600, + 409600, + 1638400, + 1310720, + 22118400, + 14745600, + 409600, + 409600, + 14745600, + 1310720, + 29491200, + 655360, + 655360, + 409600, + 1638400, + 6553600, + 1638400, + 14745600, + 6553600, + 14745600, + 1310720, + 409600, + 1638400, + 6553600, + 14745600, + 1638400, + 1638400, + 1638400, + 1310720, + 14745600, + 655360, + 409600, + 102400, + 13107200, + 14745600, + 6553600, + 409600, + 102400, + 1638400, + 1638400, + 1638400, + 3276800, + 1638400, + 1638400, + 1638400, + 1638400, + 3276800, + 102400, + 409600, + 1638400, + 1638400, + 1310720, + 1638400, + 409600, + 3276800, + 409600, + 1638400, + 102400, + 409600, + 409600, + 409600, + 655360, + 1310720, + 819200, + 6553600, + 1638400, + 13107200, + 2457600, + 655360, + 409600, + 409600, + 1638400, + 1638400, + 409600, + 11059200, + 327680, + 102400, + 102400, + 1638400, + 1310720, + 14745600, + 409600, + 102400, + 1638400, + 409600, + 13107200, + 102400, + 3686400, + 1638400, + 3686400, + 102400, + 655360, + 7372800, + 3686400, + 327680, + 409600, + 102400, + 3686400, + 819200, + 1310720, + 1638400, + 327680, + 409600, + 7372800, + 102400, + 1638400, + 409600, + 409600, + 1638400, + 1638400, + 655360, + 102400, + 1843200, + 409600, + 409600, + 819200, + 102400, + 29491200, + 409600, + 327680, + 409600, + 13107200, + 3276800, + 102400, + 409600, + 327680, + 327680, + 327680, + 819200, + 3686400, + 409600, + 1638400, + 409600, + 1310720, + 327680, + 3686400, + 409600, + 409600, + 409600, + 5529600, + 102400, + 655360, + 1228800, + 655360, + 409600, + 409600, + 409600, + 102400, + 102400, + 102400, + 409600, + 13107200, + 409600, + 409600, + 409600, + 102400, + 327680, + 3276800, + 819200, + 3686400, + 3686400, + 409600, + 409600, + 409600, + 409600, + 102400, + 1638400, + 327680, + 819200, + 2764800, + 819200, + 102400, + 409600, + 409600, + 102400, + 102400, + 1638400, + 102400, + 409600, + 614400, + 921600, + 102400, + 921600, + 102400, + 102400, + 102400, + 102400, + 921600, + 819200, + 102400, + 819200, + 409600, + 14745600, + 102400, + 409600, + 102400, + 102400, + 1843200, + 921600, + 3276800, + 409600, + 102400, + 102400, + 819200, + 921600, + 921600, + 921600, + 921600, + 102400, + 102400, + 1843200, + 102400, + 1638400, + 307200, + 204800, + 819200, + 204800, + 102400, + 102400, + 204800, + 3276800 + ] + }, + "up_blocks.0.resnets.0.conv_shortcut": [ + 89.4, + 88.9, + 88.8, + 88.9 + ], + "mid_block.resnets.0.conv1": [ + 89.1, + 89.0, + 89.1, + 88.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 89.0, + 88.8, + 88.7, + 88.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 89.2, + 89.0, + 88.9, + 88.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.9, + 88.5, + 88.9, + 88.8 + ], + "up_blocks.0.resnets.2.conv1": [ + 89.2, + 88.9, + 88.6, + 88.9 + ], + "mid_block.attentions.0.proj_out": [ + 89.2, + 88.7, + 88.8, + 88.4 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 89.0, + 88.7, + 89.1, + 87.4 + ], + 
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.9, + 88.9, + 89.1, + 87.6 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 89.0, + 89.0, + 88.7, + 86.7 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 88.9, + 89.1, + 89.1, + 87.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 89.0, + 88.9, + 88.9, + 87.1 + ], + "mid_block.resnets.0.time_emb_proj": [ + 89.3, + 88.9, + 88.9, + 86.4 + ], + "mid_block.resnets.1.time_emb_proj": [ + 89.1, + 88.9, + 88.9, + 86.6 + ], + "down_blocks.3.resnets.0.conv1": [ + 89.1, + 88.8, + 88.8, + 86.8 + ], + "down_blocks.3.resnets.1.conv2": [ + 89.2, + 88.7, + 88.7, + 86.9 + ], + "up_blocks.0.resnets.1.conv1": [ + 89.0, + 88.7, + 88.6, + 85.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 89.0, + 88.6, + 88.6, + 87.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.9, + 88.9, + 88.7, + 87.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 89.0, + 88.7, + 88.6, + 88.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.9, + 88.9, + 89.0, + 86.2 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 89.1, + 89.0, + 88.6, + 87.9 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 88.8, + 88.8, + 88.8, + 87.7 + ], + "mid_block.attentions.0.proj_in": [ + 89.0, + 88.7, + 88.7, + 87.2 + ], + "up_blocks.0.resnets.1.conv2": [ + 88.8, + 88.8, + 88.9, + 86.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.8, + 88.7, + 88.7, + 86.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 89.0, + 88.6, + 88.7, + 86.5 + ], + "up_blocks.0.resnets.0.conv1": [ + 88.8, + 89.0, + 88.8, + 86.0 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 88.9, + 88.8, + 88.7, + 87.5 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 89.0, + 88.8, + 88.6, + 87.3 + ], + "down_blocks.3.resnets.1.conv1": [ + 89.0, + 88.7, + 88.8, + 85.8 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 88.9, + 88.8, + 88.5, + 86.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.8, + 88.7, + 88.7, + 87.4 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 88.6, + 88.5, + 89.1, + 86.8 + ], + "mid_block.resnets.1.conv1": [ + 89.0, + 88.7, + 88.6, + 87.3 + ], + "mid_block.resnets.1.conv2": [ + 88.9, + 88.7, + 88.7, + 87.7 + ], + "down_blocks.2.resnets.1.conv2": [ + 88.7, + 88.8, + 88.5, + 87.9 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 88.8, + 88.6, + 88.4, + 86.7 + ], + "up_blocks.0.resnets.0.conv2": [ + 88.7, + 88.7, + 88.7, + 87.4 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 88.8, + 88.6, + 88.7, + 87.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.7, + 88.5, + 88.2, + 86.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.8, + 88.5, + 88.4, + 87.1 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 88.8, + 88.7, + 88.6, + 87.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.6, + 88.7, + 88.4, + 88.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.8, + 88.8, + 88.2, + 86.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 89.0, + 88.8, + 88.4, + 86.7 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 89.0, + 88.6, + 88.4, + 87.2 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.5, + 88.3, + 88.1, + 87.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.8, + 88.4, + 88.4, + 87.3 + ], + 
"down_blocks.3.resnets.0.conv2": [ + 88.7, + 88.5, + 88.1, + 87.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 88.6, + 88.5, + 88.1, + 87.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.4, + 88.7, + 88.3, + 86.8 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 88.5, + 88.4, + 88.0, + 86.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.5, + 88.3, + 88.0, + 87.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.4, + 88.2, + 88.3, + 87.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.3, + 88.2, + 88.1, + 87.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.2, + 88.1, + 88.2, + 85.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 88.4, + 88.3, + 88.0, + 86.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.3, + 88.4, + 88.0, + 86.7 + ], + "up_blocks.0.resnets.2.conv2": [ + 88.3, + 88.3, + 88.0, + 86.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.4, + 88.2, + 88.0, + 86.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.3, + 88.1, + 88.0, + 85.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.4, + 88.3, + 88.0, + 86.7 + ], + "up_blocks.1.resnets.2.conv1": [ + 88.1, + 88.2, + 87.8, + 86.9 + ], + "up_blocks.1.resnets.2.conv2": [ + 88.4, + 88.1, + 87.8, + 86.7 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.4, + 88.3, + 87.9, + 86.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.4, + 88.3, + 88.1, + 87.4 + ], + "mid_block.resnets.0.conv2": [ + 88.7, + 88.2, + 87.8, + 86.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.7, + 88.1, + 88.1, + 87.2 + ], + "up_blocks.1.resnets.1.conv1": [ + 88.4, + 88.0, + 88.0, + 85.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.5, + 88.1, + 87.9, + 84.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.3, + 88.3, + 87.9, + 84.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.2, + 87.9, + 87.9, + 84.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.2, + 87.7, + 87.7, + 85.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.3, + 88.0, + 87.9, + 86.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.1, + 87.8, + 87.6, + 78.5 + ], + "up_blocks.0.upsamplers.0.conv": [ + 87.9, + 88.1, + 87.8, + 81.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.0, + 87.9, + 87.7, + 82.3 + ], + "up_blocks.1.resnets.1.conv2": [ + 88.1, + 88.3, + 87.9, + 83.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.3, + 87.9, + 87.5, + 82.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.1, + 88.0, + 87.7, + 85.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.2, + 87.9, + 88.0, + 84.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.3, + 87.7, + 87.8, + 86.1 + ], + "down_blocks.2.downsamplers.0.conv": [ + 87.9, + 87.8, + 87.8, + 85.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.1, + 88.0, + 87.8, + 86.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 87.8, + 87.5, + 87.8, + 86.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 87.8, + 87.9, + 
87.7, + 86.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 87.7, + 87.7, + 87.7, + 85.9 + ], + "up_blocks.1.resnets.0.conv2": [ + 87.9, + 87.6, + 87.5, + 87.0 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.1, + 87.8, + 87.6, + 86.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.1, + 87.9, + 87.7, + 86.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 87.9, + 87.5, + 87.5, + 86.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 87.7, + 87.6, + 87.5, + 86.7 + ], + "down_blocks.2.resnets.1.conv1": [ + 87.7, + 87.8, + 87.7, + 86.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 87.7, + 87.7, + 87.6, + 86.2 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 87.7, + 87.6, + 87.9, + 86.9 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 87.8, + 87.5, + 87.8, + 85.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 87.8, + 87.5, + 87.6, + 87.7 + ], + "up_blocks.1.attentions.0.proj_out": [ + 87.9, + 87.8, + 87.2, + 86.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 87.8, + 87.5, + 87.5, + 86.8 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 87.7, + 87.7, + 87.5, + 87.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.6, + 87.5, + 87.6, + 87.2 + ], + "down_blocks.2.attentions.0.proj_out": [ + 87.7, + 87.6, + 87.6, + 87.4 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 87.7, + 87.6, + 87.3, + 87.2 + ], + "up_blocks.1.attentions.1.proj_out": [ + 87.8, + 87.5, + 87.4, + 87.3 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 87.7, + 87.4, + 87.6, + 87.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 87.5, + 87.5, + 87.2, + 87.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 87.6, + 87.5, + 87.3, + 87.6 + ], + "down_blocks.2.attentions.1.proj_in": [ + 87.5, + 87.1, + 87.3, + 86.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 87.3, + 87.0, + 87.4, + 86.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 87.2, + 86.9, + 87.3, + 86.7 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 87.4, + 86.9, + 87.3, + 86.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.5, + 87.1, + 87.8, + 86.9 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 87.6, + 87.1, + 87.3, + 87.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.5, + 87.0, + 87.3, + 84.5 + ], + "up_blocks.1.attentions.2.proj_out": [ + 87.4, + 87.0, + 87.3, + 85.0 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 87.1, + 86.7, + 87.2, + 85.1 + ], + "time_embedding.linear_1": [ + 87.1, + 86.7, + 87.3, + 83.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 87.2, + 86.8, + 87.3, + 85.9 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.1, + 86.7, + 87.2, + 85.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 87.0, + 86.5, + 87.1, + 86.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 86.8, + 86.8, + 86.9, + 87.0 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 87.1, + 86.7, + 87.1, + 86.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 87.0, + 86.5, + 87.0, + 87.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 87.1, + 86.5, + 
87.0, + 87.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 86.6, + 86.5, + 87.1, + 87.1 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 86.7, + 86.4, + 87.0, + 87.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 86.7, + 86.5, + 87.1, + 86.9 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 86.6, + 86.5, + 87.0, + 86.8 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 86.5, + 86.5, + 87.0, + 86.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 86.6, + 86.3, + 86.9, + 86.8 + ], + "down_blocks.2.attentions.0.proj_in": [ + 86.6, + 86.3, + 86.9, + 86.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 86.3, + 86.2, + 86.7, + 86.3 + ], + "up_blocks.2.resnets.0.conv1": [ + 86.4, + 86.5, + 86.8, + 86.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 86.3, + 86.4, + 86.8, + 86.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 86.5, + 86.2, + 86.9, + 86.5 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 86.5, + 86.2, + 86.9, + 86.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 86.6, + 85.9, + 86.9, + 86.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 86.7, + 86.2, + 86.7, + 86.3 + ], + "up_blocks.1.upsamplers.0.conv": [ + 86.4, + 86.1, + 86.6, + 86.0 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 86.3, + 86.2, + 86.6, + 85.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 86.2, + 86.0, + 86.8, + 85.8 + ], + "down_blocks.2.attentions.1.proj_out": [ + 86.2, + 86.0, + 86.6, + 85.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 86.3, + 85.8, + 86.5, + 85.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 86.2, + 85.8, + 86.5, + 85.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 86.1, + 85.7, + 86.3, + 85.3 + ], + "down_blocks.1.resnets.1.conv2": [ + 86.4, + 85.6, + 86.3, + 85.6 + ], + "up_blocks.1.attentions.2.proj_in": [ + 86.3, + 85.7, + 86.5, + 85.5 + ], + "up_blocks.2.resnets.1.conv2": [ + 86.2, + 85.7, + 86.4, + 85.5 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 86.2, + 85.4, + 86.4, + 85.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 85.9, + 85.1, + 86.4, + 85.6 + ], + "down_blocks.2.resnets.0.conv1": [ + 86.0, + 85.2, + 86.3, + 85.2 + ], + "up_blocks.2.resnets.0.conv2": [ + 85.9, + 85.2, + 86.3, + 84.9 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 85.9, + 85.0, + 86.1, + 84.9 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 85.8, + 85.3, + 86.1, + 84.8 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 85.6, + 85.4, + 86.0, + 84.9 + ], + "down_blocks.1.resnets.0.conv2": [ + 85.8, + 85.4, + 85.9, + 84.7 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 85.7, + 85.6, + 86.1, + 84.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 85.7, + 85.4, + 85.9, + 84.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 85.6, + 85.4, + 85.8, + 84.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 85.6, + 85.1, + 85.8, + 84.5 + ], + "up_blocks.2.attentions.0.proj_out": [ + 85.6, + 85.4, + 85.7, + 84.5 + ], + "up_blocks.2.resnets.1.conv1": [ + 85.6, + 85.3, + 85.8, + 84.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 85.4, 
+ 85.4, + 85.4, + 84.4 + ], + "up_blocks.1.attentions.1.proj_in": [ + 85.5, + 85.4, + 85.5, + 84.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 85.5, + 85.5, + 85.7, + 84.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 85.5, + 85.5, + 85.5, + 84.3 + ], + "up_blocks.1.attentions.0.proj_in": [ + 85.2, + 85.3, + 85.5, + 84.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 85.3, + 85.4, + 85.5, + 84.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 85.3, + 85.5, + 85.3, + 84.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 85.1, + 85.4, + 85.4, + 84.2 + ], + "down_blocks.1.resnets.0.conv1": [ + 85.1, + 85.3, + 85.5, + 84.2 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 85.2, + 85.4, + 85.4, + 84.2 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 85.0, + 85.0, + 85.5, + 84.2 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 85.2, + 85.4, + 85.5, + 84.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 85.1, + 85.2, + 85.6, + 84.1 + ], + "up_blocks.1.resnets.0.conv1": [ + 85.2, + 85.3, + 85.5, + 84.1 + ], + "up_blocks.2.attentions.0.proj_in": [ + 85.3, + 85.1, + 85.5, + 84.0 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 85.1, + 85.2, + 85.6, + 84.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 85.2, + 85.0, + 85.6, + 83.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 85.3, + 85.0, + 85.2, + 84.1 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 85.0, + 85.0, + 85.0, + 84.0 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 85.1, + 84.7, + 85.1, + 84.0 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 84.9, + 84.9, + 84.9, + 83.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 84.8, + 84.8, + 84.6, + 83.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 84.6, + 84.8, + 84.7, + 83.6 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 84.5, + 85.1, + 84.5, + 83.7 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 84.6, + 84.9, + 85.0, + 83.4 + ], + "up_blocks.2.resnets.2.conv2": [ + 84.5, + 84.8, + 85.0, + 83.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 84.5, + 84.9, + 85.1, + 81.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 84.4, + 84.8, + 85.0, + 80.7 + ], + "down_blocks.1.attentions.1.proj_in": [ + 84.5, + 84.7, + 85.2, + 78.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 84.5, + 84.7, + 84.8, + 78.5 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 84.3, + 84.7, + 84.8, + 78.6 + ], + "down_blocks.1.resnets.1.conv1": [ + 84.4, + 84.7, + 85.1, + 78.4 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 84.3, + 84.7, + 85.0, + 78.4 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 84.3, + 84.7, + 85.1, + 78.4 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 84.4, + 84.6, + 84.9, + 78.4 + ], + "up_blocks.2.resnets.2.conv1": [ + 84.4, + 84.7, + 85.1, + 78.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 84.3, + 84.6, + 84.7, + 78.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 84.4, + 84.3, + 84.7, + 78.4 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 84.4, + 84.3, + 84.6, + 78.4 + ], + 
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 84.4, + 84.3, + 85.0, + 78.9 + ], + "up_blocks.2.attentions.1.proj_in": [ + 84.4, + 84.2, + 84.7, + 79.1 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 84.2, + 84.2, + 84.6, + 79.2 + ], + "down_blocks.1.attentions.1.proj_out": [ + 84.2, + 84.2, + 84.5, + 78.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 84.1, + 84.0, + 84.4, + 78.3 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 84.0, + 84.0, + 84.3, + 78.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 83.9, + 84.0, + 83.8, + 78.3 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 84.0, + 84.0, + 83.8, + 78.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 84.0, + 84.0, + 78.5, + 78.4 + ], + "up_blocks.2.attentions.1.proj_out": [ + 83.8, + 83.8, + 78.4, + 78.3 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 83.4, + 83.5, + 78.4, + 78.3 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 83.5, + 83.5, + 78.4, + 78.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 83.5, + 83.4, + 78.3, + 78.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 83.5, + 83.4, + 78.6, + 78.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 83.4, + 83.6, + 79.0, + 78.3 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 83.3, + 83.3, + 79.1, + 78.3 + ], + "up_blocks.2.upsamplers.0.conv": [ + 83.2, + 83.1, + 78.5, + 78.2 + ], + "down_blocks.1.downsamplers.0.conv": [ + 83.2, + 83.1, + 83.4, + 78.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 83.1, + 83.2, + 82.8, + 78.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 82.9, + 83.0, + 77.4, + 78.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 83.0, + 83.0, + 80.6, + 78.3 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 82.9, + 82.9, + 79.3, + 78.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 83.0, + 82.8, + 77.8, + 78.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 83.0, + 82.8, + 76.9, + 78.2 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 82.8, + 82.8, + 76.9, + 78.2 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 82.8, + 82.9, + 76.9, + 78.2 + ], + "up_blocks.3.resnets.0.conv1": [ + 82.8, + 82.7, + 76.9, + 78.2 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 82.9, + 82.7, + 77.1, + 78.2 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 82.7, + 82.6, + 77.1, + 78.2 + ], + "up_blocks.2.attentions.2.proj_out": [ + 82.7, + 82.6, + 77.1, + 78.2 + ], + "up_blocks.2.attentions.2.proj_in": [ + 82.6, + 82.3, + 77.1, + 78.1 + ], + "up_blocks.3.attentions.0.proj_out": [ + 82.6, + 82.3, + 77.0, + 78.1 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 82.4, + 82.3, + 77.0, + 78.0 + ], + "time_embedding.linear_2": [ + 82.3, + 82.0, + 77.1, + 78.0 + ], + "down_blocks.0.attentions.0.proj_out": [ + 82.2, + 81.9, + 76.9, + 78.0 + ], + "down_blocks.1.attentions.0.proj_out": [ + 82.2, + 81.8, + 77.9, + 78.0 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 82.2, + 81.8, + 77.5, + 78.0 + ], + "down_blocks.0.resnets.1.conv2": [ + 82.0, + 81.9, + 77.9, + 78.0 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 82.1, + 81.9, + 77.5, + 78.0 + ], + 
"down_blocks.0.downsamplers.0.conv": [ + 81.9, + 81.7, + 78.3, + 77.9 + ], + "down_blocks.0.attentions.0.proj_in": [ + 81.9, + 81.7, + 78.1, + 77.9 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 82.0, + 81.6, + 77.9, + 77.9 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 81.9, + 81.5, + 77.9, + 77.9 + ], + "up_blocks.3.attentions.2.proj_out": [ + 81.8, + 81.4, + 77.9, + 77.9 + ], + "up_blocks.3.resnets.0.conv2": [ + 81.7, + 81.5, + 77.6, + 77.9 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 81.7, + 81.3, + 79.5, + 77.8 + ], + "up_blocks.3.attentions.1.proj_out": [ + 81.6, + 81.3, + 79.3, + 77.8 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 81.7, + 81.2, + 76.8, + 77.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 81.4, + 81.0, + 76.8, + 77.8 + ], + "down_blocks.2.resnets.0.conv2": [ + 81.4, + 81.0, + 76.9, + 77.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 81.2, + 80.8, + 76.8, + 77.7 + ], + "down_blocks.1.attentions.0.proj_in": [ + 81.3, + 80.9, + 77.7, + 77.6 + ], + "up_blocks.3.attentions.0.proj_in": [ + 81.1, + 80.9, + 77.6, + 77.6 + ], + "down_blocks.0.attentions.1.proj_in": [ + 81.1, + 81.0, + 81.7, + 77.6 + ], + "up_blocks.3.resnets.2.conv1": [ + 81.0, + 80.9, + 81.5, + 77.6 + ], + "down_blocks.0.resnets.1.conv1": [ + 81.0, + 80.9, + 81.5, + 77.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 80.6, + 80.3, + 81.2, + 77.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 80.6, + 80.2, + 81.1, + 77.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 80.2, + 80.0, + 80.8, + 77.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 80.3, + 80.1, + 79.1, + 77.4 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 80.1, + 79.8, + 79.0, + 77.4 + ], + "down_blocks.0.resnets.0.conv1": [ + 80.1, + 79.9, + 80.6, + 77.4 + ], + "down_blocks.0.resnets.0.conv2": [ + 80.2, + 80.0, + 77.3, + 77.3 + ], + "up_blocks.3.resnets.1.conv2": [ + 80.0, + 79.9, + 77.3, + 77.3 + ], + "up_blocks.3.resnets.2.conv2": [ + 79.8, + 79.7, + 77.1, + 77.2 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 79.8, + 79.6, + 75.9, + 77.3 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 79.9, + 79.7, + 76.0, + 77.2 + ], + "up_blocks.3.resnets.1.conv1": [ + 79.7, + 79.7, + 75.9, + 77.1 + ], + "up_blocks.3.attentions.1.proj_in": [ + 79.7, + 79.5, + 75.9, + 77.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 79.5, + 79.5, + 75.7, + 77.2 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 79.5, + 79.3, + 75.6, + 77.1 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 79.3, + 79.1, + 75.5, + 76.9 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 79.1, + 79.0, + 75.5, + 76.8 + ], + "up_blocks.3.resnets.2.conv_shortcut": [ + 79.1, + 78.9, + 75.5, + 76.9 + ], + "up_blocks.3.attentions.2.proj_in": [ + 79.1, + 79.0, + 75.5, + 76.8 + ], + "down_blocks.0.attentions.1.proj_out": [ + 79.1, + 79.1, + 77.1, + 76.8 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 79.1, + 79.0, + 78.6, + 76.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 79.2, + 79.0, + 78.6, + 76.7 + ] + }, + "8": { + "metadata": { + "nbits": 8, + "out_ngroups": 1, + "in_ngroups": 1, + "cumulative": true, + "candidates": [ + 
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "mid_block.attentions.0.proj_in", + "mid_block.resnets.0.conv2", + "down_blocks.2.resnets.1.time_emb_proj", + "down_blocks.3.resnets.0.conv1", + "up_blocks.2.resnets.1.time_emb_proj", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.0.resnets.0.conv_shortcut", + "up_blocks.0.resnets.1.conv_shortcut", + "up_blocks.0.resnets.2.time_emb_proj", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.3.resnets.1.conv1", + "up_blocks.0.resnets.1.conv1", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q", + "mid_block.resnets.0.time_emb_proj", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.3.resnets.1.time_emb_proj", + "up_blocks.0.resnets.2.conv2", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.0.resnets.0.conv2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.1.resnets.0.conv_shortcut", + "up_blocks.1.resnets.2.time_emb_proj", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.2.resnets.0.conv1", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.resnets.0.conv1", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.1.resnets.1.conv_shortcut", + "up_blocks.2.resnets.0.time_emb_proj", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.3.resnets.1.conv2", + "up_blocks.0.upsamplers.0.conv", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.2.resnets.1.conv1", + "down_blocks.2.resnets.1.conv2", + "down_blocks.3.resnets.0.conv2", + "up_blocks.1.resnets.0.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + 
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.1.resnets.1.conv1", + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.0.resnets.2.conv1", + "up_blocks.0.resnets.2.conv_shortcut", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.1.proj_out", + "up_blocks.1.upsamplers.0.conv", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v", + "mid_block.attentions.0.proj_out", + "mid_block.resnets.0.conv1", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.2.resnets.0.conv_shortcut", + "up_blocks.1.attentions.0.proj_out", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2", + "mid_block.resnets.1.conv1", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.1.proj_out", + "up_blocks.0.resnets.1.time_emb_proj", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.1.resnets.1.conv2", + "up_blocks.1.resnets.2.conv1", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.3.attentions.2.proj_out", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.2.attentions.0.proj_out", + "down_blocks.2.resnets.0.conv2", + "up_blocks.0.resnets.1.conv2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.1.proj_in", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.resnets.2.conv1", + "up_blocks.3.resnets.0.time_emb_proj", + "mid_block.resnets.1.time_emb_proj", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.0.proj_in", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.2.resnets.0.conv1", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.attentions.0.proj_out", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.2.proj_out", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0", + "time_embedding.linear_1", + 
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q", + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.1.resnets.0.conv2", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.2.resnets.1.conv1", + "up_blocks.1.resnets.0.conv1", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.1.resnets.0.conv1", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.2.resnets.0.conv2", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.2.resnets.2.time_emb_proj", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.1.attentions.1.proj_out", + "down_blocks.1.resnets.0.time_emb_proj", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.2.proj_in", + "up_blocks.2.resnets.2.conv2", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.0.proj_in", + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.1.downsamplers.0.conv", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.resnets.0.conv2", + "up_blocks.1.resnets.1.time_emb_proj", + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.2.resnets.1.conv2", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.1.resnets.1.conv2", + "up_blocks.2.attentions.0.proj_in", + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.3.resnets.0.time_emb_proj", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.2.downsamplers.0.conv", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.resnets.1.time_emb_proj", + "down_blocks.2.resnets.0.time_emb_proj", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.resnets.2.conv2", + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.0.resnets.1.time_emb_proj", + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.2.attentions.1.proj_out", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.3.resnets.2.conv2", + 
"down_blocks.1.resnets.1.conv1", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.2.attentions.1.proj_in", + "up_blocks.2.resnets.1.conv_shortcut", + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.3.resnets.0.conv2", + "mid_block.resnets.1.conv2", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.0.resnets.0.time_emb_proj", + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.resnets.2.conv_shortcut", + "up_blocks.3.resnets.1.time_emb_proj", + "up_blocks.3.resnets.2.time_emb_proj", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.resnets.2.conv_shortcut", + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.1.proj_out", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v", + "down_blocks.1.attentions.0.proj_in", + "up_blocks.2.resnets.0.conv_shortcut", + "up_blocks.3.resnets.2.conv1", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.1.proj_in", + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.0.resnets.0.conv1", + "down_blocks.0.resnets.0.time_emb_proj", + "up_blocks.2.attentions.2.proj_out", + "up_blocks.2.upsamplers.0.conv", + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k", + "time_embedding.linear_2", + "down_blocks.1.attentions.0.proj_out", + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.resnets.1.conv1", + "up_blocks.2.attentions.1.proj_in", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0", + "down_blocks.0.resnets.1.conv2", + "up_blocks.3.resnets.0.conv1", + "up_blocks.2.attentions.2.proj_in", + "up_blocks.3.attentions.0.proj_out", + "up_blocks.3.resnets.1.conv2", + "down_blocks.0.attentions.0.proj_out", + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.0.attentions.1.proj_out", + "up_blocks.3.attentions.2.proj_in", + 
"down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.3.attentions.0.proj_in", + "up_blocks.3.resnets.0.conv_shortcut", + "down_blocks.0.attentions.0.proj_in", + "down_blocks.0.resnets.0.conv2", + "down_blocks.1.resnets.0.conv_shortcut", + "up_blocks.3.attentions.1.proj_in", + "up_blocks.3.resnets.2.conv_shortcut", + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.3.resnets.1.conv_shortcut", + "down_blocks.0.resnets.1.conv1", + "down_blocks.0.attentions.1.proj_in", + "down_blocks.0.downsamplers.0.conv" + ], + "sizes": [ + 655360, + 6553600, + 1638400, + 1638400, + 1638400, + 655360, + 1638400, + 14745600, + 1638400, + 14745600, + 819200, + 409600, + 1310720, + 3276800, + 3276800, + 1638400, + 13107200, + 1638400, + 13107200, + 14745600, + 29491200, + 1638400, + 409600, + 1638400, + 1638400, + 1638400, + 14745600, + 1310720, + 327680, + 6553600, + 102400, + 1310720, + 14745600, + 1310720, + 3276800, + 1638400, + 409600, + 11059200, + 327680, + 655360, + 1638400, + 102400, + 1638400, + 29491200, + 1638400, + 3276800, + 819200, + 102400, + 655360, + 409600, + 1638400, + 1638400, + 6553600, + 14745600, + 14745600, + 1638400, + 655360, + 13107200, + 1638400, + 14745600, + 14745600, + 14745600, + 1638400, + 409600, + 1310720, + 1638400, + 1310720, + 1310720, + 1310720, + 29491200, + 409600, + 409600, + 655360, + 29491200, + 3276800, + 1310720, + 1638400, + 14745600, + 1310720, + 1638400, + 14745600, + 1638400, + 819200, + 1638400, + 6553600, + 14745600, + 409600, + 409600, + 1638400, + 1638400, + 1638400, + 1310720, + 14745600, + 22118400, + 409600, + 409600, + 409600, + 102400, + 1638400, + 409600, + 655360, + 1638400, + 1638400, + 14745600, + 14745600, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 5529600, + 409600, + 1638400, + 1638400, + 1638400, + 102400, + 102400, + 409600, + 1638400, + 7372800, + 1638400, + 6553600, + 1638400, + 409600, + 1638400, + 327680, + 655360, + 1638400, + 409600, + 409600, + 409600, + 409600, + 1638400, + 1638400, + 409600, + 409600, + 327680, + 3686400, + 409600, + 102400, + 13107200, + 1638400, + 655360, + 7372800, + 29491200, + 1638400, + 1843200, + 1638400, + 3686400, + 1638400, + 409600, + 819200, + 409600, + 409600, + 819200, + 6553600, + 1638400, + 3686400, + 1310720, + 1638400, + 409600, + 3276800, + 327680, + 102400, + 102400, + 3686400, + 1638400, + 1638400, + 14745600, + 1638400, + 3276800, + 102400, + 3686400, + 102400, + 3686400, + 409600, + 102400, + 1638400, + 409600, + 102400, + 655360, + 14745600, + 1638400, + 13107200, + 819200, + 1638400, + 1638400, + 14745600, + 1638400, + 102400, + 409600, + 102400, + 409600, + 1638400, + 409600, + 102400, + 409600, + 921600, + 3686400, + 102400, + 102400, + 102400, + 409600, + 409600, + 1638400, + 819200, + 327680, + 921600, + 14745600, + 102400, + 102400, + 102400, + 1638400, + 1638400, + 3276800, + 614400, + 409600, + 409600, + 3276800, + 2457600, + 409600, + 1638400, + 102400, + 102400, + 409600, + 1228800, + 1843200, + 327680, + 409600, + 819200, + 409600, + 102400, + 819200, + 409600, + 409600, + 102400, + 102400, + 102400, + 921600, + 409600, + 409600, + 3686400, + 102400, + 102400, + 819200, + 327680, + 327680, + 327680, + 1638400, + 409600, + 102400, + 819200, + 1843200, + 409600, + 3276800, + 409600, + 102400, + 921600, + 2764800, + 409600, + 102400, + 921600, + 102400, + 409600, + 102400, + 102400, + 409600, + 102400, + 307200, + 102400, + 921600, + 204800, + 102400, + 204800, + 819200, + 204800, + 921600, + 102400, + 921600 + ] 
+ }, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.9, + 88.7, + 88.9, + 88.8 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 89.1, + 88.7, + 89.2, + 88.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 88.9, + 88.8, + 88.8, + 88.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.7, + 88.8, + 89.2, + 89.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.8, + 88.6, + 88.7, + 88.0 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.9, + 88.5, + 88.9, + 88.3 + ], + "mid_block.attentions.0.proj_in": [ + 88.8, + 88.6, + 88.6, + 87.3 + ], + "mid_block.resnets.0.conv2": [ + 88.8, + 88.5, + 89.0, + 89.1 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 88.8, + 88.9, + 88.8, + 87.8 + ], + "down_blocks.3.resnets.0.conv1": [ + 89.1, + 88.5, + 88.7, + 88.2 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 89.1, + 88.6, + 88.6, + 88.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.9, + 88.5, + 88.9, + 88.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 89.0, + 88.5, + 88.7, + 88.5 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 88.8, + 88.6, + 88.5, + 88.5 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 88.6, + 88.8, + 88.9, + 87.4 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 88.8, + 88.6, + 88.7, + 88.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 88.9, + 88.7, + 88.9, + 88.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.8, + 88.7, + 88.4, + 88.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 88.9, + 88.5, + 88.8, + 88.8 + ], + "down_blocks.3.resnets.1.conv1": [ + 89.1, + 88.7, + 88.7, + 88.7 + ], + "up_blocks.0.resnets.1.conv1": [ + 89.1, + 88.8, + 88.7, + 88.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 88.9, + 88.8, + 88.9, + 87.8 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": [ + 88.9, + 88.8, + 88.7, + 88.8 + ], + "mid_block.resnets.0.time_emb_proj": [ + 88.7, + 88.6, + 88.8, + 88.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.9, + 88.7, + 88.0, + 88.4 + ], + "down_blocks.3.resnets.1.time_emb_proj": [ + 89.0, + 88.7, + 88.0, + 88.7 + ], + "up_blocks.0.resnets.2.conv2": [ + 89.0, + 88.4, + 88.9, + 88.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 89.1, + 88.8, + 88.2, + 88.7 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": [ + 88.6, + 88.5, + 88.1, + 88.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.7, + 88.7, + 88.5, + 88.8 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.8, + 88.5, + 88.7, + 88.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.9, + 88.5, + 88.5, + 88.9 + ], + "up_blocks.0.resnets.0.conv2": [ + 88.7, + 88.6, + 88.7, + 87.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.7, + 88.3, + 88.5, + 88.8 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 88.6, + 88.6, + 88.5, + 88.4 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 88.8, + 88.4, + 88.5, + 88.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.7, + 88.6, + 88.6, + 88.8 + ], + "up_blocks.2.resnets.0.conv1": [ + 89.0, + 88.6, + 88.5, + 88.6 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.5, + 88.5, + 88.8, + 88.4 + ], + 
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.7, + 88.3, + 88.5, + 88.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.8, + 88.4, + 88.5, + 87.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.5, + 88.2, + 88.4, + 87.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.8, + 88.3, + 88.7, + 88.4 + ], + "up_blocks.0.resnets.0.conv1": [ + 88.7, + 88.6, + 88.5, + 88.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 88.5, + 88.5, + 88.8, + 88.0 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 88.7, + 88.6, + 88.6, + 88.0 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 88.6, + 88.4, + 88.6, + 88.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.9, + 88.3, + 88.7, + 87.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.7, + 88.3, + 88.5, + 88.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.8, + 88.4, + 88.5, + 87.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.6, + 88.1, + 88.5, + 87.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.8, + 88.5, + 88.6, + 87.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.7, + 88.6, + 88.4, + 88.4 + ], + "down_blocks.3.resnets.1.conv2": [ + 88.9, + 88.4, + 88.5, + 88.1 + ], + "up_blocks.0.upsamplers.0.conv": [ + 88.7, + 88.3, + 88.6, + 88.3 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 88.6, + 88.6, + 88.6, + 88.2 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.7, + 88.1, + 88.5, + 88.2 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.9, + 88.1, + 88.6, + 88.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.8, + 88.5, + 88.3, + 87.6 + ], + "down_blocks.2.resnets.1.conv1": [ + 88.5, + 88.4, + 88.6, + 87.9 + ], + "down_blocks.2.resnets.1.conv2": [ + 89.0, + 88.1, + 88.5, + 87.5 + ], + "down_blocks.3.resnets.0.conv2": [ + 88.6, + 88.3, + 88.1, + 87.5 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 89.0, + 88.3, + 88.2, + 88.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.8, + 88.5, + 88.3, + 87.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 88.7, + 88.4, + 88.6, + 87.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.9, + 88.3, + 88.6, + 87.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.8, + 88.4, + 88.4, + 87.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.4, + 88.3, + 88.3, + 87.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 88.7, + 88.4, + 88.5, + 87.7 + ], + "up_blocks.1.resnets.1.conv1": [ + 88.5, + 88.2, + 88.8, + 87.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.6, + 88.3, + 88.6, + 87.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.8, + 88.3, + 88.2, + 87.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.5, + 88.3, + 88.5, + 87.7 + ], + "up_blocks.0.resnets.2.conv1": [ + 88.7, + 88.2, + 88.6, + 88.0 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 88.6, + 88.1, + 88.5, + 87.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.4, + 88.4, + 88.6, + 87.8 + ], + "up_blocks.1.attentions.1.proj_out": [ + 88.4, + 88.2, + 88.4, + 87.8 + ], + "up_blocks.1.upsamplers.0.conv": [ + 88.6, 
+ 88.3, + 88.7, + 87.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.7, + 88.4, + 88.7, + 87.7 + ], + "mid_block.attentions.0.proj_out": [ + 88.6, + 88.4, + 88.4, + 87.6 + ], + "mid_block.resnets.0.conv1": [ + 88.3, + 88.3, + 88.6, + 87.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 88.3, + 88.3, + 88.4, + 87.7 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 88.5, + 88.4, + 88.6, + 87.7 + ], + "up_blocks.1.attentions.0.proj_out": [ + 88.7, + 88.3, + 88.5, + 87.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 88.5, + 88.1, + 88.8, + 87.7 + ], + "mid_block.resnets.1.conv1": [ + 88.5, + 88.3, + 88.5, + 87.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.4, + 88.4, + 88.8, + 87.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.5, + 88.2, + 88.5, + 87.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.4, + 88.3, + 88.5, + 87.8 + ], + "down_blocks.2.attentions.1.proj_out": [ + 88.3, + 88.2, + 88.4, + 87.7 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 88.4, + 88.3, + 88.6, + 87.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.5, + 88.5, + 88.2, + 87.6 + ], + "up_blocks.1.resnets.1.conv2": [ + 88.6, + 88.3, + 88.4, + 87.4 + ], + "up_blocks.1.resnets.2.conv1": [ + 88.4, + 88.3, + 88.4, + 87.8 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.2, + 88.2, + 88.8, + 87.4 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.5, + 88.5, + 88.5, + 87.6 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.6, + 88.5, + 88.5, + 87.5 + ], + "up_blocks.3.attentions.2.proj_out": [ + 88.3, + 88.0, + 88.0, + 87.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.2, + 88.0, + 88.2, + 87.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.4, + 88.2, + 87.9, + 87.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": [ + 88.2, + 88.2, + 88.3, + 87.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.3, + 87.9, + 88.2, + 87.7 + ], + "down_blocks.2.attentions.0.proj_out": [ + 88.4, + 88.0, + 88.4, + 87.7 + ], + "down_blocks.2.resnets.0.conv2": [ + 88.2, + 87.9, + 88.3, + 87.4 + ], + "up_blocks.0.resnets.1.conv2": [ + 88.3, + 88.0, + 88.6, + 87.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.3, + 87.9, + 87.9, + 87.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 88.4, + 88.1, + 88.1, + 87.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.3, + 88.1, + 87.9, + 87.5 + ], + "up_blocks.1.attentions.1.proj_in": [ + 88.4, + 87.9, + 87.9, + 87.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.3, + 87.9, + 87.8, + 87.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.2, + 87.9, + 87.6, + 87.6 + ], + "up_blocks.2.resnets.2.conv1": [ + 88.0, + 88.2, + 87.9, + 87.7 + ], + "up_blocks.3.resnets.0.time_emb_proj": [ + 88.3, + 88.0, + 88.0, + 87.4 + ], + "mid_block.resnets.1.time_emb_proj": [ + 88.4, + 87.6, + 88.1, + 87.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.1, + 87.9, + 87.8, + 87.6 + ], + "up_blocks.1.attentions.0.proj_in": [ + 88.4, + 87.9, + 88.2, + 87.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": [ + 88.3, + 87.9, + 88.0, + 87.3 + ], + 
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 88.2, + 88.0, + 87.7, + 87.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.2, + 87.7, + 88.3, + 87.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 88.1, + 87.8, + 88.3, + 87.6 + ], + "down_blocks.2.resnets.0.conv1": [ + 88.1, + 87.9, + 88.2, + 87.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.0, + 88.0, + 88.3, + 87.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 88.3, + 88.1, + 88.3, + 87.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 88.1, + 87.9, + 88.2, + 87.5 + ], + "up_blocks.2.attentions.0.proj_out": [ + 88.2, + 87.9, + 88.4, + 87.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 88.3, + 87.7, + 88.0, + 87.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": [ + 88.0, + 87.9, + 87.4, + 87.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.9, + 87.9, + 87.3, + 87.5 + ], + "up_blocks.1.attentions.2.proj_out": [ + 88.1, + 87.8, + 87.9, + 87.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.1, + 87.8, + 88.3, + 87.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": [ + 88.3, + 87.8, + 87.6, + 87.7 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 88.2, + 87.7, + 87.8, + 87.7 + ], + "time_embedding.linear_1": [ + 88.1, + 87.8, + 88.4, + 87.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 88.1, + 87.8, + 87.7, + 87.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 88.3, + 87.6, + 87.8, + 87.7 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.9, + 87.7, + 87.7, + 87.8 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 87.9, + 87.6, + 87.7, + 87.7 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 87.8, + 87.8, + 87.7, + 87.6 + ], + "down_blocks.1.resnets.0.conv2": [ + 88.0, + 87.9, + 87.2, + 87.5 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 87.9, + 88.0, + 87.2, + 87.3 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 88.0, + 87.4, + 87.3, + 87.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.1, + 87.7, + 87.2, + 87.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.9, + 87.6, + 87.4, + 87.1 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": [ + 88.1, + 87.6, + 87.4, + 87.2 + ], + "up_blocks.2.resnets.1.conv1": [ + 88.0, + 87.6, + 87.3, + 87.2 + ], + "up_blocks.1.resnets.0.conv1": [ + 87.8, + 87.9, + 87.2, + 87.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 88.0, + 87.7, + 87.4, + 87.3 + ], + "down_blocks.1.resnets.0.conv1": [ + 88.1, + 87.8, + 87.3, + 87.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.1, + 87.7, + 87.4, + 87.0 + ], + "up_blocks.2.resnets.0.conv2": [ + 88.2, + 87.8, + 87.4, + 87.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.2, + 87.7, + 87.7, + 87.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.1, + 87.7, + 87.3, + 87.3 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 88.1, + 87.7, + 87.8, + 87.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 88.1, + 87.6, + 87.4, + 87.0 + ], + "down_blocks.1.attentions.1.proj_out": [ + 87.9, + 87.6, + 87.0, + 87.0 + ], + 
"down_blocks.1.resnets.0.time_emb_proj": [ + 88.1, + 87.8, + 87.0, + 86.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 87.9, + 87.6, + 86.7, + 86.7 + ], + "up_blocks.1.attentions.2.proj_in": [ + 88.3, + 87.7, + 87.3, + 86.7 + ], + "up_blocks.2.resnets.2.conv2": [ + 87.9, + 87.6, + 87.1, + 86.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 87.8, + 87.7, + 87.0, + 86.8 + ], + "down_blocks.2.attentions.0.proj_in": [ + 87.9, + 87.7, + 87.2, + 86.7 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 88.0, + 87.7, + 87.2, + 86.7 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 88.0, + 87.6, + 87.2, + 86.6 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": [ + 88.1, + 87.7, + 87.1, + 86.8 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": [ + 87.9, + 87.9, + 87.2, + 86.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 88.0, + 87.6, + 87.2, + 86.6 + ], + "down_blocks.1.downsamplers.0.conv": [ + 87.8, + 87.7, + 86.9, + 86.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 87.9, + 87.8, + 86.8, + 86.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 88.2, + 87.7, + 87.1, + 86.7 + ], + "up_blocks.1.resnets.0.conv2": [ + 87.8, + 87.8, + 87.1, + 86.4 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 88.0, + 87.8, + 86.8, + 86.5 + ], + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 88.1, + 87.6, + 87.0, + 86.6 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.0, + 87.9, + 86.9, + 86.8 + ], + "up_blocks.2.resnets.1.conv2": [ + 87.9, + 87.7, + 86.8, + 86.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 88.0, + 87.5, + 86.6, + 86.7 + ], + "down_blocks.1.resnets.1.conv2": [ + 87.9, + 87.4, + 86.7, + 86.8 + ], + "up_blocks.2.attentions.0.proj_in": [ + 87.9, + 87.8, + 87.0, + 86.5 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 88.0, + 87.8, + 86.6, + 86.5 + ], + "down_blocks.3.resnets.0.time_emb_proj": [ + 87.9, + 87.8, + 86.8, + 86.6 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": [ + 87.7, + 87.7, + 86.5, + 86.5 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.0, + 87.8, + 86.9, + 86.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 88.0, + 87.8, + 86.9, + 86.6 + ], + "down_blocks.2.downsamplers.0.conv": [ + 88.0, + 87.7, + 86.9, + 86.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 88.0, + 88.0, + 87.0, + 86.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 87.8, + 87.7, + 87.1, + 86.4 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 88.0, + 87.5, + 86.9, + 86.5 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 88.0, + 87.6, + 86.7, + 86.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 87.7, + 87.7, + 87.1, + 86.5 + ], + "up_blocks.1.resnets.2.conv2": [ + 87.8, + 87.7, + 86.9, + 86.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": [ + 87.8, + 87.8, + 87.1, + 86.4 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": [ + 87.9, + 87.8, + 86.8, + 86.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 88.0, + 87.5, + 86.7, + 86.7 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": [ + 87.9, + 87.6, + 86.7, + 86.5 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 87.7, + 87.6, + 86.2, + 86.7 + ], + 
"up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 87.9, + 87.6, + 86.1, + 86.6 + ], + "up_blocks.2.attentions.1.proj_out": [ + 87.9, + 87.6, + 85.8, + 86.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 87.4, + 87.4, + 86.0, + 86.5 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": [ + 87.5, + 87.3, + 86.2, + 86.6 + ], + "up_blocks.3.resnets.2.conv2": [ + 87.6, + 87.2, + 86.1, + 86.4 + ], + "down_blocks.1.resnets.1.conv1": [ + 87.6, + 87.1, + 86.4, + 86.3 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.4, + 87.5, + 86.4, + 86.1 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": [ + 87.4, + 87.2, + 86.6, + 86.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.5, + 87.3, + 86.8, + 86.5 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 87.5, + 87.2, + 86.4, + 86.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 87.5, + 87.2, + 86.8, + 86.4 + ], + "down_blocks.2.attentions.1.proj_in": [ + 87.4, + 87.5, + 87.1, + 86.3 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 87.5, + 87.2, + 86.9, + 86.6 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": [ + 87.4, + 87.2, + 87.1, + 86.6 + ], + "up_blocks.3.resnets.0.conv2": [ + 87.3, + 87.2, + 87.0, + 86.4 + ], + "mid_block.resnets.1.conv2": [ + 87.4, + 87.3, + 87.0, + 86.5 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 87.5, + 87.4, + 87.1, + 86.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": [ + 87.3, + 87.3, + 86.8, + 86.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 87.6, + 87.3, + 87.1, + 86.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 87.3, + 87.3, + 86.8, + 86.4 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 87.4, + 87.4, + 87.2, + 86.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 87.3, + 87.1, + 86.9, + 86.2 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 87.3, + 87.0, + 86.9, + 86.3 + ], + "up_blocks.3.resnets.1.time_emb_proj": [ + 87.3, + 87.3, + 86.9, + 86.5 + ], + "up_blocks.3.resnets.2.time_emb_proj": [ + 87.5, + 87.2, + 86.7, + 86.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 87.4, + 87.2, + 86.5, + 86.5 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 87.5, + 87.1, + 86.3, + 86.6 + ], + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 87.3, + 87.3, + 86.6, + 86.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 87.3, + 87.4, + 86.6, + 86.4 + ], + "up_blocks.3.attentions.1.proj_out": [ + 87.1, + 87.3, + 86.4, + 86.7 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": [ + 87.4, + 86.9, + 86.2, + 86.5 + ], + "down_blocks.1.attentions.0.proj_in": [ + 87.2, + 87.1, + 86.3, + 86.6 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 87.1, + 87.1, + 85.7, + 86.5 + ], + "up_blocks.3.resnets.2.conv1": [ + 87.1, + 87.0, + 86.0, + 86.5 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 87.0, + 87.0, + 86.1, + 86.8 + ], + "down_blocks.1.attentions.1.proj_in": [ + 86.8, + 87.1, + 85.1, + 86.4 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 87.0, + 87.0, + 85.3, + 86.5 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": [ + 87.1, + 87.0, + 85.2, + 86.6 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 86.9, + 86.7, + 85.4, + 86.6 + ], + 
"up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 86.8, + 86.5, + 85.4, + 86.5 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": [ + 86.8, + 86.5, + 84.8, + 86.4 + ], + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 86.8, + 86.7, + 85.3, + 86.6 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 86.9, + 86.9, + 85.2, + 86.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 86.8, + 86.6, + 85.0, + 86.5 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 86.8, + 86.7, + 86.2, + 86.4 + ], + "down_blocks.0.resnets.0.conv1": [ + 86.9, + 86.8, + 85.9, + 86.4 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 86.7, + 86.7, + 86.1, + 86.6 + ], + "up_blocks.2.attentions.2.proj_out": [ + 86.5, + 86.8, + 85.9, + 86.4 + ], + "up_blocks.2.upsamplers.0.conv": [ + 86.5, + 86.5, + 86.0, + 86.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": [ + 86.5, + 86.6, + 85.8, + 86.5 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 86.7, + 86.7, + 86.3, + 86.2 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 86.5, + 86.3, + 86.1, + 86.2 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 86.8, + 86.6, + 86.1, + 86.1 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 86.8, + 86.5, + 86.3, + 86.0 + ], + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": [ + 86.7, + 86.4, + 86.4, + 86.2 + ], + "time_embedding.linear_2": [ + 86.7, + 86.6, + 86.3, + 86.3 + ], + "down_blocks.1.attentions.0.proj_out": [ + 86.7, + 86.6, + 86.0, + 86.3 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 86.6, + 86.4, + 85.9, + 86.4 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 86.7, + 86.4, + 85.7, + 86.3 + ], + "up_blocks.3.resnets.1.conv1": [ + 86.7, + 86.6, + 85.7, + 86.3 + ], + "up_blocks.2.attentions.1.proj_in": [ + 86.5, + 86.4, + 85.7, + 86.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 86.5, + 86.5, + 85.4, + 86.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 86.5, + 86.2, + 82.3, + 86.3 + ], + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 86.3, + 86.2, + 82.3, + 86.0 + ], + "down_blocks.0.resnets.1.conv2": [ + 86.4, + 86.1, + 84.8, + 86.0 + ], + "up_blocks.3.resnets.0.conv1": [ + 86.6, + 86.2, + 85.0, + 85.9 + ], + "up_blocks.2.attentions.2.proj_in": [ + 86.2, + 85.9, + 85.0, + 85.8 + ], + "up_blocks.3.attentions.0.proj_out": [ + 86.2, + 86.1, + 84.9, + 85.8 + ], + "up_blocks.3.resnets.1.conv2": [ + 86.2, + 85.8, + 84.2, + 85.8 + ], + "down_blocks.0.attentions.0.proj_out": [ + 86.2, + 86.1, + 84.0, + 85.8 + ], + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": [ + 86.3, + 85.8, + 84.3, + 85.9 + ], + "down_blocks.0.attentions.1.proj_out": [ + 86.4, + 85.8, + 85.2, + 85.7 + ], + "up_blocks.3.attentions.2.proj_in": [ + 86.1, + 85.7, + 84.8, + 85.6 + ], + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 85.9, + 85.7, + 84.6, + 85.6 + ], + "up_blocks.3.attentions.0.proj_in": [ + 86.0, + 85.6, + 84.7, + 85.7 + ], + "up_blocks.3.resnets.0.conv_shortcut": [ + 86.0, + 85.7, + 84.7, + 85.3 + ], + "down_blocks.0.attentions.0.proj_in": [ + 86.0, + 85.7, + 85.0, + 85.4 + ], + "down_blocks.0.resnets.0.conv2": [ + 85.9, + 85.7, + 85.1, + 85.5 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 86.0, + 85.8, + 85.3, + 85.8 + ], + 
"up_blocks.3.attentions.1.proj_in": [ + 86.1, + 85.8, + 85.3, + 85.5 + ], + "up_blocks.3.resnets.2.conv_shortcut": [ + 85.9, + 85.6, + 85.1, + 85.6 + ], + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 85.9, + 85.7, + 85.4, + 85.3 + ], + "up_blocks.3.resnets.1.conv_shortcut": [ + 86.0, + 85.4, + 85.0, + 85.4 + ], + "down_blocks.0.resnets.1.conv1": [ + 85.6, + 85.4, + 84.8, + 84.3 + ], + "down_blocks.0.attentions.1.proj_in": [ + 85.8, + 85.4, + 80.7, + 85.2 + ], + "down_blocks.0.downsamplers.0.conv": [ + 85.8, + 85.3, + 82.1, + 82.5 + ] + } + }, + "baselines": { + "original": 89.0, + "linear_8bit": 79.3, + "recipe_9.57_bit_mixedpalette": 87.3, + "recipe_6.09_bit_mixedpalette": 84.8, + "recipe_5.03_bit_mixedpalette": 81.5, + "recipe_4.59_bit_mixedpalette": 81.4, + "recipe_4.21_bit_mixedpalette": 78.6, + "recipe_4.00_bit_mixedpalette": 78.85, + "recipe_3.71_bit_mixedpalette": 77.0 + }, + "recipes": { + "recipe_9.57_bit_mixedpalette": { + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.resnets.1.time_emb_proj": 1, + "up_blocks.0.resnets.1.time_emb_proj": 1, + "down_blocks.3.resnets.1.time_emb_proj": 2, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 1, + "up_blocks.0.resnets.0.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 1, + "up_blocks.0.resnets.2.time_emb_proj": 2, + "down_blocks.3.resnets.0.time_emb_proj": 2, + "mid_block.resnets.0.time_emb_proj": 2, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 2, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 2, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "mid_block.attentions.0.proj_in": 6, + "up_blocks.0.resnets.2.conv_shortcut": 6, + "mid_block.attentions.0.proj_out": 6, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 2, + "down_blocks.3.resnets.1.conv1": 6, + "mid_block.resnets.0.conv1": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 8, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.0.resnets.0.conv_shortcut": 6, + "down_blocks.3.resnets.1.conv2": 6, + "up_blocks.0.resnets.0.conv1": 6, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.0.resnets.1.conv_shortcut": 6, + "up_blocks.1.resnets.2.time_emb_proj": 6, + "down_blocks.3.resnets.0.conv2": 6, + "down_blocks.3.resnets.0.conv1": 6, + "up_blocks.0.resnets.1.conv1": 8, + "up_blocks.1.resnets.1.time_emb_proj": 6, + "mid_block.resnets.0.conv2": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 6, + "mid_block.resnets.1.conv1": 6, + "up_blocks.2.resnets.0.time_emb_proj": 6, + "up_blocks.0.resnets.0.conv2": 4, + "mid_block.resnets.1.conv2": 6, + "up_blocks.0.resnets.2.conv1": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 16, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 16, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.0.resnets.1.conv2": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.2.downsamplers.0.conv": 16, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 8, + 
"up_blocks.1.resnets.0.time_emb_proj": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.2.resnets.1.conv2": 6, + "up_blocks.1.attentions.1.proj_out": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 16, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 16, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.1.resnets.2.conv2": 16, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 16, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.1.resnets.0.conv1": 16, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.2.resnets.1.conv1": 8, + "down_blocks.2.resnets.0.time_emb_proj": 6, + "down_blocks.2.resnets.1.time_emb_proj": 6, + "up_blocks.1.attentions.0.proj_out": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 16, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.1.resnets.2.conv1": 8, + "up_blocks.1.resnets.1.conv1": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 16, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.1.resnets.1.conv2": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.1.resnets.0.conv2": 16, + "down_blocks.2.attentions.0.proj_in": 16, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.resnets.0.conv1": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.0.resnets.2.conv2": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 16, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": 16, + "down_blocks.2.resnets.0.conv1": 16, + "down_blocks.1.resnets.1.time_emb_proj": 16, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 16, + "down_blocks.1.attentions.1.proj_out": 16, + "down_blocks.2.attentions.1.proj_in": 16, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 8, + 
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.1.resnets.1.conv_shortcut": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 16, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 16, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.1.upsamplers.0.conv": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.resnets.1.conv1": 16, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.1.resnets.0.conv_shortcut": 8, + "up_blocks.1.resnets.2.conv_shortcut": 16, + "down_blocks.2.attentions.1.proj_out": 8, + "up_blocks.1.attentions.1.proj_in": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 16, + "up_blocks.0.upsamplers.0.conv": 8, + "up_blocks.2.resnets.1.time_emb_proj": 8, + "down_blocks.2.attentions.0.proj_out": 8, + "up_blocks.1.attentions.0.proj_in": 8, + "up_blocks.2.resnets.1.conv2": 16, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.2.resnets.0.conv2": 16, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": 16, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 16, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": 16, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.1.attentions.2.proj_out": 16, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 16, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 16, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": 16, + "up_blocks.1.attentions.2.proj_in": 16, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": 16, + "down_blocks.1.resnets.1.conv2": 16, + "up_blocks.3.resnets.0.time_emb_proj": 16, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 16, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 16, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 16, + "down_blocks.1.resnets.1.conv1": 16, + "down_blocks.2.resnets.0.conv_shortcut": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.2.resnets.0.conv2": 16, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 16, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 8, + "up_blocks.2.resnets.2.conv1": 16, + "down_blocks.1.downsamplers.0.conv": 16, + "down_blocks.1.resnets.0.conv2": 16, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 16, + "down_blocks.1.resnets.0.conv1": 16, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 6, + 
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 16, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 16, + "down_blocks.1.resnets.0.time_emb_proj": 16, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 16, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 16, + "up_blocks.2.resnets.2.conv2": 16, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": 16, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 16, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": 16, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 16, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 16, + "down_blocks.0.resnets.1.time_emb_proj": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": 8, + "down_blocks.1.attentions.0.proj_out": 16, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 16, + "up_blocks.2.attentions.1.proj_out": 16, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": 16, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": 16, + "up_blocks.2.attentions.1.proj_in": 16, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 16, + "down_blocks.1.attentions.0.proj_in": 16, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": 16, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": 16, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.2.resnets.2.time_emb_proj": 16, + "up_blocks.2.attentions.0.proj_in": 16, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 16, + "down_blocks.0.attentions.0.proj_out": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": 16, + "down_blocks.0.resnets.0.conv1": 16, + "down_blocks.1.attentions.1.proj_in": 16, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 16, + "down_blocks.0.resnets.1.conv2": 16, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": 16, + "down_blocks.0.attentions.1.proj_out": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": 16, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": 16, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.2.attentions.0.proj_out": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": 16, 
+ "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.3.attentions.0.proj_out": 16, + "up_blocks.2.resnets.1.conv_shortcut": 16, + "down_blocks.0.attentions.0.proj_in": 16, + "up_blocks.2.attentions.2.proj_out": 16, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": 16, + "down_blocks.0.attentions.1.proj_in": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 16, + "down_blocks.1.resnets.0.conv_shortcut": 16, + "up_blocks.2.upsamplers.0.conv": 16, + "down_blocks.0.resnets.1.conv1": 16, + "up_blocks.2.attentions.2.proj_in": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": 16, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 8, + "down_blocks.0.resnets.0.time_emb_proj": 16, + "up_blocks.3.resnets.0.conv2": 16, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": 16, + "up_blocks.2.resnets.2.conv_shortcut": 16, + "up_blocks.3.resnets.0.conv1": 16, + "down_blocks.0.downsamplers.0.conv": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": 16, + "up_blocks.3.attentions.0.proj_in": 16, + "up_blocks.3.resnets.1.time_emb_proj": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": 8, + "up_blocks.3.attentions.1.proj_out": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": 16, + "down_blocks.0.resnets.0.conv2": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.3.attentions.2.proj_out": 16, + "up_blocks.2.resnets.0.conv_shortcut": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.3.attentions.1.proj_in": 16, + "up_blocks.3.resnets.1.conv1": 16, + "up_blocks.3.resnets.2.conv1": 16, + "up_blocks.3.resnets.1.conv2": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.3.resnets.1.conv_shortcut": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.3.resnets.2.time_emb_proj": 16, + "up_blocks.3.attentions.2.proj_in": 16, + "up_blocks.3.resnets.2.conv2": 16, + "time_embedding.linear_1": 8, + "time_embedding.linear_2": 16, + "up_blocks.3.resnets.2.conv_shortcut": 16, + "up_blocks.3.resnets.0.conv_shortcut": 16 + }, + "recipe_6.09_bit_mixedpalette": { + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.resnets.1.time_emb_proj": 1, + "up_blocks.0.resnets.1.time_emb_proj": 1, + "down_blocks.3.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 1, + "up_blocks.0.resnets.0.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 1, + "up_blocks.0.resnets.2.time_emb_proj": 1, + "down_blocks.3.resnets.0.time_emb_proj": 1, + "mid_block.resnets.0.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.attentions.0.proj_in": 1, + "up_blocks.0.resnets.2.conv_shortcut": 4, + "mid_block.attentions.0.proj_out": 2, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 2, + 
"down_blocks.3.resnets.1.conv1": 4, + "mid_block.resnets.0.conv1": 2, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.0.resnets.0.conv_shortcut": 4, + "down_blocks.3.resnets.1.conv2": 4, + "up_blocks.0.resnets.0.conv1": 4, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.0.resnets.1.conv_shortcut": 4, + "up_blocks.1.resnets.2.time_emb_proj": 4, + "down_blocks.3.resnets.0.conv2": 4, + "down_blocks.3.resnets.0.conv1": 4, + "up_blocks.0.resnets.1.conv1": 4, + "up_blocks.1.resnets.1.time_emb_proj": 6, + "mid_block.resnets.0.conv2": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 4, + "mid_block.resnets.1.conv1": 4, + "up_blocks.2.resnets.0.time_emb_proj": 4, + "up_blocks.0.resnets.0.conv2": 4, + "mid_block.resnets.1.conv2": 4, + "up_blocks.0.resnets.2.conv1": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.0.resnets.1.conv2": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.2.downsamplers.0.conv": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.1.resnets.0.time_emb_proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 4, + "down_blocks.2.resnets.1.conv2": 6, + "up_blocks.1.attentions.1.proj_out": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.1.resnets.2.conv2": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 4, + "up_blocks.1.resnets.0.conv1": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.resnets.1.conv1": 6, + "down_blocks.2.resnets.0.time_emb_proj": 6, + "down_blocks.2.resnets.1.time_emb_proj": 4, + "up_blocks.1.attentions.0.proj_out": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.1.resnets.2.conv1": 6, + "up_blocks.1.resnets.1.conv1": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + 
"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.resnets.1.conv2": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.resnets.0.conv2": 6, + "down_blocks.2.attentions.0.proj_in": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.resnets.0.conv1": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.0.resnets.2.conv2": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.2.resnets.0.conv1": 8, + "down_blocks.1.resnets.1.time_emb_proj": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.attentions.1.proj_out": 8, + "down_blocks.2.attentions.1.proj_in": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.1.resnets.1.conv_shortcut": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.upsamplers.0.conv": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.resnets.1.conv1": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.resnets.0.conv_shortcut": 6, + "up_blocks.1.resnets.2.conv_shortcut": 6, + "down_blocks.2.attentions.1.proj_out": 8, + "up_blocks.1.attentions.1.proj_in": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.0.upsamplers.0.conv": 8, + "up_blocks.2.resnets.1.time_emb_proj": 6, + "down_blocks.2.attentions.0.proj_out": 6, + "up_blocks.1.attentions.0.proj_in": 8, + "up_blocks.2.resnets.1.conv2": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.2.resnets.0.conv2": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.1.attentions.2.proj_out": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 8, + 
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.1.attentions.2.proj_in": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": 8, + "down_blocks.1.resnets.1.conv2": 8, + "up_blocks.3.resnets.0.time_emb_proj": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.1.resnets.1.conv1": 8, + "down_blocks.2.resnets.0.conv_shortcut": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.resnets.0.conv2": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.resnets.2.conv1": 8, + "down_blocks.1.downsamplers.0.conv": 8, + "down_blocks.1.resnets.0.conv2": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 8, + "down_blocks.1.resnets.0.conv1": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.resnets.0.time_emb_proj": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.resnets.2.conv2": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 16, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": 16, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.0.resnets.1.time_emb_proj": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.attentions.0.proj_out": 16, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.attentions.1.proj_out": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": 16, + "up_blocks.2.attentions.1.proj_in": 16, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 16, + "down_blocks.1.attentions.0.proj_in": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 8, + 
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.2.resnets.2.time_emb_proj": 8, + "up_blocks.2.attentions.0.proj_in": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 16, + "down_blocks.0.attentions.0.proj_out": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.0.resnets.0.conv1": 16, + "down_blocks.1.attentions.1.proj_in": 16, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 16, + "down_blocks.0.resnets.1.conv2": 16, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": 8, + "down_blocks.0.attentions.1.proj_out": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": 16, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.2.attentions.0.proj_out": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.3.attentions.0.proj_out": 16, + "up_blocks.2.resnets.1.conv_shortcut": 8, + "down_blocks.0.attentions.0.proj_in": 16, + "up_blocks.2.attentions.2.proj_out": 16, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": 8, + "down_blocks.0.attentions.1.proj_in": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 16, + "down_blocks.1.resnets.0.conv_shortcut": 16, + "up_blocks.2.upsamplers.0.conv": 16, + "down_blocks.0.resnets.1.conv1": 16, + "up_blocks.2.attentions.2.proj_in": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 8, + "down_blocks.0.resnets.0.time_emb_proj": 16, + "up_blocks.3.resnets.0.conv2": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": 16, + "up_blocks.2.resnets.2.conv_shortcut": 8, + "up_blocks.3.resnets.0.conv1": 16, + "down_blocks.0.downsamplers.0.conv": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.3.attentions.0.proj_in": 16, + "up_blocks.3.resnets.1.time_emb_proj": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": 8, + "up_blocks.3.attentions.1.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.resnets.0.conv2": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.3.attentions.2.proj_out": 8, + "up_blocks.2.resnets.0.conv_shortcut": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.proj_in": 16, + "up_blocks.3.resnets.1.conv1": 16, + "up_blocks.3.resnets.2.conv1": 16, + "up_blocks.3.resnets.1.conv2": 16, + 
"up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.3.resnets.1.conv_shortcut": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.3.resnets.2.time_emb_proj": 8, + "up_blocks.3.attentions.2.proj_in": 16, + "up_blocks.3.resnets.2.conv2": 8, + "time_embedding.linear_1": 8, + "time_embedding.linear_2": 16, + "up_blocks.3.resnets.2.conv_shortcut": 16, + "up_blocks.3.resnets.0.conv_shortcut": 16 + }, + "recipe_5.03_bit_mixedpalette": { + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.resnets.1.time_emb_proj": 1, + "up_blocks.0.resnets.1.time_emb_proj": 1, + "down_blocks.3.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 1, + "up_blocks.0.resnets.0.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 1, + "up_blocks.0.resnets.2.time_emb_proj": 1, + "down_blocks.3.resnets.0.time_emb_proj": 1, + "mid_block.resnets.0.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.attentions.0.proj_in": 1, + "up_blocks.0.resnets.2.conv_shortcut": 1, + "mid_block.attentions.0.proj_out": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 1, + "down_blocks.3.resnets.1.conv1": 2, + "mid_block.resnets.0.conv1": 2, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.0.resnets.0.conv_shortcut": 2, + "down_blocks.3.resnets.1.conv2": 4, + "up_blocks.0.resnets.0.conv1": 2, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 2, + "up_blocks.0.resnets.1.conv_shortcut": 2, + "up_blocks.1.resnets.2.time_emb_proj": 4, + "down_blocks.3.resnets.0.conv2": 2, + "down_blocks.3.resnets.0.conv1": 4, + "up_blocks.0.resnets.1.conv1": 4, + "up_blocks.1.resnets.1.time_emb_proj": 4, + "mid_block.resnets.0.conv2": 2, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 2, + "mid_block.resnets.1.conv1": 2, + "up_blocks.2.resnets.0.time_emb_proj": 4, + "up_blocks.0.resnets.0.conv2": 2, + "mid_block.resnets.1.conv2": 2, + "up_blocks.0.resnets.2.conv1": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 2, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.0.resnets.1.conv2": 2, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 4, + "down_blocks.2.downsamplers.0.conv": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.resnets.0.time_emb_proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 4, + "down_blocks.2.resnets.1.conv2": 4, + "up_blocks.1.attentions.1.proj_out": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 6, + 
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.1.resnets.2.conv2": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 4, + "up_blocks.1.resnets.0.conv1": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.resnets.1.conv1": 6, + "down_blocks.2.resnets.0.time_emb_proj": 6, + "down_blocks.2.resnets.1.time_emb_proj": 4, + "up_blocks.1.attentions.0.proj_out": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.1.resnets.2.conv1": 4, + "up_blocks.1.resnets.1.conv1": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.resnets.1.conv2": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.resnets.0.conv2": 6, + "down_blocks.2.attentions.0.proj_in": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.resnets.0.conv1": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.0.resnets.2.conv2": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.2.resnets.0.conv1": 6, + "down_blocks.1.resnets.1.time_emb_proj": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.attentions.1.proj_out": 8, + "down_blocks.2.attentions.1.proj_in": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.1.resnets.1.conv_shortcut": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.upsamplers.0.conv": 6, + 
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.resnets.1.conv1": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.resnets.0.conv_shortcut": 6, + "up_blocks.1.resnets.2.conv_shortcut": 6, + "down_blocks.2.attentions.1.proj_out": 6, + "up_blocks.1.attentions.1.proj_in": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.0.upsamplers.0.conv": 6, + "up_blocks.2.resnets.1.time_emb_proj": 6, + "down_blocks.2.attentions.0.proj_out": 6, + "up_blocks.1.attentions.0.proj_in": 8, + "up_blocks.2.resnets.1.conv2": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.2.resnets.0.conv2": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.1.attentions.2.proj_out": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.2.proj_in": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.resnets.1.conv2": 6, + "up_blocks.3.resnets.0.time_emb_proj": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.1.resnets.1.conv1": 8, + "down_blocks.2.resnets.0.conv_shortcut": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.resnets.0.conv2": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.resnets.2.conv1": 8, + "down_blocks.1.downsamplers.0.conv": 8, + "down_blocks.1.resnets.0.conv2": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 8, + "down_blocks.1.resnets.0.conv1": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.resnets.0.time_emb_proj": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.resnets.2.conv2": 8, + 
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.0.resnets.1.time_emb_proj": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.attentions.0.proj_out": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.attentions.1.proj_out": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.attentions.1.proj_in": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.attentions.0.proj_in": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.resnets.2.time_emb_proj": 8, + "up_blocks.2.attentions.0.proj_in": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.0.attentions.0.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.0.resnets.0.conv1": 8, + "down_blocks.1.attentions.1.proj_in": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.0.resnets.1.conv2": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.1.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.0.proj_out": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.0.proj_out": 8, + "up_blocks.2.resnets.1.conv_shortcut": 8, + "down_blocks.0.attentions.0.proj_in": 8, + "up_blocks.2.attentions.2.proj_out": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": 8, + "down_blocks.0.attentions.1.proj_in": 16, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.resnets.0.conv_shortcut": 8, + 
"up_blocks.2.upsamplers.0.conv": 8, + "down_blocks.0.resnets.1.conv1": 16, + "up_blocks.2.attentions.2.proj_in": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.resnets.0.time_emb_proj": 8, + "up_blocks.3.resnets.0.conv2": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.resnets.2.conv_shortcut": 8, + "up_blocks.3.resnets.0.conv1": 8, + "down_blocks.0.downsamplers.0.conv": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.3.attentions.0.proj_in": 8, + "up_blocks.3.resnets.1.time_emb_proj": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": 8, + "up_blocks.3.attentions.1.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.resnets.0.conv2": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.3.attentions.2.proj_out": 8, + "up_blocks.2.resnets.0.conv_shortcut": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.proj_in": 8, + "up_blocks.3.resnets.1.conv1": 8, + "up_blocks.3.resnets.2.conv1": 8, + "up_blocks.3.resnets.1.conv2": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.resnets.1.conv_shortcut": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.resnets.2.time_emb_proj": 8, + "up_blocks.3.attentions.2.proj_in": 8, + "up_blocks.3.resnets.2.conv2": 8, + "time_embedding.linear_1": 6, + "time_embedding.linear_2": 8, + "up_blocks.3.resnets.2.conv_shortcut": 8, + "up_blocks.3.resnets.0.conv_shortcut": 8 + }, + "recipe_4.59_bit_mixedpalette": { + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.resnets.1.time_emb_proj": 1, + "up_blocks.0.resnets.1.time_emb_proj": 1, + "down_blocks.3.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 1, + "up_blocks.0.resnets.0.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 1, + "up_blocks.0.resnets.2.time_emb_proj": 1, + "down_blocks.3.resnets.0.time_emb_proj": 1, + "mid_block.resnets.0.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.attentions.0.proj_in": 1, + "up_blocks.0.resnets.2.conv_shortcut": 1, + "mid_block.attentions.0.proj_out": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 1, + "down_blocks.3.resnets.1.conv1": 2, + "mid_block.resnets.0.conv1": 1, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 1, + "up_blocks.0.resnets.0.conv_shortcut": 2, + "down_blocks.3.resnets.1.conv2": 2, + "up_blocks.0.resnets.0.conv1": 2, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 2, + "up_blocks.0.resnets.1.conv_shortcut": 2, + "up_blocks.1.resnets.2.time_emb_proj": 4, + "down_blocks.3.resnets.0.conv2": 2, + 
"down_blocks.3.resnets.0.conv1": 4, + "up_blocks.0.resnets.1.conv1": 2, + "up_blocks.1.resnets.1.time_emb_proj": 2, + "mid_block.resnets.0.conv2": 2, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 2, + "mid_block.resnets.1.conv1": 2, + "up_blocks.2.resnets.0.time_emb_proj": 4, + "up_blocks.0.resnets.0.conv2": 2, + "mid_block.resnets.1.conv2": 2, + "up_blocks.0.resnets.2.conv1": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 2, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.0.resnets.1.conv2": 2, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 4, + "down_blocks.2.downsamplers.0.conv": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.resnets.0.time_emb_proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 2, + "down_blocks.2.resnets.1.conv2": 4, + "up_blocks.1.attentions.1.proj_out": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.1.resnets.2.conv2": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 4, + "up_blocks.1.resnets.0.conv1": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.resnets.1.conv1": 6, + "down_blocks.2.resnets.0.time_emb_proj": 6, + "down_blocks.2.resnets.1.time_emb_proj": 4, + "up_blocks.1.attentions.0.proj_out": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.1.resnets.2.conv1": 4, + "up_blocks.1.resnets.1.conv1": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.resnets.1.conv2": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.resnets.0.conv2": 6, + "down_blocks.2.attentions.0.proj_in": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.resnets.0.conv1": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.0.resnets.2.conv2": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 4, + 
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.2.resnets.0.conv1": 6, + "down_blocks.1.resnets.1.time_emb_proj": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.attentions.1.proj_out": 8, + "down_blocks.2.attentions.1.proj_in": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.1.resnets.1.conv_shortcut": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.upsamplers.0.conv": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.resnets.1.conv1": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.resnets.0.conv_shortcut": 6, + "up_blocks.1.resnets.2.conv_shortcut": 6, + "down_blocks.2.attentions.1.proj_out": 6, + "up_blocks.1.attentions.1.proj_in": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.0.upsamplers.0.conv": 6, + "up_blocks.2.resnets.1.time_emb_proj": 6, + "down_blocks.2.attentions.0.proj_out": 6, + "up_blocks.1.attentions.0.proj_in": 6, + "up_blocks.2.resnets.1.conv2": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.2.resnets.0.conv2": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.1.attentions.2.proj_out": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.2.proj_in": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.resnets.1.conv2": 6, + "up_blocks.3.resnets.0.time_emb_proj": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 6, + 
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.1.resnets.1.conv1": 8, + "down_blocks.2.resnets.0.conv_shortcut": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.resnets.0.conv2": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.resnets.2.conv1": 8, + "down_blocks.1.downsamplers.0.conv": 8, + "down_blocks.1.resnets.0.conv2": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.resnets.0.conv1": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.resnets.0.time_emb_proj": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.resnets.2.conv2": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.0.resnets.1.time_emb_proj": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.attentions.0.proj_out": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.attentions.1.proj_out": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.attentions.1.proj_in": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.attentions.0.proj_in": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.resnets.2.time_emb_proj": 6, + "up_blocks.2.attentions.0.proj_in": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + 
"down_blocks.0.attentions.0.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.0.resnets.0.conv1": 8, + "down_blocks.1.attentions.1.proj_in": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.0.resnets.1.conv2": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.1.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.0.proj_out": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.0.proj_out": 8, + "up_blocks.2.resnets.1.conv_shortcut": 8, + "down_blocks.0.attentions.0.proj_in": 8, + "up_blocks.2.attentions.2.proj_out": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.0.attentions.1.proj_in": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.resnets.0.conv_shortcut": 8, + "up_blocks.2.upsamplers.0.conv": 8, + "down_blocks.0.resnets.1.conv1": 8, + "up_blocks.2.attentions.2.proj_in": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.resnets.0.time_emb_proj": 8, + "up_blocks.3.resnets.0.conv2": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.resnets.2.conv_shortcut": 8, + "up_blocks.3.resnets.0.conv1": 8, + "down_blocks.0.downsamplers.0.conv": 16, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.3.attentions.0.proj_in": 8, + "up_blocks.3.resnets.1.time_emb_proj": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.1.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.resnets.0.conv2": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.2.proj_out": 8, + "up_blocks.2.resnets.0.conv_shortcut": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.proj_in": 8, + "up_blocks.3.resnets.1.conv1": 8, + "up_blocks.3.resnets.2.conv1": 8, + "up_blocks.3.resnets.1.conv2": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.resnets.1.conv_shortcut": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.resnets.2.time_emb_proj": 6, + "up_blocks.3.attentions.2.proj_in": 8, + "up_blocks.3.resnets.2.conv2": 8, + "time_embedding.linear_1": 6, + "time_embedding.linear_2": 8, + "up_blocks.3.resnets.2.conv_shortcut": 8, + "up_blocks.3.resnets.0.conv_shortcut": 8 + }, + "recipe_4.21_bit_mixedpalette": { + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.resnets.1.time_emb_proj": 1, + "up_blocks.0.resnets.1.time_emb_proj": 1, + 
"down_blocks.3.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 1, + "up_blocks.0.resnets.0.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 1, + "up_blocks.0.resnets.2.time_emb_proj": 1, + "down_blocks.3.resnets.0.time_emb_proj": 1, + "mid_block.resnets.0.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.attentions.0.proj_in": 1, + "up_blocks.0.resnets.2.conv_shortcut": 1, + "mid_block.attentions.0.proj_out": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 1, + "down_blocks.3.resnets.1.conv1": 1, + "mid_block.resnets.0.conv1": 1, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 1, + "up_blocks.0.resnets.0.conv_shortcut": 1, + "down_blocks.3.resnets.1.conv2": 1, + "up_blocks.0.resnets.0.conv1": 2, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 2, + "up_blocks.0.resnets.1.conv_shortcut": 2, + "up_blocks.1.resnets.2.time_emb_proj": 4, + "down_blocks.3.resnets.0.conv2": 2, + "down_blocks.3.resnets.0.conv1": 4, + "up_blocks.0.resnets.1.conv1": 1, + "up_blocks.1.resnets.1.time_emb_proj": 1, + "mid_block.resnets.0.conv2": 2, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 1, + "mid_block.resnets.1.conv1": 1, + "up_blocks.2.resnets.0.time_emb_proj": 4, + "up_blocks.0.resnets.0.conv2": 2, + "mid_block.resnets.1.conv2": 2, + "up_blocks.0.resnets.2.conv1": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 2, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.0.resnets.1.conv2": 2, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 4, + "down_blocks.2.downsamplers.0.conv": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.resnets.0.time_emb_proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 2, + "down_blocks.2.resnets.1.conv2": 4, + "up_blocks.1.attentions.1.proj_out": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.1.resnets.2.conv2": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + 
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 4, + "up_blocks.1.resnets.0.conv1": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 2, + "down_blocks.2.resnets.1.conv1": 2, + "down_blocks.2.resnets.0.time_emb_proj": 6, + "down_blocks.2.resnets.1.time_emb_proj": 4, + "up_blocks.1.attentions.0.proj_out": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.1.resnets.2.conv1": 4, + "up_blocks.1.resnets.1.conv1": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.resnets.1.conv2": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.resnets.0.conv2": 6, + "down_blocks.2.attentions.0.proj_in": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.resnets.0.conv1": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.0.resnets.2.conv2": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.2.resnets.0.conv1": 6, + "down_blocks.1.resnets.1.time_emb_proj": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.1.proj_out": 8, + "down_blocks.2.attentions.1.proj_in": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.1.resnets.1.conv_shortcut": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.upsamplers.0.conv": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.resnets.1.conv1": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.resnets.0.conv_shortcut": 6, + "up_blocks.1.resnets.2.conv_shortcut": 6, + "down_blocks.2.attentions.1.proj_out": 4, + "up_blocks.1.attentions.1.proj_in": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.0.upsamplers.0.conv": 6, + "up_blocks.2.resnets.1.time_emb_proj": 4, + "down_blocks.2.attentions.0.proj_out": 6, + "up_blocks.1.attentions.0.proj_in": 6, + "up_blocks.2.resnets.1.conv2": 6, + 
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.2.resnets.0.conv2": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.1.attentions.2.proj_out": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.2.proj_in": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.resnets.1.conv2": 6, + "up_blocks.3.resnets.0.time_emb_proj": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.1.resnets.1.conv1": 6, + "down_blocks.2.resnets.0.conv_shortcut": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.resnets.0.conv2": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.resnets.2.conv1": 6, + "down_blocks.1.downsamplers.0.conv": 8, + "down_blocks.1.resnets.0.conv2": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.resnets.0.conv1": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.resnets.0.time_emb_proj": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.resnets.2.conv2": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.0.resnets.1.time_emb_proj": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": 6, + 
"down_blocks.1.attentions.0.proj_out": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.attentions.1.proj_out": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.attentions.1.proj_in": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.attentions.0.proj_in": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.resnets.2.time_emb_proj": 6, + "up_blocks.2.attentions.0.proj_in": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.0.attentions.0.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.0.resnets.0.conv1": 8, + "down_blocks.1.attentions.1.proj_in": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.0.resnets.1.conv2": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.1.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.0.proj_out": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.0.proj_out": 8, + "up_blocks.2.resnets.1.conv_shortcut": 8, + "down_blocks.0.attentions.0.proj_in": 8, + "up_blocks.2.attentions.2.proj_out": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.0.attentions.1.proj_in": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.resnets.0.conv_shortcut": 8, + "up_blocks.2.upsamplers.0.conv": 8, + "down_blocks.0.resnets.1.conv1": 8, + "up_blocks.2.attentions.2.proj_in": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.resnets.0.time_emb_proj": 8, + "up_blocks.3.resnets.0.conv2": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.resnets.2.conv_shortcut": 8, + "up_blocks.3.resnets.0.conv1": 8, + "down_blocks.0.downsamplers.0.conv": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.3.attentions.0.proj_in": 8, + 
"up_blocks.3.resnets.1.time_emb_proj": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.1.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.0.resnets.0.conv2": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.2.proj_out": 8, + "up_blocks.2.resnets.0.conv_shortcut": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.proj_in": 8, + "up_blocks.3.resnets.1.conv1": 8, + "up_blocks.3.resnets.2.conv1": 8, + "up_blocks.3.resnets.1.conv2": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.resnets.1.conv_shortcut": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.resnets.2.time_emb_proj": 6, + "up_blocks.3.attentions.2.proj_in": 8, + "up_blocks.3.resnets.2.conv2": 8, + "time_embedding.linear_1": 6, + "time_embedding.linear_2": 8, + "up_blocks.3.resnets.2.conv_shortcut": 8, + "up_blocks.3.resnets.0.conv_shortcut": 8 + }, + "recipe_4.00_bit_mixedpalette": { + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.resnets.1.time_emb_proj": 1, + "up_blocks.0.resnets.1.time_emb_proj": 1, + "down_blocks.3.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 1, + "up_blocks.0.resnets.0.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 1, + "up_blocks.0.resnets.2.time_emb_proj": 1, + "down_blocks.3.resnets.0.time_emb_proj": 1, + "mid_block.resnets.0.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.attentions.0.proj_in": 1, + "up_blocks.0.resnets.2.conv_shortcut": 1, + "mid_block.attentions.0.proj_out": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 1, + "down_blocks.3.resnets.1.conv1": 1, + "mid_block.resnets.0.conv1": 1, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 1, + "up_blocks.0.resnets.0.conv_shortcut": 1, + "down_blocks.3.resnets.1.conv2": 1, + "up_blocks.0.resnets.0.conv1": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 1, + "up_blocks.0.resnets.1.conv_shortcut": 1, + "up_blocks.1.resnets.2.time_emb_proj": 1, + "down_blocks.3.resnets.0.conv2": 1, + "down_blocks.3.resnets.0.conv1": 4, + "up_blocks.0.resnets.1.conv1": 1, + "up_blocks.1.resnets.1.time_emb_proj": 1, + "mid_block.resnets.0.conv2": 1, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 1, + "mid_block.resnets.1.conv1": 1, + "up_blocks.2.resnets.0.time_emb_proj": 1, + "up_blocks.0.resnets.0.conv2": 2, + "mid_block.resnets.1.conv2": 2, + "up_blocks.0.resnets.2.conv1": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 2, + 
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.0.resnets.1.conv2": 2, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 2, + "down_blocks.2.downsamplers.0.conv": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.resnets.0.time_emb_proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 2, + "down_blocks.2.resnets.1.conv2": 4, + "up_blocks.1.attentions.1.proj_out": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 2, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 2, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 2, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 2, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 2, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 2, + "up_blocks.1.resnets.2.conv2": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 4, + "up_blocks.1.resnets.0.conv1": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 2, + "down_blocks.2.resnets.1.conv1": 2, + "down_blocks.2.resnets.0.time_emb_proj": 4, + "down_blocks.2.resnets.1.time_emb_proj": 4, + "up_blocks.1.attentions.0.proj_out": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.1.resnets.2.conv1": 4, + "up_blocks.1.resnets.1.conv1": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.resnets.1.conv2": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.resnets.0.conv2": 6, + "down_blocks.2.attentions.0.proj_in": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.resnets.0.conv1": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.0.resnets.2.conv2": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.2.resnets.0.conv1": 6, + "down_blocks.1.resnets.1.time_emb_proj": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.1.proj_out": 6, + 
"down_blocks.2.attentions.1.proj_in": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.1.resnets.1.conv_shortcut": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.upsamplers.0.conv": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.resnets.1.conv1": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.resnets.0.conv_shortcut": 6, + "up_blocks.1.resnets.2.conv_shortcut": 6, + "down_blocks.2.attentions.1.proj_out": 4, + "up_blocks.1.attentions.1.proj_in": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.0.upsamplers.0.conv": 6, + "up_blocks.2.resnets.1.time_emb_proj": 4, + "down_blocks.2.attentions.0.proj_out": 4, + "up_blocks.1.attentions.0.proj_in": 6, + "up_blocks.2.resnets.1.conv2": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.2.resnets.0.conv2": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.1.attentions.2.proj_out": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.2.proj_in": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.resnets.1.conv2": 6, + "up_blocks.3.resnets.0.time_emb_proj": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.1.resnets.1.conv1": 6, + "down_blocks.2.resnets.0.conv_shortcut": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.resnets.0.conv2": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.resnets.2.conv1": 6, + "down_blocks.1.downsamplers.0.conv": 6, + 
"down_blocks.1.resnets.0.conv2": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.resnets.0.conv1": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 4, + "down_blocks.1.resnets.0.time_emb_proj": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.resnets.2.conv2": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.0.resnets.1.time_emb_proj": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.0.proj_out": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.attentions.1.proj_out": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.attentions.1.proj_in": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.attentions.0.proj_in": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.resnets.2.time_emb_proj": 4, + "up_blocks.2.attentions.0.proj_in": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.0.attentions.0.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.0.resnets.0.conv1": 8, + "down_blocks.1.attentions.1.proj_in": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.0.resnets.1.conv2": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.1.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + 
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.0.proj_out": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.0.proj_out": 8, + "up_blocks.2.resnets.1.conv_shortcut": 8, + "down_blocks.0.attentions.0.proj_in": 8, + "up_blocks.2.attentions.2.proj_out": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.0.attentions.1.proj_in": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.resnets.0.conv_shortcut": 8, + "up_blocks.2.upsamplers.0.conv": 8, + "down_blocks.0.resnets.1.conv1": 8, + "up_blocks.2.attentions.2.proj_in": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.resnets.0.time_emb_proj": 6, + "up_blocks.3.resnets.0.conv2": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.2.resnets.2.conv_shortcut": 8, + "up_blocks.3.resnets.0.conv1": 8, + "down_blocks.0.downsamplers.0.conv": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.3.attentions.0.proj_in": 8, + "up_blocks.3.resnets.1.time_emb_proj": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.1.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.0.resnets.0.conv2": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.2.proj_out": 8, + "up_blocks.2.resnets.0.conv_shortcut": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.proj_in": 8, + "up_blocks.3.resnets.1.conv1": 8, + "up_blocks.3.resnets.2.conv1": 8, + "up_blocks.3.resnets.1.conv2": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.resnets.1.conv_shortcut": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.resnets.2.time_emb_proj": 6, + "up_blocks.3.attentions.2.proj_in": 8, + "up_blocks.3.resnets.2.conv2": 8, + "time_embedding.linear_1": 6, + "time_embedding.linear_2": 8, + "up_blocks.3.resnets.2.conv_shortcut": 8, + "up_blocks.3.resnets.0.conv_shortcut": 8 + }, + "recipe_3.71_bit_mixedpalette": { + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.resnets.1.time_emb_proj": 1, + "up_blocks.0.resnets.1.time_emb_proj": 1, + "down_blocks.3.resnets.1.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 1, + "up_blocks.0.resnets.0.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 1, + "up_blocks.0.resnets.2.time_emb_proj": 1, + "down_blocks.3.resnets.0.time_emb_proj": 1, + "mid_block.resnets.0.time_emb_proj": 1, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.attentions.0.proj_in": 1, + "up_blocks.0.resnets.2.conv_shortcut": 1, + "mid_block.attentions.0.proj_out": 1, + 
"mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 1, + "down_blocks.3.resnets.1.conv1": 1, + "mid_block.resnets.0.conv1": 1, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 1, + "up_blocks.0.resnets.0.conv_shortcut": 1, + "down_blocks.3.resnets.1.conv2": 1, + "up_blocks.0.resnets.0.conv1": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 1, + "up_blocks.0.resnets.1.conv_shortcut": 1, + "up_blocks.1.resnets.2.time_emb_proj": 1, + "down_blocks.3.resnets.0.conv2": 1, + "down_blocks.3.resnets.0.conv1": 4, + "up_blocks.0.resnets.1.conv1": 1, + "up_blocks.1.resnets.1.time_emb_proj": 1, + "mid_block.resnets.0.conv2": 1, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 1, + "mid_block.resnets.1.conv1": 1, + "up_blocks.2.resnets.0.time_emb_proj": 1, + "up_blocks.0.resnets.0.conv2": 1, + "mid_block.resnets.1.conv2": 1, + "up_blocks.0.resnets.2.conv1": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 2, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.0.resnets.1.conv2": 2, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 2, + "down_blocks.2.downsamplers.0.conv": 2, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.resnets.0.time_emb_proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 2, + "down_blocks.2.resnets.1.conv2": 4, + "up_blocks.1.attentions.1.proj_out": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 2, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 2, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 2, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 2, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 2, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 2, + "up_blocks.1.resnets.2.conv2": 2, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 4, + "up_blocks.1.resnets.0.conv1": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 2, + "down_blocks.2.resnets.1.conv1": 2, + "down_blocks.2.resnets.0.time_emb_proj": 4, + "down_blocks.2.resnets.1.time_emb_proj": 4, + "up_blocks.1.attentions.0.proj_out": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.1.resnets.2.conv1": 4, + "up_blocks.1.resnets.1.conv1": 4, 
+ "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 2, + "up_blocks.1.resnets.1.conv2": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.resnets.0.conv2": 6, + "down_blocks.2.attentions.0.proj_in": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.resnets.0.conv1": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.0.resnets.2.conv2": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.2.resnets.0.conv1": 6, + "down_blocks.1.resnets.1.time_emb_proj": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.1.proj_out": 6, + "down_blocks.2.attentions.1.proj_in": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.1.resnets.1.conv_shortcut": 6, + "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.upsamplers.0.conv": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.resnets.1.conv1": 4, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.resnets.0.conv_shortcut": 6, + "up_blocks.1.resnets.2.conv_shortcut": 6, + "down_blocks.2.attentions.1.proj_out": 4, + "up_blocks.1.attentions.1.proj_in": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.0.upsamplers.0.conv": 6, + "up_blocks.2.resnets.1.time_emb_proj": 4, + "down_blocks.2.attentions.0.proj_out": 4, + "up_blocks.1.attentions.0.proj_in": 6, + "up_blocks.2.resnets.1.conv2": 4, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.2.resnets.0.conv2": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.2.proj_out": 4, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 4, + 
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.2.proj_in": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.resnets.1.conv2": 6, + "up_blocks.3.resnets.0.time_emb_proj": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.1.resnets.1.conv1": 6, + "down_blocks.2.resnets.0.conv_shortcut": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.resnets.0.conv2": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.resnets.2.conv1": 6, + "down_blocks.1.downsamplers.0.conv": 6, + "down_blocks.1.resnets.0.conv2": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.resnets.0.conv1": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 4, + "down_blocks.1.resnets.0.time_emb_proj": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.2.resnets.2.conv2": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 8, + "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.0.resnets.1.time_emb_proj": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.0.proj_out": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 6, + "up_blocks.2.attentions.1.proj_out": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.2.attentions.1.proj_in": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.attentions.0.proj_in": 8, + 
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.2.resnets.2.time_emb_proj": 4, + "up_blocks.2.attentions.0.proj_in": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.0.attentions.0.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.0.resnets.0.conv1": 8, + "down_blocks.1.attentions.1.proj_in": 6, + "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.0.resnets.1.conv2": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.0.attentions.1.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.2.attentions.0.proj_out": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.0.proj_out": 8, + "up_blocks.2.resnets.1.conv_shortcut": 6, + "down_blocks.0.attentions.0.proj_in": 8, + "up_blocks.2.attentions.2.proj_out": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.0.attentions.1.proj_in": 6, + "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.resnets.0.conv_shortcut": 8, + "up_blocks.2.upsamplers.0.conv": 6, + "down_blocks.0.resnets.1.conv1": 8, + "up_blocks.2.attentions.2.proj_in": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.resnets.0.time_emb_proj": 6, + "up_blocks.3.resnets.0.conv2": 8, + "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": 6, + "up_blocks.2.resnets.2.conv_shortcut": 8, + "up_blocks.3.resnets.0.conv1": 8, + "down_blocks.0.downsamplers.0.conv": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.3.attentions.0.proj_in": 8, + "up_blocks.3.resnets.1.time_emb_proj": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.3.attentions.1.proj_out": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.0.resnets.0.conv2": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.attentions.2.proj_out": 8, + "up_blocks.2.resnets.0.conv_shortcut": 6, + "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.3.attentions.1.proj_in": 8, + "up_blocks.3.resnets.1.conv1": 8, + "up_blocks.3.resnets.2.conv1": 8, + 
"up_blocks.3.resnets.1.conv2": 8, + "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.3.resnets.1.conv_shortcut": 8, + "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.3.resnets.2.time_emb_proj": 6, + "up_blocks.3.attentions.2.proj_in": 8, + "up_blocks.3.resnets.2.conv2": 8, + "time_embedding.linear_1": 4, + "time_embedding.linear_2": 8, + "up_blocks.3.resnets.2.conv_shortcut": 8, + "up_blocks.3.resnets.0.conv_shortcut": 8 + } + } +} \ No newline at end of file diff --git a/recipes/stabilityai-stable-diffusion-xl-base-1.0_palettization_recipe.json b/recipes/stabilityai-stable-diffusion-xl-base-1.0_palettization_recipe.json new file mode 100644 index 0000000000000000000000000000000000000000..d0d06ee8ccf84c9b7c438321866ce97766855be4 --- /dev/null +++ b/recipes/stabilityai-stable-diffusion-xl-base-1.0_palettization_recipe.json @@ -0,0 +1,60293 @@ +{ + "single_layer": { + "1": { + "time_embedding.linear_1": [ + 48.0, + 47.9, + 48.2, + 48.1 + ], + "time_embedding.linear_2": [ + 48.5, + 48.4, + 48.7, + 48.6 + ], + "add_embedding.linear_1": [ + 69.2, + 69.8, + 71.0, + 70.8 + ], + "add_embedding.linear_2": [ + 73.4, + 68.1, + 71.1, + 72.5 + ], + "down_blocks.0.resnets.0.conv1": [ + 55.0, + 55.7, + 55.2, + 55.4 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 55.9, + 56.2, + 55.9, + 56.0 + ], + "down_blocks.0.resnets.0.conv2": [ + 52.3, + 52.3, + 52.5, + 52.4 + ], + "down_blocks.0.resnets.1.conv1": [ + 54.6, + 54.3, + 54.8, + 54.9 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 59.4, + 59.1, + 58.9, + 59.1 + ], + "down_blocks.0.resnets.1.conv2": [ + 55.5, + 55.3, + 55.4, + 55.6 + ], + "down_blocks.0.downsamplers.0.conv": [ + 51.9, + 51.6, + 52.1, + 51.9 + ], + "down_blocks.1.attentions.0.proj_in": [ + 58.6, + 58.2, + 58.8, + 59.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 67.7, + 68.1, + 68.4, + 68.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 68.3, + 67.9, + 68.4, + 68.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 64.4, + 64.9, + 65.3, + 65.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 62.7, + 62.5, + 63.0, + 63.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 78.7, + 77.7, + 77.8, + 78.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 64.4, + 64.8, + 66.3, + 66.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 72.4, + 75.3, + 71.2, + 71.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 75.8, + 73.6, + 75.3, + 75.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 59.8, + 59.3, + 60.3, + 60.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 62.6, + 61.4, + 62.7, + 63.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 66.3, + 66.4, + 66.4, + 66.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 66.5, + 66.4, + 66.8, + 66.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 62.5, + 61.7, + 62.3, + 62.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 62.3, + 62.0, + 62.4, + 62.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 64.8, + 66.7, + 64.6, + 64.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 60.0, + 60.7, + 60.1, + 60.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 66.4, + 64.9, + 
66.6, + 67.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 65.9, + 64.7, + 65.6, + 66.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 59.4, + 58.7, + 59.3, + 59.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 62.7, + 61.9, + 62.7, + 63.0 + ], + "down_blocks.1.attentions.0.proj_out": [ + 61.4, + 61.1, + 61.6, + 61.7 + ], + "down_blocks.1.attentions.1.proj_in": [ + 65.1, + 64.6, + 65.5, + 65.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 70.4, + 69.8, + 70.6, + 70.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 70.6, + 70.1, + 70.8, + 70.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 69.3, + 68.6, + 69.4, + 69.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 68.2, + 67.5, + 68.2, + 67.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 77.2, + 76.3, + 76.5, + 76.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 70.6, + 70.8, + 71.1, + 70.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 78.8, + 78.4, + 78.5, + 78.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 78.0, + 77.7, + 78.4, + 78.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 64.6, + 64.1, + 64.6, + 64.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 68.1, + 67.9, + 68.3, + 68.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 74.6, + 74.1, + 74.6, + 74.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 74.6, + 74.1, + 74.7, + 74.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 72.3, + 71.8, + 72.2, + 72.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 71.7, + 71.2, + 71.7, + 71.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 76.8, + 76.6, + 77.2, + 76.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 72.9, + 72.3, + 72.7, + 72.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 77.6, + 76.8, + 77.5, + 77.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 76.1, + 74.9, + 76.2, + 75.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 66.3, + 65.9, + 66.3, + 66.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 69.3, + 69.0, + 69.2, + 69.2 + ], + "down_blocks.1.attentions.1.proj_out": [ + 66.9, + 66.4, + 67.1, + 66.9 + ], + "down_blocks.1.resnets.0.conv1": [ + 59.8, + 60.0, + 59.7, + 59.9 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 67.7, + 67.9, + 68.1, + 68.2 + ], + "down_blocks.1.resnets.0.conv2": [ + 65.5, + 65.2, + 65.6, + 65.7 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 54.2, + 53.8, + 54.2, + 54.2 + ], + "down_blocks.1.resnets.1.conv1": [ + 66.4, + 66.2, + 66.8, + 66.5 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 71.3, + 70.6, + 71.1, + 71.0 + ], + "down_blocks.1.resnets.1.conv2": [ + 68.0, + 67.7, + 68.2, + 67.9 + ], + "down_blocks.1.downsamplers.0.conv": [ + 65.0, + 62.9, + 63.4, + 63.1 + ], + "down_blocks.2.attentions.0.proj_in": [ + 76.7, + 76.2, + 76.9, + 76.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 80.9, + 81.1, + 80.9, + 81.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 80.9, + 81.3, + 81.3, + 81.0 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 79.9, + 79.6, + 79.9, + 79.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 80.1, + 80.0, + 80.1, + 80.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 81.9, + 82.1, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 81.4, + 81.7, + 81.4, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.7, + 81.7, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.8, + 81.7, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 79.6, + 79.4, + 79.7, + 79.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 80.0, + 80.0, + 80.3, + 80.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q": [ + 81.5, + 81.8, + 82.0, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k": [ + 81.7, + 81.8, + 82.0, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v": [ + 81.4, + 81.3, + 81.6, + 81.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 81.2, + 81.3, + 81.3, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q": [ + 81.4, + 82.1, + 81.9, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k": [ + 77.2, + 76.8, + 77.2, + 77.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v": [ + 81.7, + 81.9, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 81.7, + 82.0, + 82.3, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 80.0, + 80.1, + 80.1, + 80.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2": [ + 80.5, + 80.4, + 80.7, + 80.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q": [ + 81.0, + 81.1, + 81.4, + 81.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k": [ + 81.3, + 81.1, + 81.4, + 81.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v": [ + 80.7, + 80.9, + 80.9, + 80.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 81.2, + 80.9, + 81.2, + 80.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q": [ + 81.7, + 82.0, + 82.0, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k": [ + 81.4, + 81.8, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v": [ + 81.8, + 82.0, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 81.8, + 82.0, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 79.9, + 79.9, + 79.8, + 79.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2": [ + 80.3, + 80.3, + 80.4, + 80.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q": [ + 80.1, + 80.4, + 80.4, + 80.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k": [ + 80.1, + 80.2, + 80.6, + 80.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v": [ + 79.7, + 79.8, + 79.8, + 79.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 79.5, + 79.8, + 80.0, + 79.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q": [ + 81.8, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k": [ + 81.6, + 81.8, + 81.8, + 81.8 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v": [ + 82.0, + 82.1, + 81.8, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 82.0, + 81.9, + 82.1, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 80.4, + 80.6, + 80.8, + 80.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2": [ + 81.0, + 81.0, + 81.2, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q": [ + 80.9, + 81.2, + 81.2, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k": [ + 80.9, + 81.2, + 81.2, + 80.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v": [ + 80.4, + 80.5, + 80.3, + 80.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 80.7, + 80.7, + 80.4, + 80.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q": [ + 81.9, + 82.0, + 82.1, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k": [ + 81.8, + 82.0, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v": [ + 81.8, + 82.1, + 82.0, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 82.0, + 82.1, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 80.2, + 80.0, + 80.5, + 80.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2": [ + 80.6, + 80.5, + 81.1, + 80.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q": [ + 80.4, + 80.8, + 80.9, + 80.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k": [ + 80.6, + 80.8, + 80.9, + 80.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v": [ + 80.5, + 80.5, + 80.6, + 80.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 80.6, + 80.2, + 80.3, + 80.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q": [ + 82.1, + 82.0, + 82.0, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k": [ + 81.7, + 81.9, + 82.1, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v": [ + 81.8, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 78.9, + 78.4, + 78.6, + 78.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2": [ + 80.0, + 79.9, + 79.9, + 79.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q": [ + 81.1, + 81.3, + 81.6, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k": [ + 81.3, + 81.3, + 81.7, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v": [ + 80.9, + 81.0, + 81.4, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 80.5, + 80.8, + 81.0, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q": [ + 81.9, + 82.3, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k": [ + 82.2, + 82.2, + 82.1, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v": [ + 82.0, + 82.1, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 81.9, + 82.2, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 79.5, + 79.5, + 79.7, + 79.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2": [ + 80.2, + 80.4, + 80.2, + 80.2 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q": [ + 81.5, + 81.7, + 81.7, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k": [ + 81.6, + 81.5, + 81.8, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v": [ + 80.8, + 80.7, + 81.0, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 80.9, + 80.9, + 81.4, + 80.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q": [ + 82.1, + 82.1, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k": [ + 81.8, + 82.1, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v": [ + 81.9, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 80.1, + 80.2, + 80.6, + 80.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2": [ + 80.7, + 80.8, + 80.9, + 80.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q": [ + 81.2, + 81.5, + 81.7, + 81.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k": [ + 81.2, + 81.4, + 81.8, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v": [ + 80.9, + 81.1, + 81.2, + 81.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 81.2, + 81.1, + 81.5, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q": [ + 81.9, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k": [ + 81.9, + 82.1, + 82.0, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 81.9, + 82.1, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 80.5, + 80.6, + 80.7, + 80.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2": [ + 80.8, + 80.9, + 81.2, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q": [ + 81.7, + 82.1, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k": [ + 81.9, + 81.9, + 82.0, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v": [ + 80.8, + 81.0, + 81.2, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 81.0, + 81.2, + 81.1, + 81.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q": [ + 81.8, + 82.2, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k": [ + 80.9, + 81.4, + 81.3, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v": [ + 81.2, + 81.5, + 81.6, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 81.4, + 81.5, + 81.6, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 80.1, + 80.1, + 80.4, + 80.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2": [ + 80.6, + 80.5, + 80.9, + 80.5 + ], + "down_blocks.2.attentions.0.proj_out": [ + 76.2, + 75.9, + 76.3, + 76.2 + ], + "down_blocks.2.attentions.1.proj_in": [ + 80.1, + 79.6, + 80.4, + 80.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 81.5, + 81.5, + 81.9, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 81.7, + 81.5, + 81.9, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 80.7, + 80.3, + 80.8, + 80.6 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 80.6, + 80.5, + 80.3, + 80.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 81.8, + 82.0, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 81.6, + 81.5, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 81.8, + 81.9, + 81.9, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 81.6, + 81.9, + 81.9, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 80.0, + 79.3, + 79.8, + 80.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 80.4, + 80.0, + 80.5, + 80.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q": [ + 81.6, + 81.5, + 81.8, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k": [ + 81.6, + 81.6, + 81.8, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v": [ + 81.4, + 81.1, + 81.4, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 81.2, + 81.3, + 81.5, + 81.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k": [ + 80.4, + 79.7, + 79.3, + 80.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v": [ + 81.7, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 82.0, + 82.1, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 80.7, + 80.3, + 80.8, + 81.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2": [ + 81.0, + 80.8, + 81.3, + 81.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q": [ + 81.7, + 81.7, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k": [ + 81.5, + 81.6, + 82.1, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v": [ + 81.3, + 81.0, + 81.4, + 81.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 81.4, + 81.2, + 81.7, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q": [ + 82.1, + 82.1, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k": [ + 82.0, + 82.0, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v": [ + 81.9, + 82.0, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 81.9, + 81.9, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 81.1, + 80.9, + 81.2, + 81.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2": [ + 81.4, + 81.2, + 81.5, + 81.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q": [ + 82.1, + 82.1, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k": [ + 82.0, + 82.2, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v": [ + 81.6, + 81.6, + 81.8, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 81.4, + 81.2, + 81.7, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q": [ + 82.1, + 82.2, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k": [ + 82.0, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v": [ + 82.1, + 82.1, + 82.3, + 82.0 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 81.2, + 80.9, + 81.2, + 81.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2": [ + 81.4, + 81.2, + 81.4, + 81.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q": [ + 82.0, + 82.3, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k": [ + 82.0, + 82.3, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v": [ + 81.5, + 81.6, + 81.8, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 81.5, + 81.6, + 81.8, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q": [ + 82.0, + 82.1, + 82.2, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k": [ + 81.9, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v": [ + 82.0, + 82.1, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 81.9, + 82.0, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 81.2, + 81.1, + 81.3, + 81.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2": [ + 81.4, + 81.5, + 81.6, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q": [ + 82.1, + 82.1, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k": [ + 82.0, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v": [ + 81.7, + 81.4, + 81.8, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 81.6, + 82.0, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q": [ + 81.9, + 82.2, + 82.4, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k": [ + 81.9, + 82.1, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v": [ + 81.9, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 81.9, + 82.2, + 82.4, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 81.2, + 81.0, + 81.4, + 81.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2": [ + 81.4, + 81.4, + 81.6, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q": [ + 82.2, + 82.0, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k": [ + 81.9, + 82.1, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v": [ + 81.5, + 81.8, + 81.9, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 81.7, + 81.7, + 81.8, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q": [ + 82.0, + 82.1, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k": [ + 82.0, + 82.1, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v": [ + 81.9, + 82.4, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 82.3, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 81.4, + 81.1, + 81.4, + 81.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2": [ + 81.3, + 80.9, + 81.5, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q": [ + 82.1, + 82.2, + 82.4, + 82.0 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k": [ + 82.0, + 82.2, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v": [ + 81.7, + 81.7, + 81.8, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 81.7, + 81.7, + 81.7, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q": [ + 82.1, + 82.3, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k": [ + 82.0, + 82.3, + 82.0, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 81.2, + 81.2, + 81.3, + 81.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2": [ + 81.4, + 81.4, + 81.6, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q": [ + 82.0, + 82.0, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k": [ + 82.1, + 82.0, + 82.3, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v": [ + 81.4, + 81.5, + 81.7, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 81.7, + 81.8, + 81.9, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q": [ + 82.2, + 82.1, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k": [ + 82.0, + 81.9, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v": [ + 82.1, + 82.1, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 81.0, + 80.8, + 81.4, + 81.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2": [ + 81.4, + 81.3, + 81.4, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q": [ + 82.0, + 81.6, + 82.1, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k": [ + 82.0, + 81.8, + 82.0, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v": [ + 81.3, + 81.5, + 81.8, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 81.4, + 81.7, + 81.8, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q": [ + 82.1, + 82.2, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k": [ + 82.1, + 82.2, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v": [ + 82.0, + 82.2, + 82.4, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 80.6, + 80.1, + 80.8, + 80.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2": [ + 81.1, + 80.9, + 81.4, + 81.0 + ], + "down_blocks.2.attentions.1.proj_out": [ + 79.9, + 79.5, + 78.9, + 79.5 + ], + "down_blocks.2.resnets.0.conv1": [ + 74.0, + 73.5, + 73.3, + 73.7 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 79.6, + 79.4, + 79.7, + 79.7 + ], + "down_blocks.2.resnets.0.conv2": [ + 78.4, + 77.4, + 76.8, + 78.3 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 67.6, + 64.9, + 65.7, + 65.0 + ], + "down_blocks.2.resnets.1.conv1": [ + 80.3, + 80.0, + 79.9, + 80.0 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 81.2, + 81.2, + 81.5, + 81.1 + ], + 
"down_blocks.2.resnets.1.conv2": [ + 80.1, + 79.7, + 80.1, + 80.1 + ], + "up_blocks.0.attentions.0.proj_in": [ + 78.0, + 74.2, + 76.8, + 77.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 79.9, + 79.6, + 78.9, + 80.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 80.1, + 79.4, + 78.8, + 80.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 80.0, + 79.6, + 78.8, + 80.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 80.2, + 79.7, + 79.2, + 80.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.1, + 82.0, + 82.4, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.0, + 82.0, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.9, + 82.1, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.9, + 82.2, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 74.2, + 71.0, + 72.1, + 71.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 79.6, + 77.3, + 75.5, + 77.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q": [ + 81.4, + 81.2, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k": [ + 81.5, + 81.3, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v": [ + 80.6, + 80.9, + 81.0, + 81.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 81.0, + 80.6, + 80.9, + 81.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q": [ + 81.8, + 82.0, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k": [ + 81.9, + 82.0, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v": [ + 81.9, + 82.1, + 82.0, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 80.8, + 80.5, + 80.8, + 81.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2": [ + 81.1, + 81.1, + 81.3, + 81.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q": [ + 81.4, + 81.1, + 81.6, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k": [ + 81.4, + 81.5, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v": [ + 81.2, + 80.9, + 81.2, + 81.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 80.9, + 80.9, + 81.1, + 81.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q": [ + 81.6, + 82.1, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k": [ + 81.6, + 82.0, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v": [ + 81.7, + 82.0, + 81.7, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 81.9, + 82.2, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 81.0, + 81.1, + 81.2, + 80.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2": [ + 81.2, + 81.3, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q": [ + 80.7, + 80.6, + 80.4, + 80.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k": [ + 80.8, + 80.6, + 80.3, + 80.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v": [ + 80.8, + 80.6, + 80.4, + 80.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 80.7, + 
80.5, + 80.0, + 80.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q": [ + 81.9, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k": [ + 81.9, + 82.2, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v": [ + 82.0, + 82.0, + 82.2, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 82.0, + 82.0, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 81.2, + 81.2, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2": [ + 81.3, + 81.2, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q": [ + 81.3, + 81.7, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k": [ + 81.5, + 81.4, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v": [ + 81.2, + 81.3, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 81.4, + 81.4, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q": [ + 81.9, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k": [ + 81.9, + 82.1, + 82.3, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v": [ + 82.0, + 82.0, + 82.1, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 82.0, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 81.3, + 81.4, + 81.2, + 81.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2": [ + 81.4, + 81.3, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q": [ + 81.8, + 81.8, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k": [ + 81.7, + 81.8, + 82.0, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v": [ + 80.9, + 81.2, + 81.6, + 81.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 81.4, + 81.3, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k": [ + 81.8, + 82.1, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v": [ + 82.1, + 82.2, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.0, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 81.1, + 81.2, + 81.2, + 81.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2": [ + 81.3, + 81.5, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q": [ + 81.8, + 82.0, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k": [ + 82.0, + 82.1, + 82.1, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v": [ + 81.2, + 81.3, + 81.3, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 81.5, + 81.3, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q": [ + 81.8, + 82.3, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k": [ + 82.0, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v": [ + 81.9, + 82.1, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 82.0, + 82.1, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 81.4, + 81.4, + 81.4, + 81.4 
+ ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2": [ + 81.4, + 81.6, + 81.6, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q": [ + 81.8, + 81.6, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k": [ + 81.7, + 81.8, + 82.0, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v": [ + 81.4, + 81.4, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 81.4, + 81.5, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q": [ + 82.1, + 82.3, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.0, + 82.0, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v": [ + 81.9, + 82.3, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 82.0, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 81.2, + 81.3, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2": [ + 81.4, + 81.4, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q": [ + 81.8, + 82.0, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k": [ + 81.9, + 82.0, + 81.9, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v": [ + 81.5, + 81.6, + 81.7, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 81.6, + 81.5, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q": [ + 82.0, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k": [ + 82.1, + 82.3, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v": [ + 81.9, + 82.1, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 82.0, + 82.3, + 82.5, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 80.9, + 80.8, + 81.2, + 80.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2": [ + 81.3, + 81.3, + 81.5, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q": [ + 81.8, + 81.6, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k": [ + 81.8, + 82.0, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v": [ + 81.2, + 80.9, + 81.3, + 81.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 81.2, + 81.2, + 81.3, + 81.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q": [ + 82.0, + 82.2, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k": [ + 81.9, + 82.2, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v": [ + 81.9, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 82.1, + 82.1, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 80.2, + 80.5, + 80.8, + 80.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2": [ + 80.8, + 80.9, + 80.9, + 80.8 + ], + "up_blocks.0.attentions.0.proj_out": [ + 78.3, + 77.8, + 78.2, + 78.6 + ], + "up_blocks.0.attentions.1.proj_in": [ + 71.4, + 69.8, + 70.7, + 70.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 80.2, + 80.3, + 80.3, + 80.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 80.2, + 80.4, + 80.3, + 80.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 76.1, 
+ 75.2, + 75.8, + 75.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 75.2, + 75.6, + 75.4, + 75.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 81.5, + 81.8, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 81.4, + 81.5, + 81.7, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 81.1, + 80.9, + 80.8, + 81.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 80.3, + 81.1, + 81.2, + 80.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 72.9, + 71.4, + 72.1, + 71.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 76.5, + 76.1, + 76.5, + 76.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q": [ + 81.1, + 81.3, + 81.3, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k": [ + 81.2, + 81.5, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v": [ + 79.0, + 78.7, + 78.6, + 78.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 79.0, + 78.9, + 79.2, + 79.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q": [ + 81.4, + 81.2, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k": [ + 74.6, + 75.2, + 73.3, + 73.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v": [ + 79.9, + 80.4, + 80.4, + 80.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 80.4, + 81.2, + 81.7, + 81.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 73.5, + 73.0, + 74.2, + 73.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2": [ + 76.0, + 75.4, + 76.5, + 76.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q": [ + 80.9, + 81.0, + 81.0, + 80.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k": [ + 80.7, + 80.6, + 80.7, + 80.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v": [ + 79.6, + 79.6, + 79.8, + 79.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 79.7, + 79.9, + 80.3, + 79.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q": [ + 81.5, + 81.7, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k": [ + 70.6, + 69.7, + 70.4, + 70.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v": [ + 79.4, + 80.1, + 79.2, + 78.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 81.5, + 81.1, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 76.4, + 76.4, + 76.7, + 76.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2": [ + 78.5, + 78.2, + 78.5, + 78.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q": [ + 80.6, + 80.8, + 81.0, + 80.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k": [ + 80.6, + 80.7, + 80.9, + 80.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v": [ + 79.6, + 79.6, + 79.8, + 79.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 79.2, + 78.9, + 79.1, + 78.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q": [ + 81.0, + 81.2, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k": [ + 80.5, + 81.3, + 81.2, + 81.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v": [ + 79.4, + 79.7, + 79.0, + 78.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 81.0, + 81.1, + 81.7, + 
81.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 78.6, + 78.1, + 78.6, + 78.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2": [ + 80.0, + 79.5, + 80.0, + 79.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q": [ + 80.7, + 80.4, + 80.5, + 80.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k": [ + 80.5, + 80.4, + 80.4, + 80.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v": [ + 79.3, + 78.8, + 79.4, + 79.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 78.6, + 79.0, + 79.1, + 78.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q": [ + 81.5, + 81.5, + 81.9, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k": [ + 81.1, + 81.3, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v": [ + 80.3, + 80.3, + 79.8, + 79.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 81.6, + 81.1, + 81.8, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 76.6, + 76.2, + 76.6, + 75.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2": [ + 78.4, + 78.3, + 78.5, + 78.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q": [ + 80.5, + 80.4, + 80.7, + 80.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k": [ + 80.6, + 80.5, + 80.7, + 80.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v": [ + 79.0, + 78.6, + 78.6, + 78.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 79.4, + 79.4, + 79.5, + 79.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q": [ + 81.6, + 81.9, + 81.8, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k": [ + 81.4, + 81.5, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v": [ + 80.2, + 80.7, + 79.8, + 79.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 81.5, + 81.5, + 81.9, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 77.5, + 77.1, + 77.4, + 77.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2": [ + 78.0, + 77.9, + 78.2, + 78.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q": [ + 80.0, + 80.1, + 80.6, + 80.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k": [ + 80.3, + 80.2, + 80.5, + 80.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v": [ + 79.0, + 79.1, + 79.3, + 79.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 78.9, + 79.1, + 79.4, + 78.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q": [ + 81.7, + 81.8, + 81.7, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k": [ + 81.4, + 81.3, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v": [ + 81.0, + 81.1, + 80.3, + 80.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 81.3, + 81.4, + 82.0, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 78.6, + 78.9, + 78.9, + 78.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2": [ + 79.5, + 79.2, + 79.8, + 79.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q": [ + 80.2, + 80.3, + 80.5, + 80.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k": [ + 80.1, + 80.2, + 80.5, + 80.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v": [ + 79.2, + 78.6, + 79.5, + 78.9 + ], + 
"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 79.4, + 79.0, + 79.7, + 79.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q": [ + 81.7, + 81.9, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k": [ + 81.0, + 81.2, + 80.9, + 80.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v": [ + 80.9, + 81.1, + 81.2, + 81.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 81.4, + 81.7, + 81.9, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 76.4, + 77.2, + 76.9, + 76.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2": [ + 78.4, + 78.7, + 78.7, + 78.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q": [ + 80.3, + 80.2, + 80.6, + 80.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k": [ + 80.7, + 80.4, + 80.8, + 80.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v": [ + 79.3, + 79.4, + 79.7, + 79.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 79.0, + 79.0, + 79.4, + 78.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q": [ + 81.7, + 81.6, + 81.9, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k": [ + 81.5, + 81.4, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v": [ + 81.3, + 81.4, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 81.5, + 81.5, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 77.1, + 77.6, + 77.3, + 77.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2": [ + 78.5, + 78.8, + 78.8, + 78.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q": [ + 80.4, + 80.9, + 81.0, + 80.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k": [ + 80.8, + 80.7, + 80.8, + 81.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v": [ + 78.0, + 78.5, + 78.8, + 78.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 78.3, + 78.4, + 78.7, + 78.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q": [ + 81.3, + 81.1, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k": [ + 81.3, + 81.1, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v": [ + 81.5, + 81.8, + 81.8, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 81.6, + 81.9, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 72.1, + 71.4, + 72.0, + 71.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2": [ + 75.6, + 74.9, + 75.7, + 75.1 + ], + "up_blocks.0.attentions.1.proj_out": [ + 68.9, + 68.4, + 69.0, + 68.6 + ], + "up_blocks.0.attentions.2.proj_in": [ + 74.1, + 73.5, + 74.2, + 74.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q": [ + 80.6, + 80.4, + 80.7, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k": [ + 80.6, + 80.5, + 80.8, + 80.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v": [ + 77.7, + 77.0, + 77.3, + 77.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 77.1, + 76.1, + 77.1, + 77.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q": [ + 81.6, + 82.0, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k": [ + 79.6, + 78.5, + 78.3, + 78.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v": [ + 81.1, 
+ 81.5, + 81.4, + 81.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 81.6, + 81.7, + 81.1, + 81.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 76.1, + 75.5, + 75.9, + 76.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2": [ + 77.3, + 76.8, + 77.0, + 77.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q": [ + 81.2, + 81.0, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k": [ + 81.3, + 81.2, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v": [ + 78.8, + 78.6, + 79.1, + 78.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 78.9, + 78.7, + 79.3, + 78.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q": [ + 81.8, + 81.8, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k": [ + 81.4, + 81.5, + 81.9, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v": [ + 80.5, + 79.6, + 80.1, + 80.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 80.9, + 80.7, + 81.5, + 80.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 77.4, + 77.0, + 77.4, + 77.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2": [ + 78.2, + 78.3, + 78.8, + 78.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q": [ + 81.0, + 81.1, + 81.2, + 80.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k": [ + 80.9, + 81.0, + 81.3, + 81.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v": [ + 79.4, + 79.3, + 79.3, + 79.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0": [ + 79.4, + 79.1, + 79.4, + 79.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q": [ + 81.6, + 81.9, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k": [ + 81.1, + 81.6, + 82.0, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v": [ + 80.7, + 80.0, + 79.9, + 79.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0": [ + 81.1, + 81.4, + 81.8, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj": [ + 78.7, + 77.9, + 78.3, + 78.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2": [ + 80.0, + 79.9, + 79.6, + 79.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q": [ + 81.2, + 81.1, + 81.2, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k": [ + 81.1, + 81.2, + 81.2, + 81.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v": [ + 79.6, + 79.1, + 79.7, + 79.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0": [ + 79.4, + 79.1, + 79.3, + 79.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q": [ + 81.7, + 81.8, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k": [ + 81.6, + 81.4, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v": [ + 81.2, + 81.3, + 81.0, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0": [ + 81.6, + 81.7, + 82.0, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj": [ + 78.8, + 79.1, + 78.7, + 78.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2": [ + 79.7, + 79.6, + 79.3, + 79.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q": [ + 80.5, + 80.3, + 80.7, + 80.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k": [ + 80.7, + 80.5, + 80.9, + 80.6 
+ ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v": [ + 79.3, + 78.9, + 78.8, + 78.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0": [ + 78.5, + 77.3, + 77.5, + 77.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q": [ + 81.5, + 81.8, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k": [ + 81.5, + 81.8, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v": [ + 81.4, + 81.6, + 81.7, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0": [ + 81.5, + 81.9, + 82.1, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj": [ + 78.9, + 78.4, + 78.6, + 78.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2": [ + 79.8, + 79.7, + 80.1, + 80.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q": [ + 81.2, + 81.1, + 81.2, + 81.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k": [ + 81.3, + 81.4, + 81.1, + 81.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v": [ + 79.3, + 79.4, + 79.6, + 79.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0": [ + 80.0, + 80.1, + 80.0, + 79.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q": [ + 81.5, + 82.0, + 82.1, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k": [ + 81.3, + 81.6, + 81.9, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v": [ + 81.5, + 81.6, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0": [ + 81.9, + 82.0, + 82.2, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj": [ + 78.8, + 78.2, + 78.7, + 78.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2": [ + 79.7, + 79.3, + 79.8, + 79.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q": [ + 80.9, + 81.0, + 81.2, + 81.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k": [ + 81.0, + 81.0, + 81.0, + 80.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v": [ + 79.8, + 79.5, + 79.6, + 79.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0": [ + 79.9, + 79.5, + 79.7, + 79.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q": [ + 81.7, + 82.0, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k": [ + 81.6, + 81.9, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v": [ + 81.8, + 81.9, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0": [ + 81.6, + 81.9, + 81.9, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj": [ + 79.5, + 79.2, + 79.4, + 79.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2": [ + 79.9, + 79.5, + 80.0, + 79.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q": [ + 80.8, + 80.8, + 81.1, + 81.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k": [ + 80.9, + 80.8, + 81.2, + 81.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v": [ + 80.0, + 79.7, + 80.0, + 79.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0": [ + 80.1, + 79.7, + 80.1, + 79.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q": [ + 81.7, + 81.9, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k": [ + 81.7, + 82.0, + 82.0, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v": [ + 82.0, + 81.8, + 81.9, + 81.7 + ], + 
"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0": [ + 81.7, + 82.1, + 82.1, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj": [ + 79.4, + 78.8, + 79.1, + 79.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2": [ + 80.0, + 79.8, + 80.0, + 79.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q": [ + 80.8, + 80.4, + 80.6, + 80.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k": [ + 80.9, + 80.6, + 80.7, + 80.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v": [ + 79.6, + 79.4, + 79.7, + 79.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0": [ + 79.6, + 79.1, + 79.7, + 79.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q": [ + 82.0, + 82.0, + 81.8, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k": [ + 82.0, + 81.7, + 81.8, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v": [ + 81.9, + 81.9, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0": [ + 82.0, + 82.1, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj": [ + 77.2, + 75.8, + 76.2, + 76.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2": [ + 79.0, + 78.2, + 78.4, + 78.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q": [ + 80.9, + 81.0, + 81.0, + 81.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k": [ + 81.0, + 81.0, + 81.3, + 81.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v": [ + 78.9, + 78.5, + 78.7, + 78.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0": [ + 79.5, + 78.9, + 79.2, + 79.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q": [ + 81.6, + 81.8, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k": [ + 82.1, + 81.7, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v": [ + 81.9, + 81.8, + 82.0, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0": [ + 81.7, + 82.2, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj": [ + 77.6, + 76.8, + 77.2, + 77.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2": [ + 79.0, + 78.6, + 79.0, + 78.8 + ], + "up_blocks.0.attentions.2.proj_out": [ + 72.2, + 71.1, + 71.1, + 71.4 + ], + "up_blocks.0.resnets.0.conv1": [ + 80.4, + 80.1, + 79.5, + 80.2 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 80.9, + 81.0, + 81.1, + 81.2 + ], + "up_blocks.0.resnets.0.conv2": [ + 78.8, + 79.0, + 78.0, + 78.8 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 77.9, + 77.4, + 77.5, + 77.7 + ], + "up_blocks.0.resnets.1.conv1": [ + 79.0, + 78.5, + 78.7, + 78.7 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 79.8, + 79.6, + 79.9, + 79.9 + ], + "up_blocks.0.resnets.1.conv2": [ + 79.6, + 79.6, + 79.6, + 79.4 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 70.6, + 70.2, + 71.1, + 70.7 + ], + "up_blocks.0.resnets.2.conv1": [ + 78.2, + 77.4, + 78.0, + 78.0 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 79.4, + 79.3, + 79.6, + 79.5 + ], + "up_blocks.0.resnets.2.conv2": [ + 78.8, + 78.5, + 78.4, + 78.4 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 64.4, + 64.1, + 64.6, + 64.3 + ], + "up_blocks.0.upsamplers.0.conv": [ + 62.2, + 61.7, + 62.1, + 62.0 + ], + "up_blocks.1.attentions.0.proj_in": [ + 64.1, + 63.8, + 63.9, + 64.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 71.2, + 70.5, + 71.1, + 71.1 + ], + 
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 71.0, + 70.4, + 70.7, + 70.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 66.2, + 66.0, + 66.5, + 66.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 63.5, + 63.0, + 63.6, + 63.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 75.1, + 74.7, + 75.1, + 74.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 74.9, + 74.2, + 75.3, + 74.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 80.3, + 80.5, + 79.8, + 79.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 80.1, + 80.2, + 80.6, + 80.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 65.6, + 65.3, + 65.7, + 65.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 67.0, + 66.7, + 67.3, + 67.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 69.4, + 69.1, + 69.5, + 69.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 69.3, + 69.2, + 69.5, + 69.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 66.0, + 65.7, + 66.2, + 66.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 66.4, + 66.0, + 66.5, + 66.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 74.7, + 74.7, + 74.4, + 74.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 76.7, + 76.8, + 76.5, + 76.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 80.1, + 80.1, + 79.8, + 79.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 78.7, + 78.0, + 79.1, + 78.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 65.5, + 65.1, + 65.4, + 65.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 69.0, + 68.7, + 69.1, + 69.0 + ], + "up_blocks.1.attentions.0.proj_out": [ + 66.9, + 66.6, + 67.0, + 67.0 + ], + "up_blocks.1.attentions.1.proj_in": [ + 60.1, + 59.6, + 60.2, + 60.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 68.0, + 67.4, + 68.1, + 67.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 68.2, + 67.8, + 68.5, + 68.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 61.4, + 60.6, + 61.9, + 61.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 58.0, + 57.1, + 58.1, + 57.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 66.0, + 65.7, + 65.2, + 65.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 55.7, + 55.7, + 54.5, + 54.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 78.5, + 77.6, + 78.8, + 78.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 75.4, + 74.9, + 75.2, + 74.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 62.1, + 61.7, + 62.2, + 62.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 64.0, + 63.5, + 64.0, + 63.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 66.1, + 65.7, + 66.2, + 66.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 66.9, + 66.3, + 66.8, + 66.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 64.4, + 63.8, + 64.4, + 64.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 63.2, + 62.5, + 63.1, + 63.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 74.3, 
+ 74.6, + 73.7, + 73.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 62.4, + 62.2, + 62.2, + 62.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 76.3, + 76.6, + 76.5, + 77.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 73.6, + 73.5, + 73.3, + 73.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 61.6, + 60.7, + 61.4, + 61.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 64.5, + 63.6, + 64.3, + 64.1 + ], + "up_blocks.1.attentions.1.proj_out": [ + 62.9, + 62.3, + 63.0, + 62.9 + ], + "up_blocks.1.attentions.2.proj_in": [ + 59.6, + 59.5, + 59.9, + 59.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 66.1, + 66.0, + 66.2, + 66.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 67.1, + 66.8, + 66.9, + 67.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 61.7, + 60.8, + 61.7, + 61.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 58.7, + 58.3, + 59.4, + 59.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 52.1, + 52.7, + 52.7, + 52.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 62.6, + 63.0, + 61.5, + 62.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 70.3, + 70.5, + 70.2, + 70.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 70.8, + 70.1, + 71.3, + 71.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 59.3, + 59.4, + 59.3, + 59.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 62.3, + 62.5, + 62.6, + 62.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q": [ + 65.4, + 65.3, + 65.5, + 65.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k": [ + 66.5, + 66.2, + 66.4, + 66.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v": [ + 62.1, + 61.9, + 62.1, + 62.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 62.4, + 62.1, + 62.4, + 62.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q": [ + 71.6, + 71.5, + 71.3, + 71.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k": [ + 55.8, + 55.3, + 55.8, + 55.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v": [ + 77.4, + 77.9, + 77.3, + 77.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 74.2, + 74.6, + 73.8, + 73.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 58.0, + 57.9, + 58.0, + 58.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2": [ + 63.3, + 63.2, + 63.3, + 63.2 + ], + "up_blocks.1.attentions.2.proj_out": [ + 60.1, + 60.1, + 60.2, + 60.2 + ], + "up_blocks.1.resnets.0.conv1": [ + 70.1, + 69.2, + 69.5, + 69.8 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 71.7, + 71.5, + 71.7, + 72.0 + ], + "up_blocks.1.resnets.0.conv2": [ + 69.3, + 68.9, + 69.4, + 69.1 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 55.8, + 55.5, + 55.9, + 55.8 + ], + "up_blocks.1.resnets.1.conv1": [ + 60.4, + 60.1, + 60.3, + 60.3 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 65.3, + 64.7, + 65.3, + 65.0 + ], + "up_blocks.1.resnets.1.conv2": [ + 61.5, + 61.0, + 61.3, + 61.4 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 55.2, + 55.2, + 55.2, + 55.4 + ], + "up_blocks.1.resnets.2.conv1": [ + 59.5, + 59.3, + 59.6, + 59.7 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 63.5, + 63.5, + 63.5, + 63.7 + ], + 
"up_blocks.1.resnets.2.conv2": [ + 61.4, + 61.0, + 61.2, + 61.1 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 51.6, + 51.5, + 51.5, + 51.5 + ], + "up_blocks.1.upsamplers.0.conv": [ + 48.4, + 48.5, + 48.5, + 48.4 + ], + "up_blocks.2.resnets.0.conv1": [ + 51.9, + 51.7, + 52.0, + 51.9 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 59.0, + 58.6, + 59.3, + 59.2 + ], + "up_blocks.2.resnets.0.conv2": [ + 51.5, + 51.4, + 51.7, + 51.6 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 47.8, + 47.8, + 47.9, + 47.8 + ], + "up_blocks.2.resnets.1.conv1": [ + 48.5, + 48.4, + 48.5, + 48.5 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 57.4, + 57.5, + 57.6, + 57.4 + ], + "up_blocks.2.resnets.1.conv2": [ + 48.8, + 48.9, + 48.9, + 48.9 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 45.1, + 45.1, + 45.1, + 45.0 + ], + "up_blocks.2.resnets.2.conv1": [ + 46.9, + 46.8, + 47.0, + 46.9 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 56.6, + 56.7, + 56.9, + 56.8 + ], + "up_blocks.2.resnets.2.conv2": [ + 44.6, + 44.8, + 44.7, + 44.7 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 43.1, + 43.2, + 43.2, + 43.2 + ], + "mid_block.attentions.0.proj_in": [ + 76.4, + 76.4, + 75.3, + 75.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 78.7, + 78.3, + 78.3, + 78.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 78.7, + 78.2, + 78.3, + 78.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 79.1, + 78.8, + 78.8, + 79.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 79.1, + 78.7, + 78.8, + 79.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 81.7, + 82.1, + 82.1, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 81.9, + 82.1, + 82.1, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.8, + 82.2, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.8, + 82.3, + 82.2, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 79.5, + 78.3, + 79.0, + 79.6 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 80.5, + 79.9, + 80.4, + 80.4 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q": [ + 79.5, + 78.6, + 79.0, + 79.4 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k": [ + 79.5, + 78.7, + 78.7, + 79.2 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v": [ + 79.0, + 78.3, + 78.7, + 78.7 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 79.2, + 78.2, + 78.5, + 78.9 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.1, + 82.1, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k": [ + 81.7, + 82.0, + 82.2, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v": [ + 81.8, + 82.3, + 82.0, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 82.1, + 82.1, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 79.6, + 79.8, + 79.9, + 80.0 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.2": [ + 80.4, + 80.3, + 80.2, + 80.7 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q": [ + 80.8, + 80.6, + 81.0, + 80.8 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k": [ + 80.9, + 80.8, + 81.1, + 80.7 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v": [ + 80.7, + 80.1, + 80.9, + 80.6 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 80.6, + 80.5, + 
80.8, + 80.7 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q": [ + 81.9, + 82.2, + 82.2, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k": [ + 81.8, + 82.2, + 82.2, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v": [ + 81.8, + 82.3, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 82.2, + 82.2, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 80.4, + 80.1, + 80.7, + 80.3 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.2": [ + 81.1, + 80.9, + 81.2, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q": [ + 80.8, + 80.8, + 80.6, + 81.1 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k": [ + 80.7, + 80.7, + 80.4, + 81.1 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v": [ + 80.8, + 80.4, + 79.6, + 80.8 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 81.0, + 80.8, + 80.7, + 81.0 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q": [ + 81.9, + 82.3, + 82.4, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k": [ + 82.1, + 82.1, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 81.9, + 82.2, + 82.4, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 80.3, + 80.0, + 80.3, + 80.1 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.2": [ + 80.8, + 80.8, + 80.8, + 80.7 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q": [ + 80.3, + 79.7, + 79.6, + 80.3 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k": [ + 80.6, + 79.7, + 79.8, + 80.2 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v": [ + 80.3, + 79.5, + 79.7, + 80.0 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 80.1, + 79.5, + 79.6, + 80.2 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_q": [ + 82.1, + 82.1, + 82.3, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k": [ + 81.9, + 82.1, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v": [ + 81.9, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 80.8, + 80.7, + 80.6, + 80.9 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.2": [ + 81.0, + 81.0, + 81.2, + 81.1 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q": [ + 81.0, + 80.9, + 81.1, + 81.1 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k": [ + 81.1, + 81.1, + 81.1, + 81.0 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v": [ + 80.9, + 80.8, + 80.9, + 80.9 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 80.8, + 80.8, + 80.7, + 80.8 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_q": [ + 81.9, + 82.2, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v": [ + 81.9, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 80.8, + 80.4, + 80.7, + 80.9 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.2": [ + 81.3, + 
81.0, + 80.9, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q": [ + 81.6, + 81.6, + 81.8, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k": [ + 81.5, + 81.6, + 81.7, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v": [ + 81.3, + 81.5, + 81.5, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 81.4, + 81.4, + 81.5, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k": [ + 82.0, + 82.2, + 82.2, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 80.9, + 80.9, + 81.1, + 80.9 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.2": [ + 81.1, + 81.1, + 81.3, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q": [ + 81.4, + 81.5, + 81.7, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k": [ + 81.5, + 81.6, + 81.5, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v": [ + 81.2, + 81.2, + 81.6, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 80.9, + 81.1, + 81.3, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q": [ + 82.2, + 82.2, + 82.2, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.2, + 82.0, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v": [ + 82.1, + 82.2, + 82.5, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 80.9, + 80.6, + 81.0, + 80.8 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.2": [ + 81.2, + 81.2, + 81.3, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q": [ + 81.6, + 81.5, + 81.8, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k": [ + 81.7, + 81.5, + 81.6, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_v": [ + 81.4, + 81.3, + 81.6, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 81.4, + 81.6, + 81.6, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k": [ + 81.9, + 82.3, + 82.2, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v": [ + 82.2, + 82.3, + 82.6, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 82.0, + 82.4, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 80.9, + 80.7, + 81.0, + 80.8 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.2": [ + 81.1, + 81.1, + 81.4, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q": [ + 81.4, + 81.5, + 81.6, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k": [ + 81.5, + 81.7, + 81.6, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v": [ + 81.1, + 81.2, + 81.2, + 81.1 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 81.1, + 81.2, + 81.4, + 81.0 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k": [ + 
82.1, + 82.2, + 82.4, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v": [ + 82.0, + 82.4, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 81.0, + 80.8, + 81.2, + 80.9 + ], + "mid_block.attentions.0.transformer_blocks.9.ff.net.2": [ + 80.8, + 81.0, + 81.3, + 80.9 + ], + "mid_block.attentions.0.proj_out": [ + 79.5, + 79.2, + 78.6, + 79.4 + ], + "mid_block.resnets.0.conv1": [ + 79.9, + 79.6, + 79.6, + 80.0 + ], + "mid_block.resnets.0.time_emb_proj": [ + 80.7, + 80.9, + 80.6, + 80.8 + ], + "mid_block.resnets.0.conv2": [ + 79.9, + 79.4, + 79.5, + 79.4 + ], + "mid_block.resnets.1.conv1": [ + 80.6, + 80.3, + 80.1, + 80.6 + ], + "mid_block.resnets.1.time_emb_proj": [ + 80.9, + 81.4, + 81.3, + 81.1 + ], + "mid_block.resnets.1.conv2": [ + 80.4, + 79.9, + 79.6, + 80.4 + ] + }, + "2": { + "time_embedding.linear_1": [ + 55.5, + 55.5, + 55.4, + 55.4 + ], + "time_embedding.linear_2": [ + 50.9, + 50.7, + 50.9, + 50.8 + ], + "add_embedding.linear_1": [ + 70.1, + 72.3, + 70.6, + 71.3 + ], + "add_embedding.linear_2": [ + 75.6, + 74.3, + 74.1, + 74.7 + ], + "down_blocks.0.resnets.0.conv1": [ + 59.7, + 60.9, + 59.9, + 60.0 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 60.3, + 60.4, + 60.1, + 60.0 + ], + "down_blocks.0.resnets.0.conv2": [ + 56.2, + 56.3, + 56.4, + 56.4 + ], + "down_blocks.0.resnets.1.conv1": [ + 57.8, + 57.5, + 58.0, + 58.0 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 63.2, + 62.9, + 62.8, + 62.8 + ], + "down_blocks.0.resnets.1.conv2": [ + 58.3, + 57.9, + 58.1, + 58.5 + ], + "down_blocks.0.downsamplers.0.conv": [ + 59.0, + 58.7, + 59.4, + 59.3 + ], + "down_blocks.1.attentions.0.proj_in": [ + 63.7, + 63.7, + 64.1, + 64.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 75.0, + 75.2, + 75.3, + 75.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 75.7, + 75.6, + 76.3, + 75.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 70.8, + 71.4, + 71.9, + 71.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 68.4, + 67.9, + 68.3, + 68.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 80.9, + 80.3, + 81.1, + 81.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 79.9, + 79.1, + 80.4, + 80.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 75.8, + 77.7, + 74.9, + 74.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 78.5, + 77.0, + 77.5, + 77.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 63.7, + 63.9, + 64.1, + 64.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 68.0, + 67.4, + 67.6, + 68.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 74.4, + 74.1, + 74.5, + 74.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 74.9, + 75.0, + 74.8, + 75.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 69.7, + 69.6, + 70.1, + 69.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 70.2, + 69.8, + 70.2, + 69.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 72.6, + 74.6, + 74.1, + 74.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 62.2, + 63.0, + 62.2, + 62.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 70.0, + 68.8, + 
69.7, + 70.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 69.5, + 68.6, + 69.0, + 69.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 63.7, + 62.9, + 63.8, + 64.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 67.8, + 66.9, + 67.8, + 68.0 + ], + "down_blocks.1.attentions.0.proj_out": [ + 68.6, + 68.0, + 68.6, + 68.6 + ], + "down_blocks.1.attentions.1.proj_in": [ + 70.9, + 70.5, + 71.1, + 71.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 77.3, + 77.1, + 77.6, + 77.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 77.4, + 77.2, + 77.6, + 77.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 74.8, + 74.4, + 75.2, + 74.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 74.9, + 74.6, + 75.0, + 74.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 80.7, + 80.3, + 80.7, + 80.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 80.2, + 79.6, + 80.8, + 80.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 80.7, + 80.6, + 80.5, + 80.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 79.4, + 79.0, + 79.7, + 79.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 71.1, + 70.6, + 71.1, + 71.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 72.9, + 72.6, + 72.7, + 72.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 78.9, + 78.7, + 78.9, + 78.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 79.2, + 79.0, + 79.2, + 79.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 77.2, + 77.1, + 77.4, + 77.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 76.3, + 76.3, + 76.4, + 76.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 80.7, + 80.5, + 81.1, + 80.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 80.1, + 79.8, + 80.7, + 80.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 79.7, + 79.5, + 79.7, + 79.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 78.1, + 76.8, + 78.3, + 77.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 71.6, + 71.1, + 71.4, + 71.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 74.5, + 74.0, + 74.4, + 74.4 + ], + "down_blocks.1.attentions.1.proj_out": [ + 72.6, + 72.1, + 72.9, + 72.7 + ], + "down_blocks.1.resnets.0.conv1": [ + 63.1, + 62.8, + 63.2, + 63.3 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 72.6, + 72.8, + 72.6, + 72.8 + ], + "down_blocks.1.resnets.0.conv2": [ + 71.2, + 72.0, + 71.8, + 71.9 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 60.9, + 60.6, + 61.1, + 61.1 + ], + "down_blocks.1.resnets.1.conv1": [ + 72.2, + 72.0, + 72.5, + 72.1 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 74.6, + 74.0, + 74.6, + 74.5 + ], + "down_blocks.1.resnets.1.conv2": [ + 74.0, + 73.7, + 74.1, + 73.9 + ], + "down_blocks.1.downsamplers.0.conv": [ + 70.4, + 68.2, + 69.0, + 68.6 + ], + "down_blocks.2.attentions.0.proj_in": [ + 79.5, + 79.4, + 80.0, + 79.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 81.6, + 81.3, + 81.8, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 81.5, + 81.4, + 81.8, + 81.5 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 81.0, + 81.0, + 81.3, + 81.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 80.8, + 80.8, + 81.0, + 80.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.0, + 82.1, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.0, + 82.0, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.9, + 82.1, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 82.0, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 80.8, + 80.7, + 80.8, + 80.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 80.9, + 81.0, + 81.3, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q": [ + 81.8, + 81.9, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k": [ + 81.9, + 82.1, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v": [ + 81.3, + 81.5, + 81.8, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 81.3, + 81.4, + 81.5, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.0, + 82.2, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k": [ + 81.7, + 82.1, + 82.1, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v": [ + 81.9, + 82.0, + 82.1, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 82.0, + 82.3, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 80.9, + 80.9, + 81.1, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2": [ + 81.3, + 81.3, + 81.3, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q": [ + 81.5, + 81.4, + 82.0, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k": [ + 81.5, + 81.3, + 82.0, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v": [ + 81.2, + 81.4, + 81.5, + 81.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 81.5, + 81.4, + 81.5, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q": [ + 82.0, + 82.3, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k": [ + 81.9, + 82.1, + 82.4, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v": [ + 81.8, + 82.1, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 82.0, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 81.1, + 81.0, + 81.1, + 81.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2": [ + 81.0, + 81.2, + 81.2, + 81.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q": [ + 81.0, + 81.2, + 81.5, + 81.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k": [ + 81.0, + 81.1, + 81.4, + 81.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v": [ + 80.9, + 81.0, + 81.2, + 81.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 80.7, + 81.0, + 81.2, + 80.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q": [ + 82.1, + 82.1, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k": [ + 82.0, + 82.1, + 82.2, + 82.1 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 82.2, + 82.0, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 81.2, + 81.4, + 81.7, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2": [ + 81.3, + 81.3, + 81.7, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q": [ + 81.7, + 81.7, + 81.8, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k": [ + 81.7, + 81.5, + 81.7, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v": [ + 81.0, + 81.2, + 81.2, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 81.4, + 81.3, + 81.5, + 81.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q": [ + 82.2, + 82.1, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k": [ + 82.1, + 82.2, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v": [ + 81.9, + 82.2, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 82.2, + 82.1, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 80.7, + 80.7, + 81.0, + 80.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2": [ + 81.2, + 81.1, + 81.6, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q": [ + 81.3, + 81.5, + 81.6, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k": [ + 81.3, + 81.4, + 81.6, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v": [ + 81.3, + 81.4, + 81.7, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 80.9, + 81.0, + 81.1, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q": [ + 82.2, + 82.3, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k": [ + 82.0, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v": [ + 82.2, + 82.3, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 80.4, + 80.3, + 80.5, + 80.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2": [ + 80.4, + 80.9, + 81.1, + 80.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q": [ + 81.6, + 81.8, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k": [ + 81.6, + 81.9, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v": [ + 81.4, + 81.3, + 81.7, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 81.2, + 81.3, + 81.6, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q": [ + 82.0, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v": [ + 82.0, + 82.5, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 82.3, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 80.5, + 80.6, + 80.8, + 80.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2": [ + 81.0, + 81.1, + 81.3, + 81.1 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q": [ + 81.9, + 81.8, + 82.1, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k": [ + 81.7, + 82.0, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v": [ + 81.1, + 81.3, + 81.5, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 81.4, + 81.5, + 81.7, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q": [ + 82.1, + 82.1, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 81.8, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 80.7, + 80.9, + 80.9, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2": [ + 81.2, + 81.1, + 81.6, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q": [ + 81.6, + 81.9, + 82.0, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k": [ + 81.6, + 81.7, + 82.1, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v": [ + 81.5, + 81.4, + 81.7, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 81.3, + 81.3, + 81.7, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q": [ + 82.0, + 82.3, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v": [ + 82.3, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 81.9, + 82.3, + 82.4, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 81.2, + 81.0, + 81.3, + 81.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2": [ + 81.3, + 81.4, + 81.7, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q": [ + 81.8, + 82.1, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k": [ + 82.0, + 82.3, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v": [ + 81.1, + 81.3, + 81.6, + 81.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 81.2, + 81.4, + 81.6, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k": [ + 81.5, + 81.9, + 81.8, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v": [ + 81.5, + 81.6, + 81.8, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 81.4, + 81.6, + 81.8, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 81.0, + 80.9, + 81.4, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2": [ + 81.3, + 81.3, + 81.5, + 81.3 + ], + "down_blocks.2.attentions.0.proj_out": [ + 79.6, + 79.7, + 79.7, + 79.5 + ], + "down_blocks.2.attentions.1.proj_in": [ + 81.0, + 80.7, + 80.9, + 81.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 81.8, + 81.9, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 81.8, + 82.1, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 81.3, + 81.2, + 81.7, + 81.5 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 81.1, + 81.1, + 81.6, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.2, + 82.6, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 82.1, + 82.2, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 82.1, + 82.1, + 82.2, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 81.9, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 80.9, + 80.7, + 81.0, + 80.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 81.4, + 81.3, + 81.6, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q": [ + 81.9, + 82.1, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k": [ + 82.0, + 82.1, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v": [ + 81.7, + 81.7, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 81.6, + 81.7, + 81.9, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v": [ + 81.9, + 82.1, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 82.0, + 82.2, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 81.4, + 81.0, + 81.3, + 81.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2": [ + 81.6, + 81.8, + 81.8, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q": [ + 82.1, + 82.1, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k": [ + 82.0, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v": [ + 81.4, + 81.4, + 81.9, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 81.6, + 81.8, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k": [ + 82.0, + 82.1, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v": [ + 82.0, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 81.4, + 81.7, + 82.0, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2": [ + 81.5, + 81.8, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q": [ + 82.0, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k": [ + 82.1, + 82.3, + 82.5, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v": [ + 81.7, + 81.9, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 81.8, + 82.0, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k": [ + 82.1, + 82.2, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v": [ + 82.1, + 82.3, + 82.2, + 82.1 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 81.5, + 81.5, + 81.9, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2": [ + 81.8, + 82.0, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v": [ + 81.7, + 82.1, + 82.0, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 82.0, + 82.1, + 82.0, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k": [ + 81.8, + 82.4, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v": [ + 82.0, + 82.1, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 82.1, + 82.1, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 81.6, + 81.4, + 81.7, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2": [ + 81.9, + 81.9, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k": [ + 82.1, + 82.3, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v": [ + 81.8, + 82.0, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 81.8, + 81.8, + 82.2, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q": [ + 82.0, + 82.3, + 82.3, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k": [ + 81.9, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v": [ + 82.2, + 82.3, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 82.2, + 82.1, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 81.5, + 81.2, + 81.9, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2": [ + 81.5, + 82.0, + 82.0, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q": [ + 82.1, + 82.1, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k": [ + 82.2, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v": [ + 81.7, + 82.0, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q": [ + 82.2, + 82.2, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v": [ + 82.1, + 82.2, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 81.6, + 81.7, + 81.8, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2": [ + 81.6, + 81.7, + 81.8, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q": [ + 82.2, + 82.3, + 82.2, + 82.1 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k": [ + 82.2, + 82.3, + 82.2, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v": [ + 81.8, + 82.0, + 82.0, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 81.7, + 82.1, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q": [ + 82.2, + 82.4, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k": [ + 82.1, + 82.2, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v": [ + 82.2, + 82.4, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 82.1, + 82.2, + 82.3, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 81.6, + 81.6, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2": [ + 81.6, + 81.9, + 82.1, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k": [ + 82.2, + 82.3, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v": [ + 81.8, + 82.0, + 81.9, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 81.8, + 82.0, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k": [ + 82.1, + 82.1, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v": [ + 82.1, + 82.3, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 81.6, + 81.7, + 81.8, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2": [ + 81.5, + 81.7, + 81.8, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q": [ + 82.1, + 82.2, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k": [ + 82.1, + 82.1, + 82.5, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v": [ + 81.7, + 81.9, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 81.8, + 81.5, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q": [ + 82.1, + 82.1, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v": [ + 81.9, + 82.1, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 82.2, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 81.1, + 81.1, + 81.4, + 81.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2": [ + 81.6, + 81.4, + 82.0, + 81.6 + ], + "down_blocks.2.attentions.1.proj_out": [ + 80.8, + 80.7, + 80.8, + 80.8 + ], + "down_blocks.2.resnets.0.conv1": [ + 79.6, + 79.5, + 79.6, + 79.5 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 80.7, + 80.8, + 81.0, + 80.9 + ], + "down_blocks.2.resnets.0.conv2": [ + 80.3, + 79.9, + 79.9, + 80.3 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 70.9, + 68.8, + 69.0, + 68.8 + ], + "down_blocks.2.resnets.1.conv1": [ + 80.9, + 80.8, + 81.1, + 80.9 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 81.6, + 81.5, + 81.9, + 81.5 + ], + 
"down_blocks.2.resnets.1.conv2": [ + 80.9, + 80.7, + 81.0, + 81.0 + ], + "up_blocks.0.attentions.0.proj_in": [ + 80.6, + 79.6, + 78.0, + 80.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 80.8, + 80.7, + 80.1, + 81.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 80.9, + 80.6, + 80.0, + 81.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 80.7, + 80.6, + 80.3, + 80.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 81.1, + 80.8, + 80.2, + 81.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.3, + 82.3, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.2, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 82.0, + 82.1, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 80.3, + 78.7, + 76.9, + 79.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 81.0, + 80.6, + 78.5, + 80.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q": [ + 81.9, + 81.9, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k": [ + 81.7, + 81.9, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v": [ + 81.3, + 81.3, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 81.2, + 81.4, + 81.5, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k": [ + 82.0, + 82.4, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.0, + 82.1, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 82.0, + 82.2, + 82.2, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 81.3, + 81.4, + 81.5, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2": [ + 81.6, + 81.7, + 81.9, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q": [ + 81.8, + 81.9, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k": [ + 81.7, + 82.0, + 82.0, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v": [ + 81.3, + 81.4, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 81.4, + 81.3, + 81.5, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q": [ + 81.9, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k": [ + 81.9, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v": [ + 81.9, + 82.2, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 81.7, + 82.1, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 81.4, + 81.6, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2": [ + 81.6, + 81.6, + 82.0, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q": [ + 81.3, + 81.3, + 81.1, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k": [ + 81.3, + 81.3, + 81.5, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v": [ + 81.4, + 81.3, + 81.4, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 81.2, + 
81.2, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q": [ + 82.2, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v": [ + 82.1, + 82.1, + 82.0, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 82.0, + 82.3, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 81.4, + 81.6, + 81.8, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2": [ + 81.5, + 81.5, + 81.8, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q": [ + 81.8, + 82.0, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k": [ + 81.7, + 82.1, + 82.2, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v": [ + 81.7, + 81.8, + 81.9, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 81.4, + 81.4, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q": [ + 82.1, + 82.4, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k": [ + 81.9, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v": [ + 82.0, + 82.1, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 81.6, + 81.5, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2": [ + 81.6, + 81.6, + 81.9, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q": [ + 82.0, + 82.0, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k": [ + 81.9, + 82.1, + 82.2, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v": [ + 81.6, + 81.7, + 81.8, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 81.4, + 81.5, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q": [ + 82.1, + 82.3, + 82.5, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k": [ + 82.1, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v": [ + 82.1, + 82.2, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 81.4, + 81.6, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2": [ + 81.6, + 81.9, + 81.9, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q": [ + 82.2, + 82.2, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k": [ + 82.2, + 82.2, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v": [ + 81.6, + 81.8, + 81.8, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 81.5, + 81.6, + 81.8, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q": [ + 82.0, + 82.2, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k": [ + 82.0, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v": [ + 81.9, + 82.4, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 81.9, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 81.6, + 81.9, + 81.9, + 81.7 
+ ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2": [ + 81.7, + 81.8, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q": [ + 82.0, + 82.1, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k": [ + 81.9, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v": [ + 81.6, + 81.6, + 81.9, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 81.7, + 81.6, + 82.0, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 81.4, + 81.7, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2": [ + 81.5, + 81.7, + 81.9, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k": [ + 82.1, + 82.1, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v": [ + 81.6, + 81.5, + 82.1, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 81.8, + 81.7, + 81.9, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k": [ + 82.0, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v": [ + 82.0, + 82.1, + 82.3, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 81.4, + 81.3, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2": [ + 81.5, + 81.8, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q": [ + 81.9, + 82.2, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k": [ + 81.8, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v": [ + 81.6, + 81.5, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 81.5, + 81.8, + 81.9, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q": [ + 81.9, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v": [ + 82.3, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 81.0, + 81.2, + 81.3, + 81.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2": [ + 81.4, + 81.5, + 81.8, + 81.5 + ], + "up_blocks.0.attentions.0.proj_out": [ + 80.3, + 80.2, + 80.5, + 80.4 + ], + "up_blocks.0.attentions.1.proj_in": [ + 77.3, + 77.1, + 77.9, + 77.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 81.3, + 81.2, + 81.4, + 81.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 81.3, + 81.3, + 81.5, + 81.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 80.3, 
+ 80.1, + 80.6, + 79.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 80.0, + 79.5, + 80.2, + 80.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 82.0, + 82.3, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 81.4, + 81.8, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 81.1, + 81.8, + 81.8, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 77.9, + 77.0, + 77.8, + 77.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 79.3, + 79.2, + 79.6, + 79.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q": [ + 81.7, + 81.7, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k": [ + 81.6, + 81.8, + 81.9, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v": [ + 80.5, + 80.7, + 80.9, + 80.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 80.3, + 80.5, + 80.5, + 80.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q": [ + 81.8, + 82.0, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k": [ + 76.6, + 77.3, + 75.2, + 75.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v": [ + 81.2, + 81.4, + 81.3, + 81.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 81.5, + 81.7, + 81.9, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 78.3, + 77.9, + 79.0, + 78.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2": [ + 79.8, + 79.5, + 80.2, + 80.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q": [ + 81.6, + 81.3, + 81.6, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k": [ + 81.4, + 81.5, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v": [ + 80.6, + 80.7, + 81.0, + 80.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 80.4, + 80.8, + 80.9, + 80.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q": [ + 81.9, + 82.0, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k": [ + 81.6, + 81.7, + 81.6, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v": [ + 81.1, + 81.2, + 81.1, + 80.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 81.5, + 81.8, + 82.3, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 78.5, + 78.4, + 78.9, + 78.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2": [ + 80.1, + 80.4, + 80.8, + 80.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q": [ + 81.3, + 81.3, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k": [ + 81.1, + 81.4, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v": [ + 80.5, + 80.5, + 80.8, + 80.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 80.3, + 80.4, + 80.3, + 80.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q": [ + 81.8, + 81.9, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k": [ + 81.6, + 82.0, + 82.1, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v": [ + 81.0, + 81.2, + 80.7, + 80.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 81.6, + 81.7, + 82.0, + 
81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 80.1, + 79.8, + 80.0, + 79.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2": [ + 80.4, + 80.5, + 80.7, + 80.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q": [ + 81.0, + 81.3, + 81.4, + 81.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k": [ + 81.1, + 81.2, + 81.2, + 81.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v": [ + 80.2, + 80.3, + 80.7, + 80.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 80.1, + 80.0, + 80.4, + 79.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q": [ + 81.7, + 82.2, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k": [ + 81.8, + 82.0, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v": [ + 81.4, + 81.4, + 81.3, + 81.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 82.0, + 82.0, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 79.7, + 79.5, + 79.8, + 79.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2": [ + 80.0, + 79.6, + 80.2, + 79.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q": [ + 81.1, + 81.3, + 81.6, + 81.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k": [ + 81.2, + 81.3, + 81.7, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v": [ + 80.4, + 80.6, + 80.5, + 80.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 80.6, + 80.6, + 80.7, + 80.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q": [ + 81.8, + 82.2, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k": [ + 81.6, + 82.0, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v": [ + 81.3, + 81.7, + 81.1, + 80.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 81.8, + 81.7, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 79.6, + 79.6, + 79.8, + 79.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2": [ + 80.1, + 80.1, + 80.4, + 80.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q": [ + 81.0, + 81.0, + 81.2, + 80.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k": [ + 81.1, + 81.2, + 81.3, + 81.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v": [ + 80.5, + 80.4, + 80.8, + 80.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 80.2, + 80.4, + 80.4, + 80.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k": [ + 81.9, + 82.0, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v": [ + 81.6, + 81.6, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 81.8, + 81.9, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 79.9, + 80.0, + 80.0, + 79.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2": [ + 80.5, + 80.5, + 80.6, + 80.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q": [ + 81.2, + 81.3, + 81.3, + 80.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k": [ + 81.1, + 81.3, + 81.3, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v": [ + 80.5, + 80.0, + 80.6, + 80.3 + ], + 
"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 80.5, + 80.4, + 80.5, + 80.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q": [ + 82.0, + 82.3, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k": [ + 82.0, + 82.2, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v": [ + 81.5, + 81.8, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 81.9, + 81.7, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 79.2, + 79.3, + 79.4, + 78.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2": [ + 79.8, + 80.5, + 80.4, + 80.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q": [ + 81.2, + 81.0, + 81.1, + 81.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k": [ + 81.2, + 81.2, + 81.6, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v": [ + 80.5, + 80.5, + 80.7, + 80.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 80.5, + 80.5, + 80.8, + 80.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q": [ + 82.1, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k": [ + 81.9, + 82.1, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v": [ + 81.5, + 81.8, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 81.9, + 82.1, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 78.9, + 79.3, + 79.2, + 79.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2": [ + 80.0, + 80.3, + 80.4, + 80.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q": [ + 81.3, + 81.3, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k": [ + 81.2, + 81.5, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v": [ + 80.1, + 80.2, + 80.5, + 80.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 80.2, + 80.4, + 80.8, + 80.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q": [ + 81.8, + 82.1, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k": [ + 81.6, + 81.6, + 81.9, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v": [ + 81.8, + 82.1, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 77.1, + 76.6, + 77.3, + 77.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2": [ + 79.6, + 79.1, + 79.4, + 79.1 + ], + "up_blocks.0.attentions.1.proj_out": [ + 75.3, + 74.8, + 75.3, + 75.0 + ], + "up_blocks.0.attentions.2.proj_in": [ + 78.3, + 78.1, + 78.3, + 78.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q": [ + 81.1, + 81.1, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k": [ + 81.3, + 81.4, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v": [ + 80.0, + 79.8, + 80.1, + 80.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 77.7, + 77.6, + 77.9, + 77.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k": [ + 81.8, + 82.2, + 82.3, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v": [ + 81.8, 
+ 81.9, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 81.9, + 82.1, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 78.9, + 78.5, + 78.8, + 78.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2": [ + 79.7, + 79.1, + 79.5, + 79.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q": [ + 81.5, + 81.6, + 81.6, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k": [ + 81.7, + 82.0, + 81.8, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v": [ + 80.5, + 80.5, + 80.8, + 80.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 80.3, + 80.4, + 80.1, + 80.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q": [ + 82.0, + 82.3, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k": [ + 81.1, + 81.5, + 80.8, + 81.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v": [ + 81.1, + 80.8, + 81.2, + 81.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 81.7, + 81.7, + 81.9, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 79.6, + 79.4, + 79.8, + 79.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2": [ + 80.1, + 80.0, + 80.1, + 79.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q": [ + 81.5, + 81.6, + 81.8, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k": [ + 81.6, + 81.6, + 81.9, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v": [ + 80.4, + 80.2, + 80.5, + 80.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0": [ + 80.4, + 80.3, + 80.6, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q": [ + 81.8, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k": [ + 81.7, + 82.2, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v": [ + 81.3, + 81.6, + 81.6, + 81.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0": [ + 81.5, + 81.8, + 81.9, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj": [ + 80.3, + 80.1, + 80.4, + 79.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2": [ + 80.7, + 80.6, + 80.8, + 80.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q": [ + 81.6, + 81.7, + 81.8, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k": [ + 81.6, + 81.8, + 82.0, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v": [ + 80.4, + 80.6, + 80.6, + 80.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0": [ + 80.1, + 80.2, + 80.6, + 80.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q": [ + 82.0, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k": [ + 81.9, + 82.2, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v": [ + 81.5, + 81.7, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0": [ + 81.6, + 81.9, + 82.3, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj": [ + 80.0, + 80.2, + 80.3, + 80.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2": [ + 80.7, + 80.6, + 80.8, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q": [ + 81.2, + 81.2, + 81.6, + 81.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k": [ + 81.4, + 81.1, + 81.6, + 81.2 
+ ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v": [ + 80.4, + 80.2, + 80.2, + 80.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0": [ + 79.4, + 79.0, + 79.4, + 79.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q": [ + 82.0, + 82.3, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k": [ + 81.9, + 82.2, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v": [ + 81.7, + 81.9, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0": [ + 81.8, + 81.9, + 82.0, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj": [ + 80.5, + 80.3, + 80.3, + 80.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2": [ + 80.6, + 80.6, + 80.8, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q": [ + 81.6, + 81.8, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k": [ + 81.7, + 81.9, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v": [ + 80.8, + 80.8, + 80.8, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0": [ + 80.9, + 80.9, + 81.0, + 80.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q": [ + 81.9, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k": [ + 81.9, + 82.1, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v": [ + 82.0, + 82.0, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0": [ + 81.8, + 82.2, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj": [ + 80.0, + 79.9, + 80.2, + 80.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2": [ + 80.4, + 80.5, + 80.8, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q": [ + 81.4, + 81.6, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k": [ + 81.6, + 81.6, + 81.8, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v": [ + 80.6, + 80.7, + 80.8, + 80.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0": [ + 80.8, + 80.7, + 80.7, + 80.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q": [ + 82.0, + 82.3, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k": [ + 82.0, + 82.1, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v": [ + 82.0, + 82.1, + 82.1, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0": [ + 81.9, + 82.1, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj": [ + 80.4, + 80.3, + 80.3, + 80.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2": [ + 80.7, + 80.7, + 80.8, + 80.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q": [ + 81.4, + 81.4, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k": [ + 81.6, + 81.4, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v": [ + 80.7, + 80.6, + 80.9, + 80.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0": [ + 80.6, + 80.4, + 80.7, + 80.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k": [ + 81.9, + 82.3, + 82.2, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v": [ + 81.8, + 82.0, + 82.0, + 81.8 + ], + 
"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0": [ + 82.1, + 82.1, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj": [ + 80.5, + 80.0, + 80.6, + 80.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2": [ + 80.7, + 80.7, + 81.0, + 80.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q": [ + 81.3, + 81.5, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k": [ + 81.2, + 81.2, + 81.4, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v": [ + 80.7, + 80.6, + 80.7, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0": [ + 80.5, + 80.5, + 80.8, + 80.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q": [ + 82.3, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k": [ + 82.1, + 82.0, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v": [ + 82.0, + 82.1, + 82.3, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0": [ + 82.0, + 82.0, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj": [ + 79.7, + 78.5, + 79.0, + 78.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2": [ + 80.4, + 80.3, + 80.4, + 80.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q": [ + 81.6, + 81.6, + 81.8, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k": [ + 81.4, + 81.6, + 82.0, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v": [ + 80.3, + 80.3, + 80.5, + 80.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0": [ + 80.6, + 80.6, + 80.5, + 80.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q": [ + 81.9, + 82.1, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k": [ + 82.1, + 82.2, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v": [ + 82.0, + 82.3, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0": [ + 81.9, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj": [ + 79.8, + 79.6, + 79.8, + 79.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2": [ + 80.2, + 80.1, + 80.5, + 80.3 + ], + "up_blocks.0.attentions.2.proj_out": [ + 77.1, + 76.6, + 77.1, + 76.9 + ], + "up_blocks.0.resnets.0.conv1": [ + 80.8, + 80.9, + 80.5, + 80.8 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 81.4, + 81.2, + 81.6, + 81.4 + ], + "up_blocks.0.resnets.0.conv2": [ + 80.3, + 80.3, + 80.0, + 80.2 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 79.8, + 79.7, + 79.7, + 79.8 + ], + "up_blocks.0.resnets.1.conv1": [ + 80.7, + 80.7, + 81.0, + 80.6 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 80.4, + 80.6, + 80.8, + 80.5 + ], + "up_blocks.0.resnets.1.conv2": [ + 80.6, + 80.3, + 80.7, + 80.4 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 75.1, + 74.1, + 75.1, + 74.6 + ], + "up_blocks.0.resnets.2.conv1": [ + 80.2, + 80.2, + 80.4, + 80.0 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 80.6, + 80.8, + 80.8, + 80.8 + ], + "up_blocks.0.resnets.2.conv2": [ + 80.2, + 80.1, + 80.4, + 80.1 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 68.5, + 68.0, + 68.9, + 68.4 + ], + "up_blocks.0.upsamplers.0.conv": [ + 66.6, + 66.1, + 66.6, + 66.3 + ], + "up_blocks.1.attentions.0.proj_in": [ + 70.0, + 69.9, + 70.0, + 69.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 77.0, + 76.5, + 77.0, + 76.8 + ], + 
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 76.7, + 76.4, + 76.7, + 76.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 72.6, + 72.4, + 72.6, + 72.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 68.4, + 67.9, + 68.5, + 68.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 80.0, + 79.4, + 79.9, + 79.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 79.7, + 79.2, + 79.9, + 79.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.1, + 81.5, + 81.4, + 81.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 80.9, + 80.8, + 81.2, + 80.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 71.7, + 71.2, + 71.3, + 71.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 71.2, + 71.2, + 71.6, + 71.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 75.9, + 75.5, + 75.9, + 75.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 75.3, + 75.2, + 75.2, + 75.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 72.3, + 71.7, + 72.5, + 72.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 70.3, + 70.0, + 70.5, + 70.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 80.0, + 80.1, + 80.0, + 79.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 81.2, + 81.0, + 81.3, + 81.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 81.0, + 81.2, + 81.0, + 80.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 79.3, + 78.3, + 79.3, + 79.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 70.6, + 70.4, + 70.5, + 70.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 74.5, + 74.3, + 74.6, + 74.4 + ], + "up_blocks.1.attentions.0.proj_out": [ + 72.5, + 72.3, + 72.7, + 72.6 + ], + "up_blocks.1.attentions.1.proj_in": [ + 65.9, + 65.2, + 66.0, + 65.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 74.9, + 74.6, + 75.0, + 75.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 74.9, + 74.7, + 75.1, + 75.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 69.8, + 69.1, + 69.8, + 69.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 61.8, + 61.0, + 61.7, + 61.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 70.9, + 70.8, + 70.3, + 70.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 76.8, + 75.5, + 76.9, + 77.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 80.4, + 80.3, + 80.7, + 80.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 75.7, + 75.1, + 75.3, + 74.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 66.1, + 66.0, + 66.2, + 66.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 68.3, + 67.9, + 68.6, + 68.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 72.9, + 72.7, + 73.0, + 73.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 73.3, + 73.1, + 73.2, + 73.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 69.5, + 69.2, + 69.5, + 69.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 69.4, + 68.9, + 69.4, + 69.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 79.8, 
+ 79.7, + 79.7, + 79.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 66.0, + 65.9, + 66.4, + 66.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 79.2, + 79.8, + 79.2, + 79.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 74.5, + 74.2, + 74.1, + 74.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 66.9, + 66.0, + 66.7, + 66.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 70.8, + 70.1, + 70.7, + 70.8 + ], + "up_blocks.1.attentions.1.proj_out": [ + 68.7, + 68.4, + 68.6, + 68.9 + ], + "up_blocks.1.attentions.2.proj_in": [ + 65.0, + 64.6, + 64.9, + 64.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 74.3, + 74.0, + 74.5, + 74.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 74.4, + 74.4, + 74.3, + 74.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 71.2, + 70.7, + 71.4, + 71.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 61.8, + 60.9, + 62.1, + 62.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 64.2, + 63.6, + 63.6, + 64.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 78.4, + 78.4, + 78.3, + 78.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 78.5, + 77.5, + 78.6, + 78.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 72.7, + 71.7, + 73.0, + 73.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 63.6, + 63.4, + 63.2, + 63.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 65.4, + 65.4, + 65.7, + 65.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q": [ + 73.2, + 73.0, + 73.1, + 73.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k": [ + 74.2, + 73.9, + 74.2, + 74.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v": [ + 69.5, + 69.1, + 69.5, + 69.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 67.2, + 67.4, + 67.4, + 67.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q": [ + 74.8, + 74.9, + 74.6, + 74.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k": [ + 68.6, + 66.4, + 69.3, + 68.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v": [ + 79.7, + 79.7, + 79.7, + 79.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 74.1, + 74.9, + 73.5, + 73.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 66.0, + 65.8, + 66.2, + 66.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2": [ + 70.6, + 70.5, + 70.8, + 70.7 + ], + "up_blocks.1.attentions.2.proj_out": [ + 65.7, + 65.5, + 65.8, + 65.6 + ], + "up_blocks.1.resnets.0.conv1": [ + 74.0, + 73.3, + 73.5, + 73.6 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 73.8, + 73.5, + 73.7, + 74.0 + ], + "up_blocks.1.resnets.0.conv2": [ + 74.1, + 73.7, + 74.2, + 74.0 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 61.5, + 60.7, + 61.5, + 61.5 + ], + "up_blocks.1.resnets.1.conv1": [ + 65.0, + 64.7, + 64.8, + 64.8 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 69.7, + 69.1, + 70.1, + 69.8 + ], + "up_blocks.1.resnets.1.conv2": [ + 67.6, + 67.2, + 67.5, + 67.7 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 60.6, + 60.4, + 60.5, + 60.7 + ], + "up_blocks.1.resnets.2.conv1": [ + 64.5, + 64.0, + 64.6, + 64.6 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 71.5, + 71.6, + 71.4, + 71.5 + ], + 
"up_blocks.1.resnets.2.conv2": [ + 67.7, + 67.2, + 67.5, + 67.4 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 58.2, + 58.0, + 58.0, + 58.1 + ], + "up_blocks.1.upsamplers.0.conv": [ + 54.6, + 54.5, + 54.7, + 54.6 + ], + "up_blocks.2.resnets.0.conv1": [ + 55.4, + 55.3, + 55.4, + 55.4 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 62.6, + 62.2, + 62.7, + 62.7 + ], + "up_blocks.2.resnets.0.conv2": [ + 54.8, + 54.5, + 54.9, + 54.8 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 52.3, + 52.4, + 52.4, + 52.4 + ], + "up_blocks.2.resnets.1.conv1": [ + 51.8, + 51.8, + 52.0, + 52.0 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 61.5, + 61.6, + 61.9, + 61.8 + ], + "up_blocks.2.resnets.1.conv2": [ + 51.4, + 51.5, + 51.5, + 51.6 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 52.5, + 52.5, + 52.6, + 52.5 + ], + "up_blocks.2.resnets.2.conv1": [ + 50.1, + 50.1, + 50.3, + 50.2 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 60.3, + 60.3, + 60.6, + 60.4 + ], + "up_blocks.2.resnets.2.conv2": [ + 48.8, + 48.9, + 48.8, + 48.8 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 54.5, + 54.6, + 54.6, + 54.6 + ], + "mid_block.attentions.0.proj_in": [ + 80.2, + 79.8, + 79.6, + 79.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 80.3, + 80.2, + 80.4, + 80.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 80.3, + 80.3, + 80.3, + 80.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 80.6, + 80.6, + 80.7, + 80.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 80.5, + 80.5, + 80.6, + 80.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 81.9, + 82.1, + 82.4, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 82.0, + 82.3, + 82.5, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 80.9, + 80.5, + 80.8, + 80.7 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 81.2, + 81.0, + 81.2, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q": [ + 80.9, + 80.4, + 80.6, + 80.6 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k": [ + 80.8, + 80.5, + 80.5, + 80.7 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v": [ + 80.3, + 80.1, + 80.5, + 80.6 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 80.4, + 80.3, + 80.7, + 80.6 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k": [ + 82.0, + 82.2, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.1, + 82.4, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 82.0, + 82.4, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 81.2, + 80.9, + 81.3, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.2": [ + 81.3, + 81.2, + 81.3, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q": [ + 81.4, + 81.4, + 81.5, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k": [ + 81.5, + 81.3, + 81.7, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v": [ + 81.2, + 81.2, + 81.5, + 81.1 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 81.4, + 81.5, + 
81.7, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q": [ + 82.0, + 82.4, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k": [ + 81.9, + 82.3, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v": [ + 81.9, + 82.3, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 81.8, + 82.3, + 82.2, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 81.3, + 81.1, + 81.3, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.2": [ + 81.6, + 81.6, + 81.7, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q": [ + 81.4, + 81.4, + 81.7, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k": [ + 81.6, + 81.7, + 81.7, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v": [ + 81.5, + 81.4, + 81.1, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 81.4, + 81.4, + 81.6, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k": [ + 82.0, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 81.0, + 81.1, + 81.1, + 81.1 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.2": [ + 81.3, + 81.4, + 81.6, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q": [ + 81.0, + 80.8, + 80.9, + 81.1 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k": [ + 81.2, + 81.0, + 80.9, + 80.9 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v": [ + 81.2, + 80.6, + 80.8, + 81.0 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 81.0, + 80.4, + 80.8, + 81.0 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 82.2, + 82.4, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 81.2, + 81.3, + 81.6, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.2": [ + 81.4, + 81.4, + 81.9, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q": [ + 81.6, + 81.8, + 81.8, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k": [ + 81.6, + 81.8, + 81.7, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v": [ + 81.5, + 81.6, + 81.8, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 81.6, + 81.5, + 81.6, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_q": [ + 82.3, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k": [ + 82.1, + 82.4, + 82.4, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 81.3, + 81.1, + 81.4, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.2": [ + 81.6, + 
81.9, + 81.9, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q": [ + 81.9, + 81.9, + 82.2, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k": [ + 81.8, + 82.0, + 82.1, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v": [ + 81.7, + 82.0, + 82.1, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 81.8, + 81.8, + 81.9, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q": [ + 82.1, + 82.4, + 82.4, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 82.0, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 81.3, + 81.4, + 81.7, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.2": [ + 81.6, + 81.7, + 82.0, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q": [ + 81.7, + 81.9, + 82.0, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k": [ + 81.8, + 81.8, + 81.8, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v": [ + 81.9, + 81.9, + 82.2, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 81.5, + 81.6, + 81.7, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.0, + 82.1, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v": [ + 82.1, + 82.4, + 82.2, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 81.3, + 81.3, + 81.6, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.2": [ + 81.5, + 81.7, + 81.7, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q": [ + 81.9, + 82.1, + 82.1, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k": [ + 81.9, + 82.0, + 82.1, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_v": [ + 81.7, + 81.6, + 81.9, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 81.7, + 81.8, + 82.0, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 82.1, + 82.2, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 81.3, + 81.6, + 81.7, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.2": [ + 81.6, + 81.7, + 81.8, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q": [ + 81.8, + 82.0, + 82.1, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k": [ + 81.8, + 82.1, + 82.1, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v": [ + 81.4, + 81.4, + 81.7, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 81.6, + 81.9, + 81.9, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q": [ + 82.0, + 82.1, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k": [ + 
82.3, + 82.4, + 82.3, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v": [ + 81.9, + 82.2, + 82.3, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 81.4, + 81.1, + 81.6, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.9.ff.net.2": [ + 81.5, + 81.9, + 81.8, + 81.7 + ], + "mid_block.attentions.0.proj_out": [ + 80.5, + 80.5, + 80.4, + 80.5 + ], + "mid_block.resnets.0.conv1": [ + 80.8, + 80.7, + 80.8, + 81.0 + ], + "mid_block.resnets.0.time_emb_proj": [ + 81.3, + 81.3, + 81.1, + 81.0 + ], + "mid_block.resnets.0.conv2": [ + 80.7, + 80.9, + 80.9, + 80.7 + ], + "mid_block.resnets.1.conv1": [ + 81.1, + 81.0, + 80.6, + 81.3 + ], + "mid_block.resnets.1.time_emb_proj": [ + 81.4, + 81.6, + 81.5, + 81.4 + ], + "mid_block.resnets.1.conv2": [ + 81.1, + 81.1, + 81.2, + 81.2 + ] + }, + "4": { + "time_embedding.linear_1": [ + 79.1, + 78.9, + 79.2, + 79.0 + ], + "time_embedding.linear_2": [ + 69.8, + 69.6, + 69.3, + 69.4 + ], + "add_embedding.linear_1": [ + 79.5, + 79.6, + 79.3, + 79.8 + ], + "add_embedding.linear_2": [ + 79.6, + 79.4, + 79.8, + 79.3 + ], + "down_blocks.0.resnets.0.conv1": [ + 73.7, + 74.2, + 73.6, + 74.2 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 76.5, + 77.2, + 76.6, + 76.1 + ], + "down_blocks.0.resnets.0.conv2": [ + 66.7, + 66.6, + 66.8, + 66.8 + ], + "down_blocks.0.resnets.1.conv1": [ + 68.8, + 68.1, + 68.7, + 68.6 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 77.7, + 77.2, + 78.1, + 77.6 + ], + "down_blocks.0.resnets.1.conv2": [ + 67.6, + 67.2, + 67.3, + 67.8 + ], + "down_blocks.0.downsamplers.0.conv": [ + 71.3, + 70.9, + 71.3, + 71.1 + ], + "down_blocks.1.attentions.0.proj_in": [ + 74.2, + 73.8, + 73.8, + 74.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 80.7, + 80.7, + 81.0, + 80.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 80.7, + 80.9, + 80.7, + 80.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 79.0, + 78.9, + 79.0, + 79.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 76.8, + 77.3, + 76.9, + 77.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 81.7, + 82.2, + 82.1, + 82.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 81.7, + 81.8, + 82.0, + 81.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 77.9, + 80.2, + 77.4, + 77.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.4, + 80.4, + 81.1, + 81.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 74.2, + 73.5, + 74.3, + 74.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 75.6, + 75.6, + 75.2, + 75.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 80.1, + 80.3, + 80.4, + 80.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 80.4, + 80.4, + 80.5, + 80.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 78.6, + 78.7, + 78.9, + 79.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 77.9, + 78.0, + 77.7, + 78.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 80.7, + 80.4, + 80.5, + 80.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 76.8, + 76.2, + 76.2, + 76.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 79.4, + 79.2, + 
79.4, + 79.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 79.0, + 78.4, + 79.1, + 79.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 74.1, + 73.9, + 74.2, + 74.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 78.0, + 78.0, + 78.2, + 78.3 + ], + "down_blocks.1.attentions.0.proj_out": [ + 77.4, + 76.9, + 77.2, + 77.3 + ], + "down_blocks.1.attentions.1.proj_in": [ + 78.5, + 78.6, + 78.8, + 78.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 80.8, + 81.1, + 81.4, + 81.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 81.0, + 81.1, + 81.4, + 81.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 80.6, + 80.4, + 80.7, + 80.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 80.0, + 79.9, + 80.1, + 79.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 81.7, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 82.0, + 81.8, + 82.2, + 82.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 81.5, + 81.8, + 81.8, + 81.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 81.5, + 81.8, + 82.0, + 81.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 78.9, + 78.7, + 78.8, + 78.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 79.6, + 79.4, + 79.7, + 79.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 81.2, + 81.2, + 81.5, + 81.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 81.3, + 81.3, + 81.4, + 81.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 80.5, + 80.9, + 80.9, + 80.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 80.1, + 80.1, + 80.5, + 80.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 81.8, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 81.9, + 82.0, + 82.3, + 81.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 81.6, + 81.6, + 81.9, + 81.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 81.4, + 80.9, + 81.7, + 81.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 79.0, + 78.8, + 79.1, + 79.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 79.8, + 79.9, + 80.1, + 79.9 + ], + "down_blocks.1.attentions.1.proj_out": [ + 79.5, + 79.5, + 79.6, + 79.5 + ], + "down_blocks.1.resnets.0.conv1": [ + 75.7, + 75.6, + 75.4, + 75.5 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 80.7, + 80.5, + 81.0, + 80.7 + ], + "down_blocks.1.resnets.0.conv2": [ + 79.8, + 79.6, + 79.7, + 79.6 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 72.1, + 71.9, + 72.3, + 72.4 + ], + "down_blocks.1.resnets.1.conv1": [ + 78.2, + 78.2, + 78.6, + 78.2 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 80.0, + 79.7, + 80.1, + 79.9 + ], + "down_blocks.1.resnets.1.conv2": [ + 79.9, + 79.9, + 80.1, + 79.9 + ], + "down_blocks.1.downsamplers.0.conv": [ + 76.9, + 75.9, + 76.4, + 75.9 + ], + "down_blocks.2.attentions.0.proj_in": [ + 80.9, + 81.0, + 81.4, + 81.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 82.0, + 81.9, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 81.7, + 81.7, + 82.0, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 82.1, + 82.2, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 82.1, + 82.2, + 82.5, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 81.7, + 81.8, + 81.8, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 81.9, + 82.1, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q": [ + 82.3, + 82.4, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k": [ + 82.2, + 82.1, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v": [ + 81.9, + 82.2, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 81.7, + 82.0, + 82.1, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.1, + 82.2, + 82.2, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 81.9, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 81.7, + 81.7, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2": [ + 81.8, + 82.0, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k": [ + 82.0, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v": [ + 81.7, + 82.0, + 82.1, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 81.8, + 82.1, + 82.1, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q": [ + 82.2, + 82.2, + 82.5, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k": [ + 82.2, + 82.1, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v": [ + 81.9, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 81.7, + 81.8, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2": [ + 81.7, + 81.8, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q": [ + 81.8, + 82.0, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k": [ + 81.9, + 82.2, + 82.5, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v": [ + 81.7, + 82.0, + 82.1, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 81.8, + 81.9, + 82.0, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q": [ + 82.1, + 82.2, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 81.6, + 82.1, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2": [ + 81.7, + 81.7, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k": [ + 82.0, + 82.1, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v": [ + 81.8, + 82.1, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 81.9, + 82.0, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q": [ + 82.2, + 82.4, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 81.7, + 82.1, + 81.9, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2": [ + 81.7, + 81.8, + 82.1, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q": [ + 82.0, + 82.0, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k": [ + 82.0, + 82.1, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v": [ + 81.8, + 82.0, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 81.8, + 82.1, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k": [ + 82.2, + 82.4, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.1, + 82.4, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 81.7, + 81.8, + 81.9, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2": [ + 81.7, + 82.0, + 82.2, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v": [ + 81.8, + 82.2, + 82.2, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 81.9, + 82.2, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v": [ + 82.1, + 82.3, + 82.5, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 81.6, + 81.8, + 81.8, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2": [ + 81.8, + 81.6, + 82.1, + 81.6 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v": [ + 82.0, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 81.8, + 82.1, + 82.1, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.2, + 82.2, + 82.2, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 81.8, + 81.9, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2": [ + 81.6, + 81.8, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q": [ + 82.2, + 82.2, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k": [ + 82.1, + 82.2, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v": [ + 82.0, + 81.9, + 82.1, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 82.1, + 82.1, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v": [ + 82.1, + 82.3, + 82.3, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 81.7, + 81.8, + 82.1, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2": [ + 82.1, + 82.2, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v": [ + 81.8, + 82.2, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 81.8, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k": [ + 82.1, + 82.4, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v": [ + 81.9, + 81.9, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 81.9, + 81.6, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 81.6, + 81.8, + 82.0, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2": [ + 81.7, + 82.0, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.proj_out": [ + 81.3, + 81.4, + 81.6, + 81.2 + ], + "down_blocks.2.attentions.1.proj_in": [ + 81.8, + 82.0, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 81.9, + 82.5, + 82.3, + 82.0 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 81.9, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.4, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 82.0, + 82.3, + 82.5, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 81.7, + 81.9, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 82.1, + 82.2, + 82.1, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q": [ + 81.9, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v": [ + 81.8, + 82.3, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 82.1, + 82.1, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k": [ + 82.3, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v": [ + 82.0, + 82.2, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 81.8, + 82.1, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2": [ + 81.9, + 82.1, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v": [ + 82.0, + 82.2, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k": [ + 82.4, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 82.0, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2": [ + 81.9, + 82.1, + 82.3, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k": [ + 82.3, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 81.7, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 82.0, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 81.9, + 82.3, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2": [ + 82.1, + 82.3, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v": [ + 82.0, + 82.3, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 81.9, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 82.3, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 82.0, + 82.1, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2": [ + 81.9, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q": [ + 82.1, + 82.3, + 82.6, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v": [ + 82.2, + 82.1, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 81.7, + 81.9, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2": [ + 81.9, + 82.3, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v": [ + 82.0, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 82.1, + 82.3, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q": [ + 81.8, + 82.4, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 82.0, + 82.1, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 81.7, + 82.2, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2": [ + 81.8, + 82.1, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q": [ + 82.3, + 82.2, + 82.5, + 82.2 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v": [ + 82.2, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 82.1, + 82.1, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q": [ + 81.8, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 82.2, + 82.2, + 82.4, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 82.0, + 82.2, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2": [ + 82.1, + 82.2, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v": [ + 82.1, + 82.3, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v": [ + 82.2, + 82.3, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 81.9, + 82.1, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2": [ + 81.8, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v": [ + 82.1, + 82.4, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 81.9, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v": [ + 82.1, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 81.7, + 82.0, + 82.0, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.proj_out": [ + 81.7, + 81.8, + 82.0, + 81.5 + ], + "down_blocks.2.resnets.0.conv1": [ + 81.5, + 81.5, + 81.8, + 81.6 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 81.7, + 81.5, + 82.1, + 81.7 + ], + "down_blocks.2.resnets.0.conv2": [ + 81.5, + 81.6, + 81.9, + 81.5 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 78.4, + 77.8, + 78.5, + 77.9 + ], + "down_blocks.2.resnets.1.conv1": [ + 81.5, + 81.7, + 81.9, + 81.5 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 82.2, + 82.1, + 82.1, + 82.1 + ], + 
"down_blocks.2.resnets.1.conv2": [ + 81.9, + 81.9, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.0.proj_in": [ + 81.7, + 81.7, + 81.4, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 81.7, + 82.0, + 81.9, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 81.7, + 82.1, + 82.1, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 82.0, + 82.1, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 81.7, + 81.8, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.3, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 82.1, + 82.3, + 82.5, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 81.8, + 81.9, + 81.5, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 81.8, + 81.9, + 81.8, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k": [ + 82.2, + 82.3, + 82.2, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v": [ + 81.9, + 82.0, + 82.2, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 81.9, + 82.2, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k": [ + 82.0, + 82.2, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 81.9, + 82.0, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2": [ + 82.0, + 82.1, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k": [ + 82.1, + 82.4, + 82.5, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v": [ + 81.7, + 82.1, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 81.8, + 81.7, + 81.9, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q": [ + 82.0, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k": [ + 81.9, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v": [ + 81.8, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 81.8, + 82.3, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2": [ + 82.0, + 82.2, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q": [ + 81.9, + 82.3, + 82.2, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k": [ + 81.7, + 82.3, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v": [ + 81.8, + 81.9, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 81.7, + 
81.7, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 81.9, + 82.1, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v": [ + 81.7, + 82.3, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 81.6, + 81.5, + 81.9, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k": [ + 82.0, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v": [ + 82.3, + 82.3, + 82.3, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 81.8, + 82.1, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2": [ + 82.1, + 82.3, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v": [ + 81.8, + 82.2, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 81.8, + 81.6, + 81.8, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 81.9, + 82.0, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2": [ + 82.0, + 82.1, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k": [ + 82.3, + 82.1, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v": [ + 81.7, + 82.2, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 81.7, + 81.9, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v": [ + 82.3, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 82.1, + 82.0, + 82.3, + 82.0 
+ ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2": [ + 81.9, + 82.3, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k": [ + 82.3, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v": [ + 81.9, + 82.1, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 81.8, + 82.0, + 82.2, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q": [ + 82.2, + 82.4, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v": [ + 82.1, + 82.4, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 81.8, + 82.0, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2": [ + 81.9, + 82.0, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q": [ + 82.2, + 82.2, + 82.5, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v": [ + 81.9, + 82.3, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 81.9, + 82.1, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2": [ + 82.0, + 82.3, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v": [ + 81.9, + 82.1, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 81.6, + 81.9, + 82.0, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v": [ + 82.3, + 82.3, + 82.3, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 81.7, + 81.9, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2": [ + 81.7, + 81.9, + 82.1, + 81.6 + ], + "up_blocks.0.attentions.0.proj_out": [ + 81.3, + 81.5, + 81.8, + 81.4 + ], + "up_blocks.0.attentions.1.proj_in": [ + 80.1, + 80.1, + 80.3, + 80.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 82.1, + 82.1, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 82.1, + 82.2, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 81.3, 
+ 81.4, + 81.6, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 80.6, + 80.6, + 80.8, + 80.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 82.3, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 81.8, + 82.0, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 81.8, + 82.2, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 81.2, + 81.2, + 81.4, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 81.1, + 81.2, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q": [ + 82.1, + 82.1, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v": [ + 81.5, + 81.4, + 81.8, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 81.4, + 81.2, + 81.4, + 81.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v": [ + 82.0, + 82.1, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 82.1, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 81.0, + 81.1, + 81.4, + 81.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2": [ + 81.2, + 81.2, + 81.4, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k": [ + 81.9, + 82.0, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v": [ + 81.3, + 81.4, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 81.3, + 81.5, + 81.8, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v": [ + 82.0, + 82.1, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 81.0, + 81.2, + 81.3, + 80.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2": [ + 81.1, + 81.0, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q": [ + 81.7, + 82.0, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k": [ + 82.0, + 82.1, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v": [ + 81.5, + 81.4, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 81.4, + 81.4, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k": [ + 82.0, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v": [ + 82.0, + 82.1, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 82.0, + 82.3, + 82.3, + 
82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 81.2, + 81.1, + 81.4, + 81.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2": [ + 81.6, + 81.5, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q": [ + 81.9, + 82.1, + 82.4, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k": [ + 81.8, + 82.1, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v": [ + 81.3, + 81.4, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 81.4, + 81.4, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q": [ + 82.1, + 82.2, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k": [ + 82.0, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v": [ + 82.0, + 82.1, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 81.3, + 81.3, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2": [ + 81.4, + 81.4, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q": [ + 82.1, + 82.1, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k": [ + 81.9, + 81.9, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v": [ + 81.2, + 81.3, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 81.3, + 81.5, + 81.9, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k": [ + 81.9, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 82.0, + 82.2, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 81.3, + 81.3, + 81.3, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2": [ + 81.2, + 81.3, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q": [ + 81.7, + 82.1, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k": [ + 82.0, + 82.1, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v": [ + 81.5, + 81.4, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 81.3, + 81.5, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v": [ + 82.3, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 82.1, + 82.2, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 81.1, + 81.1, + 81.3, + 81.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2": [ + 81.4, + 81.3, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q": [ + 81.9, + 82.0, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k": [ + 81.9, + 82.0, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v": [ + 81.3, + 81.3, + 81.6, + 81.3 + ], + 
"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 81.4, + 81.3, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q": [ + 81.9, + 82.2, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v": [ + 81.8, + 82.3, + 82.5, + 82.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 80.9, + 81.1, + 81.1, + 80.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2": [ + 81.0, + 81.4, + 81.7, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q": [ + 81.8, + 82.2, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k": [ + 81.9, + 82.1, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v": [ + 81.3, + 81.2, + 81.4, + 81.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 81.6, + 81.5, + 81.7, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k": [ + 81.9, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 81.0, + 81.1, + 81.4, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2": [ + 81.3, + 81.5, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q": [ + 81.8, + 82.0, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k": [ + 81.7, + 82.0, + 82.3, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v": [ + 81.3, + 81.4, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 81.2, + 81.1, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v": [ + 81.8, + 82.3, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 80.3, + 80.6, + 80.8, + 80.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2": [ + 81.2, + 80.6, + 81.1, + 80.9 + ], + "up_blocks.0.attentions.1.proj_out": [ + 80.3, + 80.4, + 80.8, + 80.3 + ], + "up_blocks.0.attentions.2.proj_in": [ + 80.7, + 80.6, + 80.6, + 80.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q": [ + 81.9, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k": [ + 81.9, + 82.2, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v": [ + 81.1, + 81.2, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 80.0, + 80.3, + 80.4, + 80.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k": [ + 82.0, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v": [ + 81.7, 
+ 82.2, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 81.9, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 81.1, + 81.2, + 81.4, + 81.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2": [ + 81.0, + 80.9, + 81.3, + 81.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q": [ + 81.7, + 82.3, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k": [ + 82.0, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v": [ + 81.3, + 81.3, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 81.1, + 81.3, + 81.5, + 81.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v": [ + 81.8, + 82.1, + 82.0, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 81.9, + 82.1, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 81.3, + 81.4, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2": [ + 81.1, + 81.1, + 81.5, + 81.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q": [ + 81.9, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k": [ + 81.7, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v": [ + 81.3, + 81.3, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0": [ + 81.2, + 81.5, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v": [ + 81.8, + 82.2, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0": [ + 81.7, + 82.2, + 82.4, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj": [ + 81.3, + 81.5, + 81.6, + 81.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2": [ + 81.4, + 81.6, + 81.5, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q": [ + 82.1, + 82.0, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k": [ + 81.8, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v": [ + 81.2, + 81.3, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0": [ + 81.0, + 81.0, + 81.4, + 81.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k": [ + 82.2, + 82.2, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v": [ + 82.0, + 82.1, + 82.1, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj": [ + 81.3, + 81.5, + 81.5, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2": [ + 81.6, + 81.4, + 81.8, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q": [ + 82.2, + 82.1, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k": [ + 82.0, + 82.2, + 82.5, + 82.1 
+ ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v": [ + 81.5, + 81.3, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0": [ + 80.9, + 81.0, + 81.1, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k": [ + 82.0, + 82.1, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v": [ + 82.2, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0": [ + 82.0, + 82.3, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj": [ + 81.4, + 81.4, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2": [ + 81.5, + 81.6, + 81.9, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q": [ + 82.0, + 82.3, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k": [ + 82.0, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v": [ + 81.3, + 81.3, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0": [ + 81.4, + 81.4, + 81.7, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0": [ + 82.2, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj": [ + 81.2, + 81.2, + 81.5, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2": [ + 81.4, + 81.2, + 81.7, + 81.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q": [ + 81.9, + 82.2, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k": [ + 81.9, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v": [ + 81.4, + 81.3, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0": [ + 81.5, + 81.6, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v": [ + 82.1, + 82.1, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj": [ + 81.3, + 81.1, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2": [ + 81.6, + 81.5, + 81.9, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q": [ + 82.1, + 82.2, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v": [ + 81.6, + 81.6, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0": [ + 81.2, + 81.2, + 81.2, + 81.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q": [ + 82.2, + 82.2, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k": [ + 81.8, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + 
"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0": [ + 82.2, + 82.4, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj": [ + 81.3, + 81.5, + 81.5, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2": [ + 81.5, + 81.5, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q": [ + 81.9, + 82.1, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k": [ + 82.0, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v": [ + 81.3, + 81.3, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0": [ + 81.5, + 81.5, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k": [ + 82.0, + 82.1, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v": [ + 82.1, + 82.3, + 82.6, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0": [ + 82.1, + 82.1, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj": [ + 81.2, + 81.2, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2": [ + 81.4, + 81.5, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q": [ + 81.9, + 82.2, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k": [ + 81.9, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v": [ + 81.2, + 81.2, + 81.6, + 81.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0": [ + 81.5, + 81.5, + 81.5, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k": [ + 82.1, + 82.3, + 82.5, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v": [ + 82.2, + 82.4, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0": [ + 82.2, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj": [ + 81.0, + 80.9, + 81.4, + 80.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2": [ + 81.0, + 81.0, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.2.proj_out": [ + 80.5, + 80.4, + 80.7, + 80.3 + ], + "up_blocks.0.resnets.0.conv1": [ + 81.4, + 81.7, + 81.6, + 81.5 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 81.9, + 82.2, + 82.2, + 81.9 + ], + "up_blocks.0.resnets.0.conv2": [ + 81.7, + 81.9, + 81.8, + 81.8 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 81.4, + 81.3, + 81.5, + 81.3 + ], + "up_blocks.0.resnets.1.conv1": [ + 81.7, + 81.6, + 81.9, + 81.5 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 81.7, + 81.5, + 81.8, + 81.5 + ], + "up_blocks.0.resnets.1.conv2": [ + 81.5, + 81.4, + 81.7, + 81.5 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 79.3, + 79.4, + 79.5, + 79.4 + ], + "up_blocks.0.resnets.2.conv1": [ + 81.2, + 81.3, + 81.3, + 81.2 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 81.6, + 81.7, + 81.8, + 81.4 + ], + "up_blocks.0.resnets.2.conv2": [ + 81.4, + 81.5, + 81.6, + 81.4 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 75.1, + 75.1, + 75.4, + 74.9 + ], + "up_blocks.0.upsamplers.0.conv": [ + 74.5, + 74.1, + 74.4, + 74.0 + ], + "up_blocks.1.attentions.0.proj_in": [ + 77.6, + 77.3, + 77.6, + 77.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 80.8, + 80.6, + 81.2, + 80.9 + ], + 
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 80.8, + 80.7, + 81.0, + 80.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 79.4, + 79.4, + 79.5, + 79.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 74.6, + 74.4, + 75.0, + 74.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 81.9, + 82.1, + 82.2, + 82.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 81.7, + 81.5, + 82.1, + 81.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 82.0, + 82.1, + 82.0, + 81.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.7, + 81.8, + 82.1, + 81.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 78.7, + 78.6, + 78.8, + 78.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 77.0, + 76.9, + 77.6, + 77.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 80.4, + 80.5, + 80.5, + 80.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 80.3, + 80.3, + 80.5, + 80.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 79.2, + 79.3, + 79.5, + 79.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 76.5, + 76.3, + 77.0, + 76.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 81.8, + 82.2, + 82.1, + 82.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 82.0, + 82.3, + 82.3, + 81.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 81.9, + 82.1, + 82.1, + 81.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 81.7, + 81.6, + 82.0, + 81.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 78.7, + 78.4, + 78.9, + 78.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 79.9, + 79.7, + 79.8, + 79.6 + ], + "up_blocks.1.attentions.0.proj_out": [ + 79.0, + 79.2, + 79.3, + 79.1 + ], + "up_blocks.1.attentions.1.proj_in": [ + 75.1, + 75.2, + 75.2, + 75.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 80.4, + 80.3, + 80.6, + 80.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 80.5, + 80.4, + 80.7, + 80.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 78.6, + 78.3, + 78.6, + 78.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 68.5, + 67.9, + 68.5, + 68.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 81.8, + 81.5, + 81.9, + 81.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 80.9, + 80.7, + 80.9, + 80.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 81.8, + 82.0, + 81.9, + 81.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 81.0, + 81.1, + 81.1, + 80.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 75.7, + 75.6, + 75.8, + 75.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 75.5, + 75.3, + 75.8, + 75.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 79.6, + 79.6, + 79.8, + 79.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 79.6, + 79.7, + 79.9, + 79.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 78.2, + 78.1, + 78.2, + 78.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 76.5, + 76.3, + 76.6, + 76.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 81.8, 
+ 82.1, + 82.1, + 81.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 81.7, + 81.7, + 81.9, + 81.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 81.6, + 81.6, + 81.4, + 81.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 80.9, + 81.0, + 81.1, + 80.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 76.8, + 76.5, + 76.8, + 77.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 78.6, + 78.7, + 78.9, + 78.8 + ], + "up_blocks.1.attentions.1.proj_out": [ + 77.5, + 77.5, + 77.8, + 77.7 + ], + "up_blocks.1.attentions.2.proj_in": [ + 74.1, + 74.1, + 74.2, + 74.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 80.3, + 80.2, + 80.7, + 80.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 80.7, + 80.5, + 80.7, + 80.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 78.6, + 78.6, + 78.8, + 78.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 74.6, + 74.1, + 74.7, + 74.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 79.6, + 79.0, + 80.0, + 79.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 81.1, + 80.8, + 81.5, + 81.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 81.0, + 80.6, + 81.4, + 81.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 79.6, + 79.6, + 80.2, + 80.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 75.0, + 74.5, + 75.0, + 75.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 74.7, + 74.7, + 75.1, + 75.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q": [ + 79.5, + 79.4, + 79.8, + 79.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k": [ + 79.7, + 80.2, + 80.3, + 80.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v": [ + 76.8, + 76.6, + 77.0, + 76.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 77.0, + 76.8, + 77.1, + 77.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q": [ + 81.1, + 81.2, + 81.3, + 81.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k": [ + 74.1, + 71.3, + 75.7, + 75.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v": [ + 81.5, + 81.9, + 82.0, + 81.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 81.0, + 81.3, + 81.1, + 80.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 75.4, + 75.3, + 75.5, + 75.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2": [ + 77.6, + 77.8, + 78.1, + 78.0 + ], + "up_blocks.1.attentions.2.proj_out": [ + 75.9, + 75.8, + 76.0, + 75.9 + ], + "up_blocks.1.resnets.0.conv1": [ + 80.2, + 80.2, + 80.2, + 80.1 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 79.8, + 79.7, + 79.9, + 79.8 + ], + "up_blocks.1.resnets.0.conv2": [ + 79.7, + 79.9, + 79.9, + 79.7 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 73.9, + 73.5, + 73.9, + 73.7 + ], + "up_blocks.1.resnets.1.conv1": [ + 74.0, + 73.8, + 74.0, + 73.9 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 79.9, + 79.8, + 80.2, + 79.8 + ], + "up_blocks.1.resnets.1.conv2": [ + 76.9, + 76.9, + 76.9, + 76.6 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 71.1, + 70.9, + 70.7, + 71.0 + ], + "up_blocks.1.resnets.2.conv1": [ + 73.2, + 73.0, + 73.1, + 73.5 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 80.2, + 80.2, + 80.3, + 80.1 + ], + 
"up_blocks.1.resnets.2.conv2": [ + 77.5, + 77.3, + 77.7, + 77.5 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 72.0, + 71.7, + 71.8, + 72.0 + ], + "up_blocks.1.upsamplers.0.conv": [ + 67.6, + 67.7, + 67.6, + 67.5 + ], + "up_blocks.2.resnets.0.conv1": [ + 64.7, + 64.2, + 64.7, + 64.7 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 78.3, + 78.2, + 78.5, + 78.4 + ], + "up_blocks.2.resnets.0.conv2": [ + 64.8, + 64.3, + 65.0, + 65.0 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 66.6, + 66.6, + 66.7, + 66.4 + ], + "up_blocks.2.resnets.1.conv1": [ + 60.4, + 60.3, + 60.5, + 60.3 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 77.6, + 77.6, + 76.8, + 77.0 + ], + "up_blocks.2.resnets.1.conv2": [ + 59.8, + 59.9, + 59.9, + 59.9 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 66.0, + 66.0, + 66.1, + 66.0 + ], + "up_blocks.2.resnets.2.conv1": [ + 61.7, + 61.7, + 61.9, + 61.9 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 75.9, + 75.0, + 75.7, + 75.4 + ], + "up_blocks.2.resnets.2.conv2": [ + 65.7, + 65.5, + 65.7, + 65.7 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 71.5, + 71.7, + 71.6, + 71.6 + ], + "mid_block.attentions.0.proj_in": [ + 81.4, + 81.7, + 81.7, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 81.8, + 81.9, + 81.9, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 81.8, + 81.8, + 81.9, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 81.7, + 81.8, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 81.9, + 81.9, + 82.0, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.2, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.3, + 82.2, + 82.5, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 81.8, + 82.0, + 82.2, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 81.9, + 82.0, + 82.0, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q": [ + 81.7, + 82.0, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k": [ + 81.8, + 81.8, + 81.9, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v": [ + 81.7, + 82.1, + 82.0, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 81.7, + 81.9, + 81.8, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.1, + 82.4, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k": [ + 82.3, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 81.7, + 82.0, + 82.1, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.2": [ + 82.2, + 81.9, + 82.3, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q": [ + 81.8, + 82.1, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k": [ + 82.0, + 82.2, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v": [ + 81.9, + 82.0, + 82.1, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 81.9, + 82.2, + 
82.0, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k": [ + 82.1, + 82.4, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 81.9, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 82.0, + 81.9, + 82.2, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.2": [ + 81.9, + 82.2, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k": [ + 82.1, + 82.1, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v": [ + 81.9, + 82.0, + 82.2, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 81.8, + 82.3, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 81.9, + 82.1, + 82.4, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 81.7, + 82.0, + 82.1, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.2": [ + 81.9, + 81.9, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q": [ + 81.9, + 81.9, + 82.2, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k": [ + 81.9, + 82.1, + 82.0, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v": [ + 81.9, + 81.9, + 82.0, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 81.8, + 81.7, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_q": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 81.7, + 82.0, + 82.1, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.2": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q": [ + 82.2, + 82.4, + 82.5, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k": [ + 82.1, + 82.2, + 82.2, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v": [ + 81.9, + 82.2, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 82.0, + 82.2, + 82.3, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_q": [ + 82.1, + 82.4, + 82.4, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 81.8, + 82.1, + 82.2, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.2": [ + 82.0, + 
82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k": [ + 82.1, + 82.3, + 82.4, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v": [ + 82.1, + 82.2, + 82.2, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 81.9, + 82.1, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 82.3, + 82.3, + 82.3, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 81.9, + 82.1, + 82.1, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.2": [ + 82.2, + 82.2, + 82.4, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q": [ + 82.0, + 82.2, + 82.5, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v": [ + 82.0, + 82.2, + 82.3, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 82.1, + 82.2, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.0, + 82.3, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 82.0, + 82.0, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.2": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_v": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 81.8, + 82.1, + 82.3, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v": [ + 82.1, + 82.4, + 82.4, + 82.4 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 81.8, + 82.2, + 82.1, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.2": [ + 81.9, + 82.3, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q": [ + 81.9, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k": [ + 82.2, + 82.2, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v": [ + 81.9, + 82.1, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 82.1, + 82.2, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k": [ + 
82.2, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 81.7, + 81.9, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.9.ff.net.2": [ + 81.9, + 82.2, + 82.3, + 81.9 + ], + "mid_block.attentions.0.proj_out": [ + 81.8, + 81.9, + 82.0, + 81.8 + ], + "mid_block.resnets.0.conv1": [ + 81.8, + 81.7, + 82.0, + 81.7 + ], + "mid_block.resnets.0.time_emb_proj": [ + 81.8, + 81.9, + 82.1, + 81.8 + ], + "mid_block.resnets.0.conv2": [ + 81.9, + 81.8, + 82.0, + 81.7 + ], + "mid_block.resnets.1.conv1": [ + 81.6, + 82.0, + 81.9, + 81.8 + ], + "mid_block.resnets.1.time_emb_proj": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "mid_block.resnets.1.conv2": [ + 81.7, + 81.7, + 81.9, + 81.7 + ] + }, + "6": { + "time_embedding.linear_1": [ + 80.6, + 80.6, + 80.9, + 80.5 + ], + "time_embedding.linear_2": [ + 80.2, + 80.3, + 80.5, + 80.3 + ], + "add_embedding.linear_1": [ + 80.8, + 81.0, + 80.5, + 80.6 + ], + "add_embedding.linear_2": [ + 81.0, + 80.7, + 80.7, + 81.0 + ], + "down_blocks.0.resnets.0.conv1": [ + 79.5, + 79.5, + 79.8, + 79.8 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 80.1, + 80.4, + 80.4, + 80.5 + ], + "down_blocks.0.resnets.0.conv2": [ + 76.2, + 76.2, + 76.2, + 76.2 + ], + "down_blocks.0.resnets.1.conv1": [ + 76.4, + 76.5, + 76.6, + 76.9 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 80.8, + 80.7, + 81.2, + 80.4 + ], + "down_blocks.0.resnets.1.conv2": [ + 77.7, + 77.3, + 77.8, + 77.6 + ], + "down_blocks.0.downsamplers.0.conv": [ + 79.5, + 79.3, + 79.5, + 79.3 + ], + "down_blocks.1.attentions.0.proj_in": [ + 80.0, + 79.8, + 79.9, + 79.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 81.7, + 81.9, + 82.0, + 81.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 81.8, + 81.9, + 82.0, + 81.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 81.2, + 81.1, + 81.4, + 81.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 80.9, + 80.8, + 81.1, + 80.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 81.9, + 82.1, + 82.4, + 82.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.7, + 82.0, + 81.9, + 81.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 82.1, + 82.2, + 82.2, + 82.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 79.4, + 79.8, + 80.0, + 79.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 80.7, + 80.4, + 80.9, + 80.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 81.3, + 81.4, + 81.7, + 81.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 81.4, + 81.7, + 81.8, + 81.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 81.0, + 81.3, + 81.1, + 81.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 81.0, + 81.1, + 81.2, + 80.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 81.8, + 82.1, + 82.1, + 81.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 81.5, + 81.5, + 81.6, + 81.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 81.8, + 82.1, + 
82.1, + 81.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 81.5, + 81.8, + 81.8, + 81.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 79.8, + 79.8, + 80.1, + 79.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 80.8, + 81.1, + 81.3, + 80.9 + ], + "down_blocks.1.attentions.0.proj_out": [ + 80.5, + 80.8, + 80.7, + 80.5 + ], + "down_blocks.1.attentions.1.proj_in": [ + 81.0, + 81.1, + 81.3, + 81.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 81.9, + 82.1, + 82.1, + 81.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 82.1, + 82.1, + 82.1, + 81.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 81.7, + 81.7, + 81.8, + 81.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 81.1, + 81.4, + 81.6, + 81.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 81.7, + 82.1, + 82.3, + 82.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 81.1, + 81.1, + 81.2, + 81.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 81.1, + 81.1, + 81.2, + 81.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 81.8, + 82.0, + 82.1, + 82.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 81.9, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 81.5, + 81.5, + 81.8, + 81.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 81.4, + 81.4, + 81.8, + 81.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 82.2, + 82.4, + 82.3, + 82.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 82.0, + 82.3, + 82.2, + 81.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 82.0, + 81.9, + 82.3, + 81.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 80.8, + 81.2, + 81.3, + 80.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 81.3, + 81.2, + 81.6, + 81.2 + ], + "down_blocks.1.attentions.1.proj_out": [ + 81.2, + 81.0, + 81.4, + 81.2 + ], + "down_blocks.1.resnets.0.conv1": [ + 80.8, + 81.0, + 81.1, + 80.9 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 81.6, + 81.8, + 81.9, + 81.6 + ], + "down_blocks.1.resnets.0.conv2": [ + 81.4, + 81.4, + 81.5, + 81.2 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 79.4, + 79.3, + 79.5, + 79.1 + ], + "down_blocks.1.resnets.1.conv1": [ + 80.9, + 81.0, + 81.2, + 81.0 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 81.9, + 81.9, + 82.3, + 81.8 + ], + "down_blocks.1.resnets.1.conv2": [ + 81.4, + 81.3, + 81.6, + 81.3 + ], + "down_blocks.1.downsamplers.0.conv": [ + 81.0, + 81.2, + 81.4, + 81.1 + ], + "down_blocks.2.attentions.0.proj_in": [ + 81.9, + 82.2, + 82.1, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 82.2, + 82.2, + 82.5, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 81.8, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 82.0, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.1, + 82.4, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 81.9, + 82.3, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 81.9, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v": [ + 82.0, + 82.4, + 82.4, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 82.1, + 82.1, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k": [ + 82.2, + 82.3, + 82.2, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 82.3, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 82.0, + 82.3, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q": [ + 82.3, + 82.4, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k": [ + 82.3, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 82.0, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 82.0, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k": [ + 82.1, + 82.4, + 82.3, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v": [ + 82.1, + 82.3, + 82.5, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v": [ + 82.1, + 82.4, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2": [ + 82.0, + 82.0, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k": [ + 82.3, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k": [ + 82.3, + 82.4, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 81.8, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2": [ + 82.2, + 82.1, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k": [ + 82.1, + 82.3, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v": [ + 82.0, + 82.0, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 82.2, + 82.2, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v": [ + 82.2, + 82.2, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.2, + 82.3, + 82.6, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 82.0, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q": [ + 82.3, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v": [ + 82.2, + 82.1, + 82.4, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v": [ + 82.3, + 82.4, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2": [ + 82.1, + 82.1, + 82.3, + 82.1 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q": [ + 82.3, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v": [ + 82.0, + 82.2, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 82.0, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v": [ + 82.2, + 82.2, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 82.3, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 82.1, + 82.3, + 82.1, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2": [ + 81.9, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q": [ + 82.0, + 82.2, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k": [ + 82.2, + 82.3, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 82.1, + 82.2, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 81.9, + 82.0, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q": [ + 82.3, + 82.1, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v": [ + 82.0, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k": [ + 82.1, + 82.2, + 82.6, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v": [ + 82.1, + 82.4, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 81.9, + 82.3, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2": [ + 81.8, + 82.2, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.0.proj_out": [ + 81.9, + 81.9, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.1.proj_in": [ + 82.1, + 82.2, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 82.2, + 82.2, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 82.1, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 82.0, + 82.4, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 82.1, + 82.1, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k": [ + 82.3, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q": [ + 82.3, + 82.5, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k": [ + 82.1, + 82.4, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 82.2, + 82.4, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 82.1, + 82.5, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k": [ + 82.3, + 82.3, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v": [ + 82.1, + 82.4, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 82.3, + 82.4, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k": [ + 82.2, + 82.4, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v": [ + 82.2, + 82.2, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q": [ + 82.1, + 82.2, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k": [ + 82.1, + 82.4, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v": [ + 82.2, + 82.4, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q": [ + 82.2, + 82.3, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v": [ + 82.2, + 82.4, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k": [ + 82.2, + 82.4, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v": [ + 82.0, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 82.2, + 82.2, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2": [ + 82.3, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q": [ + 82.1, + 82.4, + 82.5, + 82.1 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k": [ + 82.2, + 82.4, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q": [ + 82.2, + 82.4, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v": [ + 82.1, + 82.2, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2": [ + 82.0, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q": [ + 82.1, + 82.3, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v": [ + 81.9, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 82.1, + 82.3, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q": [ + 82.2, + 82.3, + 82.3, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k": [ + 81.9, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v": [ + 82.1, + 82.3, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.proj_out": [ + 81.9, + 82.3, + 82.1, + 82.1 + ], + "down_blocks.2.resnets.0.conv1": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.resnets.0.conv2": [ + 81.9, + 82.4, + 82.2, + 82.0 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 81.5, + 81.3, + 81.5, + 81.3 + ], + "down_blocks.2.resnets.1.conv1": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + 
"down_blocks.2.resnets.1.conv2": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.proj_in": [ + 82.1, + 82.2, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 82.3, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.0, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 82.0, + 82.1, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k": [ + 82.3, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 82.0, + 82.4, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.3, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k": [ + 82.2, + 82.2, + 82.5, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.2, + 82.2, + 82.5, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 82.2, + 82.2, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k": [ + 82.3, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 82.0, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q": [ + 82.1, + 82.4, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v": [ + 81.9, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 82.2, + 
82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k": [ + 82.2, + 82.4, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v": [ + 82.1, + 82.4, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 82.3, + 82.2, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 82.0, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q": [ + 82.3, + 82.4, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v": [ + 82.0, + 82.4, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 82.1, + 82.1, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2": [ + 82.1, + 82.2, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k": [ + 82.3, + 82.4, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q": [ + 82.2, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v": [ + 82.3, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 82.0, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k": [ + 82.3, + 82.1, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k": [ + 82.2, + 82.3, + 82.5, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 82.2, + 82.3, + 82.3, + 82.2 
+ ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q": [ + 82.1, + 82.4, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 82.2, + 82.2, + 82.2, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v": [ + 82.1, + 82.2, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 82.2, + 82.2, + 82.5, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 81.7, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2": [ + 82.2, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 82.3, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 81.9, + 82.4, + 82.4, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2": [ + 82.1, + 82.2, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q": [ + 82.0, + 82.1, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k": [ + 82.2, + 82.3, + 82.3, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 82.0, + 82.3, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2": [ + 81.9, + 82.2, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.0.proj_out": [ + 81.8, + 82.0, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.1.proj_in": [ + 81.7, + 81.6, + 81.8, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 81.7, 
+ 82.0, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 81.3, + 81.6, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 81.9, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 81.9, + 81.8, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 81.7, + 81.6, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q": [ + 82.0, + 82.4, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k": [ + 82.1, + 82.3, + 82.5, + 82.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v": [ + 81.6, + 81.6, + 82.0, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 81.7, + 81.8, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q": [ + 82.3, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k": [ + 81.9, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 82.2, + 82.3, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 81.8, + 81.9, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2": [ + 81.8, + 81.9, + 82.2, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k": [ + 82.3, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v": [ + 81.8, + 82.0, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 81.7, + 81.8, + 82.0, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 81.8, + 82.2, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2": [ + 81.8, + 82.0, + 82.1, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q": [ + 81.9, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k": [ + 82.1, + 82.3, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v": [ + 81.7, + 82.0, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 81.9, + 82.0, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 82.2, + 82.1, + 82.5, + 
82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 81.9, + 82.0, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2": [ + 81.8, + 82.2, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q": [ + 82.1, + 82.4, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v": [ + 81.8, + 82.0, + 81.8, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 81.9, + 82.1, + 81.9, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q": [ + 82.3, + 82.3, + 82.3, + 82.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k": [ + 81.9, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 81.7, + 81.7, + 82.2, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2": [ + 81.7, + 81.6, + 82.0, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k": [ + 82.1, + 82.4, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v": [ + 81.9, + 82.1, + 82.3, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 81.8, + 81.8, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q": [ + 82.3, + 82.2, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k": [ + 82.3, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v": [ + 82.2, + 82.2, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 81.6, + 81.9, + 82.1, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2": [ + 81.6, + 81.9, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k": [ + 82.1, + 82.2, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v": [ + 82.1, + 82.2, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 81.7, + 81.8, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k": [ + 82.0, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 82.0, + 82.2, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 81.8, + 82.0, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2": [ + 82.0, + 82.1, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v": [ + 81.9, + 82.2, + 82.1, + 82.0 + ], + 
"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 81.8, + 81.9, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v": [ + 82.3, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 81.7, + 82.0, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2": [ + 81.8, + 82.0, + 82.2, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k": [ + 82.1, + 82.2, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v": [ + 81.8, + 82.0, + 82.1, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 81.9, + 82.1, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k": [ + 82.3, + 82.4, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v": [ + 82.2, + 82.2, + 82.3, + 82.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 81.6, + 81.8, + 81.9, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2": [ + 81.8, + 82.0, + 82.3, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q": [ + 81.9, + 82.4, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v": [ + 81.7, + 81.8, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 81.7, + 82.0, + 82.1, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q": [ + 82.1, + 82.2, + 82.6, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v": [ + 82.1, + 82.2, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 81.4, + 81.5, + 81.7, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2": [ + 81.8, + 81.9, + 82.1, + 81.5 + ], + "up_blocks.0.attentions.1.proj_out": [ + 81.2, + 81.4, + 81.7, + 81.2 + ], + "up_blocks.0.attentions.2.proj_in": [ + 81.6, + 81.6, + 81.9, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v": [ + 81.8, + 81.9, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 81.6, + 81.8, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q": [ + 82.3, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v": [ + 82.3, 
+ 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 81.8, + 82.1, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2": [ + 81.6, + 81.7, + 82.1, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k": [ + 82.3, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v": [ + 81.6, + 81.9, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 81.6, + 82.0, + 81.9, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k": [ + 82.1, + 82.2, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v": [ + 82.0, + 82.2, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 81.8, + 82.0, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2": [ + 81.5, + 82.0, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q": [ + 81.9, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v": [ + 81.7, + 82.0, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0": [ + 81.7, + 81.9, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q": [ + 81.9, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0": [ + 82.1, + 82.4, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj": [ + 82.0, + 82.2, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2": [ + 81.8, + 82.0, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q": [ + 82.3, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v": [ + 81.7, + 81.9, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0": [ + 81.7, + 82.0, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0": [ + 82.3, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj": [ + 81.7, + 81.8, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2": [ + 81.8, + 82.0, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k": [ + 82.1, + 82.4, + 82.4, + 82.2 
+ ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v": [ + 81.6, + 81.9, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0": [ + 81.6, + 81.7, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k": [ + 82.3, + 82.4, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v": [ + 82.2, + 82.4, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0": [ + 82.2, + 82.4, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj": [ + 81.7, + 82.0, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2": [ + 81.8, + 82.1, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q": [ + 82.0, + 82.2, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v": [ + 82.0, + 82.1, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0": [ + 81.9, + 82.0, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q": [ + 82.2, + 82.5, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v": [ + 81.9, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0": [ + 82.2, + 82.4, + 82.5, + 82.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj": [ + 81.7, + 82.0, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2": [ + 81.8, + 81.7, + 82.2, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v": [ + 81.7, + 82.0, + 82.2, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0": [ + 81.9, + 82.0, + 82.2, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v": [ + 82.3, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj": [ + 81.8, + 81.9, + 82.0, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2": [ + 82.0, + 82.2, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v": [ + 81.7, + 81.9, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0": [ + 81.9, + 82.2, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k": [ + 82.2, + 82.1, + 82.3, + 82.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + 
"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj": [ + 81.8, + 82.0, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2": [ + 81.8, + 82.0, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v": [ + 81.8, + 82.1, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0": [ + 81.6, + 82.0, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q": [ + 82.1, + 82.4, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj": [ + 81.8, + 81.7, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2": [ + 81.7, + 82.0, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q": [ + 81.9, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k": [ + 82.0, + 82.1, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v": [ + 82.0, + 82.0, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0": [ + 81.8, + 82.0, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q": [ + 82.2, + 82.4, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0": [ + 82.3, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj": [ + 81.9, + 81.6, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2": [ + 81.7, + 81.8, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.2.proj_out": [ + 81.5, + 81.5, + 81.7, + 81.5 + ], + "up_blocks.0.resnets.0.conv1": [ + 81.9, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.resnets.0.conv2": [ + 82.0, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 81.8, + 81.8, + 82.0, + 81.8 + ], + "up_blocks.0.resnets.1.conv1": [ + 82.1, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 82.0, + 82.2, + 82.2, + 82.1 + ], + "up_blocks.0.resnets.1.conv2": [ + 82.0, + 82.2, + 82.0, + 82.0 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 81.5, + 81.4, + 81.9, + 81.5 + ], + "up_blocks.0.resnets.2.conv1": [ + 82.0, + 82.0, + 82.3, + 82.1 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 82.0, + 82.3, + 82.3, + 82.3 + ], + "up_blocks.0.resnets.2.conv2": [ + 81.7, + 82.0, + 82.0, + 81.9 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 80.2, + 80.3, + 80.5, + 80.5 + ], + "up_blocks.0.upsamplers.0.conv": [ + 80.3, + 80.4, + 80.8, + 80.4 + ], + "up_blocks.1.attentions.0.proj_in": [ + 80.6, + 80.6, + 80.9, + 80.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 81.8, + 81.9, + 82.1, + 81.8 + ], + 
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 81.7, + 81.7, + 81.9, + 81.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 81.1, + 81.2, + 81.6, + 81.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 80.6, + 80.6, + 80.9, + 80.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.0, + 82.1, + 82.4, + 82.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.9, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.7, + 82.3, + 82.3, + 82.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 80.7, + 80.9, + 81.0, + 80.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 80.8, + 80.8, + 81.1, + 80.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 81.4, + 81.6, + 81.7, + 81.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 81.6, + 81.5, + 81.6, + 81.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 80.9, + 80.9, + 81.2, + 80.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 80.7, + 80.9, + 81.0, + 80.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.0, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 80.9, + 81.0, + 81.3, + 80.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 81.3, + 81.4, + 81.4, + 81.2 + ], + "up_blocks.1.attentions.0.proj_out": [ + 81.0, + 81.0, + 81.4, + 81.0 + ], + "up_blocks.1.attentions.1.proj_in": [ + 80.1, + 80.2, + 80.3, + 80.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 81.6, + 81.5, + 81.8, + 81.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 81.5, + 81.8, + 81.9, + 81.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 80.8, + 80.9, + 80.9, + 80.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 80.1, + 80.1, + 80.4, + 80.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 81.9, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 82.3, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 82.1, + 82.1, + 82.2, + 82.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 82.1, + 82.0, + 82.2, + 81.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 80.2, + 80.4, + 80.5, + 80.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 80.6, + 80.5, + 80.5, + 80.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 81.2, + 81.5, + 81.6, + 81.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 81.3, + 81.3, + 81.6, + 81.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 80.7, + 80.8, + 80.8, + 80.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 80.6, + 80.6, + 80.9, + 80.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 82.1, 
+ 82.2, + 82.4, + 82.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 82.1, + 82.3, + 82.3, + 82.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 82.0, + 82.0, + 82.3, + 82.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 81.9, + 82.1, + 82.2, + 82.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 80.4, + 80.1, + 80.3, + 80.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 80.9, + 80.9, + 81.2, + 80.9 + ], + "up_blocks.1.attentions.1.proj_out": [ + 80.8, + 80.7, + 81.0, + 80.7 + ], + "up_blocks.1.attentions.2.proj_in": [ + 80.0, + 79.7, + 79.9, + 79.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 81.5, + 81.4, + 81.7, + 81.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 81.4, + 81.5, + 81.9, + 81.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 80.8, + 80.6, + 81.1, + 80.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 80.1, + 80.1, + 80.2, + 80.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 81.9, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 82.1, + 82.1, + 82.2, + 82.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 82.0, + 82.3, + 82.0, + 81.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 82.0, + 81.8, + 82.1, + 81.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 79.8, + 79.7, + 80.0, + 79.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 80.2, + 80.3, + 80.7, + 80.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q": [ + 81.3, + 81.4, + 81.6, + 81.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k": [ + 81.1, + 81.2, + 81.5, + 81.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v": [ + 80.7, + 80.8, + 81.0, + 80.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 80.1, + 80.0, + 80.4, + 80.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q": [ + 82.1, + 82.4, + 82.4, + 82.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k": [ + 82.2, + 82.3, + 82.3, + 82.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v": [ + 81.8, + 82.2, + 82.4, + 81.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 81.9, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 80.4, + 80.6, + 80.6, + 80.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2": [ + 80.7, + 80.7, + 81.0, + 80.5 + ], + "up_blocks.1.attentions.2.proj_out": [ + 80.1, + 80.1, + 80.3, + 79.9 + ], + "up_blocks.1.resnets.0.conv1": [ + 81.5, + 81.4, + 81.6, + 81.5 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 82.0, + 82.2, + 82.2, + 82.0 + ], + "up_blocks.1.resnets.0.conv2": [ + 81.2, + 81.2, + 81.6, + 81.3 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 79.9, + 79.8, + 80.0, + 79.9 + ], + "up_blocks.1.resnets.1.conv1": [ + 80.4, + 80.4, + 80.7, + 80.3 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 81.3, + 81.3, + 81.6, + 81.1 + ], + "up_blocks.1.resnets.1.conv2": [ + 80.8, + 80.7, + 81.0, + 80.7 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 78.9, + 78.8, + 79.0, + 78.9 + ], + "up_blocks.1.resnets.2.conv1": [ + 80.3, + 80.1, + 80.5, + 80.2 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 81.1, + 81.3, + 81.5, + 81.2 + ], + 
"up_blocks.1.resnets.2.conv2": [ + 80.4, + 80.6, + 80.9, + 80.5 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 79.4, + 79.4, + 79.5, + 79.5 + ], + "up_blocks.1.upsamplers.0.conv": [ + 77.2, + 77.5, + 77.6, + 77.5 + ], + "up_blocks.2.resnets.0.conv1": [ + 77.6, + 77.6, + 77.8, + 77.5 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 80.7, + 81.1, + 81.0, + 81.0 + ], + "up_blocks.2.resnets.0.conv2": [ + 77.6, + 77.3, + 77.6, + 77.7 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 75.8, + 75.9, + 75.8, + 75.8 + ], + "up_blocks.2.resnets.1.conv1": [ + 73.7, + 73.7, + 74.1, + 73.9 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 80.8, + 81.0, + 81.0, + 80.8 + ], + "up_blocks.2.resnets.1.conv2": [ + 72.4, + 72.3, + 72.4, + 72.5 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 76.0, + 76.1, + 76.3, + 76.0 + ], + "up_blocks.2.resnets.2.conv1": [ + 73.0, + 73.3, + 73.3, + 73.3 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 81.3, + 81.4, + 81.5, + 81.3 + ], + "up_blocks.2.resnets.2.conv2": [ + 75.0, + 75.2, + 75.4, + 75.2 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 79.5, + 79.9, + 79.9, + 79.7 + ], + "mid_block.attentions.0.proj_in": [ + 81.9, + 82.2, + 82.2, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 81.9, + 82.3, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.4, + 82.4, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.2, + 82.3, + 82.5, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 82.0, + 82.2, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k": [ + 82.2, + 82.2, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v": [ + 82.1, + 82.3, + 82.2, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 82.0, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 81.9, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.2": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v": [ + 81.8, + 82.4, + 82.4, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 82.1, + 82.3, + 
82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k": [ + 82.2, + 82.3, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v": [ + 82.2, + 82.2, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.2": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q": [ + 82.2, + 82.1, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k": [ + 82.1, + 82.2, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v": [ + 82.0, + 82.3, + 82.4, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q": [ + 82.1, + 82.2, + 82.3, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k": [ + 82.3, + 82.2, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 82.2, + 82.4, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.2": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v": [ + 82.1, + 82.3, + 82.3, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 82.1, + 82.2, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v": [ + 82.3, + 82.2, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 82.4, + 82.4, + 82.4, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 82.2, + 82.2, + 82.4, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.2": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_q": [ + 82.2, + 82.2, + 82.4, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.2, + 82.4, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 81.9, + 82.1, + 82.5, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.2": [ + 82.0, + 
82.1, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k": [ + 82.2, + 82.0, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q": [ + 81.9, + 82.3, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k": [ + 82.3, + 82.3, + 82.5, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 82.3, + 82.2, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.2": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v": [ + 82.0, + 82.1, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.3, + 82.2, + 82.4, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 82.2, + 82.2, + 82.4, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.2": [ + 82.0, + 82.2, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_v": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q": [ + 82.2, + 82.2, + 82.5, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 82.0, + 82.2, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.2": [ + 82.2, + 82.1, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v": [ + 82.1, + 82.1, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k": [ + 
82.0, + 82.2, + 82.4, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v": [ + 82.2, + 82.4, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 82.2, + 82.1, + 82.4, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 82.0, + 82.1, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.9.ff.net.2": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.proj_out": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "mid_block.resnets.0.conv1": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "mid_block.resnets.0.time_emb_proj": [ + 82.2, + 82.4, + 82.3, + 82.3 + ], + "mid_block.resnets.0.conv2": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "mid_block.resnets.1.conv1": [ + 81.9, + 82.3, + 82.3, + 82.2 + ], + "mid_block.resnets.1.time_emb_proj": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "mid_block.resnets.1.conv2": [ + 82.1, + 82.3, + 82.4, + 82.0 + ] + }, + "8": { + "time_embedding.linear_1": [ + 81.3, + 81.4, + 81.7, + 81.5 + ], + "time_embedding.linear_2": [ + 81.0, + 80.8, + 81.1, + 81.0 + ], + "add_embedding.linear_1": [ + 81.5, + 81.3, + 81.7, + 81.2 + ], + "add_embedding.linear_2": [ + 81.2, + 81.2, + 81.7, + 81.3 + ], + "down_blocks.0.resnets.0.conv1": [ + 80.7, + 80.9, + 81.0, + 80.9 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 81.1, + 80.8, + 81.0, + 81.1 + ], + "down_blocks.0.resnets.0.conv2": [ + 80.4, + 80.3, + 80.5, + 80.3 + ], + "down_blocks.0.resnets.1.conv1": [ + 79.6, + 79.7, + 79.6, + 79.9 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 81.0, + 81.0, + 81.4, + 81.1 + ], + "down_blocks.0.resnets.1.conv2": [ + 80.3, + 80.6, + 80.9, + 80.3 + ], + "down_blocks.0.downsamplers.0.conv": [ + 81.2, + 81.4, + 81.4, + 81.2 + ], + "down_blocks.1.attentions.0.proj_in": [ + 81.5, + 81.4, + 81.8, + 81.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 82.1, + 82.1, + 82.3, + 81.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 81.9, + 82.1, + 82.1, + 82.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 81.7, + 82.1, + 82.0, + 82.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 81.7, + 81.9, + 82.0, + 81.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.9, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 81.4, + 81.4, + 81.8, + 81.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 81.5, + 81.6, + 81.9, + 81.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 82.0, + 82.1, + 82.2, + 82.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 81.9, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 81.7, + 81.8, + 81.9, + 81.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 81.7, + 81.7, + 82.0, + 81.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 82.0, + 82.1, + 82.3, + 82.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.0, + 82.3, + 
82.4, + 81.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 82.2, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 81.1, + 81.3, + 81.6, + 81.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 81.8, + 81.9, + 81.9, + 81.8 + ], + "down_blocks.1.attentions.0.proj_out": [ + 81.5, + 81.6, + 81.8, + 81.5 + ], + "down_blocks.1.attentions.1.proj_in": [ + 81.7, + 81.9, + 82.1, + 81.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 82.0, + 82.1, + 82.1, + 81.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 82.0, + 82.1, + 82.1, + 81.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 82.0, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 81.8, + 81.9, + 81.9, + 81.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 81.8, + 81.9, + 82.1, + 81.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 82.0, + 82.1, + 82.4, + 82.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 82.0, + 82.2, + 82.4, + 82.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 81.9, + 82.1, + 82.2, + 82.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 81.9, + 82.1, + 82.2, + 82.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 81.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 82.2, + 82.2, + 82.5, + 82.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 81.6, + 82.0, + 82.0, + 81.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 81.9, + 82.1, + 82.0, + 81.8 + ], + "down_blocks.1.attentions.1.proj_out": [ + 81.8, + 81.8, + 82.0, + 81.7 + ], + "down_blocks.1.resnets.0.conv1": [ + 81.7, + 81.8, + 81.7, + 81.5 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 81.7, + 82.2, + 82.1, + 81.9 + ], + "down_blocks.1.resnets.0.conv2": [ + 82.0, + 82.1, + 81.9, + 81.6 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 81.2, + 81.2, + 81.4, + 81.2 + ], + "down_blocks.1.resnets.1.conv1": [ + 81.8, + 82.0, + 82.2, + 82.1 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.1.resnets.1.conv2": [ + 82.0, + 82.1, + 82.1, + 81.8 + ], + "down_blocks.1.downsamplers.0.conv": [ + 81.8, + 82.0, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.proj_in": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 82.0, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 81.9, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.1, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.2, + 82.2, + 82.5, + 82.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2": [ + 82.2, + 82.2, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k": [ + 82.2, + 82.3, + 82.5, + 82.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v": [ + 82.2, + 82.4, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2": [ + 82.0, + 82.2, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k": [ + 81.8, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v": [ + 82.0, + 82.4, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q": [ + 82.2, + 82.2, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 82.0, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 82.2, + 82.2, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 81.9, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q": [ + 82.2, + 82.4, + 82.6, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q": [ + 82.3, + 82.3, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k": [ + 82.3, + 82.2, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v": [ + 82.1, + 82.2, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 82.3, + 82.3, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 82.3, + 82.3, + 82.3, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v": [ + 82.0, + 82.4, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 82.2, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 82.1, + 82.4, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2": [ + 82.2, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 82.0, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 82.2, + 82.2, + 82.3, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2": [ + 82.1, + 82.3, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q": [ + 82.0, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k": [ + 82.3, + 82.1, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v": [ + 82.2, + 82.3, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 82.4, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k": [ + 82.3, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 82.3, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 81.9, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.proj_out": [ + 81.9, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.proj_in": [ + 82.1, + 82.2, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 82.1, + 82.3, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 82.1, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q": [ + 82.3, + 82.4, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v": [ + 82.0, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q": [ + 82.1, + 82.3, + 82.2, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v": [ + 82.3, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2": [ + 82.1, + 82.2, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k": [ + 82.3, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 82.1, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v": [ + 82.3, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 82.3, + 82.2, + 82.3, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k": [ + 82.2, + 82.4, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 82.3, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2": [ + 82.2, + 82.2, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k": [ + 82.3, + 82.3, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v": [ + 82.3, + 82.2, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 82.2, + 82.4, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 82.0, + 82.2, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2": [ + 82.0, + 82.3, + 82.3, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q": [ + 82.0, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v": [ + 82.2, + 82.3, + 82.6, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 82.0, + 82.3, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k": [ + 82.3, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 82.2, + 82.2, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2": [ + 82.2, + 82.4, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k": [ + 82.1, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 82.1, + 82.4, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q": [ + 82.3, + 82.3, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v": [ + 82.3, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2": [ + 82.3, + 82.4, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2": [ + 82.3, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q": [ + 81.9, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k": [ + 82.3, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v": [ + 82.1, + 82.3, + 82.5, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 82.3, + 82.4, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2": [ + 82.1, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q": [ + 81.9, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 82.3, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.proj_out": [ + 82.2, + 82.4, + 82.6, + 82.1 + ], + "down_blocks.2.resnets.0.conv1": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.resnets.0.conv2": [ + 81.9, + 82.2, + 82.4, + 82.3 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 81.9, + 82.0, + 82.2, + 81.7 + ], + "down_blocks.2.resnets.1.conv1": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + 
"down_blocks.2.resnets.1.conv2": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.proj_in": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 81.9, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 82.1, + 82.3, + 82.5, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.4, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.0, + 82.2, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.9, + 82.4, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.9, + 82.4, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q": [ + 82.3, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k": [ + 82.1, + 82.3, + 82.5, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v": [ + 82.3, + 82.4, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k": [ + 82.2, + 82.2, + 82.5, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 82.2, + 82.4, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 82.0, + 82.4, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k": [ + 82.3, + 82.2, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 82.1, + 
82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q": [ + 82.2, + 82.3, + 82.2, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k": [ + 82.2, + 82.2, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 82.3, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 82.3, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2": [ + 82.3, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 81.9, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k": [ + 82.3, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v": [ + 82.2, + 82.4, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 82.3, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q": [ + 82.2, + 82.4, + 82.5, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 82.2, + 82.4, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q": [ + 82.3, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k": [ + 82.0, + 82.3, + 82.6, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.2, + 82.3, + 82.5, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 82.3, + 82.4, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2": [ + 81.9, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q": [ + 82.0, + 82.1, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 82.3, + 82.4, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 82.1, + 82.3, + 82.3, + 82.2 
+ ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2": [ + 82.3, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q": [ + 82.0, + 82.4, + 82.5, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k": [ + 82.2, + 82.2, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v": [ + 82.3, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q": [ + 82.2, + 82.4, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.3, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v": [ + 82.1, + 82.3, + 82.3, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 82.3, + 82.3, + 82.2, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q": [ + 82.2, + 82.4, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k": [ + 82.2, + 82.2, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 82.3, + 82.4, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 82.0, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q": [ + 81.9, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k": [ + 82.3, + 82.2, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q": [ + 82.3, + 82.3, + 82.6, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v": [ + 82.2, + 82.2, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 82.3, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.proj_out": [ + 82.2, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.proj_in": [ + 81.9, + 82.3, + 82.2, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 82.2, + 82.2, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 82.0, 
+ 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 82.0, + 82.1, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 81.9, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 82.0, + 82.3, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q": [ + 82.2, + 82.2, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 81.9, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q": [ + 82.1, + 82.4, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k": [ + 82.1, + 82.4, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 82.1, + 82.3, + 82.4, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2": [ + 82.0, + 82.2, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v": [ + 82.0, + 82.1, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 81.9, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q": [ + 82.3, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 82.0, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q": [ + 82.4, + 82.4, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v": [ + 82.0, + 82.4, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 81.9, + 82.3, + 82.3, + 
82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 81.9, + 82.2, + 82.4, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q": [ + 82.0, + 82.4, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k": [ + 82.2, + 82.3, + 82.5, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 82.3, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q": [ + 82.0, + 82.4, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k": [ + 82.2, + 82.1, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v": [ + 82.3, + 82.4, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 82.1, + 82.2, + 82.4, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q": [ + 82.0, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v": [ + 81.9, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k": [ + 82.1, + 82.2, + 82.2, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v": [ + 82.3, + 82.4, + 82.3, + 82.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 82.0, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 81.9, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2": [ + 82.1, + 82.1, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q": [ + 82.0, + 82.2, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q": [ + 82.2, + 82.2, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 82.1, + 82.2, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2": [ + 82.2, + 82.4, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q": [ + 82.2, + 82.2, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + 
"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 82.0, + 82.2, + 82.2, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k": [ + 82.2, + 82.2, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 81.9, + 82.1, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2": [ + 81.9, + 82.3, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q": [ + 82.2, + 82.4, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 82.1, + 82.2, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q": [ + 82.2, + 82.2, + 82.6, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k": [ + 82.2, + 82.3, + 82.6, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v": [ + 82.2, + 82.3, + 82.5, + 82.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 82.0, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2": [ + 82.1, + 82.3, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q": [ + 82.3, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 82.1, + 82.0, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2": [ + 82.0, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.1.proj_out": [ + 81.7, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.proj_in": [ + 81.7, + 82.2, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q": [ + 82.0, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k": [ + 82.0, + 82.4, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v": [ + 82.0, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 81.9, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.4, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k": [ + 82.2, + 82.4, + 82.6, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v": [ + 82.2, 
+ 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 82.0, + 82.2, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2": [ + 81.9, + 82.1, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v": [ + 82.0, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v": [ + 82.2, + 82.4, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2": [ + 82.0, + 82.1, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q": [ + 82.1, + 82.4, + 82.5, + 82.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k": [ + 82.3, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v": [ + 81.9, + 82.2, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0": [ + 81.9, + 82.2, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k": [ + 82.4, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v": [ + 82.3, + 82.4, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj": [ + 82.2, + 82.1, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q": [ + 82.0, + 82.1, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k": [ + 82.2, + 82.3, + 82.5, + 82.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v": [ + 82.1, + 82.3, + 82.4, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0": [ + 82.2, + 82.4, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0": [ + 82.3, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj": [ + 81.8, + 82.2, + 82.3, + 82.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2": [ + 81.9, + 82.3, + 82.4, + 82.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k": [ + 82.2, + 82.3, + 82.3, + 82.3 
+ ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v": [ + 81.8, + 82.1, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q": [ + 82.2, + 82.4, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k": [ + 82.1, + 82.4, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj": [ + 82.0, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2": [ + 82.0, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k": [ + 82.3, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0": [ + 81.9, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q": [ + 82.1, + 82.4, + 82.3, + 82.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj": [ + 81.9, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q": [ + 82.2, + 82.4, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v": [ + 82.1, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj": [ + 81.9, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2": [ + 82.1, + 82.3, + 82.5, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q": [ + 82.3, + 82.3, + 82.6, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k": [ + 82.4, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v": [ + 82.1, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0": [ + 82.0, + 82.2, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v": [ + 82.2, + 82.3, + 82.6, + 82.1 + ], + 
"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj": [ + 82.1, + 82.4, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q": [ + 82.3, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k": [ + 82.3, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v": [ + 82.3, + 82.2, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0": [ + 82.0, + 82.1, + 82.4, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q": [ + 82.1, + 82.4, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj": [ + 82.0, + 82.2, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2": [ + 82.1, + 82.3, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k": [ + 82.2, + 82.2, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v": [ + 81.9, + 82.1, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q": [ + 82.2, + 82.4, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v": [ + 81.9, + 82.4, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj": [ + 82.2, + 82.3, + 82.4, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.proj_out": [ + 81.8, + 82.0, + 82.0, + 81.6 + ], + "up_blocks.0.resnets.0.conv1": [ + 82.2, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.resnets.0.conv2": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 82.0, + 82.1, + 82.3, + 82.1 + ], + "up_blocks.0.resnets.1.conv1": [ + 81.9, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.resnets.1.conv2": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 81.9, + 82.0, + 82.3, + 82.0 + ], + "up_blocks.0.resnets.2.conv1": [ + 81.9, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.resnets.2.conv2": [ + 82.0, + 82.2, + 82.2, + 82.2 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 81.4, + 81.5, + 81.8, + 81.6 + ], + "up_blocks.0.upsamplers.0.conv": [ + 81.5, + 81.2, + 81.7, + 81.2 + ], + "up_blocks.1.attentions.0.proj_in": [ + 81.2, + 81.4, + 81.8, + 81.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 82.0, + 82.2, + 82.4, + 82.0 + ], + 
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 82.2, + 82.1, + 82.4, + 82.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 81.7, + 82.0, + 82.0, + 81.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 81.6, + 81.7, + 81.7, + 81.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 82.2, + 82.3, + 82.3, + 82.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 81.7, + 81.7, + 82.0, + 81.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 81.6, + 81.9, + 81.9, + 81.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 81.7, + 82.1, + 82.1, + 82.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 81.9, + 82.1, + 82.2, + 82.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 81.6, + 81.8, + 82.1, + 81.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 81.6, + 81.5, + 81.9, + 81.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 81.7, + 81.8, + 81.8, + 81.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 81.7, + 81.5, + 82.0, + 81.9 + ], + "up_blocks.1.attentions.0.proj_out": [ + 81.8, + 81.8, + 81.9, + 81.6 + ], + "up_blocks.1.attentions.1.proj_in": [ + 81.3, + 81.3, + 81.5, + 81.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 81.9, + 82.1, + 82.3, + 81.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 82.0, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 81.5, + 81.2, + 81.9, + 81.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 81.5, + 81.4, + 81.7, + 81.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.1, + 82.4, + 82.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 81.5, + 81.3, + 81.6, + 81.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 81.6, + 81.7, + 81.8, + 81.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 81.9, + 82.0, + 82.0, + 81.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 82.0, + 81.9, + 82.3, + 82.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 81.6, + 81.8, + 81.8, + 81.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 81.5, + 81.7, + 81.8, + 81.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 82.0, 
+ 82.2, + 82.4, + 82.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 82.0, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 81.4, + 81.6, + 81.7, + 81.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 81.8, + 81.7, + 81.9, + 81.7 + ], + "up_blocks.1.attentions.1.proj_out": [ + 81.6, + 81.4, + 81.8, + 81.6 + ], + "up_blocks.1.attentions.2.proj_in": [ + 81.3, + 81.1, + 81.4, + 81.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 82.0, + 82.1, + 82.2, + 82.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 82.1, + 82.2, + 82.4, + 81.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 81.4, + 81.4, + 81.7, + 81.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 81.1, + 81.1, + 81.7, + 81.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 82.1, + 82.1, + 82.3, + 82.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 81.2, + 81.4, + 81.6, + 81.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 81.5, + 81.4, + 81.9, + 81.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q": [ + 81.8, + 82.0, + 82.0, + 81.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k": [ + 81.9, + 82.2, + 82.2, + 81.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v": [ + 81.1, + 81.3, + 81.4, + 81.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 81.2, + 81.4, + 81.8, + 81.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q": [ + 82.3, + 82.3, + 82.2, + 82.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 82.0, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 81.5, + 81.5, + 81.6, + 81.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2": [ + 81.3, + 81.6, + 81.6, + 81.7 + ], + "up_blocks.1.attentions.2.proj_out": [ + 81.3, + 81.3, + 81.4, + 81.2 + ], + "up_blocks.1.resnets.0.conv1": [ + 81.9, + 82.1, + 82.2, + 82.1 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.1.resnets.0.conv2": [ + 81.6, + 82.0, + 82.1, + 81.7 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 81.3, + 81.3, + 81.5, + 81.2 + ], + "up_blocks.1.resnets.1.conv1": [ + 81.4, + 81.7, + 81.7, + 81.5 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 81.8, + 81.9, + 82.3, + 81.8 + ], + "up_blocks.1.resnets.1.conv2": [ + 81.8, + 81.8, + 81.8, + 81.6 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 80.9, + 81.1, + 81.2, + 81.0 + ], + "up_blocks.1.resnets.2.conv1": [ + 81.4, + 81.6, + 81.6, + 81.3 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 81.8, + 81.7, + 82.1, + 81.6 + ], + 
"up_blocks.1.resnets.2.conv2": [ + 81.4, + 81.5, + 81.7, + 81.3 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 81.0, + 80.9, + 81.0, + 80.8 + ], + "up_blocks.1.upsamplers.0.conv": [ + 80.9, + 80.8, + 81.0, + 80.9 + ], + "up_blocks.2.resnets.0.conv1": [ + 80.3, + 80.3, + 80.7, + 80.5 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 81.4, + 81.5, + 81.7, + 81.5 + ], + "up_blocks.2.resnets.0.conv2": [ + 80.7, + 80.9, + 80.9, + 80.8 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 80.4, + 80.5, + 80.5, + 80.3 + ], + "up_blocks.2.resnets.1.conv1": [ + 80.3, + 80.4, + 80.2, + 80.4 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 81.5, + 81.6, + 81.6, + 81.5 + ], + "up_blocks.2.resnets.1.conv2": [ + 80.6, + 80.8, + 81.0, + 80.9 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 80.6, + 81.0, + 80.9, + 80.7 + ], + "up_blocks.2.resnets.2.conv1": [ + 80.2, + 80.2, + 80.3, + 80.3 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 81.7, + 81.8, + 82.1, + 81.8 + ], + "up_blocks.2.resnets.2.conv2": [ + 81.0, + 81.0, + 81.2, + 81.0 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 81.7, + 81.6, + 82.0, + 81.5 + ], + "mid_block.attentions.0.proj_in": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 82.0, + 82.3, + 82.6, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 82.2, + 82.2, + 82.3, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 82.0, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.3, + 82.3, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 82.1, + 82.4, + 82.3, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 82.1, + 82.2, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q": [ + 82.0, + 82.4, + 82.3, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k": [ + 82.3, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v": [ + 82.2, + 82.2, + 82.5, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.2, + 82.2, + 82.2, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 82.3, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.2": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q": [ + 81.9, + 82.3, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k": [ + 82.2, + 82.4, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 82.3, + 82.4, + 
82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q": [ + 82.3, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 82.2, + 82.4, + 82.4, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 82.2, + 82.2, + 82.4, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.2": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k": [ + 82.1, + 82.2, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v": [ + 82.3, + 82.2, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k": [ + 82.2, + 82.2, + 82.5, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 82.3, + 82.3, + 82.4, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.2": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k": [ + 82.0, + 82.4, + 82.7, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 82.1, + 82.4, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_q": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k": [ + 81.9, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v": [ + 82.3, + 82.4, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 82.1, + 82.4, + 82.6, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 82.2, + 82.3, + 82.5, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.2": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k": [ + 82.3, + 82.4, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_q": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 82.3, + 82.4, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.2": [ + 82.2, + 
82.4, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q": [ + 82.3, + 82.4, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k": [ + 82.3, + 82.4, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 82.2, + 82.3, + 82.5, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.2": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q": [ + 82.2, + 82.2, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k": [ + 82.2, + 82.4, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 82.3, + 82.2, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.3, + 82.4, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v": [ + 82.1, + 82.4, + 82.5, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.2": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q": [ + 82.3, + 82.3, + 82.6, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k": [ + 82.2, + 82.2, + 82.3, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_v": [ + 82.0, + 82.2, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k": [ + 82.1, + 82.2, + 82.5, + 82.3 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 82.1, + 82.4, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 82.2, + 82.2, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.2": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v": [ + 82.3, + 82.4, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k": [ + 
82.1, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v": [ + 82.2, + 82.2, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 82.3, + 82.2, + 82.4, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.9.ff.net.2": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "mid_block.attentions.0.proj_out": [ + 81.9, + 82.3, + 82.5, + 82.1 + ], + "mid_block.resnets.0.conv1": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "mid_block.resnets.0.time_emb_proj": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "mid_block.resnets.0.conv2": [ + 82.3, + 82.3, + 82.4, + 82.0 + ], + "mid_block.resnets.1.conv1": [ + 82.2, + 82.3, + 82.5, + 82.0 + ], + "mid_block.resnets.1.time_emb_proj": [ + 82.1, + 82.4, + 82.5, + 82.0 + ], + "mid_block.resnets.1.conv2": [ + 82.3, + 82.3, + 82.2, + 82.1 + ] + } + }, + "cumulative": { + "1": { + "metadata": { + "nbits": 1, + "out_ngroups": 1, + "in_ngroups": 1, + "cumulative": true, + "candidates": [ + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v", + 
"down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.5.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q", + 
"mid_block.attentions.0.transformer_blocks.4.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + 
"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + 
"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0", + 
"up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2", + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.8.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2", + 
"up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0", + "down_blocks.2.resnets.1.time_emb_proj", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.7.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.8.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0", + "mid_block.resnets.1.time_emb_proj", + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2", + "mid_block.attentions.0.transformer_blocks.6.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.5.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.2.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.4.ff.net.2", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj", + 
"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v", + "up_blocks.0.resnets.0.time_emb_proj", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.9.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.3.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj", + "mid_block.resnets.0.time_emb_proj", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj", + 
"mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.1.ff.net.2", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v", + "mid_block.resnets.1.conv1", + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2", + "mid_block.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + 
"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v", + "down_blocks.2.attentions.1.proj_in", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k", + "mid_block.resnets.1.conv2", + "down_blocks.2.resnets.1.conv1", + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v", + "up_blocks.0.resnets.0.conv1", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v", + "down_blocks.2.resnets.1.conv2", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2", + "up_blocks.0.resnets.1.time_emb_proj", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2", + "mid_block.resnets.0.conv1", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2", + "down_blocks.2.resnets.0.time_emb_proj", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj", + "mid_block.resnets.0.conv2", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.resnets.1.conv2", + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v", + "down_blocks.2.attentions.1.proj_out", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0", + "up_blocks.0.resnets.2.time_emb_proj", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v", + 
"up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v", + "mid_block.attentions.0.proj_out", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.0.resnets.1.conv1", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v", + "up_blocks.0.resnets.0.conv2", + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2", + "up_blocks.0.resnets.2.conv2", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj", + "up_blocks.0.attentions.0.proj_out", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2", + "up_blocks.0.resnets.2.conv1", + 
"up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0", + "down_blocks.2.resnets.0.conv2", + "up_blocks.0.resnets.0.conv_shortcut", + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj", + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj", + "down_blocks.2.attentions.0.proj_in", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.0.attentions.0.proj_in", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.2.attentions.0.proj_out", + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2", + "mid_block.attentions.0.proj_in", + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q", + "up_blocks.0.attentions.2.proj_in", + "down_blocks.2.resnets.0.conv1", + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj", + "up_blocks.1.resnets.0.time_emb_proj", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0", + "up_blocks.0.attentions.2.proj_out", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q", + "add_embedding.linear_2", + "down_blocks.1.resnets.1.time_emb_proj", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + 
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.0.resnets.1.conv_shortcut", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.1.proj_in", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "add_embedding.linear_1", + "up_blocks.1.resnets.0.conv1", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k", + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2", + "up_blocks.1.resnets.0.conv2", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2", + "up_blocks.0.attentions.1.proj_out", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.1.resnets.0.time_emb_proj", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.resnets.1.conv2", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.0.proj_out", + "down_blocks.1.attentions.1.proj_out", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k", + "down_blocks.1.resnets.1.conv1", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v", + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v", + "down_blocks.2.resnets.0.conv_shortcut", + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.1.resnets.0.conv2", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q", + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj", + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q", + "down_blocks.1.attentions.1.proj_in", + "up_blocks.1.resnets.1.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.resnets.2.conv_shortcut", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v", + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2", + "up_blocks.1.attentions.0.proj_in", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.1.downsamplers.0.conv", + "up_blocks.1.resnets.2.time_emb_proj", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + 
"up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.1.proj_out", + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v", + "up_blocks.0.upsamplers.0.conv", + "down_blocks.1.attentions.0.proj_out", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.1.resnets.1.conv2", + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj", + "up_blocks.1.resnets.2.conv2", + "up_blocks.1.resnets.1.conv1", + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k", + "up_blocks.1.attentions.2.proj_out", + "up_blocks.1.attentions.1.proj_in", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.resnets.0.conv1", + "up_blocks.1.attentions.2.proj_in", + "up_blocks.1.resnets.2.conv1", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj", + "down_blocks.0.resnets.1.time_emb_proj", + "up_blocks.2.resnets.0.time_emb_proj", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.0.proj_in", + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.resnets.1.time_emb_proj", + "up_blocks.2.resnets.2.time_emb_proj", + "down_blocks.0.resnets.0.time_emb_proj", + "up_blocks.1.resnets.0.conv_shortcut", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k", + "down_blocks.0.resnets.1.conv2", + "down_blocks.0.resnets.0.conv1", + "up_blocks.1.resnets.1.conv_shortcut", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.0.resnets.1.conv1", + "down_blocks.1.resnets.0.conv_shortcut", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q", + "down_blocks.0.resnets.0.conv2", + "down_blocks.0.downsamplers.0.conv", + "up_blocks.2.resnets.0.conv1", + "up_blocks.2.resnets.0.conv2", + "up_blocks.1.resnets.2.conv_shortcut", + "up_blocks.2.resnets.1.conv2", + "time_embedding.linear_2", + "up_blocks.2.resnets.1.conv1", + "up_blocks.1.upsamplers.0.conv", + "time_embedding.linear_1", + "up_blocks.2.resnets.0.conv_shortcut", + "up_blocks.2.resnets.2.conv1", + "up_blocks.2.resnets.1.conv_shortcut", + "up_blocks.2.resnets.2.conv2", + "up_blocks.2.resnets.2.conv_shortcut" + ], + "sizes": [ + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, 
+ 2621440, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 2621440, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 2621440, + 2621440, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 2621440, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 6553600, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 6553600, + 1638400, + 6553600, + 6553600, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 6553600, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 2621440, + 2621440, + 6553600, + 6553600, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 6553600, + 13107200, + 13107200, + 1638400, + 13107200, + 1638400, + 2621440, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 2621440, + 1638400, + 13107200, + 1638400, + 13107200, + 1638400, + 1638400, + 6553600, + 6553600, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 13107200, + 6553600, + 13107200, + 6553600, + 1638400, + 1638400, + 6553600, + 6553600, + 1638400, + 1638400, + 6553600, + 1638400, + 
1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 13107200, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 2621440, + 1638400, + 13107200, + 6553600, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 6553600, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 6553600, + 1638400, + 13107200, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 13107200, + 13107200, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 6553600, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 13107200, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 14745600, + 1638400, + 1638400, + 13107200, + 6553600, + 1638400, + 6553600, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 409600, + 6553600, + 13107200, + 1638400, + 2621440, + 13107200, + 13107200, + 1638400, + 1638400, + 13107200, + 2621440, + 1638400, + 6553600, + 2621440, + 1638400, + 1638400, + 14745600, + 14745600, + 13107200, + 2621440, + 29491200, + 2621440, + 14745600, + 1638400, + 1310720, + 1638400, + 1638400, + 1638400, + 1310720, + 6553600, + 2621440, + 1638400, + 6553600, + 1638400, + 13107200, + 1638400, + 6553600, + 13107200, + 13107200, + 1638400, + 6553600, + 6553600, + 1638400, + 1638400, + 6553600, + 14745600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 14745600, + 13107200, + 14745600, + 6553600, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 13107200, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 6553600, + 1638400, + 13107200, + 6553600, + 2621440, + 29491200, + 1638400, + 1638400, + 1638400, + 1638400, + 409600, + 1638400, + 14745600, + 13107200, + 13107200, + 6553600, + 14745600, + 1638400, + 1638400, + 13107200, + 6553600, + 1638400, + 1310720, + 1638400, + 6553600, + 13107200, + 6553600, + 6553600, + 1310720, + 13107200, + 1638400, + 409600, + 409600, + 6553600, + 22118400, + 1638400, + 14745600, + 3276800, + 6553600, + 1310720, + 1638400, + 13107200, + 13107200, + 1310720, + 13107200, + 13107200, + 2621440, + 6553600, + 409600, + 1638400, + 13107200, + 1638400, + 409600, + 1638400, + 1310720, + 1310720, + 13107200, + 13107200, + 13107200, + 6553600, + 1638400, + 6553600, + 1638400, + 13107200, + 409600, + 1638400, + 1638400, + 6553600, + 409600, + 409600, + 409600, + 1310720, + 409600, + 409600, + 409600, + 2621440, + 409600, + 409600, + 1638400, + 7372800, + 13107200, + 409600, + 1310720, + 1310720, + 13107200, + 409600, + 13107200, + 13107200, + 819200, + 409600, + 1638400, + 409600, + 1638400, + 819200, + 409600, + 409600, + 1310720, + 409600, + 3276800, + 409600, + 1638400, + 2621440, + 1310720, + 409600, + 3604480, + 11059200, + 409600, + 409600, + 1638400, + 3686400, + 409600, + 1638400, + 1638400, + 409600, + 1638400, + 
409600, + 409600, + 819200, + 409600, + 3686400, + 409600, + 1638400, + 409600, + 409600, + 409600, + 409600, + 409600, + 3686400, + 409600, + 409600, + 409600, + 409600, + 1310720, + 3276800, + 409600, + 409600, + 409600, + 819200, + 409600, + 3276800, + 409600, + 3686400, + 1310720, + 409600, + 3276800, + 409600, + 409600, + 819200, + 409600, + 3276800, + 2457600, + 409600, + 1638400, + 409600, + 1638400, + 3686400, + 819200, + 409600, + 1638400, + 409600, + 409600, + 409600, + 1638400, + 1638400, + 1638400, + 409600, + 409600, + 1310720, + 1310720, + 409600, + 3276800, + 409600, + 14745600, + 409600, + 409600, + 409600, + 3686400, + 3276800, + 3686400, + 7372800, + 1310720, + 409600, + 409600, + 3276800, + 1843200, + 409600, + 5529600, + 3276800, + 3276800, + 409600, + 409600, + 409600, + 409600, + 3276800, + 409600, + 409600, + 409600, + 409600, + 1228800, + 1310720, + 921600, + 921600, + 819200, + 1310720, + 921600, + 204800, + 409600, + 921600, + 921600, + 2764800, + 921600, + 614400, + 921600, + 1638400, + 1843200, + 3686400, + 409600, + 307200, + 1843200, + 204800, + 921600, + 204800 + ] + }, + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v": [ + 82.0, + 82.5, + 82.4, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 81.9, + 82.1, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.1, + 82.1, + 82.2, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v": [ + 82.2, + 82.1, + 82.2, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q": [ + 81.9, + 82.1, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v": [ + 82.0, + 82.0, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 81.8, + 81.9, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v": [ + 81.9, + 82.0, + 82.3, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 81.8, + 82.0, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 81.9, + 81.9, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q": [ + 82.0, + 82.0, + 82.2, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 81.8, + 82.0, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 81.8, + 81.9, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v": [ + 81.9, + 82.0, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 81.8, + 82.1, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 81.7, + 82.0, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 81.8, + 81.8, + 82.2, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q": [ + 81.7, + 81.8, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v": [ + 81.7, + 81.8, + 82.2, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v": [ + 81.7, + 81.8, + 82.1, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v": [ + 81.7, + 81.8, + 82.1, + 81.6 + 
], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 81.7, + 81.8, + 82.0, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 81.8, + 81.9, + 82.0, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k": [ + 81.7, + 81.8, + 82.1, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q": [ + 81.7, + 81.8, + 81.8, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q": [ + 81.8, + 81.6, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q": [ + 81.7, + 81.3, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q": [ + 81.6, + 81.6, + 81.9, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 81.6, + 81.5, + 82.1, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v": [ + 81.5, + 81.4, + 81.9, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q": [ + 81.6, + 81.4, + 82.0, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 81.6, + 81.4, + 82.0, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q": [ + 81.5, + 81.3, + 81.9, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k": [ + 81.5, + 81.3, + 81.9, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v": [ + 81.6, + 81.3, + 81.9, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v": [ + 81.4, + 81.3, + 81.9, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 81.5, + 81.3, + 81.8, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v": [ + 81.5, + 81.4, + 81.8, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q": [ + 81.4, + 81.4, + 81.8, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q": [ + 81.4, + 81.3, + 81.8, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v": [ + 81.5, + 81.5, + 81.8, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k": [ + 81.5, + 81.3, + 81.7, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v": [ + 81.4, + 81.3, + 81.6, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q": [ + 81.4, + 81.4, + 81.6, + 81.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v": [ + 81.4, + 81.3, + 81.5, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q": [ + 81.4, + 81.4, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 81.4, + 81.3, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k": [ + 81.4, + 81.3, + 81.3, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 81.3, + 81.3, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 81.4, + 81.1, + 81.3, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q": [ + 81.4, + 81.2, + 81.3, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k": [ + 81.3, + 81.2, + 81.3, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_q": [ + 81.2, + 81.2, + 81.3, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k": [ + 81.3, + 81.2, + 81.2, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k": [ + 81.3, + 81.2, + 81.2, + 81.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 81.0, + 81.2, + 81.1, + 81.2 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q": [ + 81.2, + 81.2, + 81.2, + 81.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 81.2, + 81.0, + 81.2, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k": [ + 81.2, + 81.1, + 81.1, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v": [ + 81.0, + 81.1, + 81.1, + 81.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k": [ + 81.2, + 80.7, + 81.2, + 81.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q": [ + 81.0, + 80.7, + 81.0, + 80.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q": [ + 81.1, + 80.8, + 81.0, + 80.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v": [ + 81.0, + 80.5, + 81.0, + 81.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q": [ + 80.9, + 80.6, + 81.0, + 81.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q": [ + 81.1, + 80.5, + 81.0, + 80.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k": [ + 80.9, + 80.5, + 80.8, + 80.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q": [ + 80.9, + 80.6, + 80.9, + 80.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k": [ + 80.8, + 80.5, + 80.8, + 80.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v": [ + 80.9, + 80.4, + 80.7, + 80.8 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 80.8, + 80.4, + 80.7, + 80.7 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 80.8, + 80.5, + 80.8, + 80.7 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k": [ + 80.7, + 80.5, + 80.7, + 80.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k": [ + 80.7, + 80.4, + 80.7, + 80.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q": [ + 80.6, + 80.4, + 80.6, + 80.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q": [ + 80.6, + 80.3, + 80.6, + 80.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 80.6, + 80.3, + 80.6, + 80.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q": [ + 80.5, + 80.4, + 80.3, + 80.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q": [ + 80.6, + 80.4, + 80.5, + 80.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k": [ + 80.6, + 80.4, + 80.6, + 80.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k": [ + 80.5, + 80.3, + 80.5, + 80.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k": [ + 80.6, + 80.3, + 80.5, + 80.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 80.5, + 80.3, + 80.4, + 80.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v": [ + 80.6, + 80.2, + 80.3, + 80.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k": [ + 80.7, + 80.1, + 80.4, + 80.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v": [ + 80.5, + 80.2, + 80.1, + 80.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q": [ + 80.6, + 80.0, + 80.2, + 80.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k": [ + 80.5, + 80.1, + 80.2, + 80.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 80.5, + 80.1, + 80.1, + 80.3 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v": [ + 80.5, + 80.2, + 80.2, + 80.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q": [ + 80.5, + 80.2, + 80.1, + 80.2 + ], + 
"mid_block.attentions.0.transformer_blocks.4.attn2.to_q": [ + 80.5, + 80.1, + 80.1, + 80.2 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k": [ + 80.4, + 80.0, + 80.0, + 80.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v": [ + 80.4, + 79.9, + 80.0, + 80.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q": [ + 80.3, + 80.1, + 79.9, + 80.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 80.4, + 80.1, + 80.0, + 80.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k": [ + 80.4, + 80.0, + 79.9, + 80.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v": [ + 80.3, + 79.9, + 79.8, + 80.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v": [ + 80.4, + 80.0, + 79.9, + 80.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v": [ + 80.4, + 80.0, + 79.9, + 80.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 80.4, + 80.1, + 80.0, + 80.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 80.3, + 80.0, + 79.8, + 80.1 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q": [ + 80.3, + 80.0, + 79.7, + 80.0 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k": [ + 80.4, + 80.0, + 79.9, + 80.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k": [ + 80.3, + 80.1, + 79.8, + 80.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k": [ + 80.4, + 80.0, + 79.8, + 80.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 80.3, + 80.0, + 79.9, + 80.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k": [ + 80.4, + 80.0, + 79.9, + 80.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k": [ + 80.4, + 80.0, + 80.1, + 80.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k": [ + 80.4, + 80.1, + 79.9, + 80.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 80.3, + 79.9, + 79.9, + 80.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 80.2, + 79.9, + 79.7, + 80.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k": [ + 80.1, + 80.1, + 79.8, + 80.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q": [ + 80.1, + 80.1, + 79.7, + 80.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 80.2, + 79.9, + 79.7, + 79.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q": [ + 80.1, + 80.0, + 79.9, + 80.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k": [ + 80.1, + 79.9, + 79.8, + 80.0 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k": [ + 80.0, + 79.8, + 79.7, + 80.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q": [ + 80.1, + 79.9, + 79.6, + 79.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v": [ + 79.8, + 79.8, + 79.4, + 79.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 79.8, + 79.9, + 79.3, + 79.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 79.8, + 79.6, + 79.5, + 79.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k": [ + 79.9, + 79.5, + 79.5, + 79.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 79.9, + 79.5, + 79.5, + 79.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k": [ + 79.9, + 79.5, + 79.3, + 79.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k": [ + 79.9, + 79.6, + 79.4, + 79.5 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k": [ + 79.9, + 79.5, + 79.4, + 79.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 79.9, + 79.5, + 79.5, + 79.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q": [ + 79.9, + 79.4, + 79.4, + 79.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 79.8, + 79.4, + 79.3, + 79.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q": [ + 79.8, + 79.4, + 79.3, + 79.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q": [ + 79.8, + 79.5, + 79.2, + 79.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 79.8, + 79.4, + 79.3, + 79.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q": [ + 79.7, + 79.3, + 79.4, + 79.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q": [ + 79.7, + 79.3, + 79.4, + 79.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0": [ + 79.7, + 79.3, + 79.4, + 79.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0": [ + 79.6, + 79.3, + 79.2, + 79.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0": [ + 79.5, + 79.2, + 79.3, + 79.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 79.5, + 79.1, + 79.3, + 79.4 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k": [ + 79.5, + 79.3, + 79.2, + 79.3 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v": [ + 79.3, + 79.2, + 79.1, + 79.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v": [ + 79.2, + 79.1, + 79.1, + 79.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v": [ + 79.1, + 78.9, + 79.0, + 79.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q": [ + 79.1, + 79.0, + 78.8, + 79.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k": [ + 79.1, + 79.0, + 78.9, + 79.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 79.0, + 79.0, + 78.9, + 79.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 79.0, + 79.0, + 78.9, + 78.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q": [ + 79.0, + 79.0, + 78.9, + 79.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k": [ + 79.0, + 79.0, + 78.9, + 79.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0": [ + 78.9, + 78.8, + 79.0, + 79.0 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 79.0, + 78.9, + 78.9, + 79.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v": [ + 78.8, + 78.9, + 78.8, + 78.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q": [ + 78.8, + 78.9, + 78.8, + 78.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 78.7, + 79.0, + 78.5, + 78.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q": [ + 78.7, + 79.0, + 78.5, + 78.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v": [ + 78.7, + 79.0, + 78.5, + 78.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v": [ + 78.7, + 78.9, + 78.3, + 78.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 78.8, + 78.9, + 78.2, + 78.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q": [ + 78.6, + 78.7, + 78.0, + 78.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k": [ + 78.2, + 78.6, + 77.5, + 77.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k": [ + 78.3, + 78.6, + 77.6, + 77.8 + 
], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v": [ + 78.2, + 78.3, + 77.3, + 77.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k": [ + 78.2, + 78.3, + 77.3, + 77.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k": [ + 78.1, + 78.3, + 77.2, + 77.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q": [ + 78.1, + 78.2, + 77.3, + 77.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v": [ + 78.0, + 78.1, + 77.1, + 77.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v": [ + 77.9, + 78.1, + 77.1, + 77.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k": [ + 78.0, + 78.0, + 77.0, + 77.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 77.9, + 77.8, + 76.8, + 77.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k": [ + 77.9, + 77.8, + 76.8, + 77.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q": [ + 77.8, + 77.7, + 77.0, + 77.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v": [ + 77.8, + 77.8, + 77.1, + 77.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q": [ + 77.9, + 77.7, + 77.0, + 77.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q": [ + 77.8, + 77.6, + 77.0, + 77.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v": [ + 77.9, + 77.7, + 77.0, + 77.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 77.9, + 77.6, + 76.9, + 76.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k": [ + 77.9, + 77.6, + 76.8, + 76.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q": [ + 77.8, + 77.5, + 76.9, + 76.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q": [ + 77.6, + 77.4, + 76.9, + 76.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q": [ + 77.6, + 77.3, + 76.8, + 76.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q": [ + 77.6, + 77.4, + 76.7, + 76.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v": [ + 77.5, + 77.1, + 76.4, + 76.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k": [ + 77.6, + 77.1, + 76.5, + 76.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 77.7, + 77.3, + 76.5, + 76.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k": [ + 77.6, + 77.4, + 76.6, + 76.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q": [ + 77.5, + 77.3, + 76.5, + 76.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 77.5, + 77.4, + 76.5, + 76.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q": [ + 77.4, + 77.2, + 76.4, + 76.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k": [ + 77.4, + 77.2, + 76.3, + 76.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k": [ + 77.4, + 77.2, + 76.3, + 76.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q": [ + 77.2, + 77.2, + 76.2, + 76.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q": [ + 77.2, + 77.2, + 76.2, + 76.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q": [ + 77.3, + 77.2, + 76.1, + 76.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0": [ + 77.1, + 77.2, + 76.2, + 76.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k": [ + 77.2, + 77.2, + 76.1, + 76.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q": [ + 77.2, + 77.1, + 76.2, + 76.5 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q": [ + 77.2, + 77.1, + 76.2, + 76.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k": [ + 77.0, + 77.1, + 76.1, + 76.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q": [ + 77.1, + 77.1, + 76.1, + 76.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k": [ + 77.2, + 77.0, + 76.1, + 76.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v": [ + 76.1, + 76.0, + 75.1, + 75.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 76.2, + 76.0, + 75.1, + 75.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k": [ + 76.0, + 75.6, + 74.7, + 74.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q": [ + 76.0, + 75.6, + 74.8, + 74.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q": [ + 75.8, + 75.5, + 74.6, + 74.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q": [ + 75.9, + 75.4, + 74.6, + 74.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0": [ + 75.7, + 75.4, + 74.6, + 74.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 75.7, + 75.2, + 74.8, + 74.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k": [ + 75.7, + 75.2, + 74.8, + 74.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k": [ + 75.7, + 75.1, + 74.8, + 74.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q": [ + 75.6, + 74.9, + 74.7, + 74.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q": [ + 75.4, + 74.8, + 74.4, + 74.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0": [ + 75.2, + 74.7, + 74.4, + 74.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q": [ + 75.2, + 74.8, + 74.3, + 74.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 75.1, + 74.7, + 74.2, + 74.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 75.1, + 74.6, + 74.3, + 74.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 75.1, + 74.5, + 74.3, + 74.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v": [ + 74.9, + 74.4, + 74.1, + 74.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 74.8, + 74.2, + 73.9, + 74.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v": [ + 74.9, + 74.3, + 73.9, + 74.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 74.9, + 74.5, + 73.9, + 74.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v": [ + 75.0, + 74.4, + 74.0, + 74.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 75.1, + 74.3, + 74.0, + 74.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k": [ + 75.0, + 74.2, + 73.9, + 74.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q": [ + 75.0, + 74.2, + 73.9, + 74.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k": [ + 74.9, + 74.0, + 73.8, + 74.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v": [ + 74.9, + 74.0, + 73.7, + 73.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v": [ + 74.8, + 74.0, + 73.9, + 74.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v": [ + 74.8, + 74.0, + 73.7, + 74.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v": [ + 74.7, + 73.9, + 73.6, + 73.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 74.8, + 73.9, + 73.7, + 73.9 
+ ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k": [ + 74.7, + 73.7, + 73.6, + 73.7 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q": [ + 74.8, + 73.8, + 73.6, + 73.8 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q": [ + 74.8, + 73.9, + 73.7, + 73.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 74.6, + 73.8, + 73.6, + 73.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 74.6, + 73.9, + 73.5, + 73.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 74.7, + 73.9, + 73.6, + 73.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q": [ + 74.8, + 73.9, + 73.8, + 73.7 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k": [ + 74.8, + 73.9, + 73.8, + 73.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2": [ + 74.7, + 73.9, + 73.8, + 73.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 74.7, + 73.9, + 73.8, + 73.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k": [ + 74.7, + 73.9, + 73.8, + 73.7 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q": [ + 74.7, + 73.8, + 73.8, + 73.6 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k": [ + 74.6, + 73.8, + 73.8, + 73.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q": [ + 74.5, + 73.7, + 73.7, + 73.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k": [ + 74.5, + 73.7, + 73.6, + 73.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 74.4, + 73.7, + 73.6, + 73.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k": [ + 74.3, + 73.6, + 73.7, + 73.4 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k": [ + 74.2, + 73.6, + 73.7, + 73.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q": [ + 74.5, + 73.2, + 73.5, + 73.3 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k": [ + 74.6, + 73.2, + 73.5, + 73.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 74.6, + 73.2, + 73.5, + 73.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 74.5, + 73.3, + 73.5, + 73.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v": [ + 74.5, + 73.4, + 73.5, + 73.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v": [ + 74.4, + 73.3, + 73.4, + 73.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q": [ + 74.2, + 73.3, + 73.4, + 73.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 74.2, + 73.0, + 73.3, + 72.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k": [ + 74.1, + 72.9, + 73.4, + 73.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k": [ + 73.9, + 72.8, + 73.2, + 72.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k": [ + 73.8, + 72.7, + 73.2, + 72.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2": [ + 73.9, + 72.9, + 73.3, + 73.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 73.9, + 72.8, + 73.3, + 73.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 73.9, + 72.7, + 73.1, + 72.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v": [ + 73.7, + 72.6, + 73.0, + 72.8 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q": [ + 73.7, + 72.5, + 72.9, + 72.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 73.7, + 72.6, + 72.8, + 72.8 + ], + 
"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 72.3, + 71.8, + 71.9, + 71.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v": [ + 72.3, + 71.8, + 71.9, + 71.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k": [ + 72.2, + 71.8, + 71.9, + 71.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k": [ + 72.2, + 71.9, + 72.0, + 71.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 72.1, + 71.7, + 71.9, + 71.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v": [ + 71.9, + 71.7, + 71.8, + 71.6 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 71.9, + 71.7, + 71.9, + 71.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2": [ + 72.0, + 71.8, + 72.0, + 71.6 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 72.0, + 71.8, + 72.0, + 71.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2": [ + 72.0, + 71.8, + 72.0, + 71.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2": [ + 71.9, + 71.8, + 72.0, + 71.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q": [ + 72.0, + 71.7, + 72.0, + 71.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k": [ + 71.9, + 71.8, + 71.9, + 71.5 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_v": [ + 71.9, + 71.7, + 71.9, + 71.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k": [ + 71.8, + 71.7, + 71.7, + 71.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k": [ + 71.7, + 71.6, + 71.7, + 71.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 71.8, + 71.7, + 71.7, + 71.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 71.8, + 71.8, + 71.8, + 71.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q": [ + 71.6, + 71.7, + 71.7, + 71.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k": [ + 71.6, + 71.6, + 71.6, + 71.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 71.6, + 71.6, + 71.7, + 71.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2": [ + 71.7, + 71.6, + 71.7, + 71.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 71.7, + 71.6, + 71.6, + 71.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0": [ + 71.5, + 71.7, + 71.6, + 71.3 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v": [ + 71.5, + 71.6, + 71.7, + 71.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 71.3, + 71.4, + 71.4, + 71.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q": [ + 71.3, + 71.3, + 71.4, + 71.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2": [ + 71.3, + 71.3, + 71.4, + 71.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2": [ + 71.3, + 71.4, + 71.4, + 71.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v": [ + 71.2, + 71.3, + 71.4, + 71.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2": [ + 71.2, + 71.2, + 71.2, + 70.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k": [ + 71.3, + 71.1, + 71.3, + 70.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q": [ + 71.3, + 71.2, + 71.3, + 70.8 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v": [ + 71.4, + 71.2, + 71.4, + 70.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v": [ + 71.4, + 71.3, + 71.4, + 71.0 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q": [ + 71.4, + 71.3, + 71.4, + 70.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2": [ + 71.4, + 71.3, + 71.5, + 71.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v": [ + 71.3, + 71.3, + 71.5, + 71.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k": [ + 71.3, + 71.4, + 71.5, + 71.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2": [ + 71.2, + 71.3, + 71.4, + 71.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2": [ + 71.2, + 71.4, + 71.6, + 71.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v": [ + 71.2, + 71.4, + 71.6, + 71.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 71.3, + 71.4, + 71.4, + 70.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k": [ + 71.3, + 71.4, + 71.3, + 70.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2": [ + 71.4, + 71.4, + 71.3, + 70.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v": [ + 71.4, + 71.4, + 71.3, + 70.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2": [ + 71.2, + 71.3, + 71.2, + 70.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 71.3, + 71.4, + 71.3, + 70.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 71.1, + 71.1, + 71.0, + 70.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 71.1, + 71.2, + 71.1, + 70.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 71.1, + 71.2, + 71.0, + 70.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v": [ + 71.1, + 71.1, + 71.0, + 70.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v": [ + 70.9, + 71.1, + 70.8, + 70.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 71.0, + 71.1, + 70.8, + 70.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q": [ + 71.1, + 71.2, + 71.0, + 70.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 71.0, + 71.2, + 71.0, + 70.8 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 71.0, + 71.4, + 71.0, + 70.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q": [ + 71.0, + 71.4, + 70.9, + 70.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q": [ + 71.0, + 71.3, + 70.9, + 70.6 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.2": [ + 71.0, + 71.3, + 70.9, + 70.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v": [ + 71.0, + 71.3, + 70.9, + 70.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k": [ + 71.1, + 71.4, + 71.0, + 70.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 71.0, + 71.4, + 70.9, + 70.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 70.8, + 71.6, + 70.8, + 70.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k": [ + 70.8, + 71.6, + 70.8, + 70.6 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.2": [ + 70.8, + 71.5, + 70.8, + 70.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k": [ + 70.9, + 71.5, + 70.7, + 70.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v": [ + 70.8, + 71.4, + 70.6, + 70.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 70.8, + 71.4, + 70.6, + 70.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 70.7, + 71.3, + 70.7, + 70.5 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 70.8, + 71.4, + 70.6, + 70.4 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 70.7, + 71.3, + 70.6, + 70.4 + ], + "mid_block.resnets.1.time_emb_proj": [ + 70.6, + 71.2, + 70.5, + 70.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2": [ + 70.6, + 71.3, + 70.5, + 70.3 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.2": [ + 70.6, + 71.3, + 70.5, + 70.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 70.7, + 71.4, + 70.6, + 70.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q": [ + 70.7, + 71.4, + 70.6, + 70.4 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v": [ + 70.7, + 71.4, + 70.6, + 70.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 70.8, + 71.5, + 70.8, + 70.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q": [ + 70.9, + 71.6, + 70.8, + 70.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 70.9, + 71.6, + 70.8, + 70.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v": [ + 70.8, + 71.5, + 70.7, + 70.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k": [ + 70.8, + 71.5, + 70.7, + 70.5 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 70.9, + 71.4, + 70.7, + 70.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 70.9, + 71.4, + 70.6, + 70.6 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.2": [ + 70.9, + 71.5, + 70.7, + 70.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 70.9, + 71.5, + 70.7, + 70.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2": [ + 70.6, + 71.2, + 70.5, + 70.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v": [ + 70.9, + 71.5, + 70.7, + 70.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k": [ + 70.9, + 71.4, + 70.7, + 70.5 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.2": [ + 70.9, + 71.5, + 70.7, + 70.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2": [ + 71.0, + 71.5, + 70.6, + 70.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 71.0, + 71.7, + 71.0, + 70.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 70.9, + 71.7, + 71.0, + 71.0 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.2": [ + 70.9, + 71.6, + 71.1, + 71.0 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k": [ + 70.9, + 71.4, + 71.0, + 70.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q": [ + 70.9, + 71.4, + 71.0, + 70.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v": [ + 70.9, + 71.5, + 71.2, + 71.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 70.9, + 71.5, + 71.3, + 71.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q": [ + 70.9, + 71.4, + 71.1, + 71.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q": [ + 70.7, + 71.3, + 71.0, + 71.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v": [ + 70.7, + 71.2, + 71.1, + 71.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 70.5, + 71.2, + 71.0, + 71.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v": [ + 70.4, + 70.9, + 70.9, + 70.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k": [ + 70.3, + 70.9, + 70.8, + 70.8 + ], + 
"up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q": [ + 70.2, + 70.8, + 70.7, + 70.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v": [ + 70.2, + 70.8, + 70.8, + 70.7 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 70.1, + 70.7, + 70.7, + 70.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 70.1, + 70.7, + 70.6, + 70.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2": [ + 70.0, + 70.7, + 70.6, + 70.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k": [ + 70.1, + 70.6, + 70.6, + 70.5 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q": [ + 70.0, + 70.6, + 70.6, + 70.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 69.9, + 70.5, + 70.6, + 70.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k": [ + 69.7, + 70.4, + 70.4, + 70.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k": [ + 69.6, + 70.3, + 70.3, + 70.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 69.6, + 70.5, + 70.3, + 70.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k": [ + 69.5, + 70.4, + 70.2, + 70.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q": [ + 69.5, + 70.3, + 70.1, + 70.1 + ], + "mid_block.attentions.0.transformer_blocks.9.ff.net.2": [ + 69.4, + 70.3, + 70.1, + 70.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 69.5, + 70.3, + 70.1, + 70.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v": [ + 69.5, + 70.2, + 70.2, + 70.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 69.6, + 70.4, + 70.2, + 70.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k": [ + 69.4, + 70.2, + 70.1, + 70.1 + ], + "mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 69.4, + 70.2, + 70.0, + 70.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 67.1, + 67.7, + 67.5, + 67.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q": [ + 67.1, + 67.8, + 67.6, + 67.3 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 67.2, + 67.8, + 67.6, + 67.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2": [ + 67.1, + 67.7, + 67.4, + 67.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 67.2, + 67.6, + 67.5, + 67.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q": [ + 67.1, + 67.6, + 67.4, + 67.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 66.9, + 67.4, + 67.3, + 67.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v": [ + 66.9, + 67.5, + 67.3, + 67.2 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k": [ + 66.9, + 67.4, + 67.2, + 67.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 66.9, + 67.4, + 67.2, + 67.1 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v": [ + 67.0, + 67.5, + 67.3, + 67.1 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 66.9, + 67.5, + 67.2, + 67.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v": [ + 66.8, + 67.2, + 67.1, + 66.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2": [ + 66.2, + 66.7, + 66.5, + 66.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k": [ + 66.0, + 66.6, + 66.3, + 66.2 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 66.0, + 66.5, + 66.4, + 66.2 + ], + 
"mid_block.attentions.0.transformer_blocks.3.attn1.to_q": [ + 66.0, + 66.5, + 66.3, + 66.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v": [ + 65.9, + 66.4, + 66.4, + 66.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 66.0, + 66.4, + 66.3, + 66.2 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q": [ + 65.9, + 66.4, + 66.3, + 66.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 66.1, + 66.4, + 66.3, + 66.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 66.1, + 66.5, + 66.4, + 66.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q": [ + 65.9, + 66.3, + 66.1, + 65.9 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 66.0, + 66.4, + 66.2, + 66.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k": [ + 66.0, + 66.3, + 66.1, + 66.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q": [ + 65.9, + 66.2, + 66.0, + 65.9 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.2": [ + 65.8, + 66.1, + 65.9, + 65.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2": [ + 65.5, + 66.0, + 65.8, + 65.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k": [ + 65.4, + 65.9, + 65.7, + 65.5 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 65.4, + 65.8, + 65.6, + 65.4 + ], + "mid_block.resnets.0.time_emb_proj": [ + 65.3, + 65.8, + 65.5, + 65.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k": [ + 65.2, + 65.7, + 65.4, + 65.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2": [ + 65.2, + 65.6, + 65.5, + 65.4 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k": [ + 65.3, + 65.7, + 65.5, + 65.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q": [ + 65.2, + 65.7, + 65.5, + 65.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 65.1, + 65.5, + 65.3, + 65.3 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 65.1, + 65.5, + 65.3, + 65.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v": [ + 65.0, + 65.2, + 65.2, + 65.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k": [ + 64.8, + 65.1, + 65.1, + 65.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k": [ + 64.9, + 65.1, + 65.2, + 65.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v": [ + 64.8, + 65.0, + 65.2, + 65.2 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 64.8, + 65.0, + 65.2, + 65.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2": [ + 65.1, + 65.3, + 65.4, + 65.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k": [ + 65.1, + 65.3, + 65.4, + 65.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q": [ + 65.1, + 65.2, + 65.4, + 65.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 65.1, + 65.3, + 65.4, + 65.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 65.1, + 65.3, + 65.4, + 65.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q": [ + 65.2, + 65.3, + 65.5, + 65.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q": [ + 65.0, + 65.2, + 65.3, + 65.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 65.0, + 65.2, + 65.3, + 65.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 64.4, + 64.6, + 64.8, + 64.8 + ], + 
"mid_block.attentions.0.transformer_blocks.2.attn1.to_v": [ + 64.4, + 64.6, + 64.7, + 64.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k": [ + 64.4, + 64.5, + 64.8, + 64.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 64.3, + 64.5, + 64.8, + 64.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k": [ + 64.4, + 64.5, + 64.8, + 64.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k": [ + 64.1, + 64.2, + 64.5, + 64.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q": [ + 64.0, + 64.1, + 64.4, + 64.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q": [ + 64.1, + 64.1, + 64.4, + 64.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2": [ + 64.1, + 64.1, + 64.4, + 64.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v": [ + 64.1, + 64.2, + 64.5, + 64.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 64.0, + 64.0, + 64.3, + 64.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 63.4, + 63.6, + 63.9, + 64.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 63.3, + 63.4, + 63.8, + 63.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k": [ + 63.2, + 63.3, + 63.7, + 63.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q": [ + 63.0, + 63.2, + 63.6, + 63.6 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.2": [ + 63.0, + 63.1, + 63.5, + 63.6 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v": [ + 63.1, + 63.1, + 63.5, + 63.6 + ], + "mid_block.resnets.1.conv1": [ + 62.9, + 63.0, + 63.4, + 63.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v": [ + 62.9, + 63.0, + 63.5, + 63.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 62.8, + 63.0, + 63.4, + 63.4 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 62.9, + 63.0, + 63.5, + 63.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 62.9, + 63.1, + 63.5, + 63.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k": [ + 63.0, + 63.0, + 63.5, + 63.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2": [ + 63.0, + 63.1, + 63.5, + 63.5 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 63.0, + 63.1, + 63.5, + 63.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q": [ + 63.0, + 63.1, + 63.5, + 63.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k": [ + 63.0, + 63.1, + 63.5, + 63.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q": [ + 62.8, + 62.8, + 63.2, + 63.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 62.8, + 62.9, + 63.2, + 63.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q": [ + 62.9, + 63.0, + 63.2, + 63.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 62.9, + 62.9, + 63.2, + 63.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2": [ + 63.2, + 63.3, + 63.6, + 63.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 63.2, + 63.4, + 63.6, + 63.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k": [ + 63.3, + 63.4, + 63.6, + 63.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v": [ + 63.2, + 63.3, + 63.6, + 63.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 63.1, + 63.3, + 63.7, + 63.6 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 63.1, + 63.3, + 63.7, + 63.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 63.2, + 63.3, + 63.7, + 63.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q": [ + 63.2, + 63.3, + 63.7, + 63.6 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 63.1, + 63.3, + 63.6, + 63.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v": [ + 63.0, + 63.2, + 63.5, + 63.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 63.0, + 63.2, + 63.4, + 63.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 63.0, + 63.2, + 63.4, + 63.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v": [ + 63.1, + 63.2, + 63.4, + 63.3 + ], + "down_blocks.2.attentions.1.proj_in": [ + 63.2, + 63.3, + 63.5, + 63.4 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k": [ + 63.2, + 63.4, + 63.4, + 63.4 + ], + "mid_block.resnets.1.conv2": [ + 63.3, + 63.4, + 63.5, + 63.4 + ], + "down_blocks.2.resnets.1.conv1": [ + 63.2, + 63.4, + 63.5, + 63.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 63.3, + 63.5, + 63.6, + 63.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v": [ + 63.2, + 63.4, + 63.4, + 63.4 + ], + "up_blocks.0.resnets.0.conv1": [ + 63.0, + 63.2, + 63.2, + 63.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v": [ + 62.9, + 63.2, + 63.2, + 63.1 + ], + "down_blocks.2.resnets.1.conv2": [ + 63.0, + 63.2, + 63.2, + 63.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0": [ + 62.9, + 63.1, + 63.1, + 62.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 62.9, + 63.0, + 63.1, + 62.9 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q": [ + 62.9, + 63.0, + 63.0, + 62.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 62.8, + 63.0, + 63.0, + 63.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0": [ + 63.0, + 63.1, + 63.2, + 63.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 62.9, + 63.0, + 63.1, + 63.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2": [ + 62.9, + 63.0, + 63.0, + 63.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k": [ + 62.9, + 63.0, + 63.0, + 63.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v": [ + 62.9, + 63.0, + 63.0, + 63.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2": [ + 63.0, + 63.1, + 63.0, + 62.9 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v": [ + 62.9, + 63.1, + 63.0, + 63.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 63.0, + 63.1, + 63.1, + 63.0 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 63.0, + 63.1, + 63.0, + 63.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2": [ + 62.9, + 63.0, + 63.0, + 62.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 62.9, + 63.1, + 62.8, + 62.8 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 62.9, + 63.0, + 62.8, + 62.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 62.9, + 63.0, + 62.8, + 62.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2": [ + 62.9, + 63.0, + 62.6, + 62.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2": [ + 62.9, + 63.1, + 62.6, + 62.8 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 62.8, + 63.0, + 
62.5, + 62.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 62.5, + 62.6, + 62.2, + 62.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2": [ + 62.3, + 62.5, + 62.1, + 62.3 + ], + "mid_block.resnets.0.conv1": [ + 62.3, + 62.5, + 62.0, + 62.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 62.3, + 62.5, + 62.1, + 62.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v": [ + 62.3, + 62.4, + 62.0, + 62.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0": [ + 62.1, + 62.2, + 61.8, + 62.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v": [ + 62.1, + 62.1, + 61.7, + 61.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v": [ + 62.1, + 62.2, + 61.7, + 62.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v": [ + 61.9, + 62.0, + 61.5, + 61.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2": [ + 61.9, + 61.9, + 61.4, + 61.8 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 61.8, + 61.9, + 61.5, + 61.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 61.8, + 61.9, + 61.5, + 61.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 61.8, + 61.8, + 61.4, + 61.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v": [ + 61.7, + 61.7, + 61.3, + 61.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 61.7, + 61.7, + 61.3, + 61.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 61.8, + 61.7, + 61.4, + 61.7 + ], + "mid_block.resnets.0.conv2": [ + 61.8, + 61.8, + 61.5, + 61.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 61.8, + 61.7, + 61.4, + 61.7 + ], + "up_blocks.0.resnets.1.conv2": [ + 62.1, + 62.1, + 61.7, + 61.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2": [ + 62.1, + 62.0, + 61.7, + 61.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2": [ + 62.1, + 61.9, + 61.7, + 61.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v": [ + 62.1, + 62.0, + 61.6, + 61.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v": [ + 62.1, + 61.9, + 61.6, + 61.8 + ], + "down_blocks.2.attentions.1.proj_out": [ + 62.2, + 62.0, + 61.8, + 61.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0": [ + 62.2, + 61.9, + 61.7, + 61.8 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 62.1, + 61.8, + 61.6, + 61.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v": [ + 62.1, + 61.9, + 61.6, + 61.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 62.4, + 62.2, + 62.0, + 62.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 62.5, + 62.4, + 62.0, + 62.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v": [ + 62.3, + 62.2, + 61.8, + 61.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0": [ + 62.4, + 62.2, + 61.9, + 62.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj": [ + 62.2, + 62.2, + 61.8, + 62.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0": [ + 62.3, + 62.2, + 61.9, + 62.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v": [ + 62.4, + 62.3, + 62.0, + 62.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0": [ + 62.4, + 62.3, + 61.9, + 62.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v": [ + 62.6, + 62.5, + 62.1, + 62.3 + ], + "mid_block.attentions.0.proj_out": [ + 
62.6, + 62.5, + 62.1, + 62.4 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q": [ + 62.6, + 62.5, + 62.1, + 62.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v": [ + 62.5, + 62.3, + 61.9, + 62.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v": [ + 62.7, + 62.5, + 62.1, + 62.4 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 62.7, + 62.5, + 62.1, + 62.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 62.7, + 62.6, + 62.1, + 62.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj": [ + 62.7, + 62.5, + 62.1, + 62.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 62.6, + 62.4, + 61.9, + 62.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v": [ + 62.6, + 62.5, + 62.1, + 62.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 62.7, + 62.7, + 62.2, + 62.4 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k": [ + 62.7, + 62.6, + 62.2, + 62.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 62.6, + 62.6, + 62.2, + 62.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 62.6, + 62.7, + 62.2, + 62.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 62.6, + 62.7, + 62.3, + 62.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v": [ + 62.7, + 62.7, + 62.2, + 62.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 62.7, + 62.7, + 62.3, + 62.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 62.7, + 62.8, + 62.3, + 62.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj": [ + 62.6, + 62.8, + 62.2, + 62.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2": [ + 62.7, + 62.8, + 62.3, + 62.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v": [ + 62.7, + 62.9, + 62.3, + 62.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 62.6, + 62.7, + 62.2, + 62.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2": [ + 62.6, + 62.8, + 62.3, + 62.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k": [ + 62.6, + 62.7, + 62.3, + 62.6 + ], + "up_blocks.0.resnets.1.conv1": [ + 62.6, + 62.7, + 62.2, + 62.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v": [ + 62.5, + 62.7, + 62.1, + 62.5 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 62.5, + 62.6, + 62.1, + 62.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v": [ + 62.2, + 62.4, + 61.8, + 62.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v": [ + 61.8, + 62.2, + 61.5, + 61.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 61.8, + 62.1, + 61.5, + 61.8 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v": [ + 61.8, + 62.1, + 61.4, + 61.8 + ], + "up_blocks.0.resnets.0.conv2": [ + 61.6, + 62.2, + 61.2, + 61.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj": [ + 61.7, + 62.3, + 61.4, + 62.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj": [ + 61.8, + 62.3, + 61.4, + 62.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2": [ + 61.7, + 62.2, + 61.4, + 61.9 + ], + "up_blocks.0.resnets.2.conv2": [ + 61.7, + 62.2, + 61.4, + 61.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 61.6, + 62.2, + 61.4, + 61.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 61.2, 
+ 61.9, + 61.0, + 61.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 61.4, + 61.9, + 61.2, + 61.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2": [ + 61.4, + 61.9, + 61.1, + 61.7 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 61.4, + 61.9, + 61.1, + 61.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 61.4, + 61.9, + 61.1, + 61.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v": [ + 61.1, + 61.6, + 60.9, + 61.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2": [ + 61.1, + 61.6, + 60.9, + 61.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 60.9, + 61.3, + 60.7, + 61.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2": [ + 61.1, + 61.4, + 60.9, + 61.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2": [ + 61.1, + 61.5, + 60.9, + 61.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 61.1, + 61.4, + 60.8, + 61.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj": [ + 61.0, + 61.4, + 60.8, + 61.4 + ], + "up_blocks.0.attentions.0.proj_out": [ + 61.6, + 61.9, + 61.2, + 61.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 61.5, + 61.8, + 61.2, + 61.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 61.5, + 61.7, + 61.2, + 61.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2": [ + 61.7, + 61.8, + 61.4, + 61.9 + ], + "up_blocks.0.resnets.2.conv1": [ + 61.7, + 61.8, + 61.4, + 61.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0": [ + 61.7, + 61.8, + 61.4, + 61.9 + ], + "down_blocks.2.resnets.0.conv2": [ + 61.5, + 61.7, + 61.2, + 61.7 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 61.7, + 61.8, + 61.4, + 61.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 61.7, + 61.8, + 61.4, + 61.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v": [ + 61.6, + 61.7, + 61.3, + 61.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v": [ + 61.3, + 61.3, + 61.1, + 61.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 61.4, + 61.5, + 61.2, + 61.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 61.6, + 61.5, + 61.3, + 61.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 61.3, + 61.2, + 61.1, + 61.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 61.4, + 61.2, + 61.2, + 61.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj": [ + 61.2, + 61.0, + 61.0, + 61.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k": [ + 61.1, + 60.9, + 61.0, + 61.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2": [ + 61.1, + 60.8, + 60.9, + 61.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 61.3, + 61.0, + 61.0, + 61.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 60.5, + 60.5, + 60.4, + 60.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 60.6, + 60.6, + 60.5, + 60.7 + ], + "down_blocks.2.attentions.0.proj_in": [ + 60.3, + 60.4, + 60.3, + 60.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 60.3, + 60.4, + 60.2, + 60.5 + ], + "up_blocks.0.attentions.0.proj_in": [ + 60.6, + 60.5, + 60.4, + 60.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 60.4, + 60.3, + 60.2, + 60.4 + ], + 
"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 60.4, + 60.3, + 60.2, + 60.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 60.2, + 60.2, + 60.0, + 60.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj": [ + 60.4, + 60.3, + 60.1, + 60.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 60.2, + 60.2, + 60.0, + 60.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 60.5, + 60.3, + 60.3, + 60.4 + ], + "down_blocks.2.attentions.0.proj_out": [ + 60.3, + 60.1, + 60.1, + 60.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2": [ + 60.3, + 59.9, + 60.0, + 60.1 + ], + "mid_block.attentions.0.proj_in": [ + 60.2, + 59.9, + 60.0, + 60.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 60.0, + 59.7, + 59.8, + 59.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 59.9, + 59.5, + 59.7, + 59.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 60.1, + 59.6, + 59.8, + 59.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 60.2, + 59.6, + 59.8, + 59.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2": [ + 60.2, + 59.9, + 59.8, + 60.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 60.1, + 59.8, + 59.7, + 59.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 60.0, + 59.8, + 59.7, + 59.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 60.0, + 59.8, + 59.7, + 59.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 57.0, + 56.9, + 56.6, + 56.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 56.9, + 56.9, + 56.6, + 56.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 57.1, + 57.0, + 56.7, + 56.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 57.1, + 57.0, + 56.8, + 56.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k": [ + 57.0, + 56.9, + 56.6, + 56.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 56.9, + 56.8, + 56.5, + 56.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 56.9, + 56.7, + 56.4, + 56.5 + ], + "up_blocks.0.attentions.2.proj_in": [ + 56.6, + 56.6, + 56.3, + 56.4 + ], + "down_blocks.2.resnets.0.conv1": [ + 56.5, + 56.5, + 56.1, + 56.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 56.3, + 56.2, + 55.8, + 56.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 56.2, + 56.1, + 55.7, + 55.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 56.2, + 56.1, + 55.6, + 55.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 56.1, + 56.1, + 55.5, + 55.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 55.9, + 55.8, + 55.4, + 55.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 55.8, + 55.8, + 55.4, + 55.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 55.4, + 55.3, + 54.9, + 55.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 55.2, + 55.2, + 54.8, + 54.9 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 54.8, + 54.9, + 54.6, + 54.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 54.8, + 54.9, + 54.6, + 54.7 + ], + "up_blocks.0.attentions.2.proj_out": [ + 54.7, + 54.7, + 54.4, + 54.5 + ], + 
"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q": [ + 54.7, + 54.7, + 54.3, + 54.5 + ], + "add_embedding.linear_2": [ + 54.7, + 53.9, + 54.4, + 54.6 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 54.1, + 53.3, + 53.8, + 54.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 54.0, + 53.1, + 53.7, + 53.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 53.9, + 53.1, + 53.6, + 53.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 53.7, + 52.9, + 53.3, + 53.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 53.7, + 53.0, + 53.4, + 53.7 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 53.9, + 53.2, + 53.6, + 53.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 54.0, + 53.2, + 53.8, + 54.0 + ], + "up_blocks.0.attentions.1.proj_in": [ + 53.4, + 52.9, + 53.3, + 53.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k": [ + 52.2, + 51.6, + 52.3, + 52.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 52.2, + 51.5, + 52.2, + 52.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 52.4, + 51.7, + 52.5, + 52.4 + ], + "add_embedding.linear_1": [ + 52.2, + 51.8, + 52.7, + 52.5 + ], + "up_blocks.1.resnets.0.conv1": [ + 52.6, + 52.0, + 52.9, + 52.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 52.4, + 51.9, + 52.7, + 52.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 52.3, + 51.7, + 52.6, + 52.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 51.8, + 51.2, + 52.1, + 51.9 + ], + "up_blocks.1.resnets.0.conv2": [ + 51.9, + 51.3, + 52.2, + 52.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 51.8, + 51.2, + 52.1, + 52.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 51.9, + 51.2, + 52.2, + 52.0 + ], + "up_blocks.0.attentions.1.proj_out": [ + 52.4, + 51.7, + 52.8, + 52.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 52.3, + 51.5, + 52.6, + 52.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 52.1, + 51.4, + 52.5, + 52.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 52.0, + 51.3, + 52.3, + 52.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 51.7, + 51.1, + 52.0, + 51.9 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 51.5, + 50.7, + 51.7, + 51.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 51.4, + 50.7, + 51.7, + 51.5 + ], + "down_blocks.1.resnets.1.conv2": [ + 51.3, + 50.5, + 51.6, + 51.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 51.2, + 50.4, + 51.5, + 51.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 51.1, + 50.3, + 51.4, + 51.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 51.0, + 50.3, + 51.4, + 51.1 + ], + "up_blocks.1.attentions.0.proj_out": [ + 51.0, + 50.2, + 51.3, + 51.0 + ], + "down_blocks.1.attentions.1.proj_out": [ + 50.8, + 50.1, + 51.3, + 50.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 50.6, + 49.9, + 51.0, + 50.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 50.4, + 49.7, + 50.8, + 50.4 + ], + "down_blocks.1.resnets.1.conv1": [ + 50.4, + 49.7, + 50.8, + 50.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 50.1, + 49.4, + 50.4, + 50.0 + ], + 
"up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 49.9, + 49.2, + 50.2, + 49.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k": [ + 49.6, + 48.8, + 49.9, + 49.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 49.6, + 48.9, + 49.9, + 49.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 49.7, + 48.9, + 50.0, + 49.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 49.6, + 48.9, + 50.0, + 49.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 49.5, + 48.8, + 49.8, + 49.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 49.2, + 48.5, + 49.6, + 49.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 49.0, + 48.3, + 49.5, + 49.1 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 48.7, + 48.0, + 49.1, + 48.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 48.7, + 48.1, + 49.1, + 48.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 48.6, + 48.0, + 49.0, + 48.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 48.6, + 48.0, + 49.0, + 48.7 + ], + "down_blocks.1.resnets.0.conv2": [ + 48.4, + 47.8, + 48.7, + 48.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 48.1, + 47.5, + 48.4, + 48.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q": [ + 47.8, + 47.2, + 48.1, + 47.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 47.7, + 47.1, + 48.1, + 47.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 47.7, + 47.1, + 48.0, + 47.8 + ], + "down_blocks.1.attentions.1.proj_in": [ + 47.5, + 47.0, + 47.9, + 47.7 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 46.5, + 46.0, + 46.9, + 46.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 46.3, + 45.9, + 46.8, + 46.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 46.2, + 45.8, + 46.7, + 46.5 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 46.5, + 46.0, + 46.9, + 46.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 45.9, + 45.5, + 46.3, + 46.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 46.1, + 45.8, + 46.5, + 46.4 + ], + "up_blocks.1.attentions.0.proj_in": [ + 46.4, + 45.9, + 46.8, + 46.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 46.4, + 46.0, + 46.8, + 46.6 + ], + "down_blocks.1.downsamplers.0.conv": [ + 46.4, + 46.1, + 46.9, + 46.7 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 45.7, + 45.5, + 46.1, + 46.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 45.7, + 45.5, + 46.2, + 46.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2": [ + 45.3, + 45.1, + 45.7, + 45.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 45.0, + 44.8, + 45.4, + 45.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 44.9, + 44.7, + 45.3, + 45.1 + ], + "up_blocks.1.attentions.1.proj_out": [ + 45.0, + 44.9, + 45.5, + 45.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 45.2, + 45.1, + 45.7, + 45.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 44.8, + 44.6, + 45.2, + 45.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 44.7, + 44.6, + 45.2, + 44.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 44.5, + 44.4, + 45.0, + 44.7 + ], + 
"down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 44.5, + 44.4, + 44.9, + 44.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 44.5, + 44.3, + 44.9, + 44.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 44.5, + 44.3, + 44.9, + 44.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 44.5, + 44.3, + 44.9, + 44.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 44.5, + 44.3, + 44.9, + 44.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v": [ + 44.1, + 43.9, + 44.5, + 44.3 + ], + "up_blocks.0.upsamplers.0.conv": [ + 44.3, + 44.1, + 44.7, + 44.5 + ], + "down_blocks.1.attentions.0.proj_out": [ + 44.4, + 44.2, + 44.8, + 44.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 44.4, + 44.1, + 44.7, + 44.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 44.3, + 44.1, + 44.7, + 44.4 + ], + "up_blocks.1.resnets.1.conv2": [ + 44.2, + 44.0, + 44.6, + 44.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 44.0, + 43.8, + 44.3, + 44.1 + ], + "up_blocks.1.resnets.2.conv2": [ + 43.5, + 43.3, + 43.8, + 43.6 + ], + "up_blocks.1.resnets.1.conv1": [ + 43.5, + 43.4, + 43.9, + 43.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 43.5, + 43.3, + 43.9, + 43.6 + ], + "up_blocks.1.attentions.2.proj_out": [ + 43.7, + 43.5, + 44.0, + 43.8 + ], + "up_blocks.1.attentions.1.proj_in": [ + 43.7, + 43.5, + 44.0, + 43.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 43.7, + 43.5, + 44.0, + 43.8 + ], + "down_blocks.1.resnets.0.conv1": [ + 43.6, + 43.4, + 43.9, + 43.7 + ], + "up_blocks.1.attentions.2.proj_in": [ + 43.7, + 43.4, + 44.0, + 43.8 + ], + "up_blocks.1.resnets.2.conv1": [ + 43.6, + 43.4, + 44.0, + 43.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 43.1, + 42.8, + 43.4, + 43.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 43.1, + 42.8, + 43.4, + 43.2 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 43.0, + 42.8, + 43.4, + 43.1 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 42.4, + 42.3, + 42.8, + 42.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 42.7, + 42.6, + 43.1, + 42.8 + ], + "down_blocks.1.attentions.0.proj_in": [ + 42.7, + 42.6, + 43.1, + 42.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 42.3, + 42.1, + 42.6, + 42.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 42.3, + 42.2, + 42.7, + 42.4 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 42.2, + 42.1, + 42.5, + 42.2 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 42.8, + 42.8, + 43.1, + 42.9 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 42.4, + 42.5, + 42.6, + 42.5 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 42.3, + 42.3, + 42.4, + 42.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k": [ + 42.0, + 42.0, + 42.2, + 41.9 + ], + "down_blocks.0.resnets.1.conv2": [ + 41.9, + 41.9, + 42.1, + 41.8 + ], + "down_blocks.0.resnets.0.conv1": [ + 41.9, + 41.9, + 42.0, + 41.8 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 41.6, + 41.6, + 41.7, + 41.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 41.7, + 41.7, + 41.8, + 41.6 + ], + "down_blocks.0.resnets.1.conv1": [ + 41.4, + 41.4, + 41.5, + 41.3 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 41.1, + 41.2, + 41.3, + 41.1 + ], + 
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 41.1, + 41.1, + 41.3, + 41.1 + ], + "down_blocks.0.resnets.0.conv2": [ + 41.2, + 41.1, + 41.4, + 41.1 + ], + "down_blocks.0.downsamplers.0.conv": [ + 41.1, + 40.9, + 41.2, + 40.8 + ], + "up_blocks.2.resnets.0.conv1": [ + 41.0, + 41.0, + 41.0, + 40.8 + ], + "up_blocks.2.resnets.0.conv2": [ + 41.4, + 41.3, + 41.3, + 41.1 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 41.1, + 41.1, + 41.0, + 40.9 + ], + "up_blocks.2.resnets.1.conv2": [ + 40.8, + 40.7, + 40.6, + 40.6 + ], + "time_embedding.linear_2": [ + 40.6, + 40.5, + 40.6, + 40.6 + ], + "up_blocks.2.resnets.1.conv1": [ + 40.4, + 40.3, + 40.4, + 40.4 + ], + "up_blocks.1.upsamplers.0.conv": [ + 40.5, + 40.5, + 40.5, + 40.5 + ], + "time_embedding.linear_1": [ + 40.7, + 40.6, + 40.7, + 40.7 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 40.7, + 40.6, + 40.7, + 40.8 + ], + "up_blocks.2.resnets.2.conv1": [ + 40.8, + 40.7, + 40.8, + 40.9 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 40.5, + 40.3, + 40.4, + 40.5 + ], + "up_blocks.2.resnets.2.conv2": [ + 39.6, + 39.5, + 39.6, + 39.7 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 41.5, + 41.5, + 41.5, + 41.5 + ] + }, + "2": { + "metadata": { + "nbits": 2, + "out_ngroups": 1, + "in_ngroups": 1, + "cumulative": true, + "candidates": [ + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.4.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.5.attn2.to_q", + 
"mid_block.attentions.0.transformer_blocks.5.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q", + 
"mid_block.attentions.0.transformer_blocks.6.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q", + 
"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0", + 
"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0", 
+ "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.5.ff.net.2", + "mid_block.attentions.0.transformer_blocks.8.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.6.ff.net.2", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0", + 
"mid_block.attentions.0.transformer_blocks.9.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.8.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2", + "down_blocks.2.resnets.1.time_emb_proj", + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.2.ff.net.2", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.7.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + 
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.4.ff.net.2", + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0", + "mid_block.resnets.1.time_emb_proj", + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.3.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k", + "up_blocks.0.resnets.0.time_emb_proj", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k", + 
"up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.1.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q", + "mid_block.resnets.0.time_emb_proj", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.0.ff.net.2", + "mid_block.resnets.1.conv2", + 
"down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k", + "mid_block.resnets.1.conv1", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q", + "down_blocks.2.resnets.1.conv1", + "down_blocks.2.attentions.1.proj_in", + "down_blocks.2.resnets.1.conv2", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.2.resnets.0.time_emb_proj", + "mid_block.resnets.0.conv1", + "mid_block.resnets.0.conv2", + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2", + "down_blocks.2.attentions.1.proj_out", + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0", + "up_blocks.0.resnets.0.conv1", + "up_blocks.0.resnets.1.conv1", + "up_blocks.0.resnets.2.time_emb_proj", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2", + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v", + 
"up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0", + "up_blocks.0.resnets.1.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v", + "up_blocks.0.resnets.1.conv2", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0", + "mid_block.attentions.0.proj_out", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v", + "up_blocks.0.attentions.0.proj_out", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2", + "up_blocks.0.resnets.0.conv2", + "up_blocks.0.resnets.2.conv1", + "up_blocks.0.resnets.2.conv2", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0", + 
"up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj", + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2", + "down_blocks.2.resnets.0.conv2", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2", + "mid_block.attentions.0.proj_in", + "up_blocks.0.resnets.0.conv_shortcut", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.proj_in", + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj", + "down_blocks.2.attentions.0.proj_out", + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v", + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v", + "up_blocks.0.attentions.0.proj_in", + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj", + "down_blocks.2.resnets.0.conv1", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.2.proj_in", + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.attentions.1.proj_in", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj", + 
"up_blocks.0.attentions.2.proj_out", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.1.proj_out", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.0.resnets.1.conv_shortcut", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q", + "add_embedding.linear_2", + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2", + "down_blocks.1.resnets.1.time_emb_proj", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0", + "up_blocks.1.resnets.0.conv2", + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q", + "down_blocks.1.resnets.1.conv2", + "up_blocks.1.resnets.0.time_emb_proj", + "up_blocks.1.resnets.0.conv1", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.1.resnets.0.time_emb_proj", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.1.proj_out", + "up_blocks.1.attentions.0.proj_out", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v", + "down_blocks.1.resnets.1.conv1", + "down_blocks.1.resnets.0.conv2", + "up_blocks.1.resnets.2.time_emb_proj", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v", + "add_embedding.linear_1", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.attentions.1.proj_in", + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2", + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0", + "up_blocks.1.attentions.0.proj_in", + 
"down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v", + "up_blocks.1.resnets.1.time_emb_proj", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v", + "down_blocks.2.resnets.0.conv_shortcut", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0", + "down_blocks.1.downsamplers.0.conv", + "up_blocks.1.attentions.1.proj_out", + "up_blocks.0.resnets.2.conv_shortcut", + "down_blocks.1.attentions.0.proj_out", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2", + "up_blocks.1.resnets.1.conv2", + "up_blocks.1.resnets.2.conv2", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj", + "up_blocks.0.upsamplers.0.conv", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k", + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj", + "up_blocks.1.attentions.1.proj_in", + "up_blocks.1.attentions.2.proj_out", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.2.proj_in", + "up_blocks.1.resnets.1.conv1", + "up_blocks.1.resnets.2.conv1", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.attentions.0.proj_in", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q", + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.resnets.0.conv1", + "down_blocks.0.resnets.1.time_emb_proj", + "up_blocks.2.resnets.0.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.2.resnets.1.time_emb_proj", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.resnets.0.conv_shortcut", + "down_blocks.1.resnets.0.conv_shortcut", + "up_blocks.1.resnets.1.conv_shortcut", + "up_blocks.2.resnets.2.time_emb_proj", + "down_blocks.0.resnets.0.time_emb_proj", + "down_blocks.0.resnets.0.conv1", + "down_blocks.0.downsamplers.0.conv", + "down_blocks.0.resnets.1.conv2", + "up_blocks.1.resnets.2.conv_shortcut", + "down_blocks.0.resnets.1.conv1", + "down_blocks.0.resnets.0.conv2", + "time_embedding.linear_1", + "up_blocks.2.resnets.0.conv1", + "up_blocks.2.resnets.0.conv2", + "up_blocks.1.upsamplers.0.conv", + "up_blocks.2.resnets.2.conv_shortcut", + "up_blocks.2.resnets.1.conv_shortcut", + "up_blocks.2.resnets.0.conv_shortcut", + "up_blocks.2.resnets.1.conv1", + "up_blocks.2.resnets.1.conv2", + "time_embedding.linear_2", + "up_blocks.2.resnets.2.conv1", + "up_blocks.2.resnets.2.conv2" + ], + "sizes": [ + 2621440, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 
2621440, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 2621440, + 2621440, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 2621440, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 6553600, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 6553600, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 2621440, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 6553600, + 1638400, + 6553600, + 6553600, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 6553600, + 6553600, + 13107200, + 6553600, + 1638400, + 6553600, + 2621440, + 6553600, + 13107200, + 1638400, + 1638400, + 6553600, + 1638400, + 6553600, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 13107200, + 13107200, + 6553600, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 6553600, + 1638400, + 6553600, + 1638400, + 2621440, + 
1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 13107200, + 2621440, + 1638400, + 1638400, + 6553600, + 1638400, + 6553600, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 13107200, + 13107200, + 6553600, + 1638400, + 1638400, + 1638400, + 13107200, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 6553600, + 13107200, + 1638400, + 1638400, + 13107200, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 13107200, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 13107200, + 6553600, + 1638400, + 13107200, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 6553600, + 6553600, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 2621440, + 1310720, + 1638400, + 13107200, + 2621440, + 13107200, + 1638400, + 1638400, + 13107200, + 1638400, + 1310720, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 13107200, + 1638400, + 13107200, + 1638400, + 2621440, + 6553600, + 14745600, + 6553600, + 6553600, + 1638400, + 1638400, + 2621440, + 13107200, + 1638400, + 13107200, + 13107200, + 6553600, + 1638400, + 1310720, + 1638400, + 14745600, + 1638400, + 2621440, + 13107200, + 1638400, + 14745600, + 1638400, + 14745600, + 1638400, + 1638400, + 1638400, + 409600, + 13107200, + 2621440, + 13107200, + 409600, + 1638400, + 1638400, + 14745600, + 14745600, + 13107200, + 1638400, + 13107200, + 6553600, + 1638400, + 409600, + 1638400, + 1638400, + 1638400, + 29491200, + 29491200, + 1638400, + 1638400, + 6553600, + 13107200, + 1638400, + 6553600, + 1638400, + 6553600, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 409600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1310720, + 1638400, + 1638400, + 14745600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1310720, + 6553600, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 13107200, + 1638400, + 13107200, + 1310720, + 1638400, + 13107200, + 1638400, + 1638400, + 6553600, + 6553600, + 1638400, + 1638400, + 6553600, + 6553600, + 14745600, + 22118400, + 14745600, + 1638400, + 1638400, + 13107200, + 13107200, + 1310720, + 6553600, + 14745600, + 1638400, + 13107200, + 6553600, + 409600, + 1310720, + 1638400, + 6553600, + 1638400, + 13107200, + 13107200, + 6553600, + 1638400, + 3276800, + 409600, + 1638400, + 13107200, + 1310720, + 409600, + 13107200, + 1638400, + 13107200, + 1310720, + 1310720, + 1638400, + 13107200, + 7372800, + 409600, + 6553600, + 1310720, + 6553600, + 6553600, + 1638400, + 13107200, + 409600, + 13107200, + 13107200, + 409600, + 409600, + 13107200, + 13107200, + 13107200, + 1310720, + 13107200, + 1310720, + 1638400, + 409600, + 1638400, + 409600, + 13107200, + 1638400, + 409600, + 409600, + 409600, + 13107200, + 1638400, + 409600, + 409600, + 1310720, + 409600, + 2621440, + 
1310720, + 409600, + 409600, + 409600, + 409600, + 409600, + 1638400, + 409600, + 409600, + 409600, + 409600, + 409600, + 3276800, + 409600, + 1638400, + 1638400, + 819200, + 409600, + 409600, + 1638400, + 409600, + 409600, + 409600, + 409600, + 3686400, + 409600, + 3686400, + 819200, + 11059200, + 409600, + 409600, + 409600, + 1638400, + 819200, + 409600, + 409600, + 409600, + 409600, + 409600, + 3686400, + 3686400, + 819200, + 3276800, + 409600, + 1638400, + 3276800, + 409600, + 3604480, + 3276800, + 409600, + 1638400, + 409600, + 1638400, + 3276800, + 409600, + 409600, + 409600, + 1310720, + 409600, + 819200, + 409600, + 409600, + 819200, + 409600, + 409600, + 409600, + 3686400, + 409600, + 2457600, + 409600, + 409600, + 1638400, + 409600, + 1310720, + 1638400, + 1638400, + 3686400, + 3686400, + 409600, + 3276800, + 14745600, + 3276800, + 1310720, + 3276800, + 409600, + 409600, + 1638400, + 409600, + 7372800, + 5529600, + 3276800, + 409600, + 409600, + 3276800, + 3276800, + 1843200, + 409600, + 409600, + 1310720, + 409600, + 409600, + 409600, + 1228800, + 204800, + 819200, + 409600, + 409600, + 921600, + 921600, + 921600, + 614400, + 921600, + 921600, + 409600, + 2764800, + 921600, + 3686400, + 204800, + 204800, + 307200, + 1843200, + 921600, + 1638400, + 1843200, + 921600 + ] + }, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k": [ + 82.2, + 82.2, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 82.1, + 82.1, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.0, + 82.1, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k": [ + 81.9, + 82.1, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v": [ + 82.0, + 82.2, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q": [ + 81.9, + 82.1, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v": [ + 82.0, + 82.0, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q": [ + 81.9, + 82.1, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k": [ + 81.9, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q": [ + 81.9, + 82.0, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q": [ + 81.9, + 82.1, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 81.9, + 82.0, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q": [ + 82.0, + 82.1, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k": [ + 81.9, + 82.0, + 82.2, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 81.9, + 82.0, + 82.3, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q": [ + 81.8, + 82.0, + 82.1, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 81.9, + 82.0, + 82.1, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 82.1, + 82.0, + 82.3, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.0, + 82.0, + 82.0, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 81.8, + 82.0, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k": [ + 81.8, + 81.9, + 82.2, + 81.9 + ], + 
"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q": [ + 81.9, + 82.1, + 82.1, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q": [ + 81.9, + 82.1, + 82.0, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 81.8, + 82.0, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q": [ + 81.8, + 82.1, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 81.9, + 82.0, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v": [ + 81.8, + 82.0, + 81.9, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q": [ + 81.6, + 82.1, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v": [ + 81.7, + 81.8, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q": [ + 81.8, + 81.9, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v": [ + 81.9, + 82.0, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q": [ + 81.8, + 82.1, + 82.0, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_q": [ + 81.7, + 81.9, + 82.0, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_q": [ + 81.8, + 82.0, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v": [ + 81.7, + 81.9, + 81.9, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k": [ + 81.6, + 81.9, + 82.0, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 81.8, + 81.8, + 81.9, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q": [ + 81.6, + 82.0, + 82.0, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q": [ + 81.7, + 82.0, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 81.7, + 82.0, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k": [ + 81.7, + 81.9, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 81.7, + 82.0, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v": [ + 81.7, + 82.0, + 82.1, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v": [ + 81.6, + 81.9, + 82.0, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k": [ + 81.7, + 81.9, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 81.6, + 81.8, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q": [ + 81.6, + 81.9, + 81.9, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 81.6, + 81.9, + 81.9, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k": [ + 81.7, + 81.7, + 81.9, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q": [ + 81.7, + 81.8, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 81.8, + 81.8, + 81.8, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k": [ + 81.7, + 81.8, + 81.9, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q": [ + 81.9, + 81.8, + 81.8, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 81.9, + 81.8, + 81.8, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k": [ + 81.7, + 81.8, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.6, + 81.7, + 81.8, + 
81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q": [ + 81.6, + 81.8, + 81.8, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q": [ + 81.7, + 81.6, + 81.8, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q": [ + 81.6, + 81.7, + 81.7, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q": [ + 81.5, + 81.7, + 81.7, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v": [ + 81.7, + 81.6, + 81.8, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q": [ + 81.6, + 81.6, + 81.6, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q": [ + 81.7, + 81.8, + 81.9, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 81.7, + 81.8, + 81.7, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v": [ + 81.7, + 81.6, + 81.7, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q": [ + 81.5, + 81.6, + 81.8, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q": [ + 81.7, + 81.8, + 81.8, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k": [ + 81.7, + 81.6, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 81.7, + 81.7, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 81.6, + 81.6, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k": [ + 81.6, + 81.6, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q": [ + 81.8, + 81.6, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q": [ + 81.6, + 81.6, + 81.5, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0": [ + 81.6, + 81.5, + 81.7, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.5, + 81.5, + 81.7, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k": [ + 81.5, + 81.5, + 81.6, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v": [ + 81.4, + 81.5, + 81.6, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k": [ + 81.3, + 81.5, + 81.6, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v": [ + 81.4, + 81.5, + 81.5, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v": [ + 81.4, + 81.6, + 81.5, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v": [ + 81.4, + 81.5, + 81.5, + 81.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q": [ + 81.4, + 81.4, + 81.5, + 81.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k": [ + 81.4, + 81.4, + 81.5, + 81.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q": [ + 81.5, + 81.3, + 81.6, + 81.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k": [ + 81.5, + 81.4, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 81.3, + 81.4, + 81.6, + 81.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q": [ + 81.3, + 81.4, + 81.4, + 81.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q": [ + 81.4, + 81.4, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 81.3, + 81.3, + 81.4, + 81.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q": [ + 81.3, + 81.3, + 81.4, + 81.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v": [ + 81.3, + 81.4, + 81.5, + 81.1 + ], + 
"mid_block.attentions.0.transformer_blocks.7.attn2.to_v": [ + 81.3, + 81.6, + 81.5, + 81.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k": [ + 81.5, + 81.3, + 81.5, + 81.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 81.5, + 81.4, + 81.5, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k": [ + 81.4, + 81.3, + 81.6, + 81.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k": [ + 81.4, + 81.3, + 81.5, + 81.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v": [ + 81.2, + 81.4, + 81.4, + 81.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k": [ + 81.3, + 81.3, + 81.4, + 81.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k": [ + 81.3, + 81.3, + 81.3, + 81.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k": [ + 81.3, + 81.3, + 81.4, + 81.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q": [ + 81.2, + 81.2, + 81.4, + 81.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k": [ + 81.3, + 81.3, + 81.4, + 81.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v": [ + 81.3, + 81.2, + 81.3, + 81.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k": [ + 81.3, + 81.3, + 81.5, + 81.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 81.2, + 81.3, + 81.4, + 80.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 81.3, + 81.2, + 81.4, + 81.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q": [ + 81.2, + 81.3, + 81.1, + 80.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 81.1, + 81.1, + 81.2, + 80.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q": [ + 81.1, + 81.2, + 81.3, + 80.9 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k": [ + 81.2, + 81.1, + 81.1, + 80.9 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 81.3, + 81.1, + 81.2, + 81.0 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q": [ + 81.2, + 81.1, + 81.2, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.2, + 81.1, + 81.1, + 80.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q": [ + 81.2, + 81.0, + 81.0, + 80.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 81.2, + 81.1, + 81.1, + 80.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k": [ + 81.1, + 81.1, + 81.0, + 80.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k": [ + 81.1, + 81.1, + 81.0, + 80.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 81.2, + 81.0, + 81.1, + 80.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v": [ + 81.0, + 80.9, + 81.1, + 80.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v": [ + 81.1, + 80.9, + 81.0, + 80.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q": [ + 81.0, + 80.9, + 81.0, + 80.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 81.1, + 80.8, + 81.1, + 80.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k": [ + 81.0, + 81.0, + 81.0, + 80.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k": [ + 81.0, + 81.0, + 81.0, + 80.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q": [ + 81.0, + 80.8, + 81.0, + 81.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v": [ + 81.1, + 81.0, + 81.1, + 80.9 + 
], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 81.1, + 81.0, + 81.0, + 80.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.0, + 80.8, + 81.0, + 80.9 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k": [ + 81.0, + 81.0, + 81.1, + 80.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v": [ + 81.0, + 80.9, + 81.2, + 80.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k": [ + 81.1, + 80.8, + 81.1, + 80.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 81.0, + 80.7, + 81.1, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 81.0, + 80.9, + 81.1, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q": [ + 81.1, + 80.9, + 81.0, + 80.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 81.1, + 80.8, + 81.0, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k": [ + 80.9, + 80.8, + 81.0, + 80.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 80.9, + 80.8, + 81.0, + 80.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v": [ + 80.9, + 80.8, + 81.1, + 80.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 80.9, + 80.9, + 81.1, + 80.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q": [ + 80.9, + 80.7, + 81.2, + 80.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q": [ + 80.9, + 80.9, + 81.1, + 80.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k": [ + 80.9, + 80.9, + 81.1, + 80.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k": [ + 81.0, + 80.8, + 81.1, + 80.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q": [ + 80.8, + 80.8, + 81.0, + 80.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q": [ + 80.8, + 80.7, + 81.1, + 80.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q": [ + 80.8, + 80.7, + 81.0, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q": [ + 80.9, + 80.8, + 80.9, + 80.6 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 81.0, + 80.9, + 81.0, + 80.6 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k": [ + 81.0, + 80.9, + 81.0, + 80.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 80.8, + 80.8, + 81.0, + 80.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 81.0, + 80.8, + 81.0, + 80.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v": [ + 80.9, + 80.7, + 81.0, + 80.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k": [ + 80.9, + 80.7, + 80.9, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k": [ + 80.8, + 80.7, + 81.0, + 80.6 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q": [ + 80.9, + 80.8, + 80.9, + 80.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q": [ + 80.8, + 80.7, + 81.0, + 80.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q": [ + 80.8, + 80.7, + 81.0, + 80.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 80.8, + 80.8, + 80.9, + 80.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 80.9, + 80.7, + 80.9, + 80.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 80.8, + 80.8, + 80.8, + 80.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q": [ + 80.7, + 80.7, 
+ 80.8, + 80.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k": [ + 80.7, + 80.7, + 80.8, + 80.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k": [ + 80.7, + 80.6, + 80.8, + 80.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v": [ + 80.7, + 80.7, + 80.9, + 80.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 80.7, + 80.7, + 80.7, + 80.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v": [ + 80.8, + 80.7, + 80.8, + 80.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q": [ + 80.7, + 80.7, + 80.7, + 80.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 80.7, + 80.7, + 80.6, + 80.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q": [ + 80.8, + 80.6, + 80.7, + 80.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k": [ + 80.6, + 80.6, + 80.6, + 80.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 80.6, + 80.5, + 80.7, + 80.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k": [ + 80.5, + 80.5, + 80.6, + 80.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q": [ + 80.6, + 80.4, + 80.5, + 80.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q": [ + 80.6, + 80.4, + 80.5, + 80.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0": [ + 80.5, + 80.5, + 80.5, + 80.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k": [ + 80.5, + 80.5, + 80.5, + 80.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v": [ + 80.5, + 80.4, + 80.5, + 80.4 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k": [ + 80.4, + 80.5, + 80.5, + 80.4 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v": [ + 80.5, + 80.4, + 80.4, + 80.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k": [ + 80.4, + 80.6, + 80.5, + 80.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k": [ + 80.4, + 80.5, + 80.4, + 80.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q": [ + 80.4, + 80.5, + 80.4, + 80.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 80.4, + 80.5, + 80.4, + 80.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v": [ + 80.4, + 80.5, + 80.4, + 80.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k": [ + 80.4, + 80.5, + 80.3, + 80.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q": [ + 80.2, + 80.3, + 80.4, + 80.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v": [ + 80.2, + 80.3, + 80.4, + 80.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k": [ + 80.3, + 80.4, + 80.4, + 80.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v": [ + 80.2, + 80.2, + 80.2, + 80.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k": [ + 80.2, + 80.3, + 80.1, + 80.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k": [ + 80.1, + 80.2, + 80.1, + 79.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q": [ + 80.0, + 80.2, + 80.1, + 80.0 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 80.0, + 80.3, + 80.1, + 80.0 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v": [ + 80.0, + 80.1, + 80.1, + 80.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k": [ + 79.9, + 80.2, + 80.0, + 80.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q": [ + 79.9, + 80.2, + 80.1, + 80.1 + ], + 
"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k": [ + 79.9, + 80.1, + 80.0, + 80.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k": [ + 79.9, + 80.1, + 80.0, + 80.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q": [ + 79.9, + 80.1, + 80.0, + 80.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k": [ + 79.9, + 79.8, + 79.8, + 79.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 79.6, + 79.7, + 79.9, + 79.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0": [ + 79.6, + 79.8, + 79.9, + 79.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k": [ + 79.7, + 79.7, + 79.8, + 79.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v": [ + 79.6, + 79.6, + 79.9, + 79.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k": [ + 79.6, + 79.7, + 79.9, + 79.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 79.7, + 79.6, + 79.9, + 79.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q": [ + 79.7, + 79.6, + 79.8, + 79.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k": [ + 79.5, + 79.6, + 79.7, + 79.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q": [ + 79.5, + 79.5, + 79.6, + 79.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k": [ + 79.5, + 79.6, + 79.6, + 79.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0": [ + 79.5, + 79.5, + 79.6, + 79.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0": [ + 79.5, + 79.5, + 79.6, + 79.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v": [ + 79.3, + 79.5, + 79.5, + 79.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q": [ + 79.5, + 79.5, + 79.5, + 79.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 79.4, + 79.5, + 79.5, + 79.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 79.4, + 79.5, + 79.5, + 79.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v": [ + 79.3, + 79.5, + 79.5, + 79.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k": [ + 79.3, + 79.4, + 79.4, + 79.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k": [ + 79.1, + 79.3, + 79.5, + 79.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v": [ + 79.2, + 79.3, + 79.5, + 79.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k": [ + 79.2, + 79.4, + 79.6, + 79.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v": [ + 79.2, + 79.3, + 79.5, + 79.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q": [ + 79.1, + 79.3, + 79.4, + 79.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 79.2, + 79.3, + 79.4, + 79.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 79.1, + 79.4, + 79.3, + 79.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q": [ + 79.3, + 79.3, + 79.4, + 79.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 79.1, + 79.3, + 79.5, + 79.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k": [ + 79.2, + 79.3, + 79.4, + 79.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v": [ + 79.3, + 79.3, + 79.3, + 79.4 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q": [ + 79.2, + 79.3, + 79.2, + 79.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k": [ + 79.0, + 79.2, + 79.2, + 79.5 + ], 
+ "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 79.0, + 79.1, + 79.3, + 79.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 79.0, + 79.3, + 79.2, + 79.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v": [ + 79.1, + 79.3, + 79.2, + 79.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q": [ + 79.1, + 79.2, + 79.2, + 79.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q": [ + 79.0, + 79.2, + 79.2, + 79.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q": [ + 79.1, + 79.2, + 79.2, + 79.4 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k": [ + 79.0, + 79.1, + 79.2, + 79.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v": [ + 79.1, + 79.2, + 79.3, + 79.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 79.1, + 79.2, + 79.3, + 79.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q": [ + 79.2, + 79.2, + 79.2, + 79.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 79.1, + 79.2, + 79.3, + 79.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v": [ + 79.0, + 79.1, + 79.2, + 79.4 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q": [ + 79.2, + 79.1, + 79.4, + 79.5 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k": [ + 79.0, + 79.2, + 79.4, + 79.4 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v": [ + 79.1, + 79.1, + 79.3, + 79.3 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q": [ + 79.0, + 79.0, + 79.3, + 79.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q": [ + 79.1, + 79.0, + 79.3, + 79.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k": [ + 79.0, + 79.0, + 79.2, + 79.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v": [ + 79.0, + 79.0, + 79.3, + 79.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 79.0, + 78.9, + 79.3, + 79.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v": [ + 78.9, + 79.0, + 79.2, + 79.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k": [ + 79.0, + 78.9, + 79.3, + 79.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v": [ + 79.0, + 79.0, + 79.3, + 79.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v": [ + 79.0, + 78.9, + 79.2, + 79.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 79.0, + 78.9, + 79.0, + 79.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 79.0, + 78.9, + 79.1, + 79.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k": [ + 78.9, + 78.9, + 79.1, + 79.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q": [ + 79.0, + 78.8, + 79.2, + 79.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2": [ + 79.0, + 78.7, + 79.3, + 79.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2": [ + 79.1, + 78.7, + 79.3, + 79.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q": [ + 79.0, + 78.8, + 79.1, + 79.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k": [ + 79.0, + 78.7, + 79.1, + 79.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k": [ + 78.9, + 78.6, + 79.1, + 79.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 78.9, + 78.5, + 79.2, + 79.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0": [ + 78.7, + 78.4, + 79.1, + 79.1 + ], + 
"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0": [ + 78.6, + 78.3, + 79.0, + 79.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v": [ + 78.4, + 78.1, + 78.9, + 79.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q": [ + 78.4, + 78.2, + 79.0, + 79.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 78.4, + 78.2, + 79.0, + 79.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k": [ + 78.3, + 78.2, + 78.9, + 78.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v": [ + 78.3, + 78.2, + 78.8, + 79.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 78.4, + 78.3, + 78.9, + 78.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v": [ + 78.2, + 78.3, + 78.7, + 78.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v": [ + 78.2, + 78.3, + 78.6, + 78.8 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v": [ + 78.2, + 78.3, + 78.8, + 78.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v": [ + 78.2, + 78.4, + 78.6, + 78.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k": [ + 78.1, + 78.3, + 78.6, + 78.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 78.2, + 78.0, + 78.4, + 78.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q": [ + 78.1, + 78.1, + 78.4, + 78.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2": [ + 78.1, + 77.9, + 78.5, + 78.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2": [ + 77.9, + 78.0, + 78.5, + 78.6 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k": [ + 78.0, + 78.0, + 78.5, + 78.5 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 78.1, + 78.1, + 78.4, + 78.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2": [ + 78.1, + 77.9, + 78.5, + 78.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q": [ + 78.1, + 77.9, + 78.5, + 78.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k": [ + 78.0, + 78.0, + 78.4, + 78.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q": [ + 78.0, + 78.0, + 78.4, + 78.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k": [ + 77.8, + 77.9, + 78.4, + 78.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2": [ + 77.6, + 78.0, + 78.4, + 78.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k": [ + 77.6, + 77.9, + 78.3, + 78.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k": [ + 77.8, + 77.9, + 78.4, + 78.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v": [ + 77.8, + 77.9, + 78.3, + 78.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 77.7, + 77.8, + 78.3, + 78.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 77.7, + 77.9, + 78.3, + 78.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 77.8, + 77.9, + 78.3, + 78.5 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 77.8, + 78.0, + 78.3, + 78.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 77.7, + 77.9, + 78.3, + 78.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q": [ + 77.9, + 77.9, + 78.3, + 78.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k": [ + 77.9, + 78.0, + 78.3, + 78.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v": [ + 77.8, + 77.8, + 78.3, + 78.5 + ], + 
"up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 77.7, + 77.9, + 78.4, + 78.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q": [ + 77.8, + 77.9, + 78.3, + 78.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v": [ + 77.6, + 77.9, + 78.1, + 78.4 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.2": [ + 77.7, + 77.9, + 78.2, + 78.4 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_v": [ + 77.7, + 77.9, + 78.2, + 78.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2": [ + 77.7, + 77.8, + 78.2, + 78.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2": [ + 77.7, + 77.9, + 78.2, + 78.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q": [ + 77.7, + 77.8, + 78.2, + 78.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 77.5, + 77.8, + 77.9, + 78.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k": [ + 77.5, + 77.7, + 77.9, + 78.2 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.2": [ + 77.5, + 77.8, + 77.9, + 78.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 77.5, + 77.8, + 77.9, + 78.2 + ], + "mid_block.attentions.0.transformer_blocks.9.ff.net.2": [ + 77.6, + 77.8, + 78.0, + 78.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2": [ + 77.6, + 77.7, + 77.9, + 78.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 77.7, + 77.8, + 77.8, + 78.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2": [ + 77.7, + 77.7, + 77.8, + 78.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 77.7, + 77.8, + 77.7, + 78.1 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.2": [ + 77.6, + 77.7, + 77.8, + 78.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k": [ + 77.6, + 77.7, + 77.7, + 78.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2": [ + 77.5, + 77.7, + 77.7, + 77.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 77.5, + 77.7, + 77.7, + 77.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 77.5, + 77.7, + 77.7, + 78.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0": [ + 77.2, + 77.5, + 77.8, + 78.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2": [ + 77.2, + 77.5, + 77.7, + 78.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v": [ + 77.2, + 77.6, + 77.7, + 78.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2": [ + 77.1, + 77.6, + 77.7, + 78.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v": [ + 77.1, + 77.6, + 77.7, + 77.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 77.2, + 77.5, + 77.7, + 77.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k": [ + 77.1, + 77.5, + 77.7, + 77.9 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q": [ + 77.1, + 77.4, + 77.7, + 78.0 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k": [ + 76.9, + 77.5, + 77.7, + 77.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 77.0, + 77.4, + 77.7, + 77.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 76.9, + 77.4, + 77.6, + 77.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2": [ + 76.9, + 77.4, + 77.7, + 77.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2": [ + 76.9, + 77.3, + 77.7, + 77.8 + ], + 
"up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v": [ + 77.0, + 77.4, + 77.7, + 77.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v": [ + 77.0, + 77.4, + 77.7, + 77.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q": [ + 77.0, + 77.4, + 77.7, + 77.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k": [ + 77.0, + 77.3, + 77.7, + 77.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v": [ + 77.0, + 77.4, + 77.5, + 77.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 77.0, + 77.5, + 77.4, + 77.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k": [ + 77.1, + 77.4, + 77.5, + 77.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2": [ + 77.0, + 77.4, + 77.4, + 77.5 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 77.0, + 77.4, + 77.3, + 77.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2": [ + 76.9, + 77.3, + 77.2, + 77.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 77.0, + 77.2, + 77.2, + 77.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k": [ + 76.8, + 77.2, + 77.3, + 77.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k": [ + 76.7, + 77.2, + 77.2, + 77.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k": [ + 76.8, + 77.2, + 77.2, + 77.1 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k": [ + 76.7, + 77.2, + 77.2, + 77.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q": [ + 76.6, + 77.1, + 77.2, + 77.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 76.5, + 77.2, + 77.2, + 77.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 76.3, + 77.1, + 77.1, + 77.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 76.5, + 77.1, + 77.1, + 77.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v": [ + 76.3, + 77.1, + 76.8, + 76.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q": [ + 76.3, + 77.1, + 76.8, + 76.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q": [ + 76.4, + 77.0, + 76.8, + 76.9 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.2": [ + 76.4, + 77.2, + 76.7, + 76.9 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v": [ + 76.3, + 77.1, + 76.7, + 76.9 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.2": [ + 76.4, + 77.0, + 76.7, + 76.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v": [ + 76.4, + 77.1, + 76.7, + 76.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q": [ + 76.5, + 77.1, + 76.6, + 76.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v": [ + 76.4, + 77.1, + 76.8, + 76.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k": [ + 76.3, + 77.0, + 76.7, + 76.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v": [ + 76.3, + 76.9, + 76.7, + 76.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 76.2, + 76.9, + 76.6, + 76.9 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 76.2, + 76.9, + 76.6, + 77.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 76.1, + 77.0, + 76.6, + 76.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 76.2, + 76.9, + 76.7, + 76.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 76.2, + 76.9, + 76.6, + 76.9 + ], + 
"up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 76.2, + 76.9, + 76.6, + 76.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2": [ + 76.2, + 77.0, + 76.7, + 77.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q": [ + 76.2, + 77.0, + 76.6, + 77.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q": [ + 76.1, + 77.0, + 76.5, + 77.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k": [ + 76.0, + 77.0, + 76.5, + 77.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 76.1, + 76.9, + 76.5, + 77.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 76.2, + 76.9, + 76.5, + 77.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 76.0, + 76.8, + 76.5, + 76.9 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k": [ + 76.1, + 76.9, + 76.5, + 76.9 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 76.1, + 76.8, + 76.6, + 77.0 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 76.1, + 76.7, + 76.6, + 76.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v": [ + 76.1, + 76.7, + 76.6, + 76.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 76.0, + 76.7, + 76.5, + 76.7 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.2": [ + 75.9, + 76.7, + 76.4, + 76.8 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 76.0, + 76.7, + 76.3, + 76.7 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v": [ + 76.0, + 76.7, + 76.5, + 76.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v": [ + 75.9, + 76.7, + 76.3, + 76.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 76.0, + 76.7, + 76.5, + 76.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 75.8, + 76.7, + 76.4, + 76.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 76.0, + 76.6, + 76.6, + 76.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v": [ + 75.8, + 76.5, + 76.1, + 76.5 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 75.8, + 76.5, + 76.1, + 76.5 + ], + "mid_block.resnets.1.time_emb_proj": [ + 75.8, + 76.5, + 76.1, + 76.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 75.7, + 76.6, + 76.0, + 76.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 75.5, + 76.5, + 76.1, + 76.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q": [ + 75.6, + 76.5, + 76.1, + 76.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q": [ + 75.5, + 76.4, + 76.0, + 76.5 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q": [ + 75.4, + 76.3, + 76.0, + 76.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v": [ + 75.4, + 76.4, + 76.0, + 76.5 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 75.4, + 76.4, + 76.0, + 76.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2": [ + 75.3, + 76.4, + 75.9, + 76.6 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q": [ + 75.3, + 76.4, + 76.0, + 76.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 75.3, + 76.3, + 75.9, + 76.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q": [ + 75.2, + 76.2, + 75.9, + 76.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k": [ + 75.1, + 76.2, + 75.8, + 76.3 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2": [ + 75.1, + 76.2, + 75.8, + 76.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 75.0, + 76.0, + 75.8, + 76.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 74.8, + 75.9, + 75.7, + 76.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q": [ + 74.7, + 75.9, + 75.5, + 76.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k": [ + 74.8, + 75.9, + 75.7, + 76.1 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.2": [ + 74.7, + 75.9, + 75.7, + 76.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 74.7, + 75.9, + 75.7, + 76.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 74.7, + 75.9, + 75.7, + 76.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 74.7, + 75.8, + 75.6, + 76.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 74.7, + 75.7, + 75.7, + 76.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 74.6, + 75.7, + 75.5, + 76.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k": [ + 74.7, + 75.8, + 75.4, + 76.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k": [ + 74.7, + 75.8, + 75.5, + 76.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k": [ + 74.7, + 75.7, + 75.3, + 76.0 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 74.6, + 75.5, + 75.2, + 76.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v": [ + 74.6, + 75.3, + 75.2, + 75.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 74.4, + 75.3, + 75.1, + 75.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v": [ + 74.2, + 75.2, + 75.1, + 75.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k": [ + 74.3, + 75.1, + 75.0, + 75.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v": [ + 74.4, + 75.2, + 74.9, + 75.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 74.2, + 75.0, + 74.8, + 75.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v": [ + 74.3, + 75.1, + 74.8, + 75.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k": [ + 74.2, + 75.0, + 74.8, + 75.6 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 74.1, + 74.9, + 74.7, + 75.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2": [ + 74.2, + 75.0, + 74.8, + 75.6 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v": [ + 74.3, + 75.1, + 74.8, + 75.6 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 74.2, + 75.1, + 74.7, + 75.6 + ], + "mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 74.3, + 75.0, + 74.7, + 75.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 74.2, + 75.0, + 74.7, + 75.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v": [ + 74.2, + 74.9, + 74.6, + 75.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q": [ + 74.0, + 74.7, + 74.5, + 75.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q": [ + 74.1, + 74.6, + 74.4, + 75.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k": [ + 73.9, + 74.7, + 74.3, + 75.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 73.9, + 74.6, + 74.4, + 75.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q": [ + 74.0, + 74.6, + 74.4, + 75.1 + ], + 
"up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v": [ + 73.8, + 74.4, + 74.1, + 74.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k": [ + 73.7, + 74.3, + 74.0, + 74.6 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.2": [ + 73.7, + 74.3, + 73.9, + 74.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2": [ + 73.7, + 74.2, + 73.9, + 74.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2": [ + 73.5, + 74.2, + 73.9, + 74.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v": [ + 73.5, + 74.1, + 73.9, + 74.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 73.6, + 74.0, + 73.8, + 74.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k": [ + 73.7, + 74.1, + 73.8, + 74.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q": [ + 73.4, + 74.0, + 73.7, + 74.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v": [ + 73.4, + 73.9, + 73.6, + 74.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2": [ + 73.4, + 73.8, + 73.6, + 74.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 73.4, + 73.8, + 73.6, + 74.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v": [ + 73.3, + 73.7, + 73.5, + 74.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 73.2, + 73.7, + 73.5, + 74.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q": [ + 73.2, + 73.5, + 73.4, + 74.1 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 73.2, + 73.5, + 73.4, + 74.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 73.3, + 73.5, + 73.5, + 74.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v": [ + 73.0, + 73.3, + 73.2, + 73.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 73.1, + 73.1, + 73.2, + 73.6 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v": [ + 73.2, + 73.1, + 73.1, + 73.6 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 73.1, + 73.1, + 73.2, + 73.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v": [ + 73.3, + 73.3, + 73.1, + 73.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 73.3, + 73.3, + 73.0, + 73.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k": [ + 72.9, + 73.1, + 72.9, + 73.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q": [ + 73.0, + 73.1, + 73.0, + 73.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 72.9, + 73.1, + 72.9, + 73.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q": [ + 72.9, + 72.9, + 72.7, + 73.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 72.9, + 72.8, + 72.7, + 73.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k": [ + 72.9, + 72.7, + 72.6, + 73.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q": [ + 72.7, + 72.6, + 72.5, + 73.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q": [ + 72.4, + 72.5, + 72.4, + 72.9 + ], + "mid_block.resnets.0.time_emb_proj": [ + 72.2, + 72.4, + 72.2, + 72.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k": [ + 72.1, + 72.3, + 72.0, + 72.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 72.2, + 72.3, + 72.1, + 72.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 72.2, + 72.3, + 72.1, + 72.6 + ], + 
"up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k": [ + 72.1, + 72.2, + 71.9, + 72.5 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 72.1, + 72.1, + 71.9, + 72.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v": [ + 71.9, + 72.0, + 71.8, + 72.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k": [ + 71.9, + 71.9, + 71.5, + 71.9 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 71.8, + 71.8, + 71.4, + 71.9 + ], + "mid_block.resnets.1.conv2": [ + 71.9, + 71.9, + 71.5, + 72.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2": [ + 72.1, + 71.9, + 71.7, + 72.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2": [ + 72.1, + 71.9, + 71.6, + 72.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 72.2, + 72.0, + 71.7, + 72.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q": [ + 72.2, + 72.0, + 71.6, + 72.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v": [ + 71.2, + 71.3, + 70.8, + 71.2 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 71.2, + 71.3, + 70.7, + 71.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v": [ + 71.1, + 71.3, + 70.7, + 71.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 71.1, + 71.3, + 70.7, + 71.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 71.2, + 71.1, + 70.6, + 71.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 71.2, + 71.2, + 70.6, + 71.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q": [ + 71.2, + 71.1, + 70.6, + 71.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 71.1, + 71.0, + 70.6, + 71.0 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k": [ + 71.2, + 71.1, + 70.6, + 71.0 + ], + "mid_block.resnets.1.conv1": [ + 71.2, + 71.0, + 70.6, + 71.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 71.0, + 70.9, + 70.5, + 70.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v": [ + 70.9, + 70.9, + 70.4, + 70.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 70.7, + 70.7, + 70.2, + 70.6 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q": [ + 70.8, + 70.8, + 70.1, + 70.6 + ], + "down_blocks.2.resnets.1.conv1": [ + 70.8, + 70.7, + 70.1, + 70.5 + ], + "down_blocks.2.attentions.1.proj_in": [ + 70.9, + 70.8, + 70.2, + 70.6 + ], + "down_blocks.2.resnets.1.conv2": [ + 70.8, + 70.7, + 70.1, + 70.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0": [ + 70.7, + 70.6, + 70.0, + 70.5 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v": [ + 70.8, + 70.7, + 70.1, + 70.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 70.8, + 70.7, + 70.1, + 70.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 70.8, + 70.7, + 70.1, + 70.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 70.7, + 70.5, + 70.0, + 70.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v": [ + 70.6, + 70.3, + 69.8, + 70.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 70.6, + 70.3, + 69.7, + 70.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 70.6, + 70.3, + 69.6, + 70.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 70.6, + 70.3, + 69.7, + 70.2 + ], + "down_blocks.2.resnets.0.time_emb_proj": 
[ + 70.3, + 70.0, + 69.4, + 69.8 + ], + "mid_block.resnets.0.conv1": [ + 70.2, + 70.0, + 69.4, + 69.8 + ], + "mid_block.resnets.0.conv2": [ + 70.3, + 70.0, + 69.4, + 69.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 70.3, + 70.1, + 69.5, + 69.9 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 70.4, + 70.1, + 69.5, + 70.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 70.4, + 70.1, + 69.5, + 69.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2": [ + 70.2, + 70.1, + 69.4, + 69.9 + ], + "down_blocks.2.attentions.1.proj_out": [ + 70.3, + 70.0, + 69.3, + 69.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 70.3, + 70.0, + 69.3, + 69.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 70.4, + 70.2, + 69.4, + 69.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v": [ + 70.3, + 70.1, + 69.3, + 69.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0": [ + 70.2, + 70.1, + 69.2, + 69.7 + ], + "up_blocks.0.resnets.0.conv1": [ + 70.1, + 70.1, + 69.1, + 69.7 + ], + "up_blocks.0.resnets.1.conv1": [ + 70.4, + 70.4, + 69.3, + 70.0 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 70.0, + 70.0, + 69.0, + 69.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v": [ + 69.8, + 69.8, + 68.9, + 69.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2": [ + 70.0, + 69.9, + 69.1, + 69.7 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 69.9, + 69.8, + 69.1, + 69.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 70.0, + 69.8, + 69.1, + 69.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2": [ + 70.1, + 69.8, + 69.2, + 69.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v": [ + 69.8, + 69.7, + 69.0, + 69.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2": [ + 69.8, + 69.7, + 68.9, + 69.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v": [ + 69.9, + 69.8, + 68.9, + 69.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2": [ + 70.0, + 69.8, + 69.1, + 69.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v": [ + 70.0, + 69.8, + 69.0, + 69.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 70.0, + 69.8, + 69.0, + 69.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v": [ + 70.0, + 69.8, + 68.9, + 69.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 69.9, + 69.7, + 68.8, + 69.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 70.0, + 69.8, + 68.8, + 69.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 70.1, + 69.9, + 68.9, + 69.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2": [ + 70.0, + 69.7, + 68.8, + 69.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 70.0, + 69.8, + 68.9, + 69.5 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q": [ + 70.0, + 69.8, + 68.9, + 69.5 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k": [ + 70.1, + 69.8, + 68.9, + 69.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2": [ + 70.0, + 69.8, + 68.9, + 69.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0": [ + 69.9, + 69.7, + 68.8, + 69.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v": [ + 69.9, + 69.7, + 68.7, + 69.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2": 
[ + 69.8, + 69.6, + 68.7, + 69.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 69.8, + 69.6, + 68.8, + 69.3 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 69.4, + 69.2, + 68.4, + 68.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 69.4, + 69.2, + 68.4, + 68.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 69.4, + 69.2, + 68.4, + 68.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 69.2, + 69.0, + 68.2, + 68.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v": [ + 69.4, + 69.1, + 68.3, + 68.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v": [ + 69.4, + 69.2, + 68.3, + 68.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v": [ + 69.3, + 69.0, + 68.2, + 68.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0": [ + 69.3, + 69.0, + 68.2, + 68.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2": [ + 69.3, + 69.0, + 68.2, + 68.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0": [ + 69.1, + 69.0, + 68.1, + 68.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 69.1, + 68.9, + 68.1, + 68.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0": [ + 68.9, + 68.8, + 67.9, + 68.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v": [ + 69.0, + 68.8, + 68.0, + 68.4 + ], + "up_blocks.0.resnets.1.conv2": [ + 69.1, + 69.0, + 68.1, + 68.5 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 69.2, + 69.1, + 68.1, + 68.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v": [ + 69.1, + 68.9, + 68.0, + 68.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 68.8, + 68.7, + 67.9, + 68.2 + ], + "mid_block.attentions.0.proj_out": [ + 68.8, + 68.6, + 67.8, + 68.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 68.7, + 68.7, + 67.8, + 68.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 68.8, + 68.7, + 67.9, + 68.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2": [ + 68.9, + 68.8, + 68.0, + 68.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 68.6, + 68.4, + 67.7, + 68.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v": [ + 68.6, + 68.5, + 67.8, + 68.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2": [ + 68.7, + 68.5, + 67.7, + 68.2 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v": [ + 68.7, + 68.4, + 67.8, + 68.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v": [ + 68.7, + 68.4, + 67.7, + 68.1 + ], + "up_blocks.0.attentions.0.proj_out": [ + 68.7, + 68.4, + 67.8, + 68.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v": [ + 68.6, + 68.4, + 67.7, + 68.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj": [ + 68.8, + 68.4, + 67.9, + 68.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 68.8, + 68.4, + 67.9, + 68.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 68.8, + 68.5, + 68.0, + 68.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj": [ + 68.9, + 68.5, + 68.0, + 68.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v": [ + 68.9, + 68.5, + 67.9, + 68.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 68.6, + 68.3, + 67.7, + 68.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 
68.6, + 68.2, + 67.7, + 68.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 68.4, + 68.2, + 67.6, + 68.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj": [ + 68.3, + 68.1, + 67.4, + 67.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 68.3, + 68.1, + 67.5, + 68.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v": [ + 68.1, + 67.9, + 67.2, + 67.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2": [ + 68.1, + 67.9, + 67.3, + 67.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 68.1, + 67.9, + 67.3, + 67.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0": [ + 68.2, + 67.9, + 67.3, + 67.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v": [ + 68.2, + 67.8, + 67.3, + 67.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2": [ + 68.1, + 67.9, + 67.3, + 67.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2": [ + 68.1, + 67.9, + 67.4, + 68.0 + ], + "up_blocks.0.resnets.0.conv2": [ + 68.1, + 67.9, + 67.3, + 68.0 + ], + "up_blocks.0.resnets.2.conv1": [ + 68.3, + 68.0, + 67.4, + 68.1 + ], + "up_blocks.0.resnets.2.conv2": [ + 68.4, + 68.1, + 67.6, + 68.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 68.4, + 68.2, + 67.6, + 68.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 68.3, + 68.2, + 67.6, + 68.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj": [ + 68.4, + 68.2, + 67.8, + 68.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj": [ + 68.6, + 68.3, + 67.8, + 68.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 68.5, + 68.3, + 67.9, + 68.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2": [ + 68.6, + 68.2, + 67.8, + 68.3 + ], + "down_blocks.2.resnets.0.conv2": [ + 68.5, + 68.2, + 67.8, + 68.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 68.5, + 68.2, + 67.8, + 68.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj": [ + 68.4, + 68.1, + 67.7, + 68.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2": [ + 68.2, + 67.9, + 67.6, + 68.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 68.1, + 67.8, + 67.5, + 67.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 67.9, + 67.5, + 67.3, + 67.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v": [ + 67.6, + 67.3, + 67.1, + 67.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2": [ + 67.5, + 67.0, + 67.0, + 67.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 67.4, + 67.1, + 67.1, + 67.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 67.4, + 67.2, + 67.1, + 67.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 67.3, + 67.0, + 67.0, + 67.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2": [ + 67.2, + 66.7, + 67.0, + 67.3 + ], + "mid_block.attentions.0.proj_in": [ + 67.1, + 66.5, + 66.9, + 67.3 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 66.9, + 66.2, + 66.8, + 67.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 66.8, + 66.2, + 66.8, + 67.2 + ], + "down_blocks.2.attentions.0.proj_in": [ + 66.8, + 66.2, + 66.8, + 67.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj": [ + 67.0, + 66.3, + 67.0, + 67.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 67.0, + 
66.2, + 67.0, + 67.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 66.9, + 66.2, + 66.9, + 67.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 66.8, + 66.0, + 66.7, + 67.1 + ], + "down_blocks.2.attentions.0.proj_out": [ + 66.6, + 65.8, + 66.7, + 67.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 66.7, + 65.8, + 66.8, + 67.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v": [ + 66.7, + 65.9, + 66.7, + 67.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 66.6, + 65.7, + 66.5, + 66.9 + ], + "up_blocks.0.attentions.0.proj_in": [ + 66.8, + 65.9, + 66.7, + 67.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 66.6, + 65.7, + 66.6, + 66.9 + ], + "down_blocks.2.resnets.0.conv1": [ + 66.5, + 65.7, + 66.6, + 66.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 66.4, + 65.6, + 66.5, + 66.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2": [ + 66.3, + 65.4, + 66.4, + 66.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 66.3, + 65.5, + 66.5, + 66.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 66.5, + 65.4, + 66.4, + 66.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2": [ + 66.6, + 65.8, + 66.6, + 67.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0": [ + 66.5, + 65.6, + 66.4, + 66.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 66.4, + 65.5, + 66.3, + 66.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 66.4, + 65.5, + 66.3, + 66.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 66.2, + 65.5, + 66.3, + 66.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj": [ + 66.4, + 65.5, + 66.3, + 66.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 66.4, + 65.5, + 66.3, + 66.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 66.2, + 65.4, + 66.3, + 66.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 66.3, + 65.4, + 66.3, + 66.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 66.2, + 65.3, + 66.0, + 66.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 65.9, + 65.0, + 65.8, + 66.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 65.8, + 64.9, + 65.7, + 65.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 65.6, + 64.5, + 65.4, + 65.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 65.5, + 64.4, + 65.4, + 65.6 + ], + "up_blocks.0.attentions.2.proj_in": [ + 65.3, + 64.3, + 65.2, + 65.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 65.2, + 64.1, + 65.0, + 65.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 64.6, + 63.5, + 64.4, + 64.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 64.7, + 63.6, + 64.3, + 64.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 64.6, + 63.5, + 64.2, + 64.3 + ], + "up_blocks.0.attentions.1.proj_in": [ + 65.1, + 63.9, + 64.3, + 64.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 64.9, + 63.8, + 64.3, + 64.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 64.7, + 63.5, + 64.2, + 64.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 64.6, + 63.5, 
+ 64.0, + 64.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 64.6, + 63.7, + 64.1, + 64.2 + ], + "up_blocks.0.attentions.2.proj_out": [ + 64.4, + 63.5, + 63.9, + 64.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 64.3, + 63.4, + 63.9, + 63.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 64.1, + 63.2, + 63.6, + 63.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 64.0, + 63.2, + 63.6, + 63.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 63.8, + 62.9, + 63.4, + 63.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k": [ + 63.8, + 63.0, + 63.3, + 63.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 63.7, + 62.9, + 63.1, + 63.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 63.5, + 62.7, + 63.0, + 63.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 63.3, + 62.6, + 62.9, + 62.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 63.2, + 62.5, + 62.7, + 62.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 62.9, + 62.2, + 62.4, + 62.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 62.7, + 62.0, + 62.2, + 62.3 + ], + "up_blocks.0.attentions.1.proj_out": [ + 62.5, + 62.0, + 62.1, + 62.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 62.3, + 61.7, + 61.9, + 61.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 62.3, + 61.6, + 61.8, + 61.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 62.0, + 61.4, + 61.5, + 61.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 61.7, + 61.1, + 61.2, + 61.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 61.5, + 60.9, + 61.1, + 61.0 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 61.5, + 60.9, + 61.0, + 60.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q": [ + 61.6, + 61.0, + 61.0, + 61.0 + ], + "add_embedding.linear_2": [ + 61.5, + 60.6, + 60.9, + 60.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 61.4, + 60.5, + 60.8, + 60.8 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 61.0, + 60.2, + 60.5, + 60.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 60.7, + 60.0, + 60.3, + 60.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 60.5, + 59.8, + 60.0, + 60.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 60.4, + 59.7, + 60.0, + 60.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 60.1, + 59.4, + 59.8, + 59.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 60.1, + 59.4, + 59.8, + 59.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k": [ + 59.7, + 58.9, + 59.4, + 59.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 59.4, + 58.6, + 59.1, + 59.0 + ], + "up_blocks.1.resnets.0.conv2": [ + 59.3, + 58.5, + 58.9, + 58.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 59.1, + 58.4, + 58.8, + 58.7 + ], + "down_blocks.1.resnets.1.conv2": [ + 59.0, + 58.2, + 58.6, + 58.6 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 58.8, + 58.1, + 58.4, + 58.4 + ], + "up_blocks.1.resnets.0.conv1": [ + 58.7, + 57.9, + 58.3, + 58.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 58.4, + 57.7, + 58.1, + 58.1 + ], + 
"up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q": [ + 58.1, + 57.4, + 57.8, + 57.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 57.9, + 57.1, + 57.6, + 57.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 57.9, + 57.1, + 57.6, + 57.6 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 57.9, + 57.1, + 57.6, + 57.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 57.5, + 56.7, + 57.3, + 57.2 + ], + "down_blocks.1.attentions.1.proj_out": [ + 57.5, + 56.7, + 57.3, + 57.2 + ], + "up_blocks.1.attentions.0.proj_out": [ + 57.4, + 56.5, + 57.1, + 57.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 57.3, + 56.5, + 57.1, + 57.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 57.2, + 56.3, + 57.0, + 56.9 + ], + "down_blocks.1.resnets.1.conv1": [ + 57.1, + 56.2, + 56.9, + 56.8 + ], + "down_blocks.1.resnets.0.conv2": [ + 56.8, + 55.9, + 56.7, + 56.5 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 56.8, + 55.9, + 56.5, + 56.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 56.8, + 55.9, + 56.5, + 56.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 56.8, + 55.8, + 56.5, + 56.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 56.5, + 55.6, + 56.3, + 56.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 56.3, + 55.4, + 56.2, + 56.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 56.2, + 55.4, + 56.0, + 55.9 + ], + "add_embedding.linear_1": [ + 55.8, + 55.3, + 55.6, + 55.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 55.8, + 55.3, + 55.7, + 55.6 + ], + "down_blocks.1.attentions.1.proj_in": [ + 55.7, + 55.2, + 55.7, + 55.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2": [ + 55.3, + 54.9, + 55.3, + 55.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 55.4, + 54.9, + 55.3, + 55.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 55.2, + 54.7, + 55.1, + 55.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 55.0, + 54.6, + 54.9, + 54.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 55.1, + 54.7, + 55.0, + 54.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 55.1, + 54.7, + 55.0, + 54.9 + ], + "up_blocks.1.attentions.0.proj_in": [ + 54.8, + 54.4, + 54.7, + 54.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 54.7, + 54.3, + 54.7, + 54.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 54.6, + 54.1, + 54.5, + 54.4 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 54.1, + 53.7, + 54.1, + 53.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 53.9, + 53.5, + 53.9, + 53.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 53.6, + 53.2, + 53.6, + 53.4 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 53.4, + 52.9, + 53.3, + 53.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v": [ + 53.0, + 52.6, + 52.9, + 52.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 52.7, + 52.3, + 52.6, + 52.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 52.6, + 52.2, + 52.6, + 52.4 + ], + "down_blocks.1.downsamplers.0.conv": [ + 52.4, + 51.9, + 52.3, + 52.1 + ], + "up_blocks.1.attentions.1.proj_out": [ + 52.2, + 51.7, + 52.1, + 51.8 + ], + 
"up_blocks.0.resnets.2.conv_shortcut": [ + 51.7, + 51.4, + 51.7, + 51.5 + ], + "down_blocks.1.attentions.0.proj_out": [ + 51.6, + 51.2, + 51.6, + 51.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 51.5, + 51.1, + 51.5, + 51.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 51.4, + 51.0, + 51.4, + 51.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 51.3, + 50.9, + 51.4, + 51.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k": [ + 51.3, + 50.9, + 51.3, + 51.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 51.4, + 50.9, + 51.3, + 51.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 51.3, + 50.8, + 51.3, + 51.2 + ], + "up_blocks.1.resnets.1.conv2": [ + 51.2, + 50.8, + 51.2, + 51.2 + ], + "up_blocks.1.resnets.2.conv2": [ + 50.9, + 50.5, + 51.0, + 50.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 50.6, + 50.1, + 50.6, + 50.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 50.4, + 50.0, + 50.4, + 50.3 + ], + "up_blocks.0.upsamplers.0.conv": [ + 50.2, + 49.8, + 50.2, + 50.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 50.1, + 49.8, + 50.2, + 50.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 50.1, + 49.7, + 50.2, + 50.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 49.8, + 49.5, + 49.9, + 49.8 + ], + "up_blocks.1.attentions.1.proj_in": [ + 49.7, + 49.4, + 49.8, + 49.7 + ], + "up_blocks.1.attentions.2.proj_out": [ + 49.4, + 49.2, + 49.5, + 49.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 49.2, + 49.0, + 49.3, + 49.2 + ], + "up_blocks.1.attentions.2.proj_in": [ + 49.1, + 48.9, + 49.2, + 49.1 + ], + "up_blocks.1.resnets.1.conv1": [ + 49.1, + 49.0, + 49.2, + 49.2 + ], + "up_blocks.1.resnets.2.conv1": [ + 49.1, + 49.0, + 49.2, + 49.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 49.1, + 49.0, + 49.2, + 49.1 + ], + "down_blocks.1.attentions.0.proj_in": [ + 49.0, + 48.9, + 49.1, + 49.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 49.0, + 48.9, + 49.1, + 49.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 49.0, + 48.8, + 49.1, + 49.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 48.9, + 48.8, + 49.0, + 48.9 + ], + "down_blocks.1.resnets.0.conv1": [ + 48.6, + 48.5, + 48.6, + 48.5 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 48.5, + 48.4, + 48.5, + 48.4 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 48.1, + 48.1, + 48.2, + 48.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 48.0, + 47.9, + 48.1, + 48.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 48.0, + 47.9, + 48.0, + 48.0 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 47.8, + 47.8, + 47.9, + 47.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 47.7, + 47.7, + 47.8, + 47.7 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 47.7, + 47.8, + 47.9, + 47.8 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 47.7, + 47.7, + 47.9, + 47.7 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 47.2, + 47.2, + 47.4, + 47.3 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 46.7, + 46.8, + 47.0, + 46.9 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 46.4, + 46.5, + 46.6, + 46.5 + ], + "down_blocks.0.resnets.0.conv1": [ + 46.4, + 46.5, + 46.6, + 46.5 + ], + 
"down_blocks.0.downsamplers.0.conv": [ + 46.0, + 46.0, + 46.3, + 46.1 + ], + "down_blocks.0.resnets.1.conv2": [ + 45.8, + 45.8, + 46.0, + 45.8 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 44.8, + 44.9, + 45.0, + 44.8 + ], + "down_blocks.0.resnets.1.conv1": [ + 44.6, + 44.7, + 44.8, + 44.6 + ], + "down_blocks.0.resnets.0.conv2": [ + 44.4, + 44.4, + 44.6, + 44.3 + ], + "time_embedding.linear_1": [ + 43.8, + 43.8, + 43.9, + 43.5 + ], + "up_blocks.2.resnets.0.conv1": [ + 43.8, + 43.7, + 43.8, + 43.5 + ], + "up_blocks.2.resnets.0.conv2": [ + 44.1, + 44.1, + 44.1, + 43.9 + ], + "up_blocks.1.upsamplers.0.conv": [ + 43.7, + 43.7, + 43.7, + 43.5 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 44.2, + 44.2, + 44.2, + 44.1 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 43.7, + 43.7, + 43.8, + 43.6 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 43.3, + 43.2, + 43.3, + 43.1 + ], + "up_blocks.2.resnets.1.conv1": [ + 42.9, + 42.7, + 42.9, + 42.7 + ], + "up_blocks.2.resnets.1.conv2": [ + 42.7, + 42.6, + 42.8, + 42.7 + ], + "time_embedding.linear_2": [ + 43.1, + 42.9, + 43.1, + 43.1 + ], + "up_blocks.2.resnets.2.conv1": [ + 42.6, + 42.4, + 42.6, + 42.6 + ], + "up_blocks.2.resnets.2.conv2": [ + 42.1, + 42.0, + 42.0, + 42.0 + ] + }, + "4": { + "metadata": { + "nbits": 4, + "out_ngroups": 1, + "in_ngroups": 1, + "cumulative": true, + "candidates": [ + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k", + 
"mid_block.attentions.0.transformer_blocks.5.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0", + 
"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.4.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q", + 
"down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v", + 
"mid_block.attentions.0.transformer_blocks.7.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.6.ff.net.2", + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.7.ff.net.2", + 
"mid_block.attentions.0.transformer_blocks.8.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.4.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.5.ff.net.2", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2", + 
"up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj", + "down_blocks.2.resnets.1.time_emb_proj", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.8.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.2.ff.net.2", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k", + 
"up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.9.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q", + "up_blocks.0.resnets.0.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.1.ff.net.2", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k", + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v", + 
"mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q", + "mid_block.resnets.1.time_emb_proj", + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.3.ff.net.2", + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0", + 
"down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v", + "down_blocks.2.attentions.1.proj_in", + "down_blocks.2.resnets.1.conv2", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2", + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj", + "mid_block.resnets.0.time_emb_proj", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.ff.net.2", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q", + "mid_block.attentions.0.proj_out", + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj", + "mid_block.resnets.0.conv2", + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0", + "mid_block.resnets.1.conv1", + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2", + "up_blocks.0.resnets.0.conv2", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k", + "mid_block.resnets.0.conv1", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj", + "down_blocks.2.attentions.1.proj_out", + 
"down_blocks.2.resnets.0.time_emb_proj", + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "mid_block.resnets.1.conv2", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v", + "up_blocks.0.resnets.1.conv1", + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.2.resnets.1.conv1", + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0", + "down_blocks.2.resnets.0.conv2", + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2", + "up_blocks.0.resnets.1.time_emb_proj", + "up_blocks.0.resnets.2.time_emb_proj", + "down_blocks.2.resnets.0.conv1", + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2", + "up_blocks.0.attentions.0.proj_in", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0", + "up_blocks.0.resnets.0.conv1", + "mid_block.attentions.0.proj_in", + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2", + "up_blocks.0.resnets.1.conv2", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v", + "up_blocks.0.attentions.0.proj_out", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0", + "up_blocks.0.resnets.2.conv2", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v", + "down_blocks.2.attentions.0.proj_out", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0", + 
"up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v", + "up_blocks.0.resnets.0.conv_shortcut", + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2", + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0", + "up_blocks.0.resnets.2.conv1", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.0.proj_in", + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0", + 
"up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.1.resnets.0.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.0.attentions.2.proj_in", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj", + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.2.proj_out", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.1.proj_out", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0", + "up_blocks.1.resnets.2.time_emb_proj", + "up_blocks.1.resnets.0.conv1", + "up_blocks.0.attentions.1.proj_in", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.resnets.1.conv2", + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2", + "down_blocks.1.resnets.1.time_emb_proj", + "up_blocks.1.resnets.1.time_emb_proj", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.resnets.0.conv2", + "up_blocks.1.resnets.0.time_emb_proj", + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k", + "down_blocks.1.resnets.0.conv2", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q", + "add_embedding.linear_1", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q", + "add_embedding.linear_2", + "down_blocks.1.attentions.1.proj_out", + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.0.resnets.1.conv_shortcut", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v", + "up_blocks.1.attentions.0.proj_out", + "time_embedding.linear_1", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj", + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj", + "down_blocks.1.attentions.1.proj_in", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + 
"up_blocks.2.resnets.0.time_emb_proj", + "down_blocks.1.resnets.1.conv1", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.2.resnets.0.conv_shortcut", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v", + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2", + "down_blocks.0.resnets.1.time_emb_proj", + "up_blocks.1.attentions.1.proj_out", + "up_blocks.1.attentions.0.proj_in", + "up_blocks.1.resnets.2.conv2", + "up_blocks.2.resnets.1.time_emb_proj", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.0.proj_out", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0", + "up_blocks.1.resnets.1.conv2", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v", + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0", + "down_blocks.0.resnets.0.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0", + "down_blocks.1.downsamplers.0.conv", + "up_blocks.1.attentions.2.proj_out", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.1.resnets.0.conv1", + "up_blocks.2.resnets.2.time_emb_proj", + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.1.proj_in", + "up_blocks.0.resnets.2.conv_shortcut", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.0.upsamplers.0.conv", + "up_blocks.1.attentions.2.proj_in", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj", + "down_blocks.1.attentions.0.proj_in", + "up_blocks.1.resnets.1.conv1", + "down_blocks.0.resnets.0.conv1", + "up_blocks.1.resnets.0.conv_shortcut", + "up_blocks.1.resnets.2.conv1", + "down_blocks.1.resnets.0.conv_shortcut", + "up_blocks.1.resnets.2.conv_shortcut", + "up_blocks.2.resnets.2.conv_shortcut", + "down_blocks.0.downsamplers.0.conv", + "up_blocks.1.resnets.1.conv_shortcut", + "time_embedding.linear_2", + "down_blocks.0.resnets.1.conv1", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.upsamplers.0.conv", + "down_blocks.0.resnets.1.conv2", + "down_blocks.0.resnets.0.conv2", + "up_blocks.2.resnets.0.conv_shortcut", + "up_blocks.2.resnets.1.conv_shortcut", + "up_blocks.2.resnets.2.conv2", + "up_blocks.2.resnets.0.conv2", + "up_blocks.2.resnets.0.conv1", + "up_blocks.2.resnets.2.conv1", + "up_blocks.2.resnets.1.conv1", + "up_blocks.2.resnets.1.conv2" + ], + "sizes": [ + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, 
+ 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 2621440, + 2621440, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 6553600, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 6553600, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 2621440, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 6553600, + 6553600, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 6553600, + 13107200, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 2621440, + 6553600, + 6553600, + 1638400, + 6553600, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1310720, + 1638400, + 
13107200, + 6553600, + 1638400, + 1638400, + 6553600, + 1638400, + 409600, + 1638400, + 13107200, + 6553600, + 13107200, + 2621440, + 13107200, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 13107200, + 1638400, + 13107200, + 1638400, + 1638400, + 409600, + 1638400, + 1310720, + 1638400, + 6553600, + 6553600, + 13107200, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 409600, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 409600, + 1310720, + 409600, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1310720, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 13107200, + 6553600, + 13107200, + 13107200, + 1638400, + 2621440, + 409600, + 6553600, + 13107200, + 13107200, + 6553600, + 1638400, + 1638400, + 13107200, + 1638400, + 2621440, + 1638400, + 1638400, + 1310720, + 13107200, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 6553600, + 1638400, + 13107200, + 13107200, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 14745600, + 1638400, + 13107200, + 13107200, + 6553600, + 13107200, + 13107200, + 1638400, + 1638400, + 13107200, + 1638400, + 13107200, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 6553600, + 6553600, + 1638400, + 13107200, + 14745600, + 13107200, + 6553600, + 13107200, + 6553600, + 409600, + 1310720, + 1638400, + 1638400, + 1638400, + 14745600, + 6553600, + 14745600, + 1310720, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 14745600, + 409600, + 13107200, + 6553600, + 1638400, + 6553600, + 1310720, + 1638400, + 13107200, + 13107200, + 1638400, + 1638400, + 13107200, + 1310720, + 14745600, + 409600, + 13107200, + 13107200, + 1638400, + 1638400, + 409600, + 1310720, + 29491200, + 1310720, + 1310720, + 14745600, + 1638400, + 14745600, + 6553600, + 6553600, + 1638400, + 1638400, + 7372800, + 6553600, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 29491200, + 1638400, + 6553600, + 1638400, + 6553600, + 6553600, + 14745600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 14745600, + 1638400, + 13107200, + 1310720, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 6553600, + 6553600, + 13107200, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 3276800, + 6553600, + 1638400, + 13107200, + 1638400, + 13107200, + 6553600, + 409600, + 6553600, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 13107200, + 409600, + 409600, + 13107200, + 6553600, + 1638400, + 6553600, + 1638400, + 22118400, + 409600, + 1638400, + 6553600, + 13107200, + 1310720, + 13107200, + 409600, + 13107200, + 6553600, + 6553600, + 13107200, + 1638400, + 13107200, + 1310720, + 1638400, + 13107200, + 1638400, + 409600, + 6553600, + 409600, + 13107200, + 409600, + 409600, + 13107200, + 6553600, + 409600, + 1638400, + 409600, + 409600, + 1310720, + 409600, + 819200, + 409600, + 409600, + 409600, + 1638400, + 1638400, + 409600, + 13107200, + 
409600, + 409600, + 1638400, + 409600, + 1638400, + 409600, + 409600, + 409600, + 409600, + 409600, + 1638400, + 409600, + 819200, + 11059200, + 1638400, + 409600, + 409600, + 3686400, + 1638400, + 819200, + 819200, + 409600, + 3686400, + 819200, + 1638400, + 409600, + 409600, + 3686400, + 409600, + 3604480, + 1638400, + 409600, + 1638400, + 409600, + 1310720, + 409600, + 3276800, + 409600, + 409600, + 409600, + 409600, + 3276800, + 409600, + 409600, + 3276800, + 1638400, + 3276800, + 409600, + 3276800, + 409600, + 409600, + 409600, + 3686400, + 1310720, + 819200, + 409600, + 1638400, + 409600, + 1638400, + 409600, + 409600, + 409600, + 3686400, + 409600, + 1638400, + 409600, + 409600, + 409600, + 3686400, + 409600, + 3276800, + 409600, + 409600, + 1310720, + 409600, + 3686400, + 409600, + 3276800, + 1638400, + 1843200, + 409600, + 3276800, + 1638400, + 409600, + 2457600, + 3276800, + 1638400, + 409600, + 409600, + 14745600, + 409600, + 1310720, + 3276800, + 3276800, + 409600, + 7372800, + 921600, + 1228800, + 5529600, + 204800, + 614400, + 204800, + 921600, + 819200, + 1638400, + 921600, + 409600, + 3686400, + 921600, + 921600, + 307200, + 204800, + 921600, + 921600, + 2764800, + 1843200, + 1843200, + 921600 + ] + }, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v": [ + 82.0, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.4, + 82.3, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v": [ + 82.3, + 82.3, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.0, + 82.3, + 82.2, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v": [ + 81.9, + 82.2, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 81.8, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 81.9, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q": [ + 81.9, + 82.3, + 82.4, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q": [ + 81.8, + 82.4, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q": [ + 81.9, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q": [ + 81.9, + 82.3, + 82.5, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k": [ + 82.2, + 82.2, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q": [ + 81.9, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 81.9, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q": [ + 82.0, + 82.1, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q": [ + 81.9, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k": [ + 81.9, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q": [ + 81.9, + 82.3, 
+ 82.2, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k": [ + 81.9, + 82.1, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q": [ + 81.9, + 82.1, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v": [ + 81.9, + 82.1, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 81.9, + 82.2, + 82.2, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q": [ + 81.9, + 82.3, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v": [ + 81.9, + 82.2, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q": [ + 82.0, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v": [ + 82.0, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 82.0, + 82.3, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k": [ + 81.9, + 82.2, + 82.3, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_q": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v": [ + 81.7, + 82.3, + 82.3, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 81.9, + 82.1, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 82.0, + 82.2, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k": [ + 81.9, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k": [ + 82.1, + 82.2, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k": [ + 82.0, + 82.1, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k": [ + 82.1, + 82.3, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q": [ + 82.2, + 82.1, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 82.0, + 82.2, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k": [ + 81.9, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k": [ + 82.0, + 82.2, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q": [ + 82.0, + 82.1, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k": [ + 82.0, + 82.2, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v": [ + 82.1, + 82.3, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q": [ + 81.8, + 82.2, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k": [ + 82.1, + 82.2, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 82.0, + 82.1, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k": [ + 82.0, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q": [ + 82.1, + 82.2, + 82.3, 
+ 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.2, + 82.2, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 82.2, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q": [ + 82.1, + 82.2, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 82.0, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 82.0, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q": [ + 82.0, + 82.1, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q": [ + 82.0, + 82.3, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q": [ + 82.1, + 82.0, + 82.3, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v": [ + 82.1, + 82.0, + 82.1, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q": [ + 81.9, + 82.0, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k": [ + 82.0, + 81.9, + 82.1, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v": [ + 82.0, + 81.9, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k": [ + 82.1, + 82.1, + 82.2, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q": [ + 82.0, + 82.0, + 82.1, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k": [ + 82.0, + 81.9, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q": [ + 82.1, + 81.8, + 82.3, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v": [ + 82.0, + 81.9, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q": [ + 81.7, + 81.9, + 82.3, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 82.0, + 81.9, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q": [ + 81.9, + 82.0, + 82.2, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v": [ + 81.9, + 81.9, + 82.2, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 81.9, + 81.9, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q": [ + 82.0, + 81.9, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k": [ + 81.8, + 81.9, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 81.8, + 81.8, + 82.3, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.8, + 82.0, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 81.8, + 81.9, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q": [ + 82.0, + 81.9, + 82.1, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k": [ + 82.0, + 81.7, + 82.1, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v": [ + 81.7, + 82.1, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 81.8, + 81.9, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q": [ + 81.9, + 81.9, + 82.0, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q": [ + 81.9, + 81.9, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k": [ + 82.0, + 82.0, + 82.0, + 81.8 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v": [ + 81.9, + 82.0, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 81.9, + 82.0, + 81.9, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q": [ + 81.8, + 82.1, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 81.9, + 81.7, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k": [ + 81.9, + 81.8, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k": [ + 82.0, + 82.1, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q": [ + 81.9, + 81.9, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k": [ + 81.9, + 82.1, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v": [ + 81.9, + 81.9, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 81.9, + 82.1, + 82.2, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v": [ + 81.9, + 81.9, + 82.1, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v": [ + 81.8, + 82.0, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k": [ + 81.9, + 82.0, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q": [ + 81.8, + 81.9, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v": [ + 81.9, + 81.7, + 82.3, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q": [ + 81.8, + 81.8, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q": [ + 81.9, + 81.9, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 81.9, + 81.9, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q": [ + 81.8, + 81.7, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q": [ + 81.9, + 81.9, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k": [ + 81.9, + 81.7, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q": [ + 81.8, + 81.9, + 81.9, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k": [ + 81.9, + 81.8, + 81.8, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k": [ + 81.9, + 81.6, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.0, + 81.8, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v": [ + 82.0, + 82.0, + 82.2, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q": [ + 81.9, + 81.8, + 81.8, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q": [ + 82.0, + 81.9, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 81.9, + 81.8, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 81.9, + 81.8, + 82.1, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 81.8, + 81.6, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k": [ + 81.7, + 82.0, + 82.0, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k": [ + 81.7, + 82.0, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q": [ + 81.8, + 81.8, + 82.0, + 81.8 + ], + 
"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v": [ + 81.9, + 82.0, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k": [ + 81.7, + 81.7, + 82.0, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q": [ + 81.7, + 81.8, + 82.0, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q": [ + 81.8, + 81.5, + 82.0, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k": [ + 81.8, + 81.7, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k": [ + 81.8, + 81.6, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q": [ + 81.8, + 81.6, + 82.0, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0": [ + 81.7, + 81.6, + 81.9, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q": [ + 81.7, + 81.7, + 81.9, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.8, + 81.8, + 82.1, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k": [ + 81.8, + 81.8, + 81.8, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k": [ + 81.7, + 81.7, + 82.0, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_q": [ + 81.8, + 81.8, + 81.9, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q": [ + 81.7, + 81.7, + 81.9, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k": [ + 81.7, + 81.7, + 82.0, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v": [ + 81.9, + 81.8, + 81.8, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k": [ + 81.8, + 81.6, + 82.0, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q": [ + 81.9, + 81.8, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k": [ + 81.9, + 81.8, + 81.9, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k": [ + 81.8, + 81.7, + 81.9, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v": [ + 81.7, + 81.7, + 81.9, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q": [ + 81.8, + 81.7, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v": [ + 81.8, + 81.7, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k": [ + 81.7, + 81.6, + 82.0, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k": [ + 81.7, + 81.8, + 82.0, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k": [ + 81.8, + 81.7, + 81.8, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k": [ + 81.8, + 81.8, + 81.9, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v": [ + 81.8, + 81.8, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 81.8, + 81.7, + 81.9, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v": [ + 81.7, + 81.7, + 81.8, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 81.7, + 81.8, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 81.8, + 81.7, + 82.0, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q": [ + 81.8, + 81.7, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q": [ + 81.8, + 81.7, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q": [ + 81.7, + 81.8, + 82.1, + 81.8 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k": [ + 81.7, + 81.7, + 82.1, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q": [ + 81.6, + 81.6, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v": [ + 81.7, + 81.7, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k": [ + 81.7, + 81.8, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q": [ + 81.5, + 81.7, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k": [ + 81.6, + 81.7, + 82.1, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v": [ + 81.6, + 81.6, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 81.5, + 81.6, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 81.6, + 81.7, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q": [ + 81.7, + 81.7, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k": [ + 81.7, + 81.7, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v": [ + 81.6, + 81.7, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k": [ + 81.6, + 81.8, + 81.9, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.6, + 81.7, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q": [ + 81.6, + 81.6, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k": [ + 81.5, + 81.5, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k": [ + 81.6, + 81.6, + 81.8, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k": [ + 81.5, + 81.7, + 81.9, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q": [ + 81.5, + 81.6, + 81.8, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k": [ + 81.6, + 81.8, + 81.8, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q": [ + 81.6, + 81.7, + 81.7, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k": [ + 81.7, + 81.5, + 81.8, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k": [ + 81.6, + 81.7, + 81.8, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v": [ + 81.7, + 81.6, + 81.7, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k": [ + 81.8, + 81.5, + 81.8, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q": [ + 81.7, + 81.6, + 81.8, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q": [ + 81.6, + 81.5, + 81.8, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 81.6, + 81.6, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k": [ + 81.6, + 81.6, + 81.9, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 81.6, + 81.6, + 81.9, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v": [ + 81.6, + 81.8, + 81.8, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v": [ + 81.6, + 81.6, + 81.9, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q": [ + 81.5, + 81.6, + 81.9, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q": [ + 81.4, + 81.5, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k": [ + 81.4, + 81.7, + 81.8, + 81.6 + ], + 
"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v": [ + 81.4, + 81.6, + 81.9, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0": [ + 81.4, + 81.7, + 81.8, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k": [ + 81.5, + 81.6, + 81.8, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v": [ + 81.6, + 81.5, + 81.9, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q": [ + 81.5, + 81.7, + 81.9, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k": [ + 81.5, + 81.6, + 81.7, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k": [ + 81.6, + 81.7, + 81.8, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 81.5, + 81.6, + 81.7, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 81.6, + 81.6, + 81.7, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k": [ + 81.5, + 81.6, + 81.9, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v": [ + 81.5, + 81.6, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q": [ + 81.5, + 81.5, + 81.8, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v": [ + 81.5, + 81.6, + 81.8, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 81.4, + 81.6, + 81.8, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k": [ + 81.4, + 81.5, + 81.9, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 81.4, + 81.5, + 81.8, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 81.4, + 81.6, + 81.8, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 81.5, + 81.5, + 81.7, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q": [ + 81.5, + 81.5, + 81.7, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 81.4, + 81.6, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q": [ + 81.3, + 81.5, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 81.4, + 81.5, + 81.8, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q": [ + 81.4, + 81.6, + 81.8, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k": [ + 81.3, + 81.6, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k": [ + 81.3, + 81.5, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q": [ + 81.3, + 81.5, + 81.8, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 81.3, + 81.5, + 81.6, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k": [ + 81.3, + 81.4, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 81.3, + 81.5, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k": [ + 81.3, + 81.5, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k": [ + 81.3, + 81.4, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0": [ + 81.3, + 81.4, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q": [ + 81.3, + 81.4, + 81.7, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q": [ + 81.4, + 81.4, + 81.6, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q": [ + 81.3, + 81.4, + 81.8, + 81.6 + ], + 
"mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 81.3, + 81.4, + 81.6, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.2": [ + 81.2, + 81.5, + 81.7, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 81.2, + 81.4, + 81.8, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k": [ + 81.2, + 81.4, + 81.8, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 81.3, + 81.4, + 81.7, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q": [ + 81.3, + 81.4, + 81.7, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 81.3, + 81.4, + 81.7, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 81.2, + 81.4, + 81.7, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q": [ + 81.3, + 81.3, + 81.7, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v": [ + 81.2, + 81.3, + 81.8, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2": [ + 81.1, + 81.4, + 81.7, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k": [ + 81.2, + 81.3, + 81.6, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k": [ + 81.2, + 81.1, + 81.6, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 81.3, + 81.3, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.2, + 81.3, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k": [ + 81.2, + 81.4, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k": [ + 81.2, + 81.2, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q": [ + 81.3, + 81.2, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k": [ + 81.3, + 81.3, + 81.7, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v": [ + 81.2, + 81.2, + 81.5, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0": [ + 81.3, + 81.1, + 81.5, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v": [ + 81.2, + 81.2, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0": [ + 81.1, + 81.1, + 81.6, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 81.1, + 81.2, + 81.6, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.2": [ + 81.2, + 81.1, + 81.5, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_v": [ + 81.1, + 81.1, + 81.6, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k": [ + 81.1, + 81.1, + 81.7, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.1, + 81.3, + 81.6, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k": [ + 81.2, + 81.1, + 81.5, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 81.1, + 81.2, + 81.7, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 81.3, + 81.1, + 81.7, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v": [ + 81.2, + 81.1, + 81.6, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 81.2, + 81.1, + 81.6, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2": [ + 81.1, + 81.1, + 81.4, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 81.0, + 81.2, + 81.6, + 81.3 + 
], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2": [ + 81.2, + 81.2, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v": [ + 81.0, + 81.3, + 81.5, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 81.1, + 81.2, + 81.4, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 81.1, + 80.9, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q": [ + 81.0, + 80.8, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v": [ + 81.0, + 80.9, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 81.0, + 80.9, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k": [ + 81.1, + 80.9, + 81.6, + 81.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k": [ + 81.1, + 80.9, + 81.6, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.2": [ + 81.0, + 81.0, + 81.6, + 81.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q": [ + 81.0, + 81.0, + 81.5, + 81.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v": [ + 80.9, + 81.1, + 81.5, + 81.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 80.9, + 81.0, + 81.4, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 80.9, + 81.2, + 81.5, + 81.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k": [ + 81.0, + 81.0, + 81.5, + 81.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 80.9, + 80.9, + 81.4, + 81.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v": [ + 81.1, + 80.9, + 81.5, + 81.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 80.9, + 81.2, + 81.4, + 81.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v": [ + 80.9, + 81.0, + 81.4, + 81.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q": [ + 80.9, + 81.0, + 81.4, + 81.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v": [ + 80.9, + 81.0, + 81.5, + 81.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2": [ + 80.8, + 81.1, + 81.4, + 81.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q": [ + 80.8, + 80.9, + 81.4, + 81.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q": [ + 80.9, + 81.2, + 81.3, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 80.9, + 81.0, + 81.5, + 81.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q": [ + 81.0, + 81.0, + 81.4, + 81.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k": [ + 81.0, + 80.8, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k": [ + 81.0, + 80.9, + 81.4, + 81.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v": [ + 81.0, + 80.9, + 81.5, + 81.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k": [ + 81.0, + 80.8, + 81.4, + 81.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0": [ + 81.0, + 80.8, + 81.5, + 81.0 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q": [ + 80.9, + 81.0, + 81.4, + 81.1 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k": [ + 80.9, + 80.7, + 81.4, + 81.0 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 81.0, + 80.9, + 81.4, + 81.1 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.2": [ + 80.9, + 81.0, + 81.4, + 81.1 + ], + 
"mid_block.attentions.0.transformer_blocks.7.attn1.to_q": [ + 81.0, + 81.1, + 81.4, + 81.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k": [ + 81.0, + 80.9, + 81.4, + 81.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2": [ + 80.9, + 81.0, + 81.4, + 81.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 80.9, + 81.0, + 81.3, + 81.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v": [ + 81.0, + 81.0, + 81.3, + 81.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 80.9, + 80.9, + 81.3, + 81.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v": [ + 81.0, + 80.9, + 81.4, + 81.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2": [ + 80.9, + 80.8, + 81.4, + 81.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v": [ + 81.0, + 80.8, + 81.2, + 81.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2": [ + 80.9, + 81.1, + 81.2, + 81.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2": [ + 80.9, + 81.0, + 81.3, + 81.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 80.8, + 81.0, + 81.1, + 81.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k": [ + 80.8, + 81.0, + 81.2, + 81.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k": [ + 80.9, + 80.9, + 81.2, + 81.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k": [ + 81.0, + 80.9, + 81.1, + 81.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0": [ + 80.8, + 80.9, + 81.2, + 80.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q": [ + 80.8, + 80.8, + 81.3, + 80.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k": [ + 80.8, + 80.9, + 81.3, + 80.9 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k": [ + 80.8, + 81.0, + 81.2, + 81.0 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v": [ + 80.8, + 80.8, + 81.2, + 80.9 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 81.0, + 80.9, + 81.2, + 80.8 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q": [ + 81.0, + 80.9, + 81.2, + 81.0 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 81.0, + 80.9, + 81.2, + 80.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 80.9, + 80.8, + 81.3, + 81.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2": [ + 80.9, + 80.9, + 81.2, + 81.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k": [ + 80.8, + 80.9, + 81.3, + 81.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k": [ + 80.8, + 80.8, + 81.2, + 81.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v": [ + 80.8, + 81.0, + 81.2, + 81.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k": [ + 80.9, + 81.0, + 81.2, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 80.8, + 80.9, + 81.2, + 80.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2": [ + 80.7, + 80.8, + 81.2, + 80.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 80.8, + 80.6, + 81.1, + 80.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 80.8, + 80.7, + 81.2, + 81.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 80.8, + 80.8, + 81.1, + 80.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q": [ + 80.8, + 80.8, + 81.2, + 80.8 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 80.8, + 80.8, + 81.1, + 80.8 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 80.8, + 80.7, + 81.0, + 80.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k": [ + 80.8, + 80.7, + 81.2, + 80.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2": [ + 80.9, + 80.7, + 81.0, + 80.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2": [ + 80.8, + 80.8, + 81.1, + 80.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v": [ + 80.8, + 80.9, + 81.0, + 80.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2": [ + 80.8, + 80.7, + 81.2, + 80.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q": [ + 80.8, + 80.7, + 81.1, + 80.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v": [ + 80.9, + 80.8, + 81.0, + 80.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k": [ + 80.9, + 80.7, + 81.0, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k": [ + 80.9, + 80.8, + 81.1, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q": [ + 80.8, + 80.8, + 80.9, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q": [ + 80.8, + 80.8, + 81.0, + 80.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q": [ + 80.9, + 80.9, + 81.0, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k": [ + 80.7, + 80.8, + 81.0, + 80.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 80.8, + 80.7, + 81.1, + 80.7 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k": [ + 80.8, + 80.7, + 81.0, + 80.8 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 80.9, + 80.8, + 81.0, + 80.8 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.2": [ + 80.7, + 80.8, + 81.1, + 80.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 80.7, + 80.7, + 81.1, + 80.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q": [ + 80.9, + 80.7, + 81.1, + 80.8 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.2": [ + 80.7, + 80.7, + 80.9, + 80.9 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v": [ + 80.8, + 80.6, + 81.1, + 80.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 80.8, + 80.5, + 80.9, + 80.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v": [ + 80.8, + 80.7, + 81.0, + 80.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 80.9, + 80.6, + 81.0, + 80.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2": [ + 80.9, + 80.8, + 80.9, + 80.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 80.8, + 80.7, + 80.9, + 80.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v": [ + 80.8, + 80.8, + 80.9, + 80.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 80.8, + 80.7, + 80.9, + 80.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v": [ + 80.8, + 80.7, + 81.0, + 80.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k": [ + 80.7, + 80.7, + 80.9, + 80.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v": [ + 80.7, + 80.7, + 80.8, + 80.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q": [ + 80.7, + 80.6, + 80.9, + 80.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k": [ + 80.8, + 80.6, + 80.9, + 80.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v": [ 
+ 80.7, + 80.7, + 80.8, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k": [ + 80.8, + 80.7, + 80.9, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k": [ + 80.7, + 80.6, + 81.0, + 80.8 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v": [ + 80.7, + 80.6, + 81.0, + 80.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q": [ + 80.7, + 80.5, + 81.0, + 80.7 + ], + "mid_block.attentions.0.transformer_blocks.9.ff.net.2": [ + 80.8, + 80.7, + 80.9, + 80.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k": [ + 80.7, + 80.8, + 80.8, + 80.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 80.7, + 80.9, + 80.9, + 80.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 80.6, + 80.8, + 80.9, + 80.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 80.8, + 80.8, + 80.9, + 80.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v": [ + 80.7, + 80.7, + 81.0, + 80.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q": [ + 80.7, + 80.7, + 80.9, + 80.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v": [ + 80.7, + 80.7, + 80.9, + 80.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q": [ + 80.7, + 80.7, + 81.0, + 80.7 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 80.7, + 80.7, + 81.0, + 80.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v": [ + 80.7, + 80.8, + 80.9, + 80.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k": [ + 80.6, + 80.9, + 80.8, + 80.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 80.7, + 80.6, + 80.8, + 80.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v": [ + 80.6, + 80.5, + 80.9, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v": [ + 80.7, + 80.6, + 80.9, + 80.6 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q": [ + 80.7, + 80.6, + 80.8, + 80.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q": [ + 80.7, + 80.7, + 80.7, + 80.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q": [ + 80.5, + 80.6, + 80.8, + 80.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v": [ + 80.6, + 80.6, + 80.8, + 80.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2": [ + 80.6, + 80.6, + 80.9, + 80.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q": [ + 80.6, + 80.4, + 80.8, + 80.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v": [ + 80.6, + 80.5, + 80.8, + 80.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0": [ + 80.6, + 80.4, + 80.6, + 80.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q": [ + 80.6, + 80.4, + 80.7, + 80.5 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 80.6, + 80.3, + 80.7, + 80.5 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.2": [ + 80.6, + 80.5, + 80.7, + 80.4 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 80.5, + 80.4, + 80.7, + 80.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 80.6, + 80.4, + 80.7, + 80.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v": [ + 80.6, + 80.5, + 80.6, + 80.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 80.6, + 80.4, + 80.6, + 80.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 80.6, + 80.4, + 80.6, + 80.5 + ], + 
"up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q": [ + 80.6, + 80.5, + 80.6, + 80.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 80.5, + 80.5, + 80.5, + 80.5 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k": [ + 80.5, + 80.4, + 80.6, + 80.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 80.4, + 80.2, + 80.5, + 80.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 80.5, + 80.4, + 80.5, + 80.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 80.5, + 80.4, + 80.5, + 80.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2": [ + 80.5, + 80.3, + 80.4, + 80.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 80.5, + 80.3, + 80.5, + 80.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 80.5, + 80.4, + 80.5, + 80.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 80.5, + 80.3, + 80.3, + 80.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v": [ + 80.5, + 80.2, + 80.3, + 80.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 80.4, + 80.1, + 80.4, + 80.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k": [ + 80.4, + 80.3, + 80.3, + 80.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q": [ + 80.4, + 80.3, + 80.3, + 80.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k": [ + 80.4, + 80.2, + 80.4, + 80.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k": [ + 80.3, + 80.3, + 80.3, + 80.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 80.3, + 80.2, + 80.3, + 80.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v": [ + 80.3, + 80.2, + 80.3, + 80.3 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 80.3, + 80.2, + 80.3, + 80.3 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 80.3, + 80.1, + 80.3, + 80.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v": [ + 80.3, + 80.1, + 80.2, + 80.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 80.4, + 80.2, + 80.3, + 80.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 80.2, + 80.2, + 80.0, + 80.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 80.2, + 80.1, + 80.1, + 80.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 80.1, + 80.0, + 80.0, + 79.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 80.0, + 80.0, + 80.1, + 80.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 80.1, + 80.0, + 79.9, + 80.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v": [ + 80.1, + 80.0, + 80.0, + 80.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 80.1, + 79.9, + 80.0, + 79.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v": [ + 80.2, + 80.0, + 79.9, + 79.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v": [ + 80.1, + 80.0, + 79.9, + 79.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 80.1, + 79.9, + 79.9, + 80.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k": [ + 80.0, + 79.9, + 80.0, + 79.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q": [ + 80.0, + 80.0, + 79.9, + 79.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v": [ + 80.0, + 80.0, + 79.7, + 79.8 + ], + 
"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 80.0, + 79.9, + 79.8, + 79.8 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 80.0, + 79.9, + 79.8, + 79.9 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q": [ + 79.9, + 79.9, + 79.8, + 79.8 + ], + "mid_block.resnets.1.time_emb_proj": [ + 80.0, + 79.9, + 79.7, + 79.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2": [ + 79.9, + 80.0, + 79.7, + 79.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k": [ + 80.0, + 79.9, + 79.8, + 79.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 79.9, + 79.9, + 79.7, + 79.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 79.9, + 79.9, + 79.7, + 79.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2": [ + 79.9, + 80.0, + 79.6, + 79.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 79.9, + 79.8, + 79.7, + 79.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 80.0, + 79.8, + 79.6, + 79.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q": [ + 79.9, + 79.8, + 79.7, + 79.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v": [ + 79.8, + 79.7, + 79.5, + 79.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 79.8, + 79.7, + 79.5, + 79.6 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.2": [ + 79.8, + 79.7, + 79.4, + 79.6 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 79.7, + 79.7, + 79.4, + 79.6 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 79.9, + 79.7, + 79.5, + 79.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2": [ + 79.7, + 79.7, + 79.5, + 79.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 79.8, + 79.7, + 79.5, + 79.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v": [ + 79.7, + 79.7, + 79.4, + 79.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 79.7, + 79.7, + 79.4, + 79.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v": [ + 79.8, + 79.7, + 79.3, + 79.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 79.7, + 79.6, + 79.2, + 79.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q": [ + 79.7, + 79.6, + 79.2, + 79.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k": [ + 79.7, + 79.6, + 79.3, + 79.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 79.7, + 79.6, + 79.4, + 79.3 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 79.6, + 79.5, + 79.2, + 79.1 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v": [ + 79.6, + 79.5, + 79.3, + 79.2 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v": [ + 79.6, + 79.5, + 79.3, + 79.1 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 79.7, + 79.4, + 79.2, + 79.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v": [ + 79.5, + 79.4, + 79.1, + 79.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v": [ + 79.6, + 79.4, + 79.1, + 79.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2": [ + 79.5, + 79.4, + 79.1, + 79.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v": [ + 79.6, + 79.5, + 79.2, + 79.1 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 79.5, + 79.3, + 79.1, + 79.2 + ], + 
"mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 79.4, + 79.4, + 79.1, + 79.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 79.4, + 79.5, + 79.2, + 79.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 79.5, + 79.4, + 79.0, + 79.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 79.3, + 79.4, + 79.2, + 79.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v": [ + 79.4, + 79.4, + 79.1, + 79.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v": [ + 79.4, + 79.5, + 79.1, + 79.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 79.4, + 79.4, + 79.1, + 79.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 79.4, + 79.4, + 79.1, + 79.1 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v": [ + 79.5, + 79.4, + 79.1, + 79.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v": [ + 79.6, + 79.4, + 79.1, + 79.0 + ], + "down_blocks.2.attentions.1.proj_in": [ + 79.5, + 79.5, + 79.0, + 79.1 + ], + "down_blocks.2.resnets.1.conv2": [ + 79.5, + 79.5, + 79.0, + 79.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 79.6, + 79.5, + 78.9, + 79.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 79.4, + 79.4, + 79.0, + 79.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 79.5, + 79.4, + 79.0, + 79.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2": [ + 79.5, + 79.4, + 79.0, + 79.0 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 79.5, + 79.3, + 79.0, + 78.9 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 79.6, + 79.4, + 79.0, + 79.0 + ], + "mid_block.resnets.0.time_emb_proj": [ + 79.4, + 79.4, + 78.9, + 78.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 79.4, + 79.3, + 78.9, + 78.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 79.4, + 79.3, + 78.7, + 78.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v": [ + 79.4, + 79.3, + 78.6, + 78.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 79.4, + 79.2, + 78.6, + 78.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 79.4, + 79.2, + 78.8, + 78.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 79.4, + 79.3, + 78.7, + 78.9 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 79.2, + 79.1, + 78.6, + 78.7 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q": [ + 79.2, + 79.1, + 78.6, + 78.7 + ], + "mid_block.attentions.0.proj_out": [ + 79.4, + 79.1, + 78.6, + 78.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 79.2, + 79.0, + 78.5, + 78.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2": [ + 79.3, + 79.0, + 78.5, + 78.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 79.1, + 78.9, + 78.5, + 78.6 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 79.1, + 79.1, + 78.5, + 78.6 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 79.1, + 78.9, + 78.4, + 78.6 + ], + "mid_block.resnets.0.conv2": [ + 79.0, + 78.8, + 78.5, + 78.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 79.0, + 78.9, + 78.5, + 78.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2": [ + 79.1, + 78.9, + 78.5, + 78.7 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 79.0, + 78.8, + 78.5, + 78.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2": [ + 79.1, + 78.8, + 78.4, + 78.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 79.1, + 78.7, + 78.5, + 78.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 78.9, + 78.6, + 78.3, + 78.4 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 79.0, + 78.7, + 78.5, + 78.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 79.1, + 78.7, + 78.4, + 78.5 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 79.2, + 78.6, + 78.3, + 78.6 + ], + "mid_block.resnets.1.conv1": [ + 79.2, + 78.8, + 78.3, + 78.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2": [ + 79.1, + 78.8, + 78.4, + 78.7 + ], + "up_blocks.0.resnets.0.conv2": [ + 79.1, + 78.6, + 78.3, + 78.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 79.1, + 78.5, + 78.2, + 78.4 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 79.1, + 78.5, + 78.3, + 78.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 79.1, + 78.5, + 78.3, + 78.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 79.1, + 78.5, + 78.2, + 78.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 79.0, + 78.5, + 78.2, + 78.5 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k": [ + 78.9, + 78.5, + 78.2, + 78.4 + ], + "mid_block.resnets.0.conv1": [ + 78.9, + 78.5, + 78.1, + 78.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 78.8, + 78.4, + 78.1, + 78.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 78.9, + 78.3, + 78.2, + 78.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2": [ + 78.9, + 78.3, + 78.1, + 78.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 78.8, + 78.3, + 78.0, + 78.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2": [ + 78.9, + 78.2, + 78.1, + 78.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 78.8, + 78.3, + 78.0, + 78.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 78.8, + 78.3, + 78.1, + 78.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 78.7, + 78.2, + 78.0, + 78.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 78.6, + 78.3, + 78.0, + 78.2 + ], + "down_blocks.2.attentions.1.proj_out": [ + 78.7, + 78.2, + 77.9, + 78.2 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 78.6, + 78.2, + 78.0, + 78.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 78.6, + 78.3, + 78.1, + 78.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 78.5, + 78.1, + 78.0, + 78.0 + ], + "mid_block.resnets.1.conv2": [ + 78.5, + 78.0, + 78.0, + 78.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 78.5, + 78.1, + 77.8, + 77.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 78.6, + 78.1, + 77.9, + 78.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 78.5, + 78.1, + 78.0, + 78.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 78.3, + 78.0, + 77.8, + 77.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 78.4, + 77.9, + 77.9, + 78.0 + ], + 
"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 78.2, + 77.9, + 77.8, + 77.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v": [ + 78.2, + 77.9, + 77.8, + 77.9 + ], + "up_blocks.0.resnets.1.conv1": [ + 78.1, + 77.7, + 77.7, + 77.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 78.2, + 77.8, + 77.7, + 77.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 78.1, + 77.8, + 77.7, + 77.8 + ], + "down_blocks.2.resnets.1.conv1": [ + 78.1, + 77.8, + 77.8, + 77.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 78.1, + 77.6, + 77.8, + 77.8 + ], + "down_blocks.2.resnets.0.conv2": [ + 78.1, + 77.6, + 77.8, + 77.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2": [ + 77.9, + 77.8, + 77.7, + 77.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2": [ + 78.0, + 77.7, + 77.7, + 77.7 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 78.0, + 77.7, + 77.8, + 77.6 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 77.9, + 77.6, + 77.7, + 77.7 + ], + "down_blocks.2.resnets.0.conv1": [ + 77.9, + 77.6, + 77.7, + 77.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2": [ + 77.8, + 77.6, + 77.6, + 77.5 + ], + "up_blocks.0.attentions.0.proj_in": [ + 77.8, + 77.6, + 77.7, + 77.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0": [ + 77.8, + 77.6, + 77.7, + 77.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v": [ + 77.8, + 77.6, + 77.6, + 77.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2": [ + 77.8, + 77.5, + 77.6, + 77.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0": [ + 77.7, + 77.6, + 77.5, + 77.5 + ], + "up_blocks.0.resnets.0.conv1": [ + 77.7, + 77.6, + 77.6, + 77.6 + ], + "mid_block.attentions.0.proj_in": [ + 77.8, + 77.6, + 77.6, + 77.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2": [ + 77.8, + 77.5, + 77.6, + 77.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 77.8, + 77.6, + 77.6, + 77.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2": [ + 77.9, + 77.5, + 77.5, + 77.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2": [ + 77.8, + 77.4, + 77.5, + 77.5 + ], + "up_blocks.0.resnets.1.conv2": [ + 77.8, + 77.5, + 77.5, + 77.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v": [ + 77.7, + 77.5, + 77.5, + 77.6 + ], + "up_blocks.0.attentions.0.proj_out": [ + 77.8, + 77.5, + 77.5, + 77.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v": [ + 77.7, + 77.3, + 77.4, + 77.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 77.6, + 77.3, + 77.4, + 77.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 77.7, + 77.3, + 77.4, + 77.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v": [ + 77.7, + 77.2, + 77.5, + 77.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj": [ + 77.7, + 77.2, + 77.5, + 77.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0": [ + 77.7, + 77.3, + 77.4, + 77.5 + ], + "up_blocks.0.resnets.2.conv2": [ + 77.7, + 77.3, + 77.4, + 77.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0": [ + 77.6, + 77.1, + 77.3, + 77.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj": [ + 77.7, + 77.2, + 77.2, + 77.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 77.6, + 77.1, + 77.2, + 77.3 + ], + 
"up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v": [ + 77.6, + 77.2, + 77.2, + 77.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 77.6, + 77.1, + 77.3, + 77.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0": [ + 77.5, + 77.0, + 77.1, + 77.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj": [ + 77.5, + 77.0, + 77.2, + 77.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v": [ + 77.4, + 77.1, + 77.2, + 77.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 77.4, + 77.1, + 77.2, + 77.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2": [ + 77.5, + 77.0, + 77.2, + 77.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2": [ + 77.4, + 77.0, + 77.2, + 77.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj": [ + 77.4, + 77.1, + 77.2, + 77.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v": [ + 77.3, + 77.0, + 77.1, + 77.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2": [ + 77.4, + 77.1, + 77.1, + 77.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v": [ + 77.3, + 76.9, + 77.0, + 77.0 + ], + "down_blocks.2.attentions.0.proj_out": [ + 77.3, + 77.0, + 77.1, + 77.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 77.3, + 77.0, + 77.1, + 76.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 77.2, + 77.0, + 77.1, + 77.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 77.2, + 76.9, + 77.0, + 77.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v": [ + 77.2, + 77.0, + 77.1, + 76.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 77.1, + 76.9, + 77.0, + 76.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v": [ + 77.2, + 76.9, + 77.1, + 76.8 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 77.1, + 76.9, + 77.1, + 76.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2": [ + 77.2, + 76.9, + 77.1, + 77.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v": [ + 77.2, + 76.9, + 77.2, + 77.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 77.2, + 76.9, + 77.2, + 77.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v": [ + 77.2, + 76.9, + 77.0, + 77.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj": [ + 77.2, + 77.0, + 77.1, + 77.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2": [ + 77.4, + 76.9, + 77.1, + 77.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 77.2, + 76.5, + 76.8, + 76.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2": [ + 77.2, + 76.6, + 76.9, + 77.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v": [ + 77.3, + 76.6, + 76.8, + 77.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v": [ + 77.2, + 76.5, + 76.8, + 76.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v": [ + 77.2, + 76.5, + 76.8, + 76.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 77.2, + 76.4, + 76.7, + 76.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 77.1, + 76.5, + 76.6, + 76.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v": [ + 77.2, + 76.4, + 76.7, + 76.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v": [ + 77.1, + 76.3, + 76.6, + 76.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v": 
[ + 77.2, + 76.3, + 76.6, + 76.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj": [ + 77.3, + 76.4, + 76.7, + 76.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj": [ + 77.3, + 76.4, + 76.7, + 76.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 77.2, + 76.3, + 76.6, + 76.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 77.2, + 76.4, + 76.6, + 76.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 77.2, + 76.4, + 76.7, + 76.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 77.2, + 76.5, + 76.7, + 76.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 77.2, + 76.5, + 76.7, + 76.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2": [ + 77.2, + 76.6, + 76.8, + 76.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 77.2, + 76.5, + 76.8, + 76.9 + ], + "up_blocks.0.resnets.2.conv1": [ + 77.2, + 76.5, + 76.7, + 76.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q": [ + 77.1, + 76.4, + 76.5, + 76.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v": [ + 77.0, + 76.4, + 76.6, + 76.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2": [ + 77.1, + 76.4, + 76.7, + 76.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 77.1, + 76.3, + 76.5, + 76.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 77.0, + 76.0, + 76.4, + 76.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 76.8, + 75.9, + 76.4, + 76.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 76.7, + 75.9, + 76.3, + 76.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 76.8, + 75.9, + 76.3, + 76.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2": [ + 76.6, + 75.8, + 76.2, + 76.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2": [ + 76.7, + 75.8, + 76.1, + 76.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 76.7, + 75.8, + 76.3, + 76.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0": [ + 76.4, + 75.7, + 76.0, + 76.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 76.4, + 75.7, + 76.0, + 76.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 76.5, + 75.6, + 76.1, + 76.2 + ], + "down_blocks.2.attentions.0.proj_in": [ + 76.4, + 75.6, + 76.0, + 76.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 76.3, + 75.6, + 76.1, + 76.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0": [ + 76.2, + 75.4, + 75.9, + 76.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 76.3, + 75.4, + 75.9, + 76.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2": [ + 76.3, + 75.4, + 75.9, + 76.0 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 75.9, + 75.3, + 75.6, + 75.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj": [ + 75.9, + 75.2, + 75.5, + 75.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 75.8, + 75.1, + 75.3, + 75.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 75.7, + 74.9, + 75.0, + 75.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 75.6, + 74.9, + 75.0, + 75.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2": [ + 75.6, + 74.9, + 75.0, + 75.1 + ], + 
"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 75.3, + 74.6, + 74.6, + 74.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0": [ + 75.1, + 74.4, + 74.5, + 74.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 75.1, + 74.3, + 74.4, + 74.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 75.0, + 74.2, + 74.2, + 74.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 74.9, + 74.0, + 73.9, + 74.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 74.9, + 73.9, + 73.9, + 74.1 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 74.8, + 73.8, + 74.1, + 74.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 74.6, + 73.8, + 74.2, + 74.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 74.7, + 73.9, + 74.0, + 74.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 74.6, + 73.8, + 74.0, + 74.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 74.6, + 73.6, + 73.9, + 73.8 + ], + "up_blocks.0.attentions.2.proj_in": [ + 74.4, + 73.5, + 73.7, + 73.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 74.4, + 73.5, + 73.7, + 73.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 74.4, + 73.5, + 73.8, + 73.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 74.2, + 73.4, + 73.7, + 73.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 74.3, + 73.3, + 73.6, + 73.6 + ], + "up_blocks.0.attentions.2.proj_out": [ + 74.1, + 73.3, + 73.7, + 73.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 74.0, + 73.1, + 73.5, + 73.6 + ], + "up_blocks.0.attentions.1.proj_out": [ + 73.9, + 73.1, + 73.4, + 73.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 73.6, + 73.0, + 73.3, + 73.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 73.5, + 72.8, + 73.2, + 73.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 73.5, + 72.7, + 73.0, + 73.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 73.4, + 72.7, + 73.0, + 73.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 73.3, + 72.6, + 72.9, + 73.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 72.9, + 72.3, + 72.7, + 72.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 72.8, + 72.3, + 72.5, + 72.7 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 72.7, + 72.2, + 72.4, + 72.6 + ], + "up_blocks.1.resnets.0.conv1": [ + 72.6, + 72.1, + 72.4, + 72.4 + ], + "up_blocks.0.attentions.1.proj_in": [ + 72.2, + 71.7, + 71.9, + 72.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k": [ + 72.1, + 71.5, + 71.8, + 71.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 71.8, + 71.3, + 71.5, + 71.8 + ], + "down_blocks.1.resnets.1.conv2": [ + 71.6, + 71.1, + 71.4, + 71.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 71.7, + 71.1, + 71.4, + 71.5 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 71.5, + 70.9, + 71.2, + 71.4 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 71.2, + 70.6, + 70.9, + 71.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 71.0, + 70.5, + 70.9, + 71.1 + ], + "up_blocks.1.resnets.0.conv2": [ + 70.9, + 70.3, + 70.7, + 70.8 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 70.6, + 69.9, + 70.4, 
+ 70.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 70.4, + 69.8, + 70.2, + 70.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 70.3, + 69.7, + 70.0, + 70.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 70.2, + 69.6, + 70.0, + 70.2 + ], + "down_blocks.1.resnets.0.conv2": [ + 70.0, + 69.5, + 69.8, + 70.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 70.0, + 69.5, + 69.8, + 70.1 + ], + "add_embedding.linear_1": [ + 69.9, + 69.5, + 69.5, + 69.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 69.8, + 69.4, + 69.5, + 69.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q": [ + 69.6, + 69.2, + 69.5, + 69.6 + ], + "add_embedding.linear_2": [ + 69.5, + 69.1, + 69.3, + 69.5 + ], + "down_blocks.1.attentions.1.proj_out": [ + 69.4, + 68.9, + 69.2, + 69.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 69.3, + 68.9, + 69.1, + 69.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 69.1, + 68.7, + 68.9, + 69.0 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 68.8, + 68.4, + 68.7, + 68.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 68.6, + 68.3, + 68.5, + 68.6 + ], + "up_blocks.1.attentions.0.proj_out": [ + 68.5, + 68.3, + 68.4, + 68.5 + ], + "time_embedding.linear_1": [ + 68.6, + 68.3, + 68.5, + 68.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 68.3, + 68.2, + 68.3, + 68.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 68.2, + 67.9, + 68.1, + 68.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 68.1, + 67.7, + 68.0, + 68.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 67.5, + 67.5, + 67.7, + 67.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 67.4, + 67.4, + 67.5, + 67.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 67.3, + 67.2, + 67.4, + 67.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 67.2, + 67.1, + 67.4, + 67.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 67.2, + 67.1, + 67.3, + 67.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 67.0, + 66.9, + 67.2, + 67.4 + ], + "down_blocks.1.attentions.1.proj_in": [ + 66.9, + 66.8, + 67.1, + 67.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 66.7, + 66.6, + 66.9, + 67.0 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 66.4, + 66.4, + 66.6, + 66.8 + ], + "down_blocks.1.resnets.1.conv1": [ + 66.3, + 66.4, + 66.6, + 66.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 66.2, + 66.3, + 66.4, + 66.6 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 65.7, + 65.8, + 65.9, + 66.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 65.4, + 65.6, + 65.5, + 65.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 65.4, + 65.6, + 65.6, + 65.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 65.2, + 65.4, + 65.4, + 65.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2": [ + 65.1, + 65.4, + 65.3, + 65.5 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 64.8, + 65.0, + 65.2, + 65.4 + ], + "up_blocks.1.attentions.1.proj_out": [ + 64.8, + 65.0, + 65.1, + 65.4 + ], + "up_blocks.1.attentions.0.proj_in": [ + 64.7, + 64.9, + 64.9, + 65.2 + ], + "up_blocks.1.resnets.2.conv2": [ + 64.4, + 64.7, + 64.7, + 
64.9 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 64.4, + 64.6, + 64.6, + 64.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 64.3, + 64.5, + 64.5, + 64.8 + ], + "down_blocks.1.attentions.0.proj_out": [ + 64.1, + 64.3, + 64.3, + 64.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 63.8, + 64.0, + 63.9, + 64.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 63.6, + 63.8, + 63.8, + 64.1 + ], + "up_blocks.1.resnets.1.conv2": [ + 63.7, + 63.8, + 63.9, + 64.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v": [ + 63.8, + 63.8, + 63.9, + 64.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 63.6, + 63.6, + 63.7, + 64.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 63.7, + 63.7, + 63.9, + 64.2 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 63.7, + 63.7, + 63.8, + 64.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 63.5, + 63.4, + 63.5, + 63.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 63.4, + 63.3, + 63.4, + 63.8 + ], + "down_blocks.1.downsamplers.0.conv": [ + 62.8, + 62.7, + 62.8, + 63.2 + ], + "up_blocks.1.attentions.2.proj_out": [ + 62.7, + 62.6, + 62.7, + 63.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 62.6, + 62.5, + 62.6, + 63.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 62.5, + 62.4, + 62.6, + 63.0 + ], + "down_blocks.1.resnets.0.conv1": [ + 62.2, + 62.0, + 62.2, + 62.6 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 61.8, + 61.6, + 61.8, + 62.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 61.4, + 61.2, + 61.4, + 61.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 61.5, + 61.3, + 61.5, + 61.8 + ], + "up_blocks.1.attentions.1.proj_in": [ + 61.3, + 61.0, + 61.2, + 61.6 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 60.9, + 60.6, + 60.8, + 61.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 60.7, + 60.3, + 60.6, + 60.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 60.4, + 60.2, + 60.5, + 60.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 60.1, + 59.8, + 60.1, + 60.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 60.2, + 59.8, + 60.1, + 60.4 + ], + "up_blocks.0.upsamplers.0.conv": [ + 59.7, + 59.3, + 59.7, + 59.8 + ], + "up_blocks.1.attentions.2.proj_in": [ + 59.4, + 59.1, + 59.4, + 59.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k": [ + 58.3, + 57.6, + 58.3, + 58.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 58.2, + 57.4, + 58.1, + 58.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 58.1, + 57.3, + 58.0, + 58.2 + ], + "down_blocks.1.attentions.0.proj_in": [ + 58.0, + 57.2, + 58.0, + 58.1 + ], + "up_blocks.1.resnets.1.conv1": [ + 57.8, + 57.0, + 57.8, + 57.9 + ], + "down_blocks.0.resnets.0.conv1": [ + 57.9, + 57.0, + 57.8, + 58.0 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 57.8, + 57.0, + 57.7, + 57.9 + ], + "up_blocks.1.resnets.2.conv1": [ + 57.6, + 56.8, + 57.5, + 57.7 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 57.4, + 56.6, + 57.3, + 57.5 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 57.2, + 56.5, + 57.1, + 57.3 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 57.3, + 56.5, + 57.2, + 57.4 + ], + "down_blocks.0.downsamplers.0.conv": [ + 57.0, + 56.2, + 56.9, + 
57.1 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 56.5, + 55.8, + 56.4, + 56.6 + ], + "time_embedding.linear_2": [ + 56.2, + 55.6, + 56.1, + 56.3 + ], + "down_blocks.0.resnets.1.conv1": [ + 56.1, + 55.3, + 55.9, + 56.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 56.2, + 55.4, + 55.9, + 56.1 + ], + "up_blocks.1.upsamplers.0.conv": [ + 55.4, + 54.7, + 55.1, + 55.3 + ], + "down_blocks.0.resnets.1.conv2": [ + 55.3, + 54.7, + 55.1, + 55.2 + ], + "down_blocks.0.resnets.0.conv2": [ + 55.0, + 54.5, + 54.9, + 54.9 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 54.5, + 54.0, + 54.3, + 54.3 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 53.9, + 53.5, + 53.8, + 53.8 + ], + "up_blocks.2.resnets.2.conv2": [ + 53.9, + 53.4, + 53.7, + 53.7 + ], + "up_blocks.2.resnets.0.conv2": [ + 53.8, + 53.4, + 53.7, + 53.7 + ], + "up_blocks.2.resnets.0.conv1": [ + 53.6, + 53.2, + 53.6, + 53.5 + ], + "up_blocks.2.resnets.2.conv1": [ + 53.0, + 52.6, + 52.9, + 52.8 + ], + "up_blocks.2.resnets.1.conv1": [ + 52.3, + 52.0, + 52.3, + 52.2 + ], + "up_blocks.2.resnets.1.conv2": [ + 51.4, + 51.2, + 51.5, + 51.4 + ] + }, + "6": { + "metadata": { + "nbits": 6, + "out_ngroups": 1, + "in_ngroups": 1, + "cumulative": true, + "candidates": [ + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q", + 
"mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.6.ff.net.2", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v", + "mid_block.resnets.0.time_emb_proj", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.4.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q", + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q", + 
"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k", + 
"up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v", + "up_blocks.0.resnets.0.time_emb_proj", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.5.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.8.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.2.ff.net.2", + 
"mid_block.attentions.0.transformer_blocks.5.attn2.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v", + 
"up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.1.ff.net.2", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0", + "mid_block.resnets.0.conv2", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v", + 
"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k", + "up_blocks.0.resnets.2.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.ff.net.2", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.8.ff.net.2", + "mid_block.resnets.1.time_emb_proj", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2", + "down_blocks.2.resnets.0.conv1", + "down_blocks.2.resnets.0.time_emb_proj", + "down_blocks.2.resnets.1.time_emb_proj", + "down_blocks.2.resnets.1.conv2", + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q", + 
"up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.4.ff.net.2", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k", + "down_blocks.2.resnets.1.conv1", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj", + 
"up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.3.ff.net.2", + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v", + "mid_block.attentions.0.proj_out", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q", + "mid_block.resnets.1.conv2", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0", + 
"up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.7.ff.net.2", + "mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.9.ff.net.2", + "mid_block.resnets.0.conv1", + "mid_block.resnets.1.conv1", + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q", + "up_blocks.0.resnets.0.conv1", + "up_blocks.0.resnets.1.conv1", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + 
"down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj", + "down_blocks.2.attentions.1.proj_in", + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2", + "down_blocks.2.resnets.0.conv2", + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj", + "up_blocks.0.resnets.0.conv2", + "up_blocks.0.resnets.1.time_emb_proj", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.5.ff.net.2", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.2.attentions.1.proj_out", + "up_blocks.0.attentions.0.proj_in", + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2", + "up_blocks.0.resnets.2.conv1", + "up_blocks.1.resnets.0.time_emb_proj", + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v", + "mid_block.attentions.0.proj_in", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.0.proj_in", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v", + "up_blocks.0.resnets.1.conv2", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2", + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v", + "down_blocks.1.resnets.1.time_emb_proj", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q", + "up_blocks.0.attentions.0.proj_out", + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0", + 
"up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0", + "down_blocks.2.attentions.0.proj_out", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2", + "up_blocks.0.resnets.2.conv2", + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2", + "up_blocks.0.resnets.0.conv_shortcut", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2", + 
"up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.1.proj_in", + "down_blocks.1.resnets.0.time_emb_proj", + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v", + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.2.proj_in", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v", + "up_blocks.0.resnets.1.conv_shortcut", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.0.attentions.2.proj_out", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k", + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k", + "up_blocks.1.resnets.0.conv1", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q", + "down_blocks.2.resnets.0.conv_shortcut", + "down_blocks.1.resnets.1.conv2", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q", + "down_blocks.1.resnets.0.conv2", + "up_blocks.0.attentions.1.proj_out", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k", + "up_blocks.2.resnets.2.time_emb_proj", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2", + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2", + "up_blocks.1.resnets.0.conv2", + "up_blocks.1.resnets.1.time_emb_proj", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.1.resnets.2.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.1.attentions.1.proj_out", + "down_blocks.1.downsamplers.0.conv", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v", + "down_blocks.1.attentions.1.proj_in", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.0.proj_out", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2", + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj", + "down_blocks.1.resnets.1.conv1", + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj", + 
"up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2", + "down_blocks.1.resnets.0.conv1", + "up_blocks.2.resnets.0.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v", + "up_blocks.2.resnets.1.time_emb_proj", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0", + "add_embedding.linear_2", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v", + "up_blocks.1.attentions.1.proj_out", + "up_blocks.1.resnets.1.conv2", + "down_blocks.0.resnets.1.time_emb_proj", + "add_embedding.linear_1", + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0", + "time_embedding.linear_1", + "up_blocks.1.attentions.0.proj_in", + "down_blocks.1.attentions.0.proj_out", + "up_blocks.1.resnets.2.conv2", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.0.upsamplers.0.conv", + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj", + "up_blocks.1.resnets.1.conv1", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.0.resnets.2.conv_shortcut", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.0.resnets.0.time_emb_proj", + "time_embedding.linear_2", + "up_blocks.1.resnets.2.conv1", + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj", + "up_blocks.1.attentions.1.proj_in", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.2.proj_out", + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj", + "up_blocks.1.resnets.0.conv_shortcut", + "down_blocks.1.attentions.0.proj_in", + "up_blocks.1.attentions.2.proj_in", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.2.resnets.2.conv_shortcut", + "down_blocks.0.resnets.0.conv1", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.resnets.2.conv_shortcut", + "down_blocks.0.downsamplers.0.conv", + "down_blocks.1.resnets.0.conv_shortcut", + "up_blocks.1.resnets.1.conv_shortcut", + "up_blocks.2.resnets.0.conv1", + "down_blocks.0.resnets.1.conv2", + "up_blocks.2.resnets.0.conv2", + "up_blocks.1.upsamplers.0.conv", + "down_blocks.0.resnets.1.conv1", + "down_blocks.0.resnets.0.conv2", + "up_blocks.2.resnets.1.conv_shortcut", + "up_blocks.2.resnets.0.conv_shortcut", + "up_blocks.2.resnets.2.conv2", + "up_blocks.2.resnets.1.conv1", + "up_blocks.2.resnets.2.conv1", + "up_blocks.2.resnets.1.conv2" + ], + "sizes": [ + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 2621440, + 
1638400, + 2621440, + 1638400, + 2621440, + 13107200, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 409600, + 1310720, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 2621440, + 1638400, + 6553600, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1310720, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 2621440, + 1638400, + 6553600, + 2621440, + 409600, + 1310720, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 6553600, + 1638400, + 1638400, + 1638400, + 6553600, + 2621440, + 2621440, + 1638400, + 1638400, + 6553600, + 1638400, + 6553600, + 2621440, + 1638400, + 13107200, + 6553600, + 1638400, + 13107200, + 1638400, + 2621440, + 1638400, + 1638400, + 13107200, + 6553600, + 2621440, + 2621440, + 2621440, + 13107200, + 6553600, + 2621440, + 2621440, + 1638400, + 1638400, + 13107200, + 6553600, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 2621440, + 2621440, + 1310720, + 1310720, + 409600, + 2621440, + 1638400, + 2621440, + 6553600, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 13107200, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 14745600, + 2621440, + 2621440, + 6553600, + 1638400, + 6553600, + 1638400, + 13107200, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 6553600, + 1638400, + 2621440, + 6553600, + 1638400, + 409600, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 
1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 6553600, + 7372800, + 1638400, + 1638400, + 14745600, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 6553600, + 1638400, + 2621440, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1310720, + 409600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 409600, + 6553600, + 1638400, + 1638400, + 13107200, + 6553600, + 6553600, + 1638400, + 1638400, + 6553600, + 2621440, + 2621440, + 13107200, + 2621440, + 14745600, + 2621440, + 13107200, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1310720, + 409600, + 13107200, + 1638400, + 13107200, + 2621440, + 1638400, + 6553600, + 2621440, + 13107200, + 1638400, + 13107200, + 13107200, + 13107200, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 6553600, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 14745600, + 1638400, + 13107200, + 1638400, + 13107200, + 1638400, + 13107200, + 13107200, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1310720, + 409600, + 1310720, + 409600, + 1638400, + 1638400, + 1638400, + 6553600, + 13107200, + 6553600, + 14745600, + 14745600, + 13107200, + 1638400, + 1638400, + 6553600, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 13107200, + 2621440, + 1638400, + 2621440, + 2621440, + 1638400, + 29491200, + 29491200, + 409600, + 409600, + 1638400, + 1638400, + 2621440, + 13107200, + 1310720, + 1638400, + 1310720, + 409600, + 6553600, + 13107200, + 13107200, + 1638400, + 6553600, + 14745600, + 13107200, + 14745600, + 1638400, + 1310720, + 6553600, + 409600, + 409600, + 1310720, + 1638400, + 1638400, + 1638400, + 6553600, + 22118400, + 819200, + 6553600, + 1638400, + 1638400, + 409600, + 1310720, + 1310720, + 1638400, + 409600, + 1310720, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1310720, + 409600, + 13107200, + 13107200, + 6553600, + 6553600, + 1638400, + 14745600, + 6553600, + 1638400, + 6553600, + 6553600, + 1638400, + 409600, + 13107200, + 6553600, + 409600, + 13107200, + 1638400, + 819200, + 1638400, + 409600, + 409600, + 409600, + 1638400, + 13107200, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 6553600, + 1638400, + 1638400, + 409600, + 1310720, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 6553600, + 13107200, + 1638400, + 6553600, + 13107200, + 13107200, + 13107200, + 6553600, + 14745600, + 6553600, + 1638400, + 1638400, + 13107200, + 6553600, + 409600, + 13107200, + 6553600, + 6553600, + 13107200, + 1638400, + 1638400, + 6553600, + 1638400, + 13107200, + 1638400, + 6553600, + 1638400, + 409600, + 1638400, + 1638400, + 409600, + 1638400, + 6553600, + 3276800, + 1310720, + 1638400, + 1638400, + 1638400, + 13107200, + 13107200, + 6553600, + 13107200, + 6553600, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 6553600, + 1638400, + 409600, + 1638400, + 819200, + 6553600, + 409600, + 1638400, + 409600, + 409600, + 13107200, + 409600, + 
1638400, + 409600, + 409600, + 3276800, + 409600, + 409600, + 409600, + 1638400, + 409600, + 1310720, + 11059200, + 409600, + 13107200, + 409600, + 1638400, + 409600, + 819200, + 3686400, + 409600, + 3686400, + 1638400, + 409600, + 409600, + 409600, + 1638400, + 1638400, + 3686400, + 819200, + 409600, + 409600, + 819200, + 409600, + 409600, + 3686400, + 409600, + 409600, + 3276800, + 1638400, + 409600, + 409600, + 1638400, + 3276800, + 3686400, + 3276800, + 1638400, + 1843200, + 409600, + 409600, + 409600, + 409600, + 1638400, + 3276800, + 409600, + 1638400, + 409600, + 409600, + 409600, + 409600, + 3686400, + 409600, + 3604480, + 1638400, + 409600, + 409600, + 409600, + 409600, + 409600, + 409600, + 3686400, + 1638400, + 1638400, + 14745600, + 3276800, + 7372800, + 1638400, + 2457600, + 3276800, + 409600, + 1638400, + 5529600, + 3276800, + 409600, + 409600, + 409600, + 409600, + 409600, + 3276800, + 1228800, + 409600, + 409600, + 3276800, + 204800, + 921600, + 3276800, + 614400, + 921600, + 204800, + 819200, + 2764800, + 921600, + 921600, + 3686400, + 921600, + 921600, + 204800, + 307200, + 921600, + 1843200, + 1843200, + 921600 + ] + }, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k": [ + 82.4, + 82.3, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0": [ + 82.1, + 82.4, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.0, + 82.3, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k": [ + 82.0, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q": [ + 82.3, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k": [ + 82.2, + 82.4, + 82.3, + 82.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k": [ + 82.2, + 82.2, + 82.5, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 82.0, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k": [ + 82.1, + 82.4, + 82.3, + 82.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 82.2, + 82.3, + 82.2, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 82.3, + 82.4, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k": [ + 82.2, + 82.2, + 82.3, + 82.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + 
"up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k": [ + 82.1, + 82.4, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0": [ + 81.9, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.2, + 82.2, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 82.1, + 82.2, + 82.5, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.2": [ + 82.0, + 82.2, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q": [ + 82.1, + 82.2, + 82.2, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v": [ + 82.0, + 82.2, + 82.3, + 82.0 + ], + "mid_block.resnets.0.time_emb_proj": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q": [ + 82.0, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k": [ + 81.9, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v": [ + 82.2, + 82.1, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 82.1, + 82.1, + 82.5, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k": [ + 82.1, + 82.2, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 81.9, + 82.1, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v": [ + 82.0, + 82.2, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 82.2, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k": [ + 82.1, + 82.2, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.2, + 82.2, + 82.3, + 82.1 + ], + 
"up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2": [ + 82.1, + 82.4, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q": [ + 82.0, + 82.4, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k": [ + 82.0, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2": [ + 82.1, + 82.1, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k": [ + 82.1, + 82.2, + 82.4, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q": [ + 81.9, + 82.1, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q": [ + 82.0, + 82.1, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q": [ + 81.9, + 82.1, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q": [ + 82.1, + 82.1, + 82.4, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0": [ + 81.9, + 82.3, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q": [ + 82.0, + 82.2, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k": [ + 81.8, + 82.2, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q": [ + 82.0, + 82.3, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k": [ + 81.9, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k": [ + 81.9, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q": [ + 81.9, + 82.3, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k": [ + 82.1, + 81.9, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k": [ + 82.0, + 82.2, + 82.3, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_q": [ + 82.0, + 82.2, + 82.3, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v": [ + 81.9, + 82.2, + 82.4, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q": [ + 81.9, + 82.2, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k": [ + 81.8, + 82.1, + 82.4, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k": [ + 81.8, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 82.0, + 82.1, + 82.3, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q": [ + 82.0, + 82.1, + 82.3, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q": [ + 81.9, + 82.2, + 82.2, + 81.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 82.0, + 82.2, + 82.4, + 81.9 + ], + 
"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 81.9, + 82.3, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q": [ + 82.1, + 82.2, + 82.4, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.1, + 82.1, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.1, + 82.2, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k": [ + 82.0, + 82.1, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k": [ + 82.0, + 82.2, + 82.4, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k": [ + 81.8, + 82.1, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q": [ + 82.0, + 82.2, + 82.1, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k": [ + 81.9, + 82.3, + 82.4, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q": [ + 81.8, + 82.2, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 81.8, + 82.2, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q": [ + 81.9, + 82.2, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v": [ + 82.0, + 82.1, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k": [ + 81.8, + 82.1, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 81.9, + 82.2, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.1, + 82.1, + 82.1, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v": [ + 82.0, + 82.1, + 82.3, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 82.0, + 82.2, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k": [ + 82.0, + 82.1, + 82.3, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 81.9, + 82.2, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2": [ + 81.8, + 82.2, + 82.3, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k": [ + 81.9, + 82.2, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v": [ + 81.7, + 82.1, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 81.9, + 82.1, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 81.8, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 81.9, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k": [ + 81.8, + 82.1, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q": [ + 82.0, + 82.1, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v": [ + 82.0, + 82.1, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 81.8, + 82.0, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q": [ + 81.9, + 82.1, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v": [ + 81.9, + 82.1, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k": [ + 82.0, + 82.0, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v": [ + 
82.0, + 82.0, + 82.3, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v": [ + 81.9, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 82.0, + 82.0, + 82.3, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v": [ + 82.0, + 82.0, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q": [ + 81.9, + 82.0, + 82.4, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k": [ + 81.9, + 82.1, + 82.3, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q": [ + 81.9, + 82.1, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q": [ + 81.9, + 82.0, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.8, + 82.1, + 82.3, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v": [ + 81.9, + 82.0, + 82.4, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 82.1, + 82.1, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q": [ + 81.9, + 82.0, + 82.3, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 81.9, + 82.2, + 82.3, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q": [ + 82.0, + 82.1, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k": [ + 81.9, + 82.1, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2": [ + 82.0, + 82.1, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k": [ + 82.0, + 82.0, + 82.3, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v": [ + 81.8, + 82.0, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q": [ + 81.8, + 82.1, + 82.3, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v": [ + 81.9, + 82.1, + 82.3, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k": [ + 81.9, + 82.1, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 81.9, + 82.3, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k": [ + 81.9, + 82.0, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 82.0, + 82.1, + 82.3, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 81.8, + 82.1, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q": [ + 81.9, + 82.0, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q": [ + 81.9, + 82.0, + 82.3, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k": [ + 82.0, + 82.1, + 82.2, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v": [ + 81.8, + 82.1, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k": [ + 81.8, + 82.0, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k": [ + 81.8, + 82.1, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 81.8, + 82.1, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q": [ + 81.8, + 81.9, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k": [ + 81.7, + 82.0, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 81.9, + 81.9, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q": [ + 81.8, + 81.9, + 
82.2, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k": [ + 81.8, + 82.1, + 82.3, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k": [ + 81.8, + 81.9, + 82.1, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k": [ + 81.8, + 82.0, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k": [ + 81.8, + 82.0, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0": [ + 81.9, + 81.9, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q": [ + 81.8, + 81.8, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0": [ + 81.8, + 81.9, + 82.3, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q": [ + 81.8, + 82.0, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v": [ + 81.7, + 81.9, + 82.1, + 81.8 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 81.8, + 82.0, + 82.1, + 81.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k": [ + 81.7, + 81.9, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 81.8, + 81.8, + 82.2, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 81.7, + 81.9, + 82.1, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 81.8, + 81.8, + 82.2, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k": [ + 81.7, + 81.9, + 82.2, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v": [ + 81.7, + 81.9, + 82.1, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v": [ + 81.7, + 81.9, + 82.1, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_q": [ + 81.8, + 81.8, + 82.1, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 81.7, + 81.9, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q": [ + 81.8, + 81.9, + 82.1, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v": [ + 81.8, + 81.9, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 81.5, + 81.9, + 82.0, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 81.7, + 81.9, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 81.7, + 81.7, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q": [ + 81.7, + 81.7, + 82.0, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_v": [ + 81.7, + 81.6, + 82.0, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k": [ + 81.8, + 81.9, + 82.1, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 81.7, + 81.8, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k": [ + 81.7, + 81.9, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 81.8, + 81.9, + 82.1, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q": [ + 81.9, + 81.8, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 81.7, + 81.9, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k": [ + 81.8, + 81.8, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v": [ + 81.8, + 81.8, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q": [ + 81.7, + 81.9, + 82.1, + 81.7 + ], + 
"up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 81.7, + 82.0, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v": [ + 81.7, + 81.8, + 82.2, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v": [ + 81.6, + 81.8, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 81.8, + 81.8, + 82.0, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k": [ + 81.7, + 81.8, + 82.1, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 81.8, + 81.7, + 82.0, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.2": [ + 81.7, + 81.9, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k": [ + 81.7, + 81.6, + 82.1, + 81.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 81.7, + 81.7, + 82.0, + 81.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 81.7, + 81.9, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 81.7, + 81.9, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 81.7, + 81.7, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.7, + 81.7, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 81.7, + 81.7, + 82.0, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q": [ + 81.6, + 81.7, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v": [ + 81.7, + 81.7, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 81.7, + 81.7, + 82.1, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q": [ + 81.7, + 81.8, + 82.1, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k": [ + 81.7, + 81.6, + 82.1, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 81.7, + 81.8, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k": [ + 81.7, + 81.8, + 81.9, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q": [ + 81.7, + 81.8, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k": [ + 81.7, + 81.7, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q": [ + 81.6, + 81.7, + 81.8, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q": [ + 81.7, + 81.7, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v": [ + 81.6, + 81.7, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k": [ + 81.6, + 81.9, + 82.0, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q": [ + 81.6, + 81.7, + 82.0, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 81.7, + 81.7, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q": [ + 81.7, + 81.8, + 82.1, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 81.6, + 81.8, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2": [ + 81.6, + 81.7, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k": [ + 81.7, + 81.8, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v": [ + 81.7, + 81.7, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 81.6, + 
81.7, + 81.9, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2": [ + 81.6, + 81.6, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k": [ + 81.6, + 81.7, + 82.0, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v": [ + 81.7, + 81.8, + 81.7, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q": [ + 81.6, + 81.7, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 81.7, + 81.6, + 82.0, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2": [ + 81.6, + 81.9, + 81.8, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 81.6, + 81.8, + 81.9, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2": [ + 81.6, + 81.5, + 81.8, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v": [ + 81.7, + 81.7, + 81.9, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 81.6, + 81.6, + 81.9, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 81.7, + 81.7, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2": [ + 81.6, + 81.8, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v": [ + 81.6, + 81.6, + 81.9, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 81.6, + 81.6, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 81.5, + 81.6, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 81.6, + 81.6, + 81.8, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 81.6, + 81.7, + 81.7, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q": [ + 81.5, + 81.6, + 81.8, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 81.6, + 81.6, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2": [ + 81.6, + 81.7, + 81.8, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k": [ + 81.7, + 81.7, + 81.8, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v": [ + 81.6, + 81.7, + 81.9, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k": [ + 81.7, + 81.6, + 81.8, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 81.5, + 81.6, + 81.8, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2": [ + 81.6, + 81.7, + 81.8, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k": [ + 81.5, + 81.6, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v": [ + 81.6, + 81.5, + 81.8, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q": [ + 81.6, + 81.7, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q": [ + 81.5, + 81.6, + 81.8, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 81.6, + 81.6, + 81.7, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2": [ + 81.5, + 81.6, + 81.8, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v": [ + 81.6, + 81.6, + 81.8, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k": [ + 81.4, + 81.5, + 81.8, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 81.5, + 81.5, + 81.8, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q": [ + 81.4, + 81.6, 
+ 81.8, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k": [ + 81.6, + 81.6, + 81.8, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v": [ + 81.5, + 81.6, + 81.8, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v": [ + 81.6, + 81.6, + 81.6, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q": [ + 81.4, + 81.7, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k": [ + 81.5, + 81.4, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q": [ + 81.4, + 81.7, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k": [ + 81.4, + 81.5, + 81.9, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v": [ + 81.6, + 81.6, + 81.8, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q": [ + 81.6, + 81.6, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k": [ + 81.4, + 81.7, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q": [ + 81.4, + 81.6, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v": [ + 81.5, + 81.7, + 81.8, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q": [ + 81.6, + 81.5, + 81.8, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k": [ + 81.6, + 81.7, + 81.8, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q": [ + 81.6, + 81.7, + 81.6, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0": [ + 81.5, + 81.5, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q": [ + 81.5, + 81.6, + 81.6, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v": [ + 81.4, + 81.5, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v": [ + 81.5, + 81.6, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q": [ + 81.5, + 81.5, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k": [ + 81.5, + 81.6, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v": [ + 81.4, + 81.6, + 81.7, + 81.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 81.6, + 81.5, + 81.6, + 81.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 81.6, + 81.4, + 81.7, + 81.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q": [ + 81.5, + 81.5, + 81.9, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.5, + 81.4, + 81.9, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.6, + 81.7, + 81.8, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v": [ + 81.5, + 81.6, + 81.8, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.2": [ + 81.6, + 81.6, + 81.8, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q": [ + 81.4, + 81.6, + 81.8, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 81.6, + 81.5, + 81.7, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 81.6, + 81.6, + 81.9, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q": [ + 81.5, + 81.5, + 81.8, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k": [ + 81.6, + 81.6, + 81.8, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 81.5, + 81.7, + 81.7, + 81.4 + ], + 
"mid_block.attentions.0.transformer_blocks.5.attn1.to_k": [ + 81.5, + 81.7, + 81.6, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v": [ + 81.5, + 81.6, + 81.9, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k": [ + 81.5, + 81.7, + 81.8, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q": [ + 81.4, + 81.6, + 81.9, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v": [ + 81.5, + 81.5, + 81.5, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v": [ + 81.5, + 81.6, + 81.9, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 81.7, + 81.6, + 81.9, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k": [ + 81.5, + 81.7, + 81.6, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 81.7, + 81.6, + 81.6, + 81.5 + ], + "mid_block.resnets.0.conv2": [ + 81.4, + 81.6, + 81.7, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.4, + 81.7, + 81.7, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k": [ + 81.4, + 81.6, + 81.9, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2": [ + 81.4, + 81.6, + 81.9, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v": [ + 81.4, + 81.6, + 81.9, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 81.6, + 81.6, + 81.8, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 81.5, + 81.4, + 81.7, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 81.5, + 81.4, + 81.7, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k": [ + 81.4, + 81.6, + 81.8, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 81.5, + 81.7, + 81.7, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 81.5, + 81.5, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v": [ + 81.5, + 81.5, + 81.8, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v": [ + 81.5, + 81.6, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 81.5, + 81.6, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v": [ + 81.5, + 81.5, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 81.5, + 81.6, + 81.7, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k": [ + 81.5, + 81.5, + 81.8, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q": [ + 81.5, + 81.5, + 81.9, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 81.4, + 81.5, + 81.8, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v": [ + 81.5, + 81.6, + 81.8, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0": [ + 81.5, + 81.6, + 81.9, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k": [ + 81.5, + 81.6, + 81.7, + 81.4 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 81.6, + 81.3, + 81.6, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 81.5, + 81.5, + 81.6, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q": [ + 81.6, + 81.6, + 81.6, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v": [ + 81.5, + 81.6, + 81.7, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.2": [ + 81.5, + 81.5, + 
81.5, + 81.5 + ], + "mid_block.resnets.1.time_emb_proj": [ + 81.5, + 81.6, + 81.6, + 81.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 81.5, + 81.6, + 81.7, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 81.5, + 81.5, + 81.6, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 81.5, + 81.6, + 81.7, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k": [ + 81.5, + 81.5, + 81.8, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k": [ + 81.6, + 81.5, + 81.6, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v": [ + 81.6, + 81.6, + 81.6, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 81.5, + 81.5, + 81.6, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 81.5, + 81.6, + 81.7, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 81.5, + 81.6, + 81.7, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 81.6, + 81.5, + 81.7, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 81.5, + 81.6, + 81.7, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 81.5, + 81.6, + 81.7, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 81.4, + 81.6, + 81.7, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v": [ + 81.5, + 81.5, + 81.7, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v": [ + 81.5, + 81.5, + 81.6, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v": [ + 81.5, + 81.4, + 81.6, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q": [ + 81.6, + 81.5, + 81.7, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q": [ + 81.5, + 81.6, + 81.7, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 81.5, + 81.4, + 81.7, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q": [ + 81.5, + 81.5, + 81.7, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2": [ + 81.6, + 81.6, + 81.6, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q": [ + 81.5, + 81.6, + 81.6, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 81.5, + 81.5, + 81.7, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v": [ + 81.5, + 81.5, + 81.8, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 81.6, + 81.5, + 81.6, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 81.4, + 81.4, + 81.7, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q": [ + 81.6, + 81.6, + 81.6, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2": [ + 81.5, + 81.5, + 81.7, + 81.3 + ], + "down_blocks.2.resnets.0.conv1": [ + 81.5, + 81.5, + 81.6, + 81.4 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 81.4, + 81.4, + 81.6, + 81.4 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 81.4, + 81.3, + 81.7, + 81.3 + ], + "down_blocks.2.resnets.1.conv2": [ + 81.4, + 81.2, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 81.4, + 81.2, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q": [ + 81.3, + 81.4, + 81.6, + 81.4 + ], + 
"up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 81.4, + 81.3, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v": [ + 81.4, + 81.3, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k": [ + 81.3, + 81.5, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 81.3, + 81.3, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v": [ + 81.3, + 81.6, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k": [ + 81.4, + 81.3, + 81.8, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 81.3, + 81.5, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k": [ + 81.4, + 81.3, + 81.7, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2": [ + 81.4, + 81.3, + 81.7, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 81.4, + 81.3, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v": [ + 81.4, + 81.3, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k": [ + 81.4, + 81.3, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 81.4, + 81.3, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k": [ + 81.4, + 81.3, + 81.7, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 81.4, + 81.5, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q": [ + 81.4, + 81.2, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 81.4, + 81.3, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q": [ + 81.4, + 81.3, + 81.6, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k": [ + 81.4, + 81.4, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 81.5, + 81.3, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 81.3, + 81.2, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q": [ + 81.4, + 81.3, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v": [ + 81.3, + 81.4, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q": [ + 81.4, + 81.3, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q": [ + 81.4, + 81.4, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k": [ + 81.3, + 81.3, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0": [ + 81.4, + 81.3, + 81.7, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k": [ + 81.4, + 81.3, + 81.6, + 81.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 81.3, + 81.4, + 81.6, + 81.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 81.4, + 81.3, + 81.5, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 81.4, + 81.3, + 81.6, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q": [ + 81.4, + 81.3, + 81.5, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q": [ + 81.4, + 81.4, + 81.7, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q": [ + 81.4, + 81.3, + 81.6, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k": [ + 81.3, + 81.3, + 81.8, + 81.3 + ], + 
"mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 81.3, + 81.3, + 81.6, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.2": [ + 81.4, + 81.5, + 81.6, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 81.3, + 81.2, + 81.5, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 81.4, + 81.2, + 81.6, + 81.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 81.3, + 81.3, + 81.5, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2": [ + 81.3, + 81.2, + 81.6, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q": [ + 81.4, + 81.3, + 81.6, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v": [ + 81.2, + 81.3, + 81.4, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 81.3, + 81.4, + 81.5, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2": [ + 81.3, + 81.4, + 81.6, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2": [ + 81.4, + 81.3, + 81.5, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k": [ + 81.3, + 81.3, + 81.5, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 81.4, + 81.3, + 81.5, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2": [ + 81.4, + 81.3, + 81.5, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k": [ + 81.4, + 81.2, + 81.7, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v": [ + 81.3, + 81.2, + 81.7, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 81.5, + 81.3, + 81.6, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k": [ + 81.3, + 81.2, + 81.5, + 81.4 + ], + "down_blocks.2.resnets.1.conv1": [ + 81.4, + 81.4, + 81.4, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.5, + 81.3, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 81.3, + 81.3, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q": [ + 81.4, + 81.2, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v": [ + 81.3, + 81.3, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q": [ + 81.2, + 81.1, + 81.5, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v": [ + 81.3, + 81.1, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 81.3, + 81.3, + 81.5, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v": [ + 81.4, + 81.4, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q": [ + 81.3, + 81.3, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q": [ + 81.4, + 81.2, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v": [ + 81.4, + 81.0, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 81.3, + 81.1, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 81.4, + 81.1, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q": [ + 81.4, + 81.2, + 81.4, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q": [ + 81.2, + 81.3, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q": [ + 81.4, + 81.2, + 81.5, + 81.3 + ], + 
"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v": [ + 81.3, + 81.2, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k": [ + 81.4, + 81.1, + 81.6, + 81.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 81.4, + 81.2, + 81.6, + 81.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 81.3, + 81.3, + 81.5, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 81.4, + 81.2, + 81.5, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q": [ + 81.3, + 81.2, + 81.4, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 81.3, + 81.2, + 81.4, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k": [ + 81.3, + 81.3, + 81.4, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 81.5, + 81.3, + 81.5, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.2": [ + 81.2, + 81.3, + 81.5, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k": [ + 81.4, + 81.0, + 81.4, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 81.3, + 81.2, + 81.4, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q": [ + 81.4, + 81.2, + 81.6, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 81.4, + 81.3, + 81.5, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 81.4, + 81.3, + 81.5, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 81.3, + 81.2, + 81.5, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v": [ + 81.3, + 81.3, + 81.5, + 81.2 + ], + "mid_block.attentions.0.proj_out": [ + 81.4, + 81.3, + 81.4, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v": [ + 81.3, + 81.2, + 81.6, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v": [ + 81.3, + 81.3, + 81.6, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v": [ + 81.3, + 81.2, + 81.5, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q": [ + 81.4, + 81.2, + 81.5, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 81.3, + 81.2, + 81.5, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 81.3, + 81.3, + 81.6, + 81.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 81.2, + 81.2, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2": [ + 81.3, + 81.2, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v": [ + 81.3, + 81.2, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q": [ + 81.3, + 81.1, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k": [ + 81.3, + 81.2, + 81.5, + 81.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q": [ + 81.4, + 81.2, + 81.5, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 81.4, + 81.2, + 81.4, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v": [ + 81.2, + 81.2, + 81.4, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 81.2, + 81.2, + 81.6, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q": [ + 81.4, + 81.2, + 81.5, + 81.2 + ], + "mid_block.resnets.1.conv2": [ + 81.3, + 81.1, + 81.5, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v": [ + 81.3, + 81.3, + 81.5, + 81.2 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 81.3, + 81.0, + 81.5, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v": [ + 81.3, + 81.2, + 81.5, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 81.2, + 81.1, + 81.5, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 81.3, + 81.3, + 81.5, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 81.3, + 81.2, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 81.3, + 81.3, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 81.3, + 80.9, + 81.4, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v": [ + 81.3, + 81.2, + 81.4, + 81.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k": [ + 81.3, + 81.2, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v": [ + 81.1, + 81.1, + 81.6, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 81.3, + 81.2, + 81.5, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v": [ + 81.4, + 81.2, + 81.5, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 81.2, + 81.2, + 81.5, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 81.3, + 81.1, + 81.5, + 81.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v": [ + 81.3, + 81.2, + 81.5, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 81.2, + 81.2, + 81.4, + 81.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 81.3, + 81.2, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 81.2, + 81.1, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q": [ + 81.3, + 81.2, + 81.4, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 81.3, + 81.2, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q": [ + 81.2, + 81.2, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k": [ + 81.3, + 81.2, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k": [ + 81.2, + 81.1, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k": [ + 81.1, + 81.1, + 81.6, + 81.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.3, + 81.2, + 81.5, + 81.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 81.3, + 81.1, + 81.4, + 81.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 81.3, + 81.3, + 81.4, + 81.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 81.2, + 81.1, + 81.4, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k": [ + 81.2, + 81.0, + 81.5, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k": [ + 81.2, + 81.1, + 81.3, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v": [ + 81.2, + 81.2, + 81.4, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.2": [ + 81.2, + 81.1, + 81.4, + 81.2 + ], + "mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 81.1, + 81.0, + 81.5, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.9.ff.net.2": [ + 81.1, + 81.2, + 81.4, + 81.2 + ], + "mid_block.resnets.0.conv1": [ + 81.2, + 81.1, + 81.4, + 81.2 + ], + "mid_block.resnets.1.conv1": [ + 81.1, + 
81.0, + 81.4, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 81.2, + 81.0, + 81.4, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v": [ + 81.1, + 81.0, + 81.4, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v": [ + 81.2, + 81.1, + 81.5, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2": [ + 81.3, + 81.1, + 81.4, + 81.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 81.1, + 81.0, + 81.4, + 81.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2": [ + 81.1, + 81.1, + 81.4, + 81.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q": [ + 81.1, + 81.0, + 81.3, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 81.2, + 81.1, + 81.3, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v": [ + 81.2, + 81.0, + 81.4, + 81.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v": [ + 81.2, + 81.0, + 81.4, + 81.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 81.1, + 81.1, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 81.2, + 81.1, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k": [ + 81.0, + 81.1, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k": [ + 81.2, + 81.1, + 81.3, + 81.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k": [ + 81.1, + 81.1, + 81.3, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v": [ + 81.1, + 81.1, + 81.5, + 81.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q": [ + 81.0, + 81.1, + 81.4, + 81.3 + ], + "up_blocks.0.resnets.0.conv1": [ + 81.0, + 81.1, + 81.4, + 81.3 + ], + "up_blocks.0.resnets.1.conv1": [ + 81.1, + 81.0, + 81.3, + 81.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 81.1, + 81.1, + 81.3, + 81.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 81.2, + 81.2, + 81.3, + 81.1 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v": [ + 81.2, + 81.1, + 81.3, + 81.0 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v": [ + 81.2, + 81.1, + 81.3, + 81.1 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k": [ + 81.2, + 81.1, + 81.4, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 81.1, + 81.2, + 81.3, + 81.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 81.3, + 81.1, + 81.3, + 81.3 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v": [ + 81.2, + 81.0, + 81.4, + 81.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 81.2, + 81.1, + 81.4, + 81.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.1, + 81.0, + 81.2, + 81.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2": [ + 81.1, + 81.0, + 81.2, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 81.1, + 80.8, + 81.5, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 81.0, + 80.9, + 81.3, + 80.9 + ], + "down_blocks.2.attentions.1.proj_in": [ + 81.1, + 80.9, + 81.3, + 81.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2": [ + 81.1, + 80.8, + 81.3, + 81.1 + ], + "down_blocks.2.resnets.0.conv2": [ + 81.1, + 80.9, + 81.3, + 80.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 81.1, + 80.8, + 
81.3, + 81.0 + ], + "up_blocks.0.resnets.0.conv2": [ + 81.0, + 80.9, + 81.4, + 81.0 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 80.9, + 80.9, + 81.3, + 80.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 81.0, + 80.7, + 81.2, + 80.8 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.2": [ + 81.1, + 80.9, + 81.3, + 80.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 81.0, + 80.9, + 81.2, + 80.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 81.0, + 81.1, + 81.2, + 80.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 81.0, + 80.9, + 81.2, + 80.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 80.9, + 80.9, + 81.3, + 80.9 + ], + "down_blocks.2.attentions.1.proj_out": [ + 81.1, + 81.0, + 81.2, + 80.8 + ], + "up_blocks.0.attentions.0.proj_in": [ + 81.2, + 80.8, + 81.2, + 80.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2": [ + 81.0, + 81.0, + 81.2, + 80.8 + ], + "up_blocks.0.resnets.2.conv1": [ + 81.1, + 80.9, + 81.3, + 80.9 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 81.1, + 80.9, + 81.2, + 81.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 81.2, + 80.9, + 81.2, + 80.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v": [ + 81.1, + 80.8, + 81.2, + 81.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v": [ + 81.2, + 80.9, + 81.1, + 81.1 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.2, + 80.8, + 81.1, + 80.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 81.1, + 80.8, + 81.1, + 80.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v": [ + 81.0, + 80.9, + 81.0, + 80.8 + ], + "mid_block.attentions.0.proj_in": [ + 81.1, + 80.8, + 81.1, + 80.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 81.0, + 80.7, + 81.0, + 80.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 80.9, + 80.9, + 80.9, + 80.5 + ], + "down_blocks.2.attentions.0.proj_in": [ + 80.9, + 80.8, + 80.9, + 80.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 80.9, + 80.8, + 80.9, + 80.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2": [ + 81.0, + 80.6, + 80.9, + 80.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v": [ + 80.9, + 80.8, + 81.1, + 80.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 81.0, + 80.9, + 81.0, + 80.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 81.0, + 80.7, + 81.0, + 80.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 80.9, + 80.6, + 80.9, + 80.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 81.0, + 81.0, + 80.9, + 80.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 80.9, + 80.9, + 81.0, + 80.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2": [ + 80.8, + 80.9, + 81.0, + 80.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2": [ + 81.0, + 80.9, + 80.9, + 80.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v": [ + 80.8, + 80.9, + 81.1, + 80.7 + ], + "up_blocks.0.resnets.1.conv2": [ + 80.9, + 80.8, + 81.0, + 80.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 81.0, + 80.9, + 81.1, + 80.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v": [ + 80.9, + 81.0, + 81.0, + 80.7 + ], + 
"up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2": [ + 81.0, + 81.0, + 81.0, + 80.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2": [ + 81.0, + 80.9, + 81.0, + 80.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0": [ + 80.9, + 80.8, + 81.1, + 80.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 80.9, + 80.7, + 81.0, + 80.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj": [ + 80.8, + 80.9, + 80.9, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2": [ + 80.9, + 80.8, + 81.0, + 80.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 80.8, + 80.7, + 80.9, + 80.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 80.8, + 80.8, + 80.8, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v": [ + 80.7, + 80.6, + 80.9, + 80.8 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 80.8, + 80.5, + 81.0, + 80.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0": [ + 80.7, + 80.7, + 80.9, + 80.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 80.7, + 80.6, + 80.7, + 80.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 80.6, + 80.7, + 80.7, + 80.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 80.6, + 80.5, + 80.9, + 80.6 + ], + "up_blocks.0.attentions.0.proj_out": [ + 80.7, + 80.4, + 80.7, + 80.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 80.6, + 80.5, + 80.8, + 80.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 80.7, + 80.6, + 80.8, + 80.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 80.6, + 80.4, + 80.8, + 80.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 80.7, + 80.4, + 80.7, + 80.4 + ], + "down_blocks.2.attentions.0.proj_out": [ + 80.6, + 80.4, + 80.8, + 80.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v": [ + 80.7, + 80.4, + 80.7, + 80.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 80.6, + 80.2, + 80.5, + 80.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 80.6, + 80.2, + 80.6, + 80.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2": [ + 80.6, + 80.3, + 80.5, + 80.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0": [ + 80.6, + 80.3, + 80.7, + 80.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0": [ + 80.6, + 80.4, + 80.6, + 80.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 80.4, + 80.3, + 80.6, + 80.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 80.3, + 80.1, + 80.4, + 80.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v": [ + 80.3, + 80.1, + 80.4, + 80.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 80.3, + 80.1, + 80.4, + 80.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 80.2, + 80.0, + 80.3, + 80.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v": [ + 80.4, + 80.2, + 80.4, + 80.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 80.4, + 79.9, + 80.4, + 80.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2": [ + 80.3, + 80.2, + 80.4, + 80.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 80.4, + 80.0, + 80.4, + 80.2 + ], + 
"up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0": [ + 80.2, + 80.1, + 80.3, + 80.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2": [ + 80.2, + 80.1, + 80.3, + 80.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj": [ + 80.3, + 80.1, + 80.4, + 80.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj": [ + 80.1, + 80.1, + 80.4, + 80.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj": [ + 80.3, + 80.1, + 80.3, + 80.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2": [ + 80.3, + 80.0, + 80.4, + 80.2 + ], + "up_blocks.0.resnets.2.conv2": [ + 80.3, + 80.2, + 80.3, + 80.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2": [ + 80.3, + 80.1, + 80.4, + 80.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 80.2, + 80.1, + 80.4, + 80.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v": [ + 80.2, + 80.0, + 80.5, + 80.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj": [ + 80.2, + 80.1, + 80.4, + 80.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2": [ + 80.2, + 80.1, + 80.5, + 80.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 80.3, + 79.9, + 80.4, + 80.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 80.2, + 80.1, + 80.2, + 80.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2": [ + 80.1, + 80.0, + 80.4, + 80.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2": [ + 80.2, + 80.0, + 80.2, + 80.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 80.1, + 80.1, + 80.2, + 80.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v": [ + 80.1, + 79.9, + 80.3, + 80.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v": [ + 80.2, + 79.9, + 80.3, + 80.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2": [ + 80.2, + 79.9, + 80.1, + 80.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v": [ + 80.1, + 79.9, + 80.2, + 80.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj": [ + 80.2, + 79.8, + 80.2, + 79.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v": [ + 80.1, + 79.9, + 80.2, + 80.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2": [ + 80.2, + 80.0, + 80.1, + 80.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0": [ + 80.1, + 80.0, + 80.3, + 79.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 80.1, + 79.9, + 80.0, + 80.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0": [ + 80.1, + 80.0, + 80.2, + 79.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v": [ + 80.1, + 79.9, + 80.3, + 80.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 80.0, + 79.9, + 80.1, + 79.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 80.0, + 79.8, + 80.3, + 79.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2": [ + 80.1, + 79.9, + 80.1, + 79.7 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 80.0, + 79.9, + 80.2, + 79.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 79.9, + 79.9, + 80.0, + 79.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v": [ + 80.1, + 79.9, + 80.0, + 79.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 80.1, + 79.8, + 80.0, + 79.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v": [ + 80.0, + 
79.6, + 80.0, + 79.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj": [ + 79.8, + 79.5, + 79.9, + 79.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj": [ + 79.9, + 79.6, + 80.0, + 80.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2": [ + 80.0, + 79.7, + 80.1, + 79.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 80.0, + 79.8, + 80.1, + 79.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 80.0, + 79.6, + 80.1, + 79.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 79.9, + 79.7, + 80.0, + 80.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v": [ + 79.9, + 79.7, + 80.0, + 79.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 80.0, + 79.6, + 79.9, + 79.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 80.0, + 79.7, + 80.2, + 79.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 79.8, + 79.6, + 79.9, + 79.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2": [ + 80.0, + 79.6, + 79.8, + 79.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0": [ + 80.0, + 79.7, + 79.9, + 79.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 79.9, + 79.6, + 79.8, + 79.5 + ], + "up_blocks.0.attentions.1.proj_in": [ + 79.9, + 79.5, + 79.9, + 79.6 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 79.9, + 79.6, + 79.8, + 79.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2": [ + 79.8, + 79.5, + 79.8, + 79.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 79.8, + 79.4, + 79.9, + 79.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v": [ + 79.9, + 79.6, + 79.9, + 79.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 79.8, + 79.4, + 79.7, + 79.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 79.7, + 79.5, + 79.7, + 79.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 79.6, + 79.6, + 79.6, + 79.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 79.7, + 79.6, + 79.5, + 79.4 + ], + "up_blocks.0.attentions.2.proj_in": [ + 79.6, + 79.5, + 79.5, + 79.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 79.8, + 79.5, + 79.4, + 79.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 79.6, + 79.4, + 79.5, + 79.5 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 79.6, + 79.5, + 79.4, + 79.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 79.4, + 79.3, + 79.5, + 79.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 79.5, + 79.3, + 79.6, + 79.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 79.4, + 79.3, + 79.4, + 79.4 + ], + "up_blocks.0.attentions.2.proj_out": [ + 79.2, + 79.3, + 79.5, + 79.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 79.2, + 79.2, + 79.5, + 79.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 79.1, + 79.0, + 79.4, + 79.1 + ], + "up_blocks.1.resnets.0.conv1": [ + 79.1, + 79.1, + 79.3, + 79.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 79.0, + 79.0, + 79.3, + 79.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 79.1, + 78.9, + 79.2, + 79.0 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 79.0, + 78.8, + 79.1, + 79.0 + ], + 
"up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 78.8, + 78.7, + 79.1, + 79.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 78.9, + 78.6, + 79.1, + 79.0 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 78.8, + 78.5, + 79.0, + 78.9 + ], + "down_blocks.1.resnets.1.conv2": [ + 78.9, + 78.3, + 79.0, + 78.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q": [ + 78.7, + 78.4, + 78.8, + 78.8 + ], + "down_blocks.1.resnets.0.conv2": [ + 78.7, + 78.4, + 78.8, + 78.6 + ], + "up_blocks.0.attentions.1.proj_out": [ + 78.7, + 78.3, + 78.9, + 78.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 78.7, + 78.2, + 78.8, + 78.7 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 78.5, + 78.1, + 78.7, + 78.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 78.4, + 78.1, + 78.7, + 78.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 78.3, + 78.1, + 78.7, + 78.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 78.6, + 78.0, + 78.6, + 78.5 + ], + "up_blocks.1.resnets.0.conv2": [ + 78.5, + 78.0, + 78.4, + 78.4 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 78.4, + 78.1, + 78.3, + 78.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k": [ + 78.3, + 77.7, + 78.2, + 78.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 78.3, + 77.8, + 78.3, + 78.1 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 78.0, + 78.0, + 78.3, + 78.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 78.0, + 77.6, + 78.2, + 78.1 + ], + "down_blocks.1.attentions.1.proj_out": [ + 77.9, + 77.6, + 78.1, + 77.9 + ], + "down_blocks.1.downsamplers.0.conv": [ + 77.8, + 77.6, + 78.1, + 77.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 77.7, + 77.6, + 78.0, + 77.8 + ], + "down_blocks.1.attentions.1.proj_in": [ + 77.8, + 77.5, + 77.7, + 77.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 77.5, + 77.1, + 77.5, + 77.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 77.4, + 77.1, + 77.3, + 77.5 + ], + "up_blocks.1.attentions.0.proj_out": [ + 77.4, + 76.9, + 77.5, + 77.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 77.2, + 76.9, + 77.1, + 77.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 77.1, + 76.9, + 76.9, + 76.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 77.1, + 76.7, + 76.9, + 77.1 + ], + "down_blocks.1.resnets.1.conv1": [ + 76.9, + 76.6, + 77.0, + 77.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 77.0, + 76.7, + 76.9, + 76.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 77.0, + 76.5, + 76.7, + 76.9 + ], + "down_blocks.1.resnets.0.conv1": [ + 76.7, + 76.6, + 76.8, + 76.9 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 77.0, + 76.6, + 76.9, + 76.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 76.8, + 76.5, + 76.6, + 76.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 76.5, + 76.6, + 76.5, + 76.7 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 76.4, + 76.4, + 76.3, + 76.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 76.5, + 76.2, + 76.2, + 76.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 76.3, + 76.1, + 76.2, + 76.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 76.3, + 76.2, + 76.0, + 
76.4 + ], + "add_embedding.linear_2": [ + 76.4, + 76.2, + 76.0, + 76.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 76.4, + 76.2, + 75.9, + 76.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 76.2, + 76.0, + 75.8, + 76.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v": [ + 76.0, + 75.7, + 75.7, + 76.0 + ], + "up_blocks.1.attentions.1.proj_out": [ + 75.9, + 75.8, + 75.6, + 75.9 + ], + "up_blocks.1.resnets.1.conv2": [ + 75.9, + 75.8, + 75.5, + 75.8 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 75.8, + 75.8, + 75.5, + 75.8 + ], + "add_embedding.linear_1": [ + 75.7, + 75.7, + 75.4, + 75.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2": [ + 75.7, + 75.8, + 75.3, + 75.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 75.5, + 75.4, + 75.3, + 75.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 75.7, + 75.5, + 75.2, + 75.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 75.6, + 75.5, + 75.1, + 75.3 + ], + "time_embedding.linear_1": [ + 75.5, + 75.5, + 75.1, + 75.3 + ], + "up_blocks.1.attentions.0.proj_in": [ + 75.5, + 75.3, + 75.0, + 75.1 + ], + "down_blocks.1.attentions.0.proj_out": [ + 75.4, + 75.1, + 74.9, + 75.1 + ], + "up_blocks.1.resnets.2.conv2": [ + 75.2, + 75.0, + 74.7, + 74.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 75.3, + 74.8, + 74.8, + 74.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 75.2, + 74.8, + 74.7, + 74.7 + ], + "up_blocks.0.upsamplers.0.conv": [ + 74.9, + 74.5, + 74.3, + 74.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 74.8, + 74.6, + 74.2, + 74.4 + ], + "up_blocks.1.resnets.1.conv1": [ + 74.7, + 74.4, + 74.3, + 74.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 74.6, + 74.2, + 74.2, + 74.3 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 74.4, + 74.1, + 74.1, + 74.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 74.4, + 74.1, + 73.9, + 74.1 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 74.4, + 74.0, + 74.0, + 74.0 + ], + "time_embedding.linear_2": [ + 74.2, + 73.9, + 74.1, + 74.2 + ], + "up_blocks.1.resnets.2.conv1": [ + 74.1, + 73.8, + 73.8, + 73.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 74.0, + 73.6, + 73.8, + 73.7 + ], + "up_blocks.1.attentions.1.proj_in": [ + 73.8, + 73.5, + 73.6, + 73.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 73.6, + 73.3, + 73.5, + 73.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 73.8, + 73.5, + 73.6, + 73.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 73.7, + 73.3, + 73.4, + 73.4 + ], + "up_blocks.1.attentions.2.proj_out": [ + 73.4, + 73.1, + 73.0, + 73.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 73.3, + 72.9, + 72.8, + 72.8 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 73.2, + 72.8, + 72.8, + 72.7 + ], + "down_blocks.1.attentions.0.proj_in": [ + 72.9, + 72.5, + 72.5, + 72.6 + ], + "up_blocks.1.attentions.2.proj_in": [ + 72.9, + 72.5, + 72.4, + 72.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 72.7, + 72.1, + 72.3, + 72.4 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 72.5, + 71.9, + 72.2, + 72.3 + ], + "down_blocks.0.resnets.0.conv1": [ + 72.3, + 72.0, + 72.1, + 72.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 72.2, + 
71.9, + 71.9, + 72.2 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 72.0, + 71.6, + 71.7, + 71.9 + ], + "down_blocks.0.downsamplers.0.conv": [ + 71.8, + 71.5, + 71.7, + 71.8 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 71.7, + 71.3, + 71.4, + 71.4 + ], + "up_blocks.1.resnets.1.conv_shortcut": [ + 71.3, + 70.9, + 70.9, + 71.2 + ], + "up_blocks.2.resnets.0.conv1": [ + 70.9, + 70.6, + 70.6, + 70.7 + ], + "down_blocks.0.resnets.1.conv2": [ + 70.6, + 70.2, + 70.5, + 70.3 + ], + "up_blocks.2.resnets.0.conv2": [ + 70.2, + 69.8, + 70.1, + 70.1 + ], + "up_blocks.1.upsamplers.0.conv": [ + 69.6, + 69.3, + 69.5, + 69.6 + ], + "down_blocks.0.resnets.1.conv1": [ + 69.0, + 68.6, + 68.8, + 68.9 + ], + "down_blocks.0.resnets.0.conv2": [ + 68.8, + 68.4, + 68.7, + 68.6 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 68.0, + 67.7, + 68.0, + 67.9 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 67.9, + 67.5, + 67.8, + 67.6 + ], + "up_blocks.2.resnets.2.conv2": [ + 67.6, + 67.2, + 67.5, + 67.4 + ], + "up_blocks.2.resnets.1.conv1": [ + 66.8, + 66.4, + 66.6, + 66.6 + ], + "up_blocks.2.resnets.2.conv1": [ + 66.1, + 65.6, + 65.9, + 65.9 + ], + "up_blocks.2.resnets.1.conv2": [ + 65.3, + 65.0, + 65.2, + 65.1 + ] + }, + "8": { + "metadata": { + "nbits": 8, + "out_ngroups": 1, + "in_ngroups": 1, + "cumulative": true, + "candidates": [ + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0", + "down_blocks.2.attentions.1.proj_out", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q", + 
"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k", + "down_blocks.2.resnets.1.time_emb_proj", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k", + 
"down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v", + "down_blocks.2.resnets.1.conv2", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q", + "up_blocks.0.resnets.1.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.ff.net.2", + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q", + 
"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj", + "down_blocks.2.resnets.0.conv1", + "down_blocks.2.resnets.1.conv1", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0", + 
"up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k", + "up_blocks.0.resnets.0.time_emb_proj", + "up_blocks.0.resnets.0.conv2", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.1.ff.net.2", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.4.ff.net.2", + "mid_block.attentions.0.transformer_blocks.5.ff.net.2", + "mid_block.attentions.0.transformer_blocks.6.ff.net.2", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.9.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + 
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q", + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q", + "down_blocks.2.attentions.1.proj_in", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj", + 
"down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2", + "down_blocks.2.resnets.0.time_emb_proj", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2", + 
"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v", + "mid_block.attentions.0.proj_in", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.2.ff.net.2", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.3.ff.net.2", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k", + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0", + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0", + "mid_block.resnets.0.conv2", + "mid_block.resnets.1.conv1", + "mid_block.resnets.1.time_emb_proj", + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k", + 
"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k", + "mid_block.resnets.0.conv1", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.1.resnets.1.time_emb_proj", + "down_blocks.2.attentions.0.proj_in", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k", + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0", + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0", + "up_blocks.0.attentions.0.proj_in", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k", + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v", + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v", + 
"up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2", + "up_blocks.0.resnets.1.conv2", + "up_blocks.0.resnets.2.time_emb_proj", + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj", + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v", + "mid_block.attentions.0.transformer_blocks.5.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v", + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.8.ff.net.2", + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj", + "mid_block.resnets.0.time_emb_proj", + "mid_block.resnets.1.conv2", + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0", + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k", + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k", + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v", + "down_blocks.2.resnets.0.conv2", + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v", + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0", + 
"up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v", + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj", + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q", + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k", + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.4.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0", + "mid_block.attentions.0.transformer_blocks.7.ff.net.2", + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0", + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2", + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj", + "up_blocks.0.attentions.0.proj_out", + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k", + "up_blocks.0.resnets.0.conv1", + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q", + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q", + "mid_block.attentions.0.transformer_blocks.8.attn1.to_v", + "mid_block.attentions.0.proj_out", + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v", + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2", + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q", + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj", + "up_blocks.1.resnets.0.time_emb_proj", + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0", + 
"mid_block.attentions.0.transformer_blocks.1.attn2.to_q", + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v", + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2", + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj", + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj", + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0", + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k", + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0", + "up_blocks.0.resnets.1.conv1", + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k", + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2", + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k", + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q", + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k", + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v", + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0", + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v", + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0", + "up_blocks.0.resnets.2.conv1", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k", + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q", + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k", + "up_blocks.0.attentions.1.proj_in", + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0", + "up_blocks.0.resnets.2.conv2", + 
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k", + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k", + "down_blocks.2.attentions.0.proj_out", + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q", + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q", + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj", + "up_blocks.0.resnets.0.conv_shortcut", + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj", + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q", + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2", + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2", + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0", + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k", + "up_blocks.1.resnets.0.conv1", + "down_blocks.1.downsamplers.0.conv", + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k", + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v", + "up_blocks.0.attentions.1.proj_out", + "up_blocks.0.resnets.1.conv_shortcut", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "down_blocks.1.resnets.1.conv1", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q", + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.resnets.1.conv2", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "down_blocks.1.resnets.0.time_emb_proj", + "up_blocks.0.attentions.2.proj_in", + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2", + "down_blocks.2.resnets.0.conv_shortcut", + "up_blocks.1.resnets.1.time_emb_proj", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q", + "down_blocks.1.attentions.1.proj_in", + "down_blocks.1.resnets.0.conv2", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2", + "up_blocks.0.attentions.2.proj_out", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q", + "up_blocks.1.resnets.0.conv2", + "up_blocks.2.resnets.2.time_emb_proj", + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj", + "down_blocks.1.attentions.1.proj_out", + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.resnets.2.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + 
"up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj", + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2", + "up_blocks.1.attentions.0.proj_out", + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v", + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.resnets.1.conv2", + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v", + "up_blocks.2.resnets.2.conv_shortcut", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v", + "down_blocks.1.resnets.0.conv1", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2", + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2", + "down_blocks.1.attentions.0.proj_out", + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0", + "up_blocks.1.attentions.1.proj_out", + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2", + "up_blocks.1.resnets.1.conv1", + "up_blocks.0.resnets.2.conv_shortcut", + "down_blocks.1.attentions.0.proj_in", + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj", + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2", + "up_blocks.2.resnets.1.time_emb_proj", + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj", + "up_blocks.2.resnets.0.time_emb_proj", + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v", + "up_blocks.1.attentions.0.proj_in", + "up_blocks.1.resnets.2.conv2", + "time_embedding.linear_1", + "up_blocks.1.resnets.2.conv1", + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj", + "add_embedding.linear_1", + "up_blocks.0.upsamplers.0.conv", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0", + "add_embedding.linear_2", + "up_blocks.1.attentions.1.proj_in", + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj", + "up_blocks.1.resnets.0.conv_shortcut", + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj", + "down_blocks.0.downsamplers.0.conv", + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0", + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v", + "up_blocks.1.attentions.2.proj_out", + "down_blocks.1.resnets.0.conv_shortcut", + "up_blocks.1.attentions.2.proj_in", + "down_blocks.0.resnets.1.time_emb_proj", + "up_blocks.1.resnets.1.conv_shortcut", + "up_blocks.2.resnets.2.conv2", + "down_blocks.0.resnets.0.time_emb_proj", + "time_embedding.linear_2", + "up_blocks.1.resnets.2.conv_shortcut", + "up_blocks.1.upsamplers.0.conv", + "down_blocks.0.resnets.0.conv1", + "up_blocks.2.resnets.0.conv2", + "up_blocks.2.resnets.1.conv2", + "up_blocks.2.resnets.1.conv_shortcut", + "down_blocks.0.resnets.1.conv2", + "up_blocks.2.resnets.0.conv1", + "up_blocks.2.resnets.0.conv_shortcut", + "down_blocks.0.resnets.0.conv2", + "up_blocks.2.resnets.1.conv1", + "up_blocks.2.resnets.2.conv1", + "down_blocks.0.resnets.1.conv1" + ], + "sizes": [ + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 2621440, + 6553600, + 6553600, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 
1638400, + 6553600, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 2621440, + 1638400, + 6553600, + 13107200, + 2621440, + 1638400, + 409600, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 409600, + 2621440, + 1638400, + 13107200, + 2621440, + 2621440, + 13107200, + 6553600, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 13107200, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 13107200, + 2621440, + 2621440, + 14745600, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 6553600, + 2621440, + 2621440, + 1638400, + 2621440, + 1638400, + 13107200, + 1638400, + 1638400, + 13107200, + 2621440, + 409600, + 1638400, + 1638400, + 2621440, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 6553600, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 13107200, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 13107200, + 7372800, + 14745600, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 2621440, + 13107200, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 2621440, + 2621440, + 1638400, + 2621440, + 1638400, + 6553600, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 14745600, + 1310720, + 409600, + 1638400, + 1638400, + 2621440, + 2621440, + 6553600, + 1638400, + 1638400, + 2621440, + 13107200, + 1638400, + 13107200, + 1638400, + 2621440, + 6553600, + 6553600, + 6553600, + 1638400, + 1638400, + 2621440, + 1638400, + 6553600, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 409600, + 1310720, + 409600, + 409600, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 2621440, + 13107200, + 1638400, + 6553600, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 13107200, + 1638400, + 2621440, + 6553600, + 13107200, + 6553600, + 1638400, + 2621440, + 6553600, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 13107200, + 1638400, + 2621440, + 2621440, + 13107200, + 6553600, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 2621440, + 13107200, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 1638400, + 1638400, + 1638400, + 
2621440, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 6553600, + 2621440, + 1638400, + 6553600, + 1638400, + 2621440, + 1638400, + 2621440, + 13107200, + 2621440, + 1638400, + 1310720, + 409600, + 1310720, + 1310720, + 1310720, + 1310720, + 1310720, + 1310720, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 6553600, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 13107200, + 1638400, + 1638400, + 2621440, + 1638400, + 2621440, + 1638400, + 14745600, + 14745600, + 1638400, + 1310720, + 2621440, + 2621440, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 409600, + 409600, + 1310720, + 1638400, + 1638400, + 14745600, + 1310720, + 409600, + 819200, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 13107200, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 6553600, + 1638400, + 1638400, + 1638400, + 6553600, + 13107200, + 1638400, + 6553600, + 1638400, + 2621440, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 2621440, + 1638400, + 1638400, + 1638400, + 6553600, + 6553600, + 1638400, + 2621440, + 1638400, + 1638400, + 6553600, + 14745600, + 1638400, + 409600, + 1310720, + 409600, + 409600, + 2621440, + 13107200, + 1638400, + 2621440, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 6553600, + 2621440, + 13107200, + 1638400, + 14745600, + 409600, + 409600, + 1310720, + 409600, + 1638400, + 13107200, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 14745600, + 1638400, + 13107200, + 6553600, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 2621440, + 13107200, + 1638400, + 1638400, + 2621440, + 1638400, + 1638400, + 1638400, + 13107200, + 2621440, + 13107200, + 409600, + 409600, + 409600, + 1310720, + 1310720, + 409600, + 1638400, + 1638400, + 1638400, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 2621440, + 29491200, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 1638400, + 6553600, + 6553600, + 13107200, + 1638400, + 6553600, + 1638400, + 1638400, + 13107200, + 1638400, + 1638400, + 13107200, + 819200, + 1638400, + 1638400, + 409600, + 409600, + 1310720, + 6553600, + 13107200, + 1638400, + 13107200, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 2621440, + 1638400, + 1638400, + 1638400, + 6553600, + 6553600, + 1638400, + 1638400, + 29491200, + 2621440, + 2621440, + 13107200, + 6553600, + 1310720, + 409600, + 1638400, + 1638400, + 13107200, + 6553600, + 1638400, + 1638400, + 1638400, + 1638400, + 13107200, + 1638400, + 6553600, + 13107200, + 1638400, + 13107200, + 13107200, + 1638400, + 13107200, + 1638400, + 13107200, + 1638400, + 1638400, + 22118400, + 409600, + 409600, + 409600, + 1638400, + 1638400, + 1638400, + 14745600, + 1310720, + 409600, + 1310720, + 1638400, + 1638400, + 6553600, + 1638400, + 13107200, + 3276800, + 13107200, + 13107200, + 409600, + 
409600, + 409600, + 6553600, + 13107200, + 6553600, + 1638400, + 1638400, + 6553600, + 409600, + 409600, + 13107200, + 1638400, + 409600, + 11059200, + 3686400, + 13107200, + 409600, + 409600, + 409600, + 1638400, + 3276800, + 409600, + 409600, + 3686400, + 409600, + 409600, + 3686400, + 409600, + 409600, + 819200, + 1638400, + 1638400, + 819200, + 819200, + 409600, + 409600, + 3686400, + 1638400, + 1638400, + 1638400, + 409600, + 409600, + 3686400, + 409600, + 3276800, + 409600, + 409600, + 3276800, + 819200, + 409600, + 3276800, + 3276800, + 1638400, + 409600, + 1638400, + 409600, + 1638400, + 3686400, + 409600, + 204800, + 409600, + 1843200, + 1638400, + 409600, + 1638400, + 409600, + 409600, + 409600, + 409600, + 1638400, + 7372800, + 2457600, + 409600, + 3276800, + 1638400, + 409600, + 3276800, + 409600, + 3276800, + 409600, + 409600, + 409600, + 409600, + 3686400, + 409600, + 5529600, + 3276800, + 3604480, + 14745600, + 409600, + 1638400, + 409600, + 3276800, + 1228800, + 3276800, + 921600, + 409600, + 409600, + 409600, + 204800, + 409600, + 409600, + 819200, + 921600, + 409600, + 1638400, + 614400, + 3686400, + 921600, + 921600, + 921600, + 204800, + 921600, + 2764800, + 307200, + 921600, + 1843200, + 1843200, + 921600 + ] + }, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q": [ + 82.0, + 82.4, + 82.6, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 82.2, + 82.4, + 82.3, + 82.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q": [ + 82.3, + 82.4, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q": [ + 82.2, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k": [ + 82.1, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k": [ + 82.1, + 82.4, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q": [ + 82.2, + 82.2, + 82.5, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 82.3, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v": [ + 82.4, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k": [ + 82.2, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k": [ + 82.0, + 82.2, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.2, + 82.4, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v": [ + 82.2, + 82.4, + 82.4, + 82.3 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q": [ + 82.3, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 82.0, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k": [ + 82.1, + 82.4, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v": [ + 82.3, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k": [ + 
82.1, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2": [ + 82.3, + 82.3, + 82.5, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2": [ + 82.3, + 82.3, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 82.2, + 82.3, + 82.5, + 82.3 + ], + "down_blocks.2.attentions.1.proj_out": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v": [ + 82.1, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 82.2, + 82.4, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v": [ + 82.2, + 82.4, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.0, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v": [ + 82.1, + 82.3, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q": [ + 82.2, + 82.3, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q": [ + 82.1, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k": [ + 82.1, + 82.2, + 82.5, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 82.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k": [ + 82.2, + 82.3, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 82.1, + 82.3, + 82.5, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k": [ + 82.2, + 82.3, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 82.1, + 82.3, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 82.2, + 82.2, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k": [ + 82.2, + 82.1, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v": [ + 82.1, + 82.4, + 82.5, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2": [ + 82.0, + 82.2, + 82.3, + 82.1 + ], + 
"up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 82.1, + 82.1, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 82.1, + 82.1, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k": [ + 82.2, + 82.1, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v": [ + 82.2, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.resnets.1.time_emb_proj": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.2, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q": [ + 81.9, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q": [ + 82.1, + 82.1, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v": [ + 82.1, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v": [ + 82.0, + 82.3, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q": [ + 82.2, + 82.2, + 82.5, + 82.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v": [ + 82.0, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v": [ + 82.2, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k": [ + 81.9, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2": [ + 82.1, + 82.3, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 81.9, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 82.0, + 82.2, + 82.4, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": [ + 82.0, + 82.2, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": [ + 82.0, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 82.1, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k": [ + 82.2, + 82.1, + 82.2, + 82.0 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v": [ + 82.1, + 82.2, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q": [ + 82.1, + 82.1, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k": [ + 82.0, + 82.1, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 82.1, + 82.2, + 82.4, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 82.2, + 82.3, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k": [ + 82.1, + 82.2, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v": [ + 82.1, + 82.2, + 82.2, + 82.1 + ], + "down_blocks.2.resnets.1.conv2": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 82.2, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k": [ + 82.1, + 82.1, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k": [ + 82.2, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v": [ + 82.1, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k": [ + 81.9, + 82.1, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q": [ + 82.1, + 82.1, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2": [ + 82.0, + 82.3, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 82.0, + 82.3, + 82.2, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k": [ + 82.1, + 82.3, + 82.2, + 82.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q": [ + 82.2, + 82.2, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q": [ + 82.1, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k": [ + 82.0, + 82.1, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v": [ + 82.2, + 82.3, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + "up_blocks.0.resnets.1.time_emb_proj": [ + 82.1, + 82.1, + 82.3, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": [ + 82.2, + 82.2, + 82.2, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.3, + 82.2, + 82.2, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v": [ + 81.9, + 82.2, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q": [ + 81.9, + 82.2, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k": [ + 81.9, + 82.3, + 82.3, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 82.1, + 82.4, 
+ 82.5, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q": [ + 81.9, + 82.2, + 82.4, + 82.2 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 82.0, + 82.2, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.0, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 82.0, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": [ + 82.0, + 82.2, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": [ + 82.0, + 82.1, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": [ + 82.1, + 82.1, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q": [ + 82.0, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2": [ + 82.0, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q": [ + 81.9, + 82.3, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q": [ + 82.1, + 82.2, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2": [ + 82.0, + 82.3, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v": [ + 82.1, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 82.1, + 82.3, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q": [ + 82.2, + 82.3, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k": [ + 81.8, + 82.3, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q": [ + 82.2, + 82.3, + 82.4, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 82.2, + 82.1, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k": [ + 82.2, + 82.3, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v": [ + 82.0, + 82.1, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2": [ + 82.0, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": [ + 82.1, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 82.3, + 82.1, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k": [ + 82.1, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q": [ + 82.1, + 82.1, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 82.2, + 82.1, + 82.4, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 82.2, + 82.2, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 82.2, + 82.3, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q": [ + 82.1, + 82.3, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 82.1, + 82.3, + 82.1, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 82.1, + 82.1, + 82.4, + 82.1 + ], + 
"down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2": [ + 81.8, + 82.3, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v": [ + 82.0, + 82.3, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q": [ + 82.1, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 82.0, + 82.1, + 82.3, + 82.2 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 82.0, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v": [ + 82.0, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q": [ + 82.0, + 82.2, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v": [ + 82.1, + 82.3, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 82.0, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v": [ + 82.2, + 82.1, + 82.3, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q": [ + 82.2, + 82.2, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k": [ + 82.1, + 82.2, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 82.1, + 82.2, + 82.4, + 82.1 + ], + "down_blocks.2.resnets.0.conv1": [ + 82.0, + 82.2, + 82.3, + 82.1 + ], + "down_blocks.2.resnets.1.conv1": [ + 82.1, + 82.1, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v": [ + 82.2, + 82.2, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v": [ + 82.1, + 82.3, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 82.0, + 82.2, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q": [ + 82.0, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v": [ + 82.1, + 82.0, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k": [ + 82.1, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k": [ + 82.0, + 82.1, + 82.2, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k": [ + 82.0, + 82.2, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k": [ + 82.2, + 82.2, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v": [ + 81.9, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 82.0, + 82.2, + 82.4, + 82.2 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2": [ + 81.8, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": [ + 81.8, + 82.3, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q": [ + 82.1, + 82.1, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v": [ + 82.1, + 82.3, + 82.4, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q": [ + 82.1, + 82.0, + 82.3, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k": [ + 82.1, + 82.2, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v": [ + 82.1, + 82.1, + 82.4, + 81.9 + ], + 
"up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k": [ + 81.9, + 82.2, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k": [ + 82.0, + 82.1, + 82.4, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v": [ + 82.0, + 82.2, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k": [ + 82.1, + 82.1, + 82.2, + 82.1 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k": [ + 82.0, + 82.2, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 82.1, + 82.1, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2": [ + 81.8, + 82.1, + 82.4, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q": [ + 81.9, + 82.3, + 82.4, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0": [ + 82.0, + 82.1, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v": [ + 81.9, + 82.2, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0": [ + 81.9, + 82.2, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 82.0, + 82.2, + 82.3, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0": [ + 82.0, + 82.1, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0": [ + 82.0, + 82.1, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k": [ + 81.9, + 82.1, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q": [ + 82.0, + 82.1, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k": [ + 81.7, + 82.1, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q": [ + 81.8, + 82.1, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k": [ + 81.9, + 82.2, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k": [ + 82.1, + 82.0, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "up_blocks.0.resnets.0.time_emb_proj": [ + 81.8, + 82.0, + 82.1, + 81.9 + ], + "up_blocks.0.resnets.0.conv2": [ + 81.9, + 82.0, + 82.2, + 82.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.7, + 82.0, + 82.3, + 81.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 82.0, + 82.1, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": [ + 82.0, + 82.0, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": [ + 81.8, + 82.1, + 82.1, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.9, + 82.1, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k": [ + 81.8, + 82.2, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.2": [ + 81.8, + 81.9, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k": [ + 81.8, + 82.1, + 82.1, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v": [ + 81.9, + 82.1, + 82.3, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k": [ + 81.9, + 82.0, + 82.1, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 81.9, + 82.1, + 82.1, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v": [ + 81.8, + 82.1, + 82.2, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 81.9, + 81.9, + 82.1, + 81.8 + 
], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q": [ + 81.9, + 82.0, + 82.2, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v": [ + 81.8, + 82.1, + 82.2, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.4.ff.net.2": [ + 82.0, + 82.0, + 82.2, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.5.ff.net.2": [ + 82.0, + 82.1, + 82.1, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.6.ff.net.2": [ + 82.1, + 82.0, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q": [ + 81.8, + 82.0, + 82.2, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k": [ + 82.0, + 82.1, + 82.2, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 81.7, + 82.0, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.9.ff.net.2": [ + 82.1, + 82.0, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k": [ + 81.9, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0": [ + 82.0, + 82.0, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v": [ + 82.0, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v": [ + 81.9, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q": [ + 81.9, + 82.0, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": [ + 82.0, + 82.1, + 82.3, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q": [ + 81.7, + 82.0, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q": [ + 81.9, + 82.1, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": [ + 81.8, + 82.1, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q": [ + 82.0, + 82.1, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v": [ + 81.8, + 82.1, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v": [ + 81.8, + 82.0, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v": [ + 81.8, + 82.1, + 82.2, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q": [ + 81.9, + 82.0, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q": [ + 82.0, + 82.1, + 82.3, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.9, + 82.0, + 82.1, + 81.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 81.9, + 82.0, + 82.1, + 82.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 81.9, + 82.0, + 82.2, + 82.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 81.8, + 82.0, + 82.1, + 81.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 81.8, + 81.9, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 81.9, + 82.2, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": [ + 81.9, + 81.9, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": [ + 82.0, + 82.1, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 82.0, + 82.1, + 82.0, + 81.9 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.0, + 82.1, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k": [ + 81.9, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v": [ + 81.9, + 82.0, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q": [ + 81.9, + 82.2, + 82.0, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 81.9, + 82.0, + 82.1, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k": [ + 81.9, + 81.9, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v": [ + 81.9, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0": [ + 82.0, + 82.1, + 81.9, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2": [ + 82.0, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q": [ + 82.0, + 82.1, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k": [ + 81.9, + 82.0, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 82.0, + 82.0, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0": [ + 81.9, + 81.9, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2": [ + 82.0, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q": [ + 82.0, + 82.0, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 81.9, + 82.0, + 82.0, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k": [ + 81.9, + 82.1, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 82.0, + 82.0, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q": [ + 82.0, + 82.0, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v": [ + 81.9, + 82.0, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 81.9, + 82.1, + 82.0, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 82.1, + 82.1, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q": [ + 81.8, + 82.1, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.proj_in": [ + 81.9, + 82.0, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": [ + 82.0, + 82.0, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": [ + 82.0, + 82.0, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": [ + 82.0, + 82.1, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k": [ + 82.0, + 82.0, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v": [ + 81.9, + 82.0, + 82.2, + 82.1 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 82.0, + 82.0, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q": [ + 81.9, + 82.0, + 82.0, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v": [ + 82.0, + 82.0, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 82.0, + 
82.0, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k": [ + 81.9, + 82.1, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v": [ + 82.0, + 82.0, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q": [ + 82.0, + 82.0, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k": [ + 82.0, + 82.0, + 82.1, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 81.9, + 82.1, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q": [ + 81.9, + 82.0, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q": [ + 82.2, + 82.0, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k": [ + 81.8, + 82.0, + 82.3, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k": [ + 82.0, + 82.1, + 82.3, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 82.1, + 82.0, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v": [ + 82.0, + 82.0, + 82.2, + 82.0 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2": [ + 82.0, + 82.0, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 81.9, + 81.9, + 82.2, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2": [ + 82.0, + 82.0, + 82.2, + 81.9 + ], + "down_blocks.2.resnets.0.time_emb_proj": [ + 81.9, + 81.9, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.8, + 82.0, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": [ + 82.0, + 82.0, + 82.2, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 81.8, + 82.0, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 81.8, + 82.0, + 82.3, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 81.9, + 81.9, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k": [ + 81.8, + 81.9, + 82.0, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q": [ + 82.0, + 82.1, + 82.0, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k": [ + 82.2, + 82.0, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v": [ + 82.0, + 82.0, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj": [ + 82.0, + 82.1, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2": [ + 81.9, + 82.0, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k": [ + 81.8, + 82.1, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 81.9, + 82.0, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj": [ + 82.0, + 82.0, + 
82.1, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v": [ + 82.0, + 82.1, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 81.9, + 82.1, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k": [ + 81.9, + 82.0, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v": [ + 82.0, + 81.9, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 82.0, + 82.0, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k": [ + 82.0, + 82.0, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v": [ + 81.9, + 81.9, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 81.9, + 81.9, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k": [ + 81.9, + 82.0, + 82.3, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v": [ + 81.9, + 82.0, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0": [ + 82.1, + 82.1, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": [ + 81.8, + 82.0, + 82.3, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v": [ + 82.0, + 82.0, + 82.2, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q": [ + 81.9, + 82.1, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k": [ + 81.9, + 81.9, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0": [ + 81.9, + 82.0, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2": [ + 81.9, + 82.0, + 82.2, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v": [ + 82.0, + 82.0, + 82.0, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k": [ + 81.8, + 82.2, + 82.2, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v": [ + 82.0, + 82.0, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0": [ + 82.0, + 82.0, + 82.2, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2": [ + 81.9, + 82.0, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q": [ + 81.7, + 81.9, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0": [ + 81.9, + 82.0, + 82.0, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k": [ + 81.9, + 82.0, + 81.9, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q": [ + 81.9, + 82.0, + 82.0, + 82.0 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v": [ + 81.8, + 82.0, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k": [ + 82.0, + 82.0, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k": [ + 81.7, + 81.9, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q": [ + 81.9, + 82.0, + 82.0, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q": [ + 81.9, + 82.0, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k": [ + 81.8, + 81.8, + 82.0, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2": [ + 81.8, + 81.9, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k": [ + 82.1, + 82.0, + 82.1, + 81.8 + ], + 
"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0": [ + 81.6, + 82.0, + 82.0, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2": [ + 82.0, + 81.9, + 82.1, + 82.0 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0": [ + 81.8, + 82.1, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v": [ + 81.8, + 81.9, + 81.9, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0": [ + 82.1, + 81.7, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k": [ + 81.8, + 82.0, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj": [ + 81.9, + 82.0, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k": [ + 81.9, + 82.0, + 82.1, + 81.9 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0": [ + 81.9, + 82.0, + 82.1, + 81.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 81.8, + 82.0, + 82.0, + 81.8 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 82.0, + 82.0, + 82.0, + 81.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 81.9, + 82.0, + 82.1, + 81.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 81.7, + 82.0, + 82.2, + 81.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 81.9, + 82.1, + 82.1, + 81.7 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": [ + 81.8, + 82.1, + 82.2, + 81.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 81.9, + 82.0, + 82.2, + 81.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 82.0, + 82.0, + 82.1, + 81.9 + ], + "mid_block.attentions.0.proj_in": [ + 81.8, + 82.0, + 82.1, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": [ + 81.9, + 81.9, + 82.1, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q": [ + 81.9, + 82.0, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 81.9, + 81.9, + 82.1, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 82.0, + 81.8, + 82.1, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q": [ + 82.0, + 82.0, + 82.1, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.2.ff.net.2": [ + 82.0, + 81.9, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k": [ + 82.0, + 82.0, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 81.8, + 82.0, + 82.1, + 82.0 + ], + "mid_block.attentions.0.transformer_blocks.3.ff.net.2": [ + 81.9, + 81.9, + 82.1, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 81.9, + 82.1, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q": [ + 81.8, + 81.8, + 82.2, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v": [ + 81.9, + 82.0, + 81.9, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v": [ + 81.9, + 81.9, + 81.9, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 81.8, + 82.0, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v": [ + 81.9, + 81.9, + 82.1, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k": [ + 82.0, + 82.0, + 82.1, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj": [ + 81.9, + 81.9, + 82.0, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k": [ + 82.0, + 81.9, + 
82.1, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0": [ + 81.8, + 81.9, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v": [ + 81.8, + 82.0, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q": [ + 81.7, + 82.0, + 82.0, + 81.9 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v": [ + 81.9, + 81.8, + 82.0, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0": [ + 82.0, + 82.1, + 82.0, + 82.0 + ], + "mid_block.resnets.0.conv2": [ + 81.9, + 82.0, + 82.0, + 81.9 + ], + "mid_block.resnets.1.conv1": [ + 81.9, + 82.0, + 82.0, + 81.9 + ], + "mid_block.resnets.1.time_emb_proj": [ + 81.9, + 82.1, + 82.1, + 81.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": [ + 81.9, + 82.0, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k": [ + 81.9, + 81.9, + 82.1, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v": [ + 81.9, + 82.0, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2": [ + 81.9, + 82.0, + 82.0, + 81.9 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v": [ + 81.9, + 82.0, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v": [ + 82.0, + 81.9, + 82.0, + 81.9 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 81.9, + 81.9, + 82.1, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q": [ + 81.9, + 81.9, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2": [ + 81.8, + 81.9, + 82.0, + 82.0 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q": [ + 81.9, + 81.9, + 82.1, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k": [ + 81.9, + 82.0, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0": [ + 82.0, + 81.9, + 82.1, + 81.8 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k": [ + 82.0, + 82.1, + 82.0, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 81.9, + 81.9, + 82.0, + 81.9 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v": [ + 81.8, + 81.8, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k": [ + 81.9, + 81.9, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0": [ + 81.9, + 82.0, + 82.0, + 81.7 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 81.7, + 82.0, + 81.9, + 81.9 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 81.9, + 82.0, + 81.9, + 81.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v": [ + 81.7, + 81.9, + 82.1, + 81.8 + ], + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v": [ + 81.8, + 81.8, + 82.0, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k": [ + 81.8, + 81.9, + 81.9, + 81.8 + ], + "mid_block.resnets.0.conv1": [ + 81.8, + 81.8, + 81.9, + 81.8 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": [ + 81.9, + 81.8, + 81.9, + 81.7 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 81.9, + 81.8, + 82.0, + 81.8 + ], + "down_blocks.1.resnets.1.time_emb_proj": [ + 81.8, + 81.9, + 81.8, + 81.8 + ], + "down_blocks.2.attentions.0.proj_in": [ + 82.0, + 81.8, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 81.8, + 81.8, + 81.9, + 81.6 + ], + 
"down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v": [ + 81.9, + 81.7, + 82.1, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0": [ + 81.8, + 81.8, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q": [ + 81.6, + 81.8, + 81.8, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2": [ + 81.8, + 81.6, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k": [ + 81.8, + 81.9, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q": [ + 82.0, + 81.8, + 82.0, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj": [ + 81.7, + 81.8, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q": [ + 81.8, + 81.6, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k": [ + 82.0, + 81.9, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q": [ + 81.7, + 81.8, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k": [ + 81.7, + 81.7, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v": [ + 81.9, + 81.7, + 81.9, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 81.6, + 81.8, + 82.0, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q": [ + 81.8, + 81.8, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k": [ + 81.7, + 81.8, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0": [ + 81.7, + 81.7, + 81.8, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 81.9, + 81.9, + 81.9, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v": [ + 81.9, + 81.8, + 81.9, + 81.8 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q": [ + 81.9, + 81.9, + 82.0, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v": [ + 81.8, + 81.9, + 81.8, + 81.7 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 81.8, + 81.8, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.0.proj_in": [ + 81.9, + 81.7, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": [ + 81.8, + 81.5, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": [ + 81.9, + 81.8, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k": [ + 81.7, + 81.7, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2": [ + 81.7, + 81.9, + 81.9, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q": [ + 81.8, + 81.8, + 81.8, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v": [ + 81.7, + 81.9, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 81.9, + 81.8, + 81.8, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2": [ + 81.9, + 81.7, + 81.8, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj": [ + 81.8, + 81.8, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q": [ + 81.7, + 81.8, + 82.0, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2": [ + 81.8, + 81.7, + 81.8, + 81.8 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v": [ + 81.7, + 81.7, + 82.0, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k": [ + 81.9, + 81.9, + 81.9, + 81.8 + ], + 
"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 81.7, + 81.8, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2": [ + 81.8, + 81.3, + 81.8, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0": [ + 81.8, + 81.6, + 81.7, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0": [ + 81.8, + 81.8, + 81.7, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0": [ + 81.8, + 81.8, + 81.8, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q": [ + 81.8, + 81.8, + 81.8, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v": [ + 81.8, + 81.8, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v": [ + 81.9, + 81.7, + 81.8, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0": [ + 81.8, + 81.7, + 81.8, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k": [ + 81.8, + 81.8, + 81.8, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q": [ + 81.9, + 81.7, + 81.8, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k": [ + 82.0, + 81.8, + 81.8, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 81.6, + 81.7, + 81.8, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2": [ + 81.8, + 81.9, + 81.6, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v": [ + 81.8, + 81.8, + 81.9, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q": [ + 81.7, + 81.9, + 81.9, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v": [ + 81.7, + 81.7, + 81.8, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q": [ + 81.7, + 81.7, + 81.9, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2": [ + 81.7, + 81.6, + 81.9, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2": [ + 81.7, + 81.6, + 81.7, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v": [ + 81.7, + 81.7, + 81.9, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v": [ + 81.8, + 81.8, + 81.6, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0": [ + 81.7, + 81.7, + 81.8, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q": [ + 81.6, + 81.7, + 81.9, + 81.7 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2": [ + 81.8, + 81.7, + 81.8, + 81.6 + ], + "up_blocks.0.resnets.1.conv2": [ + 81.6, + 81.7, + 81.8, + 81.6 + ], + "up_blocks.0.resnets.2.time_emb_proj": [ + 81.7, + 81.7, + 81.6, + 81.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": [ + 81.6, + 81.5, + 81.7, + 81.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": [ + 81.7, + 81.6, + 81.6, + 81.7 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": [ + 81.6, + 81.5, + 81.7, + 81.6 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q": [ + 81.6, + 81.5, + 81.7, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": [ + 81.7, + 81.6, + 81.7, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 81.7, + 81.6, + 81.7, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v": [ + 81.8, + 81.7, + 81.7, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v": [ + 81.8, + 81.6, + 81.7, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_q": [ + 81.7, + 81.7, + 81.7, + 
81.6 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0": [ + 81.7, + 81.7, + 81.8, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k": [ + 81.7, + 81.7, + 81.7, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v": [ + 81.7, + 81.7, + 81.7, + 81.7 + ], + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 81.7, + 81.7, + 81.7, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0": [ + 81.8, + 81.6, + 81.8, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.2": [ + 81.6, + 81.7, + 81.7, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k": [ + 81.5, + 81.7, + 81.8, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 81.7, + 81.7, + 81.7, + 81.7 + ], + "mid_block.resnets.0.time_emb_proj": [ + 81.7, + 81.7, + 81.6, + 81.5 + ], + "mid_block.resnets.1.conv2": [ + 81.7, + 81.7, + 81.8, + 81.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.8, + 81.7, + 81.7, + 81.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": [ + 81.7, + 81.6, + 81.7, + 81.5 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": [ + 81.7, + 81.6, + 81.7, + 81.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": [ + 81.7, + 81.6, + 81.8, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": [ + 81.7, + 81.6, + 81.7, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj": [ + 81.7, + 81.7, + 81.8, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0": [ + 81.6, + 81.7, + 81.7, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0": [ + 81.7, + 81.7, + 81.7, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k": [ + 81.7, + 81.7, + 81.7, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v": [ + 81.6, + 81.7, + 81.7, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v": [ + 81.6, + 81.6, + 81.7, + 81.5 + ], + "down_blocks.2.resnets.0.conv2": [ + 81.8, + 81.6, + 81.6, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": [ + 81.8, + 81.6, + 81.6, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 81.7, + 81.7, + 81.7, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2": [ + 81.7, + 81.7, + 81.7, + 81.7 + ], + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0": [ + 81.7, + 81.5, + 81.8, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q": [ + 81.8, + 81.7, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v": [ + 81.7, + 81.6, + 81.7, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 81.8, + 81.7, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q": [ + 81.7, + 81.7, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": [ + 81.7, + 81.6, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": [ + 81.8, + 81.6, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": [ + 81.8, + 81.7, + 81.8, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k": [ + 81.6, + 81.7, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj": [ + 81.6, + 81.5, + 81.8, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0": [ + 
81.7, + 81.6, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v": [ + 81.5, + 81.4, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k": [ + 81.7, + 81.6, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k": [ + 81.5, + 81.5, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q": [ + 81.7, + 81.5, + 81.8, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q": [ + 81.7, + 81.4, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj": [ + 81.6, + 81.6, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v": [ + 81.7, + 81.7, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj": [ + 81.7, + 81.5, + 81.6, + 81.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": [ + 81.6, + 81.6, + 81.7, + 81.4 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": [ + 81.7, + 81.5, + 81.6, + 81.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 81.7, + 81.6, + 81.7, + 81.5 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": [ + 81.6, + 81.6, + 81.6, + 81.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k": [ + 81.7, + 81.5, + 81.8, + 81.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 81.6, + 81.6, + 81.7, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 81.7, + 81.5, + 81.6, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_q": [ + 81.5, + 81.5, + 81.6, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0": [ + 81.6, + 81.6, + 81.6, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.7.ff.net.2": [ + 81.6, + 81.4, + 81.6, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q": [ + 81.6, + 81.5, + 81.6, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q": [ + 81.6, + 81.4, + 81.5, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q": [ + 81.6, + 81.5, + 81.7, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0": [ + 81.5, + 81.6, + 81.6, + 81.5 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0": [ + 81.5, + 81.5, + 81.6, + 81.7 + ], + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2": [ + 81.7, + 81.5, + 81.6, + 81.6 + ], + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 81.7, + 81.6, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.0.proj_out": [ + 81.6, + 81.4, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q": [ + 81.6, + 81.5, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q": [ + 81.7, + 81.4, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q": [ + 81.7, + 81.5, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0": [ + 81.6, + 81.5, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k": [ + 81.5, + 81.5, + 81.6, + 81.5 + ], + "up_blocks.0.resnets.0.conv1": [ + 81.7, + 81.5, + 81.6, + 81.6 + ], + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q": [ + 81.5, + 81.5, + 81.6, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q": [ + 81.6, + 81.4, + 81.6, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q": [ + 81.7, + 81.5, + 81.6, + 81.5 + ], + 
"mid_block.attentions.0.transformer_blocks.8.attn1.to_v": [ + 81.6, + 81.6, + 81.7, + 81.5 + ], + "mid_block.attentions.0.proj_out": [ + 81.7, + 81.5, + 81.6, + 81.6 + ], + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v": [ + 81.7, + 81.5, + 81.7, + 81.5 + ], + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2": [ + 81.7, + 81.5, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2": [ + 81.5, + 81.6, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 81.6, + 81.5, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q": [ + 81.7, + 81.6, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2": [ + 81.7, + 81.5, + 81.5, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v": [ + 81.7, + 81.6, + 81.7, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v": [ + 81.6, + 81.7, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 81.6, + 81.6, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v": [ + 81.5, + 81.6, + 81.6, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q": [ + 81.5, + 81.6, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj": [ + 81.6, + 81.6, + 81.6, + 81.5 + ], + "up_blocks.1.resnets.0.time_emb_proj": [ + 81.5, + 81.5, + 81.5, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 81.6, + 81.6, + 81.7, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q": [ + 81.5, + 81.6, + 81.6, + 81.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": [ + 81.6, + 81.6, + 81.5, + 81.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 81.5, + 81.6, + 81.6, + 81.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": [ + 81.4, + 81.6, + 81.7, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": [ + 81.6, + 81.6, + 81.7, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 81.5, + 81.6, + 81.6, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q": [ + 81.5, + 81.6, + 81.5, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj": [ + 81.7, + 81.6, + 81.6, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0": [ + 81.5, + 81.6, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q": [ + 81.5, + 81.5, + 81.7, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 81.6, + 81.6, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0": [ + 81.6, + 81.6, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 81.5, + 81.4, + 81.7, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k": [ + 81.5, + 81.5, + 81.7, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 81.5, + 81.4, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v": [ + 81.6, + 81.5, + 81.5, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0": [ + 81.5, + 81.5, + 81.5, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2": [ + 81.5, + 81.5, + 81.6, + 81.6 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2": [ + 81.5, + 81.4, + 81.7, + 81.4 + ], + 
"up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v": [ + 81.4, + 81.5, + 81.5, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0": [ + 81.5, + 81.6, + 81.5, + 81.5 + ], + "up_blocks.0.resnets.1.conv1": [ + 81.5, + 81.5, + 81.5, + 81.5 + ], + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k": [ + 81.4, + 81.4, + 81.5, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k": [ + 81.6, + 81.6, + 81.6, + 81.4 + ], + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj": [ + 81.5, + 81.5, + 81.4, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2": [ + 81.4, + 81.5, + 81.6, + 81.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": [ + 81.5, + 81.5, + 81.5, + 81.4 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 81.4, + 81.4, + 81.5, + 81.4 + ], + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q": [ + 81.4, + 81.5, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": [ + 81.4, + 81.5, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 81.4, + 81.4, + 81.4, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": [ + 81.3, + 81.4, + 81.7, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 81.2, + 81.5, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v": [ + 81.4, + 81.5, + 81.5, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0": [ + 81.4, + 81.4, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0": [ + 81.4, + 81.3, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj": [ + 81.3, + 81.7, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v": [ + 81.4, + 81.5, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2": [ + 81.5, + 81.4, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj": [ + 81.4, + 81.5, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0": [ + 81.5, + 81.5, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj": [ + 81.5, + 81.5, + 81.4, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj": [ + 81.5, + 81.5, + 81.6, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0": [ + 81.5, + 81.4, + 81.5, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj": [ + 81.5, + 81.5, + 81.5, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v": [ + 81.4, + 81.5, + 81.5, + 81.4 + ], + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj": [ + 81.5, + 81.5, + 81.6, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v": [ + 81.4, + 81.6, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0": [ + 81.5, + 81.5, + 81.7, + 81.5 + ], + "up_blocks.0.resnets.2.conv1": [ + 81.3, + 81.5, + 81.7, + 81.4 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 81.4, + 81.5, + 81.5, + 81.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": [ + 81.3, + 81.4, + 81.5, + 81.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": [ + 81.3, + 81.5, + 81.7, + 81.4 + ], + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k": [ + 81.3, + 81.5, + 81.6, + 81.5 + ], + "up_blocks.0.attentions.1.proj_in": [ + 81.3, + 81.4, + 81.7, + 
81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0": [ + 81.4, + 81.6, + 81.6, + 81.3 + ], + "up_blocks.0.resnets.2.conv2": [ + 81.4, + 81.4, + 81.6, + 81.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": [ + 81.3, + 81.4, + 81.7, + 81.4 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 81.5, + 81.5, + 81.5, + 81.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": [ + 81.3, + 81.4, + 81.7, + 81.3 + ], + "down_blocks.2.attentions.0.proj_out": [ + 81.4, + 81.4, + 81.6, + 81.3 + ], + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q": [ + 81.5, + 81.4, + 81.7, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2": [ + 81.4, + 81.4, + 81.6, + 81.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q": [ + 81.4, + 81.5, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj": [ + 81.4, + 81.2, + 81.5, + 81.2 + ], + "up_blocks.0.resnets.0.conv_shortcut": [ + 81.4, + 81.4, + 81.4, + 81.4 + ], + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj": [ + 81.3, + 81.4, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj": [ + 81.2, + 81.3, + 81.3, + 81.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": [ + 81.4, + 81.2, + 81.5, + 81.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": [ + 81.3, + 81.2, + 81.4, + 81.3 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 81.2, + 81.2, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2": [ + 81.4, + 81.2, + 81.4, + 81.2 + ], + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj": [ + 81.1, + 81.3, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2": [ + 81.2, + 81.2, + 81.5, + 81.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0": [ + 81.3, + 81.1, + 81.4, + 81.2 + ], + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0": [ + 81.4, + 81.3, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2": [ + 81.2, + 81.3, + 81.5, + 81.3 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": [ + 81.2, + 81.2, + 81.4, + 81.3 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 81.3, + 81.3, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 81.3, + 81.4, + 81.5, + 81.3 + ], + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v": [ + 81.3, + 81.4, + 81.3, + 81.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": [ + 81.2, + 81.4, + 81.4, + 81.2 + ], + "up_blocks.1.resnets.0.conv1": [ + 81.2, + 81.3, + 81.4, + 81.3 + ], + "down_blocks.1.downsamplers.0.conv": [ + 81.1, + 81.3, + 81.4, + 81.3 + ], + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj": [ + 81.0, + 81.2, + 81.5, + 81.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k": [ + 81.1, + 81.1, + 81.4, + 81.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": [ + 81.2, + 81.3, + 81.5, + 81.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 81.2, + 81.2, + 81.3, + 81.2 + ], + "up_blocks.0.attentions.1.proj_out": [ + 81.2, + 81.2, + 81.4, + 81.2 + ], + "up_blocks.0.resnets.1.conv_shortcut": [ + 81.2, + 81.3, + 81.4, + 81.3 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": [ + 81.1, + 81.2, + 81.5, + 81.2 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 
81.1, + 81.3, + 81.3, + 81.2 + ], + "down_blocks.1.resnets.1.conv1": [ + 81.2, + 81.2, + 81.5, + 81.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": [ + 81.2, + 81.3, + 81.5, + 81.0 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 81.1, + 81.2, + 81.3, + 81.2 + ], + "down_blocks.1.resnets.1.conv2": [ + 81.1, + 81.1, + 81.4, + 81.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": [ + 81.1, + 81.3, + 81.3, + 81.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 81.0, + 81.2, + 81.5, + 81.0 + ], + "down_blocks.1.resnets.0.time_emb_proj": [ + 81.0, + 81.2, + 81.3, + 81.1 + ], + "up_blocks.0.attentions.2.proj_in": [ + 81.0, + 81.3, + 81.3, + 81.1 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 81.0, + 81.1, + 81.2, + 81.2 + ], + "down_blocks.2.resnets.0.conv_shortcut": [ + 80.9, + 81.2, + 81.2, + 81.1 + ], + "up_blocks.1.resnets.1.time_emb_proj": [ + 81.1, + 81.2, + 81.1, + 81.0 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": [ + 81.0, + 81.2, + 81.2, + 81.1 + ], + "down_blocks.1.attentions.1.proj_in": [ + 80.9, + 81.1, + 81.1, + 81.0 + ], + "down_blocks.1.resnets.0.conv2": [ + 80.8, + 81.1, + 81.3, + 80.9 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 80.9, + 81.1, + 81.3, + 80.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 80.8, + 81.0, + 81.2, + 80.9 + ], + "up_blocks.0.attentions.2.proj_out": [ + 80.9, + 81.0, + 81.1, + 81.0 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": [ + 80.9, + 81.1, + 81.2, + 80.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q": [ + 80.7, + 81.0, + 81.2, + 81.0 + ], + "up_blocks.1.resnets.0.conv2": [ + 80.8, + 81.0, + 81.1, + 80.8 + ], + "up_blocks.2.resnets.2.time_emb_proj": [ + 80.6, + 80.8, + 81.1, + 80.8 + ], + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 80.6, + 80.8, + 81.0, + 80.7 + ], + "down_blocks.1.attentions.1.proj_out": [ + 80.6, + 80.7, + 81.0, + 80.7 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 80.6, + 80.6, + 80.9, + 80.6 + ], + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 80.6, + 80.6, + 80.9, + 80.7 + ], + "up_blocks.1.resnets.2.time_emb_proj": [ + 80.6, + 80.6, + 80.9, + 80.6 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 80.7, + 80.6, + 80.9, + 80.6 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 80.7, + 80.5, + 80.8, + 80.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 80.7, + 80.5, + 80.8, + 80.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": [ + 80.5, + 80.6, + 80.9, + 80.5 + ], + "up_blocks.1.attentions.0.proj_out": [ + 80.6, + 80.5, + 80.8, + 80.5 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": [ + 80.5, + 80.4, + 80.8, + 80.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 80.7, + 80.5, + 80.7, + 80.5 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 80.5, + 80.5, + 80.7, + 80.5 + ], + "up_blocks.1.resnets.1.conv2": [ + 80.6, + 80.4, + 80.6, + 80.5 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": [ + 80.5, + 80.5, + 80.6, + 80.5 + ], + "up_blocks.2.resnets.2.conv_shortcut": [ + 80.4, + 80.5, + 80.5, + 80.3 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": [ + 80.5, + 80.6, + 80.6, + 80.2 + ], + "down_blocks.1.resnets.0.conv1": [ + 80.5, + 80.5, + 80.7, + 
80.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": [ + 80.3, + 80.3, + 80.6, + 80.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": [ + 80.3, + 80.4, + 80.4, + 80.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": [ + 80.4, + 80.4, + 80.3, + 80.2 + ], + "down_blocks.1.attentions.0.proj_out": [ + 80.2, + 80.5, + 80.4, + 80.2 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": [ + 80.2, + 80.4, + 80.5, + 80.2 + ], + "up_blocks.1.attentions.1.proj_out": [ + 80.3, + 80.4, + 80.3, + 80.2 + ], + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": [ + 80.1, + 80.3, + 80.4, + 80.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": [ + 80.2, + 80.3, + 80.3, + 80.1 + ], + "up_blocks.1.resnets.1.conv1": [ + 80.1, + 80.2, + 80.2, + 80.2 + ], + "up_blocks.0.resnets.2.conv_shortcut": [ + 80.1, + 80.1, + 80.1, + 80.2 + ], + "down_blocks.1.attentions.0.proj_in": [ + 80.1, + 80.2, + 80.3, + 80.1 + ], + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": [ + 80.1, + 80.2, + 80.2, + 79.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2": [ + 79.9, + 80.1, + 80.3, + 79.8 + ], + "up_blocks.2.resnets.1.time_emb_proj": [ + 79.8, + 80.0, + 80.2, + 79.9 + ], + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": [ + 79.8, + 79.9, + 80.1, + 79.9 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": [ + 79.9, + 79.9, + 80.1, + 79.9 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj": [ + 79.8, + 79.7, + 80.0, + 79.7 + ], + "up_blocks.2.resnets.0.time_emb_proj": [ + 79.8, + 79.8, + 79.8, + 79.8 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": [ + 79.8, + 79.8, + 79.8, + 79.8 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": [ + 79.9, + 79.8, + 80.0, + 79.8 + ], + "up_blocks.1.attentions.0.proj_in": [ + 79.7, + 79.8, + 80.0, + 80.0 + ], + "up_blocks.1.resnets.2.conv2": [ + 79.6, + 79.9, + 80.0, + 79.7 + ], + "time_embedding.linear_1": [ + 79.7, + 79.6, + 79.8, + 79.6 + ], + "up_blocks.1.resnets.2.conv1": [ + 79.7, + 79.5, + 79.8, + 79.6 + ], + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": [ + 79.7, + 79.4, + 79.8, + 79.5 + ], + "add_embedding.linear_1": [ + 79.7, + 79.4, + 79.7, + 79.5 + ], + "up_blocks.0.upsamplers.0.conv": [ + 79.5, + 79.3, + 79.6, + 79.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0": [ + 79.6, + 79.3, + 79.6, + 79.4 + ], + "add_embedding.linear_2": [ + 79.6, + 79.4, + 79.6, + 79.4 + ], + "up_blocks.1.attentions.1.proj_in": [ + 79.6, + 79.3, + 79.5, + 79.4 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": [ + 79.4, + 79.4, + 79.5, + 79.3 + ], + "up_blocks.1.resnets.0.conv_shortcut": [ + 79.4, + 79.1, + 79.2, + 79.2 + ], + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": [ + 79.4, + 79.0, + 79.4, + 79.0 + ], + "down_blocks.0.downsamplers.0.conv": [ + 79.3, + 78.9, + 79.5, + 79.1 + ], + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": [ + 79.3, + 78.8, + 79.4, + 79.2 + ], + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v": [ + 79.2, + 79.0, + 79.3, + 79.1 + ], + "up_blocks.1.attentions.2.proj_out": [ + 79.3, + 79.0, + 79.1, + 79.3 + ], + "down_blocks.1.resnets.0.conv_shortcut": [ + 79.2, + 78.8, + 79.0, + 78.9 + ], + "up_blocks.1.attentions.2.proj_in": [ + 79.3, + 78.8, + 79.1, + 79.0 + ], + "down_blocks.0.resnets.1.time_emb_proj": [ + 79.0, + 78.8, + 78.9, + 78.8 + ], + 
"up_blocks.1.resnets.1.conv_shortcut": [ + 78.9, + 78.5, + 78.7, + 78.5 + ], + "up_blocks.2.resnets.2.conv2": [ + 78.4, + 78.0, + 78.0, + 78.1 + ], + "down_blocks.0.resnets.0.time_emb_proj": [ + 78.1, + 78.0, + 78.2, + 78.1 + ], + "time_embedding.linear_2": [ + 78.1, + 77.9, + 78.5, + 78.6 + ], + "up_blocks.1.resnets.2.conv_shortcut": [ + 77.9, + 77.8, + 78.4, + 78.3 + ], + "up_blocks.1.upsamplers.0.conv": [ + 77.7, + 77.5, + 78.2, + 78.1 + ], + "down_blocks.0.resnets.0.conv1": [ + 77.6, + 77.4, + 78.1, + 78.2 + ], + "up_blocks.2.resnets.0.conv2": [ + 77.4, + 77.3, + 77.7, + 78.0 + ], + "up_blocks.2.resnets.1.conv2": [ + 77.4, + 77.3, + 77.8, + 77.8 + ], + "up_blocks.2.resnets.1.conv_shortcut": [ + 77.2, + 77.1, + 77.6, + 77.6 + ], + "down_blocks.0.resnets.1.conv2": [ + 77.3, + 77.2, + 77.6, + 77.8 + ], + "up_blocks.2.resnets.0.conv1": [ + 77.3, + 77.0, + 77.3, + 77.5 + ], + "up_blocks.2.resnets.0.conv_shortcut": [ + 77.1, + 77.1, + 77.0, + 77.0 + ], + "down_blocks.0.resnets.0.conv2": [ + 77.0, + 77.0, + 77.0, + 77.0 + ], + "up_blocks.2.resnets.1.conv1": [ + 76.7, + 76.5, + 76.7, + 76.4 + ], + "up_blocks.2.resnets.2.conv1": [ + 76.2, + 76.1, + 76.1, + 76.0 + ], + "down_blocks.0.resnets.1.conv1": [ + 75.8, + 75.5, + 75.9, + 75.9 + ] + } + }, + "model_version": "stabilityai/stable-diffusion-xl-base-1.0", + "baselines": { + "original": 82.2, + "linear_8bit": 66.025, + "recipe_6.55_bit_mixedpalette": 79.9, + "recipe_4.50_bit_mixedpalette": 75.8, + "recipe_3.41_bit_mixedpalette": 71.7, + "recipe_3.05_bit_mixedpalette": 69.2, + "recipe_2.81_bit_mixedpalette": 66.8, + "recipe_2.31_bit_mixedpalette": 64.3 + }, + "recipes": { + "recipe_6.55_bit_mixedpalette": { + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q": 1, + 
"down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0": 4, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k": 4, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v": 4, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k": 4, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q": 4, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v": 2, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k": 4, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q": 2, + 
"down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q": 4, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k": 2, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v": 4, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q": 2, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_q": 2, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0": 4, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 4, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q": 2, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0": 4, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k": 4, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0": 4, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k": 4, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0": 4, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0": 4, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k": 4, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k": 4, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q": 4, + 
"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0": 6, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k": 2, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v": 6, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0": 6, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v": 6, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q": 4, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k": 6, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k": 4, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k": 4, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k": 6, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v": 6, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q": 6, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q": 4, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q": 4, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q": 6, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v": 6, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0": 4, + 
"down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k": 4, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k": 6, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k": 4, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q": 6, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k": 4, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q": 4, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k": 4, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k": 4, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k": 6, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q": 6, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0": 4, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k": 4, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k": 4, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v": 6, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v": 6, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v": 6, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k": 6, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q": 4, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0": 4, + 
"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q": 4, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k": 4, + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2": 6, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0": 6, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k": 4, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q": 6, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k": 6, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k": 4, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q": 4, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v": 8, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q": 4, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k": 6, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k": 6, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k": 4, + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2": 6, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v": 4, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v": 6, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k": 4, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k": 4, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0": 6, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v": 4, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0": 6, + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2": 6, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2": 6, + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2": 4, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k": 4, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k": 4, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0": 4, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0": 6, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q": 4, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k": 4, + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj": 6, + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2": 6, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0": 6, + 
"mid_block.attentions.0.transformer_blocks.6.attn1.to_v": 6, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0": 6, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2": 6, + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2": 6, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v": 6, + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2": 6, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q": 4, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v": 6, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v": 6, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q": 4, + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2": 6, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k": 4, + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2": 6, + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2": 6, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v": 6, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0": 6, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k": 6, + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2": 4, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v": 6, + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2": 6, + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj": 6, + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj": 6, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj": 6, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0": 6, + "down_blocks.2.resnets.1.time_emb_proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q": 6, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q": 6, + "mid_block.attentions.0.transformer_blocks.7.ff.net.2": 4, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v": 6, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k": 4, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0": 6, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k": 6, + "mid_block.attentions.0.transformer_blocks.8.ff.net.2": 6, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k": 4, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v": 6, + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj": 6, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj": 6, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0": 6, + "mid_block.resnets.1.time_emb_proj": 6, + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2": 6, + "mid_block.attentions.0.transformer_blocks.6.ff.net.2": 4, + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj": 6, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q": 6, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v": 6, + 
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q": 4, + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj": 6, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k": 6, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0": 6, + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj": 6, + "mid_block.attentions.0.transformer_blocks.5.ff.net.2": 8, + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj": 6, + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2": 4, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k": 6, + "mid_block.attentions.0.transformer_blocks.2.ff.net.2": 6, + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2": 6, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0": 8, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.4.ff.net.2": 6, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k": 6, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v": 6, + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj": 6, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q": 6, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v": 6, + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k": 6, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q": 6, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v": 6, + "up_blocks.0.resnets.0.time_emb_proj": 6, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0": 6, + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2": 8, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k": 6, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0": 8, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k": 6, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0": 6, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k": 6, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q": 8, + "mid_block.attentions.0.transformer_blocks.9.ff.net.2": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v": 6, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0": 6, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k": 6, + "mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q": 6, + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj": 6, + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2": 6, + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj": 6, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q": 6, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0": 6, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v": 6, + 
"mid_block.attentions.0.transformer_blocks.2.attn1.to_k": 6, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v": 6, + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj": 6, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v": 6, + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2": 8, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k": 6, + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj": 6, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q": 6, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v": 6, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q": 6, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q": 6, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0": 6, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k": 6, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q": 6, + "mid_block.attentions.0.transformer_blocks.3.ff.net.2": 6, + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2": 6, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k": 6, + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj": 6, + "mid_block.resnets.0.time_emb_proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k": 6, + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2": 6, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k": 6, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q": 6, + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj": 6, + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj": 6, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v": 8, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k": 6, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v": 4, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0": 6, + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2": 8, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k": 6, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q": 6, + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q": 6, + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj": 6, + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj": 6, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v": 8, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k": 6, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k": 6, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q": 6, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q": 6, + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2": 6, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v": 8, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj": 8, + 
"up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k": 6, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q": 6, + "mid_block.attentions.0.transformer_blocks.1.ff.net.2": 6, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v": 6, + "mid_block.resnets.1.conv1": 8, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v": 6, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 6, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k": 8, + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2": 6, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q": 6, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k": 6, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q": 6, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 8, + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2": 6, + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj": 8, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k": 6, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v": 6, + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj": 6, + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj": 8, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q": 6, + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj": 6, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v": 6, + "down_blocks.2.attentions.1.proj_in": 8, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k": 6, + "mid_block.resnets.1.conv2": 6, + "down_blocks.2.resnets.1.conv1": 6, + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v": 6, + "up_blocks.0.resnets.0.conv1": 8, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v": 6, + "down_blocks.2.resnets.1.conv2": 6, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q": 6, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0": 8, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0": 8, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2": 8, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v": 8, + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2": 8, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v": 8, + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj": 8, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0": 6, + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + 
"mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2": 8, + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2": 8, + "up_blocks.0.resnets.1.time_emb_proj": 8, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2": 8, + "mid_block.resnets.0.conv1": 6, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0": 6, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0": 8, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v": 8, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v": 8, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v": 8, + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2": 8, + "down_blocks.2.resnets.0.time_emb_proj": 6, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v": 8, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj": 6, + "mid_block.resnets.0.conv2": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.0.resnets.1.conv2": 8, + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2": 8, + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2": 8, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v": 8, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v": 8, + "down_blocks.2.attentions.1.proj_out": 8, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0": 8, + "up_blocks.0.resnets.2.time_emb_proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v": 8, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0": 8, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0": 8, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0": 8, + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj": 8, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0": 8, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v": 8, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0": 8, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v": 8, + "mid_block.attentions.0.proj_out": 6, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q": 6, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v": 6, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v": 8, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0": 8, + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj": 8, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0": 8, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v": 8, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0": 8, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k": 6, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0": 8, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v": 8, + 
"up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0": 8, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0": 8, + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj": 8, + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2": 8, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v": 8, + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj": 8, + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2": 8, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.0.resnets.1.conv1": 8, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v": 8, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v": 8, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v": 8, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v": 8, + "up_blocks.0.resnets.0.conv2": 8, + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj": 8, + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj": 8, + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2": 8, + "up_blocks.0.resnets.2.conv2": 8, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0": 8, + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj": 8, + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2": 8, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v": 8, + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2": 8, + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj": 8, + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2": 8, + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj": 8, + "up_blocks.0.attentions.0.proj_out": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2": 8, + "up_blocks.0.resnets.2.conv1": 8, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0": 8, + "down_blocks.2.resnets.0.conv2": 8, + "up_blocks.0.resnets.0.conv_shortcut": 8, + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v": 8, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj": 8, + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj": 8, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": 8, + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj": 8, + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj": 8, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2": 8, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": 6, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj": 8, + "down_blocks.2.attentions.0.proj_in": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "up_blocks.0.attentions.0.proj_in": 8, + 
"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": 8, + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj": 8, + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj": 8, + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj": 8, + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 8, + "down_blocks.2.attentions.0.proj_out": 8, + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2": 8, + "mid_block.attentions.0.proj_in": 8, + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": 8, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": 8, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": 8, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": 8, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k": 4, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0": 8, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": 6, + "up_blocks.0.attentions.2.proj_in": 16, + "down_blocks.2.resnets.0.conv1": 6, + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj": 8, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": 8, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 8, + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": 8, + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj": 8, + "up_blocks.1.resnets.0.time_emb_proj": 8, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": 8, + "up_blocks.0.attentions.2.proj_out": 16, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q": 6, + "add_embedding.linear_2": 16, + "down_blocks.1.resnets.1.time_emb_proj": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.0.resnets.1.conv_shortcut": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.0.attentions.1.proj_in": 8, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 8, + "add_embedding.linear_1": 16, + "up_blocks.1.resnets.0.conv1": 8, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": 8, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": 8, + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": 16, + "up_blocks.1.resnets.0.conv2": 16, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": 16, + "up_blocks.0.attentions.1.proj_out": 
8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 16, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 8, + "down_blocks.1.resnets.0.time_emb_proj": 16, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 16, + "down_blocks.1.resnets.1.conv2": 16, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 16, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 8, + "up_blocks.1.attentions.0.proj_out": 16, + "down_blocks.1.attentions.1.proj_out": 16, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": 8, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": 8, + "down_blocks.1.resnets.1.conv1": 8, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": 8, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": 16, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k": 16, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 16, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": 8, + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": 16, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 8, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": 16, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": 16, + "down_blocks.2.resnets.0.conv_shortcut": 16, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 8, + "down_blocks.1.resnets.0.conv2": 16, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 8, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q": 16, + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": 16, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": 8, + "down_blocks.1.attentions.1.proj_in": 16, + "up_blocks.1.resnets.1.time_emb_proj": 16, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 16, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.0.resnets.2.conv_shortcut": 16, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": 16, + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": 16, + "up_blocks.1.attentions.0.proj_in": 16, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 16, + "down_blocks.1.downsamplers.0.conv": 8, + "up_blocks.1.resnets.2.time_emb_proj": 16, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2": 16, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": 16, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.1.attentions.1.proj_out": 16, + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": 16, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 16, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 16, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0": 16, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": 16, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": 16, + 
"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 16, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v": 16, + "up_blocks.0.upsamplers.0.conv": 16, + "down_blocks.1.attentions.0.proj_out": 16, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 16, + "up_blocks.1.resnets.1.conv2": 16, + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": 16, + "up_blocks.1.resnets.2.conv2": 16, + "up_blocks.1.resnets.1.conv1": 16, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": 8, + "up_blocks.1.attentions.2.proj_out": 16, + "up_blocks.1.attentions.1.proj_in": 16, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 16, + "down_blocks.1.resnets.0.conv1": 16, + "up_blocks.1.attentions.2.proj_in": 16, + "up_blocks.1.resnets.2.conv1": 16, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 16, + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": 16, + "down_blocks.0.resnets.1.time_emb_proj": 16, + "up_blocks.2.resnets.0.time_emb_proj": 16, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 16, + "down_blocks.1.attentions.0.proj_in": 16, + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj": 16, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 16, + "up_blocks.2.resnets.1.time_emb_proj": 16, + "up_blocks.2.resnets.2.time_emb_proj": 16, + "down_blocks.0.resnets.0.time_emb_proj": 16, + "up_blocks.1.resnets.0.conv_shortcut": 16, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k": 6, + "down_blocks.0.resnets.1.conv2": 16, + "down_blocks.0.resnets.0.conv1": 16, + "up_blocks.1.resnets.1.conv_shortcut": 16, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.resnets.1.conv1": 16, + "down_blocks.1.resnets.0.conv_shortcut": 16, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.resnets.0.conv2": 16, + "down_blocks.0.downsamplers.0.conv": 16, + "up_blocks.2.resnets.0.conv1": 16, + "up_blocks.2.resnets.0.conv2": 16, + "up_blocks.1.resnets.2.conv_shortcut": 16, + "up_blocks.2.resnets.1.conv2": 16, + "time_embedding.linear_2": 16, + "up_blocks.2.resnets.1.conv1": 16, + "up_blocks.1.upsamplers.0.conv": 16, + "time_embedding.linear_1": 16, + "up_blocks.2.resnets.0.conv_shortcut": 16, + "up_blocks.2.resnets.2.conv1": 16, + "up_blocks.2.resnets.1.conv_shortcut": 16, + "up_blocks.2.resnets.2.conv2": 16, + "up_blocks.2.resnets.2.conv_shortcut": 16 + }, + "recipe_4.50_bit_mixedpalette": { + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q": 1, + 
"mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v": 1, + 
"down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k": 1, + 
"mid_block.attentions.0.transformer_blocks.2.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v": 1, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k": 2, + 
"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0": 4, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q": 2, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q": 2, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q": 2, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q": 2, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v": 2, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0": 2, + 
"up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v": 4, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k": 2, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q": 2, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2": 4, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k": 4, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q": 4, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k": 2, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q": 2, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v": 4, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k": 4, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0": 6, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v": 2, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0": 4, + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0": 4, + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2": 4, + 
"down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2": 4, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k": 2, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k": 4, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0": 4, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj": 4, + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0": 6, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2": 4, + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2": 4, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q": 2, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q": 4, + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k": 4, + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2": 4, + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2": 4, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k": 4, + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v": 4, + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2": 4, + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj": 4, + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0": 4, + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj": 4, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0": 4, + "down_blocks.2.resnets.1.time_emb_proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q": 4, + "mid_block.attentions.0.transformer_blocks.7.ff.net.2": 4, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k": 4, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0": 4, + 
"up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k": 4, + "mid_block.attentions.0.transformer_blocks.8.ff.net.2": 4, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k": 4, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v": 4, + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj": 4, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0": 4, + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj": 4, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0": 4, + "mid_block.resnets.1.time_emb_proj": 4, + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.6.ff.net.2": 4, + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q": 4, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q": 4, + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj": 4, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k": 4, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0": 4, + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj": 4, + "mid_block.attentions.0.transformer_blocks.5.ff.net.2": 4, + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2": 4, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k": 4, + "mid_block.attentions.0.transformer_blocks.2.ff.net.2": 4, + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.4.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v": 4, + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v": 4, + "up_blocks.0.resnets.0.time_emb_proj": 4, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2": 6, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k": 4, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k": 4, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q": 4, + 
"mid_block.attentions.0.transformer_blocks.9.ff.net.2": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0": 6, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k": 4, + "mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q": 4, + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj": 4, + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2": 4, + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q": 4, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0": 4, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v": 4, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k": 4, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v": 4, + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj": 4, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2": 6, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k": 4, + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj": 4, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q": 4, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q": 4, + "mid_block.attentions.0.transformer_blocks.3.ff.net.2": 4, + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2": 6, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k": 4, + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj": 4, + "mid_block.resnets.0.time_emb_proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2": 6, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q": 4, + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj": 4, + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj": 4, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v": 4, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2": 4, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k": 4, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj": 6, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q": 4, + 
"up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj": 4, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2": 4, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj": 4, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q": 4, + "mid_block.attentions.0.transformer_blocks.1.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v": 4, + "mid_block.resnets.1.conv1": 6, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2": 6, + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj": 6, + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q": 4, + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v": 4, + "down_blocks.2.attentions.1.proj_in": 4, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k": 4, + "mid_block.resnets.1.conv2": 6, + "down_blocks.2.resnets.1.conv1": 6, + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v": 4, + "up_blocks.0.resnets.0.conv1": 6, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v": 4, + "down_blocks.2.resnets.1.conv2": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0": 6, + 
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 4, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0": 6, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2": 6, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2": 6, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj": 6, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0": 6, + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2": 6, + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2": 6, + "up_blocks.0.resnets.1.time_emb_proj": 6, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2": 6, + "mid_block.resnets.0.conv1": 6, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v": 6, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2": 6, + "down_blocks.2.resnets.0.time_emb_proj": 6, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v": 6, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj": 6, + "mid_block.resnets.0.conv2": 6, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.0.resnets.1.conv2": 6, + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2": 6, + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2": 6, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v": 6, + "down_blocks.2.attentions.1.proj_out": 6, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0": 6, + "up_blocks.0.resnets.2.time_emb_proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v": 6, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0": 6, + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj": 6, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0": 6, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v": 6, + "mid_block.attentions.0.proj_out": 4, + 
"mid_block.attentions.0.transformer_blocks.1.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v": 6, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0": 6, + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v": 6, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k": 6, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0": 6, + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj": 6, + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2": 6, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v": 6, + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2": 6, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k": 2, + "up_blocks.0.resnets.1.conv1": 6, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v": 6, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v": 6, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": 6, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v": 4, + "up_blocks.0.resnets.0.conv2": 6, + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj": 6, + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2": 6, + "up_blocks.0.resnets.2.conv2": 6, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0": 6, + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj": 6, + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2": 6, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2": 6, + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2": 6, + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj": 6, + "up_blocks.0.attentions.0.proj_out": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2": 6, + "up_blocks.0.resnets.2.conv1": 6, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0": 6, + "down_blocks.2.resnets.0.conv2": 6, + "up_blocks.0.resnets.0.conv_shortcut": 6, + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 6, + 
"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": 6, + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj": 6, + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj": 6, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj": 6, + "down_blocks.2.attentions.0.proj_in": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.0.attentions.0.proj_in": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": 4, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": 6, + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj": 6, + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.2.attentions.0.proj_out": 6, + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2": 6, + "mid_block.attentions.0.proj_in": 6, + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": 6, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k": 4, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": 4, + "up_blocks.0.attentions.2.proj_in": 6, + "down_blocks.2.resnets.0.conv1": 6, + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": 6, + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj": 6, + "up_blocks.1.resnets.0.time_emb_proj": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": 6, + "up_blocks.0.attentions.2.proj_out": 6, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q": 6, + "add_embedding.linear_2": 8, + "down_blocks.1.resnets.1.time_emb_proj": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + 
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.0.resnets.1.conv_shortcut": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.0.attentions.1.proj_in": 6, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "add_embedding.linear_1": 8, + "up_blocks.1.resnets.0.conv1": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": 8, + "up_blocks.1.resnets.0.conv2": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": 8, + "up_blocks.0.attentions.1.proj_out": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.1.resnets.0.time_emb_proj": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.resnets.1.conv2": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.1.attentions.0.proj_out": 8, + "down_blocks.1.attentions.1.proj_out": 8, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": 8, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": 6, + "down_blocks.1.resnets.1.conv1": 8, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": 8, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k": 8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": 8, + "down_blocks.2.resnets.0.conv_shortcut": 8, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.1.resnets.0.conv2": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q": 8, + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": 8, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": 6, + "down_blocks.1.attentions.1.proj_in": 8, + "up_blocks.1.resnets.1.time_emb_proj": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 8, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.0.resnets.2.conv_shortcut": 8, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": 8, + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": 8, + "up_blocks.1.attentions.0.proj_in": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 8, + "down_blocks.1.downsamplers.0.conv": 8, + "up_blocks.1.resnets.2.time_emb_proj": 
8, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2": 8, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.1.attentions.1.proj_out": 8, + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0": 8, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v": 8, + "up_blocks.0.upsamplers.0.conv": 8, + "down_blocks.1.attentions.0.proj_out": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.1.resnets.1.conv2": 8, + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": 8, + "up_blocks.1.resnets.2.conv2": 8, + "up_blocks.1.resnets.1.conv1": 8, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": 6, + "up_blocks.1.attentions.2.proj_out": 8, + "up_blocks.1.attentions.1.proj_in": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.resnets.0.conv1": 8, + "up_blocks.1.attentions.2.proj_in": 8, + "up_blocks.1.resnets.2.conv1": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": 8, + "down_blocks.0.resnets.1.time_emb_proj": 8, + "up_blocks.2.resnets.0.time_emb_proj": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.attentions.0.proj_in": 8, + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.resnets.1.time_emb_proj": 8, + "up_blocks.2.resnets.2.time_emb_proj": 8, + "down_blocks.0.resnets.0.time_emb_proj": 16, + "up_blocks.1.resnets.0.conv_shortcut": 8, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k": 6, + "down_blocks.0.resnets.1.conv2": 16, + "down_blocks.0.resnets.0.conv1": 16, + "up_blocks.1.resnets.1.conv_shortcut": 16, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.resnets.1.conv1": 16, + "down_blocks.1.resnets.0.conv_shortcut": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.resnets.0.conv2": 16, + "down_blocks.0.downsamplers.0.conv": 8, + "up_blocks.2.resnets.0.conv1": 16, + "up_blocks.2.resnets.0.conv2": 16, + "up_blocks.1.resnets.2.conv_shortcut": 16, + "up_blocks.2.resnets.1.conv2": 16, + "time_embedding.linear_2": 16, + "up_blocks.2.resnets.1.conv1": 16, + "up_blocks.1.upsamplers.0.conv": 16, + "time_embedding.linear_1": 8, + "up_blocks.2.resnets.0.conv_shortcut": 16, + "up_blocks.2.resnets.2.conv1": 16, + "up_blocks.2.resnets.1.conv_shortcut": 16, + "up_blocks.2.resnets.2.conv2": 16, + "up_blocks.2.resnets.2.conv_shortcut": 8 + }, + "recipe_3.41_bit_mixedpalette": { + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0": 1, + 
"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k": 1, + 
"mid_block.attentions.0.transformer_blocks.5.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k": 1, + 
"down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v": 1, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 1, + 
"up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v": 1, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k": 1, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q": 1, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q": 2, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q": 2, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q": 2, + 
"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v": 2, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v": 2, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k": 2, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q": 2, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k": 4, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k": 2, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q": 2, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v": 2, + 
"mid_block.attentions.0.transformer_blocks.9.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v": 2, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0": 2, + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2": 2, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q": 2, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k": 2, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj": 2, + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2": 2, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2": 2, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q": 2, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2": 2, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v": 2, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v": 2, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj": 2, + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj": 2, + 
"up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0": 4, + "down_blocks.2.resnets.1.time_emb_proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.7.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.8.ff.net.2": 2, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0": 4, + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj": 2, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0": 2, + "mid_block.resnets.1.time_emb_proj": 4, + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2": 2, + "mid_block.attentions.0.transformer_blocks.6.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj": 2, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj": 2, + "mid_block.attentions.0.transformer_blocks.5.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj": 2, + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.2.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2": 2, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.4.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj": 2, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k": 2, + 
"up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v": 2, + "up_blocks.0.resnets.0.time_emb_proj": 4, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2": 4, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.9.ff.net.2": 2, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q": 4, + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj": 4, + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2": 4, + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj": 2, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q": 4, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0": 4, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v": 4, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v": 2, + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj": 4, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2": 2, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k": 4, + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj": 4, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q": 4, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q": 4, + "mid_block.attentions.0.transformer_blocks.3.ff.net.2": 4, + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k": 4, + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj": 4, + "mid_block.resnets.0.time_emb_proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q": 4, + 
"down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj": 4, + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj": 4, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v": 4, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2": 4, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k": 4, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj": 4, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2": 4, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj": 4, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q": 4, + "mid_block.attentions.0.transformer_blocks.1.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v": 4, + "mid_block.resnets.1.conv1": 4, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2": 4, + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj": 4, + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q": 4, + 
"mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v": 4, + "down_blocks.2.attentions.1.proj_in": 4, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k": 4, + "mid_block.resnets.1.conv2": 4, + "down_blocks.2.resnets.1.conv1": 4, + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v": 4, + "up_blocks.0.resnets.0.conv1": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v": 4, + "down_blocks.2.resnets.1.conv2": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 4, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2": 4, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj": 4, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 4, + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2": 4, + "up_blocks.0.resnets.1.time_emb_proj": 4, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2": 4, + "mid_block.resnets.0.conv1": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2": 4, + "down_blocks.2.resnets.0.time_emb_proj": 4, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj": 4, + "mid_block.resnets.0.conv2": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.0.resnets.1.conv2": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v": 4, + "down_blocks.2.attentions.1.proj_out": 4, + 
"up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0": 4, + "up_blocks.0.resnets.2.time_emb_proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0": 6, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v": 4, + "mid_block.attentions.0.proj_out": 4, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v": 4, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2": 6, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k": 2, + "up_blocks.0.resnets.1.conv1": 4, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v": 4, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v": 4, + "up_blocks.0.resnets.0.conv2": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2": 4, + "up_blocks.0.resnets.2.conv2": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2": 6, + 
"up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj": 4, + "up_blocks.0.attentions.0.proj_out": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2": 4, + "up_blocks.0.resnets.2.conv1": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0": 6, + "down_blocks.2.resnets.0.conv2": 4, + "up_blocks.0.resnets.0.conv_shortcut": 4, + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 4, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj": 6, + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj": 6, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj": 6, + "down_blocks.2.attentions.0.proj_in": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.0.attentions.0.proj_in": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": 4, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj": 6, + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 4, + "down_blocks.2.attentions.0.proj_out": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2": 4, + "mid_block.attentions.0.proj_in": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k": 4, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": 4, + "up_blocks.0.attentions.2.proj_in": 6, + "down_blocks.2.resnets.0.conv1": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": 6, + 
"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": 6, + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj": 6, + "up_blocks.1.resnets.0.time_emb_proj": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": 6, + "up_blocks.0.attentions.2.proj_out": 6, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q": 4, + "add_embedding.linear_2": 8, + "down_blocks.1.resnets.1.time_emb_proj": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.0.resnets.1.conv_shortcut": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.0.attentions.1.proj_in": 6, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k": 2, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "add_embedding.linear_1": 8, + "up_blocks.1.resnets.0.conv1": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": 6, + "up_blocks.1.resnets.0.conv2": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": 6, + "up_blocks.0.attentions.1.proj_out": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 6, + "down_blocks.1.resnets.0.time_emb_proj": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.1.resnets.1.conv2": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.1.attentions.0.proj_out": 6, + "down_blocks.1.attentions.1.proj_out": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": 6, + "down_blocks.1.resnets.1.conv1": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": 8, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": 6, + "down_blocks.2.resnets.0.conv_shortcut": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.resnets.0.conv2": 6, + 
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": 6, + "down_blocks.1.attentions.1.proj_in": 6, + "up_blocks.1.resnets.1.time_emb_proj": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.0.resnets.2.conv_shortcut": 8, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": 8, + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": 6, + "up_blocks.1.attentions.0.proj_in": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 8, + "down_blocks.1.downsamplers.0.conv": 6, + "up_blocks.1.resnets.2.time_emb_proj": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2": 8, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.1.attentions.1.proj_out": 8, + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 8, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0": 8, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": 4, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 8, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v": 8, + "up_blocks.0.upsamplers.0.conv": 8, + "down_blocks.1.attentions.0.proj_out": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 8, + "up_blocks.1.resnets.1.conv2": 8, + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": 8, + "up_blocks.1.resnets.2.conv2": 8, + "up_blocks.1.resnets.1.conv1": 8, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": 6, + "up_blocks.1.attentions.2.proj_out": 8, + "up_blocks.1.attentions.1.proj_in": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.resnets.0.conv1": 6, + "up_blocks.1.attentions.2.proj_in": 8, + "up_blocks.1.resnets.2.conv1": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": 8, + "down_blocks.0.resnets.1.time_emb_proj": 8, + "up_blocks.2.resnets.0.time_emb_proj": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.attentions.0.proj_in": 8, + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.resnets.1.time_emb_proj": 8, + "up_blocks.2.resnets.2.time_emb_proj": 6, + "down_blocks.0.resnets.0.time_emb_proj": 8, + "up_blocks.1.resnets.0.conv_shortcut": 8, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k": 6, + "down_blocks.0.resnets.1.conv2": 8, + "down_blocks.0.resnets.0.conv1": 8, + "up_blocks.1.resnets.1.conv_shortcut": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 6, + "down_blocks.0.resnets.1.conv1": 16, + "down_blocks.1.resnets.0.conv_shortcut": 8, + 
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.resnets.0.conv2": 8, + "down_blocks.0.downsamplers.0.conv": 8, + "up_blocks.2.resnets.0.conv1": 8, + "up_blocks.2.resnets.0.conv2": 8, + "up_blocks.1.resnets.2.conv_shortcut": 8, + "up_blocks.2.resnets.1.conv2": 8, + "time_embedding.linear_2": 8, + "up_blocks.2.resnets.1.conv1": 8, + "up_blocks.1.upsamplers.0.conv": 8, + "time_embedding.linear_1": 8, + "up_blocks.2.resnets.0.conv_shortcut": 8, + "up_blocks.2.resnets.2.conv1": 16, + "up_blocks.2.resnets.1.conv_shortcut": 8, + "up_blocks.2.resnets.2.conv2": 8, + "up_blocks.2.resnets.2.conv_shortcut": 8 + }, + "recipe_3.05_bit_mixedpalette": { + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q": 1, + 
"down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k": 1, + 
"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 1, + 
"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v": 1, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v": 1, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k": 1, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q": 1, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q": 1, + 
"up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q": 1, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q": 1, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0": 1, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k": 1, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q": 1, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0": 1, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v": 1, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v": 2, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k": 2, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q": 2, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k": 4, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k": 2, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q": 2, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0": 2, + 
"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v": 2, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v": 2, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0": 2, + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2": 2, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q": 2, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k": 2, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj": 2, + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2": 2, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2": 2, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q": 2, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2": 2, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v": 2, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k": 2, + 
"down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v": 2, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj": 2, + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj": 2, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0": 2, + "down_blocks.2.resnets.1.time_emb_proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.7.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.8.ff.net.2": 2, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj": 2, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0": 2, + "mid_block.resnets.1.time_emb_proj": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2": 2, + "mid_block.attentions.0.transformer_blocks.6.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj": 2, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj": 2, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj": 2, + "mid_block.attentions.0.transformer_blocks.5.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj": 2, + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.2.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2": 2, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0": 2, + 
"down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.4.ff.net.2": 2, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj": 2, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v": 2, + "up_blocks.0.resnets.0.time_emb_proj": 2, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2": 2, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.9.ff.net.2": 2, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 2, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj": 2, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v": 2, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v": 2, + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj": 2, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2": 2, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj": 2, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q": 2, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q": 2, + 
"mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.3.ff.net.2": 2, + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj": 2, + "mid_block.resnets.0.time_emb_proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj": 4, + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj": 4, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v": 2, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj": 2, + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj": 4, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2": 4, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj": 4, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q": 4, + "mid_block.attentions.0.transformer_blocks.1.ff.net.2": 2, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v": 2, + "mid_block.resnets.1.conv1": 4, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 2, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k": 4, + 
"up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2": 4, + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj": 4, + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q": 4, + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v": 2, + "down_blocks.2.attentions.1.proj_in": 4, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k": 4, + "mid_block.resnets.1.conv2": 4, + "down_blocks.2.resnets.1.conv1": 4, + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v": 4, + "up_blocks.0.resnets.0.conv1": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v": 2, + "down_blocks.2.resnets.1.conv2": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 4, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2": 4, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj": 4, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 4, + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2": 4, + "up_blocks.0.resnets.1.time_emb_proj": 4, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2": 4, + "mid_block.resnets.0.conv1": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2": 4, + "down_blocks.2.resnets.0.time_emb_proj": 4, 
+ "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj": 4, + "mid_block.resnets.0.conv2": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.0.resnets.1.conv2": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v": 4, + "down_blocks.2.attentions.1.proj_out": 4, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0": 4, + "up_blocks.0.resnets.2.time_emb_proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v": 4, + "mid_block.attentions.0.proj_out": 4, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v": 4, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k": 2, + "up_blocks.0.resnets.1.conv1": 4, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v": 4, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v": 4, + "up_blocks.0.resnets.0.conv2": 4, + 
"up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2": 4, + "up_blocks.0.resnets.2.conv2": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj": 4, + "up_blocks.0.attentions.0.proj_out": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2": 4, + "up_blocks.0.resnets.2.conv1": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0": 4, + "down_blocks.2.resnets.0.conv2": 4, + "up_blocks.0.resnets.0.conv_shortcut": 4, + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 4, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj": 4, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj": 4, + "down_blocks.2.attentions.0.proj_in": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.0.attentions.0.proj_in": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": 4, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 4, + "down_blocks.2.attentions.0.proj_out": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2": 4, + "mid_block.attentions.0.proj_in": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + 
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k": 4, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0": 4, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": 4, + "up_blocks.0.attentions.2.proj_in": 6, + "down_blocks.2.resnets.0.conv1": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj": 4, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj": 6, + "up_blocks.1.resnets.0.time_emb_proj": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": 6, + "up_blocks.0.attentions.2.proj_out": 6, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q": 4, + "add_embedding.linear_2": 6, + "down_blocks.1.resnets.1.time_emb_proj": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.0.resnets.1.conv_shortcut": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.0.attentions.1.proj_in": 6, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k": 2, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "add_embedding.linear_1": 6, + "up_blocks.1.resnets.0.conv1": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": 6, + "up_blocks.1.resnets.0.conv2": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": 6, + "up_blocks.0.attentions.1.proj_out": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.1.resnets.0.time_emb_proj": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.1.resnets.1.conv2": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 6, + "up_blocks.1.attentions.0.proj_out": 6, + "down_blocks.1.attentions.1.proj_out": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": 6, + "down_blocks.1.resnets.1.conv1": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": 6, + 
"up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": 6, + "down_blocks.2.resnets.0.conv_shortcut": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.resnets.0.conv2": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": 6, + "down_blocks.1.attentions.1.proj_in": 6, + "up_blocks.1.resnets.1.time_emb_proj": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.0.resnets.2.conv_shortcut": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": 6, + "up_blocks.1.attentions.0.proj_in": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.downsamplers.0.conv": 6, + "up_blocks.1.resnets.2.time_emb_proj": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.1.attentions.1.proj_out": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0": 8, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": 4, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v": 6, + "up_blocks.0.upsamplers.0.conv": 6, + "down_blocks.1.attentions.0.proj_out": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.1.resnets.1.conv2": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": 8, + "up_blocks.1.resnets.2.conv2": 6, + "up_blocks.1.resnets.1.conv1": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": 6, + "up_blocks.1.attentions.2.proj_out": 8, + "up_blocks.1.attentions.1.proj_in": 8, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.resnets.0.conv1": 6, + "up_blocks.1.attentions.2.proj_in": 8, + "up_blocks.1.resnets.2.conv1": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 8, + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": 8, + "down_blocks.0.resnets.1.time_emb_proj": 6, + "up_blocks.2.resnets.0.time_emb_proj": 6, + 
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 8, + "down_blocks.1.attentions.0.proj_in": 8, + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 8, + "up_blocks.2.resnets.1.time_emb_proj": 6, + "up_blocks.2.resnets.2.time_emb_proj": 6, + "down_blocks.0.resnets.0.time_emb_proj": 8, + "up_blocks.1.resnets.0.conv_shortcut": 8, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k": 6, + "down_blocks.0.resnets.1.conv2": 8, + "down_blocks.0.resnets.0.conv1": 8, + "up_blocks.1.resnets.1.conv_shortcut": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "down_blocks.0.resnets.1.conv1": 8, + "down_blocks.1.resnets.0.conv_shortcut": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.resnets.0.conv2": 8, + "down_blocks.0.downsamplers.0.conv": 8, + "up_blocks.2.resnets.0.conv1": 8, + "up_blocks.2.resnets.0.conv2": 8, + "up_blocks.1.resnets.2.conv_shortcut": 8, + "up_blocks.2.resnets.1.conv2": 8, + "time_embedding.linear_2": 8, + "up_blocks.2.resnets.1.conv1": 8, + "up_blocks.1.upsamplers.0.conv": 8, + "time_embedding.linear_1": 6, + "up_blocks.2.resnets.0.conv_shortcut": 8, + "up_blocks.2.resnets.2.conv1": 8, + "up_blocks.2.resnets.1.conv_shortcut": 8, + "up_blocks.2.resnets.2.conv2": 8, + "up_blocks.2.resnets.2.conv_shortcut": 8 + }, + "recipe_2.81_bit_mixedpalette": { + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q": 1, + 
"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k": 1, + 
"down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q": 1, + 
"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v": 1, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v": 1, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k": 1, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q": 1, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0": 1, + 
"up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q": 1, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q": 1, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0": 1, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k": 1, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q": 1, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0": 1, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v": 1, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v": 1, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0": 1, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q": 1, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0": 1, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2": 1, + 
"up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q": 1, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k": 1, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k": 1, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0": 1, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0": 1, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k": 1, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0": 1, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 1, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v": 2, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2": 1, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q": 1, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj": 2, + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2": 2, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q": 2, + 
"down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2": 2, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q": 2, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2": 2, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v": 2, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v": 2, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k": 2, + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj": 2, + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj": 2, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0": 2, + "down_blocks.2.resnets.1.time_emb_proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.7.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.8.ff.net.2": 2, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj": 2, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0": 2, + "mid_block.resnets.1.time_emb_proj": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2": 2, + "mid_block.attentions.0.transformer_blocks.6.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj": 2, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj": 2, + 
"up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0": 2, + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj": 2, + "mid_block.attentions.0.transformer_blocks.5.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj": 2, + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.2.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2": 2, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.4.ff.net.2": 2, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj": 2, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v": 2, + "up_blocks.0.resnets.0.time_emb_proj": 2, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2": 2, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.9.ff.net.2": 2, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 2, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj": 2, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v": 2, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v": 2, + 
"mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj": 2, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2": 2, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj": 2, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q": 2, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.3.ff.net.2": 2, + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2": 2, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj": 2, + "mid_block.resnets.0.time_emb_proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2": 2, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj": 2, + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj": 2, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v": 2, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj": 2, + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj": 2, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q": 2, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2": 2, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 2, + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj": 2, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q": 2, + 
"mid_block.attentions.0.transformer_blocks.1.ff.net.2": 2, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v": 2, + "mid_block.resnets.1.conv1": 4, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 2, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2": 2, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q": 2, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q": 2, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2": 2, + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj": 4, + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 2, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q": 4, + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v": 2, + "down_blocks.2.attentions.1.proj_in": 4, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k": 4, + "mid_block.resnets.1.conv2": 2, + "down_blocks.2.resnets.1.conv1": 4, + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v": 2, + "up_blocks.0.resnets.0.conv1": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v": 2, + "down_blocks.2.resnets.1.conv2": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 2, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2": 4, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v": 4, + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj": 4, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2": 4, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 4, + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj": 2, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 2, + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2": 4, + 
"up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2": 4, + "up_blocks.0.resnets.1.time_emb_proj": 4, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2": 4, + "mid_block.resnets.0.conv1": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2": 4, + "down_blocks.2.resnets.0.time_emb_proj": 4, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v": 4, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj": 4, + "mid_block.resnets.0.conv2": 4, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.0.resnets.1.conv2": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v": 4, + "down_blocks.2.attentions.1.proj_out": 4, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0": 4, + "up_blocks.0.resnets.2.time_emb_proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v": 4, + "mid_block.attentions.0.proj_out": 4, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v": 4, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj": 4, + 
"up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k": 2, + "up_blocks.0.resnets.1.conv1": 4, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v": 4, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v": 4, + "up_blocks.0.resnets.0.conv2": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2": 4, + "up_blocks.0.resnets.2.conv2": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj": 4, + "up_blocks.0.attentions.0.proj_out": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2": 4, + "up_blocks.0.resnets.2.conv1": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0": 4, + "down_blocks.2.resnets.0.conv2": 4, + "up_blocks.0.resnets.0.conv_shortcut": 4, + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 4, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj": 4, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj": 4, + "down_blocks.2.attentions.0.proj_in": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.0.attentions.0.proj_in": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": 2, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj": 4, + 
"up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 4, + "down_blocks.2.attentions.0.proj_out": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2": 4, + "mid_block.attentions.0.proj_in": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k": 4, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0": 4, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": 4, + "up_blocks.0.attentions.2.proj_in": 4, + "down_blocks.2.resnets.0.conv1": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj": 4, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj": 4, + "up_blocks.1.resnets.0.time_emb_proj": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.proj_out": 4, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q": 4, + "add_embedding.linear_2": 6, + "down_blocks.1.resnets.1.time_emb_proj": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.0.resnets.1.conv_shortcut": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.0.attentions.1.proj_in": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k": 2, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "add_embedding.linear_1": 6, + "up_blocks.1.resnets.0.conv1": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": 6, + "up_blocks.1.resnets.0.conv2": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": 6, + "up_blocks.0.attentions.1.proj_out": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 4, + 
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.1.resnets.0.time_emb_proj": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.1.resnets.1.conv2": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.attentions.0.proj_out": 6, + "down_blocks.1.attentions.1.proj_out": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": 4, + "down_blocks.1.resnets.1.conv1": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": 6, + "down_blocks.2.resnets.0.conv_shortcut": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.resnets.0.conv2": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q": 6, + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": 4, + "down_blocks.1.attentions.1.proj_in": 6, + "up_blocks.1.resnets.1.time_emb_proj": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.0.resnets.2.conv_shortcut": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": 6, + "up_blocks.1.attentions.0.proj_in": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.downsamplers.0.conv": 6, + "up_blocks.1.resnets.2.time_emb_proj": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.1.attentions.1.proj_out": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": 4, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v": 6, + "up_blocks.0.upsamplers.0.conv": 6, + "down_blocks.1.attentions.0.proj_out": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 6, + 
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.1.resnets.1.conv2": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": 6, + "up_blocks.1.resnets.2.conv2": 6, + "up_blocks.1.resnets.1.conv1": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": 6, + "up_blocks.1.attentions.2.proj_out": 6, + "up_blocks.1.attentions.1.proj_in": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.1.resnets.0.conv1": 6, + "up_blocks.1.attentions.2.proj_in": 6, + "up_blocks.1.resnets.2.conv1": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": 6, + "down_blocks.0.resnets.1.time_emb_proj": 6, + "up_blocks.2.resnets.0.time_emb_proj": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.1.attentions.0.proj_in": 6, + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.resnets.1.time_emb_proj": 6, + "up_blocks.2.resnets.2.time_emb_proj": 6, + "down_blocks.0.resnets.0.time_emb_proj": 6, + "up_blocks.1.resnets.0.conv_shortcut": 6, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k": 6, + "down_blocks.0.resnets.1.conv2": 8, + "down_blocks.0.resnets.0.conv1": 6, + "up_blocks.1.resnets.1.conv_shortcut": 8, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "down_blocks.0.resnets.1.conv1": 8, + "down_blocks.1.resnets.0.conv_shortcut": 8, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 6, + "down_blocks.0.resnets.0.conv2": 8, + "down_blocks.0.downsamplers.0.conv": 8, + "up_blocks.2.resnets.0.conv1": 8, + "up_blocks.2.resnets.0.conv2": 8, + "up_blocks.1.resnets.2.conv_shortcut": 6, + "up_blocks.2.resnets.1.conv2": 8, + "time_embedding.linear_2": 6, + "up_blocks.2.resnets.1.conv1": 8, + "up_blocks.1.upsamplers.0.conv": 8, + "time_embedding.linear_1": 6, + "up_blocks.2.resnets.0.conv_shortcut": 8, + "up_blocks.2.resnets.2.conv1": 8, + "up_blocks.2.resnets.1.conv_shortcut": 8, + "up_blocks.2.resnets.2.conv2": 8, + "up_blocks.2.resnets.2.conv_shortcut": 6 + }, + "recipe_2.31_bit_mixedpalette": { + "mid_block.attentions.0.transformer_blocks.8.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.8.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0": 1, + 
"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.5.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.7.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.9.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k": 1, + 
"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.8.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.3.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.6.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.4.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.2.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0": 1, + 
"up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.1.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.0.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v": 1, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v": 1, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k": 1, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v": 1, + 
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q": 1, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q": 1, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q": 1, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0": 1, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k": 1, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q": 1, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0": 1, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v": 1, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v": 1, + 
"down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v": 1, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0": 1, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_q": 1, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0": 1, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_q": 1, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k": 1, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_k": 1, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0": 1, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0": 1, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k": 1, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0": 1, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": 1, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v": 1, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2": 1, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q": 1, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k": 1, + "mid_block.attentions.0.transformer_blocks.8.attn1.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k": 1, + 
"down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2": 1, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.6.attn1.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2": 1, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k": 1, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q": 1, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2": 1, + "up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v": 1, + "up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v": 1, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2": 1, + "up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj": 1, + "up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj": 1, + "up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v": 1, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj": 1, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0": 1, + "down_blocks.2.resnets.1.time_emb_proj": 1, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q": 1, + "mid_block.attentions.0.transformer_blocks.7.ff.net.2": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0": 1, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k": 1, + "mid_block.attentions.0.transformer_blocks.8.ff.net.2": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj": 1, + 
"down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj": 1, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0": 1, + "mid_block.resnets.1.time_emb_proj": 1, + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2": 1, + "mid_block.attentions.0.transformer_blocks.6.ff.net.2": 1, + "down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj": 1, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q": 1, + "mid_block.attentions.0.transformer_blocks.9.attn1.to_v": 1, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q": 1, + "up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj": 1, + "up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v": 1, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k": 1, + "mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0": 1, + "down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj": 1, + "mid_block.attentions.0.transformer_blocks.5.ff.net.2": 1, + "down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj": 1, + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k": 1, + "mid_block.attentions.0.transformer_blocks.2.ff.net.2": 1, + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2": 1, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0": 1, + "mid_block.attentions.0.transformer_blocks.4.ff.net.2": 1, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_k": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj": 1, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v": 1, + "down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj": 1, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v": 1, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q": 1, + "up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v": 1, + "up_blocks.0.resnets.0.time_emb_proj": 1, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0": 1, + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2": 1, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k": 1, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0": 1, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k": 1, + "up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k": 1, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q": 1, + "mid_block.attentions.0.transformer_blocks.9.ff.net.2": 1, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": 1, + "down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v": 1, + "up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0": 1, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k": 1, + 
"mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj": 1, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": 2, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj": 2, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v": 2, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_v": 2, + "mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj": 2, + "down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2": 2, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj": 2, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_q": 2, + "up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": 2, + "up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.3.ff.net.2": 2, + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2": 2, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj": 2, + "mid_block.resnets.0.time_emb_proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2": 2, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj": 2, + "mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj": 2, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v": 2, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2": 2, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k": 2, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj": 2, + "down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj": 2, + "mid_block.attentions.0.transformer_blocks.2.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k": 2, + 
"down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q": 2, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2": 2, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": 2, + "up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj": 2, + "up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.1.ff.net.2": 2, + "mid_block.attentions.0.transformer_blocks.3.attn1.to_v": 2, + "mid_block.resnets.1.conv1": 2, + "down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0": 2, + "mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": 2, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2": 2, + "mid_block.attentions.0.transformer_blocks.0.ff.net.2": 2, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q": 2, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q": 2, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q": 2, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2": 2, + "down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k": 2, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj": 2, + "down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": 2, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q": 2, + "mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj": 2, + "up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": 2, + "up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v": 2, + "down_blocks.2.attentions.1.proj_in": 2, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_k": 2, + "mid_block.resnets.1.conv2": 2, + "down_blocks.2.resnets.1.conv1": 2, + "down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v": 2, + "up_blocks.0.resnets.0.conv1": 2, + "up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v": 2, + "down_blocks.2.resnets.1.conv2": 2, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0": 2, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 2, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_q": 2, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": 2, + 
"up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2": 2, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_v": 2, + "down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj": 2, + "mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2": 2, + "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": 2, + "mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj": 2, + "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": 2, + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2": 2, + "up_blocks.0.resnets.1.time_emb_proj": 4, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": 2, + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2": 2, + "mid_block.resnets.0.conv1": 2, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0": 2, + "down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0": 2, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v": 2, + "up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2": 2, + "down_blocks.2.resnets.0.time_emb_proj": 2, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": 2, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v": 2, + "up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": 2, + "down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj": 2, + "mid_block.resnets.0.conv2": 2, + "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": 2, + "up_blocks.0.resnets.1.conv2": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2": 2, + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v": 2, + "down_blocks.2.attentions.1.proj_out": 2, + "up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0": 2, + "up_blocks.0.resnets.2.time_emb_proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v": 4, + "mid_block.attentions.0.proj_out": 4, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_q": 2, + "up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v": 2, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v": 4, + "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": 2, + "up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0": 4, + 
"up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_k": 2, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_v": 2, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k": 2, + "up_blocks.0.resnets.1.conv1": 2, + "up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v": 4, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v": 2, + "up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": 4, + "mid_block.attentions.0.transformer_blocks.1.attn1.to_v": 4, + "up_blocks.0.resnets.0.conv2": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2": 4, + "up_blocks.0.resnets.2.conv2": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0": 4, + "down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2": 4, + "mid_block.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2": 4, + "up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj": 4, + "up_blocks.0.attentions.0.proj_out": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 2, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2": 4, + "up_blocks.0.resnets.2.conv1": 4, + "up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0": 4, + "down_blocks.2.resnets.0.conv2": 4, + "up_blocks.0.resnets.0.conv_shortcut": 4, + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": 4, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": 4, + 
"up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj": 4, + "down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k": 2, + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": 2, + "up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj": 4, + "down_blocks.2.attentions.0.proj_in": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.0.attentions.0.proj_in": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": 2, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj": 4, + "up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": 4, + "down_blocks.2.attentions.0.proj_out": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2": 4, + "mid_block.attentions.0.proj_in": 4, + "up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k": 4, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0": 4, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q": 4, + "up_blocks.0.attentions.2.proj_in": 4, + "down_blocks.2.resnets.0.conv1": 4, + "up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj": 4, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": 6, + "up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": 4, + "up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": 4, + "up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj": 4, + "up_blocks.1.resnets.0.time_emb_proj": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": 4, + "up_blocks.0.attentions.2.proj_out": 4, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q": 4, + "add_embedding.linear_2": 6, + "down_blocks.1.resnets.1.time_emb_proj": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.0.resnets.1.conv_shortcut": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.0.attentions.1.proj_in": 4, + 
"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k": 2, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "add_embedding.linear_1": 4, + "up_blocks.1.resnets.0.conv1": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": 4, + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": 4, + "up_blocks.1.resnets.0.conv2": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": 4, + "up_blocks.0.attentions.1.proj_out": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": 4, + "down_blocks.1.resnets.0.time_emb_proj": 4, + "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 4, + "down_blocks.1.resnets.1.conv2": 4, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": 4, + "up_blocks.1.attentions.0.proj_out": 6, + "down_blocks.1.attentions.1.proj_out": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k": 4, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k": 4, + "down_blocks.1.resnets.1.conv1": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": 4, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": 6, + "down_blocks.2.resnets.0.conv_shortcut": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0": 6, + "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.1.resnets.0.conv2": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q": 4, + "up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q": 4, + "down_blocks.1.attentions.1.proj_in": 6, + "up_blocks.1.resnets.1.time_emb_proj": 4, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": 6, + "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.0.resnets.2.conv_shortcut": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2": 6, + "up_blocks.1.attentions.0.proj_in": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.downsamplers.0.conv": 6, + "up_blocks.1.resnets.2.time_emb_proj": 4, + "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": 6, + 
"up_blocks.1.attentions.1.proj_out": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": 6, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": 4, + "up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k": 4, + "down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": 6, + "up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v": 6, + "up_blocks.0.upsamplers.0.conv": 6, + "down_blocks.1.attentions.0.proj_out": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": 6, + "up_blocks.1.resnets.1.conv2": 6, + "up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj": 6, + "up_blocks.1.resnets.2.conv2": 6, + "up_blocks.1.resnets.1.conv1": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k": 6, + "up_blocks.1.attentions.2.proj_out": 6, + "up_blocks.1.attentions.1.proj_in": 6, + "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.1.resnets.0.conv1": 6, + "up_blocks.1.attentions.2.proj_in": 6, + "up_blocks.1.resnets.2.conv1": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": 6, + "down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj": 6, + "down_blocks.0.resnets.1.time_emb_proj": 6, + "up_blocks.2.resnets.0.time_emb_proj": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": 6, + "down_blocks.1.attentions.0.proj_in": 6, + "up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": 6, + "up_blocks.2.resnets.1.time_emb_proj": 6, + "up_blocks.2.resnets.2.time_emb_proj": 6, + "down_blocks.0.resnets.0.time_emb_proj": 6, + "up_blocks.1.resnets.0.conv_shortcut": 6, + "up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k": 6, + "down_blocks.0.resnets.1.conv2": 6, + "down_blocks.0.resnets.0.conv1": 6, + "up_blocks.1.resnets.1.conv_shortcut": 6, + "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": 4, + "down_blocks.0.resnets.1.conv1": 8, + "down_blocks.1.resnets.0.conv_shortcut": 6, + "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": 4, + "down_blocks.0.resnets.0.conv2": 8, + "down_blocks.0.downsamplers.0.conv": 6, + "up_blocks.2.resnets.0.conv1": 6, + "up_blocks.2.resnets.0.conv2": 6, + "up_blocks.1.resnets.2.conv_shortcut": 6, + "up_blocks.2.resnets.1.conv2": 8, + "time_embedding.linear_2": 6, + "up_blocks.2.resnets.1.conv1": 8, + "up_blocks.1.upsamplers.0.conv": 6, + "time_embedding.linear_1": 6, + "up_blocks.2.resnets.0.conv_shortcut": 8, + "up_blocks.2.resnets.2.conv1": 8, + "up_blocks.2.resnets.1.conv_shortcut": 8, + "up_blocks.2.resnets.2.conv2": 8, + "up_blocks.2.resnets.2.conv_shortcut": 6 + } + } +} \ No newline at end of file diff --git a/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlmodelc/analytics/coremldata.bin b/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlmodelc/analytics/coremldata.bin new file mode 100644 index 0000000000000000000000000000000000000000..adcc6fda2c3ca799ad2ceef44785908891dbc1e0 --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlmodelc/analytics/coremldata.bin @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:5a7781177716a8de945e014f253c4046b5fb4f2f90b0629ff5e3ebd99a3756c1 +size 243 diff --git a/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlmodelc/coremldata.bin b/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlmodelc/coremldata.bin new file mode 100644 index 0000000000000000000000000000000000000000..6ec4882d93c41f4d859f419572182f8503ec75fc --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlmodelc/coremldata.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8aea3887ffdc8e059925f3981259e1cd3227b827e5f91edff613c73ac0ea16f6 +size 1338 diff --git a/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlmodelc/metadata.json b/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlmodelc/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..8187971a982aeccc76c1e2289c75eee16bc75725 --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlmodelc/metadata.json @@ -0,0 +1,124 @@ +[ + { + "shortDescription" : "Stable Diffusion generates images conditioned on text and\/or other images as input through the diffusion process. Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.", + "metadataOutputVersion" : "3.0", + "outputSchema" : [ + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float32", + "formattedType" : "MultiArray (Float32)", + "shortDescription" : "Same shape and dtype as the `sample` input. The predicted noise to facilitate the reverse diffusion (denoising) process", + "shape" : "[]", + "name" : "noise_pred", + "type" : "MultiArray" + } + ], + "version" : "diffusers\/stable-diffusion-xl-base-1.0", + "modelParameters" : [ + + ], + "author" : "Please refer to the Model Card available at huggingface.co\/diffusers\/stable-diffusion-xl-base-1.0", + "specificationVersion" : 7, + "storagePrecision" : "Mixed (Float16, Palettized (1 bits), Palettized (2 bits), Palettized (4 bits), Palettized (6 bits), Palettized (8 bits))", + "license" : "OpenRAIL (https:\/\/huggingface.co\/spaces\/CompVis\/stable-diffusion-license)", + "mlProgramOperationTypeHistogram" : { + "UpsampleNearestNeighbor" : 2, + "Ios16.reduceMean" : 512, + "Ios16.sin" : 2, + "Ios16.softmax" : 140, + "Split" : 70, + "Ios16.add" : 722, + "Concat" : 14, + "Ios16.realDiv" : 46, + "Ios16.square" : 46, + "ExpandDims" : 6, + "Ios16.sub" : 256, + "Ios16.cast" : 1, + "Ios16.conv" : 794, + "Ios16.constexprLutToDense" : 790, + "Ios16.gelu" : 70, + "Ios16.matmul" : 280, + "Ios16.batchNorm" : 46, + "Ios16.reshape" : 675, + "Ios16.rsqrt" : 210, + "Ios16.silu" : 38, + "Ios16.sqrt" : 46, + "Ios16.mul" : 842, + "Ios16.cos" : 2, + "SliceByIndex" : 4 + }, + "computePrecision" : "Mixed (Float16, Float32, Int32)", + "isUpdatable" : "0", + "availability" : { + "macOS" : "13.0", + "tvOS" : "16.0", + "visionOS" : "1.0", + "watchOS" : "9.0", + "iOS" : "16.0", + "macCatalyst" : "16.0" + }, + "modelType" : { + "name" : "MLModelType_mlProgram" + }, + "inputSchema" : [ + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 2 × 4 × 128 × 128)", + "shortDescription" : "The low resolution latent feature maps being denoised through reverse diffusion", + "shape" : "[2, 4, 128, 128]", + "name" : "sample", + "type" : "MultiArray" + }, + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 2)", + "shortDescription" : "A value emitted by the
associated scheduler object to condition the model on a given noise schedule", + "shape" : "[2]", + "name" : "timestep", + "type" : "MultiArray" + }, + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 2 × 2048 × 1 × 77)", + "shortDescription" : "Output embeddings from the associated text_encoder model to condition the generated image on text. A maximum of 77 tokens (~40 words) are allowed. Longer text is truncated. Shorter text does not reduce computation.", + "shape" : "[2, 2048, 1, 77]", + "name" : "encoder_hidden_states", + "type" : "MultiArray" + }, + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 12)", + "shortDescription" : "", + "shape" : "[12]", + "name" : "time_ids", + "type" : "MultiArray" + }, + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 2 × 1280)", + "shortDescription" : "", + "shape" : "[2, 1280]", + "name" : "text_embeds", + "type" : "MultiArray" + } + ], + "userDefinedMetadata" : { + "com.github.apple.coremltools.version" : "7.0b1", + "com.github.apple.coremltools.source" : "torch==2.0.1+cu117", + "com.github.apple.ml-stable-diffusion.version" : "1.0.0" + }, + "generatedClassName" : "recipe_3_41_bit_mixedpalette", + "method" : "predict" + } +] \ No newline at end of file diff --git a/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlmodelc/model.mil b/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlmodelc/model.mil new file mode 100644 index 0000000000000000000000000000000000000000..9f370ab4cb80942ac5e69a0f608e5bd59cd87cfe --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlmodelc/model.mil @@ -0,0 +1,12327 @@ +program(1.0) +[buildInfo = dict, tensor>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.0.48"}})] +{ + func main(tensor encoder_hidden_states, tensor sample, tensor text_embeds, tensor time_ids, tensor timestep) { + tensor var_24 = const()[name = tensor("op_24"), val = tensor(-1)]; + tensor var_41_axes_0 = const()[name = tensor("op_41_axes_0"), val = tensor([1])]; + tensor var_41_cast = expand_dims(axes = var_41_axes_0, x = timestep)[name = tensor("op_41_cast")]; + tensor var_43_to_fp16 = const()[name = tensor("op_43_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(64)))]; + tensor emb_3_cast = mul(x = var_41_cast, y = var_43_to_fp16)[name = tensor("emb_3_cast")]; + tensor var_48_cast = sin(x = emb_3_cast)[name = tensor("op_48_cast")]; + tensor var_49_cast = cos(x = emb_3_cast)[name = tensor("op_49_cast")]; + tensor emb_7_interleave_0 = const()[name = tensor("emb_7_interleave_0"), val = tensor(false)]; + tensor emb_7_cast = concat(axis = var_24, interleave = emb_7_interleave_0, values = (var_48_cast, var_49_cast))[name = tensor("emb_7_cast")]; + tensor var_53_begin_0 = const()[name = tensor("op_53_begin_0"), val = tensor([0, 160])]; + tensor var_53_end_0 = const()[name = tensor("op_53_end_0"), val = tensor([2, 320])]; + tensor var_53_end_mask_0 = const()[name = tensor("op_53_end_mask_0"), val = tensor([true, true])]; + tensor var_53_cast = slice_by_index(begin = var_53_begin_0, end = var_53_end_0, end_mask = var_53_end_mask_0, x = emb_7_cast)[name = tensor("op_53_cast")]; + tensor var_55_begin_0 = const()[name = tensor("op_55_begin_0"), val = tensor([0, 0])]; + tensor var_55_end_0 = const()[name = tensor("op_55_end_0"), val = tensor([2,
160])]; + tensor var_55_end_mask_0 = const()[name = tensor("op_55_end_mask_0"), val = tensor([true, false])]; + tensor var_55_cast = slice_by_index(begin = var_55_begin_0, end = var_55_end_0, end_mask = var_55_end_mask_0, x = emb_7_cast)[name = tensor("op_55_cast")]; + tensor sample_3_interleave_0 = const()[name = tensor("sample_3_interleave_0"), val = tensor(false)]; + tensor sample_3_cast = concat(axis = var_24, interleave = sample_3_interleave_0, values = (var_53_cast, var_55_cast))[name = tensor("sample_3_cast")]; + tensor var_58 = const()[name = tensor("op_58"), val = tensor(1)]; + tensor var_65_axes_0 = const()[name = tensor("op_65_axes_0"), val = tensor([-1])]; + tensor var_65_cast = expand_dims(axes = var_65_axes_0, x = sample_3_cast)[name = tensor("op_65_cast")]; + tensor input_1_axes_0 = const()[name = tensor("input_1_axes_0"), val = tensor([-1])]; + tensor input_1_cast = expand_dims(axes = input_1_axes_0, x = var_65_cast)[name = tensor("input_1_cast")]; + tensor var_69 = const()[name = tensor("op_69"), val = tensor([1, 1])]; + tensor var_71 = const()[name = tensor("op_71"), val = tensor([1, 1])]; + tensor input_3_pad_type_0 = const()[name = tensor("input_3_pad_type_0"), val = tensor("custom")]; + tensor input_3_pad_0 = const()[name = tensor("input_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor time_embedding_linear_1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410112))), name = tensor("time_embedding_linear_1_weight_to_fp16_palettized"), shape = tensor([1280, 320, 1, 1])]; + tensor time_embedding_linear_1_bias_to_fp16 = const()[name = tensor("time_embedding_linear_1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410688)))]; + tensor input_3_cast = conv(bias = time_embedding_linear_1_bias_to_fp16, dilations = var_71, groups = var_58, pad = input_3_pad_0, pad_type = input_3_pad_type_0, strides = var_69, weight = time_embedding_linear_1_weight_to_fp16_palettized, x = input_1_cast)[name = tensor("input_3_cast")]; + tensor input_5_cast = silu(x = input_3_cast)[name = tensor("input_5_cast")]; + tensor var_77 = const()[name = tensor("op_77"), val = tensor([1, 1])]; + tensor var_79 = const()[name = tensor("op_79"), val = tensor([1, 1])]; + tensor emb_pad_type_0 = const()[name = tensor("emb_pad_type_0"), val = tensor("custom")]; + tensor emb_pad_0 = const()[name = tensor("emb_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor time_embedding_linear_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(413312))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2051776))), name = tensor("time_embedding_linear_2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor time_embedding_linear_2_bias_to_fp16 = const()[name = tensor("time_embedding_linear_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2052352)))]; + tensor emb_cast = conv(bias = time_embedding_linear_2_bias_to_fp16, dilations = var_79, groups = var_58, pad = emb_pad_0, pad_type = emb_pad_type_0, strides = var_77, weight = time_embedding_linear_2_weight_to_fp16_palettized, x = input_5_cast)[name = tensor("emb_cast")]; + tensor var_85 = const()[name = tensor("op_85"), val = 
tensor(-1)]; + tensor var_102_axes_0 = const()[name = tensor("op_102_axes_0"), val = tensor([1])]; + tensor var_102_cast = expand_dims(axes = var_102_axes_0, x = time_ids)[name = tensor("op_102_cast")]; + tensor var_104_to_fp16 = const()[name = tensor("op_104_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2054976)))]; + tensor emb_11_cast = mul(x = var_102_cast, y = var_104_to_fp16)[name = tensor("emb_11_cast")]; + tensor var_109_cast = sin(x = emb_11_cast)[name = tensor("op_109_cast")]; + tensor var_110_cast = cos(x = emb_11_cast)[name = tensor("op_110_cast")]; + tensor emb_15_interleave_0 = const()[name = tensor("emb_15_interleave_0"), val = tensor(false)]; + tensor emb_15_cast = concat(axis = var_85, interleave = emb_15_interleave_0, values = (var_109_cast, var_110_cast))[name = tensor("emb_15_cast")]; + tensor var_114_begin_0 = const()[name = tensor("op_114_begin_0"), val = tensor([0, 128])]; + tensor var_114_end_0 = const()[name = tensor("op_114_end_0"), val = tensor([12, 256])]; + tensor var_114_end_mask_0 = const()[name = tensor("op_114_end_mask_0"), val = tensor([true, true])]; + tensor var_114_cast = slice_by_index(begin = var_114_begin_0, end = var_114_end_0, end_mask = var_114_end_mask_0, x = emb_15_cast)[name = tensor("op_114_cast")]; + tensor var_116_begin_0 = const()[name = tensor("op_116_begin_0"), val = tensor([0, 0])]; + tensor var_116_end_0 = const()[name = tensor("op_116_end_0"), val = tensor([12, 128])]; + tensor var_116_end_mask_0 = const()[name = tensor("op_116_end_mask_0"), val = tensor([true, false])]; + tensor var_116_cast = slice_by_index(begin = var_116_begin_0, end = var_116_end_0, end_mask = var_116_end_mask_0, x = emb_15_cast)[name = tensor("op_116_cast")]; + tensor time_embeds_1_interleave_0 = const()[name = tensor("time_embeds_1_interleave_0"), val = tensor(false)]; + tensor time_embeds_1_cast = concat(axis = var_85, interleave = time_embeds_1_interleave_0, values = (var_114_cast, var_116_cast))[name = tensor("time_embeds_1_cast")]; + tensor var_124 = const()[name = tensor("op_124"), val = tensor([2, -1])]; + tensor time_embeds_cast = reshape(shape = var_124, x = time_embeds_1_cast)[name = tensor("time_embeds_cast")]; + tensor var_127 = const()[name = tensor("op_127"), val = tensor(-1)]; + tensor sample_interleave_0 = const()[name = tensor("sample_interleave_0"), val = tensor(false)]; + tensor sample_cast = concat(axis = var_127, interleave = sample_interleave_0, values = (text_embeds, time_embeds_cast))[name = tensor("sample_cast")]; + tensor var_129 = const()[name = tensor("op_129"), val = tensor(1)]; + tensor var_136_axes_0 = const()[name = tensor("op_136_axes_0"), val = tensor([-1])]; + tensor var_136_cast = expand_dims(axes = var_136_axes_0, x = sample_cast)[name = tensor("op_136_cast")]; + tensor input_7_axes_0 = const()[name = tensor("input_7_axes_0"), val = tensor([-1])]; + tensor input_7_cast = expand_dims(axes = input_7_axes_0, x = var_136_cast)[name = tensor("input_7_cast")]; + tensor var_140 = const()[name = tensor("op_140"), val = tensor([1, 1])]; + tensor var_142 = const()[name = tensor("op_142"), val = tensor([1, 1])]; + tensor input_9_pad_type_0 = const()[name = tensor("input_9_pad_type_0"), val = tensor("custom")]; + tensor input_9_pad_0 = const()[name = tensor("input_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor add_embedding_linear_1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2055296))), 
lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(5659840))), name = tensor("add_embedding_linear_1_weight_to_fp16_palettized"), shape = tensor([1280, 2816, 1, 1])]; + tensor add_embedding_linear_1_bias_to_fp16 = const()[name = tensor("add_embedding_linear_1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(5660416)))]; + tensor input_9_cast = conv(bias = add_embedding_linear_1_bias_to_fp16, dilations = var_142, groups = var_129, pad = input_9_pad_0, pad_type = input_9_pad_type_0, strides = var_140, weight = add_embedding_linear_1_weight_to_fp16_palettized, x = input_7_cast)[name = tensor("input_9_cast")]; + tensor input_11_cast = silu(x = input_9_cast)[name = tensor("input_11_cast")]; + tensor var_148 = const()[name = tensor("op_148"), val = tensor([1, 1])]; + tensor var_150 = const()[name = tensor("op_150"), val = tensor([1, 1])]; + tensor aug_emb_pad_type_0 = const()[name = tensor("aug_emb_pad_type_0"), val = tensor("custom")]; + tensor aug_emb_pad_0 = const()[name = tensor("aug_emb_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor add_embedding_linear_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(5663040))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7301504))), name = tensor("add_embedding_linear_2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor add_embedding_linear_2_bias_to_fp16 = const()[name = tensor("add_embedding_linear_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7302080)))]; + tensor aug_emb_cast = conv(bias = add_embedding_linear_2_bias_to_fp16, dilations = var_150, groups = var_129, pad = aug_emb_pad_0, pad_type = aug_emb_pad_type_0, strides = var_148, weight = add_embedding_linear_2_weight_to_fp16_palettized, x = input_11_cast)[name = tensor("aug_emb_cast")]; + tensor input_19_cast = add(x = emb_cast, y = aug_emb_cast)[name = tensor("input_19_cast")]; + tensor var_158 = const()[name = tensor("op_158"), val = tensor(1)]; + tensor var_161 = const()[name = tensor("op_161"), val = tensor([1, 1])]; + tensor var_163 = const()[name = tensor("op_163"), val = tensor([1, 1])]; + tensor input_13_pad_type_0 = const()[name = tensor("input_13_pad_type_0"), val = tensor("custom")]; + tensor input_13_pad_0 = const()[name = tensor("input_13_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor conv_in_weight_to_fp16 = const()[name = tensor("conv_in_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7304704)))]; + tensor conv_in_bias_to_fp16 = const()[name = tensor("conv_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7327808)))]; + tensor input_13_cast = conv(bias = conv_in_bias_to_fp16, dilations = var_163, groups = var_158, pad = input_13_pad_0, pad_type = input_13_pad_type_0, strides = var_161, weight = conv_in_weight_to_fp16, x = sample)[name = tensor("input_13_cast")]; + tensor var_172 = const()[name = tensor("op_172"), val = tensor(1)]; + tensor reshape_0_shape_0 = const()[name = tensor("reshape_0_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_0_cast = reshape(shape = reshape_0_shape_0, x = input_13_cast)[name = tensor("reshape_0_cast")]; + tensor reduce_mean_0_axes_0 = const()[name = tensor("reduce_mean_0_axes_0"), val = tensor([2, 3, 4])]; + tensor 
reduce_mean_0_keep_dims_0 = const()[name = tensor("reduce_mean_0_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_0_cast = reduce_mean(axes = reduce_mean_0_axes_0, keep_dims = reduce_mean_0_keep_dims_0, x = reshape_0_cast)[name = tensor("reduce_mean_0_cast")]; + tensor sub_0_cast = sub(x = reshape_0_cast, y = reduce_mean_0_cast)[name = tensor("sub_0_cast")]; + tensor square_0_cast = square(x = sub_0_cast)[name = tensor("square_0_cast")]; + tensor reduce_mean_2_axes_0 = const()[name = tensor("reduce_mean_2_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_2_keep_dims_0 = const()[name = tensor("reduce_mean_2_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_2_cast = reduce_mean(axes = reduce_mean_2_axes_0, keep_dims = reduce_mean_2_keep_dims_0, x = square_0_cast)[name = tensor("reduce_mean_2_cast")]; + tensor add_0_y_0_to_fp16 = const()[name = tensor("add_0_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_0_cast = add(x = reduce_mean_2_cast, y = add_0_y_0_to_fp16)[name = tensor("add_0_cast")]; + tensor sqrt_0_cast = sqrt(x = add_0_cast)[name = tensor("sqrt_0_cast")]; + tensor real_div_0_cast = real_div(x = sub_0_cast, y = sqrt_0_cast)[name = tensor("real_div_0_cast")]; + tensor reshape_1_shape_0 = const()[name = tensor("reshape_1_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_1_cast = reshape(shape = reshape_1_shape_0, x = real_div_0_cast)[name = tensor("reshape_1_cast")]; + tensor add_1_mean_0_to_fp16 = const()[name = tensor("add_1_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7328512)))]; + tensor add_1_variance_0_to_fp16 = const()[name = tensor("add_1_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7329216)))]; + tensor add_1_gamma_0_to_fp16 = const()[name = tensor("add_1_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7329920)))]; + tensor add_1_beta_0_to_fp16 = const()[name = tensor("add_1_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7330624)))]; + tensor add_1_epsilon_0_to_fp16 = const()[name = tensor("add_1_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_1_cast = batch_norm(beta = add_1_beta_0_to_fp16, epsilon = add_1_epsilon_0_to_fp16, gamma = add_1_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_1_cast)[name = tensor("add_1_cast")]; + tensor input_17_cast = silu(x = add_1_cast)[name = tensor("input_17_cast")]; + tensor var_190 = const()[name = tensor("op_190"), val = tensor([1, 1])]; + tensor var_192 = const()[name = tensor("op_192"), val = tensor([1, 1])]; + tensor hidden_states_1_pad_type_0 = const()[name = tensor("hidden_states_1_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_1_pad_0 = const()[name = tensor("hidden_states_1_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_0_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7331328))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8252992))), name = tensor("down_blocks_0_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([320, 320, 3, 3])]; + tensor down_blocks_0_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(8253568)))]; + tensor hidden_states_1_cast = conv(bias = down_blocks_0_resnets_0_conv1_bias_to_fp16, dilations = var_192, groups = var_172, pad = hidden_states_1_pad_0, pad_type = hidden_states_1_pad_type_0, strides = var_190, weight = down_blocks_0_resnets_0_conv1_weight_to_fp16_palettized, x = input_17_cast)[name = tensor("hidden_states_1_cast")]; + tensor input_21_cast = silu(x = input_19_cast)[name = tensor("input_21_cast")]; + tensor var_198 = const()[name = tensor("op_198"), val = tensor([1, 1])]; + tensor var_200 = const()[name = tensor("op_200"), val = tensor([1, 1])]; + tensor temb_1_pad_type_0 = const()[name = tensor("temb_1_pad_type_0"), val = tensor("custom")]; + tensor temb_1_pad_0 = const()[name = tensor("temb_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8254272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8663936))), name = tensor("down_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([320, 1280, 1, 1])]; + tensor down_blocks_0_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8664512)))]; + tensor temb_1_cast = conv(bias = down_blocks_0_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_200, groups = var_172, pad = temb_1_pad_0, pad_type = temb_1_pad_type_0, strides = var_198, weight = down_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_1_cast")]; + tensor input_23_cast = add(x = hidden_states_1_cast, y = temb_1_cast)[name = tensor("input_23_cast")]; + tensor reshape_4_shape_0 = const()[name = tensor("reshape_4_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_4_cast = reshape(shape = reshape_4_shape_0, x = input_23_cast)[name = tensor("reshape_4_cast")]; + tensor reduce_mean_3_axes_0 = const()[name = tensor("reduce_mean_3_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_3_keep_dims_0 = const()[name = tensor("reduce_mean_3_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_3_cast = reduce_mean(axes = reduce_mean_3_axes_0, keep_dims = reduce_mean_3_keep_dims_0, x = reshape_4_cast)[name = tensor("reduce_mean_3_cast")]; + tensor sub_2_cast = sub(x = reshape_4_cast, y = reduce_mean_3_cast)[name = tensor("sub_2_cast")]; + tensor square_1_cast = square(x = sub_2_cast)[name = tensor("square_1_cast")]; + tensor reduce_mean_5_axes_0 = const()[name = tensor("reduce_mean_5_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_5_keep_dims_0 = const()[name = tensor("reduce_mean_5_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_5_cast = reduce_mean(axes = reduce_mean_5_axes_0, keep_dims = reduce_mean_5_keep_dims_0, x = square_1_cast)[name = tensor("reduce_mean_5_cast")]; + tensor add_2_y_0_to_fp16 = const()[name = tensor("add_2_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_2_cast = add(x = reduce_mean_5_cast, y = add_2_y_0_to_fp16)[name = tensor("add_2_cast")]; + tensor sqrt_1_cast = sqrt(x = add_2_cast)[name = tensor("sqrt_1_cast")]; + tensor real_div_1_cast = real_div(x = sub_2_cast, y = sqrt_1_cast)[name = tensor("real_div_1_cast")]; + tensor reshape_5_shape_0 = const()[name = tensor("reshape_5_shape_0"), val = 
tensor([2, 320, 128, 128])]; + tensor reshape_5_cast = reshape(shape = reshape_5_shape_0, x = real_div_1_cast)[name = tensor("reshape_5_cast")]; + tensor add_3_gamma_0_to_fp16 = const()[name = tensor("add_3_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8665216)))]; + tensor add_3_beta_0_to_fp16 = const()[name = tensor("add_3_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8665920)))]; + tensor add_3_epsilon_0_to_fp16 = const()[name = tensor("add_3_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_3_cast = batch_norm(beta = add_3_beta_0_to_fp16, epsilon = add_3_epsilon_0_to_fp16, gamma = add_3_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_5_cast)[name = tensor("add_3_cast")]; + tensor input_27_cast = silu(x = add_3_cast)[name = tensor("input_27_cast")]; + tensor var_210 = const()[name = tensor("op_210"), val = tensor([1, 1])]; + tensor var_212 = const()[name = tensor("op_212"), val = tensor([1, 1])]; + tensor hidden_states_3_pad_type_0 = const()[name = tensor("hidden_states_3_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_3_pad_0 = const()[name = tensor("hidden_states_3_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_0_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8666624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(9588288))), name = tensor("down_blocks_0_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([320, 320, 3, 3])]; + tensor down_blocks_0_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(9588864)))]; + tensor hidden_states_3_cast = conv(bias = down_blocks_0_resnets_0_conv2_bias_to_fp16, dilations = var_212, groups = var_172, pad = hidden_states_3_pad_0, pad_type = hidden_states_3_pad_type_0, strides = var_210, weight = down_blocks_0_resnets_0_conv2_weight_to_fp16_palettized, x = input_27_cast)[name = tensor("hidden_states_3_cast")]; + tensor input_29_cast = add(x = input_13_cast, y = hidden_states_3_cast)[name = tensor("input_29_cast")]; + tensor reshape_8_shape_0 = const()[name = tensor("reshape_8_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_8_cast = reshape(shape = reshape_8_shape_0, x = input_29_cast)[name = tensor("reshape_8_cast")]; + tensor reduce_mean_6_axes_0 = const()[name = tensor("reduce_mean_6_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_6_keep_dims_0 = const()[name = tensor("reduce_mean_6_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_6_cast = reduce_mean(axes = reduce_mean_6_axes_0, keep_dims = reduce_mean_6_keep_dims_0, x = reshape_8_cast)[name = tensor("reduce_mean_6_cast")]; + tensor sub_4_cast = sub(x = reshape_8_cast, y = reduce_mean_6_cast)[name = tensor("sub_4_cast")]; + tensor square_2_cast = square(x = sub_4_cast)[name = tensor("square_2_cast")]; + tensor reduce_mean_8_axes_0 = const()[name = tensor("reduce_mean_8_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_8_keep_dims_0 = const()[name = tensor("reduce_mean_8_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_8_cast = reduce_mean(axes = reduce_mean_8_axes_0, keep_dims = reduce_mean_8_keep_dims_0, x = square_2_cast)[name = 
tensor("reduce_mean_8_cast")]; + tensor add_4_y_0_to_fp16 = const()[name = tensor("add_4_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_4_cast = add(x = reduce_mean_8_cast, y = add_4_y_0_to_fp16)[name = tensor("add_4_cast")]; + tensor sqrt_2_cast = sqrt(x = add_4_cast)[name = tensor("sqrt_2_cast")]; + tensor real_div_2_cast = real_div(x = sub_4_cast, y = sqrt_2_cast)[name = tensor("real_div_2_cast")]; + tensor reshape_9_shape_0 = const()[name = tensor("reshape_9_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_9_cast = reshape(shape = reshape_9_shape_0, x = real_div_2_cast)[name = tensor("reshape_9_cast")]; + tensor add_5_gamma_0_to_fp16 = const()[name = tensor("add_5_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(9589568)))]; + tensor add_5_beta_0_to_fp16 = const()[name = tensor("add_5_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(9590272)))]; + tensor add_5_epsilon_0_to_fp16 = const()[name = tensor("add_5_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_5_cast = batch_norm(beta = add_5_beta_0_to_fp16, epsilon = add_5_epsilon_0_to_fp16, gamma = add_5_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_9_cast)[name = tensor("add_5_cast")]; + tensor input_33_cast = silu(x = add_5_cast)[name = tensor("input_33_cast")]; + tensor var_227 = const()[name = tensor("op_227"), val = tensor([1, 1])]; + tensor var_229 = const()[name = tensor("op_229"), val = tensor([1, 1])]; + tensor hidden_states_5_pad_type_0 = const()[name = tensor("hidden_states_5_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_5_pad_0 = const()[name = tensor("hidden_states_5_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_0_resnets_1_conv1_weight_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(9590976)))]; + tensor down_blocks_0_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11434240)))]; + tensor hidden_states_5_cast = conv(bias = down_blocks_0_resnets_1_conv1_bias_to_fp16, dilations = var_229, groups = var_172, pad = hidden_states_5_pad_0, pad_type = hidden_states_5_pad_type_0, strides = var_227, weight = down_blocks_0_resnets_1_conv1_weight_to_fp16, x = input_33_cast)[name = tensor("hidden_states_5_cast")]; + tensor var_235 = const()[name = tensor("op_235"), val = tensor([1, 1])]; + tensor var_237 = const()[name = tensor("op_237"), val = tensor([1, 1])]; + tensor temb_3_pad_type_0 = const()[name = tensor("temb_3_pad_type_0"), val = tensor("custom")]; + tensor temb_3_pad_0 = const()[name = tensor("temb_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11434944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11844608))), name = tensor("down_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([320, 1280, 1, 1])]; + tensor down_blocks_0_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(11845184)))]; + tensor temb_3_cast = conv(bias = down_blocks_0_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_237, groups = var_172, pad = temb_3_pad_0, pad_type = temb_3_pad_type_0, strides = var_235, weight = down_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_3_cast")]; + tensor input_37_cast = add(x = hidden_states_5_cast, y = temb_3_cast)[name = tensor("input_37_cast")]; + tensor reshape_12_shape_0 = const()[name = tensor("reshape_12_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_12_cast = reshape(shape = reshape_12_shape_0, x = input_37_cast)[name = tensor("reshape_12_cast")]; + tensor reduce_mean_9_axes_0 = const()[name = tensor("reduce_mean_9_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_9_keep_dims_0 = const()[name = tensor("reduce_mean_9_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_9_cast = reduce_mean(axes = reduce_mean_9_axes_0, keep_dims = reduce_mean_9_keep_dims_0, x = reshape_12_cast)[name = tensor("reduce_mean_9_cast")]; + tensor sub_6_cast = sub(x = reshape_12_cast, y = reduce_mean_9_cast)[name = tensor("sub_6_cast")]; + tensor square_3_cast = square(x = sub_6_cast)[name = tensor("square_3_cast")]; + tensor reduce_mean_11_axes_0 = const()[name = tensor("reduce_mean_11_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_11_keep_dims_0 = const()[name = tensor("reduce_mean_11_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_11_cast = reduce_mean(axes = reduce_mean_11_axes_0, keep_dims = reduce_mean_11_keep_dims_0, x = square_3_cast)[name = tensor("reduce_mean_11_cast")]; + tensor add_6_y_0_to_fp16 = const()[name = tensor("add_6_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_6_cast = add(x = reduce_mean_11_cast, y = add_6_y_0_to_fp16)[name = tensor("add_6_cast")]; + tensor sqrt_3_cast = sqrt(x = add_6_cast)[name = tensor("sqrt_3_cast")]; + tensor real_div_3_cast = real_div(x = sub_6_cast, y = sqrt_3_cast)[name = tensor("real_div_3_cast")]; + tensor reshape_13_shape_0 = const()[name = tensor("reshape_13_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_13_cast = reshape(shape = reshape_13_shape_0, x = real_div_3_cast)[name = tensor("reshape_13_cast")]; + tensor add_7_gamma_0_to_fp16 = const()[name = tensor("add_7_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11845888)))]; + tensor add_7_beta_0_to_fp16 = const()[name = tensor("add_7_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11846592)))]; + tensor add_7_epsilon_0_to_fp16 = const()[name = tensor("add_7_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_7_cast = batch_norm(beta = add_7_beta_0_to_fp16, epsilon = add_7_epsilon_0_to_fp16, gamma = add_7_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_13_cast)[name = tensor("add_7_cast")]; + tensor input_41_cast = silu(x = add_7_cast)[name = tensor("input_41_cast")]; + tensor var_247 = const()[name = tensor("op_247"), val = tensor([1, 1])]; + tensor var_249 = const()[name = tensor("op_249"), val = tensor([1, 1])]; + tensor hidden_states_7_pad_type_0 = const()[name = tensor("hidden_states_7_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_7_pad_0 = const()[name = tensor("hidden_states_7_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_0_resnets_1_conv2_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11847296))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(12768960))), name = tensor("down_blocks_0_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([320, 320, 3, 3])]; + tensor down_blocks_0_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(12769536)))]; + tensor hidden_states_7_cast = conv(bias = down_blocks_0_resnets_1_conv2_bias_to_fp16, dilations = var_249, groups = var_172, pad = hidden_states_7_pad_0, pad_type = hidden_states_7_pad_type_0, strides = var_247, weight = down_blocks_0_resnets_1_conv2_weight_to_fp16_palettized, x = input_41_cast)[name = tensor("hidden_states_7_cast")]; + tensor input_43_cast = add(x = input_29_cast, y = hidden_states_7_cast)[name = tensor("input_43_cast")]; + tensor var_256 = const()[name = tensor("op_256"), val = tensor([2, 2])]; + tensor var_258 = const()[name = tensor("op_258"), val = tensor([1, 1])]; + tensor input_45_pad_type_0 = const()[name = tensor("input_45_pad_type_0"), val = tensor("custom")]; + tensor input_45_pad_0 = const()[name = tensor("input_45_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_0_downsamplers_0_conv_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(12770240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(13691904))), name = tensor("down_blocks_0_downsamplers_0_conv_weight_to_fp16_palettized"), shape = tensor([320, 320, 3, 3])]; + tensor down_blocks_0_downsamplers_0_conv_bias_to_fp16 = const()[name = tensor("down_blocks_0_downsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(13692480)))]; + tensor input_45_cast = conv(bias = down_blocks_0_downsamplers_0_conv_bias_to_fp16, dilations = var_258, groups = var_172, pad = input_45_pad_0, pad_type = input_45_pad_type_0, strides = var_256, weight = down_blocks_0_downsamplers_0_conv_weight_to_fp16_palettized, x = input_43_cast)[name = tensor("input_45_cast")]; + tensor var_266 = const()[name = tensor("op_266"), val = tensor(3)]; + tensor var_277 = const()[name = tensor("op_277"), val = tensor(true)]; + tensor var_282 = const()[name = tensor("op_282"), val = tensor(1)]; + tensor reshape_16_shape_0 = const()[name = tensor("reshape_16_shape_0"), val = tensor([2, 32, 10, 64, 64])]; + tensor reshape_16_cast = reshape(shape = reshape_16_shape_0, x = input_45_cast)[name = tensor("reshape_16_cast")]; + tensor reduce_mean_12_axes_0 = const()[name = tensor("reduce_mean_12_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_12_keep_dims_0 = const()[name = tensor("reduce_mean_12_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_12_cast = reduce_mean(axes = reduce_mean_12_axes_0, keep_dims = reduce_mean_12_keep_dims_0, x = reshape_16_cast)[name = tensor("reduce_mean_12_cast")]; + tensor sub_8_cast = sub(x = reshape_16_cast, y = reduce_mean_12_cast)[name = tensor("sub_8_cast")]; + tensor square_4_cast = square(x = sub_8_cast)[name = tensor("square_4_cast")]; + tensor reduce_mean_14_axes_0 = const()[name = tensor("reduce_mean_14_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_14_keep_dims_0 = const()[name = tensor("reduce_mean_14_keep_dims_0"), val = tensor(true)]; 
+ tensor reduce_mean_14_cast = reduce_mean(axes = reduce_mean_14_axes_0, keep_dims = reduce_mean_14_keep_dims_0, x = square_4_cast)[name = tensor("reduce_mean_14_cast")]; + tensor add_8_y_0_to_fp16 = const()[name = tensor("add_8_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_8_cast = add(x = reduce_mean_14_cast, y = add_8_y_0_to_fp16)[name = tensor("add_8_cast")]; + tensor sqrt_4_cast = sqrt(x = add_8_cast)[name = tensor("sqrt_4_cast")]; + tensor real_div_4_cast = real_div(x = sub_8_cast, y = sqrt_4_cast)[name = tensor("real_div_4_cast")]; + tensor reshape_17_shape_0 = const()[name = tensor("reshape_17_shape_0"), val = tensor([2, 320, 64, 64])]; + tensor reshape_17_cast = reshape(shape = reshape_17_shape_0, x = real_div_4_cast)[name = tensor("reshape_17_cast")]; + tensor add_9_gamma_0_to_fp16 = const()[name = tensor("add_9_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(13693184)))]; + tensor add_9_beta_0_to_fp16 = const()[name = tensor("add_9_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(13693888)))]; + tensor add_9_epsilon_0_to_fp16 = const()[name = tensor("add_9_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_9_cast = batch_norm(beta = add_9_beta_0_to_fp16, epsilon = add_9_epsilon_0_to_fp16, gamma = add_9_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_17_cast)[name = tensor("add_9_cast")]; + tensor input_49_cast = silu(x = add_9_cast)[name = tensor("input_49_cast")]; + tensor var_305 = const()[name = tensor("op_305"), val = tensor([1, 1])]; + tensor var_307 = const()[name = tensor("op_307"), val = tensor([1, 1])]; + tensor hidden_states_9_pad_type_0 = const()[name = tensor("hidden_states_9_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_9_pad_0 = const()[name = tensor("hidden_states_9_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(13694592))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15077056))), name = tensor("down_blocks_1_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([640, 320, 3, 3])]; + tensor down_blocks_1_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15077248)))]; + tensor hidden_states_9_cast = conv(bias = down_blocks_1_resnets_0_conv1_bias_to_fp16, dilations = var_307, groups = var_282, pad = hidden_states_9_pad_0, pad_type = hidden_states_9_pad_type_0, strides = var_305, weight = down_blocks_1_resnets_0_conv1_weight_to_fp16_palettized, x = input_49_cast)[name = tensor("hidden_states_9_cast")]; + tensor var_313 = const()[name = tensor("op_313"), val = tensor([1, 1])]; + tensor var_315 = const()[name = tensor("op_315"), val = tensor([1, 1])]; + tensor temb_5_pad_type_0 = const()[name = tensor("temb_5_pad_type_0"), val = tensor("custom")]; + tensor temb_5_pad_0 = const()[name = tensor("temb_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15078592))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(15693056))), name = tensor("down_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor down_blocks_1_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15693248)))]; + tensor temb_5_cast = conv(bias = down_blocks_1_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_315, groups = var_282, pad = temb_5_pad_0, pad_type = temb_5_pad_type_0, strides = var_313, weight = down_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_5_cast")]; + tensor input_53_cast = add(x = hidden_states_9_cast, y = temb_5_cast)[name = tensor("input_53_cast")]; + tensor reshape_20_shape_0 = const()[name = tensor("reshape_20_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_20_cast = reshape(shape = reshape_20_shape_0, x = input_53_cast)[name = tensor("reshape_20_cast")]; + tensor reduce_mean_15_axes_0 = const()[name = tensor("reduce_mean_15_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_15_keep_dims_0 = const()[name = tensor("reduce_mean_15_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_15_cast = reduce_mean(axes = reduce_mean_15_axes_0, keep_dims = reduce_mean_15_keep_dims_0, x = reshape_20_cast)[name = tensor("reduce_mean_15_cast")]; + tensor sub_10_cast = sub(x = reshape_20_cast, y = reduce_mean_15_cast)[name = tensor("sub_10_cast")]; + tensor square_5_cast = square(x = sub_10_cast)[name = tensor("square_5_cast")]; + tensor reduce_mean_17_axes_0 = const()[name = tensor("reduce_mean_17_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_17_keep_dims_0 = const()[name = tensor("reduce_mean_17_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_17_cast = reduce_mean(axes = reduce_mean_17_axes_0, keep_dims = reduce_mean_17_keep_dims_0, x = square_5_cast)[name = tensor("reduce_mean_17_cast")]; + tensor add_10_y_0_to_fp16 = const()[name = tensor("add_10_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_10_cast = add(x = reduce_mean_17_cast, y = add_10_y_0_to_fp16)[name = tensor("add_10_cast")]; + tensor sqrt_5_cast = sqrt(x = add_10_cast)[name = tensor("sqrt_5_cast")]; + tensor real_div_5_cast = real_div(x = sub_10_cast, y = sqrt_5_cast)[name = tensor("real_div_5_cast")]; + tensor reshape_21_shape_0 = const()[name = tensor("reshape_21_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_21_cast = reshape(shape = reshape_21_shape_0, x = real_div_5_cast)[name = tensor("reshape_21_cast")]; + tensor add_11_mean_0_to_fp16 = const()[name = tensor("add_11_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15694592)))]; + tensor add_11_variance_0_to_fp16 = const()[name = tensor("add_11_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15695936)))]; + tensor add_11_gamma_0_to_fp16 = const()[name = tensor("add_11_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15697280)))]; + tensor add_11_beta_0_to_fp16 = const()[name = tensor("add_11_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15698624)))]; + tensor add_11_epsilon_0_to_fp16 = const()[name = tensor("add_11_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_11_cast = batch_norm(beta = add_11_beta_0_to_fp16, epsilon = 
add_11_epsilon_0_to_fp16, gamma = add_11_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_21_cast)[name = tensor("add_11_cast")]; + tensor input_57_cast = silu(x = add_11_cast)[name = tensor("input_57_cast")]; + tensor var_325 = const()[name = tensor("op_325"), val = tensor([1, 1])]; + tensor var_327 = const()[name = tensor("op_327"), val = tensor([1, 1])]; + tensor hidden_states_11_pad_type_0 = const()[name = tensor("hidden_states_11_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_11_pad_0 = const()[name = tensor("hidden_states_11_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15699968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18464832))), name = tensor("down_blocks_1_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor down_blocks_1_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18465024)))]; + tensor hidden_states_11_cast = conv(bias = down_blocks_1_resnets_0_conv2_bias_to_fp16, dilations = var_327, groups = var_282, pad = hidden_states_11_pad_0, pad_type = hidden_states_11_pad_type_0, strides = var_325, weight = down_blocks_1_resnets_0_conv2_weight_to_fp16_palettized, x = input_57_cast)[name = tensor("hidden_states_11_cast")]; + tensor var_332 = const()[name = tensor("op_332"), val = tensor([1, 1])]; + tensor var_334 = const()[name = tensor("op_334"), val = tensor([1, 1])]; + tensor x_1_pad_type_0 = const()[name = tensor("x_1_pad_type_0"), val = tensor("custom")]; + tensor x_1_pad_0 = const()[name = tensor("x_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18466368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18671232))), name = tensor("down_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([640, 320, 1, 1])]; + tensor down_blocks_1_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18671808)))]; + tensor x_1_cast = conv(bias = down_blocks_1_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_334, groups = var_282, pad = x_1_pad_0, pad_type = x_1_pad_type_0, strides = var_332, weight = down_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized, x = input_45_cast)[name = tensor("x_1_cast")]; + tensor hidden_states_13_cast = add(x = x_1_cast, y = hidden_states_11_cast)[name = tensor("hidden_states_13_cast")]; + tensor reshape_24_shape_0 = const()[name = tensor("reshape_24_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_24_cast = reshape(shape = reshape_24_shape_0, x = hidden_states_13_cast)[name = tensor("reshape_24_cast")]; + tensor reduce_mean_18_axes_0 = const()[name = tensor("reduce_mean_18_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_18_keep_dims_0 = const()[name = tensor("reduce_mean_18_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_18_cast = reduce_mean(axes = reduce_mean_18_axes_0, 
keep_dims = reduce_mean_18_keep_dims_0, x = reshape_24_cast)[name = tensor("reduce_mean_18_cast")]; + tensor sub_12_cast = sub(x = reshape_24_cast, y = reduce_mean_18_cast)[name = tensor("sub_12_cast")]; + tensor square_6_cast = square(x = sub_12_cast)[name = tensor("square_6_cast")]; + tensor reduce_mean_20_axes_0 = const()[name = tensor("reduce_mean_20_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_20_keep_dims_0 = const()[name = tensor("reduce_mean_20_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_20_cast = reduce_mean(axes = reduce_mean_20_axes_0, keep_dims = reduce_mean_20_keep_dims_0, x = square_6_cast)[name = tensor("reduce_mean_20_cast")]; + tensor add_12_y_0_to_fp16 = const()[name = tensor("add_12_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_12_cast = add(x = reduce_mean_20_cast, y = add_12_y_0_to_fp16)[name = tensor("add_12_cast")]; + tensor sqrt_6_cast = sqrt(x = add_12_cast)[name = tensor("sqrt_6_cast")]; + tensor real_div_6_cast = real_div(x = sub_12_cast, y = sqrt_6_cast)[name = tensor("real_div_6_cast")]; + tensor reshape_25_shape_0 = const()[name = tensor("reshape_25_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_25_cast = reshape(shape = reshape_25_shape_0, x = real_div_6_cast)[name = tensor("reshape_25_cast")]; + tensor add_13_gamma_0_to_fp16 = const()[name = tensor("add_13_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18673152)))]; + tensor add_13_beta_0_to_fp16 = const()[name = tensor("add_13_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18674496)))]; + tensor add_13_epsilon_0_to_fp16 = const()[name = tensor("add_13_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_13_cast = batch_norm(beta = add_13_beta_0_to_fp16, epsilon = add_13_epsilon_0_to_fp16, gamma = add_13_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_25_cast)[name = tensor("add_13_cast")]; + tensor var_356 = const()[name = tensor("op_356"), val = tensor([1, 1])]; + tensor var_358 = const()[name = tensor("op_358"), val = tensor([1, 1])]; + tensor hidden_states_15_pad_type_0 = const()[name = tensor("hidden_states_15_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_15_pad_0 = const()[name = tensor("hidden_states_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18675840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(19085504))), name = tensor("down_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(19086080)))]; + tensor hidden_states_15_cast = conv(bias = down_blocks_1_attentions_0_proj_in_bias_to_fp16, dilations = var_358, groups = var_282, pad = hidden_states_15_pad_0, pad_type = hidden_states_15_pad_type_0, strides = var_356, weight = down_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized, x = add_13_cast)[name = tensor("hidden_states_15_cast")]; + tensor var_363 = const()[name = tensor("op_363"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_1_cast = reshape(shape = var_363, x =
hidden_states_15_cast)[name = tensor("inputs_1_cast")]; + tensor var_373 = const()[name = tensor("op_373"), val = tensor([1])]; + tensor channels_mean_1_cast = reduce_mean(axes = var_373, keep_dims = var_277, x = inputs_1_cast)[name = tensor("channels_mean_1_cast")]; + tensor zero_mean_1_cast = sub(x = inputs_1_cast, y = channels_mean_1_cast)[name = tensor("zero_mean_1_cast")]; + tensor zero_mean_sq_1_cast = mul(x = zero_mean_1_cast, y = zero_mean_1_cast)[name = tensor("zero_mean_sq_1_cast")]; + tensor var_377 = const()[name = tensor("op_377"), val = tensor([1])]; + tensor var_378_cast = reduce_mean(axes = var_377, keep_dims = var_277, x = zero_mean_sq_1_cast)[name = tensor("op_378_cast")]; + tensor var_379_to_fp16 = const()[name = tensor("op_379_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_380_cast = add(x = var_378_cast, y = var_379_to_fp16)[name = tensor("op_380_cast")]; + tensor denom_1_epsilon_0_to_fp16 = const()[name = tensor("denom_1_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_1_cast = rsqrt(epsilon = denom_1_epsilon_0_to_fp16, x = var_380_cast)[name = tensor("denom_1_cast")]; + tensor out_1_cast = mul(x = zero_mean_1_cast, y = denom_1_cast)[name = tensor("out_1_cast")]; + tensor var_384_to_fp16 = const()[name = tensor("op_384_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(19087424)))]; + tensor var_385_cast = add(x = out_1_cast, y = var_384_to_fp16)[name = tensor("op_385_cast")]; + tensor var_387_to_fp16 = const()[name = tensor("op_387_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(19088768)))]; + tensor hidden_states_17_cast = mul(x = var_385_cast, y = var_387_to_fp16)[name = tensor("hidden_states_17_cast")]; + tensor var_394 = const()[name = tensor("op_394"), val = tensor([1, 1])]; + tensor var_396 = const()[name = tensor("op_396"), val = tensor([1, 1])]; + tensor q_1_pad_type_0 = const()[name = tensor("q_1_pad_type_0"), val = tensor("custom")]; + tensor q_1_pad_0 = const()[name = tensor("q_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(19090112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(19397376))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_1_cast = conv(dilations = var_396, groups = var_282, pad = q_1_pad_0, pad_type = q_1_pad_type_0, strides = var_394, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_17_cast)[name = tensor("q_1_cast")]; + tensor var_400 = const()[name = tensor("op_400"), val = tensor([1, 1])]; + tensor var_402 = const()[name = tensor("op_402"), val = tensor([1, 1])]; + tensor k_1_pad_type_0 = const()[name = tensor("k_1_pad_type_0"), val = tensor("custom")]; + tensor k_1_pad_0 = const()[name = tensor("k_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(19397568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(19704832))), name = 
tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_1_cast = conv(dilations = var_402, groups = var_282, pad = k_1_pad_0, pad_type = k_1_pad_type_0, strides = var_400, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_17_cast)[name = tensor("k_1_cast")]; + tensor var_406 = const()[name = tensor("op_406"), val = tensor([1, 1])]; + tensor var_408 = const()[name = tensor("op_408"), val = tensor([1, 1])]; + tensor v_1_pad_type_0 = const()[name = tensor("v_1_pad_type_0"), val = tensor("custom")]; + tensor v_1_pad_0 = const()[name = tensor("v_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(19705024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20012288))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_1_cast = conv(dilations = var_408, groups = var_282, pad = v_1_pad_0, pad_type = v_1_pad_type_0, strides = var_406, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_17_cast)[name = tensor("v_1_cast")]; + tensor var_412 = const()[name = tensor("op_412"), val = tensor([2, 10, 64, -1])]; + tensor var_413_cast = reshape(shape = var_412, x = q_1_cast)[name = tensor("op_413_cast")]; + tensor var_414 = const()[name = tensor("op_414"), val = tensor([2, 10, 64, -1])]; + tensor var_415_cast = reshape(shape = var_414, x = k_1_cast)[name = tensor("op_415_cast")]; + tensor var_416 = const()[name = tensor("op_416"), val = tensor([2, 10, 64, -1])]; + tensor var_417_cast = reshape(shape = var_416, x = v_1_cast)[name = tensor("op_417_cast")]; + tensor attn_weights_1_transpose_x_0 = const()[name = tensor("attn_weights_1_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_1_transpose_y_0 = const()[name = tensor("attn_weights_1_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_1_cast = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = var_413_cast, y = var_415_cast)[name = tensor("attn_weights_1_cast")]; + tensor var_273_to_fp16 = const()[name = tensor("op_273_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_3_cast = mul(x = attn_weights_1_cast, y = var_273_to_fp16)[name = tensor("attn_weights_3_cast")]; + tensor var_421_cast = softmax(axis = var_266, x = attn_weights_3_cast)[name = tensor("op_421_cast")]; + tensor attn_1_transpose_x_0 = const()[name = tensor("attn_1_transpose_x_0"), val = tensor(false)]; + tensor attn_1_transpose_y_0 = const()[name = tensor("attn_1_transpose_y_0"), val = tensor(true)]; + tensor attn_1_cast = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = var_417_cast, y = var_421_cast)[name = tensor("attn_1_cast")]; + tensor var_425 = const()[name = tensor("op_425"), val = tensor([2, 640, 1, -1])]; + tensor input_61_cast = reshape(shape = var_425, x = attn_1_cast)[name = tensor("input_61_cast")]; + tensor var_430 = const()[name = tensor("op_430"), val = tensor([1, 1])]; + tensor var_432 = const()[name = tensor("op_432"), val = tensor([1, 1])]; + tensor var_434_pad_type_0 = const()[name = tensor("op_434_pad_type_0"), val = 
tensor("custom")]; + tensor var_434_pad_0 = const()[name = tensor("op_434_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20012480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20319744))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20319936)))]; + tensor var_434_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_432, groups = var_282, pad = var_434_pad_0, pad_type = var_434_pad_type_0, strides = var_430, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_61_cast)[name = tensor("op_434_cast")]; + tensor inputs_3_cast = add(x = var_434_cast, y = inputs_1_cast)[name = tensor("inputs_3_cast")]; + tensor var_438 = const()[name = tensor("op_438"), val = tensor([1])]; + tensor channels_mean_3_cast = reduce_mean(axes = var_438, keep_dims = var_277, x = inputs_3_cast)[name = tensor("channels_mean_3_cast")]; + tensor zero_mean_3_cast = sub(x = inputs_3_cast, y = channels_mean_3_cast)[name = tensor("zero_mean_3_cast")]; + tensor zero_mean_sq_3_cast = mul(x = zero_mean_3_cast, y = zero_mean_3_cast)[name = tensor("zero_mean_sq_3_cast")]; + tensor var_442 = const()[name = tensor("op_442"), val = tensor([1])]; + tensor var_443_cast = reduce_mean(axes = var_442, keep_dims = var_277, x = zero_mean_sq_3_cast)[name = tensor("op_443_cast")]; + tensor var_444_to_fp16 = const()[name = tensor("op_444_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_445_cast = add(x = var_443_cast, y = var_444_to_fp16)[name = tensor("op_445_cast")]; + tensor denom_3_epsilon_0_to_fp16 = const()[name = tensor("denom_3_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_3_cast = rsqrt(epsilon = denom_3_epsilon_0_to_fp16, x = var_445_cast)[name = tensor("denom_3_cast")]; + tensor out_3_cast = mul(x = zero_mean_3_cast, y = denom_3_cast)[name = tensor("out_3_cast")]; + tensor var_449_to_fp16 = const()[name = tensor("op_449_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20321280)))]; + tensor var_450_cast = add(x = out_3_cast, y = var_449_to_fp16)[name = tensor("op_450_cast")]; + tensor var_452_to_fp16 = const()[name = tensor("op_452_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20322624)))]; + tensor hidden_states_19_cast = mul(x = var_450_cast, y = var_452_to_fp16)[name = tensor("hidden_states_19_cast")]; + tensor var_459 = const()[name = tensor("op_459"), val = tensor([1, 1])]; + tensor var_461 = const()[name = tensor("op_461"), val = tensor([1, 1])]; + tensor q_3_pad_type_0 = const()[name = tensor("q_3_pad_type_0"), val = tensor("custom")]; + tensor q_3_pad_0 = const()[name = tensor("q_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(20323968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20528832))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_3_cast = conv(dilations = var_461, groups = var_282, pad = q_3_pad_0, pad_type = q_3_pad_type_0, strides = var_459, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_19_cast)[name = tensor("q_3_cast")]; + tensor var_465 = const()[name = tensor("op_465"), val = tensor([1, 1])]; + tensor var_467 = const()[name = tensor("op_467"), val = tensor([1, 1])]; + tensor k_3_pad_type_0 = const()[name = tensor("k_3_pad_type_0"), val = tensor("custom")]; + tensor k_3_pad_0 = const()[name = tensor("k_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20528960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(21184384))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_3_cast = conv(dilations = var_467, groups = var_282, pad = k_3_pad_0, pad_type = k_3_pad_type_0, strides = var_465, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_3_cast")]; + tensor var_471 = const()[name = tensor("op_471"), val = tensor([1, 1])]; + tensor var_473 = const()[name = tensor("op_473"), val = tensor([1, 1])]; + tensor v_3_pad_type_0 = const()[name = tensor("v_3_pad_type_0"), val = tensor("custom")]; + tensor v_3_pad_0 = const()[name = tensor("v_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(21184512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(22167616))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_3_cast = conv(dilations = var_473, groups = var_282, pad = v_3_pad_0, pad_type = v_3_pad_type_0, strides = var_471, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_3_cast")]; + tensor var_477 = const()[name = tensor("op_477"), val = tensor([2, 10, 64, -1])]; + tensor var_478_cast = reshape(shape = var_477, x = q_3_cast)[name = tensor("op_478_cast")]; + tensor var_479 = const()[name = tensor("op_479"), val = tensor([2, 10, 64, -1])]; + tensor var_480_cast = reshape(shape = var_479, x = k_3_cast)[name = tensor("op_480_cast")]; + tensor var_481 = const()[name = tensor("op_481"), val = tensor([2, 10, 64, -1])]; + tensor var_482_cast = reshape(shape = var_481, x = v_3_cast)[name = tensor("op_482_cast")]; + tensor attn_weights_5_transpose_x_0 = const()[name = tensor("attn_weights_5_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_5_transpose_y_0 = const()[name = tensor("attn_weights_5_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_5_cast = matmul(transpose_x = 
attn_weights_5_transpose_x_0, transpose_y = attn_weights_5_transpose_y_0, x = var_478_cast, y = var_480_cast)[name = tensor("attn_weights_5_cast")]; + tensor attn_weights_7_cast = mul(x = attn_weights_5_cast, y = var_273_to_fp16)[name = tensor("attn_weights_7_cast")]; + tensor var_486_cast = softmax(axis = var_266, x = attn_weights_7_cast)[name = tensor("op_486_cast")]; + tensor attn_3_transpose_x_0 = const()[name = tensor("attn_3_transpose_x_0"), val = tensor(false)]; + tensor attn_3_transpose_y_0 = const()[name = tensor("attn_3_transpose_y_0"), val = tensor(true)]; + tensor attn_3_cast = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = var_482_cast, y = var_486_cast)[name = tensor("attn_3_cast")]; + tensor var_490 = const()[name = tensor("op_490"), val = tensor([2, 640, 1, -1])]; + tensor input_63_cast = reshape(shape = var_490, x = attn_3_cast)[name = tensor("input_63_cast")]; + tensor var_495 = const()[name = tensor("op_495"), val = tensor([1, 1])]; + tensor var_497 = const()[name = tensor("op_497"), val = tensor([1, 1])]; + tensor var_499_pad_type_0 = const()[name = tensor("op_499_pad_type_0"), val = tensor("custom")]; + tensor var_499_pad_0 = const()[name = tensor("op_499_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(22167808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(22475072))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(22475264)))]; + tensor var_499_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_497, groups = var_282, pad = var_499_pad_0, pad_type = var_499_pad_type_0, strides = var_495, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_63_cast)[name = tensor("op_499_cast")]; + tensor inputs_5_cast = add(x = var_499_cast, y = inputs_3_cast)[name = tensor("inputs_5_cast")]; + tensor var_503 = const()[name = tensor("op_503"), val = tensor([1])]; + tensor channels_mean_5_cast = reduce_mean(axes = var_503, keep_dims = var_277, x = inputs_5_cast)[name = tensor("channels_mean_5_cast")]; + tensor zero_mean_5_cast = sub(x = inputs_5_cast, y = channels_mean_5_cast)[name = tensor("zero_mean_5_cast")]; + tensor zero_mean_sq_5_cast = mul(x = zero_mean_5_cast, y = zero_mean_5_cast)[name = tensor("zero_mean_sq_5_cast")]; + tensor var_507 = const()[name = tensor("op_507"), val = tensor([1])]; + tensor var_508_cast = reduce_mean(axes = var_507, keep_dims = var_277, x = zero_mean_sq_5_cast)[name = tensor("op_508_cast")]; + tensor var_509_to_fp16 = const()[name = tensor("op_509_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_510_cast = add(x = var_508_cast, y = var_509_to_fp16)[name = tensor("op_510_cast")]; + tensor denom_5_epsilon_0_to_fp16 = const()[name = tensor("denom_5_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_5_cast = rsqrt(epsilon = denom_5_epsilon_0_to_fp16, x = var_510_cast)[name = 
tensor("denom_5_cast")]; + tensor out_5_cast = mul(x = zero_mean_5_cast, y = denom_5_cast)[name = tensor("out_5_cast")]; + tensor var_514_to_fp16 = const()[name = tensor("op_514_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(22476608)))]; + tensor var_515_cast = add(x = out_5_cast, y = var_514_to_fp16)[name = tensor("op_515_cast")]; + tensor var_517_to_fp16 = const()[name = tensor("op_517_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(22477952)))]; + tensor input_65_cast = mul(x = var_515_cast, y = var_517_to_fp16)[name = tensor("input_65_cast")]; + tensor var_525 = const()[name = tensor("op_525"), val = tensor([1, 1])]; + tensor var_527 = const()[name = tensor("op_527"), val = tensor([1, 1])]; + tensor var_529_pad_type_0 = const()[name = tensor("op_529_pad_type_0"), val = tensor("custom")]; + tensor var_529_pad_0 = const()[name = tensor("op_529_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(22479296))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25756160))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25756736)))]; + tensor var_529_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_527, groups = var_282, pad = var_529_pad_0, pad_type = var_529_pad_type_0, strides = var_525, weight = down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_65_cast)[name = tensor("op_529_cast")]; + tensor var_530_split_sizes_0 = const()[name = tensor("op_530_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_530_axis_0 = const()[name = tensor("op_530_axis_0"), val = tensor(1)]; + tensor var_530_cast_0, tensor var_530_cast_1 = split(axis = var_530_axis_0, split_sizes = var_530_split_sizes_0, x = var_529_cast)[name = tensor("op_530_cast")]; + tensor var_532_mode_0 = const()[name = tensor("op_532_mode_0"), val = tensor("EXACT")]; + tensor var_532_cast = gelu(mode = var_532_mode_0, x = var_530_cast_1)[name = tensor("op_532_cast")]; + tensor input_67_cast = mul(x = var_530_cast_0, y = var_532_cast)[name = tensor("input_67_cast")]; + tensor var_536 = const()[name = tensor("op_536"), val = tensor([1, 1])]; + tensor var_538 = const()[name = tensor("op_538"), val = tensor([1, 1])]; + tensor var_540_pad_type_0 = const()[name = tensor("op_540_pad_type_0"), val = tensor("custom")]; + tensor var_540_pad_0 = const()[name = tensor("op_540_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25767040))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(27405504))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = 
tensor([640, 2560, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(27406080)))]; + tensor var_540_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_538, groups = var_282, pad = var_540_pad_0, pad_type = var_540_pad_type_0, strides = var_536, weight = down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_67_cast)[name = tensor("op_540_cast")]; + tensor inputs_7_cast = add(x = var_540_cast, y = inputs_5_cast)[name = tensor("inputs_7_cast")]; + tensor var_550 = const()[name = tensor("op_550"), val = tensor([1])]; + tensor channels_mean_7_cast = reduce_mean(axes = var_550, keep_dims = var_277, x = inputs_7_cast)[name = tensor("channels_mean_7_cast")]; + tensor zero_mean_7_cast = sub(x = inputs_7_cast, y = channels_mean_7_cast)[name = tensor("zero_mean_7_cast")]; + tensor zero_mean_sq_7_cast = mul(x = zero_mean_7_cast, y = zero_mean_7_cast)[name = tensor("zero_mean_sq_7_cast")]; + tensor var_554 = const()[name = tensor("op_554"), val = tensor([1])]; + tensor var_555_cast = reduce_mean(axes = var_554, keep_dims = var_277, x = zero_mean_sq_7_cast)[name = tensor("op_555_cast")]; + tensor var_556_to_fp16 = const()[name = tensor("op_556_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_557_cast = add(x = var_555_cast, y = var_556_to_fp16)[name = tensor("op_557_cast")]; + tensor denom_7_epsilon_0_to_fp16 = const()[name = tensor("denom_7_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_7_cast = rsqrt(epsilon = denom_7_epsilon_0_to_fp16, x = var_557_cast)[name = tensor("denom_7_cast")]; + tensor out_7_cast = mul(x = zero_mean_7_cast, y = denom_7_cast)[name = tensor("out_7_cast")]; + tensor var_561_to_fp16 = const()[name = tensor("op_561_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(27407424)))]; + tensor var_562_cast = add(x = out_7_cast, y = var_561_to_fp16)[name = tensor("op_562_cast")]; + tensor var_564_to_fp16 = const()[name = tensor("op_564_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(27408768)))]; + tensor hidden_states_23_cast = mul(x = var_562_cast, y = var_564_to_fp16)[name = tensor("hidden_states_23_cast")]; + tensor var_571 = const()[name = tensor("op_571"), val = tensor([1, 1])]; + tensor var_573 = const()[name = tensor("op_573"), val = tensor([1, 1])]; + tensor q_5_pad_type_0 = const()[name = tensor("q_5_pad_type_0"), val = tensor("custom")]; + tensor q_5_pad_0 = const()[name = tensor("q_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(27410112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(27717376))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_5_cast = conv(dilations = var_573, groups = var_282, pad = q_5_pad_0, pad_type = q_5_pad_type_0, strides = var_571, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_23_cast)[name = tensor("q_5_cast")]; + tensor 
var_577 = const()[name = tensor("op_577"), val = tensor([1, 1])]; + tensor var_579 = const()[name = tensor("op_579"), val = tensor([1, 1])]; + tensor k_5_pad_type_0 = const()[name = tensor("k_5_pad_type_0"), val = tensor("custom")]; + tensor k_5_pad_0 = const()[name = tensor("k_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(27717568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28024832))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_5_cast = conv(dilations = var_579, groups = var_282, pad = k_5_pad_0, pad_type = k_5_pad_type_0, strides = var_577, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_23_cast)[name = tensor("k_5_cast")]; + tensor var_583 = const()[name = tensor("op_583"), val = tensor([1, 1])]; + tensor var_585 = const()[name = tensor("op_585"), val = tensor([1, 1])]; + tensor v_5_pad_type_0 = const()[name = tensor("v_5_pad_type_0"), val = tensor("custom")]; + tensor v_5_pad_0 = const()[name = tensor("v_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28025024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28332288))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_5_cast = conv(dilations = var_585, groups = var_282, pad = v_5_pad_0, pad_type = v_5_pad_type_0, strides = var_583, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_23_cast)[name = tensor("v_5_cast")]; + tensor var_589 = const()[name = tensor("op_589"), val = tensor([2, 10, 64, -1])]; + tensor var_590_cast = reshape(shape = var_589, x = q_5_cast)[name = tensor("op_590_cast")]; + tensor var_591 = const()[name = tensor("op_591"), val = tensor([2, 10, 64, -1])]; + tensor var_592_cast = reshape(shape = var_591, x = k_5_cast)[name = tensor("op_592_cast")]; + tensor var_593 = const()[name = tensor("op_593"), val = tensor([2, 10, 64, -1])]; + tensor var_594_cast = reshape(shape = var_593, x = v_5_cast)[name = tensor("op_594_cast")]; + tensor attn_weights_9_transpose_x_0 = const()[name = tensor("attn_weights_9_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_9_transpose_y_0 = const()[name = tensor("attn_weights_9_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_9_cast = matmul(transpose_x = attn_weights_9_transpose_x_0, transpose_y = attn_weights_9_transpose_y_0, x = var_590_cast, y = var_592_cast)[name = tensor("attn_weights_9_cast")]; + tensor attn_weights_11_cast = mul(x = attn_weights_9_cast, y = var_273_to_fp16)[name = tensor("attn_weights_11_cast")]; + tensor var_598_cast = softmax(axis = var_266, x = attn_weights_11_cast)[name = tensor("op_598_cast")]; + tensor attn_5_transpose_x_0 = const()[name = tensor("attn_5_transpose_x_0"), val = tensor(false)]; + tensor attn_5_transpose_y_0 = const()[name = tensor("attn_5_transpose_y_0"), val = tensor(true)]; + tensor attn_5_cast = 
matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = var_594_cast, y = var_598_cast)[name = tensor("attn_5_cast")]; + tensor var_602 = const()[name = tensor("op_602"), val = tensor([2, 640, 1, -1])]; + tensor input_69_cast = reshape(shape = var_602, x = attn_5_cast)[name = tensor("input_69_cast")]; + tensor var_607 = const()[name = tensor("op_607"), val = tensor([1, 1])]; + tensor var_609 = const()[name = tensor("op_609"), val = tensor([1, 1])]; + tensor var_611_pad_type_0 = const()[name = tensor("op_611_pad_type_0"), val = tensor("custom")]; + tensor var_611_pad_0 = const()[name = tensor("op_611_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28332480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28639744))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28639936)))]; + tensor var_611_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_609, groups = var_282, pad = var_611_pad_0, pad_type = var_611_pad_type_0, strides = var_607, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_69_cast)[name = tensor("op_611_cast")]; + tensor inputs_9_cast = add(x = var_611_cast, y = inputs_7_cast)[name = tensor("inputs_9_cast")]; + tensor var_615 = const()[name = tensor("op_615"), val = tensor([1])]; + tensor channels_mean_9_cast = reduce_mean(axes = var_615, keep_dims = var_277, x = inputs_9_cast)[name = tensor("channels_mean_9_cast")]; + tensor zero_mean_9_cast = sub(x = inputs_9_cast, y = channels_mean_9_cast)[name = tensor("zero_mean_9_cast")]; + tensor zero_mean_sq_9_cast = mul(x = zero_mean_9_cast, y = zero_mean_9_cast)[name = tensor("zero_mean_sq_9_cast")]; + tensor var_619 = const()[name = tensor("op_619"), val = tensor([1])]; + tensor var_620_cast = reduce_mean(axes = var_619, keep_dims = var_277, x = zero_mean_sq_9_cast)[name = tensor("op_620_cast")]; + tensor var_621_to_fp16 = const()[name = tensor("op_621_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_622_cast = add(x = var_620_cast, y = var_621_to_fp16)[name = tensor("op_622_cast")]; + tensor denom_9_epsilon_0_to_fp16 = const()[name = tensor("denom_9_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_9_cast = rsqrt(epsilon = denom_9_epsilon_0_to_fp16, x = var_622_cast)[name = tensor("denom_9_cast")]; + tensor out_9_cast = mul(x = zero_mean_9_cast, y = denom_9_cast)[name = tensor("out_9_cast")]; + tensor var_626_to_fp16 = const()[name = tensor("op_626_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28641280)))]; + tensor var_627_cast = add(x = out_9_cast, y = var_626_to_fp16)[name = tensor("op_627_cast")]; + tensor var_629_to_fp16 = const()[name = tensor("op_629_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28642624)))]; + tensor hidden_states_25_cast = mul(x = var_627_cast, 
y = var_629_to_fp16)[name = tensor("hidden_states_25_cast")]; + tensor var_636 = const()[name = tensor("op_636"), val = tensor([1, 1])]; + tensor var_638 = const()[name = tensor("op_638"), val = tensor([1, 1])]; + tensor q_7_pad_type_0 = const()[name = tensor("q_7_pad_type_0"), val = tensor("custom")]; + tensor q_7_pad_0 = const()[name = tensor("q_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28643968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28951232))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_7_cast = conv(dilations = var_638, groups = var_282, pad = q_7_pad_0, pad_type = q_7_pad_type_0, strides = var_636, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_25_cast)[name = tensor("q_7_cast")]; + tensor var_642 = const()[name = tensor("op_642"), val = tensor([1, 1])]; + tensor var_644 = const()[name = tensor("op_644"), val = tensor([1, 1])]; + tensor k_7_pad_type_0 = const()[name = tensor("k_7_pad_type_0"), val = tensor("custom")]; + tensor k_7_pad_0 = const()[name = tensor("k_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28951424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(29934528))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_7_cast = conv(dilations = var_644, groups = var_282, pad = k_7_pad_0, pad_type = k_7_pad_type_0, strides = var_642, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_7_cast")]; + tensor var_648 = const()[name = tensor("op_648"), val = tensor([1, 1])]; + tensor var_650 = const()[name = tensor("op_650"), val = tensor([1, 1])]; + tensor v_7_pad_type_0 = const()[name = tensor("v_7_pad_type_0"), val = tensor("custom")]; + tensor v_7_pad_0 = const()[name = tensor("v_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(29934720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(30917824))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_7_cast = conv(dilations = var_650, groups = var_282, pad = v_7_pad_0, pad_type = v_7_pad_type_0, strides = var_648, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_7_cast")]; + tensor var_654 = const()[name = tensor("op_654"), val = tensor([2, 10, 64, -1])]; + tensor var_655_cast = reshape(shape = var_654, x = q_7_cast)[name = tensor("op_655_cast")]; + tensor var_656 = const()[name = tensor("op_656"), val = tensor([2, 10, 64, -1])]; + tensor var_657_cast = 
reshape(shape = var_656, x = k_7_cast)[name = tensor("op_657_cast")]; + tensor var_658 = const()[name = tensor("op_658"), val = tensor([2, 10, 64, -1])]; + tensor var_659_cast = reshape(shape = var_658, x = v_7_cast)[name = tensor("op_659_cast")]; + tensor attn_weights_13_transpose_x_0 = const()[name = tensor("attn_weights_13_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_13_transpose_y_0 = const()[name = tensor("attn_weights_13_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_13_cast = matmul(transpose_x = attn_weights_13_transpose_x_0, transpose_y = attn_weights_13_transpose_y_0, x = var_655_cast, y = var_657_cast)[name = tensor("attn_weights_13_cast")]; + tensor attn_weights_15_cast = mul(x = attn_weights_13_cast, y = var_273_to_fp16)[name = tensor("attn_weights_15_cast")]; + tensor var_663_cast = softmax(axis = var_266, x = attn_weights_15_cast)[name = tensor("op_663_cast")]; + tensor attn_7_transpose_x_0 = const()[name = tensor("attn_7_transpose_x_0"), val = tensor(false)]; + tensor attn_7_transpose_y_0 = const()[name = tensor("attn_7_transpose_y_0"), val = tensor(true)]; + tensor attn_7_cast = matmul(transpose_x = attn_7_transpose_x_0, transpose_y = attn_7_transpose_y_0, x = var_659_cast, y = var_663_cast)[name = tensor("attn_7_cast")]; + tensor var_667 = const()[name = tensor("op_667"), val = tensor([2, 640, 1, -1])]; + tensor input_71_cast = reshape(shape = var_667, x = attn_7_cast)[name = tensor("input_71_cast")]; + tensor var_672 = const()[name = tensor("op_672"), val = tensor([1, 1])]; + tensor var_674 = const()[name = tensor("op_674"), val = tensor([1, 1])]; + tensor var_676_pad_type_0 = const()[name = tensor("op_676_pad_type_0"), val = tensor("custom")]; + tensor var_676_pad_0 = const()[name = tensor("op_676_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(30918016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(31225280))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(31225472)))]; + tensor var_676_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_674, groups = var_282, pad = var_676_pad_0, pad_type = var_676_pad_type_0, strides = var_672, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_71_cast)[name = tensor("op_676_cast")]; + tensor inputs_11_cast = add(x = var_676_cast, y = inputs_9_cast)[name = tensor("inputs_11_cast")]; + tensor var_680 = const()[name = tensor("op_680"), val = tensor([1])]; + tensor channels_mean_11_cast = reduce_mean(axes = var_680, keep_dims = var_277, x = inputs_11_cast)[name = tensor("channels_mean_11_cast")]; + tensor zero_mean_11_cast = sub(x = inputs_11_cast, y = channels_mean_11_cast)[name = tensor("zero_mean_11_cast")]; + tensor zero_mean_sq_11_cast = mul(x = zero_mean_11_cast, y = zero_mean_11_cast)[name = tensor("zero_mean_sq_11_cast")]; + tensor var_684 = const()[name = tensor("op_684"), 
val = tensor([1])]; + tensor var_685_cast = reduce_mean(axes = var_684, keep_dims = var_277, x = zero_mean_sq_11_cast)[name = tensor("op_685_cast")]; + tensor var_686_to_fp16 = const()[name = tensor("op_686_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_687_cast = add(x = var_685_cast, y = var_686_to_fp16)[name = tensor("op_687_cast")]; + tensor denom_11_epsilon_0_to_fp16 = const()[name = tensor("denom_11_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_11_cast = rsqrt(epsilon = denom_11_epsilon_0_to_fp16, x = var_687_cast)[name = tensor("denom_11_cast")]; + tensor out_11_cast = mul(x = zero_mean_11_cast, y = denom_11_cast)[name = tensor("out_11_cast")]; + tensor var_691_to_fp16 = const()[name = tensor("op_691_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(31226816)))]; + tensor var_692_cast = add(x = out_11_cast, y = var_691_to_fp16)[name = tensor("op_692_cast")]; + tensor var_694_to_fp16 = const()[name = tensor("op_694_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(31228160)))]; + tensor input_73_cast = mul(x = var_692_cast, y = var_694_to_fp16)[name = tensor("input_73_cast")]; + tensor var_702 = const()[name = tensor("op_702"), val = tensor([1, 1])]; + tensor var_704 = const()[name = tensor("op_704"), val = tensor([1, 1])]; + tensor var_706_pad_type_0 = const()[name = tensor("op_706_pad_type_0"), val = tensor("custom")]; + tensor var_706_pad_0 = const()[name = tensor("op_706_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(31229504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34506368))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34506944)))]; + tensor var_706_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_704, groups = var_282, pad = var_706_pad_0, pad_type = var_706_pad_type_0, strides = var_702, weight = down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_73_cast)[name = tensor("op_706_cast")]; + tensor var_707_split_sizes_0 = const()[name = tensor("op_707_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_707_axis_0 = const()[name = tensor("op_707_axis_0"), val = tensor(1)]; + tensor var_707_cast_0, tensor var_707_cast_1 = split(axis = var_707_axis_0, split_sizes = var_707_split_sizes_0, x = var_706_cast)[name = tensor("op_707_cast")]; + tensor var_709_mode_0 = const()[name = tensor("op_709_mode_0"), val = tensor("EXACT")]; + tensor var_709_cast = gelu(mode = var_709_mode_0, x = var_707_cast_1)[name = tensor("op_709_cast")]; + tensor input_75_cast = mul(x = var_707_cast_0, y = var_709_cast)[name = tensor("input_75_cast")]; + tensor var_713 = const()[name = tensor("op_713"), val = tensor([1, 1])]; + tensor var_715 = const()[name = tensor("op_715"), val = tensor([1, 1])]; + tensor var_717_pad_type_0 = const()[name = 
tensor("op_717_pad_type_0"), val = tensor("custom")]; + tensor var_717_pad_0 = const()[name = tensor("op_717_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34517248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35746112))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35746304)))]; + tensor var_717_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_715, groups = var_282, pad = var_717_pad_0, pad_type = var_717_pad_type_0, strides = var_713, weight = down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_75_cast)[name = tensor("op_717_cast")]; + tensor hidden_states_29_cast = add(x = var_717_cast, y = inputs_11_cast)[name = tensor("hidden_states_29_cast")]; + tensor var_719 = const()[name = tensor("op_719"), val = tensor([2, 640, 64, 64])]; + tensor input_77_cast = reshape(shape = var_719, x = hidden_states_29_cast)[name = tensor("input_77_cast")]; + tensor var_723 = const()[name = tensor("op_723"), val = tensor([1, 1])]; + tensor var_725 = const()[name = tensor("op_725"), val = tensor([1, 1])]; + tensor hidden_states_31_pad_type_0 = const()[name = tensor("hidden_states_31_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_31_pad_0 = const()[name = tensor("hidden_states_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35747648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36157312))), name = tensor("down_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36157888)))]; + tensor hidden_states_31_cast = conv(bias = down_blocks_1_attentions_0_proj_out_bias_to_fp16, dilations = var_725, groups = var_282, pad = hidden_states_31_pad_0, pad_type = hidden_states_31_pad_type_0, strides = var_723, weight = down_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized, x = input_77_cast)[name = tensor("hidden_states_31_cast")]; + tensor input_79_cast = add(x = hidden_states_31_cast, y = hidden_states_13_cast)[name = tensor("input_79_cast")]; + tensor reshape_28_shape_0 = const()[name = tensor("reshape_28_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_28_cast = reshape(shape = reshape_28_shape_0, x = input_79_cast)[name = tensor("reshape_28_cast")]; + tensor reduce_mean_21_axes_0 = const()[name = tensor("reduce_mean_21_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_21_keep_dims_0 = const()[name = tensor("reduce_mean_21_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_21_cast = reduce_mean(axes 
= reduce_mean_21_axes_0, keep_dims = reduce_mean_21_keep_dims_0, x = reshape_28_cast)[name = tensor("reduce_mean_21_cast")]; + tensor sub_14_cast = sub(x = reshape_28_cast, y = reduce_mean_21_cast)[name = tensor("sub_14_cast")]; + tensor square_7_cast = square(x = sub_14_cast)[name = tensor("square_7_cast")]; + tensor reduce_mean_23_axes_0 = const()[name = tensor("reduce_mean_23_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_23_keep_dims_0 = const()[name = tensor("reduce_mean_23_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_23_cast = reduce_mean(axes = reduce_mean_23_axes_0, keep_dims = reduce_mean_23_keep_dims_0, x = square_7_cast)[name = tensor("reduce_mean_23_cast")]; + tensor add_14_y_0_to_fp16 = const()[name = tensor("add_14_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_14_cast = add(x = reduce_mean_23_cast, y = add_14_y_0_to_fp16)[name = tensor("add_14_cast")]; + tensor sqrt_7_cast = sqrt(x = add_14_cast)[name = tensor("sqrt_7_cast")]; + tensor real_div_7_cast = real_div(x = sub_14_cast, y = sqrt_7_cast)[name = tensor("real_div_7_cast")]; + tensor reshape_29_shape_0 = const()[name = tensor("reshape_29_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_29_cast = reshape(shape = reshape_29_shape_0, x = real_div_7_cast)[name = tensor("reshape_29_cast")]; + tensor add_15_gamma_0_to_fp16 = const()[name = tensor("add_15_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36159232)))]; + tensor add_15_beta_0_to_fp16 = const()[name = tensor("add_15_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36160576)))]; + tensor add_15_epsilon_0_to_fp16 = const()[name = tensor("add_15_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_15_cast = batch_norm(beta = add_15_beta_0_to_fp16, epsilon = add_15_epsilon_0_to_fp16, gamma = add_15_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_29_cast)[name = tensor("add_15_cast")]; + tensor input_83_cast = silu(x = add_15_cast)[name = tensor("input_83_cast")]; + tensor var_740 = const()[name = tensor("op_740"), val = tensor([1, 1])]; + tensor var_742 = const()[name = tensor("op_742"), val = tensor([1, 1])]; + tensor hidden_states_33_pad_type_0 = const()[name = tensor("hidden_states_33_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_33_pad_0 = const()[name = tensor("hidden_states_33_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36161920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38926784))), name = tensor("down_blocks_1_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor down_blocks_1_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38926976)))]; + tensor hidden_states_33_cast = conv(bias = down_blocks_1_resnets_1_conv1_bias_to_fp16, dilations = var_742, groups = var_282, pad = hidden_states_33_pad_0, pad_type = hidden_states_33_pad_type_0, strides = var_740, weight = down_blocks_1_resnets_1_conv1_weight_to_fp16_palettized, x = input_83_cast)[name = tensor("hidden_states_33_cast")]; + tensor var_748 = const()[name = tensor("op_748"), val = tensor([1, 1])]; 
+ tensor var_750 = const()[name = tensor("op_750"), val = tensor([1, 1])]; + tensor temb_7_pad_type_0 = const()[name = tensor("temb_7_pad_type_0"), val = tensor("custom")]; + tensor temb_7_pad_0 = const()[name = tensor("temb_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38928320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(39542784))), name = tensor("down_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor down_blocks_1_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(39542976)))]; + tensor temb_7_cast = conv(bias = down_blocks_1_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_750, groups = var_282, pad = temb_7_pad_0, pad_type = temb_7_pad_type_0, strides = var_748, weight = down_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_7_cast")]; + tensor input_87_cast = add(x = hidden_states_33_cast, y = temb_7_cast)[name = tensor("input_87_cast")]; + tensor reshape_32_shape_0 = const()[name = tensor("reshape_32_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_32_cast = reshape(shape = reshape_32_shape_0, x = input_87_cast)[name = tensor("reshape_32_cast")]; + tensor reduce_mean_24_axes_0 = const()[name = tensor("reduce_mean_24_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_24_keep_dims_0 = const()[name = tensor("reduce_mean_24_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_24_cast = reduce_mean(axes = reduce_mean_24_axes_0, keep_dims = reduce_mean_24_keep_dims_0, x = reshape_32_cast)[name = tensor("reduce_mean_24_cast")]; + tensor sub_16_cast = sub(x = reshape_32_cast, y = reduce_mean_24_cast)[name = tensor("sub_16_cast")]; + tensor square_8_cast = square(x = sub_16_cast)[name = tensor("square_8_cast")]; + tensor reduce_mean_26_axes_0 = const()[name = tensor("reduce_mean_26_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_26_keep_dims_0 = const()[name = tensor("reduce_mean_26_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_26_cast = reduce_mean(axes = reduce_mean_26_axes_0, keep_dims = reduce_mean_26_keep_dims_0, x = square_8_cast)[name = tensor("reduce_mean_26_cast")]; + tensor add_16_y_0_to_fp16 = const()[name = tensor("add_16_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_16_cast = add(x = reduce_mean_26_cast, y = add_16_y_0_to_fp16)[name = tensor("add_16_cast")]; + tensor sqrt_8_cast = sqrt(x = add_16_cast)[name = tensor("sqrt_8_cast")]; + tensor real_div_8_cast = real_div(x = sub_16_cast, y = sqrt_8_cast)[name = tensor("real_div_8_cast")]; + tensor reshape_33_shape_0 = const()[name = tensor("reshape_33_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_33_cast = reshape(shape = reshape_33_shape_0, x = real_div_8_cast)[name = tensor("reshape_33_cast")]; + tensor add_17_gamma_0_to_fp16 = const()[name = tensor("add_17_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(39544320)))]; + tensor add_17_beta_0_to_fp16 = const()[name = tensor("add_17_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(39545664)))]; + tensor 
add_17_epsilon_0_to_fp16 = const()[name = tensor("add_17_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_17_cast = batch_norm(beta = add_17_beta_0_to_fp16, epsilon = add_17_epsilon_0_to_fp16, gamma = add_17_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_33_cast)[name = tensor("add_17_cast")]; + tensor input_91_cast = silu(x = add_17_cast)[name = tensor("input_91_cast")]; + tensor var_760 = const()[name = tensor("op_760"), val = tensor([1, 1])]; + tensor var_762 = const()[name = tensor("op_762"), val = tensor([1, 1])]; + tensor hidden_states_35_pad_type_0 = const()[name = tensor("hidden_states_35_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_35_pad_0 = const()[name = tensor("hidden_states_35_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(39547008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42311872))), name = tensor("down_blocks_1_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor down_blocks_1_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42312064)))]; + tensor hidden_states_35_cast = conv(bias = down_blocks_1_resnets_1_conv2_bias_to_fp16, dilations = var_762, groups = var_282, pad = hidden_states_35_pad_0, pad_type = hidden_states_35_pad_type_0, strides = var_760, weight = down_blocks_1_resnets_1_conv2_weight_to_fp16_palettized, x = input_91_cast)[name = tensor("hidden_states_35_cast")]; + tensor hidden_states_37_cast = add(x = input_79_cast, y = hidden_states_35_cast)[name = tensor("hidden_states_37_cast")]; + tensor reshape_36_shape_0 = const()[name = tensor("reshape_36_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_36_cast = reshape(shape = reshape_36_shape_0, x = hidden_states_37_cast)[name = tensor("reshape_36_cast")]; + tensor reduce_mean_27_axes_0 = const()[name = tensor("reduce_mean_27_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_27_keep_dims_0 = const()[name = tensor("reduce_mean_27_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_27_cast = reduce_mean(axes = reduce_mean_27_axes_0, keep_dims = reduce_mean_27_keep_dims_0, x = reshape_36_cast)[name = tensor("reduce_mean_27_cast")]; + tensor sub_18_cast = sub(x = reshape_36_cast, y = reduce_mean_27_cast)[name = tensor("sub_18_cast")]; + tensor square_9_cast = square(x = sub_18_cast)[name = tensor("square_9_cast")]; + tensor reduce_mean_29_axes_0 = const()[name = tensor("reduce_mean_29_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_29_keep_dims_0 = const()[name = tensor("reduce_mean_29_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_29_cast = reduce_mean(axes = reduce_mean_29_axes_0, keep_dims = reduce_mean_29_keep_dims_0, x = square_9_cast)[name = tensor("reduce_mean_29_cast")]; + tensor add_18_y_0_to_fp16 = const()[name = tensor("add_18_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_18_cast = add(x = reduce_mean_29_cast, y = add_18_y_0_to_fp16)[name = tensor("add_18_cast")]; + tensor sqrt_9_cast = sqrt(x = add_18_cast)[name = tensor("sqrt_9_cast")]; + tensor real_div_9_cast = real_div(x = sub_18_cast, y = sqrt_9_cast)[name = tensor("real_div_9_cast")]; + tensor reshape_37_shape_0 = const()[name = 
tensor("reshape_37_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_37_cast = reshape(shape = reshape_37_shape_0, x = real_div_9_cast)[name = tensor("reshape_37_cast")]; + tensor add_19_gamma_0_to_fp16 = const()[name = tensor("add_19_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42313408)))]; + tensor add_19_beta_0_to_fp16 = const()[name = tensor("add_19_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42314752)))]; + tensor add_19_epsilon_0_to_fp16 = const()[name = tensor("add_19_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_19_cast = batch_norm(beta = add_19_beta_0_to_fp16, epsilon = add_19_epsilon_0_to_fp16, gamma = add_19_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_37_cast)[name = tensor("add_19_cast")]; + tensor var_784 = const()[name = tensor("op_784"), val = tensor([1, 1])]; + tensor var_786 = const()[name = tensor("op_786"), val = tensor([1, 1])]; + tensor hidden_states_39_pad_type_0 = const()[name = tensor("hidden_states_39_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_39_pad_0 = const()[name = tensor("hidden_states_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42316096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42623360))), name = tensor("down_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_proj_in_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42623552)))]; + tensor hidden_states_39_cast = conv(bias = down_blocks_1_attentions_1_proj_in_bias_to_fp16, dilations = var_786, groups = var_282, pad = hidden_states_39_pad_0, pad_type = hidden_states_39_pad_type_0, strides = var_784, weight = down_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized, x = add_19_cast)[name = tensor("hidden_states_39_cast")]; + tensor var_791 = const()[name = tensor("op_791"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_13_cast = reshape(shape = var_791, x = hidden_states_39_cast)[name = tensor("inputs_13_cast")]; + tensor var_801 = const()[name = tensor("op_801"), val = tensor([1])]; + tensor channels_mean_13_cast = reduce_mean(axes = var_801, keep_dims = var_277, x = inputs_13_cast)[name = tensor("channels_mean_13_cast")]; + tensor zero_mean_13_cast = sub(x = inputs_13_cast, y = channels_mean_13_cast)[name = tensor("zero_mean_13_cast")]; + tensor zero_mean_sq_13_cast = mul(x = zero_mean_13_cast, y = zero_mean_13_cast)[name = tensor("zero_mean_sq_13_cast")]; + tensor var_805 = const()[name = tensor("op_805"), val = tensor([1])]; + tensor var_806_cast = reduce_mean(axes = var_805, keep_dims = var_277, x = zero_mean_sq_13_cast)[name = tensor("op_806_cast")]; + tensor var_807_to_fp16 = const()[name = tensor("op_807_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_808_cast = add(x = var_806_cast, y = var_807_to_fp16)[name = tensor("op_808_cast")]; + tensor denom_13_epsilon_0_to_fp16 = const()[name = tensor("denom_13_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_13_cast = rsqrt(epsilon = denom_13_epsilon_0_to_fp16, x = var_808_cast)[name = 
tensor("denom_13_cast")]; + tensor out_13_cast = mul(x = zero_mean_13_cast, y = denom_13_cast)[name = tensor("out_13_cast")]; + tensor var_812_to_fp16 = const()[name = tensor("op_812_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42624896)))]; + tensor var_813_cast = add(x = out_13_cast, y = var_812_to_fp16)[name = tensor("op_813_cast")]; + tensor var_815_to_fp16 = const()[name = tensor("op_815_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42626240)))]; + tensor hidden_states_41_cast = mul(x = var_813_cast, y = var_815_to_fp16)[name = tensor("hidden_states_41_cast")]; + tensor var_822 = const()[name = tensor("op_822"), val = tensor([1, 1])]; + tensor var_824 = const()[name = tensor("op_824"), val = tensor([1, 1])]; + tensor q_9_pad_type_0 = const()[name = tensor("q_9_pad_type_0"), val = tensor("custom")]; + tensor q_9_pad_0 = const()[name = tensor("q_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42627584))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42934848))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_9_cast = conv(dilations = var_824, groups = var_282, pad = q_9_pad_0, pad_type = q_9_pad_type_0, strides = var_822, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_41_cast)[name = tensor("q_9_cast")]; + tensor var_828 = const()[name = tensor("op_828"), val = tensor([1, 1])]; + tensor var_830 = const()[name = tensor("op_830"), val = tensor([1, 1])]; + tensor k_9_pad_type_0 = const()[name = tensor("k_9_pad_type_0"), val = tensor("custom")]; + tensor k_9_pad_0 = const()[name = tensor("k_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42935040))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43242304))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_9_cast = conv(dilations = var_830, groups = var_282, pad = k_9_pad_0, pad_type = k_9_pad_type_0, strides = var_828, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_41_cast)[name = tensor("k_9_cast")]; + tensor var_834 = const()[name = tensor("op_834"), val = tensor([1, 1])]; + tensor var_836 = const()[name = tensor("op_836"), val = tensor([1, 1])]; + tensor v_9_pad_type_0 = const()[name = tensor("v_9_pad_type_0"), val = tensor("custom")]; + tensor v_9_pad_0 = const()[name = tensor("v_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43242496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43549760))), name = 
tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_9_cast = conv(dilations = var_836, groups = var_282, pad = v_9_pad_0, pad_type = v_9_pad_type_0, strides = var_834, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_41_cast)[name = tensor("v_9_cast")]; + tensor var_840 = const()[name = tensor("op_840"), val = tensor([2, 10, 64, -1])]; + tensor var_841_cast = reshape(shape = var_840, x = q_9_cast)[name = tensor("op_841_cast")]; + tensor var_842 = const()[name = tensor("op_842"), val = tensor([2, 10, 64, -1])]; + tensor var_843_cast = reshape(shape = var_842, x = k_9_cast)[name = tensor("op_843_cast")]; + tensor var_844 = const()[name = tensor("op_844"), val = tensor([2, 10, 64, -1])]; + tensor var_845_cast = reshape(shape = var_844, x = v_9_cast)[name = tensor("op_845_cast")]; + tensor attn_weights_17_transpose_x_0 = const()[name = tensor("attn_weights_17_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_17_transpose_y_0 = const()[name = tensor("attn_weights_17_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_17_cast = matmul(transpose_x = attn_weights_17_transpose_x_0, transpose_y = attn_weights_17_transpose_y_0, x = var_841_cast, y = var_843_cast)[name = tensor("attn_weights_17_cast")]; + tensor attn_weights_19_cast = mul(x = attn_weights_17_cast, y = var_273_to_fp16)[name = tensor("attn_weights_19_cast")]; + tensor var_849_cast = softmax(axis = var_266, x = attn_weights_19_cast)[name = tensor("op_849_cast")]; + tensor attn_9_transpose_x_0 = const()[name = tensor("attn_9_transpose_x_0"), val = tensor(false)]; + tensor attn_9_transpose_y_0 = const()[name = tensor("attn_9_transpose_y_0"), val = tensor(true)]; + tensor attn_9_cast = matmul(transpose_x = attn_9_transpose_x_0, transpose_y = attn_9_transpose_y_0, x = var_845_cast, y = var_849_cast)[name = tensor("attn_9_cast")]; + tensor var_853 = const()[name = tensor("op_853"), val = tensor([2, 640, 1, -1])]; + tensor input_95_cast = reshape(shape = var_853, x = attn_9_cast)[name = tensor("input_95_cast")]; + tensor var_858 = const()[name = tensor("op_858"), val = tensor([1, 1])]; + tensor var_860 = const()[name = tensor("op_860"), val = tensor([1, 1])]; + tensor var_862_pad_type_0 = const()[name = tensor("op_862_pad_type_0"), val = tensor("custom")]; + tensor var_862_pad_0 = const()[name = tensor("op_862_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43549952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43857216))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43857408)))]; + tensor var_862_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_860, groups = var_282, pad = var_862_pad_0, pad_type = var_862_pad_type_0, strides = var_858, weight = 
down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_95_cast)[name = tensor("op_862_cast")]; + tensor inputs_15_cast = add(x = var_862_cast, y = inputs_13_cast)[name = tensor("inputs_15_cast")]; + tensor var_866 = const()[name = tensor("op_866"), val = tensor([1])]; + tensor channels_mean_15_cast = reduce_mean(axes = var_866, keep_dims = var_277, x = inputs_15_cast)[name = tensor("channels_mean_15_cast")]; + tensor zero_mean_15_cast = sub(x = inputs_15_cast, y = channels_mean_15_cast)[name = tensor("zero_mean_15_cast")]; + tensor zero_mean_sq_15_cast = mul(x = zero_mean_15_cast, y = zero_mean_15_cast)[name = tensor("zero_mean_sq_15_cast")]; + tensor var_870 = const()[name = tensor("op_870"), val = tensor([1])]; + tensor var_871_cast = reduce_mean(axes = var_870, keep_dims = var_277, x = zero_mean_sq_15_cast)[name = tensor("op_871_cast")]; + tensor var_872_to_fp16 = const()[name = tensor("op_872_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_873_cast = add(x = var_871_cast, y = var_872_to_fp16)[name = tensor("op_873_cast")]; + tensor denom_15_epsilon_0_to_fp16 = const()[name = tensor("denom_15_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_15_cast = rsqrt(epsilon = denom_15_epsilon_0_to_fp16, x = var_873_cast)[name = tensor("denom_15_cast")]; + tensor out_15_cast = mul(x = zero_mean_15_cast, y = denom_15_cast)[name = tensor("out_15_cast")]; + tensor var_877_to_fp16 = const()[name = tensor("op_877_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43858752)))]; + tensor var_878_cast = add(x = out_15_cast, y = var_877_to_fp16)[name = tensor("op_878_cast")]; + tensor var_880_to_fp16 = const()[name = tensor("op_880_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43860096)))]; + tensor hidden_states_43_cast = mul(x = var_878_cast, y = var_880_to_fp16)[name = tensor("hidden_states_43_cast")]; + tensor var_887 = const()[name = tensor("op_887"), val = tensor([1, 1])]; + tensor var_889 = const()[name = tensor("op_889"), val = tensor([1, 1])]; + tensor q_11_pad_type_0 = const()[name = tensor("q_11_pad_type_0"), val = tensor("custom")]; + tensor q_11_pad_0 = const()[name = tensor("q_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43861440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(44066304))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_11_cast = conv(dilations = var_889, groups = var_282, pad = q_11_pad_0, pad_type = q_11_pad_type_0, strides = var_887, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_43_cast)[name = tensor("q_11_cast")]; + tensor var_893 = const()[name = tensor("op_893"), val = tensor([1, 1])]; + tensor var_895 = const()[name = tensor("op_895"), val = tensor([1, 1])]; + tensor k_11_pad_type_0 = const()[name = tensor("k_11_pad_type_0"), val = tensor("custom")]; + tensor k_11_pad_0 = const()[name = tensor("k_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(44066432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(44721856))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_11_cast = conv(dilations = var_895, groups = var_282, pad = k_11_pad_0, pad_type = k_11_pad_type_0, strides = var_893, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_11_cast")]; + tensor var_899 = const()[name = tensor("op_899"), val = tensor([1, 1])]; + tensor var_901 = const()[name = tensor("op_901"), val = tensor([1, 1])]; + tensor v_11_pad_type_0 = const()[name = tensor("v_11_pad_type_0"), val = tensor("custom")]; + tensor v_11_pad_0 = const()[name = tensor("v_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(44721984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45377408))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_11_cast = conv(dilations = var_901, groups = var_282, pad = v_11_pad_0, pad_type = v_11_pad_type_0, strides = var_899, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_11_cast")]; + tensor var_905 = const()[name = tensor("op_905"), val = tensor([2, 10, 64, -1])]; + tensor var_906_cast = reshape(shape = var_905, x = q_11_cast)[name = tensor("op_906_cast")]; + tensor var_907 = const()[name = tensor("op_907"), val = tensor([2, 10, 64, -1])]; + tensor var_908_cast = reshape(shape = var_907, x = k_11_cast)[name = tensor("op_908_cast")]; + tensor var_909 = const()[name = tensor("op_909"), val = tensor([2, 10, 64, -1])]; + tensor var_910_cast = reshape(shape = var_909, x = v_11_cast)[name = tensor("op_910_cast")]; + tensor attn_weights_21_transpose_x_0 = const()[name = tensor("attn_weights_21_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_21_transpose_y_0 = const()[name = tensor("attn_weights_21_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_21_cast = matmul(transpose_x = attn_weights_21_transpose_x_0, transpose_y = attn_weights_21_transpose_y_0, x = var_906_cast, y = var_908_cast)[name = tensor("attn_weights_21_cast")]; + tensor attn_weights_23_cast = mul(x = attn_weights_21_cast, y = var_273_to_fp16)[name = tensor("attn_weights_23_cast")]; + tensor var_914_cast = softmax(axis = var_266, x = attn_weights_23_cast)[name = tensor("op_914_cast")]; + tensor attn_11_transpose_x_0 = const()[name = tensor("attn_11_transpose_x_0"), val = tensor(false)]; + tensor attn_11_transpose_y_0 = const()[name = tensor("attn_11_transpose_y_0"), val = tensor(true)]; + tensor attn_11_cast = matmul(transpose_x = attn_11_transpose_x_0, transpose_y = attn_11_transpose_y_0, x = var_910_cast, y = var_914_cast)[name = tensor("attn_11_cast")]; + tensor var_918 = const()[name = tensor("op_918"), val = tensor([2, 640, 1, -1])]; + tensor input_97_cast = reshape(shape = var_918, x = attn_11_cast)[name = tensor("input_97_cast")]; + tensor var_923 = const()[name = tensor("op_923"), val = tensor([1, 1])]; + tensor var_925 = const()[name = 
tensor("op_925"), val = tensor([1, 1])]; + tensor var_927_pad_type_0 = const()[name = tensor("op_927_pad_type_0"), val = tensor("custom")]; + tensor var_927_pad_0 = const()[name = tensor("op_927_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45377536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45582400))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45582528)))]; + tensor var_927_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_925, groups = var_282, pad = var_927_pad_0, pad_type = var_927_pad_type_0, strides = var_923, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_97_cast)[name = tensor("op_927_cast")]; + tensor inputs_17_cast = add(x = var_927_cast, y = inputs_15_cast)[name = tensor("inputs_17_cast")]; + tensor var_931 = const()[name = tensor("op_931"), val = tensor([1])]; + tensor channels_mean_17_cast = reduce_mean(axes = var_931, keep_dims = var_277, x = inputs_17_cast)[name = tensor("channels_mean_17_cast")]; + tensor zero_mean_17_cast = sub(x = inputs_17_cast, y = channels_mean_17_cast)[name = tensor("zero_mean_17_cast")]; + tensor zero_mean_sq_17_cast = mul(x = zero_mean_17_cast, y = zero_mean_17_cast)[name = tensor("zero_mean_sq_17_cast")]; + tensor var_935 = const()[name = tensor("op_935"), val = tensor([1])]; + tensor var_936_cast = reduce_mean(axes = var_935, keep_dims = var_277, x = zero_mean_sq_17_cast)[name = tensor("op_936_cast")]; + tensor var_937_to_fp16 = const()[name = tensor("op_937_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_938_cast = add(x = var_936_cast, y = var_937_to_fp16)[name = tensor("op_938_cast")]; + tensor denom_17_epsilon_0_to_fp16 = const()[name = tensor("denom_17_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_17_cast = rsqrt(epsilon = denom_17_epsilon_0_to_fp16, x = var_938_cast)[name = tensor("denom_17_cast")]; + tensor out_17_cast = mul(x = zero_mean_17_cast, y = denom_17_cast)[name = tensor("out_17_cast")]; + tensor var_942_to_fp16 = const()[name = tensor("op_942_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45583872)))]; + tensor var_943_cast = add(x = out_17_cast, y = var_942_to_fp16)[name = tensor("op_943_cast")]; + tensor var_945_to_fp16 = const()[name = tensor("op_945_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45585216)))]; + tensor input_99_cast = mul(x = var_943_cast, y = var_945_to_fp16)[name = tensor("input_99_cast")]; + tensor var_953 = const()[name = tensor("op_953"), val = tensor([1, 1])]; + tensor var_955 = const()[name = tensor("op_955"), val = tensor([1, 1])]; + tensor var_957_pad_type_0 = const()[name = tensor("op_957_pad_type_0"), val = tensor("custom")]; + tensor var_957_pad_0 = const()[name = tensor("op_957_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45586560))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(48044224))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(48044416)))]; + tensor var_957_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_955, groups = var_282, pad = var_957_pad_0, pad_type = var_957_pad_type_0, strides = var_953, weight = down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_99_cast)[name = tensor("op_957_cast")]; + tensor var_958_split_sizes_0 = const()[name = tensor("op_958_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_958_axis_0 = const()[name = tensor("op_958_axis_0"), val = tensor(1)]; + tensor var_958_cast_0, tensor var_958_cast_1 = split(axis = var_958_axis_0, split_sizes = var_958_split_sizes_0, x = var_957_cast)[name = tensor("op_958_cast")]; + tensor var_960_mode_0 = const()[name = tensor("op_960_mode_0"), val = tensor("EXACT")]; + tensor var_960_cast = gelu(mode = var_960_mode_0, x = var_958_cast_1)[name = tensor("op_960_cast")]; + tensor input_101_cast = mul(x = var_958_cast_0, y = var_960_cast)[name = tensor("input_101_cast")]; + tensor var_964 = const()[name = tensor("op_964"), val = tensor([1, 1])]; + tensor var_966 = const()[name = tensor("op_966"), val = tensor([1, 1])]; + tensor var_968_pad_type_0 = const()[name = tensor("op_968_pad_type_0"), val = tensor("custom")]; + tensor var_968_pad_0 = const()[name = tensor("op_968_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(48054720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(49283584))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(49283776)))]; + tensor var_968_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_966, groups = var_282, pad = var_968_pad_0, pad_type = var_968_pad_type_0, strides = var_964, weight = down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_101_cast)[name = tensor("op_968_cast")]; + tensor inputs_19_cast = add(x = var_968_cast, y = inputs_17_cast)[name = tensor("inputs_19_cast")]; + tensor var_978 = const()[name = tensor("op_978"), val = tensor([1])]; + tensor channels_mean_19_cast = reduce_mean(axes = var_978, keep_dims = var_277, x = inputs_19_cast)[name = tensor("channels_mean_19_cast")]; + tensor 
zero_mean_19_cast = sub(x = inputs_19_cast, y = channels_mean_19_cast)[name = tensor("zero_mean_19_cast")]; + tensor zero_mean_sq_19_cast = mul(x = zero_mean_19_cast, y = zero_mean_19_cast)[name = tensor("zero_mean_sq_19_cast")]; + tensor var_982 = const()[name = tensor("op_982"), val = tensor([1])]; + tensor var_983_cast = reduce_mean(axes = var_982, keep_dims = var_277, x = zero_mean_sq_19_cast)[name = tensor("op_983_cast")]; + tensor var_984_to_fp16 = const()[name = tensor("op_984_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_985_cast = add(x = var_983_cast, y = var_984_to_fp16)[name = tensor("op_985_cast")]; + tensor denom_19_epsilon_0_to_fp16 = const()[name = tensor("denom_19_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_19_cast = rsqrt(epsilon = denom_19_epsilon_0_to_fp16, x = var_985_cast)[name = tensor("denom_19_cast")]; + tensor out_19_cast = mul(x = zero_mean_19_cast, y = denom_19_cast)[name = tensor("out_19_cast")]; + tensor var_989_to_fp16 = const()[name = tensor("op_989_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(49285120)))]; + tensor var_990_cast = add(x = out_19_cast, y = var_989_to_fp16)[name = tensor("op_990_cast")]; + tensor var_992_to_fp16 = const()[name = tensor("op_992_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(49286464)))]; + tensor hidden_states_47_cast = mul(x = var_990_cast, y = var_992_to_fp16)[name = tensor("hidden_states_47_cast")]; + tensor var_999 = const()[name = tensor("op_999"), val = tensor([1, 1])]; + tensor var_1001 = const()[name = tensor("op_1001"), val = tensor([1, 1])]; + tensor q_13_pad_type_0 = const()[name = tensor("q_13_pad_type_0"), val = tensor("custom")]; + tensor q_13_pad_0 = const()[name = tensor("q_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(49287808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(49492672))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_13_cast = conv(dilations = var_1001, groups = var_282, pad = q_13_pad_0, pad_type = q_13_pad_type_0, strides = var_999, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_47_cast)[name = tensor("q_13_cast")]; + tensor var_1005 = const()[name = tensor("op_1005"), val = tensor([1, 1])]; + tensor var_1007 = const()[name = tensor("op_1007"), val = tensor([1, 1])]; + tensor k_13_pad_type_0 = const()[name = tensor("k_13_pad_type_0"), val = tensor("custom")]; + tensor k_13_pad_0 = const()[name = tensor("k_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(49492800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(49697664))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_13_cast = conv(dilations = var_1007, groups = var_282, pad = k_13_pad_0, pad_type = k_13_pad_type_0, strides = var_1005, weight = 
down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_47_cast)[name = tensor("k_13_cast")]; + tensor var_1011 = const()[name = tensor("op_1011"), val = tensor([1, 1])]; + tensor var_1013 = const()[name = tensor("op_1013"), val = tensor([1, 1])]; + tensor v_13_pad_type_0 = const()[name = tensor("v_13_pad_type_0"), val = tensor("custom")]; + tensor v_13_pad_0 = const()[name = tensor("v_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(49697792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(50005056))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_13_cast = conv(dilations = var_1013, groups = var_282, pad = v_13_pad_0, pad_type = v_13_pad_type_0, strides = var_1011, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_47_cast)[name = tensor("v_13_cast")]; + tensor var_1017 = const()[name = tensor("op_1017"), val = tensor([2, 10, 64, -1])]; + tensor var_1018_cast = reshape(shape = var_1017, x = q_13_cast)[name = tensor("op_1018_cast")]; + tensor var_1019 = const()[name = tensor("op_1019"), val = tensor([2, 10, 64, -1])]; + tensor var_1020_cast = reshape(shape = var_1019, x = k_13_cast)[name = tensor("op_1020_cast")]; + tensor var_1021 = const()[name = tensor("op_1021"), val = tensor([2, 10, 64, -1])]; + tensor var_1022_cast = reshape(shape = var_1021, x = v_13_cast)[name = tensor("op_1022_cast")]; + tensor attn_weights_25_transpose_x_0 = const()[name = tensor("attn_weights_25_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_25_transpose_y_0 = const()[name = tensor("attn_weights_25_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_25_cast = matmul(transpose_x = attn_weights_25_transpose_x_0, transpose_y = attn_weights_25_transpose_y_0, x = var_1018_cast, y = var_1020_cast)[name = tensor("attn_weights_25_cast")]; + tensor attn_weights_27_cast = mul(x = attn_weights_25_cast, y = var_273_to_fp16)[name = tensor("attn_weights_27_cast")]; + tensor var_1026_cast = softmax(axis = var_266, x = attn_weights_27_cast)[name = tensor("op_1026_cast")]; + tensor attn_13_transpose_x_0 = const()[name = tensor("attn_13_transpose_x_0"), val = tensor(false)]; + tensor attn_13_transpose_y_0 = const()[name = tensor("attn_13_transpose_y_0"), val = tensor(true)]; + tensor attn_13_cast = matmul(transpose_x = attn_13_transpose_x_0, transpose_y = attn_13_transpose_y_0, x = var_1022_cast, y = var_1026_cast)[name = tensor("attn_13_cast")]; + tensor var_1030 = const()[name = tensor("op_1030"), val = tensor([2, 640, 1, -1])]; + tensor input_103_cast = reshape(shape = var_1030, x = attn_13_cast)[name = tensor("input_103_cast")]; + tensor var_1035 = const()[name = tensor("op_1035"), val = tensor([1, 1])]; + tensor var_1037 = const()[name = tensor("op_1037"), val = tensor([1, 1])]; + tensor var_1039_pad_type_0 = const()[name = tensor("op_1039_pad_type_0"), val = tensor("custom")]; + tensor var_1039_pad_0 = const()[name = tensor("op_1039_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(50005248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(50312512))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(50312704)))]; + tensor var_1039_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_1037, groups = var_282, pad = var_1039_pad_0, pad_type = var_1039_pad_type_0, strides = var_1035, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_103_cast)[name = tensor("op_1039_cast")]; + tensor inputs_21_cast = add(x = var_1039_cast, y = inputs_19_cast)[name = tensor("inputs_21_cast")]; + tensor var_1043 = const()[name = tensor("op_1043"), val = tensor([1])]; + tensor channels_mean_21_cast = reduce_mean(axes = var_1043, keep_dims = var_277, x = inputs_21_cast)[name = tensor("channels_mean_21_cast")]; + tensor zero_mean_21_cast = sub(x = inputs_21_cast, y = channels_mean_21_cast)[name = tensor("zero_mean_21_cast")]; + tensor zero_mean_sq_21_cast = mul(x = zero_mean_21_cast, y = zero_mean_21_cast)[name = tensor("zero_mean_sq_21_cast")]; + tensor var_1047 = const()[name = tensor("op_1047"), val = tensor([1])]; + tensor var_1048_cast = reduce_mean(axes = var_1047, keep_dims = var_277, x = zero_mean_sq_21_cast)[name = tensor("op_1048_cast")]; + tensor var_1049_to_fp16 = const()[name = tensor("op_1049_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1050_cast = add(x = var_1048_cast, y = var_1049_to_fp16)[name = tensor("op_1050_cast")]; + tensor denom_21_epsilon_0_to_fp16 = const()[name = tensor("denom_21_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_21_cast = rsqrt(epsilon = denom_21_epsilon_0_to_fp16, x = var_1050_cast)[name = tensor("denom_21_cast")]; + tensor out_21_cast = mul(x = zero_mean_21_cast, y = denom_21_cast)[name = tensor("out_21_cast")]; + tensor var_1054_to_fp16 = const()[name = tensor("op_1054_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(50314048)))]; + tensor var_1055_cast = add(x = out_21_cast, y = var_1054_to_fp16)[name = tensor("op_1055_cast")]; + tensor var_1057_to_fp16 = const()[name = tensor("op_1057_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(50315392)))]; + tensor hidden_states_49_cast = mul(x = var_1055_cast, y = var_1057_to_fp16)[name = tensor("hidden_states_49_cast")]; + tensor var_1064 = const()[name = tensor("op_1064"), val = tensor([1, 1])]; + tensor var_1066 = const()[name = tensor("op_1066"), val = tensor([1, 1])]; + tensor q_15_pad_type_0 = const()[name = tensor("q_15_pad_type_0"), val = tensor("custom")]; + tensor q_15_pad_0 = const()[name = tensor("q_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(50316736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(50521600))), name = 
tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_15_cast = conv(dilations = var_1066, groups = var_282, pad = q_15_pad_0, pad_type = q_15_pad_type_0, strides = var_1064, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_49_cast)[name = tensor("q_15_cast")]; + tensor var_1070 = const()[name = tensor("op_1070"), val = tensor([1, 1])]; + tensor var_1072 = const()[name = tensor("op_1072"), val = tensor([1, 1])]; + tensor k_15_pad_type_0 = const()[name = tensor("k_15_pad_type_0"), val = tensor("custom")]; + tensor k_15_pad_0 = const()[name = tensor("k_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(50521728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51177152))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_15_cast = conv(dilations = var_1072, groups = var_282, pad = k_15_pad_0, pad_type = k_15_pad_type_0, strides = var_1070, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_15_cast")]; + tensor var_1076 = const()[name = tensor("op_1076"), val = tensor([1, 1])]; + tensor var_1078 = const()[name = tensor("op_1078"), val = tensor([1, 1])]; + tensor v_15_pad_type_0 = const()[name = tensor("v_15_pad_type_0"), val = tensor("custom")]; + tensor v_15_pad_0 = const()[name = tensor("v_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51177280))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51832704))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_15_cast = conv(dilations = var_1078, groups = var_282, pad = v_15_pad_0, pad_type = v_15_pad_type_0, strides = var_1076, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_15_cast")]; + tensor var_1082 = const()[name = tensor("op_1082"), val = tensor([2, 10, 64, -1])]; + tensor var_1083_cast = reshape(shape = var_1082, x = q_15_cast)[name = tensor("op_1083_cast")]; + tensor var_1084 = const()[name = tensor("op_1084"), val = tensor([2, 10, 64, -1])]; + tensor var_1085_cast = reshape(shape = var_1084, x = k_15_cast)[name = tensor("op_1085_cast")]; + tensor var_1086 = const()[name = tensor("op_1086"), val = tensor([2, 10, 64, -1])]; + tensor var_1087_cast = reshape(shape = var_1086, x = v_15_cast)[name = tensor("op_1087_cast")]; + tensor attn_weights_29_transpose_x_0 = const()[name = tensor("attn_weights_29_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_29_transpose_y_0 = const()[name = tensor("attn_weights_29_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_29_cast = matmul(transpose_x = attn_weights_29_transpose_x_0, transpose_y = attn_weights_29_transpose_y_0, x = var_1083_cast, y = var_1085_cast)[name = 
tensor("attn_weights_29_cast")]; + tensor attn_weights_31_cast = mul(x = attn_weights_29_cast, y = var_273_to_fp16)[name = tensor("attn_weights_31_cast")]; + tensor var_1091_cast = softmax(axis = var_266, x = attn_weights_31_cast)[name = tensor("op_1091_cast")]; + tensor attn_15_transpose_x_0 = const()[name = tensor("attn_15_transpose_x_0"), val = tensor(false)]; + tensor attn_15_transpose_y_0 = const()[name = tensor("attn_15_transpose_y_0"), val = tensor(true)]; + tensor attn_15_cast = matmul(transpose_x = attn_15_transpose_x_0, transpose_y = attn_15_transpose_y_0, x = var_1087_cast, y = var_1091_cast)[name = tensor("attn_15_cast")]; + tensor var_1095 = const()[name = tensor("op_1095"), val = tensor([2, 640, 1, -1])]; + tensor input_105_cast = reshape(shape = var_1095, x = attn_15_cast)[name = tensor("input_105_cast")]; + tensor var_1100 = const()[name = tensor("op_1100"), val = tensor([1, 1])]; + tensor var_1102 = const()[name = tensor("op_1102"), val = tensor([1, 1])]; + tensor var_1104_pad_type_0 = const()[name = tensor("op_1104_pad_type_0"), val = tensor("custom")]; + tensor var_1104_pad_0 = const()[name = tensor("op_1104_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51832832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52037696))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52037824)))]; + tensor var_1104_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_1102, groups = var_282, pad = var_1104_pad_0, pad_type = var_1104_pad_type_0, strides = var_1100, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_105_cast)[name = tensor("op_1104_cast")]; + tensor inputs_23_cast = add(x = var_1104_cast, y = inputs_21_cast)[name = tensor("inputs_23_cast")]; + tensor var_1108 = const()[name = tensor("op_1108"), val = tensor([1])]; + tensor channels_mean_23_cast = reduce_mean(axes = var_1108, keep_dims = var_277, x = inputs_23_cast)[name = tensor("channels_mean_23_cast")]; + tensor zero_mean_23_cast = sub(x = inputs_23_cast, y = channels_mean_23_cast)[name = tensor("zero_mean_23_cast")]; + tensor zero_mean_sq_23_cast = mul(x = zero_mean_23_cast, y = zero_mean_23_cast)[name = tensor("zero_mean_sq_23_cast")]; + tensor var_1112 = const()[name = tensor("op_1112"), val = tensor([1])]; + tensor var_1113_cast = reduce_mean(axes = var_1112, keep_dims = var_277, x = zero_mean_sq_23_cast)[name = tensor("op_1113_cast")]; + tensor var_1114_to_fp16 = const()[name = tensor("op_1114_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1115_cast = add(x = var_1113_cast, y = var_1114_to_fp16)[name = tensor("op_1115_cast")]; + tensor denom_23_epsilon_0_to_fp16 = const()[name = tensor("denom_23_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_23_cast = rsqrt(epsilon = denom_23_epsilon_0_to_fp16, x = var_1115_cast)[name = tensor("denom_23_cast")]; + tensor out_23_cast = mul(x = 
zero_mean_23_cast, y = denom_23_cast)[name = tensor("out_23_cast")]; + tensor var_1119_to_fp16 = const()[name = tensor("op_1119_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52039168)))]; + tensor var_1120_cast = add(x = out_23_cast, y = var_1119_to_fp16)[name = tensor("op_1120_cast")]; + tensor var_1122_to_fp16 = const()[name = tensor("op_1122_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52040512)))]; + tensor input_107_cast = mul(x = var_1120_cast, y = var_1122_to_fp16)[name = tensor("input_107_cast")]; + tensor var_1130 = const()[name = tensor("op_1130"), val = tensor([1, 1])]; + tensor var_1132 = const()[name = tensor("op_1132"), val = tensor([1, 1])]; + tensor var_1134_pad_type_0 = const()[name = tensor("op_1134_pad_type_0"), val = tensor("custom")]; + tensor var_1134_pad_0 = const()[name = tensor("op_1134_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52041856))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(54499520))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(54499712)))]; + tensor var_1134_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_1132, groups = var_282, pad = var_1134_pad_0, pad_type = var_1134_pad_type_0, strides = var_1130, weight = down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_107_cast)[name = tensor("op_1134_cast")]; + tensor var_1135_split_sizes_0 = const()[name = tensor("op_1135_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_1135_axis_0 = const()[name = tensor("op_1135_axis_0"), val = tensor(1)]; + tensor var_1135_cast_0, tensor var_1135_cast_1 = split(axis = var_1135_axis_0, split_sizes = var_1135_split_sizes_0, x = var_1134_cast)[name = tensor("op_1135_cast")]; + tensor var_1137_mode_0 = const()[name = tensor("op_1137_mode_0"), val = tensor("EXACT")]; + tensor var_1137_cast = gelu(mode = var_1137_mode_0, x = var_1135_cast_1)[name = tensor("op_1137_cast")]; + tensor input_109_cast = mul(x = var_1135_cast_0, y = var_1137_cast)[name = tensor("input_109_cast")]; + tensor var_1141 = const()[name = tensor("op_1141"), val = tensor([1, 1])]; + tensor var_1143 = const()[name = tensor("op_1143"), val = tensor([1, 1])]; + tensor var_1145_pad_type_0 = const()[name = tensor("op_1145_pad_type_0"), val = tensor("custom")]; + tensor var_1145_pad_0 = const()[name = tensor("op_1145_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(54510016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55738880))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = 
tensor([640, 2560, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55739072)))]; + tensor var_1145_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_1143, groups = var_282, pad = var_1145_pad_0, pad_type = var_1145_pad_type_0, strides = var_1141, weight = down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_109_cast)[name = tensor("op_1145_cast")]; + tensor hidden_states_53_cast = add(x = var_1145_cast, y = inputs_23_cast)[name = tensor("hidden_states_53_cast")]; + tensor var_1147 = const()[name = tensor("op_1147"), val = tensor([2, 640, 64, 64])]; + tensor input_111_cast = reshape(shape = var_1147, x = hidden_states_53_cast)[name = tensor("input_111_cast")]; + tensor var_1151 = const()[name = tensor("op_1151"), val = tensor([1, 1])]; + tensor var_1153 = const()[name = tensor("op_1153"), val = tensor([1, 1])]; + tensor hidden_states_55_pad_type_0 = const()[name = tensor("hidden_states_55_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_55_pad_0 = const()[name = tensor("hidden_states_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55740416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(56047680))), name = tensor("down_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(56047872)))]; + tensor hidden_states_55_cast = conv(bias = down_blocks_1_attentions_1_proj_out_bias_to_fp16, dilations = var_1153, groups = var_282, pad = hidden_states_55_pad_0, pad_type = hidden_states_55_pad_type_0, strides = var_1151, weight = down_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized, x = input_111_cast)[name = tensor("hidden_states_55_cast")]; + tensor input_113_cast = add(x = hidden_states_55_cast, y = hidden_states_37_cast)[name = tensor("input_113_cast")]; + tensor var_1160 = const()[name = tensor("op_1160"), val = tensor([2, 2])]; + tensor var_1162 = const()[name = tensor("op_1162"), val = tensor([1, 1])]; + tensor input_115_pad_type_0 = const()[name = tensor("input_115_pad_type_0"), val = tensor("custom")]; + tensor input_115_pad_0 = const()[name = tensor("input_115_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_downsamplers_0_conv_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(56049216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(58814080))), name = tensor("down_blocks_1_downsamplers_0_conv_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor down_blocks_1_downsamplers_0_conv_bias_to_fp16 = const()[name = tensor("down_blocks_1_downsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(58814272)))]; + tensor input_115_cast = conv(bias = 
down_blocks_1_downsamplers_0_conv_bias_to_fp16, dilations = var_1162, groups = var_282, pad = input_115_pad_0, pad_type = input_115_pad_type_0, strides = var_1160, weight = down_blocks_1_downsamplers_0_conv_weight_to_fp16_palettized, x = input_113_cast)[name = tensor("input_115_cast")]; + tensor var_1170 = const()[name = tensor("op_1170"), val = tensor(3)]; + tensor var_1181 = const()[name = tensor("op_1181"), val = tensor(true)]; + tensor var_1186 = const()[name = tensor("op_1186"), val = tensor(1)]; + tensor reshape_40_shape_0 = const()[name = tensor("reshape_40_shape_0"), val = tensor([2, 32, 20, 32, 32])]; + tensor reshape_40_cast = reshape(shape = reshape_40_shape_0, x = input_115_cast)[name = tensor("reshape_40_cast")]; + tensor reduce_mean_30_axes_0 = const()[name = tensor("reduce_mean_30_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_30_keep_dims_0 = const()[name = tensor("reduce_mean_30_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_30_cast = reduce_mean(axes = reduce_mean_30_axes_0, keep_dims = reduce_mean_30_keep_dims_0, x = reshape_40_cast)[name = tensor("reduce_mean_30_cast")]; + tensor sub_20_cast = sub(x = reshape_40_cast, y = reduce_mean_30_cast)[name = tensor("sub_20_cast")]; + tensor square_10_cast = square(x = sub_20_cast)[name = tensor("square_10_cast")]; + tensor reduce_mean_32_axes_0 = const()[name = tensor("reduce_mean_32_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_32_keep_dims_0 = const()[name = tensor("reduce_mean_32_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_32_cast = reduce_mean(axes = reduce_mean_32_axes_0, keep_dims = reduce_mean_32_keep_dims_0, x = square_10_cast)[name = tensor("reduce_mean_32_cast")]; + tensor add_20_y_0_to_fp16 = const()[name = tensor("add_20_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_20_cast = add(x = reduce_mean_32_cast, y = add_20_y_0_to_fp16)[name = tensor("add_20_cast")]; + tensor sqrt_10_cast = sqrt(x = add_20_cast)[name = tensor("sqrt_10_cast")]; + tensor real_div_10_cast = real_div(x = sub_20_cast, y = sqrt_10_cast)[name = tensor("real_div_10_cast")]; + tensor reshape_41_shape_0 = const()[name = tensor("reshape_41_shape_0"), val = tensor([2, 640, 32, 32])]; + tensor reshape_41_cast = reshape(shape = reshape_41_shape_0, x = real_div_10_cast)[name = tensor("reshape_41_cast")]; + tensor add_21_gamma_0_to_fp16 = const()[name = tensor("add_21_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(58815616)))]; + tensor add_21_beta_0_to_fp16 = const()[name = tensor("add_21_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(58816960)))]; + tensor add_21_epsilon_0_to_fp16 = const()[name = tensor("add_21_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_21_cast = batch_norm(beta = add_21_beta_0_to_fp16, epsilon = add_21_epsilon_0_to_fp16, gamma = add_21_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_41_cast)[name = tensor("add_21_cast")]; + tensor input_119_cast = silu(x = add_21_cast)[name = tensor("input_119_cast")]; + tensor var_1207 = const()[name = tensor("op_1207"), val = tensor([1, 1])]; + tensor var_1209 = const()[name = tensor("op_1209"), val = tensor([1, 1])]; + tensor hidden_states_57_pad_type_0 = const()[name = tensor("hidden_states_57_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_57_pad_0 = const()[name = tensor("hidden_states_57_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor 
down_blocks_2_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(58818304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(62504768))), name = tensor("down_blocks_2_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 640, 3, 3])]; + tensor down_blocks_2_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(62504896)))]; + tensor hidden_states_57_cast = conv(bias = down_blocks_2_resnets_0_conv1_bias_to_fp16, dilations = var_1209, groups = var_1186, pad = hidden_states_57_pad_0, pad_type = hidden_states_57_pad_type_0, strides = var_1207, weight = down_blocks_2_resnets_0_conv1_weight_to_fp16_palettized, x = input_119_cast)[name = tensor("hidden_states_57_cast")]; + tensor var_1215 = const()[name = tensor("op_1215"), val = tensor([1, 1])]; + tensor var_1217 = const()[name = tensor("op_1217"), val = tensor([1, 1])]; + tensor temb_9_pad_type_0 = const()[name = tensor("temb_9_pad_type_0"), val = tensor("custom")]; + tensor temb_9_pad_0 = const()[name = tensor("temb_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(62507520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63326784))), name = tensor("down_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63326912)))]; + tensor temb_9_cast = conv(bias = down_blocks_2_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_1217, groups = var_1186, pad = temb_9_pad_0, pad_type = temb_9_pad_type_0, strides = var_1215, weight = down_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_9_cast")]; + tensor input_123_cast = add(x = hidden_states_57_cast, y = temb_9_cast)[name = tensor("input_123_cast")]; + tensor reshape_44_shape_0 = const()[name = tensor("reshape_44_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_44_cast = reshape(shape = reshape_44_shape_0, x = input_123_cast)[name = tensor("reshape_44_cast")]; + tensor reduce_mean_33_axes_0 = const()[name = tensor("reduce_mean_33_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_33_keep_dims_0 = const()[name = tensor("reduce_mean_33_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_33_cast = reduce_mean(axes = reduce_mean_33_axes_0, keep_dims = reduce_mean_33_keep_dims_0, x = reshape_44_cast)[name = tensor("reduce_mean_33_cast")]; + tensor sub_22_cast = sub(x = reshape_44_cast, y = reduce_mean_33_cast)[name = tensor("sub_22_cast")]; + tensor square_11_cast = square(x = sub_22_cast)[name = tensor("square_11_cast")]; + tensor reduce_mean_35_axes_0 = const()[name = tensor("reduce_mean_35_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_35_keep_dims_0 = const()[name = tensor("reduce_mean_35_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_35_cast = reduce_mean(axes = reduce_mean_35_axes_0, keep_dims = reduce_mean_35_keep_dims_0, x = 
square_11_cast)[name = tensor("reduce_mean_35_cast")]; + tensor add_22_y_0_to_fp16 = const()[name = tensor("add_22_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_22_cast = add(x = reduce_mean_35_cast, y = add_22_y_0_to_fp16)[name = tensor("add_22_cast")]; + tensor sqrt_11_cast = sqrt(x = add_22_cast)[name = tensor("sqrt_11_cast")]; + tensor real_div_11_cast = real_div(x = sub_22_cast, y = sqrt_11_cast)[name = tensor("real_div_11_cast")]; + tensor reshape_45_shape_0 = const()[name = tensor("reshape_45_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_45_cast = reshape(shape = reshape_45_shape_0, x = real_div_11_cast)[name = tensor("reshape_45_cast")]; + tensor add_23_mean_0_to_fp16 = const()[name = tensor("add_23_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63329536)))]; + tensor add_23_variance_0_to_fp16 = const()[name = tensor("add_23_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63332160)))]; + tensor add_23_gamma_0_to_fp16 = const()[name = tensor("add_23_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63334784)))]; + tensor add_23_beta_0_to_fp16 = const()[name = tensor("add_23_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63337408)))]; + tensor add_23_epsilon_0_to_fp16 = const()[name = tensor("add_23_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_23_cast = batch_norm(beta = add_23_beta_0_to_fp16, epsilon = add_23_epsilon_0_to_fp16, gamma = add_23_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_45_cast)[name = tensor("add_23_cast")]; + tensor input_127_cast = silu(x = add_23_cast)[name = tensor("input_127_cast")]; + tensor var_1227 = const()[name = tensor("op_1227"), val = tensor([1, 1])]; + tensor var_1229 = const()[name = tensor("op_1229"), val = tensor([1, 1])]; + tensor hidden_states_59_pad_type_0 = const()[name = tensor("hidden_states_59_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_59_pad_0 = const()[name = tensor("hidden_states_59_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_2_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63340032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(70712896))), name = tensor("down_blocks_2_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor down_blocks_2_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(70713024)))]; + tensor hidden_states_59_cast = conv(bias = down_blocks_2_resnets_0_conv2_bias_to_fp16, dilations = var_1229, groups = var_1186, pad = hidden_states_59_pad_0, pad_type = hidden_states_59_pad_type_0, strides = var_1227, weight = down_blocks_2_resnets_0_conv2_weight_to_fp16_palettized, x = input_127_cast)[name = tensor("hidden_states_59_cast")]; + tensor var_1234 = const()[name = tensor("op_1234"), val = tensor([1, 1])]; + tensor var_1236 = const()[name = tensor("op_1236"), val = tensor([1, 1])]; + tensor x_3_pad_type_0 = const()[name = tensor("x_3_pad_type_0"), val = tensor("custom")]; + tensor x_3_pad_0 = const()[name = tensor("x_3_pad_0"), val = tensor([0, 0, 0, 
0])]; + tensor down_blocks_2_resnets_0_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(70715648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(71330112))), name = tensor("down_blocks_2_resnets_0_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([1280, 640, 1, 1])]; + tensor down_blocks_2_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(71330304)))]; + tensor x_3_cast = conv(bias = down_blocks_2_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_1236, groups = var_1186, pad = x_3_pad_0, pad_type = x_3_pad_type_0, strides = var_1234, weight = down_blocks_2_resnets_0_conv_shortcut_weight_to_fp16_palettized, x = input_115_cast)[name = tensor("x_3_cast")]; + tensor hidden_states_61_cast = add(x = x_3_cast, y = hidden_states_59_cast)[name = tensor("hidden_states_61_cast")]; + tensor reshape_48_shape_0 = const()[name = tensor("reshape_48_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_48_cast = reshape(shape = reshape_48_shape_0, x = hidden_states_61_cast)[name = tensor("reshape_48_cast")]; + tensor reduce_mean_36_axes_0 = const()[name = tensor("reduce_mean_36_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_36_keep_dims_0 = const()[name = tensor("reduce_mean_36_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_36_cast = reduce_mean(axes = reduce_mean_36_axes_0, keep_dims = reduce_mean_36_keep_dims_0, x = reshape_48_cast)[name = tensor("reduce_mean_36_cast")]; + tensor sub_24_cast = sub(x = reshape_48_cast, y = reduce_mean_36_cast)[name = tensor("sub_24_cast")]; + tensor square_12_cast = square(x = sub_24_cast)[name = tensor("square_12_cast")]; + tensor reduce_mean_38_axes_0 = const()[name = tensor("reduce_mean_38_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_38_keep_dims_0 = const()[name = tensor("reduce_mean_38_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_38_cast = reduce_mean(axes = reduce_mean_38_axes_0, keep_dims = reduce_mean_38_keep_dims_0, x = square_12_cast)[name = tensor("reduce_mean_38_cast")]; + tensor add_24_y_0_to_fp16 = const()[name = tensor("add_24_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_24_cast = add(x = reduce_mean_38_cast, y = add_24_y_0_to_fp16)[name = tensor("add_24_cast")]; + tensor sqrt_12_cast = sqrt(x = add_24_cast)[name = tensor("sqrt_12_cast")]; + tensor real_div_12_cast = real_div(x = sub_24_cast, y = sqrt_12_cast)[name = tensor("real_div_12_cast")]; + tensor reshape_49_shape_0 = const()[name = tensor("reshape_49_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_49_cast = reshape(shape = reshape_49_shape_0, x = real_div_12_cast)[name = tensor("reshape_49_cast")]; + tensor add_25_gamma_0_to_fp16 = const()[name = tensor("add_25_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(71332928)))]; + tensor add_25_beta_0_to_fp16 = const()[name = tensor("add_25_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(71335552)))]; + tensor add_25_epsilon_0_to_fp16 = const()[name = tensor("add_25_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_25_cast = batch_norm(beta = add_25_beta_0_to_fp16, epsilon = add_25_epsilon_0_to_fp16, gamma = add_25_gamma_0_to_fp16, mean = 
add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_49_cast)[name = tensor("add_25_cast")]; + tensor var_1274 = const()[name = tensor("op_1274"), val = tensor([1, 1])]; + tensor var_1276 = const()[name = tensor("op_1276"), val = tensor([1, 1])]; + tensor hidden_states_63_pad_type_0 = const()[name = tensor("hidden_states_63_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_63_pad_0 = const()[name = tensor("hidden_states_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(71338176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(72567040))), name = tensor("down_blocks_2_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(72567232)))]; + tensor hidden_states_63_cast = conv(bias = down_blocks_2_attentions_0_proj_in_bias_to_fp16, dilations = var_1276, groups = var_1186, pad = hidden_states_63_pad_0, pad_type = hidden_states_63_pad_type_0, strides = var_1274, weight = down_blocks_2_attentions_0_proj_in_weight_to_fp16_palettized, x = add_25_cast)[name = tensor("hidden_states_63_cast")]; + tensor var_1281 = const()[name = tensor("op_1281"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_25_cast = reshape(shape = var_1281, x = hidden_states_63_cast)[name = tensor("inputs_25_cast")]; + tensor var_1291 = const()[name = tensor("op_1291"), val = tensor([1])]; + tensor channels_mean_25_cast = reduce_mean(axes = var_1291, keep_dims = var_1181, x = inputs_25_cast)[name = tensor("channels_mean_25_cast")]; + tensor zero_mean_25_cast = sub(x = inputs_25_cast, y = channels_mean_25_cast)[name = tensor("zero_mean_25_cast")]; + tensor zero_mean_sq_25_cast = mul(x = zero_mean_25_cast, y = zero_mean_25_cast)[name = tensor("zero_mean_sq_25_cast")]; + tensor var_1295 = const()[name = tensor("op_1295"), val = tensor([1])]; + tensor var_1296_cast = reduce_mean(axes = var_1295, keep_dims = var_1181, x = zero_mean_sq_25_cast)[name = tensor("op_1296_cast")]; + tensor var_1297_to_fp16 = const()[name = tensor("op_1297_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1298_cast = add(x = var_1296_cast, y = var_1297_to_fp16)[name = tensor("op_1298_cast")]; + tensor denom_25_epsilon_0_to_fp16 = const()[name = tensor("denom_25_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_25_cast = rsqrt(epsilon = denom_25_epsilon_0_to_fp16, x = var_1298_cast)[name = tensor("denom_25_cast")]; + tensor out_25_cast = mul(x = zero_mean_25_cast, y = denom_25_cast)[name = tensor("out_25_cast")]; + tensor var_1302_to_fp16 = const()[name = tensor("op_1302_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(72569856)))]; + tensor var_1303_cast = add(x = out_25_cast, y = var_1302_to_fp16)[name = tensor("op_1303_cast")]; + tensor var_1305_to_fp16 = const()[name = tensor("op_1305_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(72572480)))]; + tensor hidden_states_65_cast = mul(x = var_1303_cast, y = var_1305_to_fp16)[name = tensor("hidden_states_65_cast")]; + tensor var_1312 = const()[name = tensor("op_1312"), val = tensor([1, 1])]; + tensor 
var_1314 = const()[name = tensor("op_1314"), val = tensor([1, 1])]; + tensor q_17_pad_type_0 = const()[name = tensor("q_17_pad_type_0"), val = tensor("custom")]; + tensor q_17_pad_0 = const()[name = tensor("q_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(72575104))), lut = tensor([-0x1.374p-5, -0x1.6d8p-7, 0x1.6f8p-7, 0x1.384p-5]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_17_cast = conv(dilations = var_1314, groups = var_1186, pad = q_17_pad_0, pad_type = q_17_pad_type_0, strides = var_1312, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_65_cast)[name = tensor("q_17_cast")]; + tensor var_1318 = const()[name = tensor("op_1318"), val = tensor([1, 1])]; + tensor var_1320 = const()[name = tensor("op_1320"), val = tensor([1, 1])]; + tensor k_17_pad_type_0 = const()[name = tensor("k_17_pad_type_0"), val = tensor("custom")]; + tensor k_17_pad_0 = const()[name = tensor("k_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(72984768))), lut = tensor([-0x1.384p-5, -0x1.6ep-7, 0x1.704p-7, 0x1.384p-5]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_17_cast = conv(dilations = var_1320, groups = var_1186, pad = k_17_pad_0, pad_type = k_17_pad_type_0, strides = var_1318, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_65_cast)[name = tensor("k_17_cast")]; + tensor var_1324 = const()[name = tensor("op_1324"), val = tensor([1, 1])]; + tensor var_1326 = const()[name = tensor("op_1326"), val = tensor([1, 1])]; + tensor v_17_pad_type_0 = const()[name = tensor("v_17_pad_type_0"), val = tensor("custom")]; + tensor v_17_pad_0 = const()[name = tensor("v_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(73394432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(74213696))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_17_cast = conv(dilations = var_1326, groups = var_1186, pad = v_17_pad_0, pad_type = v_17_pad_type_0, strides = var_1324, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_65_cast)[name = tensor("v_17_cast")]; + tensor var_1330 = const()[name = tensor("op_1330"), val = tensor([2, 20, 64, -1])]; + tensor var_1331_cast = reshape(shape = var_1330, x = q_17_cast)[name = tensor("op_1331_cast")]; + tensor var_1332 = const()[name = tensor("op_1332"), val = tensor([2, 20, 64, -1])]; + tensor var_1333_cast = reshape(shape = var_1332, x = k_17_cast)[name = tensor("op_1333_cast")]; + tensor var_1334 = const()[name = tensor("op_1334"), val = tensor([2, 20, 64, -1])]; 
+ tensor var_1335_cast = reshape(shape = var_1334, x = v_17_cast)[name = tensor("op_1335_cast")]; + tensor attn_weights_33_transpose_x_0 = const()[name = tensor("attn_weights_33_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_33_transpose_y_0 = const()[name = tensor("attn_weights_33_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_33_cast = matmul(transpose_x = attn_weights_33_transpose_x_0, transpose_y = attn_weights_33_transpose_y_0, x = var_1331_cast, y = var_1333_cast)[name = tensor("attn_weights_33_cast")]; + tensor var_1177_to_fp16 = const()[name = tensor("op_1177_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_35_cast = mul(x = attn_weights_33_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_35_cast")]; + tensor var_1339_cast = softmax(axis = var_1170, x = attn_weights_35_cast)[name = tensor("op_1339_cast")]; + tensor attn_17_transpose_x_0 = const()[name = tensor("attn_17_transpose_x_0"), val = tensor(false)]; + tensor attn_17_transpose_y_0 = const()[name = tensor("attn_17_transpose_y_0"), val = tensor(true)]; + tensor attn_17_cast = matmul(transpose_x = attn_17_transpose_x_0, transpose_y = attn_17_transpose_y_0, x = var_1335_cast, y = var_1339_cast)[name = tensor("attn_17_cast")]; + tensor var_1343 = const()[name = tensor("op_1343"), val = tensor([2, 1280, 1, -1])]; + tensor input_131_cast = reshape(shape = var_1343, x = attn_17_cast)[name = tensor("input_131_cast")]; + tensor var_1348 = const()[name = tensor("op_1348"), val = tensor([1, 1])]; + tensor var_1350 = const()[name = tensor("op_1350"), val = tensor([1, 1])]; + tensor var_1352_pad_type_0 = const()[name = tensor("op_1352_pad_type_0"), val = tensor("custom")]; + tensor var_1352_pad_0 = const()[name = tensor("op_1352_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(74213824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(75033088))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(75033216)))]; + tensor var_1352_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_1350, groups = var_1186, pad = var_1352_pad_0, pad_type = var_1352_pad_type_0, strides = var_1348, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_131_cast)[name = tensor("op_1352_cast")]; + tensor inputs_27_cast = add(x = var_1352_cast, y = inputs_25_cast)[name = tensor("inputs_27_cast")]; + tensor var_1356 = const()[name = tensor("op_1356"), val = tensor([1])]; + tensor channels_mean_27_cast = reduce_mean(axes = var_1356, keep_dims = var_1181, x = inputs_27_cast)[name = tensor("channels_mean_27_cast")]; + tensor zero_mean_27_cast = sub(x = inputs_27_cast, y = channels_mean_27_cast)[name = tensor("zero_mean_27_cast")]; + tensor zero_mean_sq_27_cast = mul(x = zero_mean_27_cast, y = zero_mean_27_cast)[name = tensor("zero_mean_sq_27_cast")]; + tensor var_1360 = const()[name = tensor("op_1360"), val = 
tensor([1])]; + tensor var_1361_cast = reduce_mean(axes = var_1360, keep_dims = var_1181, x = zero_mean_sq_27_cast)[name = tensor("op_1361_cast")]; + tensor var_1362_to_fp16 = const()[name = tensor("op_1362_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1363_cast = add(x = var_1361_cast, y = var_1362_to_fp16)[name = tensor("op_1363_cast")]; + tensor denom_27_epsilon_0_to_fp16 = const()[name = tensor("denom_27_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_27_cast = rsqrt(epsilon = denom_27_epsilon_0_to_fp16, x = var_1363_cast)[name = tensor("denom_27_cast")]; + tensor out_27_cast = mul(x = zero_mean_27_cast, y = denom_27_cast)[name = tensor("out_27_cast")]; + tensor var_1367_to_fp16 = const()[name = tensor("op_1367_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(75035840)))]; + tensor var_1368_cast = add(x = out_27_cast, y = var_1367_to_fp16)[name = tensor("op_1368_cast")]; + tensor var_1370_to_fp16 = const()[name = tensor("op_1370_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(75038464)))]; + tensor hidden_states_67_cast = mul(x = var_1368_cast, y = var_1370_to_fp16)[name = tensor("hidden_states_67_cast")]; + tensor var_1377 = const()[name = tensor("op_1377"), val = tensor([1, 1])]; + tensor var_1379 = const()[name = tensor("op_1379"), val = tensor([1, 1])]; + tensor q_19_pad_type_0 = const()[name = tensor("q_19_pad_type_0"), val = tensor("custom")]; + tensor q_19_pad_0 = const()[name = tensor("q_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(75041088))), lut = tensor([-0x1.adp-7, 0x1.ad8p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_19_cast = conv(dilations = var_1379, groups = var_1186, pad = q_19_pad_0, pad_type = q_19_pad_type_0, strides = var_1377, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_67_cast)[name = tensor("q_19_cast")]; + tensor var_1383 = const()[name = tensor("op_1383"), val = tensor([1, 1])]; + tensor var_1385 = const()[name = tensor("op_1385"), val = tensor([1, 1])]; + tensor k_19_pad_type_0 = const()[name = tensor("k_19_pad_type_0"), val = tensor("custom")]; + tensor k_19_pad_0 = const()[name = tensor("k_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(75245952))), lut = tensor([-0x1.7fcp-6, -0x1.bdcp-8, 0x1.c44p-8, 0x1.818p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_19_cast = conv(dilations = var_1385, groups = var_1186, pad = k_19_pad_0, pad_type = k_19_pad_type_0, strides = var_1383, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_19_cast")]; + tensor var_1389 = const()[name = tensor("op_1389"), val = tensor([1, 1])]; + tensor var_1391 = const()[name = tensor("op_1391"), val = tensor([1, 1])]; + tensor v_19_pad_type_0 = const()[name = tensor("v_19_pad_type_0"), val = 
tensor("custom")]; + tensor v_19_pad_0 = const()[name = tensor("v_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(75901376))), lut = tensor([-0x1.8e8p-6, -0x1.cbp-8, 0x1.cccp-8, 0x1.9p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_19_cast = conv(dilations = var_1391, groups = var_1186, pad = v_19_pad_0, pad_type = v_19_pad_type_0, strides = var_1389, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_19_cast")]; + tensor var_1395 = const()[name = tensor("op_1395"), val = tensor([2, 20, 64, -1])]; + tensor var_1396_cast = reshape(shape = var_1395, x = q_19_cast)[name = tensor("op_1396_cast")]; + tensor var_1397 = const()[name = tensor("op_1397"), val = tensor([2, 20, 64, -1])]; + tensor var_1398_cast = reshape(shape = var_1397, x = k_19_cast)[name = tensor("op_1398_cast")]; + tensor var_1399 = const()[name = tensor("op_1399"), val = tensor([2, 20, 64, -1])]; + tensor var_1400_cast = reshape(shape = var_1399, x = v_19_cast)[name = tensor("op_1400_cast")]; + tensor attn_weights_37_transpose_x_0 = const()[name = tensor("attn_weights_37_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_37_transpose_y_0 = const()[name = tensor("attn_weights_37_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_37_cast = matmul(transpose_x = attn_weights_37_transpose_x_0, transpose_y = attn_weights_37_transpose_y_0, x = var_1396_cast, y = var_1398_cast)[name = tensor("attn_weights_37_cast")]; + tensor attn_weights_39_cast = mul(x = attn_weights_37_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_39_cast")]; + tensor var_1404_cast = softmax(axis = var_1170, x = attn_weights_39_cast)[name = tensor("op_1404_cast")]; + tensor attn_19_transpose_x_0 = const()[name = tensor("attn_19_transpose_x_0"), val = tensor(false)]; + tensor attn_19_transpose_y_0 = const()[name = tensor("attn_19_transpose_y_0"), val = tensor(true)]; + tensor attn_19_cast = matmul(transpose_x = attn_19_transpose_x_0, transpose_y = attn_19_transpose_y_0, x = var_1400_cast, y = var_1404_cast)[name = tensor("attn_19_cast")]; + tensor var_1408 = const()[name = tensor("op_1408"), val = tensor([2, 1280, 1, -1])]; + tensor input_133_cast = reshape(shape = var_1408, x = attn_19_cast)[name = tensor("input_133_cast")]; + tensor var_1413 = const()[name = tensor("op_1413"), val = tensor([1, 1])]; + tensor var_1415 = const()[name = tensor("op_1415"), val = tensor([1, 1])]; + tensor var_1417_pad_type_0 = const()[name = tensor("op_1417_pad_type_0"), val = tensor("custom")]; + tensor var_1417_pad_0 = const()[name = tensor("op_1417_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(76556800))), lut = tensor([-0x1.b3p-8, 0x1.b4p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = 
tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(76761664)))]; + tensor var_1417_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_1415, groups = var_1186, pad = var_1417_pad_0, pad_type = var_1417_pad_type_0, strides = var_1413, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_133_cast)[name = tensor("op_1417_cast")]; + tensor inputs_29_cast = add(x = var_1417_cast, y = inputs_27_cast)[name = tensor("inputs_29_cast")]; + tensor var_1421 = const()[name = tensor("op_1421"), val = tensor([1])]; + tensor channels_mean_29_cast = reduce_mean(axes = var_1421, keep_dims = var_1181, x = inputs_29_cast)[name = tensor("channels_mean_29_cast")]; + tensor zero_mean_29_cast = sub(x = inputs_29_cast, y = channels_mean_29_cast)[name = tensor("zero_mean_29_cast")]; + tensor zero_mean_sq_29_cast = mul(x = zero_mean_29_cast, y = zero_mean_29_cast)[name = tensor("zero_mean_sq_29_cast")]; + tensor var_1425 = const()[name = tensor("op_1425"), val = tensor([1])]; + tensor var_1426_cast = reduce_mean(axes = var_1425, keep_dims = var_1181, x = zero_mean_sq_29_cast)[name = tensor("op_1426_cast")]; + tensor var_1427_to_fp16 = const()[name = tensor("op_1427_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1428_cast = add(x = var_1426_cast, y = var_1427_to_fp16)[name = tensor("op_1428_cast")]; + tensor denom_29_epsilon_0_to_fp16 = const()[name = tensor("denom_29_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_29_cast = rsqrt(epsilon = denom_29_epsilon_0_to_fp16, x = var_1428_cast)[name = tensor("denom_29_cast")]; + tensor out_29_cast = mul(x = zero_mean_29_cast, y = denom_29_cast)[name = tensor("out_29_cast")]; + tensor var_1432_to_fp16 = const()[name = tensor("op_1432_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(76764288)))]; + tensor var_1433_cast = add(x = out_29_cast, y = var_1432_to_fp16)[name = tensor("op_1433_cast")]; + tensor var_1435_to_fp16 = const()[name = tensor("op_1435_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(76766912)))]; + tensor input_135_cast = mul(x = var_1433_cast, y = var_1435_to_fp16)[name = tensor("input_135_cast")]; + tensor var_1443 = const()[name = tensor("op_1443"), val = tensor([1, 1])]; + tensor var_1445 = const()[name = tensor("op_1445"), val = tensor([1, 1])]; + tensor var_1447_pad_type_0 = const()[name = tensor("op_1447_pad_type_0"), val = tensor("custom")]; + tensor var_1447_pad_0 = const()[name = tensor("op_1447_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(76769536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(83323200))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(83323328)))]; + tensor var_1447_cast = 
conv(bias = down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_1445, groups = var_1186, pad = var_1447_pad_0, pad_type = var_1447_pad_type_0, strides = var_1443, weight = down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_135_cast)[name = tensor("op_1447_cast")]; + tensor var_1448_split_sizes_0 = const()[name = tensor("op_1448_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1448_axis_0 = const()[name = tensor("op_1448_axis_0"), val = tensor(1)]; + tensor var_1448_cast_0, tensor var_1448_cast_1 = split(axis = var_1448_axis_0, split_sizes = var_1448_split_sizes_0, x = var_1447_cast)[name = tensor("op_1448_cast")]; + tensor var_1450_mode_0 = const()[name = tensor("op_1450_mode_0"), val = tensor("EXACT")]; + tensor var_1450_cast = gelu(mode = var_1450_mode_0, x = var_1448_cast_1)[name = tensor("op_1450_cast")]; + tensor input_137_cast = mul(x = var_1448_cast_0, y = var_1450_cast)[name = tensor("input_137_cast")]; + tensor var_1454 = const()[name = tensor("op_1454"), val = tensor([1, 1])]; + tensor var_1456 = const()[name = tensor("op_1456"), val = tensor([1, 1])]; + tensor var_1458_pad_type_0 = const()[name = tensor("op_1458_pad_type_0"), val = tensor("custom")]; + tensor var_1458_pad_0 = const()[name = tensor("op_1458_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(83343872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(86620736))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(86620864)))]; + tensor var_1458_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_1456, groups = var_1186, pad = var_1458_pad_0, pad_type = var_1458_pad_type_0, strides = var_1454, weight = down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_137_cast)[name = tensor("op_1458_cast")]; + tensor inputs_31_cast = add(x = var_1458_cast, y = inputs_29_cast)[name = tensor("inputs_31_cast")]; + tensor var_1468 = const()[name = tensor("op_1468"), val = tensor([1])]; + tensor channels_mean_31_cast = reduce_mean(axes = var_1468, keep_dims = var_1181, x = inputs_31_cast)[name = tensor("channels_mean_31_cast")]; + tensor zero_mean_31_cast = sub(x = inputs_31_cast, y = channels_mean_31_cast)[name = tensor("zero_mean_31_cast")]; + tensor zero_mean_sq_31_cast = mul(x = zero_mean_31_cast, y = zero_mean_31_cast)[name = tensor("zero_mean_sq_31_cast")]; + tensor var_1472 = const()[name = tensor("op_1472"), val = tensor([1])]; + tensor var_1473_cast = reduce_mean(axes = var_1472, keep_dims = var_1181, x = zero_mean_sq_31_cast)[name = tensor("op_1473_cast")]; + tensor var_1474_to_fp16 = const()[name = tensor("op_1474_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1475_cast = add(x = var_1473_cast, y = var_1474_to_fp16)[name = tensor("op_1475_cast")]; + tensor denom_31_epsilon_0_to_fp16 = const()[name = tensor("denom_31_epsilon_0_to_fp16"), val = 
tensor(0x1p-24)]; + tensor denom_31_cast = rsqrt(epsilon = denom_31_epsilon_0_to_fp16, x = var_1475_cast)[name = tensor("denom_31_cast")]; + tensor out_31_cast = mul(x = zero_mean_31_cast, y = denom_31_cast)[name = tensor("out_31_cast")]; + tensor var_1479_to_fp16 = const()[name = tensor("op_1479_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(86623488)))]; + tensor var_1480_cast = add(x = out_31_cast, y = var_1479_to_fp16)[name = tensor("op_1480_cast")]; + tensor var_1482_to_fp16 = const()[name = tensor("op_1482_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(86626112)))]; + tensor hidden_states_71_cast = mul(x = var_1480_cast, y = var_1482_to_fp16)[name = tensor("hidden_states_71_cast")]; + tensor var_1489 = const()[name = tensor("op_1489"), val = tensor([1, 1])]; + tensor var_1491 = const()[name = tensor("op_1491"), val = tensor([1, 1])]; + tensor q_21_pad_type_0 = const()[name = tensor("q_21_pad_type_0"), val = tensor("custom")]; + tensor q_21_pad_0 = const()[name = tensor("q_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(86628736))), lut = tensor([-0x1.17p-5, -0x1.518p-7, 0x1.4d4p-7, 0x1.16p-5]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_21_cast = conv(dilations = var_1491, groups = var_1186, pad = q_21_pad_0, pad_type = q_21_pad_type_0, strides = var_1489, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_71_cast)[name = tensor("q_21_cast")]; + tensor var_1495 = const()[name = tensor("op_1495"), val = tensor([1, 1])]; + tensor var_1497 = const()[name = tensor("op_1497"), val = tensor([1, 1])]; + tensor k_21_pad_type_0 = const()[name = tensor("k_21_pad_type_0"), val = tensor("custom")]; + tensor k_21_pad_0 = const()[name = tensor("k_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(87038400))), lut = tensor([-0x1.26p-6, 0x1.254p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_21_cast = conv(dilations = var_1497, groups = var_1186, pad = k_21_pad_0, pad_type = k_21_pad_type_0, strides = var_1495, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_71_cast)[name = tensor("k_21_cast")]; + tensor var_1501 = const()[name = tensor("op_1501"), val = tensor([1, 1])]; + tensor var_1503 = const()[name = tensor("op_1503"), val = tensor([1, 1])]; + tensor v_21_pad_type_0 = const()[name = tensor("v_21_pad_type_0"), val = tensor("custom")]; + tensor v_21_pad_0 = const()[name = tensor("v_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(87243264))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(88062528))), name = 
tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_21_cast = conv(dilations = var_1503, groups = var_1186, pad = v_21_pad_0, pad_type = v_21_pad_type_0, strides = var_1501, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_71_cast)[name = tensor("v_21_cast")]; + tensor var_1507 = const()[name = tensor("op_1507"), val = tensor([2, 20, 64, -1])]; + tensor var_1508_cast = reshape(shape = var_1507, x = q_21_cast)[name = tensor("op_1508_cast")]; + tensor var_1509 = const()[name = tensor("op_1509"), val = tensor([2, 20, 64, -1])]; + tensor var_1510_cast = reshape(shape = var_1509, x = k_21_cast)[name = tensor("op_1510_cast")]; + tensor var_1511 = const()[name = tensor("op_1511"), val = tensor([2, 20, 64, -1])]; + tensor var_1512_cast = reshape(shape = var_1511, x = v_21_cast)[name = tensor("op_1512_cast")]; + tensor attn_weights_41_transpose_x_0 = const()[name = tensor("attn_weights_41_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_41_transpose_y_0 = const()[name = tensor("attn_weights_41_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_41_cast = matmul(transpose_x = attn_weights_41_transpose_x_0, transpose_y = attn_weights_41_transpose_y_0, x = var_1508_cast, y = var_1510_cast)[name = tensor("attn_weights_41_cast")]; + tensor attn_weights_43_cast = mul(x = attn_weights_41_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_43_cast")]; + tensor var_1516_cast = softmax(axis = var_1170, x = attn_weights_43_cast)[name = tensor("op_1516_cast")]; + tensor attn_21_transpose_x_0 = const()[name = tensor("attn_21_transpose_x_0"), val = tensor(false)]; + tensor attn_21_transpose_y_0 = const()[name = tensor("attn_21_transpose_y_0"), val = tensor(true)]; + tensor attn_21_cast = matmul(transpose_x = attn_21_transpose_x_0, transpose_y = attn_21_transpose_y_0, x = var_1512_cast, y = var_1516_cast)[name = tensor("attn_21_cast")]; + tensor var_1520 = const()[name = tensor("op_1520"), val = tensor([2, 1280, 1, -1])]; + tensor input_139_cast = reshape(shape = var_1520, x = attn_21_cast)[name = tensor("input_139_cast")]; + tensor var_1525 = const()[name = tensor("op_1525"), val = tensor([1, 1])]; + tensor var_1527 = const()[name = tensor("op_1527"), val = tensor([1, 1])]; + tensor var_1529_pad_type_0 = const()[name = tensor("op_1529_pad_type_0"), val = tensor("custom")]; + tensor var_1529_pad_0 = const()[name = tensor("op_1529_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(88062656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(88881920))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(88882048)))]; + tensor var_1529_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_1527, groups = var_1186, pad = var_1529_pad_0, pad_type = var_1529_pad_type_0, strides = 
var_1525, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_139_cast)[name = tensor("op_1529_cast")]; + tensor inputs_33_cast = add(x = var_1529_cast, y = inputs_31_cast)[name = tensor("inputs_33_cast")]; + tensor var_1533 = const()[name = tensor("op_1533"), val = tensor([1])]; + tensor channels_mean_33_cast = reduce_mean(axes = var_1533, keep_dims = var_1181, x = inputs_33_cast)[name = tensor("channels_mean_33_cast")]; + tensor zero_mean_33_cast = sub(x = inputs_33_cast, y = channels_mean_33_cast)[name = tensor("zero_mean_33_cast")]; + tensor zero_mean_sq_33_cast = mul(x = zero_mean_33_cast, y = zero_mean_33_cast)[name = tensor("zero_mean_sq_33_cast")]; + tensor var_1537 = const()[name = tensor("op_1537"), val = tensor([1])]; + tensor var_1538_cast = reduce_mean(axes = var_1537, keep_dims = var_1181, x = zero_mean_sq_33_cast)[name = tensor("op_1538_cast")]; + tensor var_1539_to_fp16 = const()[name = tensor("op_1539_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1540_cast = add(x = var_1538_cast, y = var_1539_to_fp16)[name = tensor("op_1540_cast")]; + tensor denom_33_epsilon_0_to_fp16 = const()[name = tensor("denom_33_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_33_cast = rsqrt(epsilon = denom_33_epsilon_0_to_fp16, x = var_1540_cast)[name = tensor("denom_33_cast")]; + tensor out_33_cast = mul(x = zero_mean_33_cast, y = denom_33_cast)[name = tensor("out_33_cast")]; + tensor var_1544_to_fp16 = const()[name = tensor("op_1544_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(88884672)))]; + tensor var_1545_cast = add(x = out_33_cast, y = var_1544_to_fp16)[name = tensor("op_1545_cast")]; + tensor var_1547_to_fp16 = const()[name = tensor("op_1547_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(88887296)))]; + tensor hidden_states_73_cast = mul(x = var_1545_cast, y = var_1547_to_fp16)[name = tensor("hidden_states_73_cast")]; + tensor var_1554 = const()[name = tensor("op_1554"), val = tensor([1, 1])]; + tensor var_1556 = const()[name = tensor("op_1556"), val = tensor([1, 1])]; + tensor q_23_pad_type_0 = const()[name = tensor("q_23_pad_type_0"), val = tensor("custom")]; + tensor q_23_pad_0 = const()[name = tensor("q_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(88889920))), lut = tensor([-0x1.c9p-7, 0x1.c94p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_23_cast = conv(dilations = var_1556, groups = var_1186, pad = q_23_pad_0, pad_type = q_23_pad_type_0, strides = var_1554, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_73_cast)[name = tensor("q_23_cast")]; + tensor var_1560 = const()[name = tensor("op_1560"), val = tensor([1, 1])]; + tensor var_1562 = const()[name = tensor("op_1562"), val = tensor([1, 1])]; + tensor k_23_pad_type_0 = const()[name = tensor("k_23_pad_type_0"), val = tensor("custom")]; + tensor k_23_pad_0 = const()[name = tensor("k_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(89094784))), lut = tensor([-0x1.85cp-6, -0x1.cbp-8, 0x1.c18p-8, 0x1.82cp-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_23_cast = conv(dilations = var_1562, groups = var_1186, pad = k_23_pad_0, pad_type = k_23_pad_type_0, strides = var_1560, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_23_cast")]; + tensor var_1566 = const()[name = tensor("op_1566"), val = tensor([1, 1])]; + tensor var_1568 = const()[name = tensor("op_1568"), val = tensor([1, 1])]; + tensor v_23_pad_type_0 = const()[name = tensor("v_23_pad_type_0"), val = tensor("custom")]; + tensor v_23_pad_0 = const()[name = tensor("v_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(89750208))), lut = tensor([-0x1.9dp-7, 0x1.9ccp-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_23_cast = conv(dilations = var_1568, groups = var_1186, pad = v_23_pad_0, pad_type = v_23_pad_type_0, strides = var_1566, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_23_cast")]; + tensor var_1572 = const()[name = tensor("op_1572"), val = tensor([2, 20, 64, -1])]; + tensor var_1573_cast = reshape(shape = var_1572, x = q_23_cast)[name = tensor("op_1573_cast")]; + tensor var_1574 = const()[name = tensor("op_1574"), val = tensor([2, 20, 64, -1])]; + tensor var_1575_cast = reshape(shape = var_1574, x = k_23_cast)[name = tensor("op_1575_cast")]; + tensor var_1576 = const()[name = tensor("op_1576"), val = tensor([2, 20, 64, -1])]; + tensor var_1577_cast = reshape(shape = var_1576, x = v_23_cast)[name = tensor("op_1577_cast")]; + tensor attn_weights_45_transpose_x_0 = const()[name = tensor("attn_weights_45_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_45_transpose_y_0 = const()[name = tensor("attn_weights_45_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_45_cast = matmul(transpose_x = attn_weights_45_transpose_x_0, transpose_y = attn_weights_45_transpose_y_0, x = var_1573_cast, y = var_1575_cast)[name = tensor("attn_weights_45_cast")]; + tensor attn_weights_47_cast = mul(x = attn_weights_45_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_47_cast")]; + tensor var_1581_cast = softmax(axis = var_1170, x = attn_weights_47_cast)[name = tensor("op_1581_cast")]; + tensor attn_23_transpose_x_0 = const()[name = tensor("attn_23_transpose_x_0"), val = tensor(false)]; + tensor attn_23_transpose_y_0 = const()[name = tensor("attn_23_transpose_y_0"), val = tensor(true)]; + tensor attn_23_cast = matmul(transpose_x = attn_23_transpose_x_0, transpose_y = attn_23_transpose_y_0, x = var_1577_cast, y = var_1581_cast)[name = tensor("attn_23_cast")]; + tensor var_1585 = const()[name = tensor("op_1585"), val = tensor([2, 1280, 1, -1])]; + tensor input_141_cast = reshape(shape = var_1585, x = attn_23_cast)[name = tensor("input_141_cast")]; + tensor var_1590 = const()[name = tensor("op_1590"), val = tensor([1, 1])]; + tensor var_1592 = const()[name = tensor("op_1592"), val = tensor([1, 1])]; + tensor 
var_1594_pad_type_0 = const()[name = tensor("op_1594_pad_type_0"), val = tensor("custom")]; + tensor var_1594_pad_0 = const()[name = tensor("op_1594_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(90077952))), lut = tensor([-0x1.ca8p-8, 0x1.ccp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(90282816)))]; + tensor var_1594_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_1592, groups = var_1186, pad = var_1594_pad_0, pad_type = var_1594_pad_type_0, strides = var_1590, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_141_cast)[name = tensor("op_1594_cast")]; + tensor inputs_35_cast = add(x = var_1594_cast, y = inputs_33_cast)[name = tensor("inputs_35_cast")]; + tensor var_1598 = const()[name = tensor("op_1598"), val = tensor([1])]; + tensor channels_mean_35_cast = reduce_mean(axes = var_1598, keep_dims = var_1181, x = inputs_35_cast)[name = tensor("channels_mean_35_cast")]; + tensor zero_mean_35_cast = sub(x = inputs_35_cast, y = channels_mean_35_cast)[name = tensor("zero_mean_35_cast")]; + tensor zero_mean_sq_35_cast = mul(x = zero_mean_35_cast, y = zero_mean_35_cast)[name = tensor("zero_mean_sq_35_cast")]; + tensor var_1602 = const()[name = tensor("op_1602"), val = tensor([1])]; + tensor var_1603_cast = reduce_mean(axes = var_1602, keep_dims = var_1181, x = zero_mean_sq_35_cast)[name = tensor("op_1603_cast")]; + tensor var_1604_to_fp16 = const()[name = tensor("op_1604_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1605_cast = add(x = var_1603_cast, y = var_1604_to_fp16)[name = tensor("op_1605_cast")]; + tensor denom_35_epsilon_0_to_fp16 = const()[name = tensor("denom_35_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_35_cast = rsqrt(epsilon = denom_35_epsilon_0_to_fp16, x = var_1605_cast)[name = tensor("denom_35_cast")]; + tensor out_35_cast = mul(x = zero_mean_35_cast, y = denom_35_cast)[name = tensor("out_35_cast")]; + tensor var_1609_to_fp16 = const()[name = tensor("op_1609_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(90285440)))]; + tensor var_1610_cast = add(x = out_35_cast, y = var_1609_to_fp16)[name = tensor("op_1610_cast")]; + tensor var_1612_to_fp16 = const()[name = tensor("op_1612_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(90288064)))]; + tensor input_143_cast = mul(x = var_1610_cast, y = var_1612_to_fp16)[name = tensor("input_143_cast")]; + tensor var_1620 = const()[name = tensor("op_1620"), val = tensor([1, 1])]; + tensor var_1622 = const()[name = tensor("op_1622"), val = tensor([1, 1])]; + tensor var_1624_pad_type_0 = const()[name = tensor("op_1624_pad_type_0"), val = tensor("custom")]; + tensor var_1624_pad_0 = const()[name = tensor("op_1624_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(90290688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(96844352))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(96844480)))]; + tensor var_1624_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_1622, groups = var_1186, pad = var_1624_pad_0, pad_type = var_1624_pad_type_0, strides = var_1620, weight = down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_143_cast)[name = tensor("op_1624_cast")]; + tensor var_1625_split_sizes_0 = const()[name = tensor("op_1625_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1625_axis_0 = const()[name = tensor("op_1625_axis_0"), val = tensor(1)]; + tensor var_1625_cast_0, tensor var_1625_cast_1 = split(axis = var_1625_axis_0, split_sizes = var_1625_split_sizes_0, x = var_1624_cast)[name = tensor("op_1625_cast")]; + tensor var_1627_mode_0 = const()[name = tensor("op_1627_mode_0"), val = tensor("EXACT")]; + tensor var_1627_cast = gelu(mode = var_1627_mode_0, x = var_1625_cast_1)[name = tensor("op_1627_cast")]; + tensor input_145_cast = mul(x = var_1625_cast_0, y = var_1627_cast)[name = tensor("input_145_cast")]; + tensor var_1631 = const()[name = tensor("op_1631"), val = tensor([1, 1])]; + tensor var_1633 = const()[name = tensor("op_1633"), val = tensor([1, 1])]; + tensor var_1635_pad_type_0 = const()[name = tensor("op_1635_pad_type_0"), val = tensor("custom")]; + tensor var_1635_pad_0 = const()[name = tensor("op_1635_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(96865024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100141888))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100142016)))]; + tensor var_1635_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_1633, groups = var_1186, pad = var_1635_pad_0, pad_type = var_1635_pad_type_0, strides = var_1631, weight = down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_145_cast)[name = tensor("op_1635_cast")]; + tensor inputs_37_cast = add(x = var_1635_cast, y = inputs_35_cast)[name = tensor("inputs_37_cast")]; + tensor var_1645 = const()[name = tensor("op_1645"), val = tensor([1])]; + tensor channels_mean_37_cast = reduce_mean(axes = var_1645, keep_dims = var_1181, x = inputs_37_cast)[name = 
tensor("channels_mean_37_cast")]; + tensor zero_mean_37_cast = sub(x = inputs_37_cast, y = channels_mean_37_cast)[name = tensor("zero_mean_37_cast")]; + tensor zero_mean_sq_37_cast = mul(x = zero_mean_37_cast, y = zero_mean_37_cast)[name = tensor("zero_mean_sq_37_cast")]; + tensor var_1649 = const()[name = tensor("op_1649"), val = tensor([1])]; + tensor var_1650_cast = reduce_mean(axes = var_1649, keep_dims = var_1181, x = zero_mean_sq_37_cast)[name = tensor("op_1650_cast")]; + tensor var_1651_to_fp16 = const()[name = tensor("op_1651_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1652_cast = add(x = var_1650_cast, y = var_1651_to_fp16)[name = tensor("op_1652_cast")]; + tensor denom_37_epsilon_0_to_fp16 = const()[name = tensor("denom_37_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_37_cast = rsqrt(epsilon = denom_37_epsilon_0_to_fp16, x = var_1652_cast)[name = tensor("denom_37_cast")]; + tensor out_37_cast = mul(x = zero_mean_37_cast, y = denom_37_cast)[name = tensor("out_37_cast")]; + tensor var_1656_to_fp16 = const()[name = tensor("op_1656_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100144640)))]; + tensor var_1657_cast = add(x = out_37_cast, y = var_1656_to_fp16)[name = tensor("op_1657_cast")]; + tensor var_1659_to_fp16 = const()[name = tensor("op_1659_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100147264)))]; + tensor hidden_states_77_cast = mul(x = var_1657_cast, y = var_1659_to_fp16)[name = tensor("hidden_states_77_cast")]; + tensor var_1666 = const()[name = tensor("op_1666"), val = tensor([1, 1])]; + tensor var_1668 = const()[name = tensor("op_1668"), val = tensor([1, 1])]; + tensor q_25_pad_type_0 = const()[name = tensor("q_25_pad_type_0"), val = tensor("custom")]; + tensor q_25_pad_0 = const()[name = tensor("q_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100149888))), lut = tensor([-0x1.354p-5, -0x1.714p-7, 0x1.71cp-7, 0x1.354p-5]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_25_cast = conv(dilations = var_1668, groups = var_1186, pad = q_25_pad_0, pad_type = q_25_pad_type_0, strides = var_1666, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_77_cast)[name = tensor("q_25_cast")]; + tensor var_1672 = const()[name = tensor("op_1672"), val = tensor([1, 1])]; + tensor var_1674 = const()[name = tensor("op_1674"), val = tensor([1, 1])]; + tensor k_25_pad_type_0 = const()[name = tensor("k_25_pad_type_0"), val = tensor("custom")]; + tensor k_25_pad_0 = const()[name = tensor("k_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100559552))), lut = tensor([-0x1.358p-5, -0x1.724p-7, 0x1.7p-7, 0x1.354p-5]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_25_cast = conv(dilations = var_1674, groups = var_1186, pad = k_25_pad_0, pad_type = k_25_pad_type_0, strides = var_1672, weight = 
down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_77_cast)[name = tensor("k_25_cast")]; + tensor var_1678 = const()[name = tensor("op_1678"), val = tensor([1, 1])]; + tensor var_1680 = const()[name = tensor("op_1680"), val = tensor([1, 1])]; + tensor v_25_pad_type_0 = const()[name = tensor("v_25_pad_type_0"), val = tensor("custom")]; + tensor v_25_pad_0 = const()[name = tensor("v_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100969216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(101788480))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_25_cast = conv(dilations = var_1680, groups = var_1186, pad = v_25_pad_0, pad_type = v_25_pad_type_0, strides = var_1678, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_77_cast)[name = tensor("v_25_cast")]; + tensor var_1684 = const()[name = tensor("op_1684"), val = tensor([2, 20, 64, -1])]; + tensor var_1685_cast = reshape(shape = var_1684, x = q_25_cast)[name = tensor("op_1685_cast")]; + tensor var_1686 = const()[name = tensor("op_1686"), val = tensor([2, 20, 64, -1])]; + tensor var_1687_cast = reshape(shape = var_1686, x = k_25_cast)[name = tensor("op_1687_cast")]; + tensor var_1688 = const()[name = tensor("op_1688"), val = tensor([2, 20, 64, -1])]; + tensor var_1689_cast = reshape(shape = var_1688, x = v_25_cast)[name = tensor("op_1689_cast")]; + tensor attn_weights_49_transpose_x_0 = const()[name = tensor("attn_weights_49_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_49_transpose_y_0 = const()[name = tensor("attn_weights_49_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_49_cast = matmul(transpose_x = attn_weights_49_transpose_x_0, transpose_y = attn_weights_49_transpose_y_0, x = var_1685_cast, y = var_1687_cast)[name = tensor("attn_weights_49_cast")]; + tensor attn_weights_51_cast = mul(x = attn_weights_49_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_51_cast")]; + tensor var_1693_cast = softmax(axis = var_1170, x = attn_weights_51_cast)[name = tensor("op_1693_cast")]; + tensor attn_25_transpose_x_0 = const()[name = tensor("attn_25_transpose_x_0"), val = tensor(false)]; + tensor attn_25_transpose_y_0 = const()[name = tensor("attn_25_transpose_y_0"), val = tensor(true)]; + tensor attn_25_cast = matmul(transpose_x = attn_25_transpose_x_0, transpose_y = attn_25_transpose_y_0, x = var_1689_cast, y = var_1693_cast)[name = tensor("attn_25_cast")]; + tensor var_1697 = const()[name = tensor("op_1697"), val = tensor([2, 1280, 1, -1])]; + tensor input_147_cast = reshape(shape = var_1697, x = attn_25_cast)[name = tensor("input_147_cast")]; + tensor var_1702 = const()[name = tensor("op_1702"), val = tensor([1, 1])]; + tensor var_1704 = const()[name = tensor("op_1704"), val = tensor([1, 1])]; + tensor var_1706_pad_type_0 = const()[name = tensor("op_1706_pad_type_0"), val = tensor("custom")]; + tensor var_1706_pad_0 = const()[name = tensor("op_1706_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(101788608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(102607872))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(102608000)))]; + tensor var_1706_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_1704, groups = var_1186, pad = var_1706_pad_0, pad_type = var_1706_pad_type_0, strides = var_1702, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_147_cast)[name = tensor("op_1706_cast")]; + tensor inputs_39_cast = add(x = var_1706_cast, y = inputs_37_cast)[name = tensor("inputs_39_cast")]; + tensor var_1710 = const()[name = tensor("op_1710"), val = tensor([1])]; + tensor channels_mean_39_cast = reduce_mean(axes = var_1710, keep_dims = var_1181, x = inputs_39_cast)[name = tensor("channels_mean_39_cast")]; + tensor zero_mean_39_cast = sub(x = inputs_39_cast, y = channels_mean_39_cast)[name = tensor("zero_mean_39_cast")]; + tensor zero_mean_sq_39_cast = mul(x = zero_mean_39_cast, y = zero_mean_39_cast)[name = tensor("zero_mean_sq_39_cast")]; + tensor var_1714 = const()[name = tensor("op_1714"), val = tensor([1])]; + tensor var_1715_cast = reduce_mean(axes = var_1714, keep_dims = var_1181, x = zero_mean_sq_39_cast)[name = tensor("op_1715_cast")]; + tensor var_1716_to_fp16 = const()[name = tensor("op_1716_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1717_cast = add(x = var_1715_cast, y = var_1716_to_fp16)[name = tensor("op_1717_cast")]; + tensor denom_39_epsilon_0_to_fp16 = const()[name = tensor("denom_39_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_39_cast = rsqrt(epsilon = denom_39_epsilon_0_to_fp16, x = var_1717_cast)[name = tensor("denom_39_cast")]; + tensor out_39_cast = mul(x = zero_mean_39_cast, y = denom_39_cast)[name = tensor("out_39_cast")]; + tensor var_1721_to_fp16 = const()[name = tensor("op_1721_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(102610624)))]; + tensor var_1722_cast = add(x = out_39_cast, y = var_1721_to_fp16)[name = tensor("op_1722_cast")]; + tensor var_1724_to_fp16 = const()[name = tensor("op_1724_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(102613248)))]; + tensor hidden_states_79_cast = mul(x = var_1722_cast, y = var_1724_to_fp16)[name = tensor("hidden_states_79_cast")]; + tensor var_1731 = const()[name = tensor("op_1731"), val = tensor([1, 1])]; + tensor var_1733 = const()[name = tensor("op_1733"), val = tensor([1, 1])]; + tensor q_27_pad_type_0 = const()[name = tensor("q_27_pad_type_0"), val = tensor("custom")]; + tensor q_27_pad_0 = const()[name = tensor("q_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(102615872))), lut = tensor([-0x1.9e4p-7, 0x1.9f4p-7]), name = 
tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_27_cast = conv(dilations = var_1733, groups = var_1186, pad = q_27_pad_0, pad_type = q_27_pad_type_0, strides = var_1731, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_79_cast)[name = tensor("q_27_cast")]; + tensor var_1737 = const()[name = tensor("op_1737"), val = tensor([1, 1])]; + tensor var_1739 = const()[name = tensor("op_1739"), val = tensor([1, 1])]; + tensor k_27_pad_type_0 = const()[name = tensor("k_27_pad_type_0"), val = tensor("custom")]; + tensor k_27_pad_0 = const()[name = tensor("k_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(102820736))), lut = tensor([-0x1.3e4p-7, 0x1.3c8p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_27_cast = conv(dilations = var_1739, groups = var_1186, pad = k_27_pad_0, pad_type = k_27_pad_type_0, strides = var_1737, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_27_cast")]; + tensor var_1743 = const()[name = tensor("op_1743"), val = tensor([1, 1])]; + tensor var_1745 = const()[name = tensor("op_1745"), val = tensor([1, 1])]; + tensor v_27_pad_type_0 = const()[name = tensor("v_27_pad_type_0"), val = tensor("custom")]; + tensor v_27_pad_0 = const()[name = tensor("v_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(103148480))), lut = tensor([-0x1.65p-7, 0x1.658p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_27_cast = conv(dilations = var_1745, groups = var_1186, pad = v_27_pad_0, pad_type = v_27_pad_type_0, strides = var_1743, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_27_cast")]; + tensor var_1749 = const()[name = tensor("op_1749"), val = tensor([2, 20, 64, -1])]; + tensor var_1750_cast = reshape(shape = var_1749, x = q_27_cast)[name = tensor("op_1750_cast")]; + tensor var_1751 = const()[name = tensor("op_1751"), val = tensor([2, 20, 64, -1])]; + tensor var_1752_cast = reshape(shape = var_1751, x = k_27_cast)[name = tensor("op_1752_cast")]; + tensor var_1753 = const()[name = tensor("op_1753"), val = tensor([2, 20, 64, -1])]; + tensor var_1754_cast = reshape(shape = var_1753, x = v_27_cast)[name = tensor("op_1754_cast")]; + tensor attn_weights_53_transpose_x_0 = const()[name = tensor("attn_weights_53_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_53_transpose_y_0 = const()[name = tensor("attn_weights_53_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_53_cast = matmul(transpose_x = attn_weights_53_transpose_x_0, transpose_y = attn_weights_53_transpose_y_0, x = var_1750_cast, y = var_1752_cast)[name = tensor("attn_weights_53_cast")]; + tensor attn_weights_55_cast = mul(x = attn_weights_53_cast, y = var_1177_to_fp16)[name 
= tensor("attn_weights_55_cast")]; + tensor var_1758_cast = softmax(axis = var_1170, x = attn_weights_55_cast)[name = tensor("op_1758_cast")]; + tensor attn_27_transpose_x_0 = const()[name = tensor("attn_27_transpose_x_0"), val = tensor(false)]; + tensor attn_27_transpose_y_0 = const()[name = tensor("attn_27_transpose_y_0"), val = tensor(true)]; + tensor attn_27_cast = matmul(transpose_x = attn_27_transpose_x_0, transpose_y = attn_27_transpose_y_0, x = var_1754_cast, y = var_1758_cast)[name = tensor("attn_27_cast")]; + tensor var_1762 = const()[name = tensor("op_1762"), val = tensor([2, 1280, 1, -1])]; + tensor input_149_cast = reshape(shape = var_1762, x = attn_27_cast)[name = tensor("input_149_cast")]; + tensor var_1767 = const()[name = tensor("op_1767"), val = tensor([1, 1])]; + tensor var_1769 = const()[name = tensor("op_1769"), val = tensor([1, 1])]; + tensor var_1771_pad_type_0 = const()[name = tensor("op_1771_pad_type_0"), val = tensor("custom")]; + tensor var_1771_pad_0 = const()[name = tensor("op_1771_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(103476224))), lut = tensor([-0x1.8bp-8, 0x1.8bp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(103681088)))]; + tensor var_1771_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_1769, groups = var_1186, pad = var_1771_pad_0, pad_type = var_1771_pad_type_0, strides = var_1767, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_149_cast)[name = tensor("op_1771_cast")]; + tensor inputs_41_cast = add(x = var_1771_cast, y = inputs_39_cast)[name = tensor("inputs_41_cast")]; + tensor var_1775 = const()[name = tensor("op_1775"), val = tensor([1])]; + tensor channels_mean_41_cast = reduce_mean(axes = var_1775, keep_dims = var_1181, x = inputs_41_cast)[name = tensor("channels_mean_41_cast")]; + tensor zero_mean_41_cast = sub(x = inputs_41_cast, y = channels_mean_41_cast)[name = tensor("zero_mean_41_cast")]; + tensor zero_mean_sq_41_cast = mul(x = zero_mean_41_cast, y = zero_mean_41_cast)[name = tensor("zero_mean_sq_41_cast")]; + tensor var_1779 = const()[name = tensor("op_1779"), val = tensor([1])]; + tensor var_1780_cast = reduce_mean(axes = var_1779, keep_dims = var_1181, x = zero_mean_sq_41_cast)[name = tensor("op_1780_cast")]; + tensor var_1781_to_fp16 = const()[name = tensor("op_1781_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1782_cast = add(x = var_1780_cast, y = var_1781_to_fp16)[name = tensor("op_1782_cast")]; + tensor denom_41_epsilon_0_to_fp16 = const()[name = tensor("denom_41_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_41_cast = rsqrt(epsilon = denom_41_epsilon_0_to_fp16, x = var_1782_cast)[name = tensor("denom_41_cast")]; + tensor out_41_cast = mul(x = zero_mean_41_cast, y = denom_41_cast)[name = tensor("out_41_cast")]; + tensor var_1786_to_fp16 = const()[name = tensor("op_1786_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(103683712)))]; + tensor var_1787_cast = add(x = out_41_cast, y = var_1786_to_fp16)[name = tensor("op_1787_cast")]; + tensor var_1789_to_fp16 = const()[name = tensor("op_1789_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(103686336)))]; + tensor input_151_cast = mul(x = var_1787_cast, y = var_1789_to_fp16)[name = tensor("input_151_cast")]; + tensor var_1797 = const()[name = tensor("op_1797"), val = tensor([1, 1])]; + tensor var_1799 = const()[name = tensor("op_1799"), val = tensor([1, 1])]; + tensor var_1801_pad_type_0 = const()[name = tensor("op_1801_pad_type_0"), val = tensor("custom")]; + tensor var_1801_pad_0 = const()[name = tensor("op_1801_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(103688960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(110242624))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(110242752)))]; + tensor var_1801_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_1799, groups = var_1186, pad = var_1801_pad_0, pad_type = var_1801_pad_type_0, strides = var_1797, weight = down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_151_cast)[name = tensor("op_1801_cast")]; + tensor var_1802_split_sizes_0 = const()[name = tensor("op_1802_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1802_axis_0 = const()[name = tensor("op_1802_axis_0"), val = tensor(1)]; + tensor var_1802_cast_0, tensor var_1802_cast_1 = split(axis = var_1802_axis_0, split_sizes = var_1802_split_sizes_0, x = var_1801_cast)[name = tensor("op_1802_cast")]; + tensor var_1804_mode_0 = const()[name = tensor("op_1804_mode_0"), val = tensor("EXACT")]; + tensor var_1804_cast = gelu(mode = var_1804_mode_0, x = var_1802_cast_1)[name = tensor("op_1804_cast")]; + tensor input_153_cast = mul(x = var_1802_cast_0, y = var_1804_cast)[name = tensor("input_153_cast")]; + tensor var_1808 = const()[name = tensor("op_1808"), val = tensor([1, 1])]; + tensor var_1810 = const()[name = tensor("op_1810"), val = tensor([1, 1])]; + tensor var_1812_pad_type_0 = const()[name = tensor("op_1812_pad_type_0"), val = tensor("custom")]; + tensor var_1812_pad_0 = const()[name = tensor("op_1812_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(110263296))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113540160))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = 
tensor("down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113540288)))]; + tensor var_1812_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_1810, groups = var_1186, pad = var_1812_pad_0, pad_type = var_1812_pad_type_0, strides = var_1808, weight = down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_153_cast)[name = tensor("op_1812_cast")]; + tensor inputs_43_cast = add(x = var_1812_cast, y = inputs_41_cast)[name = tensor("inputs_43_cast")]; + tensor var_1822 = const()[name = tensor("op_1822"), val = tensor([1])]; + tensor channels_mean_43_cast = reduce_mean(axes = var_1822, keep_dims = var_1181, x = inputs_43_cast)[name = tensor("channels_mean_43_cast")]; + tensor zero_mean_43_cast = sub(x = inputs_43_cast, y = channels_mean_43_cast)[name = tensor("zero_mean_43_cast")]; + tensor zero_mean_sq_43_cast = mul(x = zero_mean_43_cast, y = zero_mean_43_cast)[name = tensor("zero_mean_sq_43_cast")]; + tensor var_1826 = const()[name = tensor("op_1826"), val = tensor([1])]; + tensor var_1827_cast = reduce_mean(axes = var_1826, keep_dims = var_1181, x = zero_mean_sq_43_cast)[name = tensor("op_1827_cast")]; + tensor var_1828_to_fp16 = const()[name = tensor("op_1828_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1829_cast = add(x = var_1827_cast, y = var_1828_to_fp16)[name = tensor("op_1829_cast")]; + tensor denom_43_epsilon_0_to_fp16 = const()[name = tensor("denom_43_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_43_cast = rsqrt(epsilon = denom_43_epsilon_0_to_fp16, x = var_1829_cast)[name = tensor("denom_43_cast")]; + tensor out_43_cast = mul(x = zero_mean_43_cast, y = denom_43_cast)[name = tensor("out_43_cast")]; + tensor var_1833_to_fp16 = const()[name = tensor("op_1833_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113542912)))]; + tensor var_1834_cast = add(x = out_43_cast, y = var_1833_to_fp16)[name = tensor("op_1834_cast")]; + tensor var_1836_to_fp16 = const()[name = tensor("op_1836_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113545536)))]; + tensor hidden_states_83_cast = mul(x = var_1834_cast, y = var_1836_to_fp16)[name = tensor("hidden_states_83_cast")]; + tensor var_1843 = const()[name = tensor("op_1843"), val = tensor([1, 1])]; + tensor var_1845 = const()[name = tensor("op_1845"), val = tensor([1, 1])]; + tensor q_29_pad_type_0 = const()[name = tensor("q_29_pad_type_0"), val = tensor("custom")]; + tensor q_29_pad_0 = const()[name = tensor("q_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113548160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(114367424))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_29_cast = conv(dilations = var_1845, groups = var_1186, pad = q_29_pad_0, pad_type = q_29_pad_type_0, strides = var_1843, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_83_cast)[name = tensor("q_29_cast")]; + tensor var_1849 = const()[name = 
tensor("op_1849"), val = tensor([1, 1])]; + tensor var_1851 = const()[name = tensor("op_1851"), val = tensor([1, 1])]; + tensor k_29_pad_type_0 = const()[name = tensor("k_29_pad_type_0"), val = tensor("custom")]; + tensor k_29_pad_0 = const()[name = tensor("k_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(114367552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115186816))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_29_cast = conv(dilations = var_1851, groups = var_1186, pad = k_29_pad_0, pad_type = k_29_pad_type_0, strides = var_1849, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_83_cast)[name = tensor("k_29_cast")]; + tensor var_1855 = const()[name = tensor("op_1855"), val = tensor([1, 1])]; + tensor var_1857 = const()[name = tensor("op_1857"), val = tensor([1, 1])]; + tensor v_29_pad_type_0 = const()[name = tensor("v_29_pad_type_0"), val = tensor("custom")]; + tensor v_29_pad_0 = const()[name = tensor("v_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115186944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(116006208))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_29_cast = conv(dilations = var_1857, groups = var_1186, pad = v_29_pad_0, pad_type = v_29_pad_type_0, strides = var_1855, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_83_cast)[name = tensor("v_29_cast")]; + tensor var_1861 = const()[name = tensor("op_1861"), val = tensor([2, 20, 64, -1])]; + tensor var_1862_cast = reshape(shape = var_1861, x = q_29_cast)[name = tensor("op_1862_cast")]; + tensor var_1863 = const()[name = tensor("op_1863"), val = tensor([2, 20, 64, -1])]; + tensor var_1864_cast = reshape(shape = var_1863, x = k_29_cast)[name = tensor("op_1864_cast")]; + tensor var_1865 = const()[name = tensor("op_1865"), val = tensor([2, 20, 64, -1])]; + tensor var_1866_cast = reshape(shape = var_1865, x = v_29_cast)[name = tensor("op_1866_cast")]; + tensor attn_weights_57_transpose_x_0 = const()[name = tensor("attn_weights_57_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_57_transpose_y_0 = const()[name = tensor("attn_weights_57_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_57_cast = matmul(transpose_x = attn_weights_57_transpose_x_0, transpose_y = attn_weights_57_transpose_y_0, x = var_1862_cast, y = var_1864_cast)[name = tensor("attn_weights_57_cast")]; + tensor attn_weights_59_cast = mul(x = attn_weights_57_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_59_cast")]; + tensor var_1870_cast = softmax(axis = var_1170, x = attn_weights_59_cast)[name = tensor("op_1870_cast")]; + tensor attn_29_transpose_x_0 = const()[name = tensor("attn_29_transpose_x_0"), val = tensor(false)]; + tensor attn_29_transpose_y_0 = const()[name = tensor("attn_29_transpose_y_0"), val 
= tensor(true)]; + tensor attn_29_cast = matmul(transpose_x = attn_29_transpose_x_0, transpose_y = attn_29_transpose_y_0, x = var_1866_cast, y = var_1870_cast)[name = tensor("attn_29_cast")]; + tensor var_1874 = const()[name = tensor("op_1874"), val = tensor([2, 1280, 1, -1])]; + tensor input_155_cast = reshape(shape = var_1874, x = attn_29_cast)[name = tensor("input_155_cast")]; + tensor var_1879 = const()[name = tensor("op_1879"), val = tensor([1, 1])]; + tensor var_1881 = const()[name = tensor("op_1881"), val = tensor([1, 1])]; + tensor var_1883_pad_type_0 = const()[name = tensor("op_1883_pad_type_0"), val = tensor("custom")]; + tensor var_1883_pad_0 = const()[name = tensor("op_1883_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(116006336))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(116825600))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(116825728)))]; + tensor var_1883_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_1881, groups = var_1186, pad = var_1883_pad_0, pad_type = var_1883_pad_type_0, strides = var_1879, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_155_cast)[name = tensor("op_1883_cast")]; + tensor inputs_45_cast = add(x = var_1883_cast, y = inputs_43_cast)[name = tensor("inputs_45_cast")]; + tensor var_1887 = const()[name = tensor("op_1887"), val = tensor([1])]; + tensor channels_mean_45_cast = reduce_mean(axes = var_1887, keep_dims = var_1181, x = inputs_45_cast)[name = tensor("channels_mean_45_cast")]; + tensor zero_mean_45_cast = sub(x = inputs_45_cast, y = channels_mean_45_cast)[name = tensor("zero_mean_45_cast")]; + tensor zero_mean_sq_45_cast = mul(x = zero_mean_45_cast, y = zero_mean_45_cast)[name = tensor("zero_mean_sq_45_cast")]; + tensor var_1891 = const()[name = tensor("op_1891"), val = tensor([1])]; + tensor var_1892_cast = reduce_mean(axes = var_1891, keep_dims = var_1181, x = zero_mean_sq_45_cast)[name = tensor("op_1892_cast")]; + tensor var_1893_to_fp16 = const()[name = tensor("op_1893_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1894_cast = add(x = var_1892_cast, y = var_1893_to_fp16)[name = tensor("op_1894_cast")]; + tensor denom_45_epsilon_0_to_fp16 = const()[name = tensor("denom_45_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_45_cast = rsqrt(epsilon = denom_45_epsilon_0_to_fp16, x = var_1894_cast)[name = tensor("denom_45_cast")]; + tensor out_45_cast = mul(x = zero_mean_45_cast, y = denom_45_cast)[name = tensor("out_45_cast")]; + tensor var_1898_to_fp16 = const()[name = tensor("op_1898_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(116828352)))]; + tensor var_1899_cast = add(x = out_45_cast, y = var_1898_to_fp16)[name = tensor("op_1899_cast")]; + tensor var_1901_to_fp16 = const()[name = tensor("op_1901_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(116830976)))]; + tensor hidden_states_85_cast = mul(x = var_1899_cast, y = var_1901_to_fp16)[name = tensor("hidden_states_85_cast")]; + tensor var_1908 = const()[name = tensor("op_1908"), val = tensor([1, 1])]; + tensor var_1910 = const()[name = tensor("op_1910"), val = tensor([1, 1])]; + tensor q_31_pad_type_0 = const()[name = tensor("q_31_pad_type_0"), val = tensor("custom")]; + tensor q_31_pad_0 = const()[name = tensor("q_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(116833600))), lut = tensor([-0x1.918p-7, 0x1.924p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_31_cast = conv(dilations = var_1910, groups = var_1186, pad = q_31_pad_0, pad_type = q_31_pad_type_0, strides = var_1908, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_85_cast)[name = tensor("q_31_cast")]; + tensor var_1914 = const()[name = tensor("op_1914"), val = tensor([1, 1])]; + tensor var_1916 = const()[name = tensor("op_1916"), val = tensor([1, 1])]; + tensor k_31_pad_type_0 = const()[name = tensor("k_31_pad_type_0"), val = tensor("custom")]; + tensor k_31_pad_0 = const()[name = tensor("k_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(117038464))), lut = tensor([-0x1.214p-6, -0x1.5c8p-8, 0x1.5bcp-8, 0x1.218p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_31_cast = conv(dilations = var_1916, groups = var_1186, pad = k_31_pad_0, pad_type = k_31_pad_type_0, strides = var_1914, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_31_cast")]; + tensor var_1920 = const()[name = tensor("op_1920"), val = tensor([1, 1])]; + tensor var_1922 = const()[name = tensor("op_1922"), val = tensor([1, 1])]; + tensor v_31_pad_type_0 = const()[name = tensor("v_31_pad_type_0"), val = tensor("custom")]; + tensor v_31_pad_0 = const()[name = tensor("v_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(117693888))), lut = tensor([-0x1.5dp-7, 0x1.5ep-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_31_cast = conv(dilations = var_1922, groups = var_1186, pad = v_31_pad_0, pad_type = v_31_pad_type_0, strides = var_1920, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_31_cast")]; + tensor var_1926 = const()[name = tensor("op_1926"), val = tensor([2, 20, 64, -1])]; + tensor var_1927_cast = reshape(shape = var_1926, x = q_31_cast)[name = tensor("op_1927_cast")]; + tensor var_1928 = const()[name = tensor("op_1928"), val = tensor([2, 20, 
64, -1])]; + tensor var_1929_cast = reshape(shape = var_1928, x = k_31_cast)[name = tensor("op_1929_cast")]; + tensor var_1930 = const()[name = tensor("op_1930"), val = tensor([2, 20, 64, -1])]; + tensor var_1931_cast = reshape(shape = var_1930, x = v_31_cast)[name = tensor("op_1931_cast")]; + tensor attn_weights_61_transpose_x_0 = const()[name = tensor("attn_weights_61_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_61_transpose_y_0 = const()[name = tensor("attn_weights_61_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_61_cast = matmul(transpose_x = attn_weights_61_transpose_x_0, transpose_y = attn_weights_61_transpose_y_0, x = var_1927_cast, y = var_1929_cast)[name = tensor("attn_weights_61_cast")]; + tensor attn_weights_63_cast = mul(x = attn_weights_61_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_63_cast")]; + tensor var_1935_cast = softmax(axis = var_1170, x = attn_weights_63_cast)[name = tensor("op_1935_cast")]; + tensor attn_31_transpose_x_0 = const()[name = tensor("attn_31_transpose_x_0"), val = tensor(false)]; + tensor attn_31_transpose_y_0 = const()[name = tensor("attn_31_transpose_y_0"), val = tensor(true)]; + tensor attn_31_cast = matmul(transpose_x = attn_31_transpose_x_0, transpose_y = attn_31_transpose_y_0, x = var_1931_cast, y = var_1935_cast)[name = tensor("attn_31_cast")]; + tensor var_1939 = const()[name = tensor("op_1939"), val = tensor([2, 1280, 1, -1])]; + tensor input_157_cast = reshape(shape = var_1939, x = attn_31_cast)[name = tensor("input_157_cast")]; + tensor var_1944 = const()[name = tensor("op_1944"), val = tensor([1, 1])]; + tensor var_1946 = const()[name = tensor("op_1946"), val = tensor([1, 1])]; + tensor var_1948_pad_type_0 = const()[name = tensor("op_1948_pad_type_0"), val = tensor("custom")]; + tensor var_1948_pad_0 = const()[name = tensor("op_1948_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(118021632))), lut = tensor([-0x1.8a8p-8, 0x1.89cp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(118226496)))]; + tensor var_1948_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_1946, groups = var_1186, pad = var_1948_pad_0, pad_type = var_1948_pad_type_0, strides = var_1944, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_157_cast)[name = tensor("op_1948_cast")]; + tensor inputs_47_cast = add(x = var_1948_cast, y = inputs_45_cast)[name = tensor("inputs_47_cast")]; + tensor var_1952 = const()[name = tensor("op_1952"), val = tensor([1])]; + tensor channels_mean_47_cast = reduce_mean(axes = var_1952, keep_dims = var_1181, x = inputs_47_cast)[name = tensor("channels_mean_47_cast")]; + tensor zero_mean_47_cast = sub(x = inputs_47_cast, y = channels_mean_47_cast)[name = tensor("zero_mean_47_cast")]; + tensor zero_mean_sq_47_cast = mul(x = zero_mean_47_cast, y = zero_mean_47_cast)[name = tensor("zero_mean_sq_47_cast")]; + tensor var_1956 
= const()[name = tensor("op_1956"), val = tensor([1])]; + tensor var_1957_cast = reduce_mean(axes = var_1956, keep_dims = var_1181, x = zero_mean_sq_47_cast)[name = tensor("op_1957_cast")]; + tensor var_1958_to_fp16 = const()[name = tensor("op_1958_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1959_cast = add(x = var_1957_cast, y = var_1958_to_fp16)[name = tensor("op_1959_cast")]; + tensor denom_47_epsilon_0_to_fp16 = const()[name = tensor("denom_47_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_47_cast = rsqrt(epsilon = denom_47_epsilon_0_to_fp16, x = var_1959_cast)[name = tensor("denom_47_cast")]; + tensor out_47_cast = mul(x = zero_mean_47_cast, y = denom_47_cast)[name = tensor("out_47_cast")]; + tensor var_1963_to_fp16 = const()[name = tensor("op_1963_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(118229120)))]; + tensor var_1964_cast = add(x = out_47_cast, y = var_1963_to_fp16)[name = tensor("op_1964_cast")]; + tensor var_1966_to_fp16 = const()[name = tensor("op_1966_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(118231744)))]; + tensor input_159_cast = mul(x = var_1964_cast, y = var_1966_to_fp16)[name = tensor("input_159_cast")]; + tensor var_1974 = const()[name = tensor("op_1974"), val = tensor([1, 1])]; + tensor var_1976 = const()[name = tensor("op_1976"), val = tensor([1, 1])]; + tensor var_1978_pad_type_0 = const()[name = tensor("op_1978_pad_type_0"), val = tensor("custom")]; + tensor var_1978_pad_0 = const()[name = tensor("op_1978_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(118234368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(124788032))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(124788160)))]; + tensor var_1978_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_1976, groups = var_1186, pad = var_1978_pad_0, pad_type = var_1978_pad_type_0, strides = var_1974, weight = down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_159_cast)[name = tensor("op_1978_cast")]; + tensor var_1979_split_sizes_0 = const()[name = tensor("op_1979_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1979_axis_0 = const()[name = tensor("op_1979_axis_0"), val = tensor(1)]; + tensor var_1979_cast_0, tensor var_1979_cast_1 = split(axis = var_1979_axis_0, split_sizes = var_1979_split_sizes_0, x = var_1978_cast)[name = tensor("op_1979_cast")]; + tensor var_1981_mode_0 = const()[name = tensor("op_1981_mode_0"), val = tensor("EXACT")]; + tensor var_1981_cast = gelu(mode = var_1981_mode_0, x = var_1979_cast_1)[name = tensor("op_1981_cast")]; + tensor input_161_cast = mul(x = var_1979_cast_0, y = var_1981_cast)[name = tensor("input_161_cast")]; + tensor var_1985 = const()[name = tensor("op_1985"), val = tensor([1, 1])]; + tensor var_1987 = const()[name = 
tensor("op_1987"), val = tensor([1, 1])]; + tensor var_1989_pad_type_0 = const()[name = tensor("op_1989_pad_type_0"), val = tensor("custom")]; + tensor var_1989_pad_0 = const()[name = tensor("op_1989_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(124808704))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128085568))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128085696)))]; + tensor var_1989_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_1987, groups = var_1186, pad = var_1989_pad_0, pad_type = var_1989_pad_type_0, strides = var_1985, weight = down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_161_cast)[name = tensor("op_1989_cast")]; + tensor inputs_49_cast = add(x = var_1989_cast, y = inputs_47_cast)[name = tensor("inputs_49_cast")]; + tensor var_1999 = const()[name = tensor("op_1999"), val = tensor([1])]; + tensor channels_mean_49_cast = reduce_mean(axes = var_1999, keep_dims = var_1181, x = inputs_49_cast)[name = tensor("channels_mean_49_cast")]; + tensor zero_mean_49_cast = sub(x = inputs_49_cast, y = channels_mean_49_cast)[name = tensor("zero_mean_49_cast")]; + tensor zero_mean_sq_49_cast = mul(x = zero_mean_49_cast, y = zero_mean_49_cast)[name = tensor("zero_mean_sq_49_cast")]; + tensor var_2003 = const()[name = tensor("op_2003"), val = tensor([1])]; + tensor var_2004_cast = reduce_mean(axes = var_2003, keep_dims = var_1181, x = zero_mean_sq_49_cast)[name = tensor("op_2004_cast")]; + tensor var_2005_to_fp16 = const()[name = tensor("op_2005_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2006_cast = add(x = var_2004_cast, y = var_2005_to_fp16)[name = tensor("op_2006_cast")]; + tensor denom_49_epsilon_0_to_fp16 = const()[name = tensor("denom_49_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_49_cast = rsqrt(epsilon = denom_49_epsilon_0_to_fp16, x = var_2006_cast)[name = tensor("denom_49_cast")]; + tensor out_49_cast = mul(x = zero_mean_49_cast, y = denom_49_cast)[name = tensor("out_49_cast")]; + tensor var_2010_to_fp16 = const()[name = tensor("op_2010_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128088320)))]; + tensor var_2011_cast = add(x = out_49_cast, y = var_2010_to_fp16)[name = tensor("op_2011_cast")]; + tensor var_2013_to_fp16 = const()[name = tensor("op_2013_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128090944)))]; + tensor hidden_states_89_cast = mul(x = var_2011_cast, y = var_2013_to_fp16)[name = tensor("hidden_states_89_cast")]; + tensor var_2020 = const()[name = tensor("op_2020"), val = tensor([1, 1])]; + tensor var_2022 = const()[name = tensor("op_2022"), val = tensor([1, 1])]; + tensor q_33_pad_type_0 = const()[name = tensor("q_33_pad_type_0"), val = tensor("custom")]; + tensor q_33_pad_0 = const()[name = tensor("q_33_pad_0"), val = tensor([0, 0, 0, 0])]; + 
tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128093568))), lut = tensor([-0x1.3p-5, -0x1.6d8p-7, 0x1.6b8p-7, 0x1.2fcp-5]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_33_cast = conv(dilations = var_2022, groups = var_1186, pad = q_33_pad_0, pad_type = q_33_pad_type_0, strides = var_2020, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_89_cast)[name = tensor("q_33_cast")]; + tensor var_2026 = const()[name = tensor("op_2026"), val = tensor([1, 1])]; + tensor var_2028 = const()[name = tensor("op_2028"), val = tensor([1, 1])]; + tensor k_33_pad_type_0 = const()[name = tensor("k_33_pad_type_0"), val = tensor("custom")]; + tensor k_33_pad_0 = const()[name = tensor("k_33_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128503232))), lut = tensor([-0x1.2fp-5, -0x1.6a8p-7, 0x1.6d8p-7, 0x1.308p-5]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_33_cast = conv(dilations = var_2028, groups = var_1186, pad = k_33_pad_0, pad_type = k_33_pad_type_0, strides = var_2026, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_89_cast)[name = tensor("k_33_cast")]; + tensor var_2032 = const()[name = tensor("op_2032"), val = tensor([1, 1])]; + tensor var_2034 = const()[name = tensor("op_2034"), val = tensor([1, 1])]; + tensor v_33_pad_type_0 = const()[name = tensor("v_33_pad_type_0"), val = tensor("custom")]; + tensor v_33_pad_0 = const()[name = tensor("v_33_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128912896))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(129732160))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_33_cast = conv(dilations = var_2034, groups = var_1186, pad = v_33_pad_0, pad_type = v_33_pad_type_0, strides = var_2032, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_89_cast)[name = tensor("v_33_cast")]; + tensor var_2038 = const()[name = tensor("op_2038"), val = tensor([2, 20, 64, -1])]; + tensor var_2039_cast = reshape(shape = var_2038, x = q_33_cast)[name = tensor("op_2039_cast")]; + tensor var_2040 = const()[name = tensor("op_2040"), val = tensor([2, 20, 64, -1])]; + tensor var_2041_cast = reshape(shape = var_2040, x = k_33_cast)[name = tensor("op_2041_cast")]; + tensor var_2042 = const()[name = tensor("op_2042"), val = tensor([2, 20, 64, -1])]; + tensor var_2043_cast = reshape(shape = var_2042, x = v_33_cast)[name = tensor("op_2043_cast")]; + tensor attn_weights_65_transpose_x_0 = const()[name = tensor("attn_weights_65_transpose_x_0"), val = tensor(true)]; + tensor 
attn_weights_65_transpose_y_0 = const()[name = tensor("attn_weights_65_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_65_cast = matmul(transpose_x = attn_weights_65_transpose_x_0, transpose_y = attn_weights_65_transpose_y_0, x = var_2039_cast, y = var_2041_cast)[name = tensor("attn_weights_65_cast")]; + tensor attn_weights_67_cast = mul(x = attn_weights_65_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_67_cast")]; + tensor var_2047_cast = softmax(axis = var_1170, x = attn_weights_67_cast)[name = tensor("op_2047_cast")]; + tensor attn_33_transpose_x_0 = const()[name = tensor("attn_33_transpose_x_0"), val = tensor(false)]; + tensor attn_33_transpose_y_0 = const()[name = tensor("attn_33_transpose_y_0"), val = tensor(true)]; + tensor attn_33_cast = matmul(transpose_x = attn_33_transpose_x_0, transpose_y = attn_33_transpose_y_0, x = var_2043_cast, y = var_2047_cast)[name = tensor("attn_33_cast")]; + tensor var_2051 = const()[name = tensor("op_2051"), val = tensor([2, 1280, 1, -1])]; + tensor input_163_cast = reshape(shape = var_2051, x = attn_33_cast)[name = tensor("input_163_cast")]; + tensor var_2056 = const()[name = tensor("op_2056"), val = tensor([1, 1])]; + tensor var_2058 = const()[name = tensor("op_2058"), val = tensor([1, 1])]; + tensor var_2060_pad_type_0 = const()[name = tensor("op_2060_pad_type_0"), val = tensor("custom")]; + tensor var_2060_pad_0 = const()[name = tensor("op_2060_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(129732288))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130551552))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130551680)))]; + tensor var_2060_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_2058, groups = var_1186, pad = var_2060_pad_0, pad_type = var_2060_pad_type_0, strides = var_2056, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_163_cast)[name = tensor("op_2060_cast")]; + tensor inputs_51_cast = add(x = var_2060_cast, y = inputs_49_cast)[name = tensor("inputs_51_cast")]; + tensor var_2064 = const()[name = tensor("op_2064"), val = tensor([1])]; + tensor channels_mean_51_cast = reduce_mean(axes = var_2064, keep_dims = var_1181, x = inputs_51_cast)[name = tensor("channels_mean_51_cast")]; + tensor zero_mean_51_cast = sub(x = inputs_51_cast, y = channels_mean_51_cast)[name = tensor("zero_mean_51_cast")]; + tensor zero_mean_sq_51_cast = mul(x = zero_mean_51_cast, y = zero_mean_51_cast)[name = tensor("zero_mean_sq_51_cast")]; + tensor var_2068 = const()[name = tensor("op_2068"), val = tensor([1])]; + tensor var_2069_cast = reduce_mean(axes = var_2068, keep_dims = var_1181, x = zero_mean_sq_51_cast)[name = tensor("op_2069_cast")]; + tensor var_2070_to_fp16 = const()[name = tensor("op_2070_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2071_cast = add(x = var_2069_cast, y = 
var_2070_to_fp16)[name = tensor("op_2071_cast")]; + tensor denom_51_epsilon_0_to_fp16 = const()[name = tensor("denom_51_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_51_cast = rsqrt(epsilon = denom_51_epsilon_0_to_fp16, x = var_2071_cast)[name = tensor("denom_51_cast")]; + tensor out_51_cast = mul(x = zero_mean_51_cast, y = denom_51_cast)[name = tensor("out_51_cast")]; + tensor var_2075_to_fp16 = const()[name = tensor("op_2075_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130554304)))]; + tensor var_2076_cast = add(x = out_51_cast, y = var_2075_to_fp16)[name = tensor("op_2076_cast")]; + tensor var_2078_to_fp16 = const()[name = tensor("op_2078_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130556928)))]; + tensor hidden_states_91_cast = mul(x = var_2076_cast, y = var_2078_to_fp16)[name = tensor("hidden_states_91_cast")]; + tensor var_2085 = const()[name = tensor("op_2085"), val = tensor([1, 1])]; + tensor var_2087 = const()[name = tensor("op_2087"), val = tensor([1, 1])]; + tensor q_35_pad_type_0 = const()[name = tensor("q_35_pad_type_0"), val = tensor("custom")]; + tensor q_35_pad_0 = const()[name = tensor("q_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130559552))), lut = tensor([-0x1.83p-7, 0x1.82cp-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_35_cast = conv(dilations = var_2087, groups = var_1186, pad = q_35_pad_0, pad_type = q_35_pad_type_0, strides = var_2085, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_91_cast)[name = tensor("q_35_cast")]; + tensor var_2091 = const()[name = tensor("op_2091"), val = tensor([1, 1])]; + tensor var_2093 = const()[name = tensor("op_2093"), val = tensor([1, 1])]; + tensor k_35_pad_type_0 = const()[name = tensor("k_35_pad_type_0"), val = tensor("custom")]; + tensor k_35_pad_0 = const()[name = tensor("k_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130764416))), lut = tensor([-0x1.12cp-7, 0x1.13p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_35_cast = conv(dilations = var_2093, groups = var_1186, pad = k_35_pad_0, pad_type = k_35_pad_type_0, strides = var_2091, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_35_cast")]; + tensor var_2097 = const()[name = tensor("op_2097"), val = tensor([1, 1])]; + tensor var_2099 = const()[name = tensor("op_2099"), val = tensor([1, 1])]; + tensor v_35_pad_type_0 = const()[name = tensor("v_35_pad_type_0"), val = tensor("custom")]; + tensor v_35_pad_0 = const()[name = tensor("v_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(131092160))), lut = tensor([-0x1.3e4p-7, 0x1.3e8p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_35_cast = conv(dilations = var_2099, groups = var_1186, pad = v_35_pad_0, pad_type = v_35_pad_type_0, strides = var_2097, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_35_cast")]; + tensor var_2103 = const()[name = tensor("op_2103"), val = tensor([2, 20, 64, -1])]; + tensor var_2104_cast = reshape(shape = var_2103, x = q_35_cast)[name = tensor("op_2104_cast")]; + tensor var_2105 = const()[name = tensor("op_2105"), val = tensor([2, 20, 64, -1])]; + tensor var_2106_cast = reshape(shape = var_2105, x = k_35_cast)[name = tensor("op_2106_cast")]; + tensor var_2107 = const()[name = tensor("op_2107"), val = tensor([2, 20, 64, -1])]; + tensor var_2108_cast = reshape(shape = var_2107, x = v_35_cast)[name = tensor("op_2108_cast")]; + tensor attn_weights_69_transpose_x_0 = const()[name = tensor("attn_weights_69_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_69_transpose_y_0 = const()[name = tensor("attn_weights_69_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_69_cast = matmul(transpose_x = attn_weights_69_transpose_x_0, transpose_y = attn_weights_69_transpose_y_0, x = var_2104_cast, y = var_2106_cast)[name = tensor("attn_weights_69_cast")]; + tensor attn_weights_71_cast = mul(x = attn_weights_69_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_71_cast")]; + tensor var_2112_cast = softmax(axis = var_1170, x = attn_weights_71_cast)[name = tensor("op_2112_cast")]; + tensor attn_35_transpose_x_0 = const()[name = tensor("attn_35_transpose_x_0"), val = tensor(false)]; + tensor attn_35_transpose_y_0 = const()[name = tensor("attn_35_transpose_y_0"), val = tensor(true)]; + tensor attn_35_cast = matmul(transpose_x = attn_35_transpose_x_0, transpose_y = attn_35_transpose_y_0, x = var_2108_cast, y = var_2112_cast)[name = tensor("attn_35_cast")]; + tensor var_2116 = const()[name = tensor("op_2116"), val = tensor([2, 1280, 1, -1])]; + tensor input_165_cast = reshape(shape = var_2116, x = attn_35_cast)[name = tensor("input_165_cast")]; + tensor var_2121 = const()[name = tensor("op_2121"), val = tensor([1, 1])]; + tensor var_2123 = const()[name = tensor("op_2123"), val = tensor([1, 1])]; + tensor var_2125_pad_type_0 = const()[name = tensor("op_2125_pad_type_0"), val = tensor("custom")]; + tensor var_2125_pad_0 = const()[name = tensor("op_2125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(131419904))), lut = tensor([-0x1.684p-8, 0x1.68cp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(131624768)))]; + tensor var_2125_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_2123, groups = var_1186, pad = var_2125_pad_0, pad_type = var_2125_pad_type_0, 
strides = var_2121, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_165_cast)[name = tensor("op_2125_cast")]; + tensor inputs_53_cast = add(x = var_2125_cast, y = inputs_51_cast)[name = tensor("inputs_53_cast")]; + tensor var_2129 = const()[name = tensor("op_2129"), val = tensor([1])]; + tensor channels_mean_53_cast = reduce_mean(axes = var_2129, keep_dims = var_1181, x = inputs_53_cast)[name = tensor("channels_mean_53_cast")]; + tensor zero_mean_53_cast = sub(x = inputs_53_cast, y = channels_mean_53_cast)[name = tensor("zero_mean_53_cast")]; + tensor zero_mean_sq_53_cast = mul(x = zero_mean_53_cast, y = zero_mean_53_cast)[name = tensor("zero_mean_sq_53_cast")]; + tensor var_2133 = const()[name = tensor("op_2133"), val = tensor([1])]; + tensor var_2134_cast = reduce_mean(axes = var_2133, keep_dims = var_1181, x = zero_mean_sq_53_cast)[name = tensor("op_2134_cast")]; + tensor var_2135_to_fp16 = const()[name = tensor("op_2135_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2136_cast = add(x = var_2134_cast, y = var_2135_to_fp16)[name = tensor("op_2136_cast")]; + tensor denom_53_epsilon_0_to_fp16 = const()[name = tensor("denom_53_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_53_cast = rsqrt(epsilon = denom_53_epsilon_0_to_fp16, x = var_2136_cast)[name = tensor("denom_53_cast")]; + tensor out_53_cast = mul(x = zero_mean_53_cast, y = denom_53_cast)[name = tensor("out_53_cast")]; + tensor var_2140_to_fp16 = const()[name = tensor("op_2140_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(131627392)))]; + tensor var_2141_cast = add(x = out_53_cast, y = var_2140_to_fp16)[name = tensor("op_2141_cast")]; + tensor var_2143_to_fp16 = const()[name = tensor("op_2143_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(131630016)))]; + tensor input_167_cast = mul(x = var_2141_cast, y = var_2143_to_fp16)[name = tensor("input_167_cast")]; + tensor var_2151 = const()[name = tensor("op_2151"), val = tensor([1, 1])]; + tensor var_2153 = const()[name = tensor("op_2153"), val = tensor([1, 1])]; + tensor var_2155_pad_type_0 = const()[name = tensor("op_2155_pad_type_0"), val = tensor("custom")]; + tensor var_2155_pad_0 = const()[name = tensor("op_2155_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(131632640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(138186304))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(138186432)))]; + tensor var_2155_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_2153, groups = var_1186, pad = var_2155_pad_0, pad_type = var_2155_pad_type_0, strides = var_2151, weight = down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_167_cast)[name = tensor("op_2155_cast")]; + tensor var_2156_split_sizes_0 = 
const()[name = tensor("op_2156_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2156_axis_0 = const()[name = tensor("op_2156_axis_0"), val = tensor(1)]; + tensor var_2156_cast_0, tensor var_2156_cast_1 = split(axis = var_2156_axis_0, split_sizes = var_2156_split_sizes_0, x = var_2155_cast)[name = tensor("op_2156_cast")]; + tensor var_2158_mode_0 = const()[name = tensor("op_2158_mode_0"), val = tensor("EXACT")]; + tensor var_2158_cast = gelu(mode = var_2158_mode_0, x = var_2156_cast_1)[name = tensor("op_2158_cast")]; + tensor input_169_cast = mul(x = var_2156_cast_0, y = var_2158_cast)[name = tensor("input_169_cast")]; + tensor var_2162 = const()[name = tensor("op_2162"), val = tensor([1, 1])]; + tensor var_2164 = const()[name = tensor("op_2164"), val = tensor([1, 1])]; + tensor var_2166_pad_type_0 = const()[name = tensor("op_2166_pad_type_0"), val = tensor("custom")]; + tensor var_2166_pad_0 = const()[name = tensor("op_2166_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(138206976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(143122240))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(143122432)))]; + tensor var_2166_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_2164, groups = var_1186, pad = var_2166_pad_0, pad_type = var_2166_pad_type_0, strides = var_2162, weight = down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_169_cast)[name = tensor("op_2166_cast")]; + tensor inputs_55_cast = add(x = var_2166_cast, y = inputs_53_cast)[name = tensor("inputs_55_cast")]; + tensor var_2176 = const()[name = tensor("op_2176"), val = tensor([1])]; + tensor channels_mean_55_cast = reduce_mean(axes = var_2176, keep_dims = var_1181, x = inputs_55_cast)[name = tensor("channels_mean_55_cast")]; + tensor zero_mean_55_cast = sub(x = inputs_55_cast, y = channels_mean_55_cast)[name = tensor("zero_mean_55_cast")]; + tensor zero_mean_sq_55_cast = mul(x = zero_mean_55_cast, y = zero_mean_55_cast)[name = tensor("zero_mean_sq_55_cast")]; + tensor var_2180 = const()[name = tensor("op_2180"), val = tensor([1])]; + tensor var_2181_cast = reduce_mean(axes = var_2180, keep_dims = var_1181, x = zero_mean_sq_55_cast)[name = tensor("op_2181_cast")]; + tensor var_2182_to_fp16 = const()[name = tensor("op_2182_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2183_cast = add(x = var_2181_cast, y = var_2182_to_fp16)[name = tensor("op_2183_cast")]; + tensor denom_55_epsilon_0_to_fp16 = const()[name = tensor("denom_55_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_55_cast = rsqrt(epsilon = denom_55_epsilon_0_to_fp16, x = var_2183_cast)[name = tensor("denom_55_cast")]; + tensor out_55_cast = mul(x = zero_mean_55_cast, y = denom_55_cast)[name = tensor("out_55_cast")]; + tensor var_2187_to_fp16 = const()[name = tensor("op_2187_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(143125056)))]; + tensor var_2188_cast = add(x = out_55_cast, y = var_2187_to_fp16)[name = tensor("op_2188_cast")]; + tensor var_2190_to_fp16 = const()[name = tensor("op_2190_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(143127680)))]; + tensor hidden_states_95_cast = mul(x = var_2188_cast, y = var_2190_to_fp16)[name = tensor("hidden_states_95_cast")]; + tensor var_2197 = const()[name = tensor("op_2197"), val = tensor([1, 1])]; + tensor var_2199 = const()[name = tensor("op_2199"), val = tensor([1, 1])]; + tensor q_37_pad_type_0 = const()[name = tensor("q_37_pad_type_0"), val = tensor("custom")]; + tensor q_37_pad_0 = const()[name = tensor("q_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(143130304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(143949568))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_37_cast = conv(dilations = var_2199, groups = var_1186, pad = q_37_pad_0, pad_type = q_37_pad_type_0, strides = var_2197, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_95_cast)[name = tensor("q_37_cast")]; + tensor var_2203 = const()[name = tensor("op_2203"), val = tensor([1, 1])]; + tensor var_2205 = const()[name = tensor("op_2205"), val = tensor([1, 1])]; + tensor k_37_pad_type_0 = const()[name = tensor("k_37_pad_type_0"), val = tensor("custom")]; + tensor k_37_pad_0 = const()[name = tensor("k_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(143949696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(144768960))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_37_cast = conv(dilations = var_2205, groups = var_1186, pad = k_37_pad_0, pad_type = k_37_pad_type_0, strides = var_2203, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_95_cast)[name = tensor("k_37_cast")]; + tensor var_2209 = const()[name = tensor("op_2209"), val = tensor([1, 1])]; + tensor var_2211 = const()[name = tensor("op_2211"), val = tensor([1, 1])]; + tensor v_37_pad_type_0 = const()[name = tensor("v_37_pad_type_0"), val = tensor("custom")]; + tensor v_37_pad_0 = const()[name = tensor("v_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(144769088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(145588352))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_37_cast = conv(dilations = var_2211, groups = var_1186, pad = v_37_pad_0, pad_type = v_37_pad_type_0, strides = var_2209, weight = 
down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_95_cast)[name = tensor("v_37_cast")]; + tensor var_2215 = const()[name = tensor("op_2215"), val = tensor([2, 20, 64, -1])]; + tensor var_2216_cast = reshape(shape = var_2215, x = q_37_cast)[name = tensor("op_2216_cast")]; + tensor var_2217 = const()[name = tensor("op_2217"), val = tensor([2, 20, 64, -1])]; + tensor var_2218_cast = reshape(shape = var_2217, x = k_37_cast)[name = tensor("op_2218_cast")]; + tensor var_2219 = const()[name = tensor("op_2219"), val = tensor([2, 20, 64, -1])]; + tensor var_2220_cast = reshape(shape = var_2219, x = v_37_cast)[name = tensor("op_2220_cast")]; + tensor attn_weights_73_transpose_x_0 = const()[name = tensor("attn_weights_73_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_73_transpose_y_0 = const()[name = tensor("attn_weights_73_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_73_cast = matmul(transpose_x = attn_weights_73_transpose_x_0, transpose_y = attn_weights_73_transpose_y_0, x = var_2216_cast, y = var_2218_cast)[name = tensor("attn_weights_73_cast")]; + tensor attn_weights_75_cast = mul(x = attn_weights_73_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_75_cast")]; + tensor var_2224_cast = softmax(axis = var_1170, x = attn_weights_75_cast)[name = tensor("op_2224_cast")]; + tensor attn_37_transpose_x_0 = const()[name = tensor("attn_37_transpose_x_0"), val = tensor(false)]; + tensor attn_37_transpose_y_0 = const()[name = tensor("attn_37_transpose_y_0"), val = tensor(true)]; + tensor attn_37_cast = matmul(transpose_x = attn_37_transpose_x_0, transpose_y = attn_37_transpose_y_0, x = var_2220_cast, y = var_2224_cast)[name = tensor("attn_37_cast")]; + tensor var_2228 = const()[name = tensor("op_2228"), val = tensor([2, 1280, 1, -1])]; + tensor input_171_cast = reshape(shape = var_2228, x = attn_37_cast)[name = tensor("input_171_cast")]; + tensor var_2233 = const()[name = tensor("op_2233"), val = tensor([1, 1])]; + tensor var_2235 = const()[name = tensor("op_2235"), val = tensor([1, 1])]; + tensor var_2237_pad_type_0 = const()[name = tensor("op_2237_pad_type_0"), val = tensor("custom")]; + tensor var_2237_pad_0 = const()[name = tensor("op_2237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(145588480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146407744))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146407872)))]; + tensor var_2237_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_2235, groups = var_1186, pad = var_2237_pad_0, pad_type = var_2237_pad_type_0, strides = var_2233, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_171_cast)[name = tensor("op_2237_cast")]; + tensor inputs_57_cast = add(x = var_2237_cast, y = inputs_55_cast)[name = tensor("inputs_57_cast")]; + tensor 
var_2241 = const()[name = tensor("op_2241"), val = tensor([1])]; + tensor channels_mean_57_cast = reduce_mean(axes = var_2241, keep_dims = var_1181, x = inputs_57_cast)[name = tensor("channels_mean_57_cast")]; + tensor zero_mean_57_cast = sub(x = inputs_57_cast, y = channels_mean_57_cast)[name = tensor("zero_mean_57_cast")]; + tensor zero_mean_sq_57_cast = mul(x = zero_mean_57_cast, y = zero_mean_57_cast)[name = tensor("zero_mean_sq_57_cast")]; + tensor var_2245 = const()[name = tensor("op_2245"), val = tensor([1])]; + tensor var_2246_cast = reduce_mean(axes = var_2245, keep_dims = var_1181, x = zero_mean_sq_57_cast)[name = tensor("op_2246_cast")]; + tensor var_2247_to_fp16 = const()[name = tensor("op_2247_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2248_cast = add(x = var_2246_cast, y = var_2247_to_fp16)[name = tensor("op_2248_cast")]; + tensor denom_57_epsilon_0_to_fp16 = const()[name = tensor("denom_57_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_57_cast = rsqrt(epsilon = denom_57_epsilon_0_to_fp16, x = var_2248_cast)[name = tensor("denom_57_cast")]; + tensor out_57_cast = mul(x = zero_mean_57_cast, y = denom_57_cast)[name = tensor("out_57_cast")]; + tensor var_2252_to_fp16 = const()[name = tensor("op_2252_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146410496)))]; + tensor var_2253_cast = add(x = out_57_cast, y = var_2252_to_fp16)[name = tensor("op_2253_cast")]; + tensor var_2255_to_fp16 = const()[name = tensor("op_2255_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146413120)))]; + tensor hidden_states_97_cast = mul(x = var_2253_cast, y = var_2255_to_fp16)[name = tensor("hidden_states_97_cast")]; + tensor var_2262 = const()[name = tensor("op_2262"), val = tensor([1, 1])]; + tensor var_2264 = const()[name = tensor("op_2264"), val = tensor([1, 1])]; + tensor q_39_pad_type_0 = const()[name = tensor("q_39_pad_type_0"), val = tensor("custom")]; + tensor q_39_pad_0 = const()[name = tensor("q_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146415744))), lut = tensor([-0x1.7p-7, 0x1.6fcp-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_39_cast = conv(dilations = var_2264, groups = var_1186, pad = q_39_pad_0, pad_type = q_39_pad_type_0, strides = var_2262, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_97_cast)[name = tensor("q_39_cast")]; + tensor var_2268 = const()[name = tensor("op_2268"), val = tensor([1, 1])]; + tensor var_2270 = const()[name = tensor("op_2270"), val = tensor([1, 1])]; + tensor k_39_pad_type_0 = const()[name = tensor("k_39_pad_type_0"), val = tensor("custom")]; + tensor k_39_pad_0 = const()[name = tensor("k_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146620608))), lut = tensor([-0x1.f88p-8, 0x1.f94p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_39_cast = 
conv(dilations = var_2270, groups = var_1186, pad = k_39_pad_0, pad_type = k_39_pad_type_0, strides = var_2268, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_39_cast")]; + tensor var_2274 = const()[name = tensor("op_2274"), val = tensor([1, 1])]; + tensor var_2276 = const()[name = tensor("op_2276"), val = tensor([1, 1])]; + tensor v_39_pad_type_0 = const()[name = tensor("v_39_pad_type_0"), val = tensor("custom")]; + tensor v_39_pad_0 = const()[name = tensor("v_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146948352))), lut = tensor([-0x1.1e8p-7, 0x1.1e8p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_39_cast = conv(dilations = var_2276, groups = var_1186, pad = v_39_pad_0, pad_type = v_39_pad_type_0, strides = var_2274, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_39_cast")]; + tensor var_2280 = const()[name = tensor("op_2280"), val = tensor([2, 20, 64, -1])]; + tensor var_2281_cast = reshape(shape = var_2280, x = q_39_cast)[name = tensor("op_2281_cast")]; + tensor var_2282 = const()[name = tensor("op_2282"), val = tensor([2, 20, 64, -1])]; + tensor var_2283_cast = reshape(shape = var_2282, x = k_39_cast)[name = tensor("op_2283_cast")]; + tensor var_2284 = const()[name = tensor("op_2284"), val = tensor([2, 20, 64, -1])]; + tensor var_2285_cast = reshape(shape = var_2284, x = v_39_cast)[name = tensor("op_2285_cast")]; + tensor attn_weights_77_transpose_x_0 = const()[name = tensor("attn_weights_77_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_77_transpose_y_0 = const()[name = tensor("attn_weights_77_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_77_cast = matmul(transpose_x = attn_weights_77_transpose_x_0, transpose_y = attn_weights_77_transpose_y_0, x = var_2281_cast, y = var_2283_cast)[name = tensor("attn_weights_77_cast")]; + tensor attn_weights_79_cast = mul(x = attn_weights_77_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_79_cast")]; + tensor var_2289_cast = softmax(axis = var_1170, x = attn_weights_79_cast)[name = tensor("op_2289_cast")]; + tensor attn_39_transpose_x_0 = const()[name = tensor("attn_39_transpose_x_0"), val = tensor(false)]; + tensor attn_39_transpose_y_0 = const()[name = tensor("attn_39_transpose_y_0"), val = tensor(true)]; + tensor attn_39_cast = matmul(transpose_x = attn_39_transpose_x_0, transpose_y = attn_39_transpose_y_0, x = var_2285_cast, y = var_2289_cast)[name = tensor("attn_39_cast")]; + tensor var_2293 = const()[name = tensor("op_2293"), val = tensor([2, 1280, 1, -1])]; + tensor input_173_cast = reshape(shape = var_2293, x = attn_39_cast)[name = tensor("input_173_cast")]; + tensor var_2298 = const()[name = tensor("op_2298"), val = tensor([1, 1])]; + tensor var_2300 = const()[name = tensor("op_2300"), val = tensor([1, 1])]; + tensor var_2302_pad_type_0 = const()[name = tensor("op_2302_pad_type_0"), val = tensor("custom")]; + tensor var_2302_pad_0 = const()[name = tensor("op_2302_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147276096))), lut = tensor([-0x1.46cp-8, 0x1.48p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147480960)))]; + tensor var_2302_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_2300, groups = var_1186, pad = var_2302_pad_0, pad_type = var_2302_pad_type_0, strides = var_2298, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_173_cast)[name = tensor("op_2302_cast")]; + tensor inputs_59_cast = add(x = var_2302_cast, y = inputs_57_cast)[name = tensor("inputs_59_cast")]; + tensor var_2306 = const()[name = tensor("op_2306"), val = tensor([1])]; + tensor channels_mean_59_cast = reduce_mean(axes = var_2306, keep_dims = var_1181, x = inputs_59_cast)[name = tensor("channels_mean_59_cast")]; + tensor zero_mean_59_cast = sub(x = inputs_59_cast, y = channels_mean_59_cast)[name = tensor("zero_mean_59_cast")]; + tensor zero_mean_sq_59_cast = mul(x = zero_mean_59_cast, y = zero_mean_59_cast)[name = tensor("zero_mean_sq_59_cast")]; + tensor var_2310 = const()[name = tensor("op_2310"), val = tensor([1])]; + tensor var_2311_cast = reduce_mean(axes = var_2310, keep_dims = var_1181, x = zero_mean_sq_59_cast)[name = tensor("op_2311_cast")]; + tensor var_2312_to_fp16 = const()[name = tensor("op_2312_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2313_cast = add(x = var_2311_cast, y = var_2312_to_fp16)[name = tensor("op_2313_cast")]; + tensor denom_59_epsilon_0_to_fp16 = const()[name = tensor("denom_59_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_59_cast = rsqrt(epsilon = denom_59_epsilon_0_to_fp16, x = var_2313_cast)[name = tensor("denom_59_cast")]; + tensor out_59_cast = mul(x = zero_mean_59_cast, y = denom_59_cast)[name = tensor("out_59_cast")]; + tensor var_2317_to_fp16 = const()[name = tensor("op_2317_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147483584)))]; + tensor var_2318_cast = add(x = out_59_cast, y = var_2317_to_fp16)[name = tensor("op_2318_cast")]; + tensor var_2320_to_fp16 = const()[name = tensor("op_2320_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147486208)))]; + tensor input_175_cast = mul(x = var_2318_cast, y = var_2320_to_fp16)[name = tensor("input_175_cast")]; + tensor var_2328 = const()[name = tensor("op_2328"), val = tensor([1, 1])]; + tensor var_2330 = const()[name = tensor("op_2330"), val = tensor([1, 1])]; + tensor var_2332_pad_type_0 = const()[name = tensor("op_2332_pad_type_0"), val = tensor("custom")]; + tensor var_2332_pad_0 = const()[name = tensor("op_2332_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147488832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(154042496))), name = 
tensor("down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(154042624)))]; + tensor var_2332_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_2330, groups = var_1186, pad = var_2332_pad_0, pad_type = var_2332_pad_type_0, strides = var_2328, weight = down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_175_cast)[name = tensor("op_2332_cast")]; + tensor var_2333_split_sizes_0 = const()[name = tensor("op_2333_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2333_axis_0 = const()[name = tensor("op_2333_axis_0"), val = tensor(1)]; + tensor var_2333_cast_0, tensor var_2333_cast_1 = split(axis = var_2333_axis_0, split_sizes = var_2333_split_sizes_0, x = var_2332_cast)[name = tensor("op_2333_cast")]; + tensor var_2335_mode_0 = const()[name = tensor("op_2335_mode_0"), val = tensor("EXACT")]; + tensor var_2335_cast = gelu(mode = var_2335_mode_0, x = var_2333_cast_1)[name = tensor("op_2335_cast")]; + tensor input_177_cast = mul(x = var_2333_cast_0, y = var_2335_cast)[name = tensor("input_177_cast")]; + tensor var_2339 = const()[name = tensor("op_2339"), val = tensor([1, 1])]; + tensor var_2341 = const()[name = tensor("op_2341"), val = tensor([1, 1])]; + tensor var_2343_pad_type_0 = const()[name = tensor("op_2343_pad_type_0"), val = tensor("custom")]; + tensor var_2343_pad_0 = const()[name = tensor("op_2343_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(154063168))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(157340032))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(157340160)))]; + tensor var_2343_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_2341, groups = var_1186, pad = var_2343_pad_0, pad_type = var_2343_pad_type_0, strides = var_2339, weight = down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_177_cast)[name = tensor("op_2343_cast")]; + tensor inputs_61_cast = add(x = var_2343_cast, y = inputs_59_cast)[name = tensor("inputs_61_cast")]; + tensor var_2353 = const()[name = tensor("op_2353"), val = tensor([1])]; + tensor channels_mean_61_cast = reduce_mean(axes = var_2353, keep_dims = var_1181, x = inputs_61_cast)[name = tensor("channels_mean_61_cast")]; + tensor zero_mean_61_cast = sub(x = inputs_61_cast, y = channels_mean_61_cast)[name = tensor("zero_mean_61_cast")]; + tensor zero_mean_sq_61_cast = mul(x = zero_mean_61_cast, y = zero_mean_61_cast)[name = tensor("zero_mean_sq_61_cast")]; + tensor var_2357 = const()[name = tensor("op_2357"), val = 
tensor([1])]; + tensor var_2358_cast = reduce_mean(axes = var_2357, keep_dims = var_1181, x = zero_mean_sq_61_cast)[name = tensor("op_2358_cast")]; + tensor var_2359_to_fp16 = const()[name = tensor("op_2359_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2360_cast = add(x = var_2358_cast, y = var_2359_to_fp16)[name = tensor("op_2360_cast")]; + tensor denom_61_epsilon_0_to_fp16 = const()[name = tensor("denom_61_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_61_cast = rsqrt(epsilon = denom_61_epsilon_0_to_fp16, x = var_2360_cast)[name = tensor("denom_61_cast")]; + tensor out_61_cast = mul(x = zero_mean_61_cast, y = denom_61_cast)[name = tensor("out_61_cast")]; + tensor var_2364_to_fp16 = const()[name = tensor("op_2364_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(157342784)))]; + tensor var_2365_cast = add(x = out_61_cast, y = var_2364_to_fp16)[name = tensor("op_2365_cast")]; + tensor var_2367_to_fp16 = const()[name = tensor("op_2367_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(157345408)))]; + tensor hidden_states_101_cast = mul(x = var_2365_cast, y = var_2367_to_fp16)[name = tensor("hidden_states_101_cast")]; + tensor var_2374 = const()[name = tensor("op_2374"), val = tensor([1, 1])]; + tensor var_2376 = const()[name = tensor("op_2376"), val = tensor([1, 1])]; + tensor q_41_pad_type_0 = const()[name = tensor("q_41_pad_type_0"), val = tensor("custom")]; + tensor q_41_pad_0 = const()[name = tensor("q_41_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(157348032))), lut = tensor([-0x1.13p-5, -0x1.4a4p-7, 0x1.4b4p-7, 0x1.12cp-5]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_41_cast = conv(dilations = var_2376, groups = var_1186, pad = q_41_pad_0, pad_type = q_41_pad_type_0, strides = var_2374, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_101_cast)[name = tensor("q_41_cast")]; + tensor var_2380 = const()[name = tensor("op_2380"), val = tensor([1, 1])]; + tensor var_2382 = const()[name = tensor("op_2382"), val = tensor([1, 1])]; + tensor k_41_pad_type_0 = const()[name = tensor("k_41_pad_type_0"), val = tensor("custom")]; + tensor k_41_pad_0 = const()[name = tensor("k_41_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(157757696))), lut = tensor([-0x1.1p-5, -0x1.474p-7, 0x1.49cp-7, 0x1.10cp-5]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_41_cast = conv(dilations = var_2382, groups = var_1186, pad = k_41_pad_0, pad_type = k_41_pad_type_0, strides = var_2380, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_101_cast)[name = tensor("k_41_cast")]; + tensor var_2386 = const()[name = tensor("op_2386"), val = tensor([1, 1])]; + tensor var_2388 = const()[name = tensor("op_2388"), val = tensor([1, 1])]; + tensor v_41_pad_type_0 = const()[name = 
tensor("v_41_pad_type_0"), val = tensor("custom")]; + tensor v_41_pad_0 = const()[name = tensor("v_41_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(158167360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(158986624))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_41_cast = conv(dilations = var_2388, groups = var_1186, pad = v_41_pad_0, pad_type = v_41_pad_type_0, strides = var_2386, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_101_cast)[name = tensor("v_41_cast")]; + tensor var_2392 = const()[name = tensor("op_2392"), val = tensor([2, 20, 64, -1])]; + tensor var_2393_cast = reshape(shape = var_2392, x = q_41_cast)[name = tensor("op_2393_cast")]; + tensor var_2394 = const()[name = tensor("op_2394"), val = tensor([2, 20, 64, -1])]; + tensor var_2395_cast = reshape(shape = var_2394, x = k_41_cast)[name = tensor("op_2395_cast")]; + tensor var_2396 = const()[name = tensor("op_2396"), val = tensor([2, 20, 64, -1])]; + tensor var_2397_cast = reshape(shape = var_2396, x = v_41_cast)[name = tensor("op_2397_cast")]; + tensor attn_weights_81_transpose_x_0 = const()[name = tensor("attn_weights_81_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_81_transpose_y_0 = const()[name = tensor("attn_weights_81_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_81_cast = matmul(transpose_x = attn_weights_81_transpose_x_0, transpose_y = attn_weights_81_transpose_y_0, x = var_2393_cast, y = var_2395_cast)[name = tensor("attn_weights_81_cast")]; + tensor attn_weights_83_cast = mul(x = attn_weights_81_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_83_cast")]; + tensor var_2401_cast = softmax(axis = var_1170, x = attn_weights_83_cast)[name = tensor("op_2401_cast")]; + tensor attn_41_transpose_x_0 = const()[name = tensor("attn_41_transpose_x_0"), val = tensor(false)]; + tensor attn_41_transpose_y_0 = const()[name = tensor("attn_41_transpose_y_0"), val = tensor(true)]; + tensor attn_41_cast = matmul(transpose_x = attn_41_transpose_x_0, transpose_y = attn_41_transpose_y_0, x = var_2397_cast, y = var_2401_cast)[name = tensor("attn_41_cast")]; + tensor var_2405 = const()[name = tensor("op_2405"), val = tensor([2, 1280, 1, -1])]; + tensor input_179_cast = reshape(shape = var_2405, x = attn_41_cast)[name = tensor("input_179_cast")]; + tensor var_2410 = const()[name = tensor("op_2410"), val = tensor([1, 1])]; + tensor var_2412 = const()[name = tensor("op_2412"), val = tensor([1, 1])]; + tensor var_2414_pad_type_0 = const()[name = tensor("op_2414_pad_type_0"), val = tensor("custom")]; + tensor var_2414_pad_0 = const()[name = tensor("op_2414_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(158986752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(159806016))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor 
down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(159806144)))]; + tensor var_2414_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_2412, groups = var_1186, pad = var_2414_pad_0, pad_type = var_2414_pad_type_0, strides = var_2410, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_179_cast)[name = tensor("op_2414_cast")]; + tensor inputs_63_cast = add(x = var_2414_cast, y = inputs_61_cast)[name = tensor("inputs_63_cast")]; + tensor var_2418 = const()[name = tensor("op_2418"), val = tensor([1])]; + tensor channels_mean_63_cast = reduce_mean(axes = var_2418, keep_dims = var_1181, x = inputs_63_cast)[name = tensor("channels_mean_63_cast")]; + tensor zero_mean_63_cast = sub(x = inputs_63_cast, y = channels_mean_63_cast)[name = tensor("zero_mean_63_cast")]; + tensor zero_mean_sq_63_cast = mul(x = zero_mean_63_cast, y = zero_mean_63_cast)[name = tensor("zero_mean_sq_63_cast")]; + tensor var_2422 = const()[name = tensor("op_2422"), val = tensor([1])]; + tensor var_2423_cast = reduce_mean(axes = var_2422, keep_dims = var_1181, x = zero_mean_sq_63_cast)[name = tensor("op_2423_cast")]; + tensor var_2424_to_fp16 = const()[name = tensor("op_2424_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2425_cast = add(x = var_2423_cast, y = var_2424_to_fp16)[name = tensor("op_2425_cast")]; + tensor denom_63_epsilon_0_to_fp16 = const()[name = tensor("denom_63_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_63_cast = rsqrt(epsilon = denom_63_epsilon_0_to_fp16, x = var_2425_cast)[name = tensor("denom_63_cast")]; + tensor out_63_cast = mul(x = zero_mean_63_cast, y = denom_63_cast)[name = tensor("out_63_cast")]; + tensor var_2429_to_fp16 = const()[name = tensor("op_2429_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(159808768)))]; + tensor var_2430_cast = add(x = out_63_cast, y = var_2429_to_fp16)[name = tensor("op_2430_cast")]; + tensor var_2432_to_fp16 = const()[name = tensor("op_2432_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(159811392)))]; + tensor hidden_states_103_cast = mul(x = var_2430_cast, y = var_2432_to_fp16)[name = tensor("hidden_states_103_cast")]; + tensor var_2439 = const()[name = tensor("op_2439"), val = tensor([1, 1])]; + tensor var_2441 = const()[name = tensor("op_2441"), val = tensor([1, 1])]; + tensor q_43_pad_type_0 = const()[name = tensor("q_43_pad_type_0"), val = tensor("custom")]; + tensor q_43_pad_0 = const()[name = tensor("q_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(159814016))), lut = tensor([-0x1.61p-7, 0x1.608p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_43_cast = conv(dilations = var_2441, groups = var_1186, pad = q_43_pad_0, pad_type = q_43_pad_type_0, strides = var_2439, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_103_cast)[name = 
tensor("q_43_cast")]; + tensor var_2445 = const()[name = tensor("op_2445"), val = tensor([1, 1])]; + tensor var_2447 = const()[name = tensor("op_2447"), val = tensor([1, 1])]; + tensor k_43_pad_type_0 = const()[name = tensor("k_43_pad_type_0"), val = tensor("custom")]; + tensor k_43_pad_0 = const()[name = tensor("k_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160018880))), lut = tensor([-0x1.c8p-8, 0x1.c8cp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_43_cast = conv(dilations = var_2447, groups = var_1186, pad = k_43_pad_0, pad_type = k_43_pad_type_0, strides = var_2445, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_43_cast")]; + tensor var_2451 = const()[name = tensor("op_2451"), val = tensor([1, 1])]; + tensor var_2453 = const()[name = tensor("op_2453"), val = tensor([1, 1])]; + tensor v_43_pad_type_0 = const()[name = tensor("v_43_pad_type_0"), val = tensor("custom")]; + tensor v_43_pad_0 = const()[name = tensor("v_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160346624))), lut = tensor([-0x1.e5cp-8, 0x1.e78p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_43_cast = conv(dilations = var_2453, groups = var_1186, pad = v_43_pad_0, pad_type = v_43_pad_type_0, strides = var_2451, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_43_cast")]; + tensor var_2457 = const()[name = tensor("op_2457"), val = tensor([2, 20, 64, -1])]; + tensor var_2458_cast = reshape(shape = var_2457, x = q_43_cast)[name = tensor("op_2458_cast")]; + tensor var_2459 = const()[name = tensor("op_2459"), val = tensor([2, 20, 64, -1])]; + tensor var_2460_cast = reshape(shape = var_2459, x = k_43_cast)[name = tensor("op_2460_cast")]; + tensor var_2461 = const()[name = tensor("op_2461"), val = tensor([2, 20, 64, -1])]; + tensor var_2462_cast = reshape(shape = var_2461, x = v_43_cast)[name = tensor("op_2462_cast")]; + tensor attn_weights_85_transpose_x_0 = const()[name = tensor("attn_weights_85_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_85_transpose_y_0 = const()[name = tensor("attn_weights_85_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_85_cast = matmul(transpose_x = attn_weights_85_transpose_x_0, transpose_y = attn_weights_85_transpose_y_0, x = var_2458_cast, y = var_2460_cast)[name = tensor("attn_weights_85_cast")]; + tensor attn_weights_87_cast = mul(x = attn_weights_85_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_87_cast")]; + tensor var_2466_cast = softmax(axis = var_1170, x = attn_weights_87_cast)[name = tensor("op_2466_cast")]; + tensor attn_43_transpose_x_0 = const()[name = tensor("attn_43_transpose_x_0"), val = tensor(false)]; + tensor attn_43_transpose_y_0 = const()[name = tensor("attn_43_transpose_y_0"), val = tensor(true)]; + tensor attn_43_cast = matmul(transpose_x = 
attn_43_transpose_x_0, transpose_y = attn_43_transpose_y_0, x = var_2462_cast, y = var_2466_cast)[name = tensor("attn_43_cast")]; + tensor var_2470 = const()[name = tensor("op_2470"), val = tensor([2, 1280, 1, -1])]; + tensor input_181_cast = reshape(shape = var_2470, x = attn_43_cast)[name = tensor("input_181_cast")]; + tensor var_2475 = const()[name = tensor("op_2475"), val = tensor([1, 1])]; + tensor var_2477 = const()[name = tensor("op_2477"), val = tensor([1, 1])]; + tensor var_2479_pad_type_0 = const()[name = tensor("op_2479_pad_type_0"), val = tensor("custom")]; + tensor var_2479_pad_0 = const()[name = tensor("op_2479_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160674368))), lut = tensor([-0x1.1a8p-8, 0x1.1a8p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160879232)))]; + tensor var_2479_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_2477, groups = var_1186, pad = var_2479_pad_0, pad_type = var_2479_pad_type_0, strides = var_2475, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_181_cast)[name = tensor("op_2479_cast")]; + tensor inputs_65_cast = add(x = var_2479_cast, y = inputs_63_cast)[name = tensor("inputs_65_cast")]; + tensor var_2483 = const()[name = tensor("op_2483"), val = tensor([1])]; + tensor channels_mean_65_cast = reduce_mean(axes = var_2483, keep_dims = var_1181, x = inputs_65_cast)[name = tensor("channels_mean_65_cast")]; + tensor zero_mean_65_cast = sub(x = inputs_65_cast, y = channels_mean_65_cast)[name = tensor("zero_mean_65_cast")]; + tensor zero_mean_sq_65_cast = mul(x = zero_mean_65_cast, y = zero_mean_65_cast)[name = tensor("zero_mean_sq_65_cast")]; + tensor var_2487 = const()[name = tensor("op_2487"), val = tensor([1])]; + tensor var_2488_cast = reduce_mean(axes = var_2487, keep_dims = var_1181, x = zero_mean_sq_65_cast)[name = tensor("op_2488_cast")]; + tensor var_2489_to_fp16 = const()[name = tensor("op_2489_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2490_cast = add(x = var_2488_cast, y = var_2489_to_fp16)[name = tensor("op_2490_cast")]; + tensor denom_65_epsilon_0_to_fp16 = const()[name = tensor("denom_65_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_65_cast = rsqrt(epsilon = denom_65_epsilon_0_to_fp16, x = var_2490_cast)[name = tensor("denom_65_cast")]; + tensor out_65_cast = mul(x = zero_mean_65_cast, y = denom_65_cast)[name = tensor("out_65_cast")]; + tensor var_2494_to_fp16 = const()[name = tensor("op_2494_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160881856)))]; + tensor var_2495_cast = add(x = out_65_cast, y = var_2494_to_fp16)[name = tensor("op_2495_cast")]; + tensor var_2497_to_fp16 = const()[name = tensor("op_2497_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160884480)))]; + tensor input_183_cast = mul(x = var_2495_cast, y 
= var_2497_to_fp16)[name = tensor("input_183_cast")]; + tensor var_2505 = const()[name = tensor("op_2505"), val = tensor([1, 1])]; + tensor var_2507 = const()[name = tensor("op_2507"), val = tensor([1, 1])]; + tensor var_2509_pad_type_0 = const()[name = tensor("op_2509_pad_type_0"), val = tensor("custom")]; + tensor var_2509_pad_0 = const()[name = tensor("op_2509_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160887104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(167440768))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(167440896)))]; + tensor var_2509_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_2507, groups = var_1186, pad = var_2509_pad_0, pad_type = var_2509_pad_type_0, strides = var_2505, weight = down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_183_cast)[name = tensor("op_2509_cast")]; + tensor var_2510_split_sizes_0 = const()[name = tensor("op_2510_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2510_axis_0 = const()[name = tensor("op_2510_axis_0"), val = tensor(1)]; + tensor var_2510_cast_0, tensor var_2510_cast_1 = split(axis = var_2510_axis_0, split_sizes = var_2510_split_sizes_0, x = var_2509_cast)[name = tensor("op_2510_cast")]; + tensor var_2512_mode_0 = const()[name = tensor("op_2512_mode_0"), val = tensor("EXACT")]; + tensor var_2512_cast = gelu(mode = var_2512_mode_0, x = var_2510_cast_1)[name = tensor("op_2512_cast")]; + tensor input_185_cast = mul(x = var_2510_cast_0, y = var_2512_cast)[name = tensor("input_185_cast")]; + tensor var_2516 = const()[name = tensor("op_2516"), val = tensor([1, 1])]; + tensor var_2518 = const()[name = tensor("op_2518"), val = tensor([1, 1])]; + tensor var_2520_pad_type_0 = const()[name = tensor("op_2520_pad_type_0"), val = tensor("custom")]; + tensor var_2520_pad_0 = const()[name = tensor("op_2520_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(167461440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(170738304))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(170738432)))]; + tensor var_2520_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_2518, groups = var_1186, pad = var_2520_pad_0, pad_type = var_2520_pad_type_0, strides = var_2516, weight = 
down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_185_cast)[name = tensor("op_2520_cast")]; + tensor inputs_67_cast = add(x = var_2520_cast, y = inputs_65_cast)[name = tensor("inputs_67_cast")]; + tensor var_2530 = const()[name = tensor("op_2530"), val = tensor([1])]; + tensor channels_mean_67_cast = reduce_mean(axes = var_2530, keep_dims = var_1181, x = inputs_67_cast)[name = tensor("channels_mean_67_cast")]; + tensor zero_mean_67_cast = sub(x = inputs_67_cast, y = channels_mean_67_cast)[name = tensor("zero_mean_67_cast")]; + tensor zero_mean_sq_67_cast = mul(x = zero_mean_67_cast, y = zero_mean_67_cast)[name = tensor("zero_mean_sq_67_cast")]; + tensor var_2534 = const()[name = tensor("op_2534"), val = tensor([1])]; + tensor var_2535_cast = reduce_mean(axes = var_2534, keep_dims = var_1181, x = zero_mean_sq_67_cast)[name = tensor("op_2535_cast")]; + tensor var_2536_to_fp16 = const()[name = tensor("op_2536_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2537_cast = add(x = var_2535_cast, y = var_2536_to_fp16)[name = tensor("op_2537_cast")]; + tensor denom_67_epsilon_0_to_fp16 = const()[name = tensor("denom_67_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_67_cast = rsqrt(epsilon = denom_67_epsilon_0_to_fp16, x = var_2537_cast)[name = tensor("denom_67_cast")]; + tensor out_67_cast = mul(x = zero_mean_67_cast, y = denom_67_cast)[name = tensor("out_67_cast")]; + tensor var_2541_to_fp16 = const()[name = tensor("op_2541_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(170741056)))]; + tensor var_2542_cast = add(x = out_67_cast, y = var_2541_to_fp16)[name = tensor("op_2542_cast")]; + tensor var_2544_to_fp16 = const()[name = tensor("op_2544_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(170743680)))]; + tensor hidden_states_107_cast = mul(x = var_2542_cast, y = var_2544_to_fp16)[name = tensor("hidden_states_107_cast")]; + tensor var_2551 = const()[name = tensor("op_2551"), val = tensor([1, 1])]; + tensor var_2553 = const()[name = tensor("op_2553"), val = tensor([1, 1])]; + tensor q_45_pad_type_0 = const()[name = tensor("q_45_pad_type_0"), val = tensor("custom")]; + tensor q_45_pad_0 = const()[name = tensor("q_45_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(170746304))), lut = tensor([-0x1.0bp-5, -0x1.4p-7, 0x1.458p-7, 0x1.0c4p-5]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_45_cast = conv(dilations = var_2553, groups = var_1186, pad = q_45_pad_0, pad_type = q_45_pad_type_0, strides = var_2551, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_107_cast)[name = tensor("q_45_cast")]; + tensor var_2557 = const()[name = tensor("op_2557"), val = tensor([1, 1])]; + tensor var_2559 = const()[name = tensor("op_2559"), val = tensor([1, 1])]; + tensor k_45_pad_type_0 = const()[name = tensor("k_45_pad_type_0"), val = tensor("custom")]; + tensor k_45_pad_0 = const()[name = tensor("k_45_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(171155968))), lut = tensor([-0x1.09p-5, -0x1.404p-7, 0x1.404p-7, 0x1.08cp-5]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_45_cast = conv(dilations = var_2559, groups = var_1186, pad = k_45_pad_0, pad_type = k_45_pad_type_0, strides = var_2557, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_107_cast)[name = tensor("k_45_cast")]; + tensor var_2563 = const()[name = tensor("op_2563"), val = tensor([1, 1])]; + tensor var_2565 = const()[name = tensor("op_2565"), val = tensor([1, 1])]; + tensor v_45_pad_type_0 = const()[name = tensor("v_45_pad_type_0"), val = tensor("custom")]; + tensor v_45_pad_0 = const()[name = tensor("v_45_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(171565632))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(172384896))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_45_cast = conv(dilations = var_2565, groups = var_1186, pad = v_45_pad_0, pad_type = v_45_pad_type_0, strides = var_2563, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_107_cast)[name = tensor("v_45_cast")]; + tensor var_2569 = const()[name = tensor("op_2569"), val = tensor([2, 20, 64, -1])]; + tensor var_2570_cast = reshape(shape = var_2569, x = q_45_cast)[name = tensor("op_2570_cast")]; + tensor var_2571 = const()[name = tensor("op_2571"), val = tensor([2, 20, 64, -1])]; + tensor var_2572_cast = reshape(shape = var_2571, x = k_45_cast)[name = tensor("op_2572_cast")]; + tensor var_2573 = const()[name = tensor("op_2573"), val = tensor([2, 20, 64, -1])]; + tensor var_2574_cast = reshape(shape = var_2573, x = v_45_cast)[name = tensor("op_2574_cast")]; + tensor attn_weights_89_transpose_x_0 = const()[name = tensor("attn_weights_89_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_89_transpose_y_0 = const()[name = tensor("attn_weights_89_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_89_cast = matmul(transpose_x = attn_weights_89_transpose_x_0, transpose_y = attn_weights_89_transpose_y_0, x = var_2570_cast, y = var_2572_cast)[name = tensor("attn_weights_89_cast")]; + tensor attn_weights_91_cast = mul(x = attn_weights_89_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_91_cast")]; + tensor var_2578_cast = softmax(axis = var_1170, x = attn_weights_91_cast)[name = tensor("op_2578_cast")]; + tensor attn_45_transpose_x_0 = const()[name = tensor("attn_45_transpose_x_0"), val = tensor(false)]; + tensor attn_45_transpose_y_0 = const()[name = tensor("attn_45_transpose_y_0"), val = tensor(true)]; + tensor attn_45_cast = matmul(transpose_x = attn_45_transpose_x_0, transpose_y = attn_45_transpose_y_0, x = var_2574_cast, y = var_2578_cast)[name = tensor("attn_45_cast")]; + tensor var_2582 = const()[name = tensor("op_2582"), val = tensor([2, 1280, 1, -1])]; + tensor input_187_cast = reshape(shape = var_2582, x = attn_45_cast)[name = tensor("input_187_cast")]; + tensor var_2587 = const()[name = tensor("op_2587"), val = tensor([1, 1])]; + tensor var_2589 = 
const()[name = tensor("op_2589"), val = tensor([1, 1])]; + tensor var_2591_pad_type_0 = const()[name = tensor("op_2591_pad_type_0"), val = tensor("custom")]; + tensor var_2591_pad_0 = const()[name = tensor("op_2591_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(172385024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(173204288))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(173204416)))]; + tensor var_2591_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_2589, groups = var_1186, pad = var_2591_pad_0, pad_type = var_2591_pad_type_0, strides = var_2587, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_187_cast)[name = tensor("op_2591_cast")]; + tensor inputs_69_cast = add(x = var_2591_cast, y = inputs_67_cast)[name = tensor("inputs_69_cast")]; + tensor var_2595 = const()[name = tensor("op_2595"), val = tensor([1])]; + tensor channels_mean_69_cast = reduce_mean(axes = var_2595, keep_dims = var_1181, x = inputs_69_cast)[name = tensor("channels_mean_69_cast")]; + tensor zero_mean_69_cast = sub(x = inputs_69_cast, y = channels_mean_69_cast)[name = tensor("zero_mean_69_cast")]; + tensor zero_mean_sq_69_cast = mul(x = zero_mean_69_cast, y = zero_mean_69_cast)[name = tensor("zero_mean_sq_69_cast")]; + tensor var_2599 = const()[name = tensor("op_2599"), val = tensor([1])]; + tensor var_2600_cast = reduce_mean(axes = var_2599, keep_dims = var_1181, x = zero_mean_sq_69_cast)[name = tensor("op_2600_cast")]; + tensor var_2601_to_fp16 = const()[name = tensor("op_2601_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2602_cast = add(x = var_2600_cast, y = var_2601_to_fp16)[name = tensor("op_2602_cast")]; + tensor denom_69_epsilon_0_to_fp16 = const()[name = tensor("denom_69_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_69_cast = rsqrt(epsilon = denom_69_epsilon_0_to_fp16, x = var_2602_cast)[name = tensor("denom_69_cast")]; + tensor out_69_cast = mul(x = zero_mean_69_cast, y = denom_69_cast)[name = tensor("out_69_cast")]; + tensor var_2606_to_fp16 = const()[name = tensor("op_2606_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(173207040)))]; + tensor var_2607_cast = add(x = out_69_cast, y = var_2606_to_fp16)[name = tensor("op_2607_cast")]; + tensor var_2609_to_fp16 = const()[name = tensor("op_2609_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(173209664)))]; + tensor hidden_states_109_cast = mul(x = var_2607_cast, y = var_2609_to_fp16)[name = tensor("hidden_states_109_cast")]; + tensor var_2616 = const()[name = tensor("op_2616"), val = tensor([1, 1])]; + tensor var_2618 = const()[name = tensor("op_2618"), val = tensor([1, 1])]; + tensor q_47_pad_type_0 = const()[name = tensor("q_47_pad_type_0"), val = tensor("custom")]; + tensor q_47_pad_0 = const()[name = 
tensor("q_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(173212288))), lut = tensor([-0x1.61p-7, 0x1.614p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_47_cast = conv(dilations = var_2618, groups = var_1186, pad = q_47_pad_0, pad_type = q_47_pad_type_0, strides = var_2616, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_109_cast)[name = tensor("q_47_cast")]; + tensor var_2622 = const()[name = tensor("op_2622"), val = tensor([1, 1])]; + tensor var_2624 = const()[name = tensor("op_2624"), val = tensor([1, 1])]; + tensor k_47_pad_type_0 = const()[name = tensor("k_47_pad_type_0"), val = tensor("custom")]; + tensor k_47_pad_0 = const()[name = tensor("k_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(173417152))), lut = tensor([-0x1.c84p-8, 0x1.c98p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_47_cast = conv(dilations = var_2624, groups = var_1186, pad = k_47_pad_0, pad_type = k_47_pad_type_0, strides = var_2622, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_47_cast")]; + tensor var_2628 = const()[name = tensor("op_2628"), val = tensor([1, 1])]; + tensor var_2630 = const()[name = tensor("op_2630"), val = tensor([1, 1])]; + tensor v_47_pad_type_0 = const()[name = tensor("v_47_pad_type_0"), val = tensor("custom")]; + tensor v_47_pad_0 = const()[name = tensor("v_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(173744896))), lut = tensor([-0x1.e6cp-8, 0x1.e64p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_47_cast = conv(dilations = var_2630, groups = var_1186, pad = v_47_pad_0, pad_type = v_47_pad_type_0, strides = var_2628, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_47_cast")]; + tensor var_2634 = const()[name = tensor("op_2634"), val = tensor([2, 20, 64, -1])]; + tensor var_2635_cast = reshape(shape = var_2634, x = q_47_cast)[name = tensor("op_2635_cast")]; + tensor var_2636 = const()[name = tensor("op_2636"), val = tensor([2, 20, 64, -1])]; + tensor var_2637_cast = reshape(shape = var_2636, x = k_47_cast)[name = tensor("op_2637_cast")]; + tensor var_2638 = const()[name = tensor("op_2638"), val = tensor([2, 20, 64, -1])]; + tensor var_2639_cast = reshape(shape = var_2638, x = v_47_cast)[name = tensor("op_2639_cast")]; + tensor attn_weights_93_transpose_x_0 = const()[name = tensor("attn_weights_93_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_93_transpose_y_0 = const()[name = 
tensor("attn_weights_93_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_93_cast = matmul(transpose_x = attn_weights_93_transpose_x_0, transpose_y = attn_weights_93_transpose_y_0, x = var_2635_cast, y = var_2637_cast)[name = tensor("attn_weights_93_cast")]; + tensor attn_weights_95_cast = mul(x = attn_weights_93_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_95_cast")]; + tensor var_2643_cast = softmax(axis = var_1170, x = attn_weights_95_cast)[name = tensor("op_2643_cast")]; + tensor attn_47_transpose_x_0 = const()[name = tensor("attn_47_transpose_x_0"), val = tensor(false)]; + tensor attn_47_transpose_y_0 = const()[name = tensor("attn_47_transpose_y_0"), val = tensor(true)]; + tensor attn_47_cast = matmul(transpose_x = attn_47_transpose_x_0, transpose_y = attn_47_transpose_y_0, x = var_2639_cast, y = var_2643_cast)[name = tensor("attn_47_cast")]; + tensor var_2647 = const()[name = tensor("op_2647"), val = tensor([2, 1280, 1, -1])]; + tensor input_189_cast = reshape(shape = var_2647, x = attn_47_cast)[name = tensor("input_189_cast")]; + tensor var_2652 = const()[name = tensor("op_2652"), val = tensor([1, 1])]; + tensor var_2654 = const()[name = tensor("op_2654"), val = tensor([1, 1])]; + tensor var_2656_pad_type_0 = const()[name = tensor("op_2656_pad_type_0"), val = tensor("custom")]; + tensor var_2656_pad_0 = const()[name = tensor("op_2656_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(174072640))), lut = tensor([-0x1.214p-8, 0x1.21p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(174277504)))]; + tensor var_2656_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_2654, groups = var_1186, pad = var_2656_pad_0, pad_type = var_2656_pad_type_0, strides = var_2652, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_189_cast)[name = tensor("op_2656_cast")]; + tensor inputs_71_cast = add(x = var_2656_cast, y = inputs_69_cast)[name = tensor("inputs_71_cast")]; + tensor var_2660 = const()[name = tensor("op_2660"), val = tensor([1])]; + tensor channels_mean_71_cast = reduce_mean(axes = var_2660, keep_dims = var_1181, x = inputs_71_cast)[name = tensor("channels_mean_71_cast")]; + tensor zero_mean_71_cast = sub(x = inputs_71_cast, y = channels_mean_71_cast)[name = tensor("zero_mean_71_cast")]; + tensor zero_mean_sq_71_cast = mul(x = zero_mean_71_cast, y = zero_mean_71_cast)[name = tensor("zero_mean_sq_71_cast")]; + tensor var_2664 = const()[name = tensor("op_2664"), val = tensor([1])]; + tensor var_2665_cast = reduce_mean(axes = var_2664, keep_dims = var_1181, x = zero_mean_sq_71_cast)[name = tensor("op_2665_cast")]; + tensor var_2666_to_fp16 = const()[name = tensor("op_2666_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2667_cast = add(x = var_2665_cast, y = var_2666_to_fp16)[name = tensor("op_2667_cast")]; + tensor denom_71_epsilon_0_to_fp16 = const()[name = 
tensor("denom_71_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_71_cast = rsqrt(epsilon = denom_71_epsilon_0_to_fp16, x = var_2667_cast)[name = tensor("denom_71_cast")]; + tensor out_71_cast = mul(x = zero_mean_71_cast, y = denom_71_cast)[name = tensor("out_71_cast")]; + tensor var_2671_to_fp16 = const()[name = tensor("op_2671_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(174280128)))]; + tensor var_2672_cast = add(x = out_71_cast, y = var_2671_to_fp16)[name = tensor("op_2672_cast")]; + tensor var_2674_to_fp16 = const()[name = tensor("op_2674_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(174282752)))]; + tensor input_191_cast = mul(x = var_2672_cast, y = var_2674_to_fp16)[name = tensor("input_191_cast")]; + tensor var_2682 = const()[name = tensor("op_2682"), val = tensor([1, 1])]; + tensor var_2684 = const()[name = tensor("op_2684"), val = tensor([1, 1])]; + tensor var_2686_pad_type_0 = const()[name = tensor("op_2686_pad_type_0"), val = tensor("custom")]; + tensor var_2686_pad_0 = const()[name = tensor("op_2686_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(174285376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(180839040))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(180839168)))]; + tensor var_2686_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_2684, groups = var_1186, pad = var_2686_pad_0, pad_type = var_2686_pad_type_0, strides = var_2682, weight = down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_191_cast)[name = tensor("op_2686_cast")]; + tensor var_2687_split_sizes_0 = const()[name = tensor("op_2687_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2687_axis_0 = const()[name = tensor("op_2687_axis_0"), val = tensor(1)]; + tensor var_2687_cast_0, tensor var_2687_cast_1 = split(axis = var_2687_axis_0, split_sizes = var_2687_split_sizes_0, x = var_2686_cast)[name = tensor("op_2687_cast")]; + tensor var_2689_mode_0 = const()[name = tensor("op_2689_mode_0"), val = tensor("EXACT")]; + tensor var_2689_cast = gelu(mode = var_2689_mode_0, x = var_2687_cast_1)[name = tensor("op_2689_cast")]; + tensor input_193_cast = mul(x = var_2687_cast_0, y = var_2689_cast)[name = tensor("input_193_cast")]; + tensor var_2693 = const()[name = tensor("op_2693"), val = tensor([1, 1])]; + tensor var_2695 = const()[name = tensor("op_2695"), val = tensor([1, 1])]; + tensor var_2697_pad_type_0 = const()[name = tensor("op_2697_pad_type_0"), val = tensor("custom")]; + tensor var_2697_pad_0 = const()[name = tensor("op_2697_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(180859712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184136576))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184136704)))]; + tensor var_2697_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_2695, groups = var_1186, pad = var_2697_pad_0, pad_type = var_2697_pad_type_0, strides = var_2693, weight = down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_193_cast)[name = tensor("op_2697_cast")]; + tensor inputs_73_cast = add(x = var_2697_cast, y = inputs_71_cast)[name = tensor("inputs_73_cast")]; + tensor var_2707 = const()[name = tensor("op_2707"), val = tensor([1])]; + tensor channels_mean_73_cast = reduce_mean(axes = var_2707, keep_dims = var_1181, x = inputs_73_cast)[name = tensor("channels_mean_73_cast")]; + tensor zero_mean_73_cast = sub(x = inputs_73_cast, y = channels_mean_73_cast)[name = tensor("zero_mean_73_cast")]; + tensor zero_mean_sq_73_cast = mul(x = zero_mean_73_cast, y = zero_mean_73_cast)[name = tensor("zero_mean_sq_73_cast")]; + tensor var_2711 = const()[name = tensor("op_2711"), val = tensor([1])]; + tensor var_2712_cast = reduce_mean(axes = var_2711, keep_dims = var_1181, x = zero_mean_sq_73_cast)[name = tensor("op_2712_cast")]; + tensor var_2713_to_fp16 = const()[name = tensor("op_2713_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2714_cast = add(x = var_2712_cast, y = var_2713_to_fp16)[name = tensor("op_2714_cast")]; + tensor denom_73_epsilon_0_to_fp16 = const()[name = tensor("denom_73_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_73_cast = rsqrt(epsilon = denom_73_epsilon_0_to_fp16, x = var_2714_cast)[name = tensor("denom_73_cast")]; + tensor out_73_cast = mul(x = zero_mean_73_cast, y = denom_73_cast)[name = tensor("out_73_cast")]; + tensor var_2718_to_fp16 = const()[name = tensor("op_2718_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184139328)))]; + tensor var_2719_cast = add(x = out_73_cast, y = var_2718_to_fp16)[name = tensor("op_2719_cast")]; + tensor var_2721_to_fp16 = const()[name = tensor("op_2721_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184141952)))]; + tensor hidden_states_113_cast = mul(x = var_2719_cast, y = var_2721_to_fp16)[name = tensor("hidden_states_113_cast")]; + tensor var_2728 = const()[name = tensor("op_2728"), val = tensor([1, 1])]; + tensor var_2730 = const()[name = tensor("op_2730"), val = tensor([1, 1])]; + tensor q_49_pad_type_0 = const()[name = tensor("q_49_pad_type_0"), val = tensor("custom")]; + tensor q_49_pad_0 = const()[name = tensor("q_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184144576))), lut = tensor([-0x1.fc8p-6, -0x1.338p-7, 0x1.33p-7, 0x1.fbcp-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor 
q_49_cast = conv(dilations = var_2730, groups = var_1186, pad = q_49_pad_0, pad_type = q_49_pad_type_0, strides = var_2728, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_113_cast)[name = tensor("q_49_cast")]; + tensor var_2734 = const()[name = tensor("op_2734"), val = tensor([1, 1])]; + tensor var_2736 = const()[name = tensor("op_2736"), val = tensor([1, 1])]; + tensor k_49_pad_type_0 = const()[name = tensor("k_49_pad_type_0"), val = tensor("custom")]; + tensor k_49_pad_0 = const()[name = tensor("k_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184554240))), lut = tensor([-0x1.f44p-6, -0x1.2f4p-7, 0x1.2fcp-7, 0x1.f5p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_49_cast = conv(dilations = var_2736, groups = var_1186, pad = k_49_pad_0, pad_type = k_49_pad_type_0, strides = var_2734, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_113_cast)[name = tensor("k_49_cast")]; + tensor var_2740 = const()[name = tensor("op_2740"), val = tensor([1, 1])]; + tensor var_2742 = const()[name = tensor("op_2742"), val = tensor([1, 1])]; + tensor v_49_pad_type_0 = const()[name = tensor("v_49_pad_type_0"), val = tensor("custom")]; + tensor v_49_pad_0 = const()[name = tensor("v_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184963904))), lut = tensor([-0x1.fap-6, -0x1.328p-7, 0x1.334p-7, 0x1.fa8p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_49_cast = conv(dilations = var_2742, groups = var_1186, pad = v_49_pad_0, pad_type = v_49_pad_type_0, strides = var_2740, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_113_cast)[name = tensor("v_49_cast")]; + tensor var_2746 = const()[name = tensor("op_2746"), val = tensor([2, 20, 64, -1])]; + tensor var_2747_cast = reshape(shape = var_2746, x = q_49_cast)[name = tensor("op_2747_cast")]; + tensor var_2748 = const()[name = tensor("op_2748"), val = tensor([2, 20, 64, -1])]; + tensor var_2749_cast = reshape(shape = var_2748, x = k_49_cast)[name = tensor("op_2749_cast")]; + tensor var_2750 = const()[name = tensor("op_2750"), val = tensor([2, 20, 64, -1])]; + tensor var_2751_cast = reshape(shape = var_2750, x = v_49_cast)[name = tensor("op_2751_cast")]; + tensor attn_weights_97_transpose_x_0 = const()[name = tensor("attn_weights_97_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_97_transpose_y_0 = const()[name = tensor("attn_weights_97_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_97_cast = matmul(transpose_x = attn_weights_97_transpose_x_0, transpose_y = attn_weights_97_transpose_y_0, x = var_2747_cast, y = var_2749_cast)[name = tensor("attn_weights_97_cast")]; + tensor attn_weights_99_cast = mul(x = attn_weights_97_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_99_cast")]; + tensor var_2755_cast = softmax(axis = var_1170, x = 
attn_weights_99_cast)[name = tensor("op_2755_cast")]; + tensor attn_49_transpose_x_0 = const()[name = tensor("attn_49_transpose_x_0"), val = tensor(false)]; + tensor attn_49_transpose_y_0 = const()[name = tensor("attn_49_transpose_y_0"), val = tensor(true)]; + tensor attn_49_cast = matmul(transpose_x = attn_49_transpose_x_0, transpose_y = attn_49_transpose_y_0, x = var_2751_cast, y = var_2755_cast)[name = tensor("attn_49_cast")]; + tensor var_2759 = const()[name = tensor("op_2759"), val = tensor([2, 1280, 1, -1])]; + tensor input_195_cast = reshape(shape = var_2759, x = attn_49_cast)[name = tensor("input_195_cast")]; + tensor var_2764 = const()[name = tensor("op_2764"), val = tensor([1, 1])]; + tensor var_2766 = const()[name = tensor("op_2766"), val = tensor([1, 1])]; + tensor var_2768_pad_type_0 = const()[name = tensor("op_2768_pad_type_0"), val = tensor("custom")]; + tensor var_2768_pad_0 = const()[name = tensor("op_2768_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(185373568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(186192832))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(186192960)))]; + tensor var_2768_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_2766, groups = var_1186, pad = var_2768_pad_0, pad_type = var_2768_pad_type_0, strides = var_2764, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_195_cast)[name = tensor("op_2768_cast")]; + tensor inputs_75_cast = add(x = var_2768_cast, y = inputs_73_cast)[name = tensor("inputs_75_cast")]; + tensor var_2772 = const()[name = tensor("op_2772"), val = tensor([1])]; + tensor channels_mean_75_cast = reduce_mean(axes = var_2772, keep_dims = var_1181, x = inputs_75_cast)[name = tensor("channels_mean_75_cast")]; + tensor zero_mean_75_cast = sub(x = inputs_75_cast, y = channels_mean_75_cast)[name = tensor("zero_mean_75_cast")]; + tensor zero_mean_sq_75_cast = mul(x = zero_mean_75_cast, y = zero_mean_75_cast)[name = tensor("zero_mean_sq_75_cast")]; + tensor var_2776 = const()[name = tensor("op_2776"), val = tensor([1])]; + tensor var_2777_cast = reduce_mean(axes = var_2776, keep_dims = var_1181, x = zero_mean_sq_75_cast)[name = tensor("op_2777_cast")]; + tensor var_2778_to_fp16 = const()[name = tensor("op_2778_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2779_cast = add(x = var_2777_cast, y = var_2778_to_fp16)[name = tensor("op_2779_cast")]; + tensor denom_75_epsilon_0_to_fp16 = const()[name = tensor("denom_75_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_75_cast = rsqrt(epsilon = denom_75_epsilon_0_to_fp16, x = var_2779_cast)[name = tensor("denom_75_cast")]; + tensor out_75_cast = mul(x = zero_mean_75_cast, y = denom_75_cast)[name = tensor("out_75_cast")]; + tensor var_2783_to_fp16 = const()[name = tensor("op_2783_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(186195584)))]; + tensor var_2784_cast = add(x = out_75_cast, y = var_2783_to_fp16)[name = tensor("op_2784_cast")]; + tensor var_2786_to_fp16 = const()[name = tensor("op_2786_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(186198208)))]; + tensor hidden_states_115_cast = mul(x = var_2784_cast, y = var_2786_to_fp16)[name = tensor("hidden_states_115_cast")]; + tensor var_2793 = const()[name = tensor("op_2793"), val = tensor([1, 1])]; + tensor var_2795 = const()[name = tensor("op_2795"), val = tensor([1, 1])]; + tensor q_51_pad_type_0 = const()[name = tensor("q_51_pad_type_0"), val = tensor("custom")]; + tensor q_51_pad_0 = const()[name = tensor("q_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(186200832))), lut = tensor([-0x1.60cp-7, 0x1.608p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_51_cast = conv(dilations = var_2795, groups = var_1186, pad = q_51_pad_0, pad_type = q_51_pad_type_0, strides = var_2793, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_115_cast)[name = tensor("q_51_cast")]; + tensor var_2799 = const()[name = tensor("op_2799"), val = tensor([1, 1])]; + tensor var_2801 = const()[name = tensor("op_2801"), val = tensor([1, 1])]; + tensor k_51_pad_type_0 = const()[name = tensor("k_51_pad_type_0"), val = tensor("custom")]; + tensor k_51_pad_0 = const()[name = tensor("k_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(186405696))), lut = tensor([-0x1.c7p-8, 0x1.c7cp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_51_cast = conv(dilations = var_2801, groups = var_1186, pad = k_51_pad_0, pad_type = k_51_pad_type_0, strides = var_2799, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_51_cast")]; + tensor var_2805 = const()[name = tensor("op_2805"), val = tensor([1, 1])]; + tensor var_2807 = const()[name = tensor("op_2807"), val = tensor([1, 1])]; + tensor v_51_pad_type_0 = const()[name = tensor("v_51_pad_type_0"), val = tensor("custom")]; + tensor v_51_pad_0 = const()[name = tensor("v_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(186733440))), lut = tensor([-0x1.eap-8, 0x1.ecp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_51_cast = conv(dilations = var_2807, groups = var_1186, pad = v_51_pad_0, pad_type = v_51_pad_type_0, strides = var_2805, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_51_cast")]; + tensor 
var_2811 = const()[name = tensor("op_2811"), val = tensor([2, 20, 64, -1])]; + tensor var_2812_cast = reshape(shape = var_2811, x = q_51_cast)[name = tensor("op_2812_cast")]; + tensor var_2813 = const()[name = tensor("op_2813"), val = tensor([2, 20, 64, -1])]; + tensor var_2814_cast = reshape(shape = var_2813, x = k_51_cast)[name = tensor("op_2814_cast")]; + tensor var_2815 = const()[name = tensor("op_2815"), val = tensor([2, 20, 64, -1])]; + tensor var_2816_cast = reshape(shape = var_2815, x = v_51_cast)[name = tensor("op_2816_cast")]; + tensor attn_weights_101_transpose_x_0 = const()[name = tensor("attn_weights_101_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_101_transpose_y_0 = const()[name = tensor("attn_weights_101_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_101_cast = matmul(transpose_x = attn_weights_101_transpose_x_0, transpose_y = attn_weights_101_transpose_y_0, x = var_2812_cast, y = var_2814_cast)[name = tensor("attn_weights_101_cast")]; + tensor attn_weights_103_cast = mul(x = attn_weights_101_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_103_cast")]; + tensor var_2820_cast = softmax(axis = var_1170, x = attn_weights_103_cast)[name = tensor("op_2820_cast")]; + tensor attn_51_transpose_x_0 = const()[name = tensor("attn_51_transpose_x_0"), val = tensor(false)]; + tensor attn_51_transpose_y_0 = const()[name = tensor("attn_51_transpose_y_0"), val = tensor(true)]; + tensor attn_51_cast = matmul(transpose_x = attn_51_transpose_x_0, transpose_y = attn_51_transpose_y_0, x = var_2816_cast, y = var_2820_cast)[name = tensor("attn_51_cast")]; + tensor var_2824 = const()[name = tensor("op_2824"), val = tensor([2, 1280, 1, -1])]; + tensor input_197_cast = reshape(shape = var_2824, x = attn_51_cast)[name = tensor("input_197_cast")]; + tensor var_2829 = const()[name = tensor("op_2829"), val = tensor([1, 1])]; + tensor var_2831 = const()[name = tensor("op_2831"), val = tensor([1, 1])]; + tensor var_2833_pad_type_0 = const()[name = tensor("op_2833_pad_type_0"), val = tensor("custom")]; + tensor var_2833_pad_0 = const()[name = tensor("op_2833_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(187061184))), lut = tensor([-0x1.2b8p-8, 0x1.2cp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(187266048)))]; + tensor var_2833_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_2831, groups = var_1186, pad = var_2833_pad_0, pad_type = var_2833_pad_type_0, strides = var_2829, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_197_cast)[name = tensor("op_2833_cast")]; + tensor inputs_77_cast = add(x = var_2833_cast, y = inputs_75_cast)[name = tensor("inputs_77_cast")]; + tensor var_2837 = const()[name = tensor("op_2837"), val = tensor([1])]; + tensor channels_mean_77_cast = reduce_mean(axes = var_2837, keep_dims = var_1181, x = inputs_77_cast)[name = 
tensor("channels_mean_77_cast")]; + tensor zero_mean_77_cast = sub(x = inputs_77_cast, y = channels_mean_77_cast)[name = tensor("zero_mean_77_cast")]; + tensor zero_mean_sq_77_cast = mul(x = zero_mean_77_cast, y = zero_mean_77_cast)[name = tensor("zero_mean_sq_77_cast")]; + tensor var_2841 = const()[name = tensor("op_2841"), val = tensor([1])]; + tensor var_2842_cast = reduce_mean(axes = var_2841, keep_dims = var_1181, x = zero_mean_sq_77_cast)[name = tensor("op_2842_cast")]; + tensor var_2843_to_fp16 = const()[name = tensor("op_2843_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2844_cast = add(x = var_2842_cast, y = var_2843_to_fp16)[name = tensor("op_2844_cast")]; + tensor denom_77_epsilon_0_to_fp16 = const()[name = tensor("denom_77_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_77_cast = rsqrt(epsilon = denom_77_epsilon_0_to_fp16, x = var_2844_cast)[name = tensor("denom_77_cast")]; + tensor out_77_cast = mul(x = zero_mean_77_cast, y = denom_77_cast)[name = tensor("out_77_cast")]; + tensor var_2848_to_fp16 = const()[name = tensor("op_2848_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(187268672)))]; + tensor var_2849_cast = add(x = out_77_cast, y = var_2848_to_fp16)[name = tensor("op_2849_cast")]; + tensor var_2851_to_fp16 = const()[name = tensor("op_2851_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(187271296)))]; + tensor input_199_cast = mul(x = var_2849_cast, y = var_2851_to_fp16)[name = tensor("input_199_cast")]; + tensor var_2859 = const()[name = tensor("op_2859"), val = tensor([1, 1])]; + tensor var_2861 = const()[name = tensor("op_2861"), val = tensor([1, 1])]; + tensor var_2863_pad_type_0 = const()[name = tensor("op_2863_pad_type_0"), val = tensor("custom")]; + tensor var_2863_pad_0 = const()[name = tensor("op_2863_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(187273920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(193827584))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(193827712)))]; + tensor var_2863_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_2861, groups = var_1186, pad = var_2863_pad_0, pad_type = var_2863_pad_type_0, strides = var_2859, weight = down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_199_cast)[name = tensor("op_2863_cast")]; + tensor var_2864_split_sizes_0 = const()[name = tensor("op_2864_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2864_axis_0 = const()[name = tensor("op_2864_axis_0"), val = tensor(1)]; + tensor var_2864_cast_0, tensor var_2864_cast_1 = split(axis = var_2864_axis_0, split_sizes = var_2864_split_sizes_0, x = var_2863_cast)[name = tensor("op_2864_cast")]; + tensor var_2866_mode_0 = const()[name = tensor("op_2866_mode_0"), val = tensor("EXACT")]; + tensor var_2866_cast = gelu(mode = 
var_2866_mode_0, x = var_2864_cast_1)[name = tensor("op_2866_cast")]; + tensor input_201_cast = mul(x = var_2864_cast_0, y = var_2866_cast)[name = tensor("input_201_cast")]; + tensor var_2870 = const()[name = tensor("op_2870"), val = tensor([1, 1])]; + tensor var_2872 = const()[name = tensor("op_2872"), val = tensor([1, 1])]; + tensor var_2874_pad_type_0 = const()[name = tensor("op_2874_pad_type_0"), val = tensor("custom")]; + tensor var_2874_pad_0 = const()[name = tensor("op_2874_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(193848256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197125120))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197125248)))]; + tensor var_2874_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_2872, groups = var_1186, pad = var_2874_pad_0, pad_type = var_2874_pad_type_0, strides = var_2870, weight = down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_201_cast)[name = tensor("op_2874_cast")]; + tensor inputs_79_cast = add(x = var_2874_cast, y = inputs_77_cast)[name = tensor("inputs_79_cast")]; + tensor var_2884 = const()[name = tensor("op_2884"), val = tensor([1])]; + tensor channels_mean_79_cast = reduce_mean(axes = var_2884, keep_dims = var_1181, x = inputs_79_cast)[name = tensor("channels_mean_79_cast")]; + tensor zero_mean_79_cast = sub(x = inputs_79_cast, y = channels_mean_79_cast)[name = tensor("zero_mean_79_cast")]; + tensor zero_mean_sq_79_cast = mul(x = zero_mean_79_cast, y = zero_mean_79_cast)[name = tensor("zero_mean_sq_79_cast")]; + tensor var_2888 = const()[name = tensor("op_2888"), val = tensor([1])]; + tensor var_2889_cast = reduce_mean(axes = var_2888, keep_dims = var_1181, x = zero_mean_sq_79_cast)[name = tensor("op_2889_cast")]; + tensor var_2890_to_fp16 = const()[name = tensor("op_2890_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2891_cast = add(x = var_2889_cast, y = var_2890_to_fp16)[name = tensor("op_2891_cast")]; + tensor denom_79_epsilon_0_to_fp16 = const()[name = tensor("denom_79_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_79_cast = rsqrt(epsilon = denom_79_epsilon_0_to_fp16, x = var_2891_cast)[name = tensor("denom_79_cast")]; + tensor out_79_cast = mul(x = zero_mean_79_cast, y = denom_79_cast)[name = tensor("out_79_cast")]; + tensor var_2895_to_fp16 = const()[name = tensor("op_2895_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197127872)))]; + tensor var_2896_cast = add(x = out_79_cast, y = var_2895_to_fp16)[name = tensor("op_2896_cast")]; + tensor var_2898_to_fp16 = const()[name = tensor("op_2898_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197130496)))]; + tensor hidden_states_119_cast = mul(x = var_2896_cast, y = var_2898_to_fp16)[name = tensor("hidden_states_119_cast")]; + tensor var_2905 = const()[name = 
tensor("op_2905"), val = tensor([1, 1])]; + tensor var_2907 = const()[name = tensor("op_2907"), val = tensor([1, 1])]; + tensor q_53_pad_type_0 = const()[name = tensor("q_53_pad_type_0"), val = tensor("custom")]; + tensor q_53_pad_0 = const()[name = tensor("q_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197133120))), lut = tensor([-0x1.1d4p-6, 0x1.1dcp-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_53_cast = conv(dilations = var_2907, groups = var_1186, pad = q_53_pad_0, pad_type = q_53_pad_type_0, strides = var_2905, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_119_cast)[name = tensor("q_53_cast")]; + tensor var_2911 = const()[name = tensor("op_2911"), val = tensor([1, 1])]; + tensor var_2913 = const()[name = tensor("op_2913"), val = tensor([1, 1])]; + tensor k_53_pad_type_0 = const()[name = tensor("k_53_pad_type_0"), val = tensor("custom")]; + tensor k_53_pad_0 = const()[name = tensor("k_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197337984))), lut = tensor([-0x1.194p-6, 0x1.1ap-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_53_cast = conv(dilations = var_2913, groups = var_1186, pad = k_53_pad_0, pad_type = k_53_pad_type_0, strides = var_2911, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_119_cast)[name = tensor("k_53_cast")]; + tensor var_2917 = const()[name = tensor("op_2917"), val = tensor([1, 1])]; + tensor var_2919 = const()[name = tensor("op_2919"), val = tensor([1, 1])]; + tensor v_53_pad_type_0 = const()[name = tensor("v_53_pad_type_0"), val = tensor("custom")]; + tensor v_53_pad_0 = const()[name = tensor("v_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197542848))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(198362112))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_53_cast = conv(dilations = var_2919, groups = var_1186, pad = v_53_pad_0, pad_type = v_53_pad_type_0, strides = var_2917, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_119_cast)[name = tensor("v_53_cast")]; + tensor var_2923 = const()[name = tensor("op_2923"), val = tensor([2, 20, 64, -1])]; + tensor var_2924_cast = reshape(shape = var_2923, x = q_53_cast)[name = tensor("op_2924_cast")]; + tensor var_2925 = const()[name = tensor("op_2925"), val = tensor([2, 20, 64, -1])]; + tensor var_2926_cast = reshape(shape = var_2925, x = k_53_cast)[name = tensor("op_2926_cast")]; + tensor var_2927 = const()[name = tensor("op_2927"), val = tensor([2, 20, 64, 
-1])]; + tensor var_2928_cast = reshape(shape = var_2927, x = v_53_cast)[name = tensor("op_2928_cast")]; + tensor attn_weights_105_transpose_x_0 = const()[name = tensor("attn_weights_105_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_105_transpose_y_0 = const()[name = tensor("attn_weights_105_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_105_cast = matmul(transpose_x = attn_weights_105_transpose_x_0, transpose_y = attn_weights_105_transpose_y_0, x = var_2924_cast, y = var_2926_cast)[name = tensor("attn_weights_105_cast")]; + tensor attn_weights_107_cast = mul(x = attn_weights_105_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_107_cast")]; + tensor var_2932_cast = softmax(axis = var_1170, x = attn_weights_107_cast)[name = tensor("op_2932_cast")]; + tensor attn_53_transpose_x_0 = const()[name = tensor("attn_53_transpose_x_0"), val = tensor(false)]; + tensor attn_53_transpose_y_0 = const()[name = tensor("attn_53_transpose_y_0"), val = tensor(true)]; + tensor attn_53_cast = matmul(transpose_x = attn_53_transpose_x_0, transpose_y = attn_53_transpose_y_0, x = var_2928_cast, y = var_2932_cast)[name = tensor("attn_53_cast")]; + tensor var_2936 = const()[name = tensor("op_2936"), val = tensor([2, 1280, 1, -1])]; + tensor input_203_cast = reshape(shape = var_2936, x = attn_53_cast)[name = tensor("input_203_cast")]; + tensor var_2941 = const()[name = tensor("op_2941"), val = tensor([1, 1])]; + tensor var_2943 = const()[name = tensor("op_2943"), val = tensor([1, 1])]; + tensor var_2945_pad_type_0 = const()[name = tensor("op_2945_pad_type_0"), val = tensor("custom")]; + tensor var_2945_pad_0 = const()[name = tensor("op_2945_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(198362240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(199181504))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(199181632)))]; + tensor var_2945_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_2943, groups = var_1186, pad = var_2945_pad_0, pad_type = var_2945_pad_type_0, strides = var_2941, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_203_cast)[name = tensor("op_2945_cast")]; + tensor inputs_81_cast = add(x = var_2945_cast, y = inputs_79_cast)[name = tensor("inputs_81_cast")]; + tensor var_2949 = const()[name = tensor("op_2949"), val = tensor([1])]; + tensor channels_mean_81_cast = reduce_mean(axes = var_2949, keep_dims = var_1181, x = inputs_81_cast)[name = tensor("channels_mean_81_cast")]; + tensor zero_mean_81_cast = sub(x = inputs_81_cast, y = channels_mean_81_cast)[name = tensor("zero_mean_81_cast")]; + tensor zero_mean_sq_81_cast = mul(x = zero_mean_81_cast, y = zero_mean_81_cast)[name = tensor("zero_mean_sq_81_cast")]; + tensor var_2953 = const()[name = tensor("op_2953"), val = tensor([1])]; + tensor var_2954_cast = reduce_mean(axes = var_2953, 
keep_dims = var_1181, x = zero_mean_sq_81_cast)[name = tensor("op_2954_cast")]; + tensor var_2955_to_fp16 = const()[name = tensor("op_2955_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2956_cast = add(x = var_2954_cast, y = var_2955_to_fp16)[name = tensor("op_2956_cast")]; + tensor denom_81_epsilon_0_to_fp16 = const()[name = tensor("denom_81_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_81_cast = rsqrt(epsilon = denom_81_epsilon_0_to_fp16, x = var_2956_cast)[name = tensor("denom_81_cast")]; + tensor out_81_cast = mul(x = zero_mean_81_cast, y = denom_81_cast)[name = tensor("out_81_cast")]; + tensor var_2960_to_fp16 = const()[name = tensor("op_2960_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(199184256)))]; + tensor var_2961_cast = add(x = out_81_cast, y = var_2960_to_fp16)[name = tensor("op_2961_cast")]; + tensor var_2963_to_fp16 = const()[name = tensor("op_2963_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(199186880)))]; + tensor hidden_states_121_cast = mul(x = var_2961_cast, y = var_2963_to_fp16)[name = tensor("hidden_states_121_cast")]; + tensor var_2970 = const()[name = tensor("op_2970"), val = tensor([1, 1])]; + tensor var_2972 = const()[name = tensor("op_2972"), val = tensor([1, 1])]; + tensor q_55_pad_type_0 = const()[name = tensor("q_55_pad_type_0"), val = tensor("custom")]; + tensor q_55_pad_0 = const()[name = tensor("q_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(199189504))), lut = tensor([-0x1.688p-7, 0x1.688p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_55_cast = conv(dilations = var_2972, groups = var_1186, pad = q_55_pad_0, pad_type = q_55_pad_type_0, strides = var_2970, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_121_cast)[name = tensor("q_55_cast")]; + tensor var_2976 = const()[name = tensor("op_2976"), val = tensor([1, 1])]; + tensor var_2978 = const()[name = tensor("op_2978"), val = tensor([1, 1])]; + tensor k_55_pad_type_0 = const()[name = tensor("k_55_pad_type_0"), val = tensor("custom")]; + tensor k_55_pad_0 = const()[name = tensor("k_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(199394368))), lut = tensor([-0x1.8ep-7, -0x1.fe8p-9, 0x1.06p-8, 0x1.90cp-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_55_cast = conv(dilations = var_2978, groups = var_1186, pad = k_55_pad_0, pad_type = k_55_pad_type_0, strides = var_2976, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_55_cast")]; + tensor var_2982 = const()[name = tensor("op_2982"), val = tensor([1, 1])]; + tensor var_2984 = const()[name = tensor("op_2984"), val = tensor([1, 1])]; + tensor v_55_pad_type_0 = const()[name = tensor("v_55_pad_type_0"), val = tensor("custom")]; + tensor v_55_pad_0 = const()[name = 
tensor("v_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(200049792))), lut = tensor([-0x1.cbcp-7, -0x1.134p-8, 0x1.15p-8, 0x1.cbcp-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_55_cast = conv(dilations = var_2984, groups = var_1186, pad = v_55_pad_0, pad_type = v_55_pad_type_0, strides = var_2982, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_55_cast")]; + tensor var_2988 = const()[name = tensor("op_2988"), val = tensor([2, 20, 64, -1])]; + tensor var_2989_cast = reshape(shape = var_2988, x = q_55_cast)[name = tensor("op_2989_cast")]; + tensor var_2990 = const()[name = tensor("op_2990"), val = tensor([2, 20, 64, -1])]; + tensor var_2991_cast = reshape(shape = var_2990, x = k_55_cast)[name = tensor("op_2991_cast")]; + tensor var_2992 = const()[name = tensor("op_2992"), val = tensor([2, 20, 64, -1])]; + tensor var_2993_cast = reshape(shape = var_2992, x = v_55_cast)[name = tensor("op_2993_cast")]; + tensor attn_weights_109_transpose_x_0 = const()[name = tensor("attn_weights_109_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_109_transpose_y_0 = const()[name = tensor("attn_weights_109_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_109_cast = matmul(transpose_x = attn_weights_109_transpose_x_0, transpose_y = attn_weights_109_transpose_y_0, x = var_2989_cast, y = var_2991_cast)[name = tensor("attn_weights_109_cast")]; + tensor attn_weights_111_cast = mul(x = attn_weights_109_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_111_cast")]; + tensor var_2997_cast = softmax(axis = var_1170, x = attn_weights_111_cast)[name = tensor("op_2997_cast")]; + tensor attn_55_transpose_x_0 = const()[name = tensor("attn_55_transpose_x_0"), val = tensor(false)]; + tensor attn_55_transpose_y_0 = const()[name = tensor("attn_55_transpose_y_0"), val = tensor(true)]; + tensor attn_55_cast = matmul(transpose_x = attn_55_transpose_x_0, transpose_y = attn_55_transpose_y_0, x = var_2993_cast, y = var_2997_cast)[name = tensor("attn_55_cast")]; + tensor var_3001 = const()[name = tensor("op_3001"), val = tensor([2, 1280, 1, -1])]; + tensor input_205_cast = reshape(shape = var_3001, x = attn_55_cast)[name = tensor("input_205_cast")]; + tensor var_3006 = const()[name = tensor("op_3006"), val = tensor([1, 1])]; + tensor var_3008 = const()[name = tensor("op_3008"), val = tensor([1, 1])]; + tensor var_3010_pad_type_0 = const()[name = tensor("op_3010_pad_type_0"), val = tensor("custom")]; + tensor var_3010_pad_0 = const()[name = tensor("op_3010_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(200705216))), lut = tensor([-0x1.1b4p-7, -0x1.5ccp-9, 0x1.5d8p-9, 0x1.1b4p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = 
tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201114880)))]; + tensor var_3010_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_3008, groups = var_1186, pad = var_3010_pad_0, pad_type = var_3010_pad_type_0, strides = var_3006, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_205_cast)[name = tensor("op_3010_cast")]; + tensor inputs_83_cast = add(x = var_3010_cast, y = inputs_81_cast)[name = tensor("inputs_83_cast")]; + tensor var_3014 = const()[name = tensor("op_3014"), val = tensor([1])]; + tensor channels_mean_83_cast = reduce_mean(axes = var_3014, keep_dims = var_1181, x = inputs_83_cast)[name = tensor("channels_mean_83_cast")]; + tensor zero_mean_83_cast = sub(x = inputs_83_cast, y = channels_mean_83_cast)[name = tensor("zero_mean_83_cast")]; + tensor zero_mean_sq_83_cast = mul(x = zero_mean_83_cast, y = zero_mean_83_cast)[name = tensor("zero_mean_sq_83_cast")]; + tensor var_3018 = const()[name = tensor("op_3018"), val = tensor([1])]; + tensor var_3019_cast = reduce_mean(axes = var_3018, keep_dims = var_1181, x = zero_mean_sq_83_cast)[name = tensor("op_3019_cast")]; + tensor var_3020_to_fp16 = const()[name = tensor("op_3020_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3021_cast = add(x = var_3019_cast, y = var_3020_to_fp16)[name = tensor("op_3021_cast")]; + tensor denom_83_epsilon_0_to_fp16 = const()[name = tensor("denom_83_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_83_cast = rsqrt(epsilon = denom_83_epsilon_0_to_fp16, x = var_3021_cast)[name = tensor("denom_83_cast")]; + tensor out_83_cast = mul(x = zero_mean_83_cast, y = denom_83_cast)[name = tensor("out_83_cast")]; + tensor var_3025_to_fp16 = const()[name = tensor("op_3025_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201117504)))]; + tensor var_3026_cast = add(x = out_83_cast, y = var_3025_to_fp16)[name = tensor("op_3026_cast")]; + tensor var_3028_to_fp16 = const()[name = tensor("op_3028_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201120128)))]; + tensor input_207_cast = mul(x = var_3026_cast, y = var_3028_to_fp16)[name = tensor("input_207_cast")]; + tensor var_3036 = const()[name = tensor("op_3036"), val = tensor([1, 1])]; + tensor var_3038 = const()[name = tensor("op_3038"), val = tensor([1, 1])]; + tensor var_3040_pad_type_0 = const()[name = tensor("op_3040_pad_type_0"), val = tensor("custom")]; + tensor var_3040_pad_0 = const()[name = tensor("op_3040_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201122752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(207676416))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(207676544)))]; + tensor 
var_3040_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_3038, groups = var_1186, pad = var_3040_pad_0, pad_type = var_3040_pad_type_0, strides = var_3036, weight = down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_207_cast)[name = tensor("op_3040_cast")]; + tensor var_3041_split_sizes_0 = const()[name = tensor("op_3041_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3041_axis_0 = const()[name = tensor("op_3041_axis_0"), val = tensor(1)]; + tensor var_3041_cast_0, tensor var_3041_cast_1 = split(axis = var_3041_axis_0, split_sizes = var_3041_split_sizes_0, x = var_3040_cast)[name = tensor("op_3041_cast")]; + tensor var_3043_mode_0 = const()[name = tensor("op_3043_mode_0"), val = tensor("EXACT")]; + tensor var_3043_cast = gelu(mode = var_3043_mode_0, x = var_3041_cast_1)[name = tensor("op_3043_cast")]; + tensor input_209_cast = mul(x = var_3041_cast_0, y = var_3043_cast)[name = tensor("input_209_cast")]; + tensor var_3047 = const()[name = tensor("op_3047"), val = tensor([1, 1])]; + tensor var_3049 = const()[name = tensor("op_3049"), val = tensor([1, 1])]; + tensor var_3051_pad_type_0 = const()[name = tensor("op_3051_pad_type_0"), val = tensor("custom")]; + tensor var_3051_pad_0 = const()[name = tensor("op_3051_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(207697088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(210973952))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(210974080)))]; + tensor var_3051_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_3049, groups = var_1186, pad = var_3051_pad_0, pad_type = var_3051_pad_type_0, strides = var_3047, weight = down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_209_cast)[name = tensor("op_3051_cast")]; + tensor hidden_states_125_cast = add(x = var_3051_cast, y = inputs_83_cast)[name = tensor("hidden_states_125_cast")]; + tensor var_3053 = const()[name = tensor("op_3053"), val = tensor([2, 1280, 32, 32])]; + tensor input_211_cast = reshape(shape = var_3053, x = hidden_states_125_cast)[name = tensor("input_211_cast")]; + tensor var_3057 = const()[name = tensor("op_3057"), val = tensor([1, 1])]; + tensor var_3059 = const()[name = tensor("op_3059"), val = tensor([1, 1])]; + tensor hidden_states_127_pad_type_0 = const()[name = tensor("hidden_states_127_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_127_pad_0 = const()[name = tensor("hidden_states_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(210976704))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(211795968))), name = 
tensor("down_blocks_2_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(211796096)))]; + tensor hidden_states_127_cast = conv(bias = down_blocks_2_attentions_0_proj_out_bias_to_fp16, dilations = var_3059, groups = var_1186, pad = hidden_states_127_pad_0, pad_type = hidden_states_127_pad_type_0, strides = var_3057, weight = down_blocks_2_attentions_0_proj_out_weight_to_fp16_palettized, x = input_211_cast)[name = tensor("hidden_states_127_cast")]; + tensor input_213_cast = add(x = hidden_states_127_cast, y = hidden_states_61_cast)[name = tensor("input_213_cast")]; + tensor reshape_52_shape_0 = const()[name = tensor("reshape_52_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_52_cast = reshape(shape = reshape_52_shape_0, x = input_213_cast)[name = tensor("reshape_52_cast")]; + tensor reduce_mean_39_axes_0 = const()[name = tensor("reduce_mean_39_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_39_keep_dims_0 = const()[name = tensor("reduce_mean_39_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_39_cast = reduce_mean(axes = reduce_mean_39_axes_0, keep_dims = reduce_mean_39_keep_dims_0, x = reshape_52_cast)[name = tensor("reduce_mean_39_cast")]; + tensor sub_26_cast = sub(x = reshape_52_cast, y = reduce_mean_39_cast)[name = tensor("sub_26_cast")]; + tensor square_13_cast = square(x = sub_26_cast)[name = tensor("square_13_cast")]; + tensor reduce_mean_41_axes_0 = const()[name = tensor("reduce_mean_41_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_41_keep_dims_0 = const()[name = tensor("reduce_mean_41_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_41_cast = reduce_mean(axes = reduce_mean_41_axes_0, keep_dims = reduce_mean_41_keep_dims_0, x = square_13_cast)[name = tensor("reduce_mean_41_cast")]; + tensor add_26_y_0_to_fp16 = const()[name = tensor("add_26_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_26_cast = add(x = reduce_mean_41_cast, y = add_26_y_0_to_fp16)[name = tensor("add_26_cast")]; + tensor sqrt_13_cast = sqrt(x = add_26_cast)[name = tensor("sqrt_13_cast")]; + tensor real_div_13_cast = real_div(x = sub_26_cast, y = sqrt_13_cast)[name = tensor("real_div_13_cast")]; + tensor reshape_53_shape_0 = const()[name = tensor("reshape_53_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_53_cast = reshape(shape = reshape_53_shape_0, x = real_div_13_cast)[name = tensor("reshape_53_cast")]; + tensor add_27_gamma_0_to_fp16 = const()[name = tensor("add_27_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(211798720)))]; + tensor add_27_beta_0_to_fp16 = const()[name = tensor("add_27_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(211801344)))]; + tensor add_27_epsilon_0_to_fp16 = const()[name = tensor("add_27_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_27_cast = batch_norm(beta = add_27_beta_0_to_fp16, epsilon = add_27_epsilon_0_to_fp16, gamma = add_27_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_53_cast)[name = tensor("add_27_cast")]; + tensor input_217_cast = silu(x = add_27_cast)[name = tensor("input_217_cast")]; + tensor var_3074 = const()[name = tensor("op_3074"), val = tensor([1, 1])]; + tensor 
var_3076 = const()[name = tensor("op_3076"), val = tensor([1, 1])]; + tensor hidden_states_129_pad_type_0 = const()[name = tensor("hidden_states_129_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_129_pad_0 = const()[name = tensor("hidden_states_129_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_2_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(211803968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219176832))), name = tensor("down_blocks_2_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor down_blocks_2_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219176960)))]; + tensor hidden_states_129_cast = conv(bias = down_blocks_2_resnets_1_conv1_bias_to_fp16, dilations = var_3076, groups = var_1186, pad = hidden_states_129_pad_0, pad_type = hidden_states_129_pad_type_0, strides = var_3074, weight = down_blocks_2_resnets_1_conv1_weight_to_fp16_palettized, x = input_217_cast)[name = tensor("hidden_states_129_cast")]; + tensor var_3082 = const()[name = tensor("op_3082"), val = tensor([1, 1])]; + tensor var_3084 = const()[name = tensor("op_3084"), val = tensor([1, 1])]; + tensor temb_11_pad_type_0 = const()[name = tensor("temb_11_pad_type_0"), val = tensor("custom")]; + tensor temb_11_pad_0 = const()[name = tensor("temb_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219179584))), lut = tensor([-0x1.ddcp-5, -0x1.08p-6, 0x1.ef4p-7, 0x1.cf8p-5]), name = tensor("down_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219589248)))]; + tensor temb_11_cast = conv(bias = down_blocks_2_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_3084, groups = var_1186, pad = temb_11_pad_0, pad_type = temb_11_pad_type_0, strides = var_3082, weight = down_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_11_cast")]; + tensor input_221_cast = add(x = hidden_states_129_cast, y = temb_11_cast)[name = tensor("input_221_cast")]; + tensor reshape_56_shape_0 = const()[name = tensor("reshape_56_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_56_cast = reshape(shape = reshape_56_shape_0, x = input_221_cast)[name = tensor("reshape_56_cast")]; + tensor reduce_mean_42_axes_0 = const()[name = tensor("reduce_mean_42_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_42_keep_dims_0 = const()[name = tensor("reduce_mean_42_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_42_cast = reduce_mean(axes = reduce_mean_42_axes_0, keep_dims = reduce_mean_42_keep_dims_0, x = reshape_56_cast)[name = tensor("reduce_mean_42_cast")]; + tensor sub_28_cast = sub(x = reshape_56_cast, y = reduce_mean_42_cast)[name = tensor("sub_28_cast")]; + tensor square_14_cast = square(x = sub_28_cast)[name = tensor("square_14_cast")]; + tensor reduce_mean_44_axes_0 = 
const()[name = tensor("reduce_mean_44_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_44_keep_dims_0 = const()[name = tensor("reduce_mean_44_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_44_cast = reduce_mean(axes = reduce_mean_44_axes_0, keep_dims = reduce_mean_44_keep_dims_0, x = square_14_cast)[name = tensor("reduce_mean_44_cast")]; + tensor add_28_y_0_to_fp16 = const()[name = tensor("add_28_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_28_cast = add(x = reduce_mean_44_cast, y = add_28_y_0_to_fp16)[name = tensor("add_28_cast")]; + tensor sqrt_14_cast = sqrt(x = add_28_cast)[name = tensor("sqrt_14_cast")]; + tensor real_div_14_cast = real_div(x = sub_28_cast, y = sqrt_14_cast)[name = tensor("real_div_14_cast")]; + tensor reshape_57_shape_0 = const()[name = tensor("reshape_57_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_57_cast = reshape(shape = reshape_57_shape_0, x = real_div_14_cast)[name = tensor("reshape_57_cast")]; + tensor add_29_gamma_0_to_fp16 = const()[name = tensor("add_29_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219591872)))]; + tensor add_29_beta_0_to_fp16 = const()[name = tensor("add_29_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219594496)))]; + tensor add_29_epsilon_0_to_fp16 = const()[name = tensor("add_29_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_29_cast = batch_norm(beta = add_29_beta_0_to_fp16, epsilon = add_29_epsilon_0_to_fp16, gamma = add_29_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_57_cast)[name = tensor("add_29_cast")]; + tensor input_225_cast = silu(x = add_29_cast)[name = tensor("input_225_cast")]; + tensor var_3094 = const()[name = tensor("op_3094"), val = tensor([1, 1])]; + tensor var_3096 = const()[name = tensor("op_3096"), val = tensor([1, 1])]; + tensor hidden_states_131_pad_type_0 = const()[name = tensor("hidden_states_131_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_131_pad_0 = const()[name = tensor("hidden_states_131_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_2_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219597120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(226969984))), name = tensor("down_blocks_2_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor down_blocks_2_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(226970112)))]; + tensor hidden_states_131_cast = conv(bias = down_blocks_2_resnets_1_conv2_bias_to_fp16, dilations = var_3096, groups = var_1186, pad = hidden_states_131_pad_0, pad_type = hidden_states_131_pad_type_0, strides = var_3094, weight = down_blocks_2_resnets_1_conv2_weight_to_fp16_palettized, x = input_225_cast)[name = tensor("hidden_states_131_cast")]; + tensor hidden_states_133_cast = add(x = input_213_cast, y = hidden_states_131_cast)[name = tensor("hidden_states_133_cast")]; + tensor reshape_60_shape_0 = const()[name = tensor("reshape_60_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_60_cast = reshape(shape = reshape_60_shape_0, x = hidden_states_133_cast)[name = tensor("reshape_60_cast")]; + tensor 
reduce_mean_45_axes_0 = const()[name = tensor("reduce_mean_45_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_45_keep_dims_0 = const()[name = tensor("reduce_mean_45_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_45_cast = reduce_mean(axes = reduce_mean_45_axes_0, keep_dims = reduce_mean_45_keep_dims_0, x = reshape_60_cast)[name = tensor("reduce_mean_45_cast")]; + tensor sub_30_cast = sub(x = reshape_60_cast, y = reduce_mean_45_cast)[name = tensor("sub_30_cast")]; + tensor square_15_cast = square(x = sub_30_cast)[name = tensor("square_15_cast")]; + tensor reduce_mean_47_axes_0 = const()[name = tensor("reduce_mean_47_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_47_keep_dims_0 = const()[name = tensor("reduce_mean_47_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_47_cast = reduce_mean(axes = reduce_mean_47_axes_0, keep_dims = reduce_mean_47_keep_dims_0, x = square_15_cast)[name = tensor("reduce_mean_47_cast")]; + tensor add_30_y_0_to_fp16 = const()[name = tensor("add_30_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_30_cast = add(x = reduce_mean_47_cast, y = add_30_y_0_to_fp16)[name = tensor("add_30_cast")]; + tensor sqrt_15_cast = sqrt(x = add_30_cast)[name = tensor("sqrt_15_cast")]; + tensor real_div_15_cast = real_div(x = sub_30_cast, y = sqrt_15_cast)[name = tensor("real_div_15_cast")]; + tensor reshape_61_shape_0 = const()[name = tensor("reshape_61_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_61_cast = reshape(shape = reshape_61_shape_0, x = real_div_15_cast)[name = tensor("reshape_61_cast")]; + tensor add_31_gamma_0_to_fp16 = const()[name = tensor("add_31_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(226972736)))]; + tensor add_31_beta_0_to_fp16 = const()[name = tensor("add_31_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(226975360)))]; + tensor add_31_epsilon_0_to_fp16 = const()[name = tensor("add_31_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_31_cast = batch_norm(beta = add_31_beta_0_to_fp16, epsilon = add_31_epsilon_0_to_fp16, gamma = add_31_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_61_cast)[name = tensor("add_31_cast")]; + tensor var_3134 = const()[name = tensor("op_3134"), val = tensor([1, 1])]; + tensor var_3136 = const()[name = tensor("op_3136"), val = tensor([1, 1])]; + tensor hidden_states_135_pad_type_0 = const()[name = tensor("hidden_states_135_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_135_pad_0 = const()[name = tensor("hidden_states_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(226977984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(227797248))), name = tensor("down_blocks_2_attentions_1_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_proj_in_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(227797376)))]; + tensor hidden_states_135_cast = conv(bias = down_blocks_2_attentions_1_proj_in_bias_to_fp16, dilations = var_3136, groups = var_1186, pad = hidden_states_135_pad_0, pad_type = 
hidden_states_135_pad_type_0, strides = var_3134, weight = down_blocks_2_attentions_1_proj_in_weight_to_fp16_palettized, x = add_31_cast)[name = tensor("hidden_states_135_cast")]; + tensor var_3141 = const()[name = tensor("op_3141"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_85_cast = reshape(shape = var_3141, x = hidden_states_135_cast)[name = tensor("inputs_85_cast")]; + tensor var_3151 = const()[name = tensor("op_3151"), val = tensor([1])]; + tensor channels_mean_85_cast = reduce_mean(axes = var_3151, keep_dims = var_1181, x = inputs_85_cast)[name = tensor("channels_mean_85_cast")]; + tensor zero_mean_85_cast = sub(x = inputs_85_cast, y = channels_mean_85_cast)[name = tensor("zero_mean_85_cast")]; + tensor zero_mean_sq_85_cast = mul(x = zero_mean_85_cast, y = zero_mean_85_cast)[name = tensor("zero_mean_sq_85_cast")]; + tensor var_3155 = const()[name = tensor("op_3155"), val = tensor([1])]; + tensor var_3156_cast = reduce_mean(axes = var_3155, keep_dims = var_1181, x = zero_mean_sq_85_cast)[name = tensor("op_3156_cast")]; + tensor var_3157_to_fp16 = const()[name = tensor("op_3157_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3158_cast = add(x = var_3156_cast, y = var_3157_to_fp16)[name = tensor("op_3158_cast")]; + tensor denom_85_epsilon_0_to_fp16 = const()[name = tensor("denom_85_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_85_cast = rsqrt(epsilon = denom_85_epsilon_0_to_fp16, x = var_3158_cast)[name = tensor("denom_85_cast")]; + tensor out_85_cast = mul(x = zero_mean_85_cast, y = denom_85_cast)[name = tensor("out_85_cast")]; + tensor var_3162_to_fp16 = const()[name = tensor("op_3162_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(227800000)))]; + tensor var_3163_cast = add(x = out_85_cast, y = var_3162_to_fp16)[name = tensor("op_3163_cast")]; + tensor var_3165_to_fp16 = const()[name = tensor("op_3165_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(227802624)))]; + tensor hidden_states_137_cast = mul(x = var_3163_cast, y = var_3165_to_fp16)[name = tensor("hidden_states_137_cast")]; + tensor var_3172 = const()[name = tensor("op_3172"), val = tensor([1, 1])]; + tensor var_3174 = const()[name = tensor("op_3174"), val = tensor([1, 1])]; + tensor q_57_pad_type_0 = const()[name = tensor("q_57_pad_type_0"), val = tensor("custom")]; + tensor q_57_pad_0 = const()[name = tensor("q_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(227805248))), lut = tensor([-0x1.378p-5, -0x1.794p-7, 0x1.744p-7, 0x1.364p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_57_cast = conv(dilations = var_3174, groups = var_1186, pad = q_57_pad_0, pad_type = q_57_pad_type_0, strides = var_3172, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_137_cast)[name = tensor("q_57_cast")]; + tensor var_3178 = const()[name = tensor("op_3178"), val = tensor([1, 1])]; + tensor var_3180 = const()[name = tensor("op_3180"), val = tensor([1, 1])]; + tensor k_57_pad_type_0 = const()[name = tensor("k_57_pad_type_0"), val = tensor("custom")]; + tensor k_57_pad_0 = const()[name = tensor("k_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(228214912))), lut = tensor([-0x1.394p-5, -0x1.784p-7, 0x1.7c4p-7, 0x1.3a4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_57_cast = conv(dilations = var_3180, groups = var_1186, pad = k_57_pad_0, pad_type = k_57_pad_type_0, strides = var_3178, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_137_cast)[name = tensor("k_57_cast")]; + tensor var_3184 = const()[name = tensor("op_3184"), val = tensor([1, 1])]; + tensor var_3186 = const()[name = tensor("op_3186"), val = tensor([1, 1])]; + tensor v_57_pad_type_0 = const()[name = tensor("v_57_pad_type_0"), val = tensor("custom")]; + tensor v_57_pad_0 = const()[name = tensor("v_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(228624576))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(229443840))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_57_cast = conv(dilations = var_3186, groups = var_1186, pad = v_57_pad_0, pad_type = v_57_pad_type_0, strides = var_3184, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_137_cast)[name = tensor("v_57_cast")]; + tensor var_3190 = const()[name = tensor("op_3190"), val = tensor([2, 20, 64, -1])]; + tensor var_3191_cast = reshape(shape = var_3190, x = q_57_cast)[name = tensor("op_3191_cast")]; + tensor var_3192 = const()[name = tensor("op_3192"), val = tensor([2, 20, 64, -1])]; + tensor var_3193_cast = reshape(shape = var_3192, x = k_57_cast)[name = tensor("op_3193_cast")]; + tensor var_3194 = const()[name = tensor("op_3194"), val = tensor([2, 20, 64, -1])]; + tensor var_3195_cast = reshape(shape = var_3194, x = v_57_cast)[name = tensor("op_3195_cast")]; + tensor attn_weights_113_transpose_x_0 = const()[name = tensor("attn_weights_113_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_113_transpose_y_0 = const()[name = tensor("attn_weights_113_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_113_cast = matmul(transpose_x = attn_weights_113_transpose_x_0, transpose_y = attn_weights_113_transpose_y_0, x = var_3191_cast, y = var_3193_cast)[name = tensor("attn_weights_113_cast")]; + tensor attn_weights_115_cast = mul(x = attn_weights_113_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_115_cast")]; + tensor var_3199_cast = softmax(axis = var_1170, x = attn_weights_115_cast)[name = tensor("op_3199_cast")]; + tensor attn_57_transpose_x_0 = const()[name = tensor("attn_57_transpose_x_0"), val = tensor(false)]; + tensor attn_57_transpose_y_0 = const()[name = tensor("attn_57_transpose_y_0"), val = tensor(true)]; + tensor attn_57_cast = matmul(transpose_x = attn_57_transpose_x_0, transpose_y = attn_57_transpose_y_0, x = var_3195_cast, y = var_3199_cast)[name = tensor("attn_57_cast")]; + tensor var_3203 = const()[name = tensor("op_3203"), val = tensor([2, 1280, 1, -1])]; + tensor input_229_cast = reshape(shape = var_3203, 
x = attn_57_cast)[name = tensor("input_229_cast")]; + tensor var_3208 = const()[name = tensor("op_3208"), val = tensor([1, 1])]; + tensor var_3210 = const()[name = tensor("op_3210"), val = tensor([1, 1])]; + tensor var_3212_pad_type_0 = const()[name = tensor("op_3212_pad_type_0"), val = tensor("custom")]; + tensor var_3212_pad_0 = const()[name = tensor("op_3212_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(229443968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(230263232))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(230263360)))]; + tensor var_3212_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_3210, groups = var_1186, pad = var_3212_pad_0, pad_type = var_3212_pad_type_0, strides = var_3208, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_229_cast)[name = tensor("op_3212_cast")]; + tensor inputs_87_cast = add(x = var_3212_cast, y = inputs_85_cast)[name = tensor("inputs_87_cast")]; + tensor var_3216 = const()[name = tensor("op_3216"), val = tensor([1])]; + tensor channels_mean_87_cast = reduce_mean(axes = var_3216, keep_dims = var_1181, x = inputs_87_cast)[name = tensor("channels_mean_87_cast")]; + tensor zero_mean_87_cast = sub(x = inputs_87_cast, y = channels_mean_87_cast)[name = tensor("zero_mean_87_cast")]; + tensor zero_mean_sq_87_cast = mul(x = zero_mean_87_cast, y = zero_mean_87_cast)[name = tensor("zero_mean_sq_87_cast")]; + tensor var_3220 = const()[name = tensor("op_3220"), val = tensor([1])]; + tensor var_3221_cast = reduce_mean(axes = var_3220, keep_dims = var_1181, x = zero_mean_sq_87_cast)[name = tensor("op_3221_cast")]; + tensor var_3222_to_fp16 = const()[name = tensor("op_3222_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3223_cast = add(x = var_3221_cast, y = var_3222_to_fp16)[name = tensor("op_3223_cast")]; + tensor denom_87_epsilon_0_to_fp16 = const()[name = tensor("denom_87_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_87_cast = rsqrt(epsilon = denom_87_epsilon_0_to_fp16, x = var_3223_cast)[name = tensor("denom_87_cast")]; + tensor out_87_cast = mul(x = zero_mean_87_cast, y = denom_87_cast)[name = tensor("out_87_cast")]; + tensor var_3227_to_fp16 = const()[name = tensor("op_3227_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(230265984)))]; + tensor var_3228_cast = add(x = out_87_cast, y = var_3227_to_fp16)[name = tensor("op_3228_cast")]; + tensor var_3230_to_fp16 = const()[name = tensor("op_3230_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(230268608)))]; + tensor hidden_states_139_cast = mul(x = var_3228_cast, y = var_3230_to_fp16)[name = tensor("hidden_states_139_cast")]; + tensor var_3237 = const()[name = tensor("op_3237"), val = tensor([1, 1])]; + tensor var_3239 = const()[name = tensor("op_3239"), val = 
tensor([1, 1])]; + tensor q_59_pad_type_0 = const()[name = tensor("q_59_pad_type_0"), val = tensor("custom")]; + tensor q_59_pad_0 = const()[name = tensor("q_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(230271232))), lut = tensor([-0x1.f38p-7, 0x1.f48p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_59_cast = conv(dilations = var_3239, groups = var_1186, pad = q_59_pad_0, pad_type = q_59_pad_type_0, strides = var_3237, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_139_cast)[name = tensor("q_59_cast")]; + tensor var_3243 = const()[name = tensor("op_3243"), val = tensor([1, 1])]; + tensor var_3245 = const()[name = tensor("op_3245"), val = tensor([1, 1])]; + tensor k_59_pad_type_0 = const()[name = tensor("k_59_pad_type_0"), val = tensor("custom")]; + tensor k_59_pad_0 = const()[name = tensor("k_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(230476096))), lut = tensor([-0x1.dc8p-6, -0x1.1b8p-7, 0x1.198p-7, 0x1.dbp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_59_cast = conv(dilations = var_3245, groups = var_1186, pad = k_59_pad_0, pad_type = k_59_pad_type_0, strides = var_3243, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_59_cast")]; + tensor var_3249 = const()[name = tensor("op_3249"), val = tensor([1, 1])]; + tensor var_3251 = const()[name = tensor("op_3251"), val = tensor([1, 1])]; + tensor v_59_pad_type_0 = const()[name = tensor("v_59_pad_type_0"), val = tensor("custom")]; + tensor v_59_pad_0 = const()[name = tensor("v_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231131520))), lut = tensor([-0x1.078p-6, 0x1.078p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_59_cast = conv(dilations = var_3251, groups = var_1186, pad = v_59_pad_0, pad_type = v_59_pad_type_0, strides = var_3249, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_59_cast")]; + tensor var_3255 = const()[name = tensor("op_3255"), val = tensor([2, 20, 64, -1])]; + tensor var_3256_cast = reshape(shape = var_3255, x = q_59_cast)[name = tensor("op_3256_cast")]; + tensor var_3257 = const()[name = tensor("op_3257"), val = tensor([2, 20, 64, -1])]; + tensor var_3258_cast = reshape(shape = var_3257, x = k_59_cast)[name = tensor("op_3258_cast")]; + tensor var_3259 = const()[name = tensor("op_3259"), val = tensor([2, 20, 64, -1])]; + tensor var_3260_cast = reshape(shape = var_3259, x = v_59_cast)[name = tensor("op_3260_cast")]; + tensor 
attn_weights_117_transpose_x_0 = const()[name = tensor("attn_weights_117_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_117_transpose_y_0 = const()[name = tensor("attn_weights_117_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_117_cast = matmul(transpose_x = attn_weights_117_transpose_x_0, transpose_y = attn_weights_117_transpose_y_0, x = var_3256_cast, y = var_3258_cast)[name = tensor("attn_weights_117_cast")]; + tensor attn_weights_119_cast = mul(x = attn_weights_117_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_119_cast")]; + tensor var_3264_cast = softmax(axis = var_1170, x = attn_weights_119_cast)[name = tensor("op_3264_cast")]; + tensor attn_59_transpose_x_0 = const()[name = tensor("attn_59_transpose_x_0"), val = tensor(false)]; + tensor attn_59_transpose_y_0 = const()[name = tensor("attn_59_transpose_y_0"), val = tensor(true)]; + tensor attn_59_cast = matmul(transpose_x = attn_59_transpose_x_0, transpose_y = attn_59_transpose_y_0, x = var_3260_cast, y = var_3264_cast)[name = tensor("attn_59_cast")]; + tensor var_3268 = const()[name = tensor("op_3268"), val = tensor([2, 1280, 1, -1])]; + tensor input_231_cast = reshape(shape = var_3268, x = attn_59_cast)[name = tensor("input_231_cast")]; + tensor var_3273 = const()[name = tensor("op_3273"), val = tensor([1, 1])]; + tensor var_3275 = const()[name = tensor("op_3275"), val = tensor([1, 1])]; + tensor var_3277_pad_type_0 = const()[name = tensor("op_3277_pad_type_0"), val = tensor("custom")]; + tensor var_3277_pad_0 = const()[name = tensor("op_3277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231459264))), lut = tensor([-0x1.3f4p-7, 0x1.3fcp-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231664128)))]; + tensor var_3277_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_3275, groups = var_1186, pad = var_3277_pad_0, pad_type = var_3277_pad_type_0, strides = var_3273, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_231_cast)[name = tensor("op_3277_cast")]; + tensor inputs_89_cast = add(x = var_3277_cast, y = inputs_87_cast)[name = tensor("inputs_89_cast")]; + tensor var_3281 = const()[name = tensor("op_3281"), val = tensor([1])]; + tensor channels_mean_89_cast = reduce_mean(axes = var_3281, keep_dims = var_1181, x = inputs_89_cast)[name = tensor("channels_mean_89_cast")]; + tensor zero_mean_89_cast = sub(x = inputs_89_cast, y = channels_mean_89_cast)[name = tensor("zero_mean_89_cast")]; + tensor zero_mean_sq_89_cast = mul(x = zero_mean_89_cast, y = zero_mean_89_cast)[name = tensor("zero_mean_sq_89_cast")]; + tensor var_3285 = const()[name = tensor("op_3285"), val = tensor([1])]; + tensor var_3286_cast = reduce_mean(axes = var_3285, keep_dims = var_1181, x = zero_mean_sq_89_cast)[name = tensor("op_3286_cast")]; + tensor var_3287_to_fp16 = const()[name = tensor("op_3287_to_fp16"), val = tensor(0x1.5p-17)]; + 
tensor var_3288_cast = add(x = var_3286_cast, y = var_3287_to_fp16)[name = tensor("op_3288_cast")]; + tensor denom_89_epsilon_0_to_fp16 = const()[name = tensor("denom_89_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_89_cast = rsqrt(epsilon = denom_89_epsilon_0_to_fp16, x = var_3288_cast)[name = tensor("denom_89_cast")]; + tensor out_89_cast = mul(x = zero_mean_89_cast, y = denom_89_cast)[name = tensor("out_89_cast")]; + tensor var_3292_to_fp16 = const()[name = tensor("op_3292_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231666752)))]; + tensor var_3293_cast = add(x = out_89_cast, y = var_3292_to_fp16)[name = tensor("op_3293_cast")]; + tensor var_3295_to_fp16 = const()[name = tensor("op_3295_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231669376)))]; + tensor input_233_cast = mul(x = var_3293_cast, y = var_3295_to_fp16)[name = tensor("input_233_cast")]; + tensor var_3303 = const()[name = tensor("op_3303"), val = tensor([1, 1])]; + tensor var_3305 = const()[name = tensor("op_3305"), val = tensor([1, 1])]; + tensor var_3307_pad_type_0 = const()[name = tensor("op_3307_pad_type_0"), val = tensor("custom")]; + tensor var_3307_pad_0 = const()[name = tensor("op_3307_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(231672000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(238225664))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(238225792)))]; + tensor var_3307_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_3305, groups = var_1186, pad = var_3307_pad_0, pad_type = var_3307_pad_type_0, strides = var_3303, weight = down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_233_cast)[name = tensor("op_3307_cast")]; + tensor var_3308_split_sizes_0 = const()[name = tensor("op_3308_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3308_axis_0 = const()[name = tensor("op_3308_axis_0"), val = tensor(1)]; + tensor var_3308_cast_0, tensor var_3308_cast_1 = split(axis = var_3308_axis_0, split_sizes = var_3308_split_sizes_0, x = var_3307_cast)[name = tensor("op_3308_cast")]; + tensor var_3310_mode_0 = const()[name = tensor("op_3310_mode_0"), val = tensor("EXACT")]; + tensor var_3310_cast = gelu(mode = var_3310_mode_0, x = var_3308_cast_1)[name = tensor("op_3310_cast")]; + tensor input_235_cast = mul(x = var_3308_cast_0, y = var_3310_cast)[name = tensor("input_235_cast")]; + tensor var_3314 = const()[name = tensor("op_3314"), val = tensor([1, 1])]; + tensor var_3316 = const()[name = tensor("op_3316"), val = tensor([1, 1])]; + tensor var_3318_pad_type_0 = const()[name = tensor("op_3318_pad_type_0"), val = tensor("custom")]; + tensor var_3318_pad_0 = const()[name = tensor("op_3318_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(238246336))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(241523200))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(241523328)))]; + tensor var_3318_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_3316, groups = var_1186, pad = var_3318_pad_0, pad_type = var_3318_pad_type_0, strides = var_3314, weight = down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_235_cast)[name = tensor("op_3318_cast")]; + tensor inputs_91_cast = add(x = var_3318_cast, y = inputs_89_cast)[name = tensor("inputs_91_cast")]; + tensor var_3328 = const()[name = tensor("op_3328"), val = tensor([1])]; + tensor channels_mean_91_cast = reduce_mean(axes = var_3328, keep_dims = var_1181, x = inputs_91_cast)[name = tensor("channels_mean_91_cast")]; + tensor zero_mean_91_cast = sub(x = inputs_91_cast, y = channels_mean_91_cast)[name = tensor("zero_mean_91_cast")]; + tensor zero_mean_sq_91_cast = mul(x = zero_mean_91_cast, y = zero_mean_91_cast)[name = tensor("zero_mean_sq_91_cast")]; + tensor var_3332 = const()[name = tensor("op_3332"), val = tensor([1])]; + tensor var_3333_cast = reduce_mean(axes = var_3332, keep_dims = var_1181, x = zero_mean_sq_91_cast)[name = tensor("op_3333_cast")]; + tensor var_3334_to_fp16 = const()[name = tensor("op_3334_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3335_cast = add(x = var_3333_cast, y = var_3334_to_fp16)[name = tensor("op_3335_cast")]; + tensor denom_91_epsilon_0_to_fp16 = const()[name = tensor("denom_91_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_91_cast = rsqrt(epsilon = denom_91_epsilon_0_to_fp16, x = var_3335_cast)[name = tensor("denom_91_cast")]; + tensor out_91_cast = mul(x = zero_mean_91_cast, y = denom_91_cast)[name = tensor("out_91_cast")]; + tensor var_3339_to_fp16 = const()[name = tensor("op_3339_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(241525952)))]; + tensor var_3340_cast = add(x = out_91_cast, y = var_3339_to_fp16)[name = tensor("op_3340_cast")]; + tensor var_3342_to_fp16 = const()[name = tensor("op_3342_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(241528576)))]; + tensor hidden_states_143_cast = mul(x = var_3340_cast, y = var_3342_to_fp16)[name = tensor("hidden_states_143_cast")]; + tensor var_3349 = const()[name = tensor("op_3349"), val = tensor([1, 1])]; + tensor var_3351 = const()[name = tensor("op_3351"), val = tensor([1, 1])]; + tensor q_61_pad_type_0 = const()[name = tensor("q_61_pad_type_0"), val = tensor("custom")]; + tensor q_61_pad_0 = const()[name = tensor("q_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(241531200))), lut = 
tensor([-0x1.414p-5, -0x1.83cp-7, 0x1.814p-7, 0x1.414p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_61_cast = conv(dilations = var_3351, groups = var_1186, pad = q_61_pad_0, pad_type = q_61_pad_type_0, strides = var_3349, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_143_cast)[name = tensor("q_61_cast")]; + tensor var_3355 = const()[name = tensor("op_3355"), val = tensor([1, 1])]; + tensor var_3357 = const()[name = tensor("op_3357"), val = tensor([1, 1])]; + tensor k_61_pad_type_0 = const()[name = tensor("k_61_pad_type_0"), val = tensor("custom")]; + tensor k_61_pad_0 = const()[name = tensor("k_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(241940864))), lut = tensor([-0x1.444p-5, -0x1.84cp-7, 0x1.884p-7, 0x1.444p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_61_cast = conv(dilations = var_3357, groups = var_1186, pad = k_61_pad_0, pad_type = k_61_pad_type_0, strides = var_3355, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_143_cast)[name = tensor("k_61_cast")]; + tensor var_3361 = const()[name = tensor("op_3361"), val = tensor([1, 1])]; + tensor var_3363 = const()[name = tensor("op_3363"), val = tensor([1, 1])]; + tensor v_61_pad_type_0 = const()[name = tensor("v_61_pad_type_0"), val = tensor("custom")]; + tensor v_61_pad_0 = const()[name = tensor("v_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(242350528))), lut = tensor([-0x1.3ccp-5, -0x1.7e8p-7, 0x1.7cp-7, 0x1.3c4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_61_cast = conv(dilations = var_3363, groups = var_1186, pad = v_61_pad_0, pad_type = v_61_pad_type_0, strides = var_3361, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_143_cast)[name = tensor("v_61_cast")]; + tensor var_3367 = const()[name = tensor("op_3367"), val = tensor([2, 20, 64, -1])]; + tensor var_3368_cast = reshape(shape = var_3367, x = q_61_cast)[name = tensor("op_3368_cast")]; + tensor var_3369 = const()[name = tensor("op_3369"), val = tensor([2, 20, 64, -1])]; + tensor var_3370_cast = reshape(shape = var_3369, x = k_61_cast)[name = tensor("op_3370_cast")]; + tensor var_3371 = const()[name = tensor("op_3371"), val = tensor([2, 20, 64, -1])]; + tensor var_3372_cast = reshape(shape = var_3371, x = v_61_cast)[name = tensor("op_3372_cast")]; + tensor attn_weights_121_transpose_x_0 = const()[name = tensor("attn_weights_121_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_121_transpose_y_0 = const()[name = tensor("attn_weights_121_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_121_cast = matmul(transpose_x = attn_weights_121_transpose_x_0, transpose_y = attn_weights_121_transpose_y_0, x = var_3368_cast, y = 
var_3370_cast)[name = tensor("attn_weights_121_cast")]; + tensor attn_weights_123_cast = mul(x = attn_weights_121_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_123_cast")]; + tensor var_3376_cast = softmax(axis = var_1170, x = attn_weights_123_cast)[name = tensor("op_3376_cast")]; + tensor attn_61_transpose_x_0 = const()[name = tensor("attn_61_transpose_x_0"), val = tensor(false)]; + tensor attn_61_transpose_y_0 = const()[name = tensor("attn_61_transpose_y_0"), val = tensor(true)]; + tensor attn_61_cast = matmul(transpose_x = attn_61_transpose_x_0, transpose_y = attn_61_transpose_y_0, x = var_3372_cast, y = var_3376_cast)[name = tensor("attn_61_cast")]; + tensor var_3380 = const()[name = tensor("op_3380"), val = tensor([2, 1280, 1, -1])]; + tensor input_237_cast = reshape(shape = var_3380, x = attn_61_cast)[name = tensor("input_237_cast")]; + tensor var_3385 = const()[name = tensor("op_3385"), val = tensor([1, 1])]; + tensor var_3387 = const()[name = tensor("op_3387"), val = tensor([1, 1])]; + tensor var_3389_pad_type_0 = const()[name = tensor("op_3389_pad_type_0"), val = tensor("custom")]; + tensor var_3389_pad_0 = const()[name = tensor("op_3389_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(242760192))), lut = tensor([-0x1.458p-5, -0x1.868p-7, 0x1.88cp-7, 0x1.46p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(243169856)))]; + tensor var_3389_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_3387, groups = var_1186, pad = var_3389_pad_0, pad_type = var_3389_pad_type_0, strides = var_3385, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_237_cast)[name = tensor("op_3389_cast")]; + tensor inputs_93_cast = add(x = var_3389_cast, y = inputs_91_cast)[name = tensor("inputs_93_cast")]; + tensor var_3393 = const()[name = tensor("op_3393"), val = tensor([1])]; + tensor channels_mean_93_cast = reduce_mean(axes = var_3393, keep_dims = var_1181, x = inputs_93_cast)[name = tensor("channels_mean_93_cast")]; + tensor zero_mean_93_cast = sub(x = inputs_93_cast, y = channels_mean_93_cast)[name = tensor("zero_mean_93_cast")]; + tensor zero_mean_sq_93_cast = mul(x = zero_mean_93_cast, y = zero_mean_93_cast)[name = tensor("zero_mean_sq_93_cast")]; + tensor var_3397 = const()[name = tensor("op_3397"), val = tensor([1])]; + tensor var_3398_cast = reduce_mean(axes = var_3397, keep_dims = var_1181, x = zero_mean_sq_93_cast)[name = tensor("op_3398_cast")]; + tensor var_3399_to_fp16 = const()[name = tensor("op_3399_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3400_cast = add(x = var_3398_cast, y = var_3399_to_fp16)[name = tensor("op_3400_cast")]; + tensor denom_93_epsilon_0_to_fp16 = const()[name = tensor("denom_93_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_93_cast = rsqrt(epsilon = denom_93_epsilon_0_to_fp16, x = var_3400_cast)[name = tensor("denom_93_cast")]; + tensor out_93_cast = mul(x 
= zero_mean_93_cast, y = denom_93_cast)[name = tensor("out_93_cast")]; + tensor var_3404_to_fp16 = const()[name = tensor("op_3404_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(243172480)))]; + tensor var_3405_cast = add(x = out_93_cast, y = var_3404_to_fp16)[name = tensor("op_3405_cast")]; + tensor var_3407_to_fp16 = const()[name = tensor("op_3407_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(243175104)))]; + tensor hidden_states_145_cast = mul(x = var_3405_cast, y = var_3407_to_fp16)[name = tensor("hidden_states_145_cast")]; + tensor var_3414 = const()[name = tensor("op_3414"), val = tensor([1, 1])]; + tensor var_3416 = const()[name = tensor("op_3416"), val = tensor([1, 1])]; + tensor q_63_pad_type_0 = const()[name = tensor("q_63_pad_type_0"), val = tensor("custom")]; + tensor q_63_pad_0 = const()[name = tensor("q_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(243177728))), lut = tensor([-0x1.284p-6, 0x1.274p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_63_cast = conv(dilations = var_3416, groups = var_1186, pad = q_63_pad_0, pad_type = q_63_pad_type_0, strides = var_3414, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_145_cast)[name = tensor("q_63_cast")]; + tensor var_3420 = const()[name = tensor("op_3420"), val = tensor([1, 1])]; + tensor var_3422 = const()[name = tensor("op_3422"), val = tensor([1, 1])]; + tensor k_63_pad_type_0 = const()[name = tensor("k_63_pad_type_0"), val = tensor("custom")]; + tensor k_63_pad_0 = const()[name = tensor("k_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(243382592))), lut = tensor([-0x1.f2p-6, -0x1.278p-7, 0x1.258p-7, 0x1.f1p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_63_cast = conv(dilations = var_3422, groups = var_1186, pad = k_63_pad_0, pad_type = k_63_pad_type_0, strides = var_3420, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_63_cast")]; + tensor var_3426 = const()[name = tensor("op_3426"), val = tensor([1, 1])]; + tensor var_3428 = const()[name = tensor("op_3428"), val = tensor([1, 1])]; + tensor v_63_pad_type_0 = const()[name = tensor("v_63_pad_type_0"), val = tensor("custom")]; + tensor v_63_pad_0 = const()[name = tensor("v_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244038016))), lut = tensor([-0x1.19p-6, 0x1.19p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_63_cast = conv(dilations = var_3428, groups = var_1186, pad = v_63_pad_0, pad_type = 
v_63_pad_type_0, strides = var_3426, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_63_cast")]; + tensor var_3432 = const()[name = tensor("op_3432"), val = tensor([2, 20, 64, -1])]; + tensor var_3433_cast = reshape(shape = var_3432, x = q_63_cast)[name = tensor("op_3433_cast")]; + tensor var_3434 = const()[name = tensor("op_3434"), val = tensor([2, 20, 64, -1])]; + tensor var_3435_cast = reshape(shape = var_3434, x = k_63_cast)[name = tensor("op_3435_cast")]; + tensor var_3436 = const()[name = tensor("op_3436"), val = tensor([2, 20, 64, -1])]; + tensor var_3437_cast = reshape(shape = var_3436, x = v_63_cast)[name = tensor("op_3437_cast")]; + tensor attn_weights_125_transpose_x_0 = const()[name = tensor("attn_weights_125_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_125_transpose_y_0 = const()[name = tensor("attn_weights_125_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_125_cast = matmul(transpose_x = attn_weights_125_transpose_x_0, transpose_y = attn_weights_125_transpose_y_0, x = var_3433_cast, y = var_3435_cast)[name = tensor("attn_weights_125_cast")]; + tensor attn_weights_127_cast = mul(x = attn_weights_125_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_127_cast")]; + tensor var_3441_cast = softmax(axis = var_1170, x = attn_weights_127_cast)[name = tensor("op_3441_cast")]; + tensor attn_63_transpose_x_0 = const()[name = tensor("attn_63_transpose_x_0"), val = tensor(false)]; + tensor attn_63_transpose_y_0 = const()[name = tensor("attn_63_transpose_y_0"), val = tensor(true)]; + tensor attn_63_cast = matmul(transpose_x = attn_63_transpose_x_0, transpose_y = attn_63_transpose_y_0, x = var_3437_cast, y = var_3441_cast)[name = tensor("attn_63_cast")]; + tensor var_3445 = const()[name = tensor("op_3445"), val = tensor([2, 1280, 1, -1])]; + tensor input_239_cast = reshape(shape = var_3445, x = attn_63_cast)[name = tensor("input_239_cast")]; + tensor var_3450 = const()[name = tensor("op_3450"), val = tensor([1, 1])]; + tensor var_3452 = const()[name = tensor("op_3452"), val = tensor([1, 1])]; + tensor var_3454_pad_type_0 = const()[name = tensor("op_3454_pad_type_0"), val = tensor("custom")]; + tensor var_3454_pad_0 = const()[name = tensor("op_3454_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244365760))), lut = tensor([-0x1.6ecp-7, 0x1.6ep-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244570624)))]; + tensor var_3454_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_3452, groups = var_1186, pad = var_3454_pad_0, pad_type = var_3454_pad_type_0, strides = var_3450, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_239_cast)[name = tensor("op_3454_cast")]; + tensor inputs_95_cast = add(x = var_3454_cast, y = inputs_93_cast)[name = tensor("inputs_95_cast")]; + tensor var_3458 
= const()[name = tensor("op_3458"), val = tensor([1])]; + tensor channels_mean_95_cast = reduce_mean(axes = var_3458, keep_dims = var_1181, x = inputs_95_cast)[name = tensor("channels_mean_95_cast")]; + tensor zero_mean_95_cast = sub(x = inputs_95_cast, y = channels_mean_95_cast)[name = tensor("zero_mean_95_cast")]; + tensor zero_mean_sq_95_cast = mul(x = zero_mean_95_cast, y = zero_mean_95_cast)[name = tensor("zero_mean_sq_95_cast")]; + tensor var_3462 = const()[name = tensor("op_3462"), val = tensor([1])]; + tensor var_3463_cast = reduce_mean(axes = var_3462, keep_dims = var_1181, x = zero_mean_sq_95_cast)[name = tensor("op_3463_cast")]; + tensor var_3464_to_fp16 = const()[name = tensor("op_3464_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3465_cast = add(x = var_3463_cast, y = var_3464_to_fp16)[name = tensor("op_3465_cast")]; + tensor denom_95_epsilon_0_to_fp16 = const()[name = tensor("denom_95_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_95_cast = rsqrt(epsilon = denom_95_epsilon_0_to_fp16, x = var_3465_cast)[name = tensor("denom_95_cast")]; + tensor out_95_cast = mul(x = zero_mean_95_cast, y = denom_95_cast)[name = tensor("out_95_cast")]; + tensor var_3469_to_fp16 = const()[name = tensor("op_3469_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244573248)))]; + tensor var_3470_cast = add(x = out_95_cast, y = var_3469_to_fp16)[name = tensor("op_3470_cast")]; + tensor var_3472_to_fp16 = const()[name = tensor("op_3472_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244575872)))]; + tensor input_241_cast = mul(x = var_3470_cast, y = var_3472_to_fp16)[name = tensor("input_241_cast")]; + tensor var_3480 = const()[name = tensor("op_3480"), val = tensor([1, 1])]; + tensor var_3482 = const()[name = tensor("op_3482"), val = tensor([1, 1])]; + tensor var_3484_pad_type_0 = const()[name = tensor("op_3484_pad_type_0"), val = tensor("custom")]; + tensor var_3484_pad_0 = const()[name = tensor("op_3484_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(244578496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(251132160))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(251132288)))]; + tensor var_3484_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_3482, groups = var_1186, pad = var_3484_pad_0, pad_type = var_3484_pad_type_0, strides = var_3480, weight = down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_241_cast)[name = tensor("op_3484_cast")]; + tensor var_3485_split_sizes_0 = const()[name = tensor("op_3485_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3485_axis_0 = const()[name = tensor("op_3485_axis_0"), val = tensor(1)]; + tensor var_3485_cast_0, tensor var_3485_cast_1 = split(axis = var_3485_axis_0, split_sizes = var_3485_split_sizes_0, x = 
var_3484_cast)[name = tensor("op_3485_cast")]; + tensor var_3487_mode_0 = const()[name = tensor("op_3487_mode_0"), val = tensor("EXACT")]; + tensor var_3487_cast = gelu(mode = var_3487_mode_0, x = var_3485_cast_1)[name = tensor("op_3487_cast")]; + tensor input_243_cast = mul(x = var_3485_cast_0, y = var_3487_cast)[name = tensor("input_243_cast")]; + tensor var_3491 = const()[name = tensor("op_3491"), val = tensor([1, 1])]; + tensor var_3493 = const()[name = tensor("op_3493"), val = tensor([1, 1])]; + tensor var_3495_pad_type_0 = const()[name = tensor("op_3495_pad_type_0"), val = tensor("custom")]; + tensor var_3495_pad_0 = const()[name = tensor("op_3495_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(251152832))), lut = tensor([-0x1.43p-5, -0x1.828p-7, 0x1.83cp-7, 0x1.438p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252791296)))]; + tensor var_3495_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_3493, groups = var_1186, pad = var_3495_pad_0, pad_type = var_3495_pad_type_0, strides = var_3491, weight = down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_243_cast)[name = tensor("op_3495_cast")]; + tensor inputs_97_cast = add(x = var_3495_cast, y = inputs_95_cast)[name = tensor("inputs_97_cast")]; + tensor var_3505 = const()[name = tensor("op_3505"), val = tensor([1])]; + tensor channels_mean_97_cast = reduce_mean(axes = var_3505, keep_dims = var_1181, x = inputs_97_cast)[name = tensor("channels_mean_97_cast")]; + tensor zero_mean_97_cast = sub(x = inputs_97_cast, y = channels_mean_97_cast)[name = tensor("zero_mean_97_cast")]; + tensor zero_mean_sq_97_cast = mul(x = zero_mean_97_cast, y = zero_mean_97_cast)[name = tensor("zero_mean_sq_97_cast")]; + tensor var_3509 = const()[name = tensor("op_3509"), val = tensor([1])]; + tensor var_3510_cast = reduce_mean(axes = var_3509, keep_dims = var_1181, x = zero_mean_sq_97_cast)[name = tensor("op_3510_cast")]; + tensor var_3511_to_fp16 = const()[name = tensor("op_3511_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3512_cast = add(x = var_3510_cast, y = var_3511_to_fp16)[name = tensor("op_3512_cast")]; + tensor denom_97_epsilon_0_to_fp16 = const()[name = tensor("denom_97_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_97_cast = rsqrt(epsilon = denom_97_epsilon_0_to_fp16, x = var_3512_cast)[name = tensor("denom_97_cast")]; + tensor out_97_cast = mul(x = zero_mean_97_cast, y = denom_97_cast)[name = tensor("out_97_cast")]; + tensor var_3516_to_fp16 = const()[name = tensor("op_3516_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252793920)))]; + tensor var_3517_cast = add(x = out_97_cast, y = var_3516_to_fp16)[name = tensor("op_3517_cast")]; + tensor var_3519_to_fp16 = const()[name = tensor("op_3519_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252796544)))]; + tensor hidden_states_149_cast = 
mul(x = var_3517_cast, y = var_3519_to_fp16)[name = tensor("hidden_states_149_cast")]; + tensor var_3526 = const()[name = tensor("op_3526"), val = tensor([1, 1])]; + tensor var_3528 = const()[name = tensor("op_3528"), val = tensor([1, 1])]; + tensor q_65_pad_type_0 = const()[name = tensor("q_65_pad_type_0"), val = tensor("custom")]; + tensor q_65_pad_0 = const()[name = tensor("q_65_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252799168))), lut = tensor([-0x1.57p-6, 0x1.57p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_65_cast = conv(dilations = var_3528, groups = var_1186, pad = q_65_pad_0, pad_type = q_65_pad_type_0, strides = var_3526, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_149_cast)[name = tensor("q_65_cast")]; + tensor var_3532 = const()[name = tensor("op_3532"), val = tensor([1, 1])]; + tensor var_3534 = const()[name = tensor("op_3534"), val = tensor([1, 1])]; + tensor k_65_pad_type_0 = const()[name = tensor("k_65_pad_type_0"), val = tensor("custom")]; + tensor k_65_pad_0 = const()[name = tensor("k_65_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(253004032))), lut = tensor([-0x1.44cp-5, -0x1.86cp-7, 0x1.854p-7, 0x1.444p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_65_cast = conv(dilations = var_3534, groups = var_1186, pad = k_65_pad_0, pad_type = k_65_pad_type_0, strides = var_3532, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_149_cast)[name = tensor("k_65_cast")]; + tensor var_3538 = const()[name = tensor("op_3538"), val = tensor([1, 1])]; + tensor var_3540 = const()[name = tensor("op_3540"), val = tensor([1, 1])]; + tensor v_65_pad_type_0 = const()[name = tensor("v_65_pad_type_0"), val = tensor("custom")]; + tensor v_65_pad_0 = const()[name = tensor("v_65_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(253413696))), lut = tensor([-0x1.3bcp-5, -0x1.7bp-7, 0x1.7ep-7, 0x1.3c8p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_65_cast = conv(dilations = var_3540, groups = var_1186, pad = v_65_pad_0, pad_type = v_65_pad_type_0, strides = var_3538, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_149_cast)[name = tensor("v_65_cast")]; + tensor var_3544 = const()[name = tensor("op_3544"), val = tensor([2, 20, 64, -1])]; + tensor var_3545_cast = reshape(shape = var_3544, x = q_65_cast)[name = tensor("op_3545_cast")]; + tensor var_3546 = const()[name = tensor("op_3546"), val = tensor([2, 20, 64, -1])]; + tensor var_3547_cast = reshape(shape = var_3546, x = 
k_65_cast)[name = tensor("op_3547_cast")]; + tensor var_3548 = const()[name = tensor("op_3548"), val = tensor([2, 20, 64, -1])]; + tensor var_3549_cast = reshape(shape = var_3548, x = v_65_cast)[name = tensor("op_3549_cast")]; + tensor attn_weights_129_transpose_x_0 = const()[name = tensor("attn_weights_129_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_129_transpose_y_0 = const()[name = tensor("attn_weights_129_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_129_cast = matmul(transpose_x = attn_weights_129_transpose_x_0, transpose_y = attn_weights_129_transpose_y_0, x = var_3545_cast, y = var_3547_cast)[name = tensor("attn_weights_129_cast")]; + tensor attn_weights_131_cast = mul(x = attn_weights_129_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_131_cast")]; + tensor var_3553_cast = softmax(axis = var_1170, x = attn_weights_131_cast)[name = tensor("op_3553_cast")]; + tensor attn_65_transpose_x_0 = const()[name = tensor("attn_65_transpose_x_0"), val = tensor(false)]; + tensor attn_65_transpose_y_0 = const()[name = tensor("attn_65_transpose_y_0"), val = tensor(true)]; + tensor attn_65_cast = matmul(transpose_x = attn_65_transpose_x_0, transpose_y = attn_65_transpose_y_0, x = var_3549_cast, y = var_3553_cast)[name = tensor("attn_65_cast")]; + tensor var_3557 = const()[name = tensor("op_3557"), val = tensor([2, 1280, 1, -1])]; + tensor input_245_cast = reshape(shape = var_3557, x = attn_65_cast)[name = tensor("input_245_cast")]; + tensor var_3562 = const()[name = tensor("op_3562"), val = tensor([1, 1])]; + tensor var_3564 = const()[name = tensor("op_3564"), val = tensor([1, 1])]; + tensor var_3566_pad_type_0 = const()[name = tensor("op_3566_pad_type_0"), val = tensor("custom")]; + tensor var_3566_pad_0 = const()[name = tensor("op_3566_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(253823360))), lut = tensor([-0x1.42p-5, -0x1.81cp-7, 0x1.844p-7, 0x1.42p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(254233024)))]; + tensor var_3566_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_3564, groups = var_1186, pad = var_3566_pad_0, pad_type = var_3566_pad_type_0, strides = var_3562, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_245_cast)[name = tensor("op_3566_cast")]; + tensor inputs_99_cast = add(x = var_3566_cast, y = inputs_97_cast)[name = tensor("inputs_99_cast")]; + tensor var_3570 = const()[name = tensor("op_3570"), val = tensor([1])]; + tensor channels_mean_99_cast = reduce_mean(axes = var_3570, keep_dims = var_1181, x = inputs_99_cast)[name = tensor("channels_mean_99_cast")]; + tensor zero_mean_99_cast = sub(x = inputs_99_cast, y = channels_mean_99_cast)[name = tensor("zero_mean_99_cast")]; + tensor zero_mean_sq_99_cast = mul(x = zero_mean_99_cast, y = zero_mean_99_cast)[name = tensor("zero_mean_sq_99_cast")]; + tensor var_3574 = const()[name = 
tensor("op_3574"), val = tensor([1])]; + tensor var_3575_cast = reduce_mean(axes = var_3574, keep_dims = var_1181, x = zero_mean_sq_99_cast)[name = tensor("op_3575_cast")]; + tensor var_3576_to_fp16 = const()[name = tensor("op_3576_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3577_cast = add(x = var_3575_cast, y = var_3576_to_fp16)[name = tensor("op_3577_cast")]; + tensor denom_99_epsilon_0_to_fp16 = const()[name = tensor("denom_99_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_99_cast = rsqrt(epsilon = denom_99_epsilon_0_to_fp16, x = var_3577_cast)[name = tensor("denom_99_cast")]; + tensor out_99_cast = mul(x = zero_mean_99_cast, y = denom_99_cast)[name = tensor("out_99_cast")]; + tensor var_3581_to_fp16 = const()[name = tensor("op_3581_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(254235648)))]; + tensor var_3582_cast = add(x = out_99_cast, y = var_3581_to_fp16)[name = tensor("op_3582_cast")]; + tensor var_3584_to_fp16 = const()[name = tensor("op_3584_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(254238272)))]; + tensor hidden_states_151_cast = mul(x = var_3582_cast, y = var_3584_to_fp16)[name = tensor("hidden_states_151_cast")]; + tensor var_3591 = const()[name = tensor("op_3591"), val = tensor([1, 1])]; + tensor var_3593 = const()[name = tensor("op_3593"), val = tensor([1, 1])]; + tensor q_67_pad_type_0 = const()[name = tensor("q_67_pad_type_0"), val = tensor("custom")]; + tensor q_67_pad_0 = const()[name = tensor("q_67_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(254240896))), lut = tensor([-0x1.354p-6, 0x1.364p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_67_cast = conv(dilations = var_3593, groups = var_1186, pad = q_67_pad_0, pad_type = q_67_pad_type_0, strides = var_3591, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_151_cast)[name = tensor("q_67_cast")]; + tensor var_3597 = const()[name = tensor("op_3597"), val = tensor([1, 1])]; + tensor var_3599 = const()[name = tensor("op_3599"), val = tensor([1, 1])]; + tensor k_67_pad_type_0 = const()[name = tensor("k_67_pad_type_0"), val = tensor("custom")]; + tensor k_67_pad_0 = const()[name = tensor("k_67_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(254445760))), lut = tensor([-0x1.08p-6, 0x1.084p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_67_cast = conv(dilations = var_3599, groups = var_1186, pad = k_67_pad_0, pad_type = k_67_pad_type_0, strides = var_3597, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_67_cast")]; + tensor var_3603 = const()[name = tensor("op_3603"), val = tensor([1, 1])]; + tensor var_3605 = const()[name = tensor("op_3605"), val = tensor([1, 1])]; + tensor v_67_pad_type_0 = const()[name = tensor("v_67_pad_type_0"), val = 
tensor("custom")]; + tensor v_67_pad_0 = const()[name = tensor("v_67_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(254773504))), lut = tensor([-0x1.1f4p-6, 0x1.2p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_67_cast = conv(dilations = var_3605, groups = var_1186, pad = v_67_pad_0, pad_type = v_67_pad_type_0, strides = var_3603, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_67_cast")]; + tensor var_3609 = const()[name = tensor("op_3609"), val = tensor([2, 20, 64, -1])]; + tensor var_3610_cast = reshape(shape = var_3609, x = q_67_cast)[name = tensor("op_3610_cast")]; + tensor var_3611 = const()[name = tensor("op_3611"), val = tensor([2, 20, 64, -1])]; + tensor var_3612_cast = reshape(shape = var_3611, x = k_67_cast)[name = tensor("op_3612_cast")]; + tensor var_3613 = const()[name = tensor("op_3613"), val = tensor([2, 20, 64, -1])]; + tensor var_3614_cast = reshape(shape = var_3613, x = v_67_cast)[name = tensor("op_3614_cast")]; + tensor attn_weights_133_transpose_x_0 = const()[name = tensor("attn_weights_133_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_133_transpose_y_0 = const()[name = tensor("attn_weights_133_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_133_cast = matmul(transpose_x = attn_weights_133_transpose_x_0, transpose_y = attn_weights_133_transpose_y_0, x = var_3610_cast, y = var_3612_cast)[name = tensor("attn_weights_133_cast")]; + tensor attn_weights_135_cast = mul(x = attn_weights_133_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_135_cast")]; + tensor var_3618_cast = softmax(axis = var_1170, x = attn_weights_135_cast)[name = tensor("op_3618_cast")]; + tensor attn_67_transpose_x_0 = const()[name = tensor("attn_67_transpose_x_0"), val = tensor(false)]; + tensor attn_67_transpose_y_0 = const()[name = tensor("attn_67_transpose_y_0"), val = tensor(true)]; + tensor attn_67_cast = matmul(transpose_x = attn_67_transpose_x_0, transpose_y = attn_67_transpose_y_0, x = var_3614_cast, y = var_3618_cast)[name = tensor("attn_67_cast")]; + tensor var_3622 = const()[name = tensor("op_3622"), val = tensor([2, 1280, 1, -1])]; + tensor input_247_cast = reshape(shape = var_3622, x = attn_67_cast)[name = tensor("input_247_cast")]; + tensor var_3627 = const()[name = tensor("op_3627"), val = tensor([1, 1])]; + tensor var_3629 = const()[name = tensor("op_3629"), val = tensor([1, 1])]; + tensor var_3631_pad_type_0 = const()[name = tensor("op_3631_pad_type_0"), val = tensor("custom")]; + tensor var_3631_pad_0 = const()[name = tensor("op_3631_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255101248))), lut = tensor([-0x1.6ecp-7, 0x1.6fp-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = 
tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255306112)))]; + tensor var_3631_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_3629, groups = var_1186, pad = var_3631_pad_0, pad_type = var_3631_pad_type_0, strides = var_3627, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_247_cast)[name = tensor("op_3631_cast")]; + tensor inputs_101_cast = add(x = var_3631_cast, y = inputs_99_cast)[name = tensor("inputs_101_cast")]; + tensor var_3635 = const()[name = tensor("op_3635"), val = tensor([1])]; + tensor channels_mean_101_cast = reduce_mean(axes = var_3635, keep_dims = var_1181, x = inputs_101_cast)[name = tensor("channels_mean_101_cast")]; + tensor zero_mean_101_cast = sub(x = inputs_101_cast, y = channels_mean_101_cast)[name = tensor("zero_mean_101_cast")]; + tensor zero_mean_sq_101_cast = mul(x = zero_mean_101_cast, y = zero_mean_101_cast)[name = tensor("zero_mean_sq_101_cast")]; + tensor var_3639 = const()[name = tensor("op_3639"), val = tensor([1])]; + tensor var_3640_cast = reduce_mean(axes = var_3639, keep_dims = var_1181, x = zero_mean_sq_101_cast)[name = tensor("op_3640_cast")]; + tensor var_3641_to_fp16 = const()[name = tensor("op_3641_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3642_cast = add(x = var_3640_cast, y = var_3641_to_fp16)[name = tensor("op_3642_cast")]; + tensor denom_101_epsilon_0_to_fp16 = const()[name = tensor("denom_101_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_101_cast = rsqrt(epsilon = denom_101_epsilon_0_to_fp16, x = var_3642_cast)[name = tensor("denom_101_cast")]; + tensor out_101_cast = mul(x = zero_mean_101_cast, y = denom_101_cast)[name = tensor("out_101_cast")]; + tensor var_3646_to_fp16 = const()[name = tensor("op_3646_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255308736)))]; + tensor var_3647_cast = add(x = out_101_cast, y = var_3646_to_fp16)[name = tensor("op_3647_cast")]; + tensor var_3649_to_fp16 = const()[name = tensor("op_3649_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255311360)))]; + tensor input_249_cast = mul(x = var_3647_cast, y = var_3649_to_fp16)[name = tensor("input_249_cast")]; + tensor var_3657 = const()[name = tensor("op_3657"), val = tensor([1, 1])]; + tensor var_3659 = const()[name = tensor("op_3659"), val = tensor([1, 1])]; + tensor var_3661_pad_type_0 = const()[name = tensor("op_3661_pad_type_0"), val = tensor("custom")]; + tensor var_3661_pad_0 = const()[name = tensor("op_3661_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255313984))), lut = tensor([-0x1.4a8p-5, -0x1.8ccp-7, 0x1.8c4p-7, 0x1.4ap-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(258590848)))]; + tensor var_3661_cast = 
conv(bias = down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_3659, groups = var_1186, pad = var_3661_pad_0, pad_type = var_3661_pad_type_0, strides = var_3657, weight = down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_249_cast)[name = tensor("op_3661_cast")]; + tensor var_3662_split_sizes_0 = const()[name = tensor("op_3662_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3662_axis_0 = const()[name = tensor("op_3662_axis_0"), val = tensor(1)]; + tensor var_3662_cast_0, tensor var_3662_cast_1 = split(axis = var_3662_axis_0, split_sizes = var_3662_split_sizes_0, x = var_3661_cast)[name = tensor("op_3662_cast")]; + tensor var_3664_mode_0 = const()[name = tensor("op_3664_mode_0"), val = tensor("EXACT")]; + tensor var_3664_cast = gelu(mode = var_3664_mode_0, x = var_3662_cast_1)[name = tensor("op_3664_cast")]; + tensor input_251_cast = mul(x = var_3662_cast_0, y = var_3664_cast)[name = tensor("input_251_cast")]; + tensor var_3668 = const()[name = tensor("op_3668"), val = tensor([1, 1])]; + tensor var_3670 = const()[name = tensor("op_3670"), val = tensor([1, 1])]; + tensor var_3672_pad_type_0 = const()[name = tensor("op_3672_pad_type_0"), val = tensor("custom")]; + tensor var_3672_pad_0 = const()[name = tensor("op_3672_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(258611392))), lut = tensor([-0x1.484p-5, -0x1.88p-7, 0x1.89cp-7, 0x1.488p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(260249856)))]; + tensor var_3672_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_3670, groups = var_1186, pad = var_3672_pad_0, pad_type = var_3672_pad_type_0, strides = var_3668, weight = down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_251_cast)[name = tensor("op_3672_cast")]; + tensor inputs_103_cast = add(x = var_3672_cast, y = inputs_101_cast)[name = tensor("inputs_103_cast")]; + tensor var_3682 = const()[name = tensor("op_3682"), val = tensor([1])]; + tensor channels_mean_103_cast = reduce_mean(axes = var_3682, keep_dims = var_1181, x = inputs_103_cast)[name = tensor("channels_mean_103_cast")]; + tensor zero_mean_103_cast = sub(x = inputs_103_cast, y = channels_mean_103_cast)[name = tensor("zero_mean_103_cast")]; + tensor zero_mean_sq_103_cast = mul(x = zero_mean_103_cast, y = zero_mean_103_cast)[name = tensor("zero_mean_sq_103_cast")]; + tensor var_3686 = const()[name = tensor("op_3686"), val = tensor([1])]; + tensor var_3687_cast = reduce_mean(axes = var_3686, keep_dims = var_1181, x = zero_mean_sq_103_cast)[name = tensor("op_3687_cast")]; + tensor var_3688_to_fp16 = const()[name = tensor("op_3688_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3689_cast = add(x = var_3687_cast, y = var_3688_to_fp16)[name = tensor("op_3689_cast")]; + tensor denom_103_epsilon_0_to_fp16 = const()[name = tensor("denom_103_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor 
denom_103_cast = rsqrt(epsilon = denom_103_epsilon_0_to_fp16, x = var_3689_cast)[name = tensor("denom_103_cast")]; + tensor out_103_cast = mul(x = zero_mean_103_cast, y = denom_103_cast)[name = tensor("out_103_cast")]; + tensor var_3693_to_fp16 = const()[name = tensor("op_3693_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(260252480)))]; + tensor var_3694_cast = add(x = out_103_cast, y = var_3693_to_fp16)[name = tensor("op_3694_cast")]; + tensor var_3696_to_fp16 = const()[name = tensor("op_3696_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(260255104)))]; + tensor hidden_states_155_cast = mul(x = var_3694_cast, y = var_3696_to_fp16)[name = tensor("hidden_states_155_cast")]; + tensor var_3703 = const()[name = tensor("op_3703"), val = tensor([1, 1])]; + tensor var_3705 = const()[name = tensor("op_3705"), val = tensor([1, 1])]; + tensor q_69_pad_type_0 = const()[name = tensor("q_69_pad_type_0"), val = tensor("custom")]; + tensor q_69_pad_0 = const()[name = tensor("q_69_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(260257728))), lut = tensor([-0x1.568p-6, 0x1.58p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_69_cast = conv(dilations = var_3705, groups = var_1186, pad = q_69_pad_0, pad_type = q_69_pad_type_0, strides = var_3703, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_155_cast)[name = tensor("q_69_cast")]; + tensor var_3709 = const()[name = tensor("op_3709"), val = tensor([1, 1])]; + tensor var_3711 = const()[name = tensor("op_3711"), val = tensor([1, 1])]; + tensor k_69_pad_type_0 = const()[name = tensor("k_69_pad_type_0"), val = tensor("custom")]; + tensor k_69_pad_0 = const()[name = tensor("k_69_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(260462592))), lut = tensor([-0x1.574p-6, 0x1.56cp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_69_cast = conv(dilations = var_3711, groups = var_1186, pad = k_69_pad_0, pad_type = k_69_pad_type_0, strides = var_3709, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_155_cast)[name = tensor("k_69_cast")]; + tensor var_3715 = const()[name = tensor("op_3715"), val = tensor([1, 1])]; + tensor var_3717 = const()[name = tensor("op_3717"), val = tensor([1, 1])]; + tensor v_69_pad_type_0 = const()[name = tensor("v_69_pad_type_0"), val = tensor("custom")]; + tensor v_69_pad_0 = const()[name = tensor("v_69_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(260667456))), lut = tensor([-0x1.358p-5, -0x1.73p-7, 0x1.74cp-7, 0x1.358p-5]), name = 
tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_69_cast = conv(dilations = var_3717, groups = var_1186, pad = v_69_pad_0, pad_type = v_69_pad_type_0, strides = var_3715, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_155_cast)[name = tensor("v_69_cast")]; + tensor var_3721 = const()[name = tensor("op_3721"), val = tensor([2, 20, 64, -1])]; + tensor var_3722_cast = reshape(shape = var_3721, x = q_69_cast)[name = tensor("op_3722_cast")]; + tensor var_3723 = const()[name = tensor("op_3723"), val = tensor([2, 20, 64, -1])]; + tensor var_3724_cast = reshape(shape = var_3723, x = k_69_cast)[name = tensor("op_3724_cast")]; + tensor var_3725 = const()[name = tensor("op_3725"), val = tensor([2, 20, 64, -1])]; + tensor var_3726_cast = reshape(shape = var_3725, x = v_69_cast)[name = tensor("op_3726_cast")]; + tensor attn_weights_137_transpose_x_0 = const()[name = tensor("attn_weights_137_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_137_transpose_y_0 = const()[name = tensor("attn_weights_137_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_137_cast = matmul(transpose_x = attn_weights_137_transpose_x_0, transpose_y = attn_weights_137_transpose_y_0, x = var_3722_cast, y = var_3724_cast)[name = tensor("attn_weights_137_cast")]; + tensor attn_weights_139_cast = mul(x = attn_weights_137_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_139_cast")]; + tensor var_3730_cast = softmax(axis = var_1170, x = attn_weights_139_cast)[name = tensor("op_3730_cast")]; + tensor attn_69_transpose_x_0 = const()[name = tensor("attn_69_transpose_x_0"), val = tensor(false)]; + tensor attn_69_transpose_y_0 = const()[name = tensor("attn_69_transpose_y_0"), val = tensor(true)]; + tensor attn_69_cast = matmul(transpose_x = attn_69_transpose_x_0, transpose_y = attn_69_transpose_y_0, x = var_3726_cast, y = var_3730_cast)[name = tensor("attn_69_cast")]; + tensor var_3734 = const()[name = tensor("op_3734"), val = tensor([2, 1280, 1, -1])]; + tensor input_253_cast = reshape(shape = var_3734, x = attn_69_cast)[name = tensor("input_253_cast")]; + tensor var_3739 = const()[name = tensor("op_3739"), val = tensor([1, 1])]; + tensor var_3741 = const()[name = tensor("op_3741"), val = tensor([1, 1])]; + tensor var_3743_pad_type_0 = const()[name = tensor("op_3743_pad_type_0"), val = tensor("custom")]; + tensor var_3743_pad_0 = const()[name = tensor("op_3743_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(261077120))), lut = tensor([-0x1.35cp-5, -0x1.74p-7, 0x1.74p-7, 0x1.358p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(261486784)))]; + tensor var_3743_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_3741, groups = var_1186, pad = var_3743_pad_0, pad_type = var_3743_pad_type_0, strides = var_3739, weight = 
down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_253_cast)[name = tensor("op_3743_cast")]; + tensor inputs_105_cast = add(x = var_3743_cast, y = inputs_103_cast)[name = tensor("inputs_105_cast")]; + tensor var_3747 = const()[name = tensor("op_3747"), val = tensor([1])]; + tensor channels_mean_105_cast = reduce_mean(axes = var_3747, keep_dims = var_1181, x = inputs_105_cast)[name = tensor("channels_mean_105_cast")]; + tensor zero_mean_105_cast = sub(x = inputs_105_cast, y = channels_mean_105_cast)[name = tensor("zero_mean_105_cast")]; + tensor zero_mean_sq_105_cast = mul(x = zero_mean_105_cast, y = zero_mean_105_cast)[name = tensor("zero_mean_sq_105_cast")]; + tensor var_3751 = const()[name = tensor("op_3751"), val = tensor([1])]; + tensor var_3752_cast = reduce_mean(axes = var_3751, keep_dims = var_1181, x = zero_mean_sq_105_cast)[name = tensor("op_3752_cast")]; + tensor var_3753_to_fp16 = const()[name = tensor("op_3753_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3754_cast = add(x = var_3752_cast, y = var_3753_to_fp16)[name = tensor("op_3754_cast")]; + tensor denom_105_epsilon_0_to_fp16 = const()[name = tensor("denom_105_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_105_cast = rsqrt(epsilon = denom_105_epsilon_0_to_fp16, x = var_3754_cast)[name = tensor("denom_105_cast")]; + tensor out_105_cast = mul(x = zero_mean_105_cast, y = denom_105_cast)[name = tensor("out_105_cast")]; + tensor var_3758_to_fp16 = const()[name = tensor("op_3758_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(261489408)))]; + tensor var_3759_cast = add(x = out_105_cast, y = var_3758_to_fp16)[name = tensor("op_3759_cast")]; + tensor var_3761_to_fp16 = const()[name = tensor("op_3761_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(261492032)))]; + tensor hidden_states_157_cast = mul(x = var_3759_cast, y = var_3761_to_fp16)[name = tensor("hidden_states_157_cast")]; + tensor var_3768 = const()[name = tensor("op_3768"), val = tensor([1, 1])]; + tensor var_3770 = const()[name = tensor("op_3770"), val = tensor([1, 1])]; + tensor q_71_pad_type_0 = const()[name = tensor("q_71_pad_type_0"), val = tensor("custom")]; + tensor q_71_pad_0 = const()[name = tensor("q_71_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(261494656))), lut = tensor([-0x1.26cp-6, 0x1.268p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_71_cast = conv(dilations = var_3770, groups = var_1186, pad = q_71_pad_0, pad_type = q_71_pad_type_0, strides = var_3768, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_157_cast)[name = tensor("q_71_cast")]; + tensor var_3774 = const()[name = tensor("op_3774"), val = tensor([1, 1])]; + tensor var_3776 = const()[name = tensor("op_3776"), val = tensor([1, 1])]; + tensor k_71_pad_type_0 = const()[name = tensor("k_71_pad_type_0"), val = tensor("custom")]; + tensor k_71_pad_0 = const()[name = tensor("k_71_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(261699520))), lut = tensor([-0x1.f2cp-7, 0x1.f4cp-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_71_cast = conv(dilations = var_3776, groups = var_1186, pad = k_71_pad_0, pad_type = k_71_pad_type_0, strides = var_3774, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_71_cast")]; + tensor var_3780 = const()[name = tensor("op_3780"), val = tensor([1, 1])]; + tensor var_3782 = const()[name = tensor("op_3782"), val = tensor([1, 1])]; + tensor v_71_pad_type_0 = const()[name = tensor("v_71_pad_type_0"), val = tensor("custom")]; + tensor v_71_pad_0 = const()[name = tensor("v_71_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(262027264))), lut = tensor([-0x1.1ecp-6, 0x1.1f4p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_71_cast = conv(dilations = var_3782, groups = var_1186, pad = v_71_pad_0, pad_type = v_71_pad_type_0, strides = var_3780, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_71_cast")]; + tensor var_3786 = const()[name = tensor("op_3786"), val = tensor([2, 20, 64, -1])]; + tensor var_3787_cast = reshape(shape = var_3786, x = q_71_cast)[name = tensor("op_3787_cast")]; + tensor var_3788 = const()[name = tensor("op_3788"), val = tensor([2, 20, 64, -1])]; + tensor var_3789_cast = reshape(shape = var_3788, x = k_71_cast)[name = tensor("op_3789_cast")]; + tensor var_3790 = const()[name = tensor("op_3790"), val = tensor([2, 20, 64, -1])]; + tensor var_3791_cast = reshape(shape = var_3790, x = v_71_cast)[name = tensor("op_3791_cast")]; + tensor attn_weights_141_transpose_x_0 = const()[name = tensor("attn_weights_141_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_141_transpose_y_0 = const()[name = tensor("attn_weights_141_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_141_cast = matmul(transpose_x = attn_weights_141_transpose_x_0, transpose_y = attn_weights_141_transpose_y_0, x = var_3787_cast, y = var_3789_cast)[name = tensor("attn_weights_141_cast")]; + tensor attn_weights_143_cast = mul(x = attn_weights_141_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_143_cast")]; + tensor var_3795_cast = softmax(axis = var_1170, x = attn_weights_143_cast)[name = tensor("op_3795_cast")]; + tensor attn_71_transpose_x_0 = const()[name = tensor("attn_71_transpose_x_0"), val = tensor(false)]; + tensor attn_71_transpose_y_0 = const()[name = tensor("attn_71_transpose_y_0"), val = tensor(true)]; + tensor attn_71_cast = matmul(transpose_x = attn_71_transpose_x_0, transpose_y = attn_71_transpose_y_0, x = var_3791_cast, y = var_3795_cast)[name = tensor("attn_71_cast")]; + tensor var_3799 = const()[name = tensor("op_3799"), val = tensor([2, 1280, 1, -1])]; + tensor input_255_cast = reshape(shape = var_3799, x = attn_71_cast)[name = tensor("input_255_cast")]; + tensor var_3804 = const()[name = tensor("op_3804"), val = tensor([1, 1])]; + tensor var_3806 = const()[name = tensor("op_3806"), val = tensor([1, 1])]; + tensor 
var_3808_pad_type_0 = const()[name = tensor("op_3808_pad_type_0"), val = tensor("custom")]; + tensor var_3808_pad_0 = const()[name = tensor("op_3808_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(262355008))), lut = tensor([-0x1.618p-7, 0x1.628p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(262559872)))]; + tensor var_3808_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_3806, groups = var_1186, pad = var_3808_pad_0, pad_type = var_3808_pad_type_0, strides = var_3804, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_255_cast)[name = tensor("op_3808_cast")]; + tensor inputs_107_cast = add(x = var_3808_cast, y = inputs_105_cast)[name = tensor("inputs_107_cast")]; + tensor var_3812 = const()[name = tensor("op_3812"), val = tensor([1])]; + tensor channels_mean_107_cast = reduce_mean(axes = var_3812, keep_dims = var_1181, x = inputs_107_cast)[name = tensor("channels_mean_107_cast")]; + tensor zero_mean_107_cast = sub(x = inputs_107_cast, y = channels_mean_107_cast)[name = tensor("zero_mean_107_cast")]; + tensor zero_mean_sq_107_cast = mul(x = zero_mean_107_cast, y = zero_mean_107_cast)[name = tensor("zero_mean_sq_107_cast")]; + tensor var_3816 = const()[name = tensor("op_3816"), val = tensor([1])]; + tensor var_3817_cast = reduce_mean(axes = var_3816, keep_dims = var_1181, x = zero_mean_sq_107_cast)[name = tensor("op_3817_cast")]; + tensor var_3818_to_fp16 = const()[name = tensor("op_3818_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3819_cast = add(x = var_3817_cast, y = var_3818_to_fp16)[name = tensor("op_3819_cast")]; + tensor denom_107_epsilon_0_to_fp16 = const()[name = tensor("denom_107_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_107_cast = rsqrt(epsilon = denom_107_epsilon_0_to_fp16, x = var_3819_cast)[name = tensor("denom_107_cast")]; + tensor out_107_cast = mul(x = zero_mean_107_cast, y = denom_107_cast)[name = tensor("out_107_cast")]; + tensor var_3823_to_fp16 = const()[name = tensor("op_3823_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(262562496)))]; + tensor var_3824_cast = add(x = out_107_cast, y = var_3823_to_fp16)[name = tensor("op_3824_cast")]; + tensor var_3826_to_fp16 = const()[name = tensor("op_3826_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(262565120)))]; + tensor input_257_cast = mul(x = var_3824_cast, y = var_3826_to_fp16)[name = tensor("input_257_cast")]; + tensor var_3834 = const()[name = tensor("op_3834"), val = tensor([1, 1])]; + tensor var_3836 = const()[name = tensor("op_3836"), val = tensor([1, 1])]; + tensor var_3838_pad_type_0 = const()[name = tensor("op_3838_pad_type_0"), val = tensor("custom")]; + tensor var_3838_pad_0 = const()[name = tensor("op_3838_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(262567744))), lut = tensor([-0x1.52p-5, -0x1.97p-7, 0x1.93p-7, 0x1.514p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(265844608)))]; + tensor var_3838_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_3836, groups = var_1186, pad = var_3838_pad_0, pad_type = var_3838_pad_type_0, strides = var_3834, weight = down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_257_cast)[name = tensor("op_3838_cast")]; + tensor var_3839_split_sizes_0 = const()[name = tensor("op_3839_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3839_axis_0 = const()[name = tensor("op_3839_axis_0"), val = tensor(1)]; + tensor var_3839_cast_0, tensor var_3839_cast_1 = split(axis = var_3839_axis_0, split_sizes = var_3839_split_sizes_0, x = var_3838_cast)[name = tensor("op_3839_cast")]; + tensor var_3841_mode_0 = const()[name = tensor("op_3841_mode_0"), val = tensor("EXACT")]; + tensor var_3841_cast = gelu(mode = var_3841_mode_0, x = var_3839_cast_1)[name = tensor("op_3841_cast")]; + tensor input_259_cast = mul(x = var_3839_cast_0, y = var_3841_cast)[name = tensor("input_259_cast")]; + tensor var_3845 = const()[name = tensor("op_3845"), val = tensor([1, 1])]; + tensor var_3847 = const()[name = tensor("op_3847"), val = tensor([1, 1])]; + tensor var_3849_pad_type_0 = const()[name = tensor("op_3849_pad_type_0"), val = tensor("custom")]; + tensor var_3849_pad_0 = const()[name = tensor("op_3849_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(265865152))), lut = tensor([-0x1.4c4p-5, -0x1.8dp-7, 0x1.8dcp-7, 0x1.4c4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(267503616)))]; + tensor var_3849_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_3847, groups = var_1186, pad = var_3849_pad_0, pad_type = var_3849_pad_type_0, strides = var_3845, weight = down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_259_cast)[name = tensor("op_3849_cast")]; + tensor inputs_109_cast = add(x = var_3849_cast, y = inputs_107_cast)[name = tensor("inputs_109_cast")]; + tensor var_3859 = const()[name = tensor("op_3859"), val = tensor([1])]; + tensor channels_mean_109_cast = reduce_mean(axes = var_3859, keep_dims = var_1181, x = inputs_109_cast)[name = tensor("channels_mean_109_cast")]; + tensor zero_mean_109_cast = sub(x = 
inputs_109_cast, y = channels_mean_109_cast)[name = tensor("zero_mean_109_cast")]; + tensor zero_mean_sq_109_cast = mul(x = zero_mean_109_cast, y = zero_mean_109_cast)[name = tensor("zero_mean_sq_109_cast")]; + tensor var_3863 = const()[name = tensor("op_3863"), val = tensor([1])]; + tensor var_3864_cast = reduce_mean(axes = var_3863, keep_dims = var_1181, x = zero_mean_sq_109_cast)[name = tensor("op_3864_cast")]; + tensor var_3865_to_fp16 = const()[name = tensor("op_3865_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3866_cast = add(x = var_3864_cast, y = var_3865_to_fp16)[name = tensor("op_3866_cast")]; + tensor denom_109_epsilon_0_to_fp16 = const()[name = tensor("denom_109_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_109_cast = rsqrt(epsilon = denom_109_epsilon_0_to_fp16, x = var_3866_cast)[name = tensor("denom_109_cast")]; + tensor out_109_cast = mul(x = zero_mean_109_cast, y = denom_109_cast)[name = tensor("out_109_cast")]; + tensor var_3870_to_fp16 = const()[name = tensor("op_3870_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(267506240)))]; + tensor var_3871_cast = add(x = out_109_cast, y = var_3870_to_fp16)[name = tensor("op_3871_cast")]; + tensor var_3873_to_fp16 = const()[name = tensor("op_3873_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(267508864)))]; + tensor hidden_states_161_cast = mul(x = var_3871_cast, y = var_3873_to_fp16)[name = tensor("hidden_states_161_cast")]; + tensor var_3880 = const()[name = tensor("op_3880"), val = tensor([1, 1])]; + tensor var_3882 = const()[name = tensor("op_3882"), val = tensor([1, 1])]; + tensor q_73_pad_type_0 = const()[name = tensor("q_73_pad_type_0"), val = tensor("custom")]; + tensor q_73_pad_0 = const()[name = tensor("q_73_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(267511488))), lut = tensor([-0x1.3dcp-5, -0x1.7dp-7, 0x1.7f8p-7, 0x1.3e8p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_73_cast = conv(dilations = var_3882, groups = var_1186, pad = q_73_pad_0, pad_type = q_73_pad_type_0, strides = var_3880, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_161_cast)[name = tensor("q_73_cast")]; + tensor var_3886 = const()[name = tensor("op_3886"), val = tensor([1, 1])]; + tensor var_3888 = const()[name = tensor("op_3888"), val = tensor([1, 1])]; + tensor k_73_pad_type_0 = const()[name = tensor("k_73_pad_type_0"), val = tensor("custom")]; + tensor k_73_pad_0 = const()[name = tensor("k_73_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(267921152))), lut = tensor([-0x1.514p-6, 0x1.52p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_73_cast = conv(dilations = var_3888, groups = var_1186, pad = k_73_pad_0, pad_type = k_73_pad_type_0, strides = var_3886, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = 
hidden_states_161_cast)[name = tensor("k_73_cast")]; + tensor var_3892 = const()[name = tensor("op_3892"), val = tensor([1, 1])]; + tensor var_3894 = const()[name = tensor("op_3894"), val = tensor([1, 1])]; + tensor v_73_pad_type_0 = const()[name = tensor("v_73_pad_type_0"), val = tensor("custom")]; + tensor v_73_pad_0 = const()[name = tensor("v_73_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(268126016))), lut = tensor([-0x1.2ep-5, -0x1.6d4p-7, 0x1.6acp-7, 0x1.2d8p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_73_cast = conv(dilations = var_3894, groups = var_1186, pad = v_73_pad_0, pad_type = v_73_pad_type_0, strides = var_3892, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_161_cast)[name = tensor("v_73_cast")]; + tensor var_3898 = const()[name = tensor("op_3898"), val = tensor([2, 20, 64, -1])]; + tensor var_3899_cast = reshape(shape = var_3898, x = q_73_cast)[name = tensor("op_3899_cast")]; + tensor var_3900 = const()[name = tensor("op_3900"), val = tensor([2, 20, 64, -1])]; + tensor var_3901_cast = reshape(shape = var_3900, x = k_73_cast)[name = tensor("op_3901_cast")]; + tensor var_3902 = const()[name = tensor("op_3902"), val = tensor([2, 20, 64, -1])]; + tensor var_3903_cast = reshape(shape = var_3902, x = v_73_cast)[name = tensor("op_3903_cast")]; + tensor attn_weights_145_transpose_x_0 = const()[name = tensor("attn_weights_145_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_145_transpose_y_0 = const()[name = tensor("attn_weights_145_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_145_cast = matmul(transpose_x = attn_weights_145_transpose_x_0, transpose_y = attn_weights_145_transpose_y_0, x = var_3899_cast, y = var_3901_cast)[name = tensor("attn_weights_145_cast")]; + tensor attn_weights_147_cast = mul(x = attn_weights_145_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_147_cast")]; + tensor var_3907_cast = softmax(axis = var_1170, x = attn_weights_147_cast)[name = tensor("op_3907_cast")]; + tensor attn_73_transpose_x_0 = const()[name = tensor("attn_73_transpose_x_0"), val = tensor(false)]; + tensor attn_73_transpose_y_0 = const()[name = tensor("attn_73_transpose_y_0"), val = tensor(true)]; + tensor attn_73_cast = matmul(transpose_x = attn_73_transpose_x_0, transpose_y = attn_73_transpose_y_0, x = var_3903_cast, y = var_3907_cast)[name = tensor("attn_73_cast")]; + tensor var_3911 = const()[name = tensor("op_3911"), val = tensor([2, 1280, 1, -1])]; + tensor input_261_cast = reshape(shape = var_3911, x = attn_73_cast)[name = tensor("input_261_cast")]; + tensor var_3916 = const()[name = tensor("op_3916"), val = tensor([1, 1])]; + tensor var_3918 = const()[name = tensor("op_3918"), val = tensor([1, 1])]; + tensor var_3920_pad_type_0 = const()[name = tensor("op_3920_pad_type_0"), val = tensor("custom")]; + tensor var_3920_pad_0 = const()[name = tensor("op_3920_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(268535680))), lut = tensor([-0x1.2c4p-5, -0x1.6c8p-7, 0x1.64p-7, 
0x1.2a8p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(268945344)))]; + tensor var_3920_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_3918, groups = var_1186, pad = var_3920_pad_0, pad_type = var_3920_pad_type_0, strides = var_3916, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_261_cast)[name = tensor("op_3920_cast")]; + tensor inputs_111_cast = add(x = var_3920_cast, y = inputs_109_cast)[name = tensor("inputs_111_cast")]; + tensor var_3924 = const()[name = tensor("op_3924"), val = tensor([1])]; + tensor channels_mean_111_cast = reduce_mean(axes = var_3924, keep_dims = var_1181, x = inputs_111_cast)[name = tensor("channels_mean_111_cast")]; + tensor zero_mean_111_cast = sub(x = inputs_111_cast, y = channels_mean_111_cast)[name = tensor("zero_mean_111_cast")]; + tensor zero_mean_sq_111_cast = mul(x = zero_mean_111_cast, y = zero_mean_111_cast)[name = tensor("zero_mean_sq_111_cast")]; + tensor var_3928 = const()[name = tensor("op_3928"), val = tensor([1])]; + tensor var_3929_cast = reduce_mean(axes = var_3928, keep_dims = var_1181, x = zero_mean_sq_111_cast)[name = tensor("op_3929_cast")]; + tensor var_3930_to_fp16 = const()[name = tensor("op_3930_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3931_cast = add(x = var_3929_cast, y = var_3930_to_fp16)[name = tensor("op_3931_cast")]; + tensor denom_111_epsilon_0_to_fp16 = const()[name = tensor("denom_111_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_111_cast = rsqrt(epsilon = denom_111_epsilon_0_to_fp16, x = var_3931_cast)[name = tensor("denom_111_cast")]; + tensor out_111_cast = mul(x = zero_mean_111_cast, y = denom_111_cast)[name = tensor("out_111_cast")]; + tensor var_3935_to_fp16 = const()[name = tensor("op_3935_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(268947968)))]; + tensor var_3936_cast = add(x = out_111_cast, y = var_3935_to_fp16)[name = tensor("op_3936_cast")]; + tensor var_3938_to_fp16 = const()[name = tensor("op_3938_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(268950592)))]; + tensor hidden_states_163_cast = mul(x = var_3936_cast, y = var_3938_to_fp16)[name = tensor("hidden_states_163_cast")]; + tensor var_3945 = const()[name = tensor("op_3945"), val = tensor([1, 1])]; + tensor var_3947 = const()[name = tensor("op_3947"), val = tensor([1, 1])]; + tensor q_75_pad_type_0 = const()[name = tensor("q_75_pad_type_0"), val = tensor("custom")]; + tensor q_75_pad_0 = const()[name = tensor("q_75_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(268953216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(269772480))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_75_cast = 
conv(dilations = var_3947, groups = var_1186, pad = q_75_pad_0, pad_type = q_75_pad_type_0, strides = var_3945, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_163_cast)[name = tensor("q_75_cast")]; + tensor var_3951 = const()[name = tensor("op_3951"), val = tensor([1, 1])]; + tensor var_3953 = const()[name = tensor("op_3953"), val = tensor([1, 1])]; + tensor k_75_pad_type_0 = const()[name = tensor("k_75_pad_type_0"), val = tensor("custom")]; + tensor k_75_pad_0 = const()[name = tensor("k_75_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(269772608))), lut = tensor([-0x1.d3p-7, 0x1.d54p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_75_cast = conv(dilations = var_3953, groups = var_1186, pad = k_75_pad_0, pad_type = k_75_pad_type_0, strides = var_3951, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_75_cast")]; + tensor var_3957 = const()[name = tensor("op_3957"), val = tensor([1, 1])]; + tensor var_3959 = const()[name = tensor("op_3959"), val = tensor([1, 1])]; + tensor v_75_pad_type_0 = const()[name = tensor("v_75_pad_type_0"), val = tensor("custom")]; + tensor v_75_pad_0 = const()[name = tensor("v_75_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(270100352))), lut = tensor([-0x1.17cp-6, 0x1.188p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_75_cast = conv(dilations = var_3959, groups = var_1186, pad = v_75_pad_0, pad_type = v_75_pad_type_0, strides = var_3957, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_75_cast")]; + tensor var_3963 = const()[name = tensor("op_3963"), val = tensor([2, 20, 64, -1])]; + tensor var_3964_cast = reshape(shape = var_3963, x = q_75_cast)[name = tensor("op_3964_cast")]; + tensor var_3965 = const()[name = tensor("op_3965"), val = tensor([2, 20, 64, -1])]; + tensor var_3966_cast = reshape(shape = var_3965, x = k_75_cast)[name = tensor("op_3966_cast")]; + tensor var_3967 = const()[name = tensor("op_3967"), val = tensor([2, 20, 64, -1])]; + tensor var_3968_cast = reshape(shape = var_3967, x = v_75_cast)[name = tensor("op_3968_cast")]; + tensor attn_weights_149_transpose_x_0 = const()[name = tensor("attn_weights_149_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_149_transpose_y_0 = const()[name = tensor("attn_weights_149_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_149_cast = matmul(transpose_x = attn_weights_149_transpose_x_0, transpose_y = attn_weights_149_transpose_y_0, x = var_3964_cast, y = var_3966_cast)[name = tensor("attn_weights_149_cast")]; + tensor attn_weights_151_cast = mul(x = attn_weights_149_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_151_cast")]; + tensor var_3972_cast = softmax(axis = var_1170, x = attn_weights_151_cast)[name = 
tensor("op_3972_cast")]; + tensor attn_75_transpose_x_0 = const()[name = tensor("attn_75_transpose_x_0"), val = tensor(false)]; + tensor attn_75_transpose_y_0 = const()[name = tensor("attn_75_transpose_y_0"), val = tensor(true)]; + tensor attn_75_cast = matmul(transpose_x = attn_75_transpose_x_0, transpose_y = attn_75_transpose_y_0, x = var_3968_cast, y = var_3972_cast)[name = tensor("attn_75_cast")]; + tensor var_3976 = const()[name = tensor("op_3976"), val = tensor([2, 1280, 1, -1])]; + tensor input_263_cast = reshape(shape = var_3976, x = attn_75_cast)[name = tensor("input_263_cast")]; + tensor var_3981 = const()[name = tensor("op_3981"), val = tensor([1, 1])]; + tensor var_3983 = const()[name = tensor("op_3983"), val = tensor([1, 1])]; + tensor var_3985_pad_type_0 = const()[name = tensor("op_3985_pad_type_0"), val = tensor("custom")]; + tensor var_3985_pad_0 = const()[name = tensor("op_3985_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(270428096))), lut = tensor([-0x1.5bp-7, 0x1.5a8p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(270632960)))]; + tensor var_3985_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_3983, groups = var_1186, pad = var_3985_pad_0, pad_type = var_3985_pad_type_0, strides = var_3981, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_263_cast)[name = tensor("op_3985_cast")]; + tensor inputs_113_cast = add(x = var_3985_cast, y = inputs_111_cast)[name = tensor("inputs_113_cast")]; + tensor var_3989 = const()[name = tensor("op_3989"), val = tensor([1])]; + tensor channels_mean_113_cast = reduce_mean(axes = var_3989, keep_dims = var_1181, x = inputs_113_cast)[name = tensor("channels_mean_113_cast")]; + tensor zero_mean_113_cast = sub(x = inputs_113_cast, y = channels_mean_113_cast)[name = tensor("zero_mean_113_cast")]; + tensor zero_mean_sq_113_cast = mul(x = zero_mean_113_cast, y = zero_mean_113_cast)[name = tensor("zero_mean_sq_113_cast")]; + tensor var_3993 = const()[name = tensor("op_3993"), val = tensor([1])]; + tensor var_3994_cast = reduce_mean(axes = var_3993, keep_dims = var_1181, x = zero_mean_sq_113_cast)[name = tensor("op_3994_cast")]; + tensor var_3995_to_fp16 = const()[name = tensor("op_3995_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3996_cast = add(x = var_3994_cast, y = var_3995_to_fp16)[name = tensor("op_3996_cast")]; + tensor denom_113_epsilon_0_to_fp16 = const()[name = tensor("denom_113_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_113_cast = rsqrt(epsilon = denom_113_epsilon_0_to_fp16, x = var_3996_cast)[name = tensor("denom_113_cast")]; + tensor out_113_cast = mul(x = zero_mean_113_cast, y = denom_113_cast)[name = tensor("out_113_cast")]; + tensor var_4000_to_fp16 = const()[name = tensor("op_4000_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(270635584)))]; + tensor var_4001_cast = 
add(x = out_113_cast, y = var_4000_to_fp16)[name = tensor("op_4001_cast")]; + tensor var_4003_to_fp16 = const()[name = tensor("op_4003_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(270638208)))]; + tensor input_265_cast = mul(x = var_4001_cast, y = var_4003_to_fp16)[name = tensor("input_265_cast")]; + tensor var_4011 = const()[name = tensor("op_4011"), val = tensor([1, 1])]; + tensor var_4013 = const()[name = tensor("op_4013"), val = tensor([1, 1])]; + tensor var_4015_pad_type_0 = const()[name = tensor("op_4015_pad_type_0"), val = tensor("custom")]; + tensor var_4015_pad_0 = const()[name = tensor("op_4015_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(270640832))), lut = tensor([-0x1.584p-5, -0x1.9d8p-7, 0x1.9bcp-7, 0x1.58p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(273917696)))]; + tensor var_4015_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_4013, groups = var_1186, pad = var_4015_pad_0, pad_type = var_4015_pad_type_0, strides = var_4011, weight = down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_265_cast)[name = tensor("op_4015_cast")]; + tensor var_4016_split_sizes_0 = const()[name = tensor("op_4016_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4016_axis_0 = const()[name = tensor("op_4016_axis_0"), val = tensor(1)]; + tensor var_4016_cast_0, tensor var_4016_cast_1 = split(axis = var_4016_axis_0, split_sizes = var_4016_split_sizes_0, x = var_4015_cast)[name = tensor("op_4016_cast")]; + tensor var_4018_mode_0 = const()[name = tensor("op_4018_mode_0"), val = tensor("EXACT")]; + tensor var_4018_cast = gelu(mode = var_4018_mode_0, x = var_4016_cast_1)[name = tensor("op_4018_cast")]; + tensor input_267_cast = mul(x = var_4016_cast_0, y = var_4018_cast)[name = tensor("input_267_cast")]; + tensor var_4022 = const()[name = tensor("op_4022"), val = tensor([1, 1])]; + tensor var_4024 = const()[name = tensor("op_4024"), val = tensor([1, 1])]; + tensor var_4026_pad_type_0 = const()[name = tensor("op_4026_pad_type_0"), val = tensor("custom")]; + tensor var_4026_pad_0 = const()[name = tensor("op_4026_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(273938240))), lut = tensor([-0x1.508p-5, -0x1.92cp-7, 0x1.92p-7, 0x1.504p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(275576704)))]; + tensor 
var_4026_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_4024, groups = var_1186, pad = var_4026_pad_0, pad_type = var_4026_pad_type_0, strides = var_4022, weight = down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_267_cast)[name = tensor("op_4026_cast")]; + tensor inputs_115_cast = add(x = var_4026_cast, y = inputs_113_cast)[name = tensor("inputs_115_cast")]; + tensor var_4036 = const()[name = tensor("op_4036"), val = tensor([1])]; + tensor channels_mean_115_cast = reduce_mean(axes = var_4036, keep_dims = var_1181, x = inputs_115_cast)[name = tensor("channels_mean_115_cast")]; + tensor zero_mean_115_cast = sub(x = inputs_115_cast, y = channels_mean_115_cast)[name = tensor("zero_mean_115_cast")]; + tensor zero_mean_sq_115_cast = mul(x = zero_mean_115_cast, y = zero_mean_115_cast)[name = tensor("zero_mean_sq_115_cast")]; + tensor var_4040 = const()[name = tensor("op_4040"), val = tensor([1])]; + tensor var_4041_cast = reduce_mean(axes = var_4040, keep_dims = var_1181, x = zero_mean_sq_115_cast)[name = tensor("op_4041_cast")]; + tensor var_4042_to_fp16 = const()[name = tensor("op_4042_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4043_cast = add(x = var_4041_cast, y = var_4042_to_fp16)[name = tensor("op_4043_cast")]; + tensor denom_115_epsilon_0_to_fp16 = const()[name = tensor("denom_115_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_115_cast = rsqrt(epsilon = denom_115_epsilon_0_to_fp16, x = var_4043_cast)[name = tensor("denom_115_cast")]; + tensor out_115_cast = mul(x = zero_mean_115_cast, y = denom_115_cast)[name = tensor("out_115_cast")]; + tensor var_4047_to_fp16 = const()[name = tensor("op_4047_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(275579328)))]; + tensor var_4048_cast = add(x = out_115_cast, y = var_4047_to_fp16)[name = tensor("op_4048_cast")]; + tensor var_4050_to_fp16 = const()[name = tensor("op_4050_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(275581952)))]; + tensor hidden_states_167_cast = mul(x = var_4048_cast, y = var_4050_to_fp16)[name = tensor("hidden_states_167_cast")]; + tensor var_4057 = const()[name = tensor("op_4057"), val = tensor([1, 1])]; + tensor var_4059 = const()[name = tensor("op_4059"), val = tensor([1, 1])]; + tensor q_77_pad_type_0 = const()[name = tensor("q_77_pad_type_0"), val = tensor("custom")]; + tensor q_77_pad_0 = const()[name = tensor("q_77_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(275584576))), lut = tensor([-0x1.538p-6, 0x1.548p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_77_cast = conv(dilations = var_4059, groups = var_1186, pad = q_77_pad_0, pad_type = q_77_pad_type_0, strides = var_4057, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_167_cast)[name = tensor("q_77_cast")]; + tensor var_4063 = const()[name = tensor("op_4063"), val = tensor([1, 1])]; + tensor var_4065 = const()[name = tensor("op_4065"), val = tensor([1, 1])]; + tensor k_77_pad_type_0 = const()[name = tensor("k_77_pad_type_0"), val = tensor("custom")]; + tensor k_77_pad_0 = 
const()[name = tensor("k_77_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(275789440))), lut = tensor([-0x1.528p-6, 0x1.528p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_77_cast = conv(dilations = var_4065, groups = var_1186, pad = k_77_pad_0, pad_type = k_77_pad_type_0, strides = var_4063, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_167_cast)[name = tensor("k_77_cast")]; + tensor var_4069 = const()[name = tensor("op_4069"), val = tensor([1, 1])]; + tensor var_4071 = const()[name = tensor("op_4071"), val = tensor([1, 1])]; + tensor v_77_pad_type_0 = const()[name = tensor("v_77_pad_type_0"), val = tensor("custom")]; + tensor v_77_pad_0 = const()[name = tensor("v_77_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(275994304))), lut = tensor([-0x1.2d8p-5, -0x1.6b4p-7, 0x1.6bp-7, 0x1.2d4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_77_cast = conv(dilations = var_4071, groups = var_1186, pad = v_77_pad_0, pad_type = v_77_pad_type_0, strides = var_4069, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_167_cast)[name = tensor("v_77_cast")]; + tensor var_4075 = const()[name = tensor("op_4075"), val = tensor([2, 20, 64, -1])]; + tensor var_4076_cast = reshape(shape = var_4075, x = q_77_cast)[name = tensor("op_4076_cast")]; + tensor var_4077 = const()[name = tensor("op_4077"), val = tensor([2, 20, 64, -1])]; + tensor var_4078_cast = reshape(shape = var_4077, x = k_77_cast)[name = tensor("op_4078_cast")]; + tensor var_4079 = const()[name = tensor("op_4079"), val = tensor([2, 20, 64, -1])]; + tensor var_4080_cast = reshape(shape = var_4079, x = v_77_cast)[name = tensor("op_4080_cast")]; + tensor attn_weights_153_transpose_x_0 = const()[name = tensor("attn_weights_153_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_153_transpose_y_0 = const()[name = tensor("attn_weights_153_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_153_cast = matmul(transpose_x = attn_weights_153_transpose_x_0, transpose_y = attn_weights_153_transpose_y_0, x = var_4076_cast, y = var_4078_cast)[name = tensor("attn_weights_153_cast")]; + tensor attn_weights_155_cast = mul(x = attn_weights_153_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_155_cast")]; + tensor var_4084_cast = softmax(axis = var_1170, x = attn_weights_155_cast)[name = tensor("op_4084_cast")]; + tensor attn_77_transpose_x_0 = const()[name = tensor("attn_77_transpose_x_0"), val = tensor(false)]; + tensor attn_77_transpose_y_0 = const()[name = tensor("attn_77_transpose_y_0"), val = tensor(true)]; + tensor attn_77_cast = matmul(transpose_x = attn_77_transpose_x_0, transpose_y = attn_77_transpose_y_0, x = var_4080_cast, y = var_4084_cast)[name = tensor("attn_77_cast")]; + tensor var_4088 = const()[name = tensor("op_4088"), val = tensor([2, 1280, 1, -1])]; + tensor input_269_cast = 
reshape(shape = var_4088, x = attn_77_cast)[name = tensor("input_269_cast")]; + tensor var_4093 = const()[name = tensor("op_4093"), val = tensor([1, 1])]; + tensor var_4095 = const()[name = tensor("op_4095"), val = tensor([1, 1])]; + tensor var_4097_pad_type_0 = const()[name = tensor("op_4097_pad_type_0"), val = tensor("custom")]; + tensor var_4097_pad_0 = const()[name = tensor("op_4097_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(276403968))), lut = tensor([-0x1.3a4p-6, 0x1.3ap-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(276608832)))]; + tensor var_4097_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_4095, groups = var_1186, pad = var_4097_pad_0, pad_type = var_4097_pad_type_0, strides = var_4093, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_269_cast)[name = tensor("op_4097_cast")]; + tensor inputs_117_cast = add(x = var_4097_cast, y = inputs_115_cast)[name = tensor("inputs_117_cast")]; + tensor var_4101 = const()[name = tensor("op_4101"), val = tensor([1])]; + tensor channels_mean_117_cast = reduce_mean(axes = var_4101, keep_dims = var_1181, x = inputs_117_cast)[name = tensor("channels_mean_117_cast")]; + tensor zero_mean_117_cast = sub(x = inputs_117_cast, y = channels_mean_117_cast)[name = tensor("zero_mean_117_cast")]; + tensor zero_mean_sq_117_cast = mul(x = zero_mean_117_cast, y = zero_mean_117_cast)[name = tensor("zero_mean_sq_117_cast")]; + tensor var_4105 = const()[name = tensor("op_4105"), val = tensor([1])]; + tensor var_4106_cast = reduce_mean(axes = var_4105, keep_dims = var_1181, x = zero_mean_sq_117_cast)[name = tensor("op_4106_cast")]; + tensor var_4107_to_fp16 = const()[name = tensor("op_4107_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4108_cast = add(x = var_4106_cast, y = var_4107_to_fp16)[name = tensor("op_4108_cast")]; + tensor denom_117_epsilon_0_to_fp16 = const()[name = tensor("denom_117_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_117_cast = rsqrt(epsilon = denom_117_epsilon_0_to_fp16, x = var_4108_cast)[name = tensor("denom_117_cast")]; + tensor out_117_cast = mul(x = zero_mean_117_cast, y = denom_117_cast)[name = tensor("out_117_cast")]; + tensor var_4112_to_fp16 = const()[name = tensor("op_4112_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(276611456)))]; + tensor var_4113_cast = add(x = out_117_cast, y = var_4112_to_fp16)[name = tensor("op_4113_cast")]; + tensor var_4115_to_fp16 = const()[name = tensor("op_4115_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(276614080)))]; + tensor hidden_states_169_cast = mul(x = var_4113_cast, y = var_4115_to_fp16)[name = tensor("hidden_states_169_cast")]; + tensor var_4122 = const()[name = tensor("op_4122"), val = tensor([1, 1])]; + tensor var_4124 = const()[name = tensor("op_4124"), val = tensor([1, 
1])]; + tensor q_79_pad_type_0 = const()[name = tensor("q_79_pad_type_0"), val = tensor("custom")]; + tensor q_79_pad_0 = const()[name = tensor("q_79_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(276616704))), lut = tensor([-0x1.0fcp-6, 0x1.0fp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_79_cast = conv(dilations = var_4124, groups = var_1186, pad = q_79_pad_0, pad_type = q_79_pad_type_0, strides = var_4122, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_169_cast)[name = tensor("q_79_cast")]; + tensor var_4128 = const()[name = tensor("op_4128"), val = tensor([1, 1])]; + tensor var_4130 = const()[name = tensor("op_4130"), val = tensor([1, 1])]; + tensor k_79_pad_type_0 = const()[name = tensor("k_79_pad_type_0"), val = tensor("custom")]; + tensor k_79_pad_0 = const()[name = tensor("k_79_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(276821568))), lut = tensor([-0x1.b3p-7, 0x1.b38p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_79_cast = conv(dilations = var_4130, groups = var_1186, pad = k_79_pad_0, pad_type = k_79_pad_type_0, strides = var_4128, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_79_cast")]; + tensor var_4134 = const()[name = tensor("op_4134"), val = tensor([1, 1])]; + tensor var_4136 = const()[name = tensor("op_4136"), val = tensor([1, 1])]; + tensor v_79_pad_type_0 = const()[name = tensor("v_79_pad_type_0"), val = tensor("custom")]; + tensor v_79_pad_0 = const()[name = tensor("v_79_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(277149312))), lut = tensor([-0x1.12cp-6, 0x1.12p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_79_cast = conv(dilations = var_4136, groups = var_1186, pad = v_79_pad_0, pad_type = v_79_pad_type_0, strides = var_4134, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_79_cast")]; + tensor var_4140 = const()[name = tensor("op_4140"), val = tensor([2, 20, 64, -1])]; + tensor var_4141_cast = reshape(shape = var_4140, x = q_79_cast)[name = tensor("op_4141_cast")]; + tensor var_4142 = const()[name = tensor("op_4142"), val = tensor([2, 20, 64, -1])]; + tensor var_4143_cast = reshape(shape = var_4142, x = k_79_cast)[name = tensor("op_4143_cast")]; + tensor var_4144 = const()[name = tensor("op_4144"), val = tensor([2, 20, 64, -1])]; + tensor var_4145_cast = reshape(shape = var_4144, x = v_79_cast)[name = tensor("op_4145_cast")]; + tensor attn_weights_157_transpose_x_0 = const()[name = 
tensor("attn_weights_157_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_157_transpose_y_0 = const()[name = tensor("attn_weights_157_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_157_cast = matmul(transpose_x = attn_weights_157_transpose_x_0, transpose_y = attn_weights_157_transpose_y_0, x = var_4141_cast, y = var_4143_cast)[name = tensor("attn_weights_157_cast")]; + tensor attn_weights_159_cast = mul(x = attn_weights_157_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_159_cast")]; + tensor var_4149_cast = softmax(axis = var_1170, x = attn_weights_159_cast)[name = tensor("op_4149_cast")]; + tensor attn_79_transpose_x_0 = const()[name = tensor("attn_79_transpose_x_0"), val = tensor(false)]; + tensor attn_79_transpose_y_0 = const()[name = tensor("attn_79_transpose_y_0"), val = tensor(true)]; + tensor attn_79_cast = matmul(transpose_x = attn_79_transpose_x_0, transpose_y = attn_79_transpose_y_0, x = var_4145_cast, y = var_4149_cast)[name = tensor("attn_79_cast")]; + tensor var_4153 = const()[name = tensor("op_4153"), val = tensor([2, 1280, 1, -1])]; + tensor input_271_cast = reshape(shape = var_4153, x = attn_79_cast)[name = tensor("input_271_cast")]; + tensor var_4158 = const()[name = tensor("op_4158"), val = tensor([1, 1])]; + tensor var_4160 = const()[name = tensor("op_4160"), val = tensor([1, 1])]; + tensor var_4162_pad_type_0 = const()[name = tensor("op_4162_pad_type_0"), val = tensor("custom")]; + tensor var_4162_pad_0 = const()[name = tensor("op_4162_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(277477056))), lut = tensor([-0x1.578p-7, 0x1.57cp-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(277681920)))]; + tensor var_4162_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_4160, groups = var_1186, pad = var_4162_pad_0, pad_type = var_4162_pad_type_0, strides = var_4158, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_271_cast)[name = tensor("op_4162_cast")]; + tensor inputs_119_cast = add(x = var_4162_cast, y = inputs_117_cast)[name = tensor("inputs_119_cast")]; + tensor var_4166 = const()[name = tensor("op_4166"), val = tensor([1])]; + tensor channels_mean_119_cast = reduce_mean(axes = var_4166, keep_dims = var_1181, x = inputs_119_cast)[name = tensor("channels_mean_119_cast")]; + tensor zero_mean_119_cast = sub(x = inputs_119_cast, y = channels_mean_119_cast)[name = tensor("zero_mean_119_cast")]; + tensor zero_mean_sq_119_cast = mul(x = zero_mean_119_cast, y = zero_mean_119_cast)[name = tensor("zero_mean_sq_119_cast")]; + tensor var_4170 = const()[name = tensor("op_4170"), val = tensor([1])]; + tensor var_4171_cast = reduce_mean(axes = var_4170, keep_dims = var_1181, x = zero_mean_sq_119_cast)[name = tensor("op_4171_cast")]; + tensor var_4172_to_fp16 = const()[name = tensor("op_4172_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4173_cast = add(x = 
var_4171_cast, y = var_4172_to_fp16)[name = tensor("op_4173_cast")]; + tensor denom_119_epsilon_0_to_fp16 = const()[name = tensor("denom_119_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_119_cast = rsqrt(epsilon = denom_119_epsilon_0_to_fp16, x = var_4173_cast)[name = tensor("denom_119_cast")]; + tensor out_119_cast = mul(x = zero_mean_119_cast, y = denom_119_cast)[name = tensor("out_119_cast")]; + tensor var_4177_to_fp16 = const()[name = tensor("op_4177_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(277684544)))]; + tensor var_4178_cast = add(x = out_119_cast, y = var_4177_to_fp16)[name = tensor("op_4178_cast")]; + tensor var_4180_to_fp16 = const()[name = tensor("op_4180_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(277687168)))]; + tensor input_273_cast = mul(x = var_4178_cast, y = var_4180_to_fp16)[name = tensor("input_273_cast")]; + tensor var_4188 = const()[name = tensor("op_4188"), val = tensor([1, 1])]; + tensor var_4190 = const()[name = tensor("op_4190"), val = tensor([1, 1])]; + tensor var_4192_pad_type_0 = const()[name = tensor("op_4192_pad_type_0"), val = tensor("custom")]; + tensor var_4192_pad_0 = const()[name = tensor("op_4192_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(277689792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(284243456))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(284243584)))]; + tensor var_4192_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_4190, groups = var_1186, pad = var_4192_pad_0, pad_type = var_4192_pad_type_0, strides = var_4188, weight = down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_273_cast)[name = tensor("op_4192_cast")]; + tensor var_4193_split_sizes_0 = const()[name = tensor("op_4193_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4193_axis_0 = const()[name = tensor("op_4193_axis_0"), val = tensor(1)]; + tensor var_4193_cast_0, tensor var_4193_cast_1 = split(axis = var_4193_axis_0, split_sizes = var_4193_split_sizes_0, x = var_4192_cast)[name = tensor("op_4193_cast")]; + tensor var_4195_mode_0 = const()[name = tensor("op_4195_mode_0"), val = tensor("EXACT")]; + tensor var_4195_cast = gelu(mode = var_4195_mode_0, x = var_4193_cast_1)[name = tensor("op_4195_cast")]; + tensor input_275_cast = mul(x = var_4193_cast_0, y = var_4195_cast)[name = tensor("input_275_cast")]; + tensor var_4199 = const()[name = tensor("op_4199"), val = tensor([1, 1])]; + tensor var_4201 = const()[name = tensor("op_4201"), val = tensor([1, 1])]; + tensor var_4203_pad_type_0 = const()[name = tensor("op_4203_pad_type_0"), val = tensor("custom")]; + tensor var_4203_pad_0 = const()[name = tensor("op_4203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(284264128))), lut = tensor([-0x1.52cp-5, -0x1.94cp-7, 0x1.94cp-7, 0x1.52cp-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(285902592)))]; + tensor var_4203_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_4201, groups = var_1186, pad = var_4203_pad_0, pad_type = var_4203_pad_type_0, strides = var_4199, weight = down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_275_cast)[name = tensor("op_4203_cast")]; + tensor inputs_121_cast = add(x = var_4203_cast, y = inputs_119_cast)[name = tensor("inputs_121_cast")]; + tensor var_4213 = const()[name = tensor("op_4213"), val = tensor([1])]; + tensor channels_mean_121_cast = reduce_mean(axes = var_4213, keep_dims = var_1181, x = inputs_121_cast)[name = tensor("channels_mean_121_cast")]; + tensor zero_mean_121_cast = sub(x = inputs_121_cast, y = channels_mean_121_cast)[name = tensor("zero_mean_121_cast")]; + tensor zero_mean_sq_121_cast = mul(x = zero_mean_121_cast, y = zero_mean_121_cast)[name = tensor("zero_mean_sq_121_cast")]; + tensor var_4217 = const()[name = tensor("op_4217"), val = tensor([1])]; + tensor var_4218_cast = reduce_mean(axes = var_4217, keep_dims = var_1181, x = zero_mean_sq_121_cast)[name = tensor("op_4218_cast")]; + tensor var_4219_to_fp16 = const()[name = tensor("op_4219_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4220_cast = add(x = var_4218_cast, y = var_4219_to_fp16)[name = tensor("op_4220_cast")]; + tensor denom_121_epsilon_0_to_fp16 = const()[name = tensor("denom_121_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_121_cast = rsqrt(epsilon = denom_121_epsilon_0_to_fp16, x = var_4220_cast)[name = tensor("denom_121_cast")]; + tensor out_121_cast = mul(x = zero_mean_121_cast, y = denom_121_cast)[name = tensor("out_121_cast")]; + tensor var_4224_to_fp16 = const()[name = tensor("op_4224_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(285905216)))]; + tensor var_4225_cast = add(x = out_121_cast, y = var_4224_to_fp16)[name = tensor("op_4225_cast")]; + tensor var_4227_to_fp16 = const()[name = tensor("op_4227_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(285907840)))]; + tensor hidden_states_173_cast = mul(x = var_4225_cast, y = var_4227_to_fp16)[name = tensor("hidden_states_173_cast")]; + tensor var_4234 = const()[name = tensor("op_4234"), val = tensor([1, 1])]; + tensor var_4236 = const()[name = tensor("op_4236"), val = tensor([1, 1])]; + tensor q_81_pad_type_0 = const()[name = tensor("q_81_pad_type_0"), val = tensor("custom")]; + tensor q_81_pad_0 = const()[name = tensor("q_81_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(285910464))), lut = tensor([-0x1.50cp-6, 
0x1.51p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_81_cast = conv(dilations = var_4236, groups = var_1186, pad = q_81_pad_0, pad_type = q_81_pad_type_0, strides = var_4234, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_173_cast)[name = tensor("q_81_cast")]; + tensor var_4240 = const()[name = tensor("op_4240"), val = tensor([1, 1])]; + tensor var_4242 = const()[name = tensor("op_4242"), val = tensor([1, 1])]; + tensor k_81_pad_type_0 = const()[name = tensor("k_81_pad_type_0"), val = tensor("custom")]; + tensor k_81_pad_0 = const()[name = tensor("k_81_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(286115328))), lut = tensor([-0x1.4fp-6, 0x1.4f8p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_81_cast = conv(dilations = var_4242, groups = var_1186, pad = k_81_pad_0, pad_type = k_81_pad_type_0, strides = var_4240, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_173_cast)[name = tensor("k_81_cast")]; + tensor var_4246 = const()[name = tensor("op_4246"), val = tensor([1, 1])]; + tensor var_4248 = const()[name = tensor("op_4248"), val = tensor([1, 1])]; + tensor v_81_pad_type_0 = const()[name = tensor("v_81_pad_type_0"), val = tensor("custom")]; + tensor v_81_pad_0 = const()[name = tensor("v_81_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(286320192))), lut = tensor([-0x1.324p-5, -0x1.6ecp-7, 0x1.73p-7, 0x1.32cp-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_81_cast = conv(dilations = var_4248, groups = var_1186, pad = v_81_pad_0, pad_type = v_81_pad_type_0, strides = var_4246, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_173_cast)[name = tensor("v_81_cast")]; + tensor var_4252 = const()[name = tensor("op_4252"), val = tensor([2, 20, 64, -1])]; + tensor var_4253_cast = reshape(shape = var_4252, x = q_81_cast)[name = tensor("op_4253_cast")]; + tensor var_4254 = const()[name = tensor("op_4254"), val = tensor([2, 20, 64, -1])]; + tensor var_4255_cast = reshape(shape = var_4254, x = k_81_cast)[name = tensor("op_4255_cast")]; + tensor var_4256 = const()[name = tensor("op_4256"), val = tensor([2, 20, 64, -1])]; + tensor var_4257_cast = reshape(shape = var_4256, x = v_81_cast)[name = tensor("op_4257_cast")]; + tensor attn_weights_161_transpose_x_0 = const()[name = tensor("attn_weights_161_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_161_transpose_y_0 = const()[name = tensor("attn_weights_161_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_161_cast = matmul(transpose_x = attn_weights_161_transpose_x_0, transpose_y = attn_weights_161_transpose_y_0, x = var_4253_cast, y = var_4255_cast)[name = tensor("attn_weights_161_cast")]; + tensor attn_weights_163_cast = 
mul(x = attn_weights_161_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_163_cast")]; + tensor var_4261_cast = softmax(axis = var_1170, x = attn_weights_163_cast)[name = tensor("op_4261_cast")]; + tensor attn_81_transpose_x_0 = const()[name = tensor("attn_81_transpose_x_0"), val = tensor(false)]; + tensor attn_81_transpose_y_0 = const()[name = tensor("attn_81_transpose_y_0"), val = tensor(true)]; + tensor attn_81_cast = matmul(transpose_x = attn_81_transpose_x_0, transpose_y = attn_81_transpose_y_0, x = var_4257_cast, y = var_4261_cast)[name = tensor("attn_81_cast")]; + tensor var_4265 = const()[name = tensor("op_4265"), val = tensor([2, 1280, 1, -1])]; + tensor input_277_cast = reshape(shape = var_4265, x = attn_81_cast)[name = tensor("input_277_cast")]; + tensor var_4270 = const()[name = tensor("op_4270"), val = tensor([1, 1])]; + tensor var_4272 = const()[name = tensor("op_4272"), val = tensor([1, 1])]; + tensor var_4274_pad_type_0 = const()[name = tensor("op_4274_pad_type_0"), val = tensor("custom")]; + tensor var_4274_pad_0 = const()[name = tensor("op_4274_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(286729856))), lut = tensor([-0x1.2e4p-5, -0x1.6bp-7, 0x1.6acp-7, 0x1.2ep-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(287139520)))]; + tensor var_4274_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_4272, groups = var_1186, pad = var_4274_pad_0, pad_type = var_4274_pad_type_0, strides = var_4270, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_277_cast)[name = tensor("op_4274_cast")]; + tensor inputs_123_cast = add(x = var_4274_cast, y = inputs_121_cast)[name = tensor("inputs_123_cast")]; + tensor var_4278 = const()[name = tensor("op_4278"), val = tensor([1])]; + tensor channels_mean_123_cast = reduce_mean(axes = var_4278, keep_dims = var_1181, x = inputs_123_cast)[name = tensor("channels_mean_123_cast")]; + tensor zero_mean_123_cast = sub(x = inputs_123_cast, y = channels_mean_123_cast)[name = tensor("zero_mean_123_cast")]; + tensor zero_mean_sq_123_cast = mul(x = zero_mean_123_cast, y = zero_mean_123_cast)[name = tensor("zero_mean_sq_123_cast")]; + tensor var_4282 = const()[name = tensor("op_4282"), val = tensor([1])]; + tensor var_4283_cast = reduce_mean(axes = var_4282, keep_dims = var_1181, x = zero_mean_sq_123_cast)[name = tensor("op_4283_cast")]; + tensor var_4284_to_fp16 = const()[name = tensor("op_4284_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4285_cast = add(x = var_4283_cast, y = var_4284_to_fp16)[name = tensor("op_4285_cast")]; + tensor denom_123_epsilon_0_to_fp16 = const()[name = tensor("denom_123_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_123_cast = rsqrt(epsilon = denom_123_epsilon_0_to_fp16, x = var_4285_cast)[name = tensor("denom_123_cast")]; + tensor out_123_cast = mul(x = zero_mean_123_cast, y = denom_123_cast)[name = 
tensor("out_123_cast")]; + tensor var_4289_to_fp16 = const()[name = tensor("op_4289_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(287142144)))]; + tensor var_4290_cast = add(x = out_123_cast, y = var_4289_to_fp16)[name = tensor("op_4290_cast")]; + tensor var_4292_to_fp16 = const()[name = tensor("op_4292_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(287144768)))]; + tensor hidden_states_175_cast = mul(x = var_4290_cast, y = var_4292_to_fp16)[name = tensor("hidden_states_175_cast")]; + tensor var_4299 = const()[name = tensor("op_4299"), val = tensor([1, 1])]; + tensor var_4301 = const()[name = tensor("op_4301"), val = tensor([1, 1])]; + tensor q_83_pad_type_0 = const()[name = tensor("q_83_pad_type_0"), val = tensor("custom")]; + tensor q_83_pad_0 = const()[name = tensor("q_83_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(287147392))), lut = tensor([-0x1.e5p-7, 0x1.e64p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_83_cast = conv(dilations = var_4301, groups = var_1186, pad = q_83_pad_0, pad_type = q_83_pad_type_0, strides = var_4299, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_175_cast)[name = tensor("q_83_cast")]; + tensor var_4305 = const()[name = tensor("op_4305"), val = tensor([1, 1])]; + tensor var_4307 = const()[name = tensor("op_4307"), val = tensor([1, 1])]; + tensor k_83_pad_type_0 = const()[name = tensor("k_83_pad_type_0"), val = tensor("custom")]; + tensor k_83_pad_0 = const()[name = tensor("k_83_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(287352256))), lut = tensor([-0x1.798p-7, 0x1.78p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_83_cast = conv(dilations = var_4307, groups = var_1186, pad = k_83_pad_0, pad_type = k_83_pad_type_0, strides = var_4305, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_83_cast")]; + tensor var_4311 = const()[name = tensor("op_4311"), val = tensor([1, 1])]; + tensor var_4313 = const()[name = tensor("op_4313"), val = tensor([1, 1])]; + tensor v_83_pad_type_0 = const()[name = tensor("v_83_pad_type_0"), val = tensor("custom")]; + tensor v_83_pad_0 = const()[name = tensor("v_83_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(287680000))), lut = tensor([-0x1.fb4p-7, 0x1.fap-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_83_cast = conv(dilations = var_4313, groups = var_1186, pad = v_83_pad_0, pad_type = v_83_pad_type_0, strides = var_4311, weight = 
down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_83_cast")]; + tensor var_4317 = const()[name = tensor("op_4317"), val = tensor([2, 20, 64, -1])]; + tensor var_4318_cast = reshape(shape = var_4317, x = q_83_cast)[name = tensor("op_4318_cast")]; + tensor var_4319 = const()[name = tensor("op_4319"), val = tensor([2, 20, 64, -1])]; + tensor var_4320_cast = reshape(shape = var_4319, x = k_83_cast)[name = tensor("op_4320_cast")]; + tensor var_4321 = const()[name = tensor("op_4321"), val = tensor([2, 20, 64, -1])]; + tensor var_4322_cast = reshape(shape = var_4321, x = v_83_cast)[name = tensor("op_4322_cast")]; + tensor attn_weights_165_transpose_x_0 = const()[name = tensor("attn_weights_165_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_165_transpose_y_0 = const()[name = tensor("attn_weights_165_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_165_cast = matmul(transpose_x = attn_weights_165_transpose_x_0, transpose_y = attn_weights_165_transpose_y_0, x = var_4318_cast, y = var_4320_cast)[name = tensor("attn_weights_165_cast")]; + tensor attn_weights_167_cast = mul(x = attn_weights_165_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_167_cast")]; + tensor var_4326_cast = softmax(axis = var_1170, x = attn_weights_167_cast)[name = tensor("op_4326_cast")]; + tensor attn_83_transpose_x_0 = const()[name = tensor("attn_83_transpose_x_0"), val = tensor(false)]; + tensor attn_83_transpose_y_0 = const()[name = tensor("attn_83_transpose_y_0"), val = tensor(true)]; + tensor attn_83_cast = matmul(transpose_x = attn_83_transpose_x_0, transpose_y = attn_83_transpose_y_0, x = var_4322_cast, y = var_4326_cast)[name = tensor("attn_83_cast")]; + tensor var_4330 = const()[name = tensor("op_4330"), val = tensor([2, 1280, 1, -1])]; + tensor input_279_cast = reshape(shape = var_4330, x = attn_83_cast)[name = tensor("input_279_cast")]; + tensor var_4335 = const()[name = tensor("op_4335"), val = tensor([1, 1])]; + tensor var_4337 = const()[name = tensor("op_4337"), val = tensor([1, 1])]; + tensor var_4339_pad_type_0 = const()[name = tensor("op_4339_pad_type_0"), val = tensor("custom")]; + tensor var_4339_pad_0 = const()[name = tensor("op_4339_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(288007744))), lut = tensor([-0x1.3e8p-7, 0x1.3f4p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(288212608)))]; + tensor var_4339_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_4337, groups = var_1186, pad = var_4339_pad_0, pad_type = var_4339_pad_type_0, strides = var_4335, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_279_cast)[name = tensor("op_4339_cast")]; + tensor inputs_125_cast = add(x = var_4339_cast, y = inputs_123_cast)[name = tensor("inputs_125_cast")]; + tensor var_4343 = const()[name = tensor("op_4343"), val = 
tensor([1])]; + tensor channels_mean_125_cast = reduce_mean(axes = var_4343, keep_dims = var_1181, x = inputs_125_cast)[name = tensor("channels_mean_125_cast")]; + tensor zero_mean_125_cast = sub(x = inputs_125_cast, y = channels_mean_125_cast)[name = tensor("zero_mean_125_cast")]; + tensor zero_mean_sq_125_cast = mul(x = zero_mean_125_cast, y = zero_mean_125_cast)[name = tensor("zero_mean_sq_125_cast")]; + tensor var_4347 = const()[name = tensor("op_4347"), val = tensor([1])]; + tensor var_4348_cast = reduce_mean(axes = var_4347, keep_dims = var_1181, x = zero_mean_sq_125_cast)[name = tensor("op_4348_cast")]; + tensor var_4349_to_fp16 = const()[name = tensor("op_4349_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4350_cast = add(x = var_4348_cast, y = var_4349_to_fp16)[name = tensor("op_4350_cast")]; + tensor denom_125_epsilon_0_to_fp16 = const()[name = tensor("denom_125_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_125_cast = rsqrt(epsilon = denom_125_epsilon_0_to_fp16, x = var_4350_cast)[name = tensor("denom_125_cast")]; + tensor out_125_cast = mul(x = zero_mean_125_cast, y = denom_125_cast)[name = tensor("out_125_cast")]; + tensor var_4354_to_fp16 = const()[name = tensor("op_4354_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(288215232)))]; + tensor var_4355_cast = add(x = out_125_cast, y = var_4354_to_fp16)[name = tensor("op_4355_cast")]; + tensor var_4357_to_fp16 = const()[name = tensor("op_4357_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(288217856)))]; + tensor input_281_cast = mul(x = var_4355_cast, y = var_4357_to_fp16)[name = tensor("input_281_cast")]; + tensor var_4365 = const()[name = tensor("op_4365"), val = tensor([1, 1])]; + tensor var_4367 = const()[name = tensor("op_4367"), val = tensor([1, 1])]; + tensor var_4369_pad_type_0 = const()[name = tensor("op_4369_pad_type_0"), val = tensor("custom")]; + tensor var_4369_pad_0 = const()[name = tensor("op_4369_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(288220480))), lut = tensor([-0x1.5fp-5, -0x1.a54p-7, 0x1.a4cp-7, 0x1.5fp-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(291497344)))]; + tensor var_4369_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_4367, groups = var_1186, pad = var_4369_pad_0, pad_type = var_4369_pad_type_0, strides = var_4365, weight = down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_281_cast)[name = tensor("op_4369_cast")]; + tensor var_4370_split_sizes_0 = const()[name = tensor("op_4370_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4370_axis_0 = const()[name = tensor("op_4370_axis_0"), val = tensor(1)]; + tensor var_4370_cast_0, tensor var_4370_cast_1 = split(axis = var_4370_axis_0, split_sizes = var_4370_split_sizes_0, x = var_4369_cast)[name = tensor("op_4370_cast")]; + tensor 
var_4372_mode_0 = const()[name = tensor("op_4372_mode_0"), val = tensor("EXACT")]; + tensor var_4372_cast = gelu(mode = var_4372_mode_0, x = var_4370_cast_1)[name = tensor("op_4372_cast")]; + tensor input_283_cast = mul(x = var_4370_cast_0, y = var_4372_cast)[name = tensor("input_283_cast")]; + tensor var_4376 = const()[name = tensor("op_4376"), val = tensor([1, 1])]; + tensor var_4378 = const()[name = tensor("op_4378"), val = tensor([1, 1])]; + tensor var_4380_pad_type_0 = const()[name = tensor("op_4380_pad_type_0"), val = tensor("custom")]; + tensor var_4380_pad_0 = const()[name = tensor("op_4380_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(291517888))), lut = tensor([-0x1.53p-5, -0x1.96p-7, 0x1.93cp-7, 0x1.528p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293156352)))]; + tensor var_4380_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_4378, groups = var_1186, pad = var_4380_pad_0, pad_type = var_4380_pad_type_0, strides = var_4376, weight = down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_283_cast)[name = tensor("op_4380_cast")]; + tensor inputs_127_cast = add(x = var_4380_cast, y = inputs_125_cast)[name = tensor("inputs_127_cast")]; + tensor var_4390 = const()[name = tensor("op_4390"), val = tensor([1])]; + tensor channels_mean_127_cast = reduce_mean(axes = var_4390, keep_dims = var_1181, x = inputs_127_cast)[name = tensor("channels_mean_127_cast")]; + tensor zero_mean_127_cast = sub(x = inputs_127_cast, y = channels_mean_127_cast)[name = tensor("zero_mean_127_cast")]; + tensor zero_mean_sq_127_cast = mul(x = zero_mean_127_cast, y = zero_mean_127_cast)[name = tensor("zero_mean_sq_127_cast")]; + tensor var_4394 = const()[name = tensor("op_4394"), val = tensor([1])]; + tensor var_4395_cast = reduce_mean(axes = var_4394, keep_dims = var_1181, x = zero_mean_sq_127_cast)[name = tensor("op_4395_cast")]; + tensor var_4396_to_fp16 = const()[name = tensor("op_4396_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4397_cast = add(x = var_4395_cast, y = var_4396_to_fp16)[name = tensor("op_4397_cast")]; + tensor denom_127_epsilon_0_to_fp16 = const()[name = tensor("denom_127_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_127_cast = rsqrt(epsilon = denom_127_epsilon_0_to_fp16, x = var_4397_cast)[name = tensor("denom_127_cast")]; + tensor out_127_cast = mul(x = zero_mean_127_cast, y = denom_127_cast)[name = tensor("out_127_cast")]; + tensor var_4401_to_fp16 = const()[name = tensor("op_4401_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293158976)))]; + tensor var_4402_cast = add(x = out_127_cast, y = var_4401_to_fp16)[name = tensor("op_4402_cast")]; + tensor var_4404_to_fp16 = const()[name = tensor("op_4404_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293161600)))]; + tensor hidden_states_179_cast = mul(x = var_4402_cast, y = 
var_4404_to_fp16)[name = tensor("hidden_states_179_cast")]; + tensor var_4411 = const()[name = tensor("op_4411"), val = tensor([1, 1])]; + tensor var_4413 = const()[name = tensor("op_4413"), val = tensor([1, 1])]; + tensor q_85_pad_type_0 = const()[name = tensor("q_85_pad_type_0"), val = tensor("custom")]; + tensor q_85_pad_0 = const()[name = tensor("q_85_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293164224))), lut = tensor([-0x1.50cp-6, 0x1.52p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_85_cast = conv(dilations = var_4413, groups = var_1186, pad = q_85_pad_0, pad_type = q_85_pad_type_0, strides = var_4411, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_179_cast)[name = tensor("q_85_cast")]; + tensor var_4417 = const()[name = tensor("op_4417"), val = tensor([1, 1])]; + tensor var_4419 = const()[name = tensor("op_4419"), val = tensor([1, 1])]; + tensor k_85_pad_type_0 = const()[name = tensor("k_85_pad_type_0"), val = tensor("custom")]; + tensor k_85_pad_0 = const()[name = tensor("k_85_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293369088))), lut = tensor([-0x1.4f4p-6, 0x1.4ecp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_85_cast = conv(dilations = var_4419, groups = var_1186, pad = k_85_pad_0, pad_type = k_85_pad_type_0, strides = var_4417, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_179_cast)[name = tensor("k_85_cast")]; + tensor var_4423 = const()[name = tensor("op_4423"), val = tensor([1, 1])]; + tensor var_4425 = const()[name = tensor("op_4425"), val = tensor([1, 1])]; + tensor v_85_pad_type_0 = const()[name = tensor("v_85_pad_type_0"), val = tensor("custom")]; + tensor v_85_pad_0 = const()[name = tensor("v_85_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293573952))), lut = tensor([-0x1.37p-5, -0x1.75cp-7, 0x1.794p-7, 0x1.374p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_85_cast = conv(dilations = var_4425, groups = var_1186, pad = v_85_pad_0, pad_type = v_85_pad_type_0, strides = var_4423, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_179_cast)[name = tensor("v_85_cast")]; + tensor var_4429 = const()[name = tensor("op_4429"), val = tensor([2, 20, 64, -1])]; + tensor var_4430_cast = reshape(shape = var_4429, x = q_85_cast)[name = tensor("op_4430_cast")]; + tensor var_4431 = const()[name = tensor("op_4431"), val = tensor([2, 20, 64, -1])]; + tensor var_4432_cast = reshape(shape = var_4431, x = k_85_cast)[name = tensor("op_4432_cast")]; + tensor var_4433 = 
const()[name = tensor("op_4433"), val = tensor([2, 20, 64, -1])]; + tensor var_4434_cast = reshape(shape = var_4433, x = v_85_cast)[name = tensor("op_4434_cast")]; + tensor attn_weights_169_transpose_x_0 = const()[name = tensor("attn_weights_169_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_169_transpose_y_0 = const()[name = tensor("attn_weights_169_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_169_cast = matmul(transpose_x = attn_weights_169_transpose_x_0, transpose_y = attn_weights_169_transpose_y_0, x = var_4430_cast, y = var_4432_cast)[name = tensor("attn_weights_169_cast")]; + tensor attn_weights_171_cast = mul(x = attn_weights_169_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_171_cast")]; + tensor var_4438_cast = softmax(axis = var_1170, x = attn_weights_171_cast)[name = tensor("op_4438_cast")]; + tensor attn_85_transpose_x_0 = const()[name = tensor("attn_85_transpose_x_0"), val = tensor(false)]; + tensor attn_85_transpose_y_0 = const()[name = tensor("attn_85_transpose_y_0"), val = tensor(true)]; + tensor attn_85_cast = matmul(transpose_x = attn_85_transpose_x_0, transpose_y = attn_85_transpose_y_0, x = var_4434_cast, y = var_4438_cast)[name = tensor("attn_85_cast")]; + tensor var_4442 = const()[name = tensor("op_4442"), val = tensor([2, 1280, 1, -1])]; + tensor input_285_cast = reshape(shape = var_4442, x = attn_85_cast)[name = tensor("input_285_cast")]; + tensor var_4447 = const()[name = tensor("op_4447"), val = tensor([1, 1])]; + tensor var_4449 = const()[name = tensor("op_4449"), val = tensor([1, 1])]; + tensor var_4451_pad_type_0 = const()[name = tensor("op_4451_pad_type_0"), val = tensor("custom")]; + tensor var_4451_pad_0 = const()[name = tensor("op_4451_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293983616))), lut = tensor([-0x1.31cp-5, -0x1.6f4p-7, 0x1.704p-7, 0x1.31cp-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(294393280)))]; + tensor var_4451_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_4449, groups = var_1186, pad = var_4451_pad_0, pad_type = var_4451_pad_type_0, strides = var_4447, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_285_cast)[name = tensor("op_4451_cast")]; + tensor inputs_129_cast = add(x = var_4451_cast, y = inputs_127_cast)[name = tensor("inputs_129_cast")]; + tensor var_4455 = const()[name = tensor("op_4455"), val = tensor([1])]; + tensor channels_mean_129_cast = reduce_mean(axes = var_4455, keep_dims = var_1181, x = inputs_129_cast)[name = tensor("channels_mean_129_cast")]; + tensor zero_mean_129_cast = sub(x = inputs_129_cast, y = channels_mean_129_cast)[name = tensor("zero_mean_129_cast")]; + tensor zero_mean_sq_129_cast = mul(x = zero_mean_129_cast, y = zero_mean_129_cast)[name = tensor("zero_mean_sq_129_cast")]; + tensor var_4459 = const()[name = tensor("op_4459"), val = tensor([1])]; + tensor 
var_4460_cast = reduce_mean(axes = var_4459, keep_dims = var_1181, x = zero_mean_sq_129_cast)[name = tensor("op_4460_cast")]; + tensor var_4461_to_fp16 = const()[name = tensor("op_4461_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4462_cast = add(x = var_4460_cast, y = var_4461_to_fp16)[name = tensor("op_4462_cast")]; + tensor denom_129_epsilon_0_to_fp16 = const()[name = tensor("denom_129_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_129_cast = rsqrt(epsilon = denom_129_epsilon_0_to_fp16, x = var_4462_cast)[name = tensor("denom_129_cast")]; + tensor out_129_cast = mul(x = zero_mean_129_cast, y = denom_129_cast)[name = tensor("out_129_cast")]; + tensor var_4466_to_fp16 = const()[name = tensor("op_4466_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(294395904)))]; + tensor var_4467_cast = add(x = out_129_cast, y = var_4466_to_fp16)[name = tensor("op_4467_cast")]; + tensor var_4469_to_fp16 = const()[name = tensor("op_4469_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(294398528)))]; + tensor hidden_states_181_cast = mul(x = var_4467_cast, y = var_4469_to_fp16)[name = tensor("hidden_states_181_cast")]; + tensor var_4476 = const()[name = tensor("op_4476"), val = tensor([1, 1])]; + tensor var_4478 = const()[name = tensor("op_4478"), val = tensor([1, 1])]; + tensor q_87_pad_type_0 = const()[name = tensor("q_87_pad_type_0"), val = tensor("custom")]; + tensor q_87_pad_0 = const()[name = tensor("q_87_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(294401152))), lut = tensor([-0x1.d38p-7, 0x1.d44p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_87_cast = conv(dilations = var_4478, groups = var_1186, pad = q_87_pad_0, pad_type = q_87_pad_type_0, strides = var_4476, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_181_cast)[name = tensor("q_87_cast")]; + tensor var_4482 = const()[name = tensor("op_4482"), val = tensor([1, 1])]; + tensor var_4484 = const()[name = tensor("op_4484"), val = tensor([1, 1])]; + tensor k_87_pad_type_0 = const()[name = tensor("k_87_pad_type_0"), val = tensor("custom")]; + tensor k_87_pad_0 = const()[name = tensor("k_87_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(294606016))), lut = tensor([-0x1.648p-7, 0x1.62cp-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_87_cast = conv(dilations = var_4484, groups = var_1186, pad = k_87_pad_0, pad_type = k_87_pad_type_0, strides = var_4482, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_87_cast")]; + tensor var_4488 = const()[name = tensor("op_4488"), val = tensor([1, 1])]; + tensor var_4490 = const()[name = tensor("op_4490"), val = tensor([1, 1])]; + tensor v_87_pad_type_0 = const()[name = tensor("v_87_pad_type_0"), val = tensor("custom")]; + tensor 
v_87_pad_0 = const()[name = tensor("v_87_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(294933760))), lut = tensor([-0x1.e88p-7, 0x1.e78p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_87_cast = conv(dilations = var_4490, groups = var_1186, pad = v_87_pad_0, pad_type = v_87_pad_type_0, strides = var_4488, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_87_cast")]; + tensor var_4494 = const()[name = tensor("op_4494"), val = tensor([2, 20, 64, -1])]; + tensor var_4495_cast = reshape(shape = var_4494, x = q_87_cast)[name = tensor("op_4495_cast")]; + tensor var_4496 = const()[name = tensor("op_4496"), val = tensor([2, 20, 64, -1])]; + tensor var_4497_cast = reshape(shape = var_4496, x = k_87_cast)[name = tensor("op_4497_cast")]; + tensor var_4498 = const()[name = tensor("op_4498"), val = tensor([2, 20, 64, -1])]; + tensor var_4499_cast = reshape(shape = var_4498, x = v_87_cast)[name = tensor("op_4499_cast")]; + tensor attn_weights_173_transpose_x_0 = const()[name = tensor("attn_weights_173_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_173_transpose_y_0 = const()[name = tensor("attn_weights_173_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_173_cast = matmul(transpose_x = attn_weights_173_transpose_x_0, transpose_y = attn_weights_173_transpose_y_0, x = var_4495_cast, y = var_4497_cast)[name = tensor("attn_weights_173_cast")]; + tensor attn_weights_175_cast = mul(x = attn_weights_173_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_175_cast")]; + tensor var_4503_cast = softmax(axis = var_1170, x = attn_weights_175_cast)[name = tensor("op_4503_cast")]; + tensor attn_87_transpose_x_0 = const()[name = tensor("attn_87_transpose_x_0"), val = tensor(false)]; + tensor attn_87_transpose_y_0 = const()[name = tensor("attn_87_transpose_y_0"), val = tensor(true)]; + tensor attn_87_cast = matmul(transpose_x = attn_87_transpose_x_0, transpose_y = attn_87_transpose_y_0, x = var_4499_cast, y = var_4503_cast)[name = tensor("attn_87_cast")]; + tensor var_4507 = const()[name = tensor("op_4507"), val = tensor([2, 1280, 1, -1])]; + tensor input_287_cast = reshape(shape = var_4507, x = attn_87_cast)[name = tensor("input_287_cast")]; + tensor var_4512 = const()[name = tensor("op_4512"), val = tensor([1, 1])]; + tensor var_4514 = const()[name = tensor("op_4514"), val = tensor([1, 1])]; + tensor var_4516_pad_type_0 = const()[name = tensor("op_4516_pad_type_0"), val = tensor("custom")]; + tensor var_4516_pad_0 = const()[name = tensor("op_4516_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(295261504))), lut = tensor([-0x1.354p-7, 0x1.37p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(295466368)))]; + tensor var_4516_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_4514, groups = var_1186, pad = var_4516_pad_0, pad_type = var_4516_pad_type_0, strides = var_4512, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_287_cast)[name = tensor("op_4516_cast")]; + tensor inputs_131_cast = add(x = var_4516_cast, y = inputs_129_cast)[name = tensor("inputs_131_cast")]; + tensor var_4520 = const()[name = tensor("op_4520"), val = tensor([1])]; + tensor channels_mean_131_cast = reduce_mean(axes = var_4520, keep_dims = var_1181, x = inputs_131_cast)[name = tensor("channels_mean_131_cast")]; + tensor zero_mean_131_cast = sub(x = inputs_131_cast, y = channels_mean_131_cast)[name = tensor("zero_mean_131_cast")]; + tensor zero_mean_sq_131_cast = mul(x = zero_mean_131_cast, y = zero_mean_131_cast)[name = tensor("zero_mean_sq_131_cast")]; + tensor var_4524 = const()[name = tensor("op_4524"), val = tensor([1])]; + tensor var_4525_cast = reduce_mean(axes = var_4524, keep_dims = var_1181, x = zero_mean_sq_131_cast)[name = tensor("op_4525_cast")]; + tensor var_4526_to_fp16 = const()[name = tensor("op_4526_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4527_cast = add(x = var_4525_cast, y = var_4526_to_fp16)[name = tensor("op_4527_cast")]; + tensor denom_131_epsilon_0_to_fp16 = const()[name = tensor("denom_131_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_131_cast = rsqrt(epsilon = denom_131_epsilon_0_to_fp16, x = var_4527_cast)[name = tensor("denom_131_cast")]; + tensor out_131_cast = mul(x = zero_mean_131_cast, y = denom_131_cast)[name = tensor("out_131_cast")]; + tensor var_4531_to_fp16 = const()[name = tensor("op_4531_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(295468992)))]; + tensor var_4532_cast = add(x = out_131_cast, y = var_4531_to_fp16)[name = tensor("op_4532_cast")]; + tensor var_4534_to_fp16 = const()[name = tensor("op_4534_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(295471616)))]; + tensor input_289_cast = mul(x = var_4532_cast, y = var_4534_to_fp16)[name = tensor("input_289_cast")]; + tensor var_4542 = const()[name = tensor("op_4542"), val = tensor([1, 1])]; + tensor var_4544 = const()[name = tensor("op_4544"), val = tensor([1, 1])]; + tensor var_4546_pad_type_0 = const()[name = tensor("op_4546_pad_type_0"), val = tensor("custom")]; + tensor var_4546_pad_0 = const()[name = tensor("op_4546_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(295474240))), lut = tensor([-0x1.608p-5, -0x1.a5cp-7, 0x1.a88p-7, 0x1.61p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(298751104)))]; + tensor var_4546_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = 
var_4544, groups = var_1186, pad = var_4546_pad_0, pad_type = var_4546_pad_type_0, strides = var_4542, weight = down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_289_cast)[name = tensor("op_4546_cast")]; + tensor var_4547_split_sizes_0 = const()[name = tensor("op_4547_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4547_axis_0 = const()[name = tensor("op_4547_axis_0"), val = tensor(1)]; + tensor var_4547_cast_0, tensor var_4547_cast_1 = split(axis = var_4547_axis_0, split_sizes = var_4547_split_sizes_0, x = var_4546_cast)[name = tensor("op_4547_cast")]; + tensor var_4549_mode_0 = const()[name = tensor("op_4549_mode_0"), val = tensor("EXACT")]; + tensor var_4549_cast = gelu(mode = var_4549_mode_0, x = var_4547_cast_1)[name = tensor("op_4549_cast")]; + tensor input_291_cast = mul(x = var_4547_cast_0, y = var_4549_cast)[name = tensor("input_291_cast")]; + tensor var_4553 = const()[name = tensor("op_4553"), val = tensor([1, 1])]; + tensor var_4555 = const()[name = tensor("op_4555"), val = tensor([1, 1])]; + tensor var_4557_pad_type_0 = const()[name = tensor("op_4557_pad_type_0"), val = tensor("custom")]; + tensor var_4557_pad_0 = const()[name = tensor("op_4557_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(298771648))), lut = tensor([-0x1.52p-5, -0x1.94p-7, 0x1.944p-7, 0x1.52p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(300410112)))]; + tensor var_4557_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_4555, groups = var_1186, pad = var_4557_pad_0, pad_type = var_4557_pad_type_0, strides = var_4553, weight = down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_291_cast)[name = tensor("op_4557_cast")]; + tensor inputs_133_cast = add(x = var_4557_cast, y = inputs_131_cast)[name = tensor("inputs_133_cast")]; + tensor var_4567 = const()[name = tensor("op_4567"), val = tensor([1])]; + tensor channels_mean_133_cast = reduce_mean(axes = var_4567, keep_dims = var_1181, x = inputs_133_cast)[name = tensor("channels_mean_133_cast")]; + tensor zero_mean_133_cast = sub(x = inputs_133_cast, y = channels_mean_133_cast)[name = tensor("zero_mean_133_cast")]; + tensor zero_mean_sq_133_cast = mul(x = zero_mean_133_cast, y = zero_mean_133_cast)[name = tensor("zero_mean_sq_133_cast")]; + tensor var_4571 = const()[name = tensor("op_4571"), val = tensor([1])]; + tensor var_4572_cast = reduce_mean(axes = var_4571, keep_dims = var_1181, x = zero_mean_sq_133_cast)[name = tensor("op_4572_cast")]; + tensor var_4573_to_fp16 = const()[name = tensor("op_4573_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4574_cast = add(x = var_4572_cast, y = var_4573_to_fp16)[name = tensor("op_4574_cast")]; + tensor denom_133_epsilon_0_to_fp16 = const()[name = tensor("denom_133_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_133_cast = rsqrt(epsilon = denom_133_epsilon_0_to_fp16, x = var_4574_cast)[name = 
tensor("denom_133_cast")]; + tensor out_133_cast = mul(x = zero_mean_133_cast, y = denom_133_cast)[name = tensor("out_133_cast")]; + tensor var_4578_to_fp16 = const()[name = tensor("op_4578_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(300412736)))]; + tensor var_4579_cast = add(x = out_133_cast, y = var_4578_to_fp16)[name = tensor("op_4579_cast")]; + tensor var_4581_to_fp16 = const()[name = tensor("op_4581_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(300415360)))]; + tensor hidden_states_185_cast = mul(x = var_4579_cast, y = var_4581_to_fp16)[name = tensor("hidden_states_185_cast")]; + tensor var_4588 = const()[name = tensor("op_4588"), val = tensor([1, 1])]; + tensor var_4590 = const()[name = tensor("op_4590"), val = tensor([1, 1])]; + tensor q_89_pad_type_0 = const()[name = tensor("q_89_pad_type_0"), val = tensor("custom")]; + tensor q_89_pad_0 = const()[name = tensor("q_89_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(300417984))), lut = tensor([-0x1.544p-6, 0x1.54p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_89_cast = conv(dilations = var_4590, groups = var_1186, pad = q_89_pad_0, pad_type = q_89_pad_type_0, strides = var_4588, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_185_cast)[name = tensor("q_89_cast")]; + tensor var_4594 = const()[name = tensor("op_4594"), val = tensor([1, 1])]; + tensor var_4596 = const()[name = tensor("op_4596"), val = tensor([1, 1])]; + tensor k_89_pad_type_0 = const()[name = tensor("k_89_pad_type_0"), val = tensor("custom")]; + tensor k_89_pad_0 = const()[name = tensor("k_89_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(300622848))), lut = tensor([-0x1.53p-6, 0x1.51cp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_89_cast = conv(dilations = var_4596, groups = var_1186, pad = k_89_pad_0, pad_type = k_89_pad_type_0, strides = var_4594, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_185_cast)[name = tensor("k_89_cast")]; + tensor var_4600 = const()[name = tensor("op_4600"), val = tensor([1, 1])]; + tensor var_4602 = const()[name = tensor("op_4602"), val = tensor([1, 1])]; + tensor v_89_pad_type_0 = const()[name = tensor("v_89_pad_type_0"), val = tensor("custom")]; + tensor v_89_pad_0 = const()[name = tensor("v_89_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(300827712))), lut = tensor([-0x1.3e4p-5, -0x1.7ecp-7, 0x1.82p-7, 0x1.3f4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_89_cast = conv(dilations 
= var_4602, groups = var_1186, pad = v_89_pad_0, pad_type = v_89_pad_type_0, strides = var_4600, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_185_cast)[name = tensor("v_89_cast")]; + tensor var_4606 = const()[name = tensor("op_4606"), val = tensor([2, 20, 64, -1])]; + tensor var_4607_cast = reshape(shape = var_4606, x = q_89_cast)[name = tensor("op_4607_cast")]; + tensor var_4608 = const()[name = tensor("op_4608"), val = tensor([2, 20, 64, -1])]; + tensor var_4609_cast = reshape(shape = var_4608, x = k_89_cast)[name = tensor("op_4609_cast")]; + tensor var_4610 = const()[name = tensor("op_4610"), val = tensor([2, 20, 64, -1])]; + tensor var_4611_cast = reshape(shape = var_4610, x = v_89_cast)[name = tensor("op_4611_cast")]; + tensor attn_weights_177_transpose_x_0 = const()[name = tensor("attn_weights_177_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_177_transpose_y_0 = const()[name = tensor("attn_weights_177_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_177_cast = matmul(transpose_x = attn_weights_177_transpose_x_0, transpose_y = attn_weights_177_transpose_y_0, x = var_4607_cast, y = var_4609_cast)[name = tensor("attn_weights_177_cast")]; + tensor attn_weights_179_cast = mul(x = attn_weights_177_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_179_cast")]; + tensor var_4615_cast = softmax(axis = var_1170, x = attn_weights_179_cast)[name = tensor("op_4615_cast")]; + tensor attn_89_transpose_x_0 = const()[name = tensor("attn_89_transpose_x_0"), val = tensor(false)]; + tensor attn_89_transpose_y_0 = const()[name = tensor("attn_89_transpose_y_0"), val = tensor(true)]; + tensor attn_89_cast = matmul(transpose_x = attn_89_transpose_x_0, transpose_y = attn_89_transpose_y_0, x = var_4611_cast, y = var_4615_cast)[name = tensor("attn_89_cast")]; + tensor var_4619 = const()[name = tensor("op_4619"), val = tensor([2, 1280, 1, -1])]; + tensor input_293_cast = reshape(shape = var_4619, x = attn_89_cast)[name = tensor("input_293_cast")]; + tensor var_4624 = const()[name = tensor("op_4624"), val = tensor([1, 1])]; + tensor var_4626 = const()[name = tensor("op_4626"), val = tensor([1, 1])]; + tensor var_4628_pad_type_0 = const()[name = tensor("op_4628_pad_type_0"), val = tensor("custom")]; + tensor var_4628_pad_0 = const()[name = tensor("op_4628_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(301237376))), lut = tensor([-0x1.378p-5, -0x1.74p-7, 0x1.7bcp-7, 0x1.394p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(301647040)))]; + tensor var_4628_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_4626, groups = var_1186, pad = var_4628_pad_0, pad_type = var_4628_pad_type_0, strides = var_4624, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_293_cast)[name = tensor("op_4628_cast")]; + tensor inputs_135_cast = add(x = 
var_4628_cast, y = inputs_133_cast)[name = tensor("inputs_135_cast")]; + tensor var_4632 = const()[name = tensor("op_4632"), val = tensor([1])]; + tensor channels_mean_135_cast = reduce_mean(axes = var_4632, keep_dims = var_1181, x = inputs_135_cast)[name = tensor("channels_mean_135_cast")]; + tensor zero_mean_135_cast = sub(x = inputs_135_cast, y = channels_mean_135_cast)[name = tensor("zero_mean_135_cast")]; + tensor zero_mean_sq_135_cast = mul(x = zero_mean_135_cast, y = zero_mean_135_cast)[name = tensor("zero_mean_sq_135_cast")]; + tensor var_4636 = const()[name = tensor("op_4636"), val = tensor([1])]; + tensor var_4637_cast = reduce_mean(axes = var_4636, keep_dims = var_1181, x = zero_mean_sq_135_cast)[name = tensor("op_4637_cast")]; + tensor var_4638_to_fp16 = const()[name = tensor("op_4638_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4639_cast = add(x = var_4637_cast, y = var_4638_to_fp16)[name = tensor("op_4639_cast")]; + tensor denom_135_epsilon_0_to_fp16 = const()[name = tensor("denom_135_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_135_cast = rsqrt(epsilon = denom_135_epsilon_0_to_fp16, x = var_4639_cast)[name = tensor("denom_135_cast")]; + tensor out_135_cast = mul(x = zero_mean_135_cast, y = denom_135_cast)[name = tensor("out_135_cast")]; + tensor var_4643_to_fp16 = const()[name = tensor("op_4643_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(301649664)))]; + tensor var_4644_cast = add(x = out_135_cast, y = var_4643_to_fp16)[name = tensor("op_4644_cast")]; + tensor var_4646_to_fp16 = const()[name = tensor("op_4646_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(301652288)))]; + tensor hidden_states_187_cast = mul(x = var_4644_cast, y = var_4646_to_fp16)[name = tensor("hidden_states_187_cast")]; + tensor var_4653 = const()[name = tensor("op_4653"), val = tensor([1, 1])]; + tensor var_4655 = const()[name = tensor("op_4655"), val = tensor([1, 1])]; + tensor q_91_pad_type_0 = const()[name = tensor("q_91_pad_type_0"), val = tensor("custom")]; + tensor q_91_pad_0 = const()[name = tensor("q_91_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(301654912))), lut = tensor([-0x1.cd4p-7, 0x1.cf4p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_91_cast = conv(dilations = var_4655, groups = var_1186, pad = q_91_pad_0, pad_type = q_91_pad_type_0, strides = var_4653, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_187_cast)[name = tensor("q_91_cast")]; + tensor var_4659 = const()[name = tensor("op_4659"), val = tensor([1, 1])]; + tensor var_4661 = const()[name = tensor("op_4661"), val = tensor([1, 1])]; + tensor k_91_pad_type_0 = const()[name = tensor("k_91_pad_type_0"), val = tensor("custom")]; + tensor k_91_pad_0 = const()[name = tensor("k_91_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(301859776))), lut = tensor([-0x1.584p-7, 0x1.568p-7]), name = 
tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_91_cast = conv(dilations = var_4661, groups = var_1186, pad = k_91_pad_0, pad_type = k_91_pad_type_0, strides = var_4659, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_91_cast")]; + tensor var_4665 = const()[name = tensor("op_4665"), val = tensor([1, 1])]; + tensor var_4667 = const()[name = tensor("op_4667"), val = tensor([1, 1])]; + tensor v_91_pad_type_0 = const()[name = tensor("v_91_pad_type_0"), val = tensor("custom")]; + tensor v_91_pad_0 = const()[name = tensor("v_91_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(302187520))), lut = tensor([-0x1.ce8p-7, 0x1.cd8p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_91_cast = conv(dilations = var_4667, groups = var_1186, pad = v_91_pad_0, pad_type = v_91_pad_type_0, strides = var_4665, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_91_cast")]; + tensor var_4671 = const()[name = tensor("op_4671"), val = tensor([2, 20, 64, -1])]; + tensor var_4672_cast = reshape(shape = var_4671, x = q_91_cast)[name = tensor("op_4672_cast")]; + tensor var_4673 = const()[name = tensor("op_4673"), val = tensor([2, 20, 64, -1])]; + tensor var_4674_cast = reshape(shape = var_4673, x = k_91_cast)[name = tensor("op_4674_cast")]; + tensor var_4675 = const()[name = tensor("op_4675"), val = tensor([2, 20, 64, -1])]; + tensor var_4676_cast = reshape(shape = var_4675, x = v_91_cast)[name = tensor("op_4676_cast")]; + tensor attn_weights_181_transpose_x_0 = const()[name = tensor("attn_weights_181_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_181_transpose_y_0 = const()[name = tensor("attn_weights_181_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_181_cast = matmul(transpose_x = attn_weights_181_transpose_x_0, transpose_y = attn_weights_181_transpose_y_0, x = var_4672_cast, y = var_4674_cast)[name = tensor("attn_weights_181_cast")]; + tensor attn_weights_183_cast = mul(x = attn_weights_181_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_183_cast")]; + tensor var_4680_cast = softmax(axis = var_1170, x = attn_weights_183_cast)[name = tensor("op_4680_cast")]; + tensor attn_91_transpose_x_0 = const()[name = tensor("attn_91_transpose_x_0"), val = tensor(false)]; + tensor attn_91_transpose_y_0 = const()[name = tensor("attn_91_transpose_y_0"), val = tensor(true)]; + tensor attn_91_cast = matmul(transpose_x = attn_91_transpose_x_0, transpose_y = attn_91_transpose_y_0, x = var_4676_cast, y = var_4680_cast)[name = tensor("attn_91_cast")]; + tensor var_4684 = const()[name = tensor("op_4684"), val = tensor([2, 1280, 1, -1])]; + tensor input_295_cast = reshape(shape = var_4684, x = attn_91_cast)[name = tensor("input_295_cast")]; + tensor var_4689 = const()[name = tensor("op_4689"), val = tensor([1, 1])]; + tensor var_4691 = const()[name = tensor("op_4691"), val = tensor([1, 1])]; + tensor var_4693_pad_type_0 = const()[name = tensor("op_4693_pad_type_0"), val = tensor("custom")]; + tensor var_4693_pad_0 = const()[name = 
tensor("op_4693_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(302515264))), lut = tensor([-0x1.23p-7, 0x1.25p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(302720128)))]; + tensor var_4693_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_4691, groups = var_1186, pad = var_4693_pad_0, pad_type = var_4693_pad_type_0, strides = var_4689, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_295_cast)[name = tensor("op_4693_cast")]; + tensor inputs_137_cast = add(x = var_4693_cast, y = inputs_135_cast)[name = tensor("inputs_137_cast")]; + tensor var_4697 = const()[name = tensor("op_4697"), val = tensor([1])]; + tensor channels_mean_137_cast = reduce_mean(axes = var_4697, keep_dims = var_1181, x = inputs_137_cast)[name = tensor("channels_mean_137_cast")]; + tensor zero_mean_137_cast = sub(x = inputs_137_cast, y = channels_mean_137_cast)[name = tensor("zero_mean_137_cast")]; + tensor zero_mean_sq_137_cast = mul(x = zero_mean_137_cast, y = zero_mean_137_cast)[name = tensor("zero_mean_sq_137_cast")]; + tensor var_4701 = const()[name = tensor("op_4701"), val = tensor([1])]; + tensor var_4702_cast = reduce_mean(axes = var_4701, keep_dims = var_1181, x = zero_mean_sq_137_cast)[name = tensor("op_4702_cast")]; + tensor var_4703_to_fp16 = const()[name = tensor("op_4703_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4704_cast = add(x = var_4702_cast, y = var_4703_to_fp16)[name = tensor("op_4704_cast")]; + tensor denom_137_epsilon_0_to_fp16 = const()[name = tensor("denom_137_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_137_cast = rsqrt(epsilon = denom_137_epsilon_0_to_fp16, x = var_4704_cast)[name = tensor("denom_137_cast")]; + tensor out_137_cast = mul(x = zero_mean_137_cast, y = denom_137_cast)[name = tensor("out_137_cast")]; + tensor var_4708_to_fp16 = const()[name = tensor("op_4708_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(302722752)))]; + tensor var_4709_cast = add(x = out_137_cast, y = var_4708_to_fp16)[name = tensor("op_4709_cast")]; + tensor var_4711_to_fp16 = const()[name = tensor("op_4711_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(302725376)))]; + tensor input_297_cast = mul(x = var_4709_cast, y = var_4711_to_fp16)[name = tensor("input_297_cast")]; + tensor var_4719 = const()[name = tensor("op_4719"), val = tensor([1, 1])]; + tensor var_4721 = const()[name = tensor("op_4721"), val = tensor([1, 1])]; + tensor var_4723_pad_type_0 = const()[name = tensor("op_4723_pad_type_0"), val = tensor("custom")]; + tensor var_4723_pad_0 = const()[name = tensor("op_4723_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(302728000))), lut = tensor([-0x1.618p-5, -0x1.a84p-7, 0x1.a78p-7, 0x1.614p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(306004864)))]; + tensor var_4723_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_4721, groups = var_1186, pad = var_4723_pad_0, pad_type = var_4723_pad_type_0, strides = var_4719, weight = down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_297_cast)[name = tensor("op_4723_cast")]; + tensor var_4724_split_sizes_0 = const()[name = tensor("op_4724_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4724_axis_0 = const()[name = tensor("op_4724_axis_0"), val = tensor(1)]; + tensor var_4724_cast_0, tensor var_4724_cast_1 = split(axis = var_4724_axis_0, split_sizes = var_4724_split_sizes_0, x = var_4723_cast)[name = tensor("op_4724_cast")]; + tensor var_4726_mode_0 = const()[name = tensor("op_4726_mode_0"), val = tensor("EXACT")]; + tensor var_4726_cast = gelu(mode = var_4726_mode_0, x = var_4724_cast_1)[name = tensor("op_4726_cast")]; + tensor input_299_cast = mul(x = var_4724_cast_0, y = var_4726_cast)[name = tensor("input_299_cast")]; + tensor var_4730 = const()[name = tensor("op_4730"), val = tensor([1, 1])]; + tensor var_4732 = const()[name = tensor("op_4732"), val = tensor([1, 1])]; + tensor var_4734_pad_type_0 = const()[name = tensor("op_4734_pad_type_0"), val = tensor("custom")]; + tensor var_4734_pad_0 = const()[name = tensor("op_4734_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(306025408))), lut = tensor([-0x1.4ep-5, -0x1.9p-7, 0x1.8ecp-7, 0x1.4dcp-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(307663872)))]; + tensor var_4734_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_4732, groups = var_1186, pad = var_4734_pad_0, pad_type = var_4734_pad_type_0, strides = var_4730, weight = down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_299_cast)[name = tensor("op_4734_cast")]; + tensor inputs_139_cast = add(x = var_4734_cast, y = inputs_137_cast)[name = tensor("inputs_139_cast")]; + tensor var_4744 = const()[name = tensor("op_4744"), val = tensor([1])]; + tensor channels_mean_139_cast = reduce_mean(axes = var_4744, keep_dims = var_1181, x = inputs_139_cast)[name = tensor("channels_mean_139_cast")]; + tensor zero_mean_139_cast = sub(x = inputs_139_cast, y = channels_mean_139_cast)[name = tensor("zero_mean_139_cast")]; + tensor zero_mean_sq_139_cast = mul(x = zero_mean_139_cast, y = 
zero_mean_139_cast)[name = tensor("zero_mean_sq_139_cast")]; + tensor var_4748 = const()[name = tensor("op_4748"), val = tensor([1])]; + tensor var_4749_cast = reduce_mean(axes = var_4748, keep_dims = var_1181, x = zero_mean_sq_139_cast)[name = tensor("op_4749_cast")]; + tensor var_4750_to_fp16 = const()[name = tensor("op_4750_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4751_cast = add(x = var_4749_cast, y = var_4750_to_fp16)[name = tensor("op_4751_cast")]; + tensor denom_139_epsilon_0_to_fp16 = const()[name = tensor("denom_139_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_139_cast = rsqrt(epsilon = denom_139_epsilon_0_to_fp16, x = var_4751_cast)[name = tensor("denom_139_cast")]; + tensor out_139_cast = mul(x = zero_mean_139_cast, y = denom_139_cast)[name = tensor("out_139_cast")]; + tensor var_4755_to_fp16 = const()[name = tensor("op_4755_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(307666496)))]; + tensor var_4756_cast = add(x = out_139_cast, y = var_4755_to_fp16)[name = tensor("op_4756_cast")]; + tensor var_4758_to_fp16 = const()[name = tensor("op_4758_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(307669120)))]; + tensor hidden_states_191_cast = mul(x = var_4756_cast, y = var_4758_to_fp16)[name = tensor("hidden_states_191_cast")]; + tensor var_4765 = const()[name = tensor("op_4765"), val = tensor([1, 1])]; + tensor var_4767 = const()[name = tensor("op_4767"), val = tensor([1, 1])]; + tensor q_93_pad_type_0 = const()[name = tensor("q_93_pad_type_0"), val = tensor("custom")]; + tensor q_93_pad_0 = const()[name = tensor("q_93_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(307671744))), lut = tensor([-0x1.5fcp-6, 0x1.604p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_93_cast = conv(dilations = var_4767, groups = var_1186, pad = q_93_pad_0, pad_type = q_93_pad_type_0, strides = var_4765, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_191_cast)[name = tensor("q_93_cast")]; + tensor var_4771 = const()[name = tensor("op_4771"), val = tensor([1, 1])]; + tensor var_4773 = const()[name = tensor("op_4773"), val = tensor([1, 1])]; + tensor k_93_pad_type_0 = const()[name = tensor("k_93_pad_type_0"), val = tensor("custom")]; + tensor k_93_pad_0 = const()[name = tensor("k_93_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(307876608))), lut = tensor([-0x1.5c4p-6, 0x1.5dcp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_93_cast = conv(dilations = var_4773, groups = var_1186, pad = k_93_pad_0, pad_type = k_93_pad_type_0, strides = var_4771, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_191_cast)[name = tensor("k_93_cast")]; + tensor var_4777 = const()[name = tensor("op_4777"), val = tensor([1, 1])]; + tensor var_4779 = const()[name = 
tensor("op_4779"), val = tensor([1, 1])]; + tensor v_93_pad_type_0 = const()[name = tensor("v_93_pad_type_0"), val = tensor("custom")]; + tensor v_93_pad_0 = const()[name = tensor("v_93_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(308081472))), lut = tensor([-0x1.374p-5, -0x1.77p-7, 0x1.754p-7, 0x1.364p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_93_cast = conv(dilations = var_4779, groups = var_1186, pad = v_93_pad_0, pad_type = v_93_pad_type_0, strides = var_4777, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_191_cast)[name = tensor("v_93_cast")]; + tensor var_4783 = const()[name = tensor("op_4783"), val = tensor([2, 20, 64, -1])]; + tensor var_4784_cast = reshape(shape = var_4783, x = q_93_cast)[name = tensor("op_4784_cast")]; + tensor var_4785 = const()[name = tensor("op_4785"), val = tensor([2, 20, 64, -1])]; + tensor var_4786_cast = reshape(shape = var_4785, x = k_93_cast)[name = tensor("op_4786_cast")]; + tensor var_4787 = const()[name = tensor("op_4787"), val = tensor([2, 20, 64, -1])]; + tensor var_4788_cast = reshape(shape = var_4787, x = v_93_cast)[name = tensor("op_4788_cast")]; + tensor attn_weights_185_transpose_x_0 = const()[name = tensor("attn_weights_185_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_185_transpose_y_0 = const()[name = tensor("attn_weights_185_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_185_cast = matmul(transpose_x = attn_weights_185_transpose_x_0, transpose_y = attn_weights_185_transpose_y_0, x = var_4784_cast, y = var_4786_cast)[name = tensor("attn_weights_185_cast")]; + tensor attn_weights_187_cast = mul(x = attn_weights_185_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_187_cast")]; + tensor var_4792_cast = softmax(axis = var_1170, x = attn_weights_187_cast)[name = tensor("op_4792_cast")]; + tensor attn_93_transpose_x_0 = const()[name = tensor("attn_93_transpose_x_0"), val = tensor(false)]; + tensor attn_93_transpose_y_0 = const()[name = tensor("attn_93_transpose_y_0"), val = tensor(true)]; + tensor attn_93_cast = matmul(transpose_x = attn_93_transpose_x_0, transpose_y = attn_93_transpose_y_0, x = var_4788_cast, y = var_4792_cast)[name = tensor("attn_93_cast")]; + tensor var_4796 = const()[name = tensor("op_4796"), val = tensor([2, 1280, 1, -1])]; + tensor input_301_cast = reshape(shape = var_4796, x = attn_93_cast)[name = tensor("input_301_cast")]; + tensor var_4801 = const()[name = tensor("op_4801"), val = tensor([1, 1])]; + tensor var_4803 = const()[name = tensor("op_4803"), val = tensor([1, 1])]; + tensor var_4805_pad_type_0 = const()[name = tensor("op_4805_pad_type_0"), val = tensor("custom")]; + tensor var_4805_pad_0 = const()[name = tensor("op_4805_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(308491136))), lut = tensor([-0x1.29cp-5, -0x1.68cp-7, 0x1.65cp-7, 0x1.298p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor 
down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(308900800)))]; + tensor var_4805_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_4803, groups = var_1186, pad = var_4805_pad_0, pad_type = var_4805_pad_type_0, strides = var_4801, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_301_cast)[name = tensor("op_4805_cast")]; + tensor inputs_141_cast = add(x = var_4805_cast, y = inputs_139_cast)[name = tensor("inputs_141_cast")]; + tensor var_4809 = const()[name = tensor("op_4809"), val = tensor([1])]; + tensor channels_mean_141_cast = reduce_mean(axes = var_4809, keep_dims = var_1181, x = inputs_141_cast)[name = tensor("channels_mean_141_cast")]; + tensor zero_mean_141_cast = sub(x = inputs_141_cast, y = channels_mean_141_cast)[name = tensor("zero_mean_141_cast")]; + tensor zero_mean_sq_141_cast = mul(x = zero_mean_141_cast, y = zero_mean_141_cast)[name = tensor("zero_mean_sq_141_cast")]; + tensor var_4813 = const()[name = tensor("op_4813"), val = tensor([1])]; + tensor var_4814_cast = reduce_mean(axes = var_4813, keep_dims = var_1181, x = zero_mean_sq_141_cast)[name = tensor("op_4814_cast")]; + tensor var_4815_to_fp16 = const()[name = tensor("op_4815_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4816_cast = add(x = var_4814_cast, y = var_4815_to_fp16)[name = tensor("op_4816_cast")]; + tensor denom_141_epsilon_0_to_fp16 = const()[name = tensor("denom_141_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_141_cast = rsqrt(epsilon = denom_141_epsilon_0_to_fp16, x = var_4816_cast)[name = tensor("denom_141_cast")]; + tensor out_141_cast = mul(x = zero_mean_141_cast, y = denom_141_cast)[name = tensor("out_141_cast")]; + tensor var_4820_to_fp16 = const()[name = tensor("op_4820_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(308903424)))]; + tensor var_4821_cast = add(x = out_141_cast, y = var_4820_to_fp16)[name = tensor("op_4821_cast")]; + tensor var_4823_to_fp16 = const()[name = tensor("op_4823_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(308906048)))]; + tensor hidden_states_193_cast = mul(x = var_4821_cast, y = var_4823_to_fp16)[name = tensor("hidden_states_193_cast")]; + tensor var_4830 = const()[name = tensor("op_4830"), val = tensor([1, 1])]; + tensor var_4832 = const()[name = tensor("op_4832"), val = tensor([1, 1])]; + tensor q_95_pad_type_0 = const()[name = tensor("q_95_pad_type_0"), val = tensor("custom")]; + tensor q_95_pad_0 = const()[name = tensor("q_95_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(308908672))), lut = tensor([-0x1.a64p-7, 0x1.a5cp-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_95_cast = conv(dilations = var_4832, groups = var_1186, pad = q_95_pad_0, pad_type = q_95_pad_type_0, strides = var_4830, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = 
hidden_states_193_cast)[name = tensor("q_95_cast")]; + tensor var_4836 = const()[name = tensor("op_4836"), val = tensor([1, 1])]; + tensor var_4838 = const()[name = tensor("op_4838"), val = tensor([1, 1])]; + tensor k_95_pad_type_0 = const()[name = tensor("k_95_pad_type_0"), val = tensor("custom")]; + tensor k_95_pad_0 = const()[name = tensor("k_95_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(309113536))), lut = tensor([-0x1.27cp-7, 0x1.288p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_95_cast = conv(dilations = var_4838, groups = var_1186, pad = k_95_pad_0, pad_type = k_95_pad_type_0, strides = var_4836, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_95_cast")]; + tensor var_4842 = const()[name = tensor("op_4842"), val = tensor([1, 1])]; + tensor var_4844 = const()[name = tensor("op_4844"), val = tensor([1, 1])]; + tensor v_95_pad_type_0 = const()[name = tensor("v_95_pad_type_0"), val = tensor("custom")]; + tensor v_95_pad_0 = const()[name = tensor("v_95_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(309441280))), lut = tensor([-0x1.77cp-7, 0x1.79p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_95_cast = conv(dilations = var_4844, groups = var_1186, pad = v_95_pad_0, pad_type = v_95_pad_type_0, strides = var_4842, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_95_cast")]; + tensor var_4848 = const()[name = tensor("op_4848"), val = tensor([2, 20, 64, -1])]; + tensor var_4849_cast = reshape(shape = var_4848, x = q_95_cast)[name = tensor("op_4849_cast")]; + tensor var_4850 = const()[name = tensor("op_4850"), val = tensor([2, 20, 64, -1])]; + tensor var_4851_cast = reshape(shape = var_4850, x = k_95_cast)[name = tensor("op_4851_cast")]; + tensor var_4852 = const()[name = tensor("op_4852"), val = tensor([2, 20, 64, -1])]; + tensor var_4853_cast = reshape(shape = var_4852, x = v_95_cast)[name = tensor("op_4853_cast")]; + tensor attn_weights_189_transpose_x_0 = const()[name = tensor("attn_weights_189_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_189_transpose_y_0 = const()[name = tensor("attn_weights_189_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_189_cast = matmul(transpose_x = attn_weights_189_transpose_x_0, transpose_y = attn_weights_189_transpose_y_0, x = var_4849_cast, y = var_4851_cast)[name = tensor("attn_weights_189_cast")]; + tensor attn_weights_191_cast = mul(x = attn_weights_189_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_191_cast")]; + tensor var_4857_cast = softmax(axis = var_1170, x = attn_weights_191_cast)[name = tensor("op_4857_cast")]; + tensor attn_95_transpose_x_0 = const()[name = tensor("attn_95_transpose_x_0"), val = tensor(false)]; + tensor attn_95_transpose_y_0 = const()[name = tensor("attn_95_transpose_y_0"), val = tensor(true)]; + 
tensor attn_95_cast = matmul(transpose_x = attn_95_transpose_x_0, transpose_y = attn_95_transpose_y_0, x = var_4853_cast, y = var_4857_cast)[name = tensor("attn_95_cast")]; + tensor var_4861 = const()[name = tensor("op_4861"), val = tensor([2, 1280, 1, -1])]; + tensor input_303_cast = reshape(shape = var_4861, x = attn_95_cast)[name = tensor("input_303_cast")]; + tensor var_4866 = const()[name = tensor("op_4866"), val = tensor([1, 1])]; + tensor var_4868 = const()[name = tensor("op_4868"), val = tensor([1, 1])]; + tensor var_4870_pad_type_0 = const()[name = tensor("op_4870_pad_type_0"), val = tensor("custom")]; + tensor var_4870_pad_0 = const()[name = tensor("op_4870_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(309769024))), lut = tensor([-0x1.e74p-8, 0x1.e64p-8]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(309973888)))]; + tensor var_4870_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_4868, groups = var_1186, pad = var_4870_pad_0, pad_type = var_4870_pad_type_0, strides = var_4866, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_303_cast)[name = tensor("op_4870_cast")]; + tensor inputs_143_cast = add(x = var_4870_cast, y = inputs_141_cast)[name = tensor("inputs_143_cast")]; + tensor var_4874 = const()[name = tensor("op_4874"), val = tensor([1])]; + tensor channels_mean_143_cast = reduce_mean(axes = var_4874, keep_dims = var_1181, x = inputs_143_cast)[name = tensor("channels_mean_143_cast")]; + tensor zero_mean_143_cast = sub(x = inputs_143_cast, y = channels_mean_143_cast)[name = tensor("zero_mean_143_cast")]; + tensor zero_mean_sq_143_cast = mul(x = zero_mean_143_cast, y = zero_mean_143_cast)[name = tensor("zero_mean_sq_143_cast")]; + tensor var_4878 = const()[name = tensor("op_4878"), val = tensor([1])]; + tensor var_4879_cast = reduce_mean(axes = var_4878, keep_dims = var_1181, x = zero_mean_sq_143_cast)[name = tensor("op_4879_cast")]; + tensor var_4880_to_fp16 = const()[name = tensor("op_4880_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4881_cast = add(x = var_4879_cast, y = var_4880_to_fp16)[name = tensor("op_4881_cast")]; + tensor denom_143_epsilon_0_to_fp16 = const()[name = tensor("denom_143_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_143_cast = rsqrt(epsilon = denom_143_epsilon_0_to_fp16, x = var_4881_cast)[name = tensor("denom_143_cast")]; + tensor out_143_cast = mul(x = zero_mean_143_cast, y = denom_143_cast)[name = tensor("out_143_cast")]; + tensor var_4885_to_fp16 = const()[name = tensor("op_4885_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(309976512)))]; + tensor var_4886_cast = add(x = out_143_cast, y = var_4885_to_fp16)[name = tensor("op_4886_cast")]; + tensor var_4888_to_fp16 = const()[name = tensor("op_4888_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(309979136)))]; + tensor input_305_cast = mul(x = var_4886_cast, y = var_4888_to_fp16)[name = tensor("input_305_cast")]; + tensor var_4896 = const()[name = tensor("op_4896"), val = tensor([1, 1])]; + tensor var_4898 = const()[name = tensor("op_4898"), val = tensor([1, 1])]; + tensor var_4900_pad_type_0 = const()[name = tensor("op_4900_pad_type_0"), val = tensor("custom")]; + tensor var_4900_pad_0 = const()[name = tensor("op_4900_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(309981760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(316535424))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(316535552)))]; + tensor var_4900_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_4898, groups = var_1186, pad = var_4900_pad_0, pad_type = var_4900_pad_type_0, strides = var_4896, weight = down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_305_cast)[name = tensor("op_4900_cast")]; + tensor var_4901_split_sizes_0 = const()[name = tensor("op_4901_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4901_axis_0 = const()[name = tensor("op_4901_axis_0"), val = tensor(1)]; + tensor var_4901_cast_0, tensor var_4901_cast_1 = split(axis = var_4901_axis_0, split_sizes = var_4901_split_sizes_0, x = var_4900_cast)[name = tensor("op_4901_cast")]; + tensor var_4903_mode_0 = const()[name = tensor("op_4903_mode_0"), val = tensor("EXACT")]; + tensor var_4903_cast = gelu(mode = var_4903_mode_0, x = var_4901_cast_1)[name = tensor("op_4903_cast")]; + tensor input_307_cast = mul(x = var_4901_cast_0, y = var_4903_cast)[name = tensor("input_307_cast")]; + tensor var_4907 = const()[name = tensor("op_4907"), val = tensor([1, 1])]; + tensor var_4909 = const()[name = tensor("op_4909"), val = tensor([1, 1])]; + tensor var_4911_pad_type_0 = const()[name = tensor("op_4911_pad_type_0"), val = tensor("custom")]; + tensor var_4911_pad_0 = const()[name = tensor("op_4911_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(316556096))), lut = tensor([-0x1.424p-5, -0x1.814p-7, 0x1.828p-7, 0x1.428p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(318194560)))]; + tensor var_4911_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_4909, groups = var_1186, pad = var_4911_pad_0, pad_type = var_4911_pad_type_0, 
strides = var_4907, weight = down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_307_cast)[name = tensor("op_4911_cast")]; + tensor hidden_states_197_cast = add(x = var_4911_cast, y = inputs_143_cast)[name = tensor("hidden_states_197_cast")]; + tensor var_4913 = const()[name = tensor("op_4913"), val = tensor([2, 1280, 32, 32])]; + tensor input_309_cast = reshape(shape = var_4913, x = hidden_states_197_cast)[name = tensor("input_309_cast")]; + tensor var_4917 = const()[name = tensor("op_4917"), val = tensor([1, 1])]; + tensor var_4919 = const()[name = tensor("op_4919"), val = tensor([1, 1])]; + tensor hidden_states_199_pad_type_0 = const()[name = tensor("hidden_states_199_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_199_pad_0 = const()[name = tensor("hidden_states_199_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(318197184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(319016448))), name = tensor("down_blocks_2_attentions_1_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(319016576)))]; + tensor hidden_states_199_cast = conv(bias = down_blocks_2_attentions_1_proj_out_bias_to_fp16, dilations = var_4919, groups = var_1186, pad = hidden_states_199_pad_0, pad_type = hidden_states_199_pad_type_0, strides = var_4917, weight = down_blocks_2_attentions_1_proj_out_weight_to_fp16_palettized, x = input_309_cast)[name = tensor("hidden_states_199_cast")]; + tensor input_311_cast = add(x = hidden_states_199_cast, y = hidden_states_133_cast)[name = tensor("input_311_cast")]; + tensor var_4927 = const()[name = tensor("op_4927"), val = tensor(3)]; + tensor var_4938 = const()[name = tensor("op_4938"), val = tensor(true)]; + tensor var_4943 = const()[name = tensor("op_4943"), val = tensor(1)]; + tensor reshape_64_shape_0 = const()[name = tensor("reshape_64_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_64_cast = reshape(shape = reshape_64_shape_0, x = input_311_cast)[name = tensor("reshape_64_cast")]; + tensor reduce_mean_48_axes_0 = const()[name = tensor("reduce_mean_48_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_48_keep_dims_0 = const()[name = tensor("reduce_mean_48_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_48_cast = reduce_mean(axes = reduce_mean_48_axes_0, keep_dims = reduce_mean_48_keep_dims_0, x = reshape_64_cast)[name = tensor("reduce_mean_48_cast")]; + tensor sub_32_cast = sub(x = reshape_64_cast, y = reduce_mean_48_cast)[name = tensor("sub_32_cast")]; + tensor square_16_cast = square(x = sub_32_cast)[name = tensor("square_16_cast")]; + tensor reduce_mean_50_axes_0 = const()[name = tensor("reduce_mean_50_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_50_keep_dims_0 = const()[name = tensor("reduce_mean_50_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_50_cast = reduce_mean(axes = reduce_mean_50_axes_0, keep_dims = reduce_mean_50_keep_dims_0, x = square_16_cast)[name = tensor("reduce_mean_50_cast")]; + tensor add_32_y_0_to_fp16 = const()[name = tensor("add_32_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor 
add_32_cast = add(x = reduce_mean_50_cast, y = add_32_y_0_to_fp16)[name = tensor("add_32_cast")]; + tensor sqrt_16_cast = sqrt(x = add_32_cast)[name = tensor("sqrt_16_cast")]; + tensor real_div_16_cast = real_div(x = sub_32_cast, y = sqrt_16_cast)[name = tensor("real_div_16_cast")]; + tensor reshape_65_shape_0 = const()[name = tensor("reshape_65_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_65_cast = reshape(shape = reshape_65_shape_0, x = real_div_16_cast)[name = tensor("reshape_65_cast")]; + tensor add_33_gamma_0_to_fp16 = const()[name = tensor("add_33_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(319019200)))]; + tensor add_33_beta_0_to_fp16 = const()[name = tensor("add_33_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(319021824)))]; + tensor add_33_epsilon_0_to_fp16 = const()[name = tensor("add_33_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_33_cast = batch_norm(beta = add_33_beta_0_to_fp16, epsilon = add_33_epsilon_0_to_fp16, gamma = add_33_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_65_cast)[name = tensor("add_33_cast")]; + tensor input_315_cast = silu(x = add_33_cast)[name = tensor("input_315_cast")]; + tensor var_4961 = const()[name = tensor("op_4961"), val = tensor([1, 1])]; + tensor var_4963 = const()[name = tensor("op_4963"), val = tensor([1, 1])]; + tensor hidden_states_201_pad_type_0 = const()[name = tensor("hidden_states_201_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_201_pad_0 = const()[name = tensor("hidden_states_201_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor mid_block_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(319024448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(326397312))), name = tensor("mid_block_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor mid_block_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("mid_block_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(326397440)))]; + tensor hidden_states_201_cast = conv(bias = mid_block_resnets_0_conv1_bias_to_fp16, dilations = var_4963, groups = var_4943, pad = hidden_states_201_pad_0, pad_type = hidden_states_201_pad_type_0, strides = var_4961, weight = mid_block_resnets_0_conv1_weight_to_fp16_palettized, x = input_315_cast)[name = tensor("hidden_states_201_cast")]; + tensor var_4969 = const()[name = tensor("op_4969"), val = tensor([1, 1])]; + tensor var_4971 = const()[name = tensor("op_4971"), val = tensor([1, 1])]; + tensor temb_13_pad_type_0 = const()[name = tensor("temb_13_pad_type_0"), val = tensor("custom")]; + tensor temb_13_pad_0 = const()[name = tensor("temb_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(326400064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(327219328))), name = tensor("mid_block_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = 
tensor("mid_block_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(327219456)))]; + tensor temb_13_cast = conv(bias = mid_block_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_4971, groups = var_4943, pad = temb_13_pad_0, pad_type = temb_13_pad_type_0, strides = var_4969, weight = mid_block_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_13_cast")]; + tensor input_319_cast = add(x = hidden_states_201_cast, y = temb_13_cast)[name = tensor("input_319_cast")]; + tensor reshape_68_shape_0 = const()[name = tensor("reshape_68_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_68_cast = reshape(shape = reshape_68_shape_0, x = input_319_cast)[name = tensor("reshape_68_cast")]; + tensor reduce_mean_51_axes_0 = const()[name = tensor("reduce_mean_51_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_51_keep_dims_0 = const()[name = tensor("reduce_mean_51_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_51_cast = reduce_mean(axes = reduce_mean_51_axes_0, keep_dims = reduce_mean_51_keep_dims_0, x = reshape_68_cast)[name = tensor("reduce_mean_51_cast")]; + tensor sub_34_cast = sub(x = reshape_68_cast, y = reduce_mean_51_cast)[name = tensor("sub_34_cast")]; + tensor square_17_cast = square(x = sub_34_cast)[name = tensor("square_17_cast")]; + tensor reduce_mean_53_axes_0 = const()[name = tensor("reduce_mean_53_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_53_keep_dims_0 = const()[name = tensor("reduce_mean_53_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_53_cast = reduce_mean(axes = reduce_mean_53_axes_0, keep_dims = reduce_mean_53_keep_dims_0, x = square_17_cast)[name = tensor("reduce_mean_53_cast")]; + tensor add_34_y_0_to_fp16 = const()[name = tensor("add_34_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_34_cast = add(x = reduce_mean_53_cast, y = add_34_y_0_to_fp16)[name = tensor("add_34_cast")]; + tensor sqrt_17_cast = sqrt(x = add_34_cast)[name = tensor("sqrt_17_cast")]; + tensor real_div_17_cast = real_div(x = sub_34_cast, y = sqrt_17_cast)[name = tensor("real_div_17_cast")]; + tensor reshape_69_shape_0 = const()[name = tensor("reshape_69_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_69_cast = reshape(shape = reshape_69_shape_0, x = real_div_17_cast)[name = tensor("reshape_69_cast")]; + tensor add_35_gamma_0_to_fp16 = const()[name = tensor("add_35_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(327222080)))]; + tensor add_35_beta_0_to_fp16 = const()[name = tensor("add_35_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(327224704)))]; + tensor add_35_epsilon_0_to_fp16 = const()[name = tensor("add_35_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_35_cast = batch_norm(beta = add_35_beta_0_to_fp16, epsilon = add_35_epsilon_0_to_fp16, gamma = add_35_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_69_cast)[name = tensor("add_35_cast")]; + tensor input_323_cast = silu(x = add_35_cast)[name = tensor("input_323_cast")]; + tensor var_4981 = const()[name = tensor("op_4981"), val = tensor([1, 1])]; + tensor var_4983 = const()[name = tensor("op_4983"), val = tensor([1, 1])]; + tensor hidden_states_203_pad_type_0 = const()[name = tensor("hidden_states_203_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_203_pad_0 = const()[name = 
tensor("hidden_states_203_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor mid_block_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(327227328))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(334600192))), name = tensor("mid_block_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor mid_block_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("mid_block_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(334600320)))]; + tensor hidden_states_203_cast = conv(bias = mid_block_resnets_0_conv2_bias_to_fp16, dilations = var_4983, groups = var_4943, pad = hidden_states_203_pad_0, pad_type = hidden_states_203_pad_type_0, strides = var_4981, weight = mid_block_resnets_0_conv2_weight_to_fp16_palettized, x = input_323_cast)[name = tensor("hidden_states_203_cast")]; + tensor hidden_states_205_cast = add(x = input_311_cast, y = hidden_states_203_cast)[name = tensor("hidden_states_205_cast")]; + tensor reshape_72_shape_0 = const()[name = tensor("reshape_72_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_72_cast = reshape(shape = reshape_72_shape_0, x = hidden_states_205_cast)[name = tensor("reshape_72_cast")]; + tensor reduce_mean_54_axes_0 = const()[name = tensor("reduce_mean_54_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_54_keep_dims_0 = const()[name = tensor("reduce_mean_54_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_54_cast = reduce_mean(axes = reduce_mean_54_axes_0, keep_dims = reduce_mean_54_keep_dims_0, x = reshape_72_cast)[name = tensor("reduce_mean_54_cast")]; + tensor sub_36_cast = sub(x = reshape_72_cast, y = reduce_mean_54_cast)[name = tensor("sub_36_cast")]; + tensor square_18_cast = square(x = sub_36_cast)[name = tensor("square_18_cast")]; + tensor reduce_mean_56_axes_0 = const()[name = tensor("reduce_mean_56_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_56_keep_dims_0 = const()[name = tensor("reduce_mean_56_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_56_cast = reduce_mean(axes = reduce_mean_56_axes_0, keep_dims = reduce_mean_56_keep_dims_0, x = square_18_cast)[name = tensor("reduce_mean_56_cast")]; + tensor add_36_y_0_to_fp16 = const()[name = tensor("add_36_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_36_cast = add(x = reduce_mean_56_cast, y = add_36_y_0_to_fp16)[name = tensor("add_36_cast")]; + tensor sqrt_18_cast = sqrt(x = add_36_cast)[name = tensor("sqrt_18_cast")]; + tensor real_div_18_cast = real_div(x = sub_36_cast, y = sqrt_18_cast)[name = tensor("real_div_18_cast")]; + tensor reshape_73_shape_0 = const()[name = tensor("reshape_73_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_73_cast = reshape(shape = reshape_73_shape_0, x = real_div_18_cast)[name = tensor("reshape_73_cast")]; + tensor add_37_gamma_0_to_fp16 = const()[name = tensor("add_37_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(334602944)))]; + tensor add_37_beta_0_to_fp16 = const()[name = tensor("add_37_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(334605568)))]; + tensor add_37_epsilon_0_to_fp16 = const()[name = tensor("add_37_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_37_cast = batch_norm(beta = add_37_beta_0_to_fp16, epsilon = 
add_37_epsilon_0_to_fp16, gamma = add_37_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_73_cast)[name = tensor("add_37_cast")]; + tensor var_5021 = const()[name = tensor("op_5021"), val = tensor([1, 1])]; + tensor var_5023 = const()[name = tensor("op_5023"), val = tensor([1, 1])]; + tensor hidden_states_207_pad_type_0 = const()[name = tensor("hidden_states_207_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_207_pad_0 = const()[name = tensor("hidden_states_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(334608192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335427456))), name = tensor("mid_block_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335427584)))]; + tensor hidden_states_207_cast = conv(bias = mid_block_attentions_0_proj_in_bias_to_fp16, dilations = var_5023, groups = var_4943, pad = hidden_states_207_pad_0, pad_type = hidden_states_207_pad_type_0, strides = var_5021, weight = mid_block_attentions_0_proj_in_weight_to_fp16_palettized, x = add_37_cast)[name = tensor("hidden_states_207_cast")]; + tensor var_5028 = const()[name = tensor("op_5028"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_145_cast = reshape(shape = var_5028, x = hidden_states_207_cast)[name = tensor("inputs_145_cast")]; + tensor var_5038 = const()[name = tensor("op_5038"), val = tensor([1])]; + tensor channels_mean_145_cast = reduce_mean(axes = var_5038, keep_dims = var_4938, x = inputs_145_cast)[name = tensor("channels_mean_145_cast")]; + tensor zero_mean_145_cast = sub(x = inputs_145_cast, y = channels_mean_145_cast)[name = tensor("zero_mean_145_cast")]; + tensor zero_mean_sq_145_cast = mul(x = zero_mean_145_cast, y = zero_mean_145_cast)[name = tensor("zero_mean_sq_145_cast")]; + tensor var_5042 = const()[name = tensor("op_5042"), val = tensor([1])]; + tensor var_5043_cast = reduce_mean(axes = var_5042, keep_dims = var_4938, x = zero_mean_sq_145_cast)[name = tensor("op_5043_cast")]; + tensor var_5044_to_fp16 = const()[name = tensor("op_5044_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5045_cast = add(x = var_5043_cast, y = var_5044_to_fp16)[name = tensor("op_5045_cast")]; + tensor denom_145_epsilon_0_to_fp16 = const()[name = tensor("denom_145_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_145_cast = rsqrt(epsilon = denom_145_epsilon_0_to_fp16, x = var_5045_cast)[name = tensor("denom_145_cast")]; + tensor out_145_cast = mul(x = zero_mean_145_cast, y = denom_145_cast)[name = tensor("out_145_cast")]; + tensor var_5049_to_fp16 = const()[name = tensor("op_5049_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335430208)))]; + tensor var_5050_cast = add(x = out_145_cast, y = var_5049_to_fp16)[name = tensor("op_5050_cast")]; + tensor var_5052_to_fp16 = const()[name = tensor("op_5052_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335432832)))]; + tensor hidden_states_209_cast = mul(x = var_5050_cast, y = var_5052_to_fp16)[name = tensor("hidden_states_209_cast")]; + tensor 
var_5059 = const()[name = tensor("op_5059"), val = tensor([1, 1])]; + tensor var_5061 = const()[name = tensor("op_5061"), val = tensor([1, 1])]; + tensor q_97_pad_type_0 = const()[name = tensor("q_97_pad_type_0"), val = tensor("custom")]; + tensor q_97_pad_0 = const()[name = tensor("q_97_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335435456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336254720))), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_97_cast = conv(dilations = var_5061, groups = var_4943, pad = q_97_pad_0, pad_type = q_97_pad_type_0, strides = var_5059, weight = mid_block_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_209_cast)[name = tensor("q_97_cast")]; + tensor var_5065 = const()[name = tensor("op_5065"), val = tensor([1, 1])]; + tensor var_5067 = const()[name = tensor("op_5067"), val = tensor([1, 1])]; + tensor k_97_pad_type_0 = const()[name = tensor("k_97_pad_type_0"), val = tensor("custom")]; + tensor k_97_pad_0 = const()[name = tensor("k_97_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336254848))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(337074112))), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_97_cast = conv(dilations = var_5067, groups = var_4943, pad = k_97_pad_0, pad_type = k_97_pad_type_0, strides = var_5065, weight = mid_block_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_209_cast)[name = tensor("k_97_cast")]; + tensor var_5071 = const()[name = tensor("op_5071"), val = tensor([1, 1])]; + tensor var_5073 = const()[name = tensor("op_5073"), val = tensor([1, 1])]; + tensor v_97_pad_type_0 = const()[name = tensor("v_97_pad_type_0"), val = tensor("custom")]; + tensor v_97_pad_0 = const()[name = tensor("v_97_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(337074240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(337893504))), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_97_cast = conv(dilations = var_5073, groups = var_4943, pad = v_97_pad_0, pad_type = v_97_pad_type_0, strides = var_5071, weight = mid_block_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_209_cast)[name = tensor("v_97_cast")]; + tensor var_5077 = const()[name = tensor("op_5077"), val = tensor([2, 20, 64, -1])]; + tensor var_5078_cast = reshape(shape = var_5077, x = q_97_cast)[name = tensor("op_5078_cast")]; + tensor var_5079 = const()[name = tensor("op_5079"), val = tensor([2, 20, 64, -1])]; + tensor var_5080_cast = reshape(shape = var_5079, x = 
k_97_cast)[name = tensor("op_5080_cast")]; + tensor var_5081 = const()[name = tensor("op_5081"), val = tensor([2, 20, 64, -1])]; + tensor var_5082_cast = reshape(shape = var_5081, x = v_97_cast)[name = tensor("op_5082_cast")]; + tensor attn_weights_193_transpose_x_0 = const()[name = tensor("attn_weights_193_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_193_transpose_y_0 = const()[name = tensor("attn_weights_193_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_193_cast = matmul(transpose_x = attn_weights_193_transpose_x_0, transpose_y = attn_weights_193_transpose_y_0, x = var_5078_cast, y = var_5080_cast)[name = tensor("attn_weights_193_cast")]; + tensor var_4934_to_fp16 = const()[name = tensor("op_4934_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_195_cast = mul(x = attn_weights_193_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_195_cast")]; + tensor var_5086_cast = softmax(axis = var_4927, x = attn_weights_195_cast)[name = tensor("op_5086_cast")]; + tensor attn_97_transpose_x_0 = const()[name = tensor("attn_97_transpose_x_0"), val = tensor(false)]; + tensor attn_97_transpose_y_0 = const()[name = tensor("attn_97_transpose_y_0"), val = tensor(true)]; + tensor attn_97_cast = matmul(transpose_x = attn_97_transpose_x_0, transpose_y = attn_97_transpose_y_0, x = var_5082_cast, y = var_5086_cast)[name = tensor("attn_97_cast")]; + tensor var_5090 = const()[name = tensor("op_5090"), val = tensor([2, 1280, 1, -1])]; + tensor input_327_cast = reshape(shape = var_5090, x = attn_97_cast)[name = tensor("input_327_cast")]; + tensor var_5095 = const()[name = tensor("op_5095"), val = tensor([1, 1])]; + tensor var_5097 = const()[name = tensor("op_5097"), val = tensor([1, 1])]; + tensor var_5099_pad_type_0 = const()[name = tensor("op_5099_pad_type_0"), val = tensor("custom")]; + tensor var_5099_pad_0 = const()[name = tensor("op_5099_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(337893632))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338712896))), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338713024)))]; + tensor var_5099_cast = conv(bias = mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_5097, groups = var_4943, pad = var_5099_pad_0, pad_type = var_5099_pad_type_0, strides = var_5095, weight = mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_327_cast)[name = tensor("op_5099_cast")]; + tensor inputs_147_cast = add(x = var_5099_cast, y = inputs_145_cast)[name = tensor("inputs_147_cast")]; + tensor var_5103 = const()[name = tensor("op_5103"), val = tensor([1])]; + tensor channels_mean_147_cast = reduce_mean(axes = var_5103, keep_dims = var_4938, x = inputs_147_cast)[name = tensor("channels_mean_147_cast")]; + tensor zero_mean_147_cast = sub(x = inputs_147_cast, y = channels_mean_147_cast)[name = tensor("zero_mean_147_cast")]; + tensor zero_mean_sq_147_cast = mul(x = 
zero_mean_147_cast, y = zero_mean_147_cast)[name = tensor("zero_mean_sq_147_cast")]; + tensor var_5107 = const()[name = tensor("op_5107"), val = tensor([1])]; + tensor var_5108_cast = reduce_mean(axes = var_5107, keep_dims = var_4938, x = zero_mean_sq_147_cast)[name = tensor("op_5108_cast")]; + tensor var_5109_to_fp16 = const()[name = tensor("op_5109_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5110_cast = add(x = var_5108_cast, y = var_5109_to_fp16)[name = tensor("op_5110_cast")]; + tensor denom_147_epsilon_0_to_fp16 = const()[name = tensor("denom_147_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_147_cast = rsqrt(epsilon = denom_147_epsilon_0_to_fp16, x = var_5110_cast)[name = tensor("denom_147_cast")]; + tensor out_147_cast = mul(x = zero_mean_147_cast, y = denom_147_cast)[name = tensor("out_147_cast")]; + tensor var_5114_to_fp16 = const()[name = tensor("op_5114_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338715648)))]; + tensor var_5115_cast = add(x = out_147_cast, y = var_5114_to_fp16)[name = tensor("op_5115_cast")]; + tensor var_5117_to_fp16 = const()[name = tensor("op_5117_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338718272)))]; + tensor hidden_states_211_cast = mul(x = var_5115_cast, y = var_5117_to_fp16)[name = tensor("hidden_states_211_cast")]; + tensor var_5124 = const()[name = tensor("op_5124"), val = tensor([1, 1])]; + tensor var_5126 = const()[name = tensor("op_5126"), val = tensor([1, 1])]; + tensor q_99_pad_type_0 = const()[name = tensor("q_99_pad_type_0"), val = tensor("custom")]; + tensor q_99_pad_0 = const()[name = tensor("q_99_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338720896))), lut = tensor([-0x1.964p-7, 0x1.96p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_99_cast = conv(dilations = var_5126, groups = var_4943, pad = q_99_pad_0, pad_type = q_99_pad_type_0, strides = var_5124, weight = mid_block_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_211_cast)[name = tensor("q_99_cast")]; + tensor var_5130 = const()[name = tensor("op_5130"), val = tensor([1, 1])]; + tensor var_5132 = const()[name = tensor("op_5132"), val = tensor([1, 1])]; + tensor k_99_pad_type_0 = const()[name = tensor("k_99_pad_type_0"), val = tensor("custom")]; + tensor k_99_pad_0 = const()[name = tensor("k_99_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338925760))), lut = tensor([-0x1.444p-7, 0x1.44cp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_99_cast = conv(dilations = var_5132, groups = var_4943, pad = k_99_pad_0, pad_type = k_99_pad_type_0, strides = var_5130, weight = mid_block_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_99_cast")]; + tensor var_5136 = const()[name = tensor("op_5136"), val = tensor([1, 1])]; + tensor var_5138 = const()[name = 
tensor("op_5138"), val = tensor([1, 1])]; + tensor v_99_pad_type_0 = const()[name = tensor("v_99_pad_type_0"), val = tensor("custom")]; + tensor v_99_pad_0 = const()[name = tensor("v_99_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(339253504))), lut = tensor([-0x1.658p-7, 0x1.66p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_99_cast = conv(dilations = var_5138, groups = var_4943, pad = v_99_pad_0, pad_type = v_99_pad_type_0, strides = var_5136, weight = mid_block_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_99_cast")]; + tensor var_5142 = const()[name = tensor("op_5142"), val = tensor([2, 20, 64, -1])]; + tensor var_5143_cast = reshape(shape = var_5142, x = q_99_cast)[name = tensor("op_5143_cast")]; + tensor var_5144 = const()[name = tensor("op_5144"), val = tensor([2, 20, 64, -1])]; + tensor var_5145_cast = reshape(shape = var_5144, x = k_99_cast)[name = tensor("op_5145_cast")]; + tensor var_5146 = const()[name = tensor("op_5146"), val = tensor([2, 20, 64, -1])]; + tensor var_5147_cast = reshape(shape = var_5146, x = v_99_cast)[name = tensor("op_5147_cast")]; + tensor attn_weights_197_transpose_x_0 = const()[name = tensor("attn_weights_197_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_197_transpose_y_0 = const()[name = tensor("attn_weights_197_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_197_cast = matmul(transpose_x = attn_weights_197_transpose_x_0, transpose_y = attn_weights_197_transpose_y_0, x = var_5143_cast, y = var_5145_cast)[name = tensor("attn_weights_197_cast")]; + tensor attn_weights_199_cast = mul(x = attn_weights_197_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_199_cast")]; + tensor var_5151_cast = softmax(axis = var_4927, x = attn_weights_199_cast)[name = tensor("op_5151_cast")]; + tensor attn_99_transpose_x_0 = const()[name = tensor("attn_99_transpose_x_0"), val = tensor(false)]; + tensor attn_99_transpose_y_0 = const()[name = tensor("attn_99_transpose_y_0"), val = tensor(true)]; + tensor attn_99_cast = matmul(transpose_x = attn_99_transpose_x_0, transpose_y = attn_99_transpose_y_0, x = var_5147_cast, y = var_5151_cast)[name = tensor("attn_99_cast")]; + tensor var_5155 = const()[name = tensor("op_5155"), val = tensor([2, 1280, 1, -1])]; + tensor input_329_cast = reshape(shape = var_5155, x = attn_99_cast)[name = tensor("input_329_cast")]; + tensor var_5160 = const()[name = tensor("op_5160"), val = tensor([1, 1])]; + tensor var_5162 = const()[name = tensor("op_5162"), val = tensor([1, 1])]; + tensor var_5164_pad_type_0 = const()[name = tensor("op_5164_pad_type_0"), val = tensor("custom")]; + tensor var_5164_pad_0 = const()[name = tensor("op_5164_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(339581248))), lut = tensor([-0x1.78cp-8, 0x1.764p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = 
const()[name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(339786112)))]; + tensor var_5164_cast = conv(bias = mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_5162, groups = var_4943, pad = var_5164_pad_0, pad_type = var_5164_pad_type_0, strides = var_5160, weight = mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_329_cast)[name = tensor("op_5164_cast")]; + tensor inputs_149_cast = add(x = var_5164_cast, y = inputs_147_cast)[name = tensor("inputs_149_cast")]; + tensor var_5168 = const()[name = tensor("op_5168"), val = tensor([1])]; + tensor channels_mean_149_cast = reduce_mean(axes = var_5168, keep_dims = var_4938, x = inputs_149_cast)[name = tensor("channels_mean_149_cast")]; + tensor zero_mean_149_cast = sub(x = inputs_149_cast, y = channels_mean_149_cast)[name = tensor("zero_mean_149_cast")]; + tensor zero_mean_sq_149_cast = mul(x = zero_mean_149_cast, y = zero_mean_149_cast)[name = tensor("zero_mean_sq_149_cast")]; + tensor var_5172 = const()[name = tensor("op_5172"), val = tensor([1])]; + tensor var_5173_cast = reduce_mean(axes = var_5172, keep_dims = var_4938, x = zero_mean_sq_149_cast)[name = tensor("op_5173_cast")]; + tensor var_5174_to_fp16 = const()[name = tensor("op_5174_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5175_cast = add(x = var_5173_cast, y = var_5174_to_fp16)[name = tensor("op_5175_cast")]; + tensor denom_149_epsilon_0_to_fp16 = const()[name = tensor("denom_149_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_149_cast = rsqrt(epsilon = denom_149_epsilon_0_to_fp16, x = var_5175_cast)[name = tensor("denom_149_cast")]; + tensor out_149_cast = mul(x = zero_mean_149_cast, y = denom_149_cast)[name = tensor("out_149_cast")]; + tensor var_5179_to_fp16 = const()[name = tensor("op_5179_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(339788736)))]; + tensor var_5180_cast = add(x = out_149_cast, y = var_5179_to_fp16)[name = tensor("op_5180_cast")]; + tensor var_5182_to_fp16 = const()[name = tensor("op_5182_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(339791360)))]; + tensor input_331_cast = mul(x = var_5180_cast, y = var_5182_to_fp16)[name = tensor("input_331_cast")]; + tensor var_5190 = const()[name = tensor("op_5190"), val = tensor([1, 1])]; + tensor var_5192 = const()[name = tensor("op_5192"), val = tensor([1, 1])]; + tensor var_5194_pad_type_0 = const()[name = tensor("op_5194_pad_type_0"), val = tensor("custom")]; + tensor var_5194_pad_0 = const()[name = tensor("op_5194_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(339793984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(346347648))), name = tensor("mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(346347776)))]; + tensor 
var_5194_cast = conv(bias = mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_5192, groups = var_4943, pad = var_5194_pad_0, pad_type = var_5194_pad_type_0, strides = var_5190, weight = mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_331_cast)[name = tensor("op_5194_cast")]; + tensor var_5195_split_sizes_0 = const()[name = tensor("op_5195_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5195_axis_0 = const()[name = tensor("op_5195_axis_0"), val = tensor(1)]; + tensor var_5195_cast_0, tensor var_5195_cast_1 = split(axis = var_5195_axis_0, split_sizes = var_5195_split_sizes_0, x = var_5194_cast)[name = tensor("op_5195_cast")]; + tensor var_5197_mode_0 = const()[name = tensor("op_5197_mode_0"), val = tensor("EXACT")]; + tensor var_5197_cast = gelu(mode = var_5197_mode_0, x = var_5195_cast_1)[name = tensor("op_5197_cast")]; + tensor input_333_cast = mul(x = var_5195_cast_0, y = var_5197_cast)[name = tensor("input_333_cast")]; + tensor var_5201 = const()[name = tensor("op_5201"), val = tensor([1, 1])]; + tensor var_5203 = const()[name = tensor("op_5203"), val = tensor([1, 1])]; + tensor var_5205_pad_type_0 = const()[name = tensor("op_5205_pad_type_0"), val = tensor("custom")]; + tensor var_5205_pad_0 = const()[name = tensor("op_5205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(346368320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349645184))), name = tensor("mid_block_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349645312)))]; + tensor var_5205_cast = conv(bias = mid_block_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_5203, groups = var_4943, pad = var_5205_pad_0, pad_type = var_5205_pad_type_0, strides = var_5201, weight = mid_block_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_333_cast)[name = tensor("op_5205_cast")]; + tensor inputs_151_cast = add(x = var_5205_cast, y = inputs_149_cast)[name = tensor("inputs_151_cast")]; + tensor var_5215 = const()[name = tensor("op_5215"), val = tensor([1])]; + tensor channels_mean_151_cast = reduce_mean(axes = var_5215, keep_dims = var_4938, x = inputs_151_cast)[name = tensor("channels_mean_151_cast")]; + tensor zero_mean_151_cast = sub(x = inputs_151_cast, y = channels_mean_151_cast)[name = tensor("zero_mean_151_cast")]; + tensor zero_mean_sq_151_cast = mul(x = zero_mean_151_cast, y = zero_mean_151_cast)[name = tensor("zero_mean_sq_151_cast")]; + tensor var_5219 = const()[name = tensor("op_5219"), val = tensor([1])]; + tensor var_5220_cast = reduce_mean(axes = var_5219, keep_dims = var_4938, x = zero_mean_sq_151_cast)[name = tensor("op_5220_cast")]; + tensor var_5221_to_fp16 = const()[name = tensor("op_5221_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5222_cast = add(x = var_5220_cast, y = var_5221_to_fp16)[name = tensor("op_5222_cast")]; + tensor denom_151_epsilon_0_to_fp16 = const()[name = tensor("denom_151_epsilon_0_to_fp16"), val = 
tensor(0x1p-24)]; + tensor denom_151_cast = rsqrt(epsilon = denom_151_epsilon_0_to_fp16, x = var_5222_cast)[name = tensor("denom_151_cast")]; + tensor out_151_cast = mul(x = zero_mean_151_cast, y = denom_151_cast)[name = tensor("out_151_cast")]; + tensor var_5226_to_fp16 = const()[name = tensor("op_5226_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349647936)))]; + tensor var_5227_cast = add(x = out_151_cast, y = var_5226_to_fp16)[name = tensor("op_5227_cast")]; + tensor var_5229_to_fp16 = const()[name = tensor("op_5229_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349650560)))]; + tensor hidden_states_215_cast = mul(x = var_5227_cast, y = var_5229_to_fp16)[name = tensor("hidden_states_215_cast")]; + tensor var_5236 = const()[name = tensor("op_5236"), val = tensor([1, 1])]; + tensor var_5238 = const()[name = tensor("op_5238"), val = tensor([1, 1])]; + tensor q_101_pad_type_0 = const()[name = tensor("q_101_pad_type_0"), val = tensor("custom")]; + tensor q_101_pad_0 = const()[name = tensor("q_101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349653184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(350472448))), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_101_cast = conv(dilations = var_5238, groups = var_4943, pad = q_101_pad_0, pad_type = q_101_pad_type_0, strides = var_5236, weight = mid_block_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_215_cast)[name = tensor("q_101_cast")]; + tensor var_5242 = const()[name = tensor("op_5242"), val = tensor([1, 1])]; + tensor var_5244 = const()[name = tensor("op_5244"), val = tensor([1, 1])]; + tensor k_101_pad_type_0 = const()[name = tensor("k_101_pad_type_0"), val = tensor("custom")]; + tensor k_101_pad_0 = const()[name = tensor("k_101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(350472576))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(351291840))), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_101_cast = conv(dilations = var_5244, groups = var_4943, pad = k_101_pad_0, pad_type = k_101_pad_type_0, strides = var_5242, weight = mid_block_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_215_cast)[name = tensor("k_101_cast")]; + tensor var_5248 = const()[name = tensor("op_5248"), val = tensor([1, 1])]; + tensor var_5250 = const()[name = tensor("op_5250"), val = tensor([1, 1])]; + tensor v_101_pad_type_0 = const()[name = tensor("v_101_pad_type_0"), val = tensor("custom")]; + tensor v_101_pad_0 = const()[name = tensor("v_101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(351291968))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(352111232))), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_101_cast = conv(dilations = var_5250, groups = var_4943, pad = v_101_pad_0, pad_type = v_101_pad_type_0, strides = var_5248, weight = mid_block_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_215_cast)[name = tensor("v_101_cast")]; + tensor var_5254 = const()[name = tensor("op_5254"), val = tensor([2, 20, 64, -1])]; + tensor var_5255_cast = reshape(shape = var_5254, x = q_101_cast)[name = tensor("op_5255_cast")]; + tensor var_5256 = const()[name = tensor("op_5256"), val = tensor([2, 20, 64, -1])]; + tensor var_5257_cast = reshape(shape = var_5256, x = k_101_cast)[name = tensor("op_5257_cast")]; + tensor var_5258 = const()[name = tensor("op_5258"), val = tensor([2, 20, 64, -1])]; + tensor var_5259_cast = reshape(shape = var_5258, x = v_101_cast)[name = tensor("op_5259_cast")]; + tensor attn_weights_201_transpose_x_0 = const()[name = tensor("attn_weights_201_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_201_transpose_y_0 = const()[name = tensor("attn_weights_201_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_201_cast = matmul(transpose_x = attn_weights_201_transpose_x_0, transpose_y = attn_weights_201_transpose_y_0, x = var_5255_cast, y = var_5257_cast)[name = tensor("attn_weights_201_cast")]; + tensor attn_weights_203_cast = mul(x = attn_weights_201_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_203_cast")]; + tensor var_5263_cast = softmax(axis = var_4927, x = attn_weights_203_cast)[name = tensor("op_5263_cast")]; + tensor attn_101_transpose_x_0 = const()[name = tensor("attn_101_transpose_x_0"), val = tensor(false)]; + tensor attn_101_transpose_y_0 = const()[name = tensor("attn_101_transpose_y_0"), val = tensor(true)]; + tensor attn_101_cast = matmul(transpose_x = attn_101_transpose_x_0, transpose_y = attn_101_transpose_y_0, x = var_5259_cast, y = var_5263_cast)[name = tensor("attn_101_cast")]; + tensor var_5267 = const()[name = tensor("op_5267"), val = tensor([2, 1280, 1, -1])]; + tensor input_335_cast = reshape(shape = var_5267, x = attn_101_cast)[name = tensor("input_335_cast")]; + tensor var_5272 = const()[name = tensor("op_5272"), val = tensor([1, 1])]; + tensor var_5274 = const()[name = tensor("op_5274"), val = tensor([1, 1])]; + tensor var_5276_pad_type_0 = const()[name = tensor("op_5276_pad_type_0"), val = tensor("custom")]; + tensor var_5276_pad_0 = const()[name = tensor("op_5276_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(352111360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(352930624))), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(352930752)))]; + tensor var_5276_cast = conv(bias = mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, 
dilations = var_5274, groups = var_4943, pad = var_5276_pad_0, pad_type = var_5276_pad_type_0, strides = var_5272, weight = mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_335_cast)[name = tensor("op_5276_cast")]; + tensor inputs_153_cast = add(x = var_5276_cast, y = inputs_151_cast)[name = tensor("inputs_153_cast")]; + tensor var_5280 = const()[name = tensor("op_5280"), val = tensor([1])]; + tensor channels_mean_153_cast = reduce_mean(axes = var_5280, keep_dims = var_4938, x = inputs_153_cast)[name = tensor("channels_mean_153_cast")]; + tensor zero_mean_153_cast = sub(x = inputs_153_cast, y = channels_mean_153_cast)[name = tensor("zero_mean_153_cast")]; + tensor zero_mean_sq_153_cast = mul(x = zero_mean_153_cast, y = zero_mean_153_cast)[name = tensor("zero_mean_sq_153_cast")]; + tensor var_5284 = const()[name = tensor("op_5284"), val = tensor([1])]; + tensor var_5285_cast = reduce_mean(axes = var_5284, keep_dims = var_4938, x = zero_mean_sq_153_cast)[name = tensor("op_5285_cast")]; + tensor var_5286_to_fp16 = const()[name = tensor("op_5286_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5287_cast = add(x = var_5285_cast, y = var_5286_to_fp16)[name = tensor("op_5287_cast")]; + tensor denom_153_epsilon_0_to_fp16 = const()[name = tensor("denom_153_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_153_cast = rsqrt(epsilon = denom_153_epsilon_0_to_fp16, x = var_5287_cast)[name = tensor("denom_153_cast")]; + tensor out_153_cast = mul(x = zero_mean_153_cast, y = denom_153_cast)[name = tensor("out_153_cast")]; + tensor var_5291_to_fp16 = const()[name = tensor("op_5291_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(352933376)))]; + tensor var_5292_cast = add(x = out_153_cast, y = var_5291_to_fp16)[name = tensor("op_5292_cast")]; + tensor var_5294_to_fp16 = const()[name = tensor("op_5294_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(352936000)))]; + tensor hidden_states_217_cast = mul(x = var_5292_cast, y = var_5294_to_fp16)[name = tensor("hidden_states_217_cast")]; + tensor var_5301 = const()[name = tensor("op_5301"), val = tensor([1, 1])]; + tensor var_5303 = const()[name = tensor("op_5303"), val = tensor([1, 1])]; + tensor q_103_pad_type_0 = const()[name = tensor("q_103_pad_type_0"), val = tensor("custom")]; + tensor q_103_pad_0 = const()[name = tensor("q_103_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(352938624))), lut = tensor([-0x1.acp-7, 0x1.ab4p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_103_cast = conv(dilations = var_5303, groups = var_4943, pad = q_103_pad_0, pad_type = q_103_pad_type_0, strides = var_5301, weight = mid_block_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_217_cast)[name = tensor("q_103_cast")]; + tensor var_5307 = const()[name = tensor("op_5307"), val = tensor([1, 1])]; + tensor var_5309 = const()[name = tensor("op_5309"), val = tensor([1, 1])]; + tensor k_103_pad_type_0 = const()[name = tensor("k_103_pad_type_0"), val = tensor("custom")]; + tensor k_103_pad_0 = const()[name = tensor("k_103_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
mid_block_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(353143488))), lut = tensor([-0x1.514p-7, 0x1.528p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_103_cast = conv(dilations = var_5309, groups = var_4943, pad = k_103_pad_0, pad_type = k_103_pad_type_0, strides = var_5307, weight = mid_block_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_103_cast")]; + tensor var_5313 = const()[name = tensor("op_5313"), val = tensor([1, 1])]; + tensor var_5315 = const()[name = tensor("op_5315"), val = tensor([1, 1])]; + tensor v_103_pad_type_0 = const()[name = tensor("v_103_pad_type_0"), val = tensor("custom")]; + tensor v_103_pad_0 = const()[name = tensor("v_103_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(353471232))), lut = tensor([-0x1.88cp-7, 0x1.884p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_103_cast = conv(dilations = var_5315, groups = var_4943, pad = v_103_pad_0, pad_type = v_103_pad_type_0, strides = var_5313, weight = mid_block_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_103_cast")]; + tensor var_5319 = const()[name = tensor("op_5319"), val = tensor([2, 20, 64, -1])]; + tensor var_5320_cast = reshape(shape = var_5319, x = q_103_cast)[name = tensor("op_5320_cast")]; + tensor var_5321 = const()[name = tensor("op_5321"), val = tensor([2, 20, 64, -1])]; + tensor var_5322_cast = reshape(shape = var_5321, x = k_103_cast)[name = tensor("op_5322_cast")]; + tensor var_5323 = const()[name = tensor("op_5323"), val = tensor([2, 20, 64, -1])]; + tensor var_5324_cast = reshape(shape = var_5323, x = v_103_cast)[name = tensor("op_5324_cast")]; + tensor attn_weights_205_transpose_x_0 = const()[name = tensor("attn_weights_205_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_205_transpose_y_0 = const()[name = tensor("attn_weights_205_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_205_cast = matmul(transpose_x = attn_weights_205_transpose_x_0, transpose_y = attn_weights_205_transpose_y_0, x = var_5320_cast, y = var_5322_cast)[name = tensor("attn_weights_205_cast")]; + tensor attn_weights_207_cast = mul(x = attn_weights_205_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_207_cast")]; + tensor var_5328_cast = softmax(axis = var_4927, x = attn_weights_207_cast)[name = tensor("op_5328_cast")]; + tensor attn_103_transpose_x_0 = const()[name = tensor("attn_103_transpose_x_0"), val = tensor(false)]; + tensor attn_103_transpose_y_0 = const()[name = tensor("attn_103_transpose_y_0"), val = tensor(true)]; + tensor attn_103_cast = matmul(transpose_x = attn_103_transpose_x_0, transpose_y = attn_103_transpose_y_0, x = var_5324_cast, y = var_5328_cast)[name = tensor("attn_103_cast")]; + tensor var_5332 = const()[name = tensor("op_5332"), val = tensor([2, 1280, 1, -1])]; + tensor input_337_cast = reshape(shape = var_5332, x = attn_103_cast)[name = tensor("input_337_cast")]; + tensor var_5337 = const()[name = 
tensor("op_5337"), val = tensor([1, 1])]; + tensor var_5339 = const()[name = tensor("op_5339"), val = tensor([1, 1])]; + tensor var_5341_pad_type_0 = const()[name = tensor("op_5341_pad_type_0"), val = tensor("custom")]; + tensor var_5341_pad_0 = const()[name = tensor("op_5341_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(353798976))), lut = tensor([-0x1.aacp-8, 0x1.ab4p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(354003840)))]; + tensor var_5341_cast = conv(bias = mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_5339, groups = var_4943, pad = var_5341_pad_0, pad_type = var_5341_pad_type_0, strides = var_5337, weight = mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_337_cast)[name = tensor("op_5341_cast")]; + tensor inputs_155_cast = add(x = var_5341_cast, y = inputs_153_cast)[name = tensor("inputs_155_cast")]; + tensor var_5345 = const()[name = tensor("op_5345"), val = tensor([1])]; + tensor channels_mean_155_cast = reduce_mean(axes = var_5345, keep_dims = var_4938, x = inputs_155_cast)[name = tensor("channels_mean_155_cast")]; + tensor zero_mean_155_cast = sub(x = inputs_155_cast, y = channels_mean_155_cast)[name = tensor("zero_mean_155_cast")]; + tensor zero_mean_sq_155_cast = mul(x = zero_mean_155_cast, y = zero_mean_155_cast)[name = tensor("zero_mean_sq_155_cast")]; + tensor var_5349 = const()[name = tensor("op_5349"), val = tensor([1])]; + tensor var_5350_cast = reduce_mean(axes = var_5349, keep_dims = var_4938, x = zero_mean_sq_155_cast)[name = tensor("op_5350_cast")]; + tensor var_5351_to_fp16 = const()[name = tensor("op_5351_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5352_cast = add(x = var_5350_cast, y = var_5351_to_fp16)[name = tensor("op_5352_cast")]; + tensor denom_155_epsilon_0_to_fp16 = const()[name = tensor("denom_155_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_155_cast = rsqrt(epsilon = denom_155_epsilon_0_to_fp16, x = var_5352_cast)[name = tensor("denom_155_cast")]; + tensor out_155_cast = mul(x = zero_mean_155_cast, y = denom_155_cast)[name = tensor("out_155_cast")]; + tensor var_5356_to_fp16 = const()[name = tensor("op_5356_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(354006464)))]; + tensor var_5357_cast = add(x = out_155_cast, y = var_5356_to_fp16)[name = tensor("op_5357_cast")]; + tensor var_5359_to_fp16 = const()[name = tensor("op_5359_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(354009088)))]; + tensor input_339_cast = mul(x = var_5357_cast, y = var_5359_to_fp16)[name = tensor("input_339_cast")]; + tensor var_5367 = const()[name = tensor("op_5367"), val = tensor([1, 1])]; + tensor var_5369 = const()[name = tensor("op_5369"), val = tensor([1, 1])]; + tensor var_5371_pad_type_0 = const()[name = tensor("op_5371_pad_type_0"), val = tensor("custom")]; + tensor var_5371_pad_0 = const()[name = 
tensor("op_5371_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(354011712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(360565376))), name = tensor("mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(360565504)))]; + tensor var_5371_cast = conv(bias = mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_5369, groups = var_4943, pad = var_5371_pad_0, pad_type = var_5371_pad_type_0, strides = var_5367, weight = mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_339_cast)[name = tensor("op_5371_cast")]; + tensor var_5372_split_sizes_0 = const()[name = tensor("op_5372_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5372_axis_0 = const()[name = tensor("op_5372_axis_0"), val = tensor(1)]; + tensor var_5372_cast_0, tensor var_5372_cast_1 = split(axis = var_5372_axis_0, split_sizes = var_5372_split_sizes_0, x = var_5371_cast)[name = tensor("op_5372_cast")]; + tensor var_5374_mode_0 = const()[name = tensor("op_5374_mode_0"), val = tensor("EXACT")]; + tensor var_5374_cast = gelu(mode = var_5374_mode_0, x = var_5372_cast_1)[name = tensor("op_5374_cast")]; + tensor input_341_cast = mul(x = var_5372_cast_0, y = var_5374_cast)[name = tensor("input_341_cast")]; + tensor var_5378 = const()[name = tensor("op_5378"), val = tensor([1, 1])]; + tensor var_5380 = const()[name = tensor("op_5380"), val = tensor([1, 1])]; + tensor var_5382_pad_type_0 = const()[name = tensor("op_5382_pad_type_0"), val = tensor("custom")]; + tensor var_5382_pad_0 = const()[name = tensor("op_5382_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(360586048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(363862912))), name = tensor("mid_block_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(363863040)))]; + tensor var_5382_cast = conv(bias = mid_block_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_5380, groups = var_4943, pad = var_5382_pad_0, pad_type = var_5382_pad_type_0, strides = var_5378, weight = mid_block_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_341_cast)[name = tensor("op_5382_cast")]; + tensor inputs_157_cast = add(x = var_5382_cast, y = inputs_155_cast)[name = tensor("inputs_157_cast")]; + tensor var_5392 = const()[name = tensor("op_5392"), val = tensor([1])]; + tensor channels_mean_157_cast = reduce_mean(axes = var_5392, keep_dims = var_4938, x = 
inputs_157_cast)[name = tensor("channels_mean_157_cast")]; + tensor zero_mean_157_cast = sub(x = inputs_157_cast, y = channels_mean_157_cast)[name = tensor("zero_mean_157_cast")]; + tensor zero_mean_sq_157_cast = mul(x = zero_mean_157_cast, y = zero_mean_157_cast)[name = tensor("zero_mean_sq_157_cast")]; + tensor var_5396 = const()[name = tensor("op_5396"), val = tensor([1])]; + tensor var_5397_cast = reduce_mean(axes = var_5396, keep_dims = var_4938, x = zero_mean_sq_157_cast)[name = tensor("op_5397_cast")]; + tensor var_5398_to_fp16 = const()[name = tensor("op_5398_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5399_cast = add(x = var_5397_cast, y = var_5398_to_fp16)[name = tensor("op_5399_cast")]; + tensor denom_157_epsilon_0_to_fp16 = const()[name = tensor("denom_157_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_157_cast = rsqrt(epsilon = denom_157_epsilon_0_to_fp16, x = var_5399_cast)[name = tensor("denom_157_cast")]; + tensor out_157_cast = mul(x = zero_mean_157_cast, y = denom_157_cast)[name = tensor("out_157_cast")]; + tensor var_5403_to_fp16 = const()[name = tensor("op_5403_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(363865664)))]; + tensor var_5404_cast = add(x = out_157_cast, y = var_5403_to_fp16)[name = tensor("op_5404_cast")]; + tensor var_5406_to_fp16 = const()[name = tensor("op_5406_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(363868288)))]; + tensor hidden_states_221_cast = mul(x = var_5404_cast, y = var_5406_to_fp16)[name = tensor("hidden_states_221_cast")]; + tensor var_5413 = const()[name = tensor("op_5413"), val = tensor([1, 1])]; + tensor var_5415 = const()[name = tensor("op_5415"), val = tensor([1, 1])]; + tensor q_105_pad_type_0 = const()[name = tensor("q_105_pad_type_0"), val = tensor("custom")]; + tensor q_105_pad_0 = const()[name = tensor("q_105_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(363870912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(364690176))), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_105_cast = conv(dilations = var_5415, groups = var_4943, pad = q_105_pad_0, pad_type = q_105_pad_type_0, strides = var_5413, weight = mid_block_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_221_cast)[name = tensor("q_105_cast")]; + tensor var_5419 = const()[name = tensor("op_5419"), val = tensor([1, 1])]; + tensor var_5421 = const()[name = tensor("op_5421"), val = tensor([1, 1])]; + tensor k_105_pad_type_0 = const()[name = tensor("k_105_pad_type_0"), val = tensor("custom")]; + tensor k_105_pad_0 = const()[name = tensor("k_105_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(364690304))), lut = tensor([-0x1.4dp-5, -0x1.914p-7, 0x1.928p-7, 0x1.4d8p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_105_cast = conv(dilations = var_5421, groups = var_4943, pad = 
k_105_pad_0, pad_type = k_105_pad_type_0, strides = var_5419, weight = mid_block_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_221_cast)[name = tensor("k_105_cast")]; + tensor var_5425 = const()[name = tensor("op_5425"), val = tensor([1, 1])]; + tensor var_5427 = const()[name = tensor("op_5427"), val = tensor([1, 1])]; + tensor v_105_pad_type_0 = const()[name = tensor("v_105_pad_type_0"), val = tensor("custom")]; + tensor v_105_pad_0 = const()[name = tensor("v_105_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(365099968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(365919232))), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_105_cast = conv(dilations = var_5427, groups = var_4943, pad = v_105_pad_0, pad_type = v_105_pad_type_0, strides = var_5425, weight = mid_block_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_221_cast)[name = tensor("v_105_cast")]; + tensor var_5431 = const()[name = tensor("op_5431"), val = tensor([2, 20, 64, -1])]; + tensor var_5432_cast = reshape(shape = var_5431, x = q_105_cast)[name = tensor("op_5432_cast")]; + tensor var_5433 = const()[name = tensor("op_5433"), val = tensor([2, 20, 64, -1])]; + tensor var_5434_cast = reshape(shape = var_5433, x = k_105_cast)[name = tensor("op_5434_cast")]; + tensor var_5435 = const()[name = tensor("op_5435"), val = tensor([2, 20, 64, -1])]; + tensor var_5436_cast = reshape(shape = var_5435, x = v_105_cast)[name = tensor("op_5436_cast")]; + tensor attn_weights_209_transpose_x_0 = const()[name = tensor("attn_weights_209_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_209_transpose_y_0 = const()[name = tensor("attn_weights_209_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_209_cast = matmul(transpose_x = attn_weights_209_transpose_x_0, transpose_y = attn_weights_209_transpose_y_0, x = var_5432_cast, y = var_5434_cast)[name = tensor("attn_weights_209_cast")]; + tensor attn_weights_211_cast = mul(x = attn_weights_209_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_211_cast")]; + tensor var_5440_cast = softmax(axis = var_4927, x = attn_weights_211_cast)[name = tensor("op_5440_cast")]; + tensor attn_105_transpose_x_0 = const()[name = tensor("attn_105_transpose_x_0"), val = tensor(false)]; + tensor attn_105_transpose_y_0 = const()[name = tensor("attn_105_transpose_y_0"), val = tensor(true)]; + tensor attn_105_cast = matmul(transpose_x = attn_105_transpose_x_0, transpose_y = attn_105_transpose_y_0, x = var_5436_cast, y = var_5440_cast)[name = tensor("attn_105_cast")]; + tensor var_5444 = const()[name = tensor("op_5444"), val = tensor([2, 1280, 1, -1])]; + tensor input_343_cast = reshape(shape = var_5444, x = attn_105_cast)[name = tensor("input_343_cast")]; + tensor var_5449 = const()[name = tensor("op_5449"), val = tensor([1, 1])]; + tensor var_5451 = const()[name = tensor("op_5451"), val = tensor([1, 1])]; + tensor var_5453_pad_type_0 = const()[name = tensor("op_5453_pad_type_0"), val = tensor("custom")]; + tensor var_5453_pad_0 = const()[name = tensor("op_5453_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(365919360))), lut = tensor([-0x1.58p-5, -0x1.9e8p-7, 0x1.9dp-7, 0x1.578p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(366329024)))]; + tensor var_5453_cast = conv(bias = mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_5451, groups = var_4943, pad = var_5453_pad_0, pad_type = var_5453_pad_type_0, strides = var_5449, weight = mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_343_cast)[name = tensor("op_5453_cast")]; + tensor inputs_159_cast = add(x = var_5453_cast, y = inputs_157_cast)[name = tensor("inputs_159_cast")]; + tensor var_5457 = const()[name = tensor("op_5457"), val = tensor([1])]; + tensor channels_mean_159_cast = reduce_mean(axes = var_5457, keep_dims = var_4938, x = inputs_159_cast)[name = tensor("channels_mean_159_cast")]; + tensor zero_mean_159_cast = sub(x = inputs_159_cast, y = channels_mean_159_cast)[name = tensor("zero_mean_159_cast")]; + tensor zero_mean_sq_159_cast = mul(x = zero_mean_159_cast, y = zero_mean_159_cast)[name = tensor("zero_mean_sq_159_cast")]; + tensor var_5461 = const()[name = tensor("op_5461"), val = tensor([1])]; + tensor var_5462_cast = reduce_mean(axes = var_5461, keep_dims = var_4938, x = zero_mean_sq_159_cast)[name = tensor("op_5462_cast")]; + tensor var_5463_to_fp16 = const()[name = tensor("op_5463_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5464_cast = add(x = var_5462_cast, y = var_5463_to_fp16)[name = tensor("op_5464_cast")]; + tensor denom_159_epsilon_0_to_fp16 = const()[name = tensor("denom_159_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_159_cast = rsqrt(epsilon = denom_159_epsilon_0_to_fp16, x = var_5464_cast)[name = tensor("denom_159_cast")]; + tensor out_159_cast = mul(x = zero_mean_159_cast, y = denom_159_cast)[name = tensor("out_159_cast")]; + tensor var_5468_to_fp16 = const()[name = tensor("op_5468_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(366331648)))]; + tensor var_5469_cast = add(x = out_159_cast, y = var_5468_to_fp16)[name = tensor("op_5469_cast")]; + tensor var_5471_to_fp16 = const()[name = tensor("op_5471_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(366334272)))]; + tensor hidden_states_223_cast = mul(x = var_5469_cast, y = var_5471_to_fp16)[name = tensor("hidden_states_223_cast")]; + tensor var_5478 = const()[name = tensor("op_5478"), val = tensor([1, 1])]; + tensor var_5480 = const()[name = tensor("op_5480"), val = tensor([1, 1])]; + tensor q_107_pad_type_0 = const()[name = tensor("q_107_pad_type_0"), val = tensor("custom")]; + tensor q_107_pad_0 = const()[name = tensor("q_107_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(366336896))), lut = 
tensor([-0x1.93p-7, 0x1.938p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_107_cast = conv(dilations = var_5480, groups = var_4943, pad = q_107_pad_0, pad_type = q_107_pad_type_0, strides = var_5478, weight = mid_block_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_223_cast)[name = tensor("q_107_cast")]; + tensor var_5484 = const()[name = tensor("op_5484"), val = tensor([1, 1])]; + tensor var_5486 = const()[name = tensor("op_5486"), val = tensor([1, 1])]; + tensor k_107_pad_type_0 = const()[name = tensor("k_107_pad_type_0"), val = tensor("custom")]; + tensor k_107_pad_0 = const()[name = tensor("k_107_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(366541760))), lut = tensor([-0x1.294p-7, 0x1.28cp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_107_cast = conv(dilations = var_5486, groups = var_4943, pad = k_107_pad_0, pad_type = k_107_pad_type_0, strides = var_5484, weight = mid_block_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_107_cast")]; + tensor var_5490 = const()[name = tensor("op_5490"), val = tensor([1, 1])]; + tensor var_5492 = const()[name = tensor("op_5492"), val = tensor([1, 1])]; + tensor v_107_pad_type_0 = const()[name = tensor("v_107_pad_type_0"), val = tensor("custom")]; + tensor v_107_pad_0 = const()[name = tensor("v_107_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(366869504))), lut = tensor([-0x1.63cp-7, 0x1.62cp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_107_cast = conv(dilations = var_5492, groups = var_4943, pad = v_107_pad_0, pad_type = v_107_pad_type_0, strides = var_5490, weight = mid_block_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_107_cast")]; + tensor var_5496 = const()[name = tensor("op_5496"), val = tensor([2, 20, 64, -1])]; + tensor var_5497_cast = reshape(shape = var_5496, x = q_107_cast)[name = tensor("op_5497_cast")]; + tensor var_5498 = const()[name = tensor("op_5498"), val = tensor([2, 20, 64, -1])]; + tensor var_5499_cast = reshape(shape = var_5498, x = k_107_cast)[name = tensor("op_5499_cast")]; + tensor var_5500 = const()[name = tensor("op_5500"), val = tensor([2, 20, 64, -1])]; + tensor var_5501_cast = reshape(shape = var_5500, x = v_107_cast)[name = tensor("op_5501_cast")]; + tensor attn_weights_213_transpose_x_0 = const()[name = tensor("attn_weights_213_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_213_transpose_y_0 = const()[name = tensor("attn_weights_213_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_213_cast = matmul(transpose_x = attn_weights_213_transpose_x_0, transpose_y = attn_weights_213_transpose_y_0, x = var_5497_cast, y = var_5499_cast)[name = tensor("attn_weights_213_cast")]; + tensor attn_weights_215_cast = mul(x = 
attn_weights_213_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_215_cast")]; + tensor var_5505_cast = softmax(axis = var_4927, x = attn_weights_215_cast)[name = tensor("op_5505_cast")]; + tensor attn_107_transpose_x_0 = const()[name = tensor("attn_107_transpose_x_0"), val = tensor(false)]; + tensor attn_107_transpose_y_0 = const()[name = tensor("attn_107_transpose_y_0"), val = tensor(true)]; + tensor attn_107_cast = matmul(transpose_x = attn_107_transpose_x_0, transpose_y = attn_107_transpose_y_0, x = var_5501_cast, y = var_5505_cast)[name = tensor("attn_107_cast")]; + tensor var_5509 = const()[name = tensor("op_5509"), val = tensor([2, 1280, 1, -1])]; + tensor input_345_cast = reshape(shape = var_5509, x = attn_107_cast)[name = tensor("input_345_cast")]; + tensor var_5514 = const()[name = tensor("op_5514"), val = tensor([1, 1])]; + tensor var_5516 = const()[name = tensor("op_5516"), val = tensor([1, 1])]; + tensor var_5518_pad_type_0 = const()[name = tensor("op_5518_pad_type_0"), val = tensor("custom")]; + tensor var_5518_pad_0 = const()[name = tensor("op_5518_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(367197248))), lut = tensor([-0x1.878p-8, 0x1.86cp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(367402112)))]; + tensor var_5518_cast = conv(bias = mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_5516, groups = var_4943, pad = var_5518_pad_0, pad_type = var_5518_pad_type_0, strides = var_5514, weight = mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_345_cast)[name = tensor("op_5518_cast")]; + tensor inputs_161_cast = add(x = var_5518_cast, y = inputs_159_cast)[name = tensor("inputs_161_cast")]; + tensor var_5522 = const()[name = tensor("op_5522"), val = tensor([1])]; + tensor channels_mean_161_cast = reduce_mean(axes = var_5522, keep_dims = var_4938, x = inputs_161_cast)[name = tensor("channels_mean_161_cast")]; + tensor zero_mean_161_cast = sub(x = inputs_161_cast, y = channels_mean_161_cast)[name = tensor("zero_mean_161_cast")]; + tensor zero_mean_sq_161_cast = mul(x = zero_mean_161_cast, y = zero_mean_161_cast)[name = tensor("zero_mean_sq_161_cast")]; + tensor var_5526 = const()[name = tensor("op_5526"), val = tensor([1])]; + tensor var_5527_cast = reduce_mean(axes = var_5526, keep_dims = var_4938, x = zero_mean_sq_161_cast)[name = tensor("op_5527_cast")]; + tensor var_5528_to_fp16 = const()[name = tensor("op_5528_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5529_cast = add(x = var_5527_cast, y = var_5528_to_fp16)[name = tensor("op_5529_cast")]; + tensor denom_161_epsilon_0_to_fp16 = const()[name = tensor("denom_161_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_161_cast = rsqrt(epsilon = denom_161_epsilon_0_to_fp16, x = var_5529_cast)[name = tensor("denom_161_cast")]; + tensor out_161_cast = mul(x = zero_mean_161_cast, y = denom_161_cast)[name = tensor("out_161_cast")]; + tensor var_5533_to_fp16 = const()[name 
= tensor("op_5533_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(367404736)))]; + tensor var_5534_cast = add(x = out_161_cast, y = var_5533_to_fp16)[name = tensor("op_5534_cast")]; + tensor var_5536_to_fp16 = const()[name = tensor("op_5536_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(367407360)))]; + tensor input_347_cast = mul(x = var_5534_cast, y = var_5536_to_fp16)[name = tensor("input_347_cast")]; + tensor var_5544 = const()[name = tensor("op_5544"), val = tensor([1, 1])]; + tensor var_5546 = const()[name = tensor("op_5546"), val = tensor([1, 1])]; + tensor var_5548_pad_type_0 = const()[name = tensor("op_5548_pad_type_0"), val = tensor("custom")]; + tensor var_5548_pad_0 = const()[name = tensor("op_5548_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(367409984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(373963648))), name = tensor("mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(373963776)))]; + tensor var_5548_cast = conv(bias = mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_5546, groups = var_4943, pad = var_5548_pad_0, pad_type = var_5548_pad_type_0, strides = var_5544, weight = mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_347_cast)[name = tensor("op_5548_cast")]; + tensor var_5549_split_sizes_0 = const()[name = tensor("op_5549_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5549_axis_0 = const()[name = tensor("op_5549_axis_0"), val = tensor(1)]; + tensor var_5549_cast_0, tensor var_5549_cast_1 = split(axis = var_5549_axis_0, split_sizes = var_5549_split_sizes_0, x = var_5548_cast)[name = tensor("op_5549_cast")]; + tensor var_5551_mode_0 = const()[name = tensor("op_5551_mode_0"), val = tensor("EXACT")]; + tensor var_5551_cast = gelu(mode = var_5551_mode_0, x = var_5549_cast_1)[name = tensor("op_5551_cast")]; + tensor input_349_cast = mul(x = var_5549_cast_0, y = var_5551_cast)[name = tensor("input_349_cast")]; + tensor var_5555 = const()[name = tensor("op_5555"), val = tensor([1, 1])]; + tensor var_5557 = const()[name = tensor("op_5557"), val = tensor([1, 1])]; + tensor var_5559_pad_type_0 = const()[name = tensor("op_5559_pad_type_0"), val = tensor("custom")]; + tensor var_5559_pad_0 = const()[name = tensor("op_5559_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(373984320))), lut = tensor([-0x1.37p-5, -0x1.744p-7, 0x1.758p-7, 0x1.374p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = 
tensor("mid_block_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(375622784)))]; + tensor var_5559_cast = conv(bias = mid_block_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_5557, groups = var_4943, pad = var_5559_pad_0, pad_type = var_5559_pad_type_0, strides = var_5555, weight = mid_block_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_349_cast)[name = tensor("op_5559_cast")]; + tensor inputs_163_cast = add(x = var_5559_cast, y = inputs_161_cast)[name = tensor("inputs_163_cast")]; + tensor var_5569 = const()[name = tensor("op_5569"), val = tensor([1])]; + tensor channels_mean_163_cast = reduce_mean(axes = var_5569, keep_dims = var_4938, x = inputs_163_cast)[name = tensor("channels_mean_163_cast")]; + tensor zero_mean_163_cast = sub(x = inputs_163_cast, y = channels_mean_163_cast)[name = tensor("zero_mean_163_cast")]; + tensor zero_mean_sq_163_cast = mul(x = zero_mean_163_cast, y = zero_mean_163_cast)[name = tensor("zero_mean_sq_163_cast")]; + tensor var_5573 = const()[name = tensor("op_5573"), val = tensor([1])]; + tensor var_5574_cast = reduce_mean(axes = var_5573, keep_dims = var_4938, x = zero_mean_sq_163_cast)[name = tensor("op_5574_cast")]; + tensor var_5575_to_fp16 = const()[name = tensor("op_5575_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5576_cast = add(x = var_5574_cast, y = var_5575_to_fp16)[name = tensor("op_5576_cast")]; + tensor denom_163_epsilon_0_to_fp16 = const()[name = tensor("denom_163_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_163_cast = rsqrt(epsilon = denom_163_epsilon_0_to_fp16, x = var_5576_cast)[name = tensor("denom_163_cast")]; + tensor out_163_cast = mul(x = zero_mean_163_cast, y = denom_163_cast)[name = tensor("out_163_cast")]; + tensor var_5580_to_fp16 = const()[name = tensor("op_5580_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(375625408)))]; + tensor var_5581_cast = add(x = out_163_cast, y = var_5580_to_fp16)[name = tensor("op_5581_cast")]; + tensor var_5583_to_fp16 = const()[name = tensor("op_5583_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(375628032)))]; + tensor hidden_states_227_cast = mul(x = var_5581_cast, y = var_5583_to_fp16)[name = tensor("hidden_states_227_cast")]; + tensor var_5590 = const()[name = tensor("op_5590"), val = tensor([1, 1])]; + tensor var_5592 = const()[name = tensor("op_5592"), val = tensor([1, 1])]; + tensor q_109_pad_type_0 = const()[name = tensor("q_109_pad_type_0"), val = tensor("custom")]; + tensor q_109_pad_0 = const()[name = tensor("q_109_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(375630656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(376449920))), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_109_cast = conv(dilations = var_5592, groups = var_4943, pad = q_109_pad_0, pad_type = q_109_pad_type_0, strides = var_5590, weight = mid_block_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_227_cast)[name = tensor("q_109_cast")]; + tensor var_5596 = const()[name = 
tensor("op_5596"), val = tensor([1, 1])]; + tensor var_5598 = const()[name = tensor("op_5598"), val = tensor([1, 1])]; + tensor k_109_pad_type_0 = const()[name = tensor("k_109_pad_type_0"), val = tensor("custom")]; + tensor k_109_pad_0 = const()[name = tensor("k_109_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(376450048))), lut = tensor([-0x1.44cp-5, -0x1.88cp-7, 0x1.858p-7, 0x1.43cp-5]), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_109_cast = conv(dilations = var_5598, groups = var_4943, pad = k_109_pad_0, pad_type = k_109_pad_type_0, strides = var_5596, weight = mid_block_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_227_cast)[name = tensor("k_109_cast")]; + tensor var_5602 = const()[name = tensor("op_5602"), val = tensor([1, 1])]; + tensor var_5604 = const()[name = tensor("op_5604"), val = tensor([1, 1])]; + tensor v_109_pad_type_0 = const()[name = tensor("v_109_pad_type_0"), val = tensor("custom")]; + tensor v_109_pad_0 = const()[name = tensor("v_109_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(376859712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(377678976))), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_109_cast = conv(dilations = var_5604, groups = var_4943, pad = v_109_pad_0, pad_type = v_109_pad_type_0, strides = var_5602, weight = mid_block_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_227_cast)[name = tensor("v_109_cast")]; + tensor var_5608 = const()[name = tensor("op_5608"), val = tensor([2, 20, 64, -1])]; + tensor var_5609_cast = reshape(shape = var_5608, x = q_109_cast)[name = tensor("op_5609_cast")]; + tensor var_5610 = const()[name = tensor("op_5610"), val = tensor([2, 20, 64, -1])]; + tensor var_5611_cast = reshape(shape = var_5610, x = k_109_cast)[name = tensor("op_5611_cast")]; + tensor var_5612 = const()[name = tensor("op_5612"), val = tensor([2, 20, 64, -1])]; + tensor var_5613_cast = reshape(shape = var_5612, x = v_109_cast)[name = tensor("op_5613_cast")]; + tensor attn_weights_217_transpose_x_0 = const()[name = tensor("attn_weights_217_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_217_transpose_y_0 = const()[name = tensor("attn_weights_217_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_217_cast = matmul(transpose_x = attn_weights_217_transpose_x_0, transpose_y = attn_weights_217_transpose_y_0, x = var_5609_cast, y = var_5611_cast)[name = tensor("attn_weights_217_cast")]; + tensor attn_weights_219_cast = mul(x = attn_weights_217_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_219_cast")]; + tensor var_5617_cast = softmax(axis = var_4927, x = attn_weights_219_cast)[name = tensor("op_5617_cast")]; + tensor attn_109_transpose_x_0 = const()[name = tensor("attn_109_transpose_x_0"), val = tensor(false)]; + tensor attn_109_transpose_y_0 = const()[name = tensor("attn_109_transpose_y_0"), val = tensor(true)]; + 
tensor attn_109_cast = matmul(transpose_x = attn_109_transpose_x_0, transpose_y = attn_109_transpose_y_0, x = var_5613_cast, y = var_5617_cast)[name = tensor("attn_109_cast")]; + tensor var_5621 = const()[name = tensor("op_5621"), val = tensor([2, 1280, 1, -1])]; + tensor input_351_cast = reshape(shape = var_5621, x = attn_109_cast)[name = tensor("input_351_cast")]; + tensor var_5626 = const()[name = tensor("op_5626"), val = tensor([1, 1])]; + tensor var_5628 = const()[name = tensor("op_5628"), val = tensor([1, 1])]; + tensor var_5630_pad_type_0 = const()[name = tensor("op_5630_pad_type_0"), val = tensor("custom")]; + tensor var_5630_pad_0 = const()[name = tensor("op_5630_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(377679104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(378498368))), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(378498496)))]; + tensor var_5630_cast = conv(bias = mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_5628, groups = var_4943, pad = var_5630_pad_0, pad_type = var_5630_pad_type_0, strides = var_5626, weight = mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_351_cast)[name = tensor("op_5630_cast")]; + tensor inputs_165_cast = add(x = var_5630_cast, y = inputs_163_cast)[name = tensor("inputs_165_cast")]; + tensor var_5634 = const()[name = tensor("op_5634"), val = tensor([1])]; + tensor channels_mean_165_cast = reduce_mean(axes = var_5634, keep_dims = var_4938, x = inputs_165_cast)[name = tensor("channels_mean_165_cast")]; + tensor zero_mean_165_cast = sub(x = inputs_165_cast, y = channels_mean_165_cast)[name = tensor("zero_mean_165_cast")]; + tensor zero_mean_sq_165_cast = mul(x = zero_mean_165_cast, y = zero_mean_165_cast)[name = tensor("zero_mean_sq_165_cast")]; + tensor var_5638 = const()[name = tensor("op_5638"), val = tensor([1])]; + tensor var_5639_cast = reduce_mean(axes = var_5638, keep_dims = var_4938, x = zero_mean_sq_165_cast)[name = tensor("op_5639_cast")]; + tensor var_5640_to_fp16 = const()[name = tensor("op_5640_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5641_cast = add(x = var_5639_cast, y = var_5640_to_fp16)[name = tensor("op_5641_cast")]; + tensor denom_165_epsilon_0_to_fp16 = const()[name = tensor("denom_165_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_165_cast = rsqrt(epsilon = denom_165_epsilon_0_to_fp16, x = var_5641_cast)[name = tensor("denom_165_cast")]; + tensor out_165_cast = mul(x = zero_mean_165_cast, y = denom_165_cast)[name = tensor("out_165_cast")]; + tensor var_5645_to_fp16 = const()[name = tensor("op_5645_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(378501120)))]; + tensor var_5646_cast = add(x = out_165_cast, y = var_5645_to_fp16)[name = tensor("op_5646_cast")]; + tensor var_5648_to_fp16 = const()[name = tensor("op_5648_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(378503744)))]; + tensor hidden_states_229_cast = mul(x = var_5646_cast, y = var_5648_to_fp16)[name = tensor("hidden_states_229_cast")]; + tensor var_5655 = const()[name = tensor("op_5655"), val = tensor([1, 1])]; + tensor var_5657 = const()[name = tensor("op_5657"), val = tensor([1, 1])]; + tensor q_111_pad_type_0 = const()[name = tensor("q_111_pad_type_0"), val = tensor("custom")]; + tensor q_111_pad_0 = const()[name = tensor("q_111_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(378506368))), lut = tensor([-0x1.7b4p-7, 0x1.7a4p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_111_cast = conv(dilations = var_5657, groups = var_4943, pad = q_111_pad_0, pad_type = q_111_pad_type_0, strides = var_5655, weight = mid_block_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_229_cast)[name = tensor("q_111_cast")]; + tensor var_5661 = const()[name = tensor("op_5661"), val = tensor([1, 1])]; + tensor var_5663 = const()[name = tensor("op_5663"), val = tensor([1, 1])]; + tensor k_111_pad_type_0 = const()[name = tensor("k_111_pad_type_0"), val = tensor("custom")]; + tensor k_111_pad_0 = const()[name = tensor("k_111_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(378711232))), lut = tensor([-0x1.038p-7, 0x1.044p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_111_cast = conv(dilations = var_5663, groups = var_4943, pad = k_111_pad_0, pad_type = k_111_pad_type_0, strides = var_5661, weight = mid_block_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_111_cast")]; + tensor var_5667 = const()[name = tensor("op_5667"), val = tensor([1, 1])]; + tensor var_5669 = const()[name = tensor("op_5669"), val = tensor([1, 1])]; + tensor v_111_pad_type_0 = const()[name = tensor("v_111_pad_type_0"), val = tensor("custom")]; + tensor v_111_pad_0 = const()[name = tensor("v_111_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(379038976))), lut = tensor([-0x1.354p-7, 0x1.35cp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_111_cast = conv(dilations = var_5669, groups = var_4943, pad = v_111_pad_0, pad_type = v_111_pad_type_0, strides = var_5667, weight = mid_block_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_111_cast")]; + tensor var_5673 = const()[name = tensor("op_5673"), val = tensor([2, 20, 64, -1])]; + tensor var_5674_cast = reshape(shape = var_5673, x = q_111_cast)[name = tensor("op_5674_cast")]; + tensor var_5675 = const()[name = tensor("op_5675"), val = tensor([2, 20, 64, -1])]; + tensor 
var_5676_cast = reshape(shape = var_5675, x = k_111_cast)[name = tensor("op_5676_cast")]; + tensor var_5677 = const()[name = tensor("op_5677"), val = tensor([2, 20, 64, -1])]; + tensor var_5678_cast = reshape(shape = var_5677, x = v_111_cast)[name = tensor("op_5678_cast")]; + tensor attn_weights_221_transpose_x_0 = const()[name = tensor("attn_weights_221_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_221_transpose_y_0 = const()[name = tensor("attn_weights_221_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_221_cast = matmul(transpose_x = attn_weights_221_transpose_x_0, transpose_y = attn_weights_221_transpose_y_0, x = var_5674_cast, y = var_5676_cast)[name = tensor("attn_weights_221_cast")]; + tensor attn_weights_223_cast = mul(x = attn_weights_221_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_223_cast")]; + tensor var_5682_cast = softmax(axis = var_4927, x = attn_weights_223_cast)[name = tensor("op_5682_cast")]; + tensor attn_111_transpose_x_0 = const()[name = tensor("attn_111_transpose_x_0"), val = tensor(false)]; + tensor attn_111_transpose_y_0 = const()[name = tensor("attn_111_transpose_y_0"), val = tensor(true)]; + tensor attn_111_cast = matmul(transpose_x = attn_111_transpose_x_0, transpose_y = attn_111_transpose_y_0, x = var_5678_cast, y = var_5682_cast)[name = tensor("attn_111_cast")]; + tensor var_5686 = const()[name = tensor("op_5686"), val = tensor([2, 1280, 1, -1])]; + tensor input_353_cast = reshape(shape = var_5686, x = attn_111_cast)[name = tensor("input_353_cast")]; + tensor var_5691 = const()[name = tensor("op_5691"), val = tensor([1, 1])]; + tensor var_5693 = const()[name = tensor("op_5693"), val = tensor([1, 1])]; + tensor var_5695_pad_type_0 = const()[name = tensor("op_5695_pad_type_0"), val = tensor("custom")]; + tensor var_5695_pad_0 = const()[name = tensor("op_5695_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(379366720))), lut = tensor([-0x1.5a4p-8, 0x1.5bcp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(379571584)))]; + tensor var_5695_cast = conv(bias = mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_5693, groups = var_4943, pad = var_5695_pad_0, pad_type = var_5695_pad_type_0, strides = var_5691, weight = mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_353_cast)[name = tensor("op_5695_cast")]; + tensor inputs_167_cast = add(x = var_5695_cast, y = inputs_165_cast)[name = tensor("inputs_167_cast")]; + tensor var_5699 = const()[name = tensor("op_5699"), val = tensor([1])]; + tensor channels_mean_167_cast = reduce_mean(axes = var_5699, keep_dims = var_4938, x = inputs_167_cast)[name = tensor("channels_mean_167_cast")]; + tensor zero_mean_167_cast = sub(x = inputs_167_cast, y = channels_mean_167_cast)[name = tensor("zero_mean_167_cast")]; + tensor zero_mean_sq_167_cast = mul(x = zero_mean_167_cast, y = zero_mean_167_cast)[name = tensor("zero_mean_sq_167_cast")]; + tensor var_5703 = 
const()[name = tensor("op_5703"), val = tensor([1])]; + tensor var_5704_cast = reduce_mean(axes = var_5703, keep_dims = var_4938, x = zero_mean_sq_167_cast)[name = tensor("op_5704_cast")]; + tensor var_5705_to_fp16 = const()[name = tensor("op_5705_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5706_cast = add(x = var_5704_cast, y = var_5705_to_fp16)[name = tensor("op_5706_cast")]; + tensor denom_167_epsilon_0_to_fp16 = const()[name = tensor("denom_167_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_167_cast = rsqrt(epsilon = denom_167_epsilon_0_to_fp16, x = var_5706_cast)[name = tensor("denom_167_cast")]; + tensor out_167_cast = mul(x = zero_mean_167_cast, y = denom_167_cast)[name = tensor("out_167_cast")]; + tensor var_5710_to_fp16 = const()[name = tensor("op_5710_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(379574208)))]; + tensor var_5711_cast = add(x = out_167_cast, y = var_5710_to_fp16)[name = tensor("op_5711_cast")]; + tensor var_5713_to_fp16 = const()[name = tensor("op_5713_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(379576832)))]; + tensor input_355_cast = mul(x = var_5711_cast, y = var_5713_to_fp16)[name = tensor("input_355_cast")]; + tensor var_5721 = const()[name = tensor("op_5721"), val = tensor([1, 1])]; + tensor var_5723 = const()[name = tensor("op_5723"), val = tensor([1, 1])]; + tensor var_5725_pad_type_0 = const()[name = tensor("op_5725_pad_type_0"), val = tensor("custom")]; + tensor var_5725_pad_0 = const()[name = tensor("op_5725_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(379579456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(386133120))), name = tensor("mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(386133248)))]; + tensor var_5725_cast = conv(bias = mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_5723, groups = var_4943, pad = var_5725_pad_0, pad_type = var_5725_pad_type_0, strides = var_5721, weight = mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_355_cast)[name = tensor("op_5725_cast")]; + tensor var_5726_split_sizes_0 = const()[name = tensor("op_5726_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5726_axis_0 = const()[name = tensor("op_5726_axis_0"), val = tensor(1)]; + tensor var_5726_cast_0, tensor var_5726_cast_1 = split(axis = var_5726_axis_0, split_sizes = var_5726_split_sizes_0, x = var_5725_cast)[name = tensor("op_5726_cast")]; + tensor var_5728_mode_0 = const()[name = tensor("op_5728_mode_0"), val = tensor("EXACT")]; + tensor var_5728_cast = gelu(mode = var_5728_mode_0, x = var_5726_cast_1)[name = tensor("op_5728_cast")]; + tensor input_357_cast = mul(x = var_5726_cast_0, y = var_5728_cast)[name = tensor("input_357_cast")]; + tensor var_5732 = const()[name = tensor("op_5732"), val = tensor([1, 1])]; + tensor var_5734 = const()[name = tensor("op_5734"), val 
= tensor([1, 1])]; + tensor var_5736_pad_type_0 = const()[name = tensor("op_5736_pad_type_0"), val = tensor("custom")]; + tensor var_5736_pad_0 = const()[name = tensor("op_5736_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(386153792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(389430656))), name = tensor("mid_block_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(389430784)))]; + tensor var_5736_cast = conv(bias = mid_block_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_5734, groups = var_4943, pad = var_5736_pad_0, pad_type = var_5736_pad_type_0, strides = var_5732, weight = mid_block_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_357_cast)[name = tensor("op_5736_cast")]; + tensor inputs_169_cast = add(x = var_5736_cast, y = inputs_167_cast)[name = tensor("inputs_169_cast")]; + tensor var_5746 = const()[name = tensor("op_5746"), val = tensor([1])]; + tensor channels_mean_169_cast = reduce_mean(axes = var_5746, keep_dims = var_4938, x = inputs_169_cast)[name = tensor("channels_mean_169_cast")]; + tensor zero_mean_169_cast = sub(x = inputs_169_cast, y = channels_mean_169_cast)[name = tensor("zero_mean_169_cast")]; + tensor zero_mean_sq_169_cast = mul(x = zero_mean_169_cast, y = zero_mean_169_cast)[name = tensor("zero_mean_sq_169_cast")]; + tensor var_5750 = const()[name = tensor("op_5750"), val = tensor([1])]; + tensor var_5751_cast = reduce_mean(axes = var_5750, keep_dims = var_4938, x = zero_mean_sq_169_cast)[name = tensor("op_5751_cast")]; + tensor var_5752_to_fp16 = const()[name = tensor("op_5752_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5753_cast = add(x = var_5751_cast, y = var_5752_to_fp16)[name = tensor("op_5753_cast")]; + tensor denom_169_epsilon_0_to_fp16 = const()[name = tensor("denom_169_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_169_cast = rsqrt(epsilon = denom_169_epsilon_0_to_fp16, x = var_5753_cast)[name = tensor("denom_169_cast")]; + tensor out_169_cast = mul(x = zero_mean_169_cast, y = denom_169_cast)[name = tensor("out_169_cast")]; + tensor var_5757_to_fp16 = const()[name = tensor("op_5757_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(389433408)))]; + tensor var_5758_cast = add(x = out_169_cast, y = var_5757_to_fp16)[name = tensor("op_5758_cast")]; + tensor var_5760_to_fp16 = const()[name = tensor("op_5760_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(389436032)))]; + tensor hidden_states_233_cast = mul(x = var_5758_cast, y = var_5760_to_fp16)[name = tensor("hidden_states_233_cast")]; + tensor var_5767 = const()[name = tensor("op_5767"), val = tensor([1, 1])]; + tensor var_5769 = const()[name = tensor("op_5769"), val = tensor([1, 1])]; + tensor q_113_pad_type_0 = const()[name = tensor("q_113_pad_type_0"), val = tensor("custom")]; + tensor q_113_pad_0 = const()[name = tensor("q_113_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
mid_block_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(389438656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(390257920))), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_113_cast = conv(dilations = var_5769, groups = var_4943, pad = q_113_pad_0, pad_type = q_113_pad_type_0, strides = var_5767, weight = mid_block_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_233_cast)[name = tensor("q_113_cast")]; + tensor var_5773 = const()[name = tensor("op_5773"), val = tensor([1, 1])]; + tensor var_5775 = const()[name = tensor("op_5775"), val = tensor([1, 1])]; + tensor k_113_pad_type_0 = const()[name = tensor("k_113_pad_type_0"), val = tensor("custom")]; + tensor k_113_pad_0 = const()[name = tensor("k_113_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(390258048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(391077312))), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_113_cast = conv(dilations = var_5775, groups = var_4943, pad = k_113_pad_0, pad_type = k_113_pad_type_0, strides = var_5773, weight = mid_block_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_233_cast)[name = tensor("k_113_cast")]; + tensor var_5779 = const()[name = tensor("op_5779"), val = tensor([1, 1])]; + tensor var_5781 = const()[name = tensor("op_5781"), val = tensor([1, 1])]; + tensor v_113_pad_type_0 = const()[name = tensor("v_113_pad_type_0"), val = tensor("custom")]; + tensor v_113_pad_0 = const()[name = tensor("v_113_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(391077440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(391896704))), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_113_cast = conv(dilations = var_5781, groups = var_4943, pad = v_113_pad_0, pad_type = v_113_pad_type_0, strides = var_5779, weight = mid_block_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_233_cast)[name = tensor("v_113_cast")]; + tensor var_5785 = const()[name = tensor("op_5785"), val = tensor([2, 20, 64, -1])]; + tensor var_5786_cast = reshape(shape = var_5785, x = q_113_cast)[name = tensor("op_5786_cast")]; + tensor var_5787 = const()[name = tensor("op_5787"), val = tensor([2, 20, 64, -1])]; + tensor var_5788_cast = reshape(shape = var_5787, x = k_113_cast)[name = tensor("op_5788_cast")]; + tensor var_5789 = const()[name = tensor("op_5789"), val = tensor([2, 20, 64, -1])]; + tensor var_5790_cast = reshape(shape = var_5789, x = v_113_cast)[name = tensor("op_5790_cast")]; + tensor attn_weights_225_transpose_x_0 = const()[name = 
tensor("attn_weights_225_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_225_transpose_y_0 = const()[name = tensor("attn_weights_225_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_225_cast = matmul(transpose_x = attn_weights_225_transpose_x_0, transpose_y = attn_weights_225_transpose_y_0, x = var_5786_cast, y = var_5788_cast)[name = tensor("attn_weights_225_cast")]; + tensor attn_weights_227_cast = mul(x = attn_weights_225_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_227_cast")]; + tensor var_5794_cast = softmax(axis = var_4927, x = attn_weights_227_cast)[name = tensor("op_5794_cast")]; + tensor attn_113_transpose_x_0 = const()[name = tensor("attn_113_transpose_x_0"), val = tensor(false)]; + tensor attn_113_transpose_y_0 = const()[name = tensor("attn_113_transpose_y_0"), val = tensor(true)]; + tensor attn_113_cast = matmul(transpose_x = attn_113_transpose_x_0, transpose_y = attn_113_transpose_y_0, x = var_5790_cast, y = var_5794_cast)[name = tensor("attn_113_cast")]; + tensor var_5798 = const()[name = tensor("op_5798"), val = tensor([2, 1280, 1, -1])]; + tensor input_359_cast = reshape(shape = var_5798, x = attn_113_cast)[name = tensor("input_359_cast")]; + tensor var_5803 = const()[name = tensor("op_5803"), val = tensor([1, 1])]; + tensor var_5805 = const()[name = tensor("op_5805"), val = tensor([1, 1])]; + tensor var_5807_pad_type_0 = const()[name = tensor("op_5807_pad_type_0"), val = tensor("custom")]; + tensor var_5807_pad_0 = const()[name = tensor("op_5807_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(391896832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(392716096))), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(392716224)))]; + tensor var_5807_cast = conv(bias = mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_5805, groups = var_4943, pad = var_5807_pad_0, pad_type = var_5807_pad_type_0, strides = var_5803, weight = mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_359_cast)[name = tensor("op_5807_cast")]; + tensor inputs_171_cast = add(x = var_5807_cast, y = inputs_169_cast)[name = tensor("inputs_171_cast")]; + tensor var_5811 = const()[name = tensor("op_5811"), val = tensor([1])]; + tensor channels_mean_171_cast = reduce_mean(axes = var_5811, keep_dims = var_4938, x = inputs_171_cast)[name = tensor("channels_mean_171_cast")]; + tensor zero_mean_171_cast = sub(x = inputs_171_cast, y = channels_mean_171_cast)[name = tensor("zero_mean_171_cast")]; + tensor zero_mean_sq_171_cast = mul(x = zero_mean_171_cast, y = zero_mean_171_cast)[name = tensor("zero_mean_sq_171_cast")]; + tensor var_5815 = const()[name = tensor("op_5815"), val = tensor([1])]; + tensor var_5816_cast = reduce_mean(axes = var_5815, keep_dims = var_4938, x = zero_mean_sq_171_cast)[name = tensor("op_5816_cast")]; + tensor var_5817_to_fp16 = const()[name = tensor("op_5817_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor var_5818_cast = add(x = var_5816_cast, y = var_5817_to_fp16)[name = tensor("op_5818_cast")]; + tensor denom_171_epsilon_0_to_fp16 = const()[name = tensor("denom_171_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_171_cast = rsqrt(epsilon = denom_171_epsilon_0_to_fp16, x = var_5818_cast)[name = tensor("denom_171_cast")]; + tensor out_171_cast = mul(x = zero_mean_171_cast, y = denom_171_cast)[name = tensor("out_171_cast")]; + tensor var_5822_to_fp16 = const()[name = tensor("op_5822_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(392718848)))]; + tensor var_5823_cast = add(x = out_171_cast, y = var_5822_to_fp16)[name = tensor("op_5823_cast")]; + tensor var_5825_to_fp16 = const()[name = tensor("op_5825_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(392721472)))]; + tensor hidden_states_235_cast = mul(x = var_5823_cast, y = var_5825_to_fp16)[name = tensor("hidden_states_235_cast")]; + tensor var_5832 = const()[name = tensor("op_5832"), val = tensor([1, 1])]; + tensor var_5834 = const()[name = tensor("op_5834"), val = tensor([1, 1])]; + tensor q_115_pad_type_0 = const()[name = tensor("q_115_pad_type_0"), val = tensor("custom")]; + tensor q_115_pad_0 = const()[name = tensor("q_115_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(392724096))), lut = tensor([-0x1.6f8p-7, 0x1.6f8p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_115_cast = conv(dilations = var_5834, groups = var_4943, pad = q_115_pad_0, pad_type = q_115_pad_type_0, strides = var_5832, weight = mid_block_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_235_cast)[name = tensor("q_115_cast")]; + tensor var_5838 = const()[name = tensor("op_5838"), val = tensor([1, 1])]; + tensor var_5840 = const()[name = tensor("op_5840"), val = tensor([1, 1])]; + tensor k_115_pad_type_0 = const()[name = tensor("k_115_pad_type_0"), val = tensor("custom")]; + tensor k_115_pad_0 = const()[name = tensor("k_115_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(392928960))), lut = tensor([-0x1.e54p-8, 0x1.e5p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_115_cast = conv(dilations = var_5840, groups = var_4943, pad = k_115_pad_0, pad_type = k_115_pad_type_0, strides = var_5838, weight = mid_block_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_115_cast")]; + tensor var_5844 = const()[name = tensor("op_5844"), val = tensor([1, 1])]; + tensor var_5846 = const()[name = tensor("op_5846"), val = tensor([1, 1])]; + tensor v_115_pad_type_0 = const()[name = tensor("v_115_pad_type_0"), val = tensor("custom")]; + tensor v_115_pad_0 = const()[name = tensor("v_115_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(393256704))), lut = tensor([-0x1.1b4p-7, 0x1.1bp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_115_cast = conv(dilations = var_5846, groups = var_4943, pad = v_115_pad_0, pad_type = v_115_pad_type_0, strides = var_5844, weight = mid_block_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_115_cast")]; + tensor var_5850 = const()[name = tensor("op_5850"), val = tensor([2, 20, 64, -1])]; + tensor var_5851_cast = reshape(shape = var_5850, x = q_115_cast)[name = tensor("op_5851_cast")]; + tensor var_5852 = const()[name = tensor("op_5852"), val = tensor([2, 20, 64, -1])]; + tensor var_5853_cast = reshape(shape = var_5852, x = k_115_cast)[name = tensor("op_5853_cast")]; + tensor var_5854 = const()[name = tensor("op_5854"), val = tensor([2, 20, 64, -1])]; + tensor var_5855_cast = reshape(shape = var_5854, x = v_115_cast)[name = tensor("op_5855_cast")]; + tensor attn_weights_229_transpose_x_0 = const()[name = tensor("attn_weights_229_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_229_transpose_y_0 = const()[name = tensor("attn_weights_229_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_229_cast = matmul(transpose_x = attn_weights_229_transpose_x_0, transpose_y = attn_weights_229_transpose_y_0, x = var_5851_cast, y = var_5853_cast)[name = tensor("attn_weights_229_cast")]; + tensor attn_weights_231_cast = mul(x = attn_weights_229_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_231_cast")]; + tensor var_5859_cast = softmax(axis = var_4927, x = attn_weights_231_cast)[name = tensor("op_5859_cast")]; + tensor attn_115_transpose_x_0 = const()[name = tensor("attn_115_transpose_x_0"), val = tensor(false)]; + tensor attn_115_transpose_y_0 = const()[name = tensor("attn_115_transpose_y_0"), val = tensor(true)]; + tensor attn_115_cast = matmul(transpose_x = attn_115_transpose_x_0, transpose_y = attn_115_transpose_y_0, x = var_5855_cast, y = var_5859_cast)[name = tensor("attn_115_cast")]; + tensor var_5863 = const()[name = tensor("op_5863"), val = tensor([2, 1280, 1, -1])]; + tensor input_361_cast = reshape(shape = var_5863, x = attn_115_cast)[name = tensor("input_361_cast")]; + tensor var_5868 = const()[name = tensor("op_5868"), val = tensor([1, 1])]; + tensor var_5870 = const()[name = tensor("op_5870"), val = tensor([1, 1])]; + tensor var_5872_pad_type_0 = const()[name = tensor("op_5872_pad_type_0"), val = tensor("custom")]; + tensor var_5872_pad_0 = const()[name = tensor("op_5872_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(393584448))), lut = tensor([-0x1.478p-8, 0x1.47p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(393789312)))]; + tensor var_5872_cast = conv(bias = mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_5870, 
groups = var_4943, pad = var_5872_pad_0, pad_type = var_5872_pad_type_0, strides = var_5868, weight = mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_361_cast)[name = tensor("op_5872_cast")]; + tensor inputs_173_cast = add(x = var_5872_cast, y = inputs_171_cast)[name = tensor("inputs_173_cast")]; + tensor var_5876 = const()[name = tensor("op_5876"), val = tensor([1])]; + tensor channels_mean_173_cast = reduce_mean(axes = var_5876, keep_dims = var_4938, x = inputs_173_cast)[name = tensor("channels_mean_173_cast")]; + tensor zero_mean_173_cast = sub(x = inputs_173_cast, y = channels_mean_173_cast)[name = tensor("zero_mean_173_cast")]; + tensor zero_mean_sq_173_cast = mul(x = zero_mean_173_cast, y = zero_mean_173_cast)[name = tensor("zero_mean_sq_173_cast")]; + tensor var_5880 = const()[name = tensor("op_5880"), val = tensor([1])]; + tensor var_5881_cast = reduce_mean(axes = var_5880, keep_dims = var_4938, x = zero_mean_sq_173_cast)[name = tensor("op_5881_cast")]; + tensor var_5882_to_fp16 = const()[name = tensor("op_5882_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5883_cast = add(x = var_5881_cast, y = var_5882_to_fp16)[name = tensor("op_5883_cast")]; + tensor denom_173_epsilon_0_to_fp16 = const()[name = tensor("denom_173_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_173_cast = rsqrt(epsilon = denom_173_epsilon_0_to_fp16, x = var_5883_cast)[name = tensor("denom_173_cast")]; + tensor out_173_cast = mul(x = zero_mean_173_cast, y = denom_173_cast)[name = tensor("out_173_cast")]; + tensor var_5887_to_fp16 = const()[name = tensor("op_5887_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(393791936)))]; + tensor var_5888_cast = add(x = out_173_cast, y = var_5887_to_fp16)[name = tensor("op_5888_cast")]; + tensor var_5890_to_fp16 = const()[name = tensor("op_5890_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(393794560)))]; + tensor input_363_cast = mul(x = var_5888_cast, y = var_5890_to_fp16)[name = tensor("input_363_cast")]; + tensor var_5898 = const()[name = tensor("op_5898"), val = tensor([1, 1])]; + tensor var_5900 = const()[name = tensor("op_5900"), val = tensor([1, 1])]; + tensor var_5902_pad_type_0 = const()[name = tensor("op_5902_pad_type_0"), val = tensor("custom")]; + tensor var_5902_pad_0 = const()[name = tensor("op_5902_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(393797184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(400350848))), name = tensor("mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(400350976)))]; + tensor var_5902_cast = conv(bias = mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_5900, groups = var_4943, pad = var_5902_pad_0, pad_type = var_5902_pad_type_0, strides = var_5898, weight = mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = 
input_363_cast)[name = tensor("op_5902_cast")]; + tensor var_5903_split_sizes_0 = const()[name = tensor("op_5903_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5903_axis_0 = const()[name = tensor("op_5903_axis_0"), val = tensor(1)]; + tensor var_5903_cast_0, tensor var_5903_cast_1 = split(axis = var_5903_axis_0, split_sizes = var_5903_split_sizes_0, x = var_5902_cast)[name = tensor("op_5903_cast")]; + tensor var_5905_mode_0 = const()[name = tensor("op_5905_mode_0"), val = tensor("EXACT")]; + tensor var_5905_cast = gelu(mode = var_5905_mode_0, x = var_5903_cast_1)[name = tensor("op_5905_cast")]; + tensor input_365_cast = mul(x = var_5903_cast_0, y = var_5905_cast)[name = tensor("input_365_cast")]; + tensor var_5909 = const()[name = tensor("op_5909"), val = tensor([1, 1])]; + tensor var_5911 = const()[name = tensor("op_5911"), val = tensor([1, 1])]; + tensor var_5913_pad_type_0 = const()[name = tensor("op_5913_pad_type_0"), val = tensor("custom")]; + tensor var_5913_pad_0 = const()[name = tensor("op_5913_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(400371520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(403648384))), name = tensor("mid_block_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(403648512)))]; + tensor var_5913_cast = conv(bias = mid_block_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_5911, groups = var_4943, pad = var_5913_pad_0, pad_type = var_5913_pad_type_0, strides = var_5909, weight = mid_block_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_365_cast)[name = tensor("op_5913_cast")]; + tensor inputs_175_cast = add(x = var_5913_cast, y = inputs_173_cast)[name = tensor("inputs_175_cast")]; + tensor var_5923 = const()[name = tensor("op_5923"), val = tensor([1])]; + tensor channels_mean_175_cast = reduce_mean(axes = var_5923, keep_dims = var_4938, x = inputs_175_cast)[name = tensor("channels_mean_175_cast")]; + tensor zero_mean_175_cast = sub(x = inputs_175_cast, y = channels_mean_175_cast)[name = tensor("zero_mean_175_cast")]; + tensor zero_mean_sq_175_cast = mul(x = zero_mean_175_cast, y = zero_mean_175_cast)[name = tensor("zero_mean_sq_175_cast")]; + tensor var_5927 = const()[name = tensor("op_5927"), val = tensor([1])]; + tensor var_5928_cast = reduce_mean(axes = var_5927, keep_dims = var_4938, x = zero_mean_sq_175_cast)[name = tensor("op_5928_cast")]; + tensor var_5929_to_fp16 = const()[name = tensor("op_5929_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5930_cast = add(x = var_5928_cast, y = var_5929_to_fp16)[name = tensor("op_5930_cast")]; + tensor denom_175_epsilon_0_to_fp16 = const()[name = tensor("denom_175_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_175_cast = rsqrt(epsilon = denom_175_epsilon_0_to_fp16, x = var_5930_cast)[name = tensor("denom_175_cast")]; + tensor out_175_cast = mul(x = zero_mean_175_cast, y = denom_175_cast)[name = tensor("out_175_cast")]; + tensor var_5934_to_fp16 = const()[name = tensor("op_5934_to_fp16"), val 
= tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(403651136)))]; + tensor var_5935_cast = add(x = out_175_cast, y = var_5934_to_fp16)[name = tensor("op_5935_cast")]; + tensor var_5937_to_fp16 = const()[name = tensor("op_5937_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(403653760)))]; + tensor hidden_states_239_cast = mul(x = var_5935_cast, y = var_5937_to_fp16)[name = tensor("hidden_states_239_cast")]; + tensor var_5944 = const()[name = tensor("op_5944"), val = tensor([1, 1])]; + tensor var_5946 = const()[name = tensor("op_5946"), val = tensor([1, 1])]; + tensor q_117_pad_type_0 = const()[name = tensor("q_117_pad_type_0"), val = tensor("custom")]; + tensor q_117_pad_0 = const()[name = tensor("q_117_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(403656384))), lut = tensor([-0x1.42p-5, -0x1.84p-7, 0x1.83cp-7, 0x1.42p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_117_cast = conv(dilations = var_5946, groups = var_4943, pad = q_117_pad_0, pad_type = q_117_pad_type_0, strides = var_5944, weight = mid_block_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_239_cast)[name = tensor("q_117_cast")]; + tensor var_5950 = const()[name = tensor("op_5950"), val = tensor([1, 1])]; + tensor var_5952 = const()[name = tensor("op_5952"), val = tensor([1, 1])]; + tensor k_117_pad_type_0 = const()[name = tensor("k_117_pad_type_0"), val = tensor("custom")]; + tensor k_117_pad_0 = const()[name = tensor("k_117_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(404066048))), lut = tensor([-0x1.414p-5, -0x1.83cp-7, 0x1.83cp-7, 0x1.418p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_117_cast = conv(dilations = var_5952, groups = var_4943, pad = k_117_pad_0, pad_type = k_117_pad_type_0, strides = var_5950, weight = mid_block_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_239_cast)[name = tensor("k_117_cast")]; + tensor var_5956 = const()[name = tensor("op_5956"), val = tensor([1, 1])]; + tensor var_5958 = const()[name = tensor("op_5958"), val = tensor([1, 1])]; + tensor v_117_pad_type_0 = const()[name = tensor("v_117_pad_type_0"), val = tensor("custom")]; + tensor v_117_pad_0 = const()[name = tensor("v_117_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(404475712))), lut = tensor([-0x1.4f4p-5, -0x1.92p-7, 0x1.964p-7, 0x1.4fcp-5]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_117_cast = conv(dilations = var_5958, groups = var_4943, pad = v_117_pad_0, pad_type = v_117_pad_type_0, strides = var_5956, weight = 
mid_block_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_239_cast)[name = tensor("v_117_cast")]; + tensor var_5962 = const()[name = tensor("op_5962"), val = tensor([2, 20, 64, -1])]; + tensor var_5963_cast = reshape(shape = var_5962, x = q_117_cast)[name = tensor("op_5963_cast")]; + tensor var_5964 = const()[name = tensor("op_5964"), val = tensor([2, 20, 64, -1])]; + tensor var_5965_cast = reshape(shape = var_5964, x = k_117_cast)[name = tensor("op_5965_cast")]; + tensor var_5966 = const()[name = tensor("op_5966"), val = tensor([2, 20, 64, -1])]; + tensor var_5967_cast = reshape(shape = var_5966, x = v_117_cast)[name = tensor("op_5967_cast")]; + tensor attn_weights_233_transpose_x_0 = const()[name = tensor("attn_weights_233_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_233_transpose_y_0 = const()[name = tensor("attn_weights_233_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_233_cast = matmul(transpose_x = attn_weights_233_transpose_x_0, transpose_y = attn_weights_233_transpose_y_0, x = var_5963_cast, y = var_5965_cast)[name = tensor("attn_weights_233_cast")]; + tensor attn_weights_235_cast = mul(x = attn_weights_233_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_235_cast")]; + tensor var_5971_cast = softmax(axis = var_4927, x = attn_weights_235_cast)[name = tensor("op_5971_cast")]; + tensor attn_117_transpose_x_0 = const()[name = tensor("attn_117_transpose_x_0"), val = tensor(false)]; + tensor attn_117_transpose_y_0 = const()[name = tensor("attn_117_transpose_y_0"), val = tensor(true)]; + tensor attn_117_cast = matmul(transpose_x = attn_117_transpose_x_0, transpose_y = attn_117_transpose_y_0, x = var_5967_cast, y = var_5971_cast)[name = tensor("attn_117_cast")]; + tensor var_5975 = const()[name = tensor("op_5975"), val = tensor([2, 1280, 1, -1])]; + tensor input_367_cast = reshape(shape = var_5975, x = attn_117_cast)[name = tensor("input_367_cast")]; + tensor var_5980 = const()[name = tensor("op_5980"), val = tensor([1, 1])]; + tensor var_5982 = const()[name = tensor("op_5982"), val = tensor([1, 1])]; + tensor var_5984_pad_type_0 = const()[name = tensor("op_5984_pad_type_0"), val = tensor("custom")]; + tensor var_5984_pad_0 = const()[name = tensor("op_5984_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(404885376))), lut = tensor([-0x1.4ep-5, -0x1.908p-7, 0x1.964p-7, 0x1.4fcp-5]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(405295040)))]; + tensor var_5984_cast = conv(bias = mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_5982, groups = var_4943, pad = var_5984_pad_0, pad_type = var_5984_pad_type_0, strides = var_5980, weight = mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_367_cast)[name = tensor("op_5984_cast")]; + tensor inputs_177_cast = add(x = var_5984_cast, y = inputs_175_cast)[name = tensor("inputs_177_cast")]; + tensor var_5988 = const()[name = 
tensor("op_5988"), val = tensor([1])]; + tensor channels_mean_177_cast = reduce_mean(axes = var_5988, keep_dims = var_4938, x = inputs_177_cast)[name = tensor("channels_mean_177_cast")]; + tensor zero_mean_177_cast = sub(x = inputs_177_cast, y = channels_mean_177_cast)[name = tensor("zero_mean_177_cast")]; + tensor zero_mean_sq_177_cast = mul(x = zero_mean_177_cast, y = zero_mean_177_cast)[name = tensor("zero_mean_sq_177_cast")]; + tensor var_5992 = const()[name = tensor("op_5992"), val = tensor([1])]; + tensor var_5993_cast = reduce_mean(axes = var_5992, keep_dims = var_4938, x = zero_mean_sq_177_cast)[name = tensor("op_5993_cast")]; + tensor var_5994_to_fp16 = const()[name = tensor("op_5994_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5995_cast = add(x = var_5993_cast, y = var_5994_to_fp16)[name = tensor("op_5995_cast")]; + tensor denom_177_epsilon_0_to_fp16 = const()[name = tensor("denom_177_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_177_cast = rsqrt(epsilon = denom_177_epsilon_0_to_fp16, x = var_5995_cast)[name = tensor("denom_177_cast")]; + tensor out_177_cast = mul(x = zero_mean_177_cast, y = denom_177_cast)[name = tensor("out_177_cast")]; + tensor var_5999_to_fp16 = const()[name = tensor("op_5999_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(405297664)))]; + tensor var_6000_cast = add(x = out_177_cast, y = var_5999_to_fp16)[name = tensor("op_6000_cast")]; + tensor var_6002_to_fp16 = const()[name = tensor("op_6002_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(405300288)))]; + tensor hidden_states_241_cast = mul(x = var_6000_cast, y = var_6002_to_fp16)[name = tensor("hidden_states_241_cast")]; + tensor var_6009 = const()[name = tensor("op_6009"), val = tensor([1, 1])]; + tensor var_6011 = const()[name = tensor("op_6011"), val = tensor([1, 1])]; + tensor q_119_pad_type_0 = const()[name = tensor("q_119_pad_type_0"), val = tensor("custom")]; + tensor q_119_pad_0 = const()[name = tensor("q_119_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(405302912))), lut = tensor([-0x1.6a4p-7, 0x1.6ap-7]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_119_cast = conv(dilations = var_6011, groups = var_4943, pad = q_119_pad_0, pad_type = q_119_pad_type_0, strides = var_6009, weight = mid_block_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_241_cast)[name = tensor("q_119_cast")]; + tensor var_6015 = const()[name = tensor("op_6015"), val = tensor([1, 1])]; + tensor var_6017 = const()[name = tensor("op_6017"), val = tensor([1, 1])]; + tensor k_119_pad_type_0 = const()[name = tensor("k_119_pad_type_0"), val = tensor("custom")]; + tensor k_119_pad_0 = const()[name = tensor("k_119_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(405507776))), lut = tensor([-0x1.d9cp-8, 0x1.d8p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_119_cast = conv(dilations = 
var_6017, groups = var_4943, pad = k_119_pad_0, pad_type = k_119_pad_type_0, strides = var_6015, weight = mid_block_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_119_cast")]; + tensor var_6021 = const()[name = tensor("op_6021"), val = tensor([1, 1])]; + tensor var_6023 = const()[name = tensor("op_6023"), val = tensor([1, 1])]; + tensor v_119_pad_type_0 = const()[name = tensor("v_119_pad_type_0"), val = tensor("custom")]; + tensor v_119_pad_0 = const()[name = tensor("v_119_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(405835520))), lut = tensor([-0x1.128p-7, 0x1.13p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_119_cast = conv(dilations = var_6023, groups = var_4943, pad = v_119_pad_0, pad_type = v_119_pad_type_0, strides = var_6021, weight = mid_block_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_119_cast")]; + tensor var_6027 = const()[name = tensor("op_6027"), val = tensor([2, 20, 64, -1])]; + tensor var_6028_cast = reshape(shape = var_6027, x = q_119_cast)[name = tensor("op_6028_cast")]; + tensor var_6029 = const()[name = tensor("op_6029"), val = tensor([2, 20, 64, -1])]; + tensor var_6030_cast = reshape(shape = var_6029, x = k_119_cast)[name = tensor("op_6030_cast")]; + tensor var_6031 = const()[name = tensor("op_6031"), val = tensor([2, 20, 64, -1])]; + tensor var_6032_cast = reshape(shape = var_6031, x = v_119_cast)[name = tensor("op_6032_cast")]; + tensor attn_weights_237_transpose_x_0 = const()[name = tensor("attn_weights_237_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_237_transpose_y_0 = const()[name = tensor("attn_weights_237_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_237_cast = matmul(transpose_x = attn_weights_237_transpose_x_0, transpose_y = attn_weights_237_transpose_y_0, x = var_6028_cast, y = var_6030_cast)[name = tensor("attn_weights_237_cast")]; + tensor attn_weights_239_cast = mul(x = attn_weights_237_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_239_cast")]; + tensor var_6036_cast = softmax(axis = var_4927, x = attn_weights_239_cast)[name = tensor("op_6036_cast")]; + tensor attn_119_transpose_x_0 = const()[name = tensor("attn_119_transpose_x_0"), val = tensor(false)]; + tensor attn_119_transpose_y_0 = const()[name = tensor("attn_119_transpose_y_0"), val = tensor(true)]; + tensor attn_119_cast = matmul(transpose_x = attn_119_transpose_x_0, transpose_y = attn_119_transpose_y_0, x = var_6032_cast, y = var_6036_cast)[name = tensor("attn_119_cast")]; + tensor var_6040 = const()[name = tensor("op_6040"), val = tensor([2, 1280, 1, -1])]; + tensor input_369_cast = reshape(shape = var_6040, x = attn_119_cast)[name = tensor("input_369_cast")]; + tensor var_6045 = const()[name = tensor("op_6045"), val = tensor([1, 1])]; + tensor var_6047 = const()[name = tensor("op_6047"), val = tensor([1, 1])]; + tensor var_6049_pad_type_0 = const()[name = tensor("op_6049_pad_type_0"), val = tensor("custom")]; + tensor var_6049_pad_0 = const()[name = tensor("op_6049_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(406163264))), lut = tensor([-0x1.45p-8, 0x1.46p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(406368128)))]; + tensor var_6049_cast = conv(bias = mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_6047, groups = var_4943, pad = var_6049_pad_0, pad_type = var_6049_pad_type_0, strides = var_6045, weight = mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_369_cast)[name = tensor("op_6049_cast")]; + tensor inputs_179_cast = add(x = var_6049_cast, y = inputs_177_cast)[name = tensor("inputs_179_cast")]; + tensor var_6053 = const()[name = tensor("op_6053"), val = tensor([1])]; + tensor channels_mean_179_cast = reduce_mean(axes = var_6053, keep_dims = var_4938, x = inputs_179_cast)[name = tensor("channels_mean_179_cast")]; + tensor zero_mean_179_cast = sub(x = inputs_179_cast, y = channels_mean_179_cast)[name = tensor("zero_mean_179_cast")]; + tensor zero_mean_sq_179_cast = mul(x = zero_mean_179_cast, y = zero_mean_179_cast)[name = tensor("zero_mean_sq_179_cast")]; + tensor var_6057 = const()[name = tensor("op_6057"), val = tensor([1])]; + tensor var_6058_cast = reduce_mean(axes = var_6057, keep_dims = var_4938, x = zero_mean_sq_179_cast)[name = tensor("op_6058_cast")]; + tensor var_6059_to_fp16 = const()[name = tensor("op_6059_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6060_cast = add(x = var_6058_cast, y = var_6059_to_fp16)[name = tensor("op_6060_cast")]; + tensor denom_179_epsilon_0_to_fp16 = const()[name = tensor("denom_179_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_179_cast = rsqrt(epsilon = denom_179_epsilon_0_to_fp16, x = var_6060_cast)[name = tensor("denom_179_cast")]; + tensor out_179_cast = mul(x = zero_mean_179_cast, y = denom_179_cast)[name = tensor("out_179_cast")]; + tensor var_6064_to_fp16 = const()[name = tensor("op_6064_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(406370752)))]; + tensor var_6065_cast = add(x = out_179_cast, y = var_6064_to_fp16)[name = tensor("op_6065_cast")]; + tensor var_6067_to_fp16 = const()[name = tensor("op_6067_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(406373376)))]; + tensor input_371_cast = mul(x = var_6065_cast, y = var_6067_to_fp16)[name = tensor("input_371_cast")]; + tensor var_6075 = const()[name = tensor("op_6075"), val = tensor([1, 1])]; + tensor var_6077 = const()[name = tensor("op_6077"), val = tensor([1, 1])]; + tensor var_6079_pad_type_0 = const()[name = tensor("op_6079_pad_type_0"), val = tensor("custom")]; + tensor var_6079_pad_0 = const()[name = tensor("op_6079_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(406376000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(412929664))), name = 
tensor("mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(412929792)))]; + tensor var_6079_cast = conv(bias = mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_6077, groups = var_4943, pad = var_6079_pad_0, pad_type = var_6079_pad_type_0, strides = var_6075, weight = mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_371_cast)[name = tensor("op_6079_cast")]; + tensor var_6080_split_sizes_0 = const()[name = tensor("op_6080_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6080_axis_0 = const()[name = tensor("op_6080_axis_0"), val = tensor(1)]; + tensor var_6080_cast_0, tensor var_6080_cast_1 = split(axis = var_6080_axis_0, split_sizes = var_6080_split_sizes_0, x = var_6079_cast)[name = tensor("op_6080_cast")]; + tensor var_6082_mode_0 = const()[name = tensor("op_6082_mode_0"), val = tensor("EXACT")]; + tensor var_6082_cast = gelu(mode = var_6082_mode_0, x = var_6080_cast_1)[name = tensor("op_6082_cast")]; + tensor input_373_cast = mul(x = var_6080_cast_0, y = var_6082_cast)[name = tensor("input_373_cast")]; + tensor var_6086 = const()[name = tensor("op_6086"), val = tensor([1, 1])]; + tensor var_6088 = const()[name = tensor("op_6088"), val = tensor([1, 1])]; + tensor var_6090_pad_type_0 = const()[name = tensor("op_6090_pad_type_0"), val = tensor("custom")]; + tensor var_6090_pad_0 = const()[name = tensor("op_6090_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(412950336))), lut = tensor([-0x1.304p-5, -0x1.6c8p-7, 0x1.6d8p-7, 0x1.308p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(414588800)))]; + tensor var_6090_cast = conv(bias = mid_block_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_6088, groups = var_4943, pad = var_6090_pad_0, pad_type = var_6090_pad_type_0, strides = var_6086, weight = mid_block_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_373_cast)[name = tensor("op_6090_cast")]; + tensor inputs_181_cast = add(x = var_6090_cast, y = inputs_179_cast)[name = tensor("inputs_181_cast")]; + tensor var_6100 = const()[name = tensor("op_6100"), val = tensor([1])]; + tensor channels_mean_181_cast = reduce_mean(axes = var_6100, keep_dims = var_4938, x = inputs_181_cast)[name = tensor("channels_mean_181_cast")]; + tensor zero_mean_181_cast = sub(x = inputs_181_cast, y = channels_mean_181_cast)[name = tensor("zero_mean_181_cast")]; + tensor zero_mean_sq_181_cast = mul(x = zero_mean_181_cast, y = zero_mean_181_cast)[name = tensor("zero_mean_sq_181_cast")]; + tensor var_6104 = const()[name = tensor("op_6104"), val = tensor([1])]; + tensor var_6105_cast = reduce_mean(axes = 
var_6104, keep_dims = var_4938, x = zero_mean_sq_181_cast)[name = tensor("op_6105_cast")]; + tensor var_6106_to_fp16 = const()[name = tensor("op_6106_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6107_cast = add(x = var_6105_cast, y = var_6106_to_fp16)[name = tensor("op_6107_cast")]; + tensor denom_181_epsilon_0_to_fp16 = const()[name = tensor("denom_181_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_181_cast = rsqrt(epsilon = denom_181_epsilon_0_to_fp16, x = var_6107_cast)[name = tensor("denom_181_cast")]; + tensor out_181_cast = mul(x = zero_mean_181_cast, y = denom_181_cast)[name = tensor("out_181_cast")]; + tensor var_6111_to_fp16 = const()[name = tensor("op_6111_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(414591424)))]; + tensor var_6112_cast = add(x = out_181_cast, y = var_6111_to_fp16)[name = tensor("op_6112_cast")]; + tensor var_6114_to_fp16 = const()[name = tensor("op_6114_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(414594048)))]; + tensor hidden_states_245_cast = mul(x = var_6112_cast, y = var_6114_to_fp16)[name = tensor("hidden_states_245_cast")]; + tensor var_6121 = const()[name = tensor("op_6121"), val = tensor([1, 1])]; + tensor var_6123 = const()[name = tensor("op_6123"), val = tensor([1, 1])]; + tensor q_121_pad_type_0 = const()[name = tensor("q_121_pad_type_0"), val = tensor("custom")]; + tensor q_121_pad_0 = const()[name = tensor("q_121_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(414596672))), lut = tensor([-0x1.29cp-5, -0x1.694p-7, 0x1.64p-7, 0x1.28cp-5]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_121_cast = conv(dilations = var_6123, groups = var_4943, pad = q_121_pad_0, pad_type = q_121_pad_type_0, strides = var_6121, weight = mid_block_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_245_cast)[name = tensor("q_121_cast")]; + tensor var_6127 = const()[name = tensor("op_6127"), val = tensor([1, 1])]; + tensor var_6129 = const()[name = tensor("op_6129"), val = tensor([1, 1])]; + tensor k_121_pad_type_0 = const()[name = tensor("k_121_pad_type_0"), val = tensor("custom")]; + tensor k_121_pad_0 = const()[name = tensor("k_121_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(415006336))), lut = tensor([-0x1.28p-5, -0x1.644p-7, 0x1.688p-7, 0x1.294p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_121_cast = conv(dilations = var_6129, groups = var_4943, pad = k_121_pad_0, pad_type = k_121_pad_type_0, strides = var_6127, weight = mid_block_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_245_cast)[name = tensor("k_121_cast")]; + tensor var_6133 = const()[name = tensor("op_6133"), val = tensor([1, 1])]; + tensor var_6135 = const()[name = tensor("op_6135"), val = tensor([1, 1])]; + tensor v_121_pad_type_0 = const()[name = tensor("v_121_pad_type_0"), val = tensor("custom")]; + tensor 
v_121_pad_0 = const()[name = tensor("v_121_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(415416000))), lut = tensor([-0x1.3b4p-5, -0x1.7ap-7, 0x1.7f8p-7, 0x1.3c8p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_121_cast = conv(dilations = var_6135, groups = var_4943, pad = v_121_pad_0, pad_type = v_121_pad_type_0, strides = var_6133, weight = mid_block_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_245_cast)[name = tensor("v_121_cast")]; + tensor var_6139 = const()[name = tensor("op_6139"), val = tensor([2, 20, 64, -1])]; + tensor var_6140_cast = reshape(shape = var_6139, x = q_121_cast)[name = tensor("op_6140_cast")]; + tensor var_6141 = const()[name = tensor("op_6141"), val = tensor([2, 20, 64, -1])]; + tensor var_6142_cast = reshape(shape = var_6141, x = k_121_cast)[name = tensor("op_6142_cast")]; + tensor var_6143 = const()[name = tensor("op_6143"), val = tensor([2, 20, 64, -1])]; + tensor var_6144_cast = reshape(shape = var_6143, x = v_121_cast)[name = tensor("op_6144_cast")]; + tensor attn_weights_241_transpose_x_0 = const()[name = tensor("attn_weights_241_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_241_transpose_y_0 = const()[name = tensor("attn_weights_241_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_241_cast = matmul(transpose_x = attn_weights_241_transpose_x_0, transpose_y = attn_weights_241_transpose_y_0, x = var_6140_cast, y = var_6142_cast)[name = tensor("attn_weights_241_cast")]; + tensor attn_weights_243_cast = mul(x = attn_weights_241_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_243_cast")]; + tensor var_6148_cast = softmax(axis = var_4927, x = attn_weights_243_cast)[name = tensor("op_6148_cast")]; + tensor attn_121_transpose_x_0 = const()[name = tensor("attn_121_transpose_x_0"), val = tensor(false)]; + tensor attn_121_transpose_y_0 = const()[name = tensor("attn_121_transpose_y_0"), val = tensor(true)]; + tensor attn_121_cast = matmul(transpose_x = attn_121_transpose_x_0, transpose_y = attn_121_transpose_y_0, x = var_6144_cast, y = var_6148_cast)[name = tensor("attn_121_cast")]; + tensor var_6152 = const()[name = tensor("op_6152"), val = tensor([2, 1280, 1, -1])]; + tensor input_375_cast = reshape(shape = var_6152, x = attn_121_cast)[name = tensor("input_375_cast")]; + tensor var_6157 = const()[name = tensor("op_6157"), val = tensor([1, 1])]; + tensor var_6159 = const()[name = tensor("op_6159"), val = tensor([1, 1])]; + tensor var_6161_pad_type_0 = const()[name = tensor("op_6161_pad_type_0"), val = tensor("custom")]; + tensor var_6161_pad_0 = const()[name = tensor("op_6161_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(415825664))), lut = tensor([-0x1.39cp-5, -0x1.7cp-7, 0x1.75cp-7, 0x1.388p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = 
tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(416235328)))]; + tensor var_6161_cast = conv(bias = mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_6159, groups = var_4943, pad = var_6161_pad_0, pad_type = var_6161_pad_type_0, strides = var_6157, weight = mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_375_cast)[name = tensor("op_6161_cast")]; + tensor inputs_183_cast = add(x = var_6161_cast, y = inputs_181_cast)[name = tensor("inputs_183_cast")]; + tensor var_6165 = const()[name = tensor("op_6165"), val = tensor([1])]; + tensor channels_mean_183_cast = reduce_mean(axes = var_6165, keep_dims = var_4938, x = inputs_183_cast)[name = tensor("channels_mean_183_cast")]; + tensor zero_mean_183_cast = sub(x = inputs_183_cast, y = channels_mean_183_cast)[name = tensor("zero_mean_183_cast")]; + tensor zero_mean_sq_183_cast = mul(x = zero_mean_183_cast, y = zero_mean_183_cast)[name = tensor("zero_mean_sq_183_cast")]; + tensor var_6169 = const()[name = tensor("op_6169"), val = tensor([1])]; + tensor var_6170_cast = reduce_mean(axes = var_6169, keep_dims = var_4938, x = zero_mean_sq_183_cast)[name = tensor("op_6170_cast")]; + tensor var_6171_to_fp16 = const()[name = tensor("op_6171_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6172_cast = add(x = var_6170_cast, y = var_6171_to_fp16)[name = tensor("op_6172_cast")]; + tensor denom_183_epsilon_0_to_fp16 = const()[name = tensor("denom_183_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_183_cast = rsqrt(epsilon = denom_183_epsilon_0_to_fp16, x = var_6172_cast)[name = tensor("denom_183_cast")]; + tensor out_183_cast = mul(x = zero_mean_183_cast, y = denom_183_cast)[name = tensor("out_183_cast")]; + tensor var_6176_to_fp16 = const()[name = tensor("op_6176_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(416237952)))]; + tensor var_6177_cast = add(x = out_183_cast, y = var_6176_to_fp16)[name = tensor("op_6177_cast")]; + tensor var_6179_to_fp16 = const()[name = tensor("op_6179_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(416240576)))]; + tensor hidden_states_247_cast = mul(x = var_6177_cast, y = var_6179_to_fp16)[name = tensor("hidden_states_247_cast")]; + tensor var_6186 = const()[name = tensor("op_6186"), val = tensor([1, 1])]; + tensor var_6188 = const()[name = tensor("op_6188"), val = tensor([1, 1])]; + tensor q_123_pad_type_0 = const()[name = tensor("q_123_pad_type_0"), val = tensor("custom")]; + tensor q_123_pad_0 = const()[name = tensor("q_123_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(416243200))), lut = tensor([-0x1.664p-7, 0x1.668p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_123_cast = conv(dilations = var_6188, groups = var_4943, pad = q_123_pad_0, pad_type = q_123_pad_type_0, strides = var_6186, weight = mid_block_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_247_cast)[name = tensor("q_123_cast")]; + tensor var_6192 = const()[name = tensor("op_6192"), val = tensor([1, 1])]; + 
tensor var_6194 = const()[name = tensor("op_6194"), val = tensor([1, 1])]; + tensor k_123_pad_type_0 = const()[name = tensor("k_123_pad_type_0"), val = tensor("custom")]; + tensor k_123_pad_0 = const()[name = tensor("k_123_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(416448064))), lut = tensor([-0x1.c18p-8, 0x1.c1p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_123_cast = conv(dilations = var_6194, groups = var_4943, pad = k_123_pad_0, pad_type = k_123_pad_type_0, strides = var_6192, weight = mid_block_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_123_cast")]; + tensor var_6198 = const()[name = tensor("op_6198"), val = tensor([1, 1])]; + tensor var_6200 = const()[name = tensor("op_6200"), val = tensor([1, 1])]; + tensor v_123_pad_type_0 = const()[name = tensor("v_123_pad_type_0"), val = tensor("custom")]; + tensor v_123_pad_0 = const()[name = tensor("v_123_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(416775808))), lut = tensor([-0x1.f8cp-8, 0x1.f88p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_123_cast = conv(dilations = var_6200, groups = var_4943, pad = v_123_pad_0, pad_type = v_123_pad_type_0, strides = var_6198, weight = mid_block_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_123_cast")]; + tensor var_6204 = const()[name = tensor("op_6204"), val = tensor([2, 20, 64, -1])]; + tensor var_6205_cast = reshape(shape = var_6204, x = q_123_cast)[name = tensor("op_6205_cast")]; + tensor var_6206 = const()[name = tensor("op_6206"), val = tensor([2, 20, 64, -1])]; + tensor var_6207_cast = reshape(shape = var_6206, x = k_123_cast)[name = tensor("op_6207_cast")]; + tensor var_6208 = const()[name = tensor("op_6208"), val = tensor([2, 20, 64, -1])]; + tensor var_6209_cast = reshape(shape = var_6208, x = v_123_cast)[name = tensor("op_6209_cast")]; + tensor attn_weights_245_transpose_x_0 = const()[name = tensor("attn_weights_245_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_245_transpose_y_0 = const()[name = tensor("attn_weights_245_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_245_cast = matmul(transpose_x = attn_weights_245_transpose_x_0, transpose_y = attn_weights_245_transpose_y_0, x = var_6205_cast, y = var_6207_cast)[name = tensor("attn_weights_245_cast")]; + tensor attn_weights_247_cast = mul(x = attn_weights_245_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_247_cast")]; + tensor var_6213_cast = softmax(axis = var_4927, x = attn_weights_247_cast)[name = tensor("op_6213_cast")]; + tensor attn_123_transpose_x_0 = const()[name = tensor("attn_123_transpose_x_0"), val = tensor(false)]; + tensor attn_123_transpose_y_0 = const()[name = tensor("attn_123_transpose_y_0"), val = tensor(true)]; + tensor attn_123_cast = matmul(transpose_x = attn_123_transpose_x_0, transpose_y = attn_123_transpose_y_0, x = var_6209_cast, y = 
var_6213_cast)[name = tensor("attn_123_cast")]; + tensor var_6217 = const()[name = tensor("op_6217"), val = tensor([2, 1280, 1, -1])]; + tensor input_377_cast = reshape(shape = var_6217, x = attn_123_cast)[name = tensor("input_377_cast")]; + tensor var_6222 = const()[name = tensor("op_6222"), val = tensor([1, 1])]; + tensor var_6224 = const()[name = tensor("op_6224"), val = tensor([1, 1])]; + tensor var_6226_pad_type_0 = const()[name = tensor("op_6226_pad_type_0"), val = tensor("custom")]; + tensor var_6226_pad_0 = const()[name = tensor("op_6226_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(417103552))), lut = tensor([-0x1.35p-8, 0x1.37p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(417308416)))]; + tensor var_6226_cast = conv(bias = mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_6224, groups = var_4943, pad = var_6226_pad_0, pad_type = var_6226_pad_type_0, strides = var_6222, weight = mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_377_cast)[name = tensor("op_6226_cast")]; + tensor inputs_185_cast = add(x = var_6226_cast, y = inputs_183_cast)[name = tensor("inputs_185_cast")]; + tensor var_6230 = const()[name = tensor("op_6230"), val = tensor([1])]; + tensor channels_mean_185_cast = reduce_mean(axes = var_6230, keep_dims = var_4938, x = inputs_185_cast)[name = tensor("channels_mean_185_cast")]; + tensor zero_mean_185_cast = sub(x = inputs_185_cast, y = channels_mean_185_cast)[name = tensor("zero_mean_185_cast")]; + tensor zero_mean_sq_185_cast = mul(x = zero_mean_185_cast, y = zero_mean_185_cast)[name = tensor("zero_mean_sq_185_cast")]; + tensor var_6234 = const()[name = tensor("op_6234"), val = tensor([1])]; + tensor var_6235_cast = reduce_mean(axes = var_6234, keep_dims = var_4938, x = zero_mean_sq_185_cast)[name = tensor("op_6235_cast")]; + tensor var_6236_to_fp16 = const()[name = tensor("op_6236_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6237_cast = add(x = var_6235_cast, y = var_6236_to_fp16)[name = tensor("op_6237_cast")]; + tensor denom_185_epsilon_0_to_fp16 = const()[name = tensor("denom_185_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_185_cast = rsqrt(epsilon = denom_185_epsilon_0_to_fp16, x = var_6237_cast)[name = tensor("denom_185_cast")]; + tensor out_185_cast = mul(x = zero_mean_185_cast, y = denom_185_cast)[name = tensor("out_185_cast")]; + tensor var_6241_to_fp16 = const()[name = tensor("op_6241_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(417311040)))]; + tensor var_6242_cast = add(x = out_185_cast, y = var_6241_to_fp16)[name = tensor("op_6242_cast")]; + tensor var_6244_to_fp16 = const()[name = tensor("op_6244_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(417313664)))]; + tensor input_379_cast = mul(x = var_6242_cast, y = var_6244_to_fp16)[name = tensor("input_379_cast")]; + tensor var_6252 = 
const()[name = tensor("op_6252"), val = tensor([1, 1])]; + tensor var_6254 = const()[name = tensor("op_6254"), val = tensor([1, 1])]; + tensor var_6256_pad_type_0 = const()[name = tensor("op_6256_pad_type_0"), val = tensor("custom")]; + tensor var_6256_pad_0 = const()[name = tensor("op_6256_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(417316288))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(423869952))), name = tensor("mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(423870080)))]; + tensor var_6256_cast = conv(bias = mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_6254, groups = var_4943, pad = var_6256_pad_0, pad_type = var_6256_pad_type_0, strides = var_6252, weight = mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_379_cast)[name = tensor("op_6256_cast")]; + tensor var_6257_split_sizes_0 = const()[name = tensor("op_6257_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6257_axis_0 = const()[name = tensor("op_6257_axis_0"), val = tensor(1)]; + tensor var_6257_cast_0, tensor var_6257_cast_1 = split(axis = var_6257_axis_0, split_sizes = var_6257_split_sizes_0, x = var_6256_cast)[name = tensor("op_6257_cast")]; + tensor var_6259_mode_0 = const()[name = tensor("op_6259_mode_0"), val = tensor("EXACT")]; + tensor var_6259_cast = gelu(mode = var_6259_mode_0, x = var_6257_cast_1)[name = tensor("op_6259_cast")]; + tensor input_381_cast = mul(x = var_6257_cast_0, y = var_6259_cast)[name = tensor("input_381_cast")]; + tensor var_6263 = const()[name = tensor("op_6263"), val = tensor([1, 1])]; + tensor var_6265 = const()[name = tensor("op_6265"), val = tensor([1, 1])]; + tensor var_6267_pad_type_0 = const()[name = tensor("op_6267_pad_type_0"), val = tensor("custom")]; + tensor var_6267_pad_0 = const()[name = tensor("op_6267_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(423890624))), lut = tensor([-0x1.2d8p-5, -0x1.69p-7, 0x1.6acp-7, 0x1.2ep-5]), name = tensor("mid_block_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(425529088)))]; + tensor var_6267_cast = conv(bias = mid_block_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_6265, groups = var_4943, pad = var_6267_pad_0, pad_type = var_6267_pad_type_0, strides = var_6263, weight = mid_block_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_381_cast)[name = tensor("op_6267_cast")]; + tensor inputs_187_cast = add(x = 
var_6267_cast, y = inputs_185_cast)[name = tensor("inputs_187_cast")]; + tensor var_6277 = const()[name = tensor("op_6277"), val = tensor([1])]; + tensor channels_mean_187_cast = reduce_mean(axes = var_6277, keep_dims = var_4938, x = inputs_187_cast)[name = tensor("channels_mean_187_cast")]; + tensor zero_mean_187_cast = sub(x = inputs_187_cast, y = channels_mean_187_cast)[name = tensor("zero_mean_187_cast")]; + tensor zero_mean_sq_187_cast = mul(x = zero_mean_187_cast, y = zero_mean_187_cast)[name = tensor("zero_mean_sq_187_cast")]; + tensor var_6281 = const()[name = tensor("op_6281"), val = tensor([1])]; + tensor var_6282_cast = reduce_mean(axes = var_6281, keep_dims = var_4938, x = zero_mean_sq_187_cast)[name = tensor("op_6282_cast")]; + tensor var_6283_to_fp16 = const()[name = tensor("op_6283_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6284_cast = add(x = var_6282_cast, y = var_6283_to_fp16)[name = tensor("op_6284_cast")]; + tensor denom_187_epsilon_0_to_fp16 = const()[name = tensor("denom_187_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_187_cast = rsqrt(epsilon = denom_187_epsilon_0_to_fp16, x = var_6284_cast)[name = tensor("denom_187_cast")]; + tensor out_187_cast = mul(x = zero_mean_187_cast, y = denom_187_cast)[name = tensor("out_187_cast")]; + tensor var_6288_to_fp16 = const()[name = tensor("op_6288_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(425531712)))]; + tensor var_6289_cast = add(x = out_187_cast, y = var_6288_to_fp16)[name = tensor("op_6289_cast")]; + tensor var_6291_to_fp16 = const()[name = tensor("op_6291_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(425534336)))]; + tensor hidden_states_251_cast = mul(x = var_6289_cast, y = var_6291_to_fp16)[name = tensor("hidden_states_251_cast")]; + tensor var_6298 = const()[name = tensor("op_6298"), val = tensor([1, 1])]; + tensor var_6300 = const()[name = tensor("op_6300"), val = tensor([1, 1])]; + tensor q_125_pad_type_0 = const()[name = tensor("q_125_pad_type_0"), val = tensor("custom")]; + tensor q_125_pad_0 = const()[name = tensor("q_125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(425536960))), lut = tensor([-0x1.2e8p-5, -0x1.6b4p-7, 0x1.6ecp-7, 0x1.2fp-5]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_125_cast = conv(dilations = var_6300, groups = var_4943, pad = q_125_pad_0, pad_type = q_125_pad_type_0, strides = var_6298, weight = mid_block_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_251_cast)[name = tensor("q_125_cast")]; + tensor var_6304 = const()[name = tensor("op_6304"), val = tensor([1, 1])]; + tensor var_6306 = const()[name = tensor("op_6306"), val = tensor([1, 1])]; + tensor k_125_pad_type_0 = const()[name = tensor("k_125_pad_type_0"), val = tensor("custom")]; + tensor k_125_pad_0 = const()[name = tensor("k_125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(425946624))), lut = tensor([-0x1.2ep-5, -0x1.6acp-7, 0x1.6fcp-7, 0x1.2f4p-5]), name = 
tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_125_cast = conv(dilations = var_6306, groups = var_4943, pad = k_125_pad_0, pad_type = k_125_pad_type_0, strides = var_6304, weight = mid_block_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_251_cast)[name = tensor("k_125_cast")]; + tensor var_6310 = const()[name = tensor("op_6310"), val = tensor([1, 1])]; + tensor var_6312 = const()[name = tensor("op_6312"), val = tensor([1, 1])]; + tensor v_125_pad_type_0 = const()[name = tensor("v_125_pad_type_0"), val = tensor("custom")]; + tensor v_125_pad_0 = const()[name = tensor("v_125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(426356288))), lut = tensor([-0x1.454p-5, -0x1.88p-7, 0x1.864p-7, 0x1.44cp-5]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_125_cast = conv(dilations = var_6312, groups = var_4943, pad = v_125_pad_0, pad_type = v_125_pad_type_0, strides = var_6310, weight = mid_block_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_251_cast)[name = tensor("v_125_cast")]; + tensor var_6316 = const()[name = tensor("op_6316"), val = tensor([2, 20, 64, -1])]; + tensor var_6317_cast = reshape(shape = var_6316, x = q_125_cast)[name = tensor("op_6317_cast")]; + tensor var_6318 = const()[name = tensor("op_6318"), val = tensor([2, 20, 64, -1])]; + tensor var_6319_cast = reshape(shape = var_6318, x = k_125_cast)[name = tensor("op_6319_cast")]; + tensor var_6320 = const()[name = tensor("op_6320"), val = tensor([2, 20, 64, -1])]; + tensor var_6321_cast = reshape(shape = var_6320, x = v_125_cast)[name = tensor("op_6321_cast")]; + tensor attn_weights_249_transpose_x_0 = const()[name = tensor("attn_weights_249_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_249_transpose_y_0 = const()[name = tensor("attn_weights_249_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_249_cast = matmul(transpose_x = attn_weights_249_transpose_x_0, transpose_y = attn_weights_249_transpose_y_0, x = var_6317_cast, y = var_6319_cast)[name = tensor("attn_weights_249_cast")]; + tensor attn_weights_251_cast = mul(x = attn_weights_249_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_251_cast")]; + tensor var_6325_cast = softmax(axis = var_4927, x = attn_weights_251_cast)[name = tensor("op_6325_cast")]; + tensor attn_125_transpose_x_0 = const()[name = tensor("attn_125_transpose_x_0"), val = tensor(false)]; + tensor attn_125_transpose_y_0 = const()[name = tensor("attn_125_transpose_y_0"), val = tensor(true)]; + tensor attn_125_cast = matmul(transpose_x = attn_125_transpose_x_0, transpose_y = attn_125_transpose_y_0, x = var_6321_cast, y = var_6325_cast)[name = tensor("attn_125_cast")]; + tensor var_6329 = const()[name = tensor("op_6329"), val = tensor([2, 1280, 1, -1])]; + tensor input_383_cast = reshape(shape = var_6329, x = attn_125_cast)[name = tensor("input_383_cast")]; + tensor var_6334 = const()[name = tensor("op_6334"), val = tensor([1, 1])]; + tensor var_6336 = const()[name = tensor("op_6336"), val = tensor([1, 1])]; + tensor var_6338_pad_type_0 = const()[name = tensor("op_6338_pad_type_0"), val = tensor("custom")]; + tensor 
var_6338_pad_0 = const()[name = tensor("op_6338_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(426765952))), lut = tensor([-0x1.414p-5, -0x1.83cp-7, 0x1.7e8p-7, 0x1.3f4p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(427175616)))]; + tensor var_6338_cast = conv(bias = mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_6336, groups = var_4943, pad = var_6338_pad_0, pad_type = var_6338_pad_type_0, strides = var_6334, weight = mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_383_cast)[name = tensor("op_6338_cast")]; + tensor inputs_189_cast = add(x = var_6338_cast, y = inputs_187_cast)[name = tensor("inputs_189_cast")]; + tensor var_6342 = const()[name = tensor("op_6342"), val = tensor([1])]; + tensor channels_mean_189_cast = reduce_mean(axes = var_6342, keep_dims = var_4938, x = inputs_189_cast)[name = tensor("channels_mean_189_cast")]; + tensor zero_mean_189_cast = sub(x = inputs_189_cast, y = channels_mean_189_cast)[name = tensor("zero_mean_189_cast")]; + tensor zero_mean_sq_189_cast = mul(x = zero_mean_189_cast, y = zero_mean_189_cast)[name = tensor("zero_mean_sq_189_cast")]; + tensor var_6346 = const()[name = tensor("op_6346"), val = tensor([1])]; + tensor var_6347_cast = reduce_mean(axes = var_6346, keep_dims = var_4938, x = zero_mean_sq_189_cast)[name = tensor("op_6347_cast")]; + tensor var_6348_to_fp16 = const()[name = tensor("op_6348_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6349_cast = add(x = var_6347_cast, y = var_6348_to_fp16)[name = tensor("op_6349_cast")]; + tensor denom_189_epsilon_0_to_fp16 = const()[name = tensor("denom_189_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_189_cast = rsqrt(epsilon = denom_189_epsilon_0_to_fp16, x = var_6349_cast)[name = tensor("denom_189_cast")]; + tensor out_189_cast = mul(x = zero_mean_189_cast, y = denom_189_cast)[name = tensor("out_189_cast")]; + tensor var_6353_to_fp16 = const()[name = tensor("op_6353_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(427178240)))]; + tensor var_6354_cast = add(x = out_189_cast, y = var_6353_to_fp16)[name = tensor("op_6354_cast")]; + tensor var_6356_to_fp16 = const()[name = tensor("op_6356_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(427180864)))]; + tensor hidden_states_253_cast = mul(x = var_6354_cast, y = var_6356_to_fp16)[name = tensor("hidden_states_253_cast")]; + tensor var_6363 = const()[name = tensor("op_6363"), val = tensor([1, 1])]; + tensor var_6365 = const()[name = tensor("op_6365"), val = tensor([1, 1])]; + tensor q_127_pad_type_0 = const()[name = tensor("q_127_pad_type_0"), val = tensor("custom")]; + tensor q_127_pad_0 = const()[name = tensor("q_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path 
= tensor("@model_path/weights/weight.bin"), offset = tensor(427183488))), lut = tensor([-0x1.61p-7, 0x1.61p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_127_cast = conv(dilations = var_6365, groups = var_4943, pad = q_127_pad_0, pad_type = q_127_pad_type_0, strides = var_6363, weight = mid_block_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_253_cast)[name = tensor("q_127_cast")]; + tensor var_6369 = const()[name = tensor("op_6369"), val = tensor([1, 1])]; + tensor var_6371 = const()[name = tensor("op_6371"), val = tensor([1, 1])]; + tensor k_127_pad_type_0 = const()[name = tensor("k_127_pad_type_0"), val = tensor("custom")]; + tensor k_127_pad_0 = const()[name = tensor("k_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(427388352))), lut = tensor([-0x1.b7p-8, 0x1.b6cp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_127_cast = conv(dilations = var_6371, groups = var_4943, pad = k_127_pad_0, pad_type = k_127_pad_type_0, strides = var_6369, weight = mid_block_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_127_cast")]; + tensor var_6375 = const()[name = tensor("op_6375"), val = tensor([1, 1])]; + tensor var_6377 = const()[name = tensor("op_6377"), val = tensor([1, 1])]; + tensor v_127_pad_type_0 = const()[name = tensor("v_127_pad_type_0"), val = tensor("custom")]; + tensor v_127_pad_0 = const()[name = tensor("v_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(427716096))), lut = tensor([-0x1.e78p-8, 0x1.e74p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_127_cast = conv(dilations = var_6377, groups = var_4943, pad = v_127_pad_0, pad_type = v_127_pad_type_0, strides = var_6375, weight = mid_block_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_127_cast")]; + tensor var_6381 = const()[name = tensor("op_6381"), val = tensor([2, 20, 64, -1])]; + tensor var_6382_cast = reshape(shape = var_6381, x = q_127_cast)[name = tensor("op_6382_cast")]; + tensor var_6383 = const()[name = tensor("op_6383"), val = tensor([2, 20, 64, -1])]; + tensor var_6384_cast = reshape(shape = var_6383, x = k_127_cast)[name = tensor("op_6384_cast")]; + tensor var_6385 = const()[name = tensor("op_6385"), val = tensor([2, 20, 64, -1])]; + tensor var_6386_cast = reshape(shape = var_6385, x = v_127_cast)[name = tensor("op_6386_cast")]; + tensor attn_weights_253_transpose_x_0 = const()[name = tensor("attn_weights_253_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_253_transpose_y_0 = const()[name = tensor("attn_weights_253_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_253_cast = matmul(transpose_x = attn_weights_253_transpose_x_0, transpose_y = attn_weights_253_transpose_y_0, x = var_6382_cast, y = var_6384_cast)[name = 
tensor("attn_weights_253_cast")]; + tensor attn_weights_255_cast = mul(x = attn_weights_253_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_255_cast")]; + tensor var_6390_cast = softmax(axis = var_4927, x = attn_weights_255_cast)[name = tensor("op_6390_cast")]; + tensor attn_127_transpose_x_0 = const()[name = tensor("attn_127_transpose_x_0"), val = tensor(false)]; + tensor attn_127_transpose_y_0 = const()[name = tensor("attn_127_transpose_y_0"), val = tensor(true)]; + tensor attn_127_cast = matmul(transpose_x = attn_127_transpose_x_0, transpose_y = attn_127_transpose_y_0, x = var_6386_cast, y = var_6390_cast)[name = tensor("attn_127_cast")]; + tensor var_6394 = const()[name = tensor("op_6394"), val = tensor([2, 1280, 1, -1])]; + tensor input_385_cast = reshape(shape = var_6394, x = attn_127_cast)[name = tensor("input_385_cast")]; + tensor var_6399 = const()[name = tensor("op_6399"), val = tensor([1, 1])]; + tensor var_6401 = const()[name = tensor("op_6401"), val = tensor([1, 1])]; + tensor var_6403_pad_type_0 = const()[name = tensor("op_6403_pad_type_0"), val = tensor("custom")]; + tensor var_6403_pad_0 = const()[name = tensor("op_6403_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(428043840))), lut = tensor([-0x1.37cp-8, 0x1.364p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(428248704)))]; + tensor var_6403_cast = conv(bias = mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_6401, groups = var_4943, pad = var_6403_pad_0, pad_type = var_6403_pad_type_0, strides = var_6399, weight = mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_385_cast)[name = tensor("op_6403_cast")]; + tensor inputs_191_cast = add(x = var_6403_cast, y = inputs_189_cast)[name = tensor("inputs_191_cast")]; + tensor var_6407 = const()[name = tensor("op_6407"), val = tensor([1])]; + tensor channels_mean_191_cast = reduce_mean(axes = var_6407, keep_dims = var_4938, x = inputs_191_cast)[name = tensor("channels_mean_191_cast")]; + tensor zero_mean_191_cast = sub(x = inputs_191_cast, y = channels_mean_191_cast)[name = tensor("zero_mean_191_cast")]; + tensor zero_mean_sq_191_cast = mul(x = zero_mean_191_cast, y = zero_mean_191_cast)[name = tensor("zero_mean_sq_191_cast")]; + tensor var_6411 = const()[name = tensor("op_6411"), val = tensor([1])]; + tensor var_6412_cast = reduce_mean(axes = var_6411, keep_dims = var_4938, x = zero_mean_sq_191_cast)[name = tensor("op_6412_cast")]; + tensor var_6413_to_fp16 = const()[name = tensor("op_6413_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6414_cast = add(x = var_6412_cast, y = var_6413_to_fp16)[name = tensor("op_6414_cast")]; + tensor denom_191_epsilon_0_to_fp16 = const()[name = tensor("denom_191_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_191_cast = rsqrt(epsilon = denom_191_epsilon_0_to_fp16, x = var_6414_cast)[name = tensor("denom_191_cast")]; + tensor out_191_cast = mul(x = zero_mean_191_cast, y = 
denom_191_cast)[name = tensor("out_191_cast")]; + tensor var_6418_to_fp16 = const()[name = tensor("op_6418_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(428251328)))]; + tensor var_6419_cast = add(x = out_191_cast, y = var_6418_to_fp16)[name = tensor("op_6419_cast")]; + tensor var_6421_to_fp16 = const()[name = tensor("op_6421_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(428253952)))]; + tensor input_387_cast = mul(x = var_6419_cast, y = var_6421_to_fp16)[name = tensor("input_387_cast")]; + tensor var_6429 = const()[name = tensor("op_6429"), val = tensor([1, 1])]; + tensor var_6431 = const()[name = tensor("op_6431"), val = tensor([1, 1])]; + tensor var_6433_pad_type_0 = const()[name = tensor("op_6433_pad_type_0"), val = tensor("custom")]; + tensor var_6433_pad_0 = const()[name = tensor("op_6433_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(428256576))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(434810240))), name = tensor("mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(434810368)))]; + tensor var_6433_cast = conv(bias = mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_6431, groups = var_4943, pad = var_6433_pad_0, pad_type = var_6433_pad_type_0, strides = var_6429, weight = mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_387_cast)[name = tensor("op_6433_cast")]; + tensor var_6434_split_sizes_0 = const()[name = tensor("op_6434_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6434_axis_0 = const()[name = tensor("op_6434_axis_0"), val = tensor(1)]; + tensor var_6434_cast_0, tensor var_6434_cast_1 = split(axis = var_6434_axis_0, split_sizes = var_6434_split_sizes_0, x = var_6433_cast)[name = tensor("op_6434_cast")]; + tensor var_6436_mode_0 = const()[name = tensor("op_6436_mode_0"), val = tensor("EXACT")]; + tensor var_6436_cast = gelu(mode = var_6436_mode_0, x = var_6434_cast_1)[name = tensor("op_6436_cast")]; + tensor input_389_cast = mul(x = var_6434_cast_0, y = var_6436_cast)[name = tensor("input_389_cast")]; + tensor var_6440 = const()[name = tensor("op_6440"), val = tensor([1, 1])]; + tensor var_6442 = const()[name = tensor("op_6442"), val = tensor([1, 1])]; + tensor var_6444_pad_type_0 = const()[name = tensor("op_6444_pad_type_0"), val = tensor("custom")]; + tensor var_6444_pad_0 = const()[name = tensor("op_6444_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(434830912))), lut = tensor([-0x1.2ap-5, -0x1.658p-7, 0x1.64cp-7, 0x1.2ap-5]), name = tensor("mid_block_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor 
mid_block_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(436469376)))]; + tensor var_6444_cast = conv(bias = mid_block_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_6442, groups = var_4943, pad = var_6444_pad_0, pad_type = var_6444_pad_type_0, strides = var_6440, weight = mid_block_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_389_cast)[name = tensor("op_6444_cast")]; + tensor inputs_193_cast = add(x = var_6444_cast, y = inputs_191_cast)[name = tensor("inputs_193_cast")]; + tensor var_6454 = const()[name = tensor("op_6454"), val = tensor([1])]; + tensor channels_mean_193_cast = reduce_mean(axes = var_6454, keep_dims = var_4938, x = inputs_193_cast)[name = tensor("channels_mean_193_cast")]; + tensor zero_mean_193_cast = sub(x = inputs_193_cast, y = channels_mean_193_cast)[name = tensor("zero_mean_193_cast")]; + tensor zero_mean_sq_193_cast = mul(x = zero_mean_193_cast, y = zero_mean_193_cast)[name = tensor("zero_mean_sq_193_cast")]; + tensor var_6458 = const()[name = tensor("op_6458"), val = tensor([1])]; + tensor var_6459_cast = reduce_mean(axes = var_6458, keep_dims = var_4938, x = zero_mean_sq_193_cast)[name = tensor("op_6459_cast")]; + tensor var_6460_to_fp16 = const()[name = tensor("op_6460_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6461_cast = add(x = var_6459_cast, y = var_6460_to_fp16)[name = tensor("op_6461_cast")]; + tensor denom_193_epsilon_0_to_fp16 = const()[name = tensor("denom_193_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_193_cast = rsqrt(epsilon = denom_193_epsilon_0_to_fp16, x = var_6461_cast)[name = tensor("denom_193_cast")]; + tensor out_193_cast = mul(x = zero_mean_193_cast, y = denom_193_cast)[name = tensor("out_193_cast")]; + tensor var_6465_to_fp16 = const()[name = tensor("op_6465_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(436472000)))]; + tensor var_6466_cast = add(x = out_193_cast, y = var_6465_to_fp16)[name = tensor("op_6466_cast")]; + tensor var_6468_to_fp16 = const()[name = tensor("op_6468_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(436474624)))]; + tensor hidden_states_257_cast = mul(x = var_6466_cast, y = var_6468_to_fp16)[name = tensor("hidden_states_257_cast")]; + tensor var_6475 = const()[name = tensor("op_6475"), val = tensor([1, 1])]; + tensor var_6477 = const()[name = tensor("op_6477"), val = tensor([1, 1])]; + tensor q_129_pad_type_0 = const()[name = tensor("q_129_pad_type_0"), val = tensor("custom")]; + tensor q_129_pad_0 = const()[name = tensor("q_129_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(436477248))), lut = tensor([-0x1.32cp-5, -0x1.6ecp-7, 0x1.738p-7, 0x1.338p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_129_cast = conv(dilations = var_6477, groups = var_4943, pad = q_129_pad_0, pad_type = q_129_pad_type_0, strides = var_6475, weight = mid_block_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_257_cast)[name = 
tensor("q_129_cast")]; + tensor var_6481 = const()[name = tensor("op_6481"), val = tensor([1, 1])]; + tensor var_6483 = const()[name = tensor("op_6483"), val = tensor([1, 1])]; + tensor k_129_pad_type_0 = const()[name = tensor("k_129_pad_type_0"), val = tensor("custom")]; + tensor k_129_pad_0 = const()[name = tensor("k_129_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(436886912))), lut = tensor([-0x1.328p-5, -0x1.71p-7, 0x1.724p-7, 0x1.32cp-5]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_129_cast = conv(dilations = var_6483, groups = var_4943, pad = k_129_pad_0, pad_type = k_129_pad_type_0, strides = var_6481, weight = mid_block_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_257_cast)[name = tensor("k_129_cast")]; + tensor var_6487 = const()[name = tensor("op_6487"), val = tensor([1, 1])]; + tensor var_6489 = const()[name = tensor("op_6489"), val = tensor([1, 1])]; + tensor v_129_pad_type_0 = const()[name = tensor("v_129_pad_type_0"), val = tensor("custom")]; + tensor v_129_pad_0 = const()[name = tensor("v_129_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(437296576))), lut = tensor([-0x1.4acp-5, -0x1.8fp-7, 0x1.8a8p-7, 0x1.4a4p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_129_cast = conv(dilations = var_6489, groups = var_4943, pad = v_129_pad_0, pad_type = v_129_pad_type_0, strides = var_6487, weight = mid_block_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_257_cast)[name = tensor("v_129_cast")]; + tensor var_6493 = const()[name = tensor("op_6493"), val = tensor([2, 20, 64, -1])]; + tensor var_6494_cast = reshape(shape = var_6493, x = q_129_cast)[name = tensor("op_6494_cast")]; + tensor var_6495 = const()[name = tensor("op_6495"), val = tensor([2, 20, 64, -1])]; + tensor var_6496_cast = reshape(shape = var_6495, x = k_129_cast)[name = tensor("op_6496_cast")]; + tensor var_6497 = const()[name = tensor("op_6497"), val = tensor([2, 20, 64, -1])]; + tensor var_6498_cast = reshape(shape = var_6497, x = v_129_cast)[name = tensor("op_6498_cast")]; + tensor attn_weights_257_transpose_x_0 = const()[name = tensor("attn_weights_257_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_257_transpose_y_0 = const()[name = tensor("attn_weights_257_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_257_cast = matmul(transpose_x = attn_weights_257_transpose_x_0, transpose_y = attn_weights_257_transpose_y_0, x = var_6494_cast, y = var_6496_cast)[name = tensor("attn_weights_257_cast")]; + tensor attn_weights_259_cast = mul(x = attn_weights_257_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_259_cast")]; + tensor var_6502_cast = softmax(axis = var_4927, x = attn_weights_259_cast)[name = tensor("op_6502_cast")]; + tensor attn_129_transpose_x_0 = const()[name = tensor("attn_129_transpose_x_0"), val = tensor(false)]; + tensor attn_129_transpose_y_0 = const()[name = tensor("attn_129_transpose_y_0"), val 
= tensor(true)]; + tensor attn_129_cast = matmul(transpose_x = attn_129_transpose_x_0, transpose_y = attn_129_transpose_y_0, x = var_6498_cast, y = var_6502_cast)[name = tensor("attn_129_cast")]; + tensor var_6506 = const()[name = tensor("op_6506"), val = tensor([2, 1280, 1, -1])]; + tensor input_391_cast = reshape(shape = var_6506, x = attn_129_cast)[name = tensor("input_391_cast")]; + tensor var_6511 = const()[name = tensor("op_6511"), val = tensor([1, 1])]; + tensor var_6513 = const()[name = tensor("op_6513"), val = tensor([1, 1])]; + tensor var_6515_pad_type_0 = const()[name = tensor("op_6515_pad_type_0"), val = tensor("custom")]; + tensor var_6515_pad_0 = const()[name = tensor("op_6515_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(437706240))), lut = tensor([-0x1.46p-5, -0x1.884p-7, 0x1.854p-7, 0x1.448p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(438115904)))]; + tensor var_6515_cast = conv(bias = mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_6513, groups = var_4943, pad = var_6515_pad_0, pad_type = var_6515_pad_type_0, strides = var_6511, weight = mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_391_cast)[name = tensor("op_6515_cast")]; + tensor inputs_195_cast = add(x = var_6515_cast, y = inputs_193_cast)[name = tensor("inputs_195_cast")]; + tensor var_6519 = const()[name = tensor("op_6519"), val = tensor([1])]; + tensor channels_mean_195_cast = reduce_mean(axes = var_6519, keep_dims = var_4938, x = inputs_195_cast)[name = tensor("channels_mean_195_cast")]; + tensor zero_mean_195_cast = sub(x = inputs_195_cast, y = channels_mean_195_cast)[name = tensor("zero_mean_195_cast")]; + tensor zero_mean_sq_195_cast = mul(x = zero_mean_195_cast, y = zero_mean_195_cast)[name = tensor("zero_mean_sq_195_cast")]; + tensor var_6523 = const()[name = tensor("op_6523"), val = tensor([1])]; + tensor var_6524_cast = reduce_mean(axes = var_6523, keep_dims = var_4938, x = zero_mean_sq_195_cast)[name = tensor("op_6524_cast")]; + tensor var_6525_to_fp16 = const()[name = tensor("op_6525_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6526_cast = add(x = var_6524_cast, y = var_6525_to_fp16)[name = tensor("op_6526_cast")]; + tensor denom_195_epsilon_0_to_fp16 = const()[name = tensor("denom_195_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_195_cast = rsqrt(epsilon = denom_195_epsilon_0_to_fp16, x = var_6526_cast)[name = tensor("denom_195_cast")]; + tensor out_195_cast = mul(x = zero_mean_195_cast, y = denom_195_cast)[name = tensor("out_195_cast")]; + tensor var_6530_to_fp16 = const()[name = tensor("op_6530_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(438118528)))]; + tensor var_6531_cast = add(x = out_195_cast, y = var_6530_to_fp16)[name = tensor("op_6531_cast")]; + tensor var_6533_to_fp16 = const()[name = tensor("op_6533_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(438121152)))]; + tensor hidden_states_259_cast = mul(x = var_6531_cast, y = var_6533_to_fp16)[name = tensor("hidden_states_259_cast")]; + tensor var_6540 = const()[name = tensor("op_6540"), val = tensor([1, 1])]; + tensor var_6542 = const()[name = tensor("op_6542"), val = tensor([1, 1])]; + tensor q_131_pad_type_0 = const()[name = tensor("q_131_pad_type_0"), val = tensor("custom")]; + tensor q_131_pad_0 = const()[name = tensor("q_131_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(438123776))), lut = tensor([-0x1.60cp-7, 0x1.604p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_131_cast = conv(dilations = var_6542, groups = var_4943, pad = q_131_pad_0, pad_type = q_131_pad_type_0, strides = var_6540, weight = mid_block_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_259_cast)[name = tensor("q_131_cast")]; + tensor var_6546 = const()[name = tensor("op_6546"), val = tensor([1, 1])]; + tensor var_6548 = const()[name = tensor("op_6548"), val = tensor([1, 1])]; + tensor k_131_pad_type_0 = const()[name = tensor("k_131_pad_type_0"), val = tensor("custom")]; + tensor k_131_pad_0 = const()[name = tensor("k_131_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(438328640))), lut = tensor([-0x1.b4p-8, 0x1.b3cp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_131_cast = conv(dilations = var_6548, groups = var_4943, pad = k_131_pad_0, pad_type = k_131_pad_type_0, strides = var_6546, weight = mid_block_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_131_cast")]; + tensor var_6552 = const()[name = tensor("op_6552"), val = tensor([1, 1])]; + tensor var_6554 = const()[name = tensor("op_6554"), val = tensor([1, 1])]; + tensor v_131_pad_type_0 = const()[name = tensor("v_131_pad_type_0"), val = tensor("custom")]; + tensor v_131_pad_0 = const()[name = tensor("v_131_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(438656384))), lut = tensor([-0x1.e54p-8, 0x1.e38p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_131_cast = conv(dilations = var_6554, groups = var_4943, pad = v_131_pad_0, pad_type = v_131_pad_type_0, strides = var_6552, weight = mid_block_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_131_cast")]; + tensor var_6558 = const()[name = tensor("op_6558"), val = tensor([2, 20, 64, -1])]; + tensor var_6559_cast = reshape(shape = var_6558, x = q_131_cast)[name = tensor("op_6559_cast")]; + tensor var_6560 = const()[name = tensor("op_6560"), val = tensor([2, 20, 64, -1])]; + tensor 
var_6561_cast = reshape(shape = var_6560, x = k_131_cast)[name = tensor("op_6561_cast")]; + tensor var_6562 = const()[name = tensor("op_6562"), val = tensor([2, 20, 64, -1])]; + tensor var_6563_cast = reshape(shape = var_6562, x = v_131_cast)[name = tensor("op_6563_cast")]; + tensor attn_weights_261_transpose_x_0 = const()[name = tensor("attn_weights_261_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_261_transpose_y_0 = const()[name = tensor("attn_weights_261_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_261_cast = matmul(transpose_x = attn_weights_261_transpose_x_0, transpose_y = attn_weights_261_transpose_y_0, x = var_6559_cast, y = var_6561_cast)[name = tensor("attn_weights_261_cast")]; + tensor attn_weights_263_cast = mul(x = attn_weights_261_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_263_cast")]; + tensor var_6567_cast = softmax(axis = var_4927, x = attn_weights_263_cast)[name = tensor("op_6567_cast")]; + tensor attn_131_transpose_x_0 = const()[name = tensor("attn_131_transpose_x_0"), val = tensor(false)]; + tensor attn_131_transpose_y_0 = const()[name = tensor("attn_131_transpose_y_0"), val = tensor(true)]; + tensor attn_131_cast = matmul(transpose_x = attn_131_transpose_x_0, transpose_y = attn_131_transpose_y_0, x = var_6563_cast, y = var_6567_cast)[name = tensor("attn_131_cast")]; + tensor var_6571 = const()[name = tensor("op_6571"), val = tensor([2, 1280, 1, -1])]; + tensor input_393_cast = reshape(shape = var_6571, x = attn_131_cast)[name = tensor("input_393_cast")]; + tensor var_6576 = const()[name = tensor("op_6576"), val = tensor([1, 1])]; + tensor var_6578 = const()[name = tensor("op_6578"), val = tensor([1, 1])]; + tensor var_6580_pad_type_0 = const()[name = tensor("op_6580_pad_type_0"), val = tensor("custom")]; + tensor var_6580_pad_0 = const()[name = tensor("op_6580_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(438984128))), lut = tensor([-0x1.42p-8, 0x1.41cp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(439188992)))]; + tensor var_6580_cast = conv(bias = mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_6578, groups = var_4943, pad = var_6580_pad_0, pad_type = var_6580_pad_type_0, strides = var_6576, weight = mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_393_cast)[name = tensor("op_6580_cast")]; + tensor inputs_197_cast = add(x = var_6580_cast, y = inputs_195_cast)[name = tensor("inputs_197_cast")]; + tensor var_6584 = const()[name = tensor("op_6584"), val = tensor([1])]; + tensor channels_mean_197_cast = reduce_mean(axes = var_6584, keep_dims = var_4938, x = inputs_197_cast)[name = tensor("channels_mean_197_cast")]; + tensor zero_mean_197_cast = sub(x = inputs_197_cast, y = channels_mean_197_cast)[name = tensor("zero_mean_197_cast")]; + tensor zero_mean_sq_197_cast = mul(x = zero_mean_197_cast, y = zero_mean_197_cast)[name = tensor("zero_mean_sq_197_cast")]; + tensor var_6588 = 
const()[name = tensor("op_6588"), val = tensor([1])]; + tensor var_6589_cast = reduce_mean(axes = var_6588, keep_dims = var_4938, x = zero_mean_sq_197_cast)[name = tensor("op_6589_cast")]; + tensor var_6590_to_fp16 = const()[name = tensor("op_6590_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6591_cast = add(x = var_6589_cast, y = var_6590_to_fp16)[name = tensor("op_6591_cast")]; + tensor denom_197_epsilon_0_to_fp16 = const()[name = tensor("denom_197_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_197_cast = rsqrt(epsilon = denom_197_epsilon_0_to_fp16, x = var_6591_cast)[name = tensor("denom_197_cast")]; + tensor out_197_cast = mul(x = zero_mean_197_cast, y = denom_197_cast)[name = tensor("out_197_cast")]; + tensor var_6595_to_fp16 = const()[name = tensor("op_6595_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(439191616)))]; + tensor var_6596_cast = add(x = out_197_cast, y = var_6595_to_fp16)[name = tensor("op_6596_cast")]; + tensor var_6598_to_fp16 = const()[name = tensor("op_6598_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(439194240)))]; + tensor input_395_cast = mul(x = var_6596_cast, y = var_6598_to_fp16)[name = tensor("input_395_cast")]; + tensor var_6606 = const()[name = tensor("op_6606"), val = tensor([1, 1])]; + tensor var_6608 = const()[name = tensor("op_6608"), val = tensor([1, 1])]; + tensor var_6610_pad_type_0 = const()[name = tensor("op_6610_pad_type_0"), val = tensor("custom")]; + tensor var_6610_pad_0 = const()[name = tensor("op_6610_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(439196864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(445750528))), name = tensor("mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(445750656)))]; + tensor var_6610_cast = conv(bias = mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_6608, groups = var_4943, pad = var_6610_pad_0, pad_type = var_6610_pad_type_0, strides = var_6606, weight = mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_395_cast)[name = tensor("op_6610_cast")]; + tensor var_6611_split_sizes_0 = const()[name = tensor("op_6611_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6611_axis_0 = const()[name = tensor("op_6611_axis_0"), val = tensor(1)]; + tensor var_6611_cast_0, tensor var_6611_cast_1 = split(axis = var_6611_axis_0, split_sizes = var_6611_split_sizes_0, x = var_6610_cast)[name = tensor("op_6611_cast")]; + tensor var_6613_mode_0 = const()[name = tensor("op_6613_mode_0"), val = tensor("EXACT")]; + tensor var_6613_cast = gelu(mode = var_6613_mode_0, x = var_6611_cast_1)[name = tensor("op_6613_cast")]; + tensor input_397_cast = mul(x = var_6611_cast_0, y = var_6613_cast)[name = tensor("input_397_cast")]; + tensor var_6617 = const()[name = tensor("op_6617"), val = tensor([1, 1])]; + tensor var_6619 = const()[name = tensor("op_6619"), val 
= tensor([1, 1])]; + tensor var_6621_pad_type_0 = const()[name = tensor("op_6621_pad_type_0"), val = tensor("custom")]; + tensor var_6621_pad_0 = const()[name = tensor("op_6621_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(445771200))), lut = tensor([-0x1.268p-5, -0x1.604p-7, 0x1.63p-7, 0x1.278p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(447409664)))]; + tensor var_6621_cast = conv(bias = mid_block_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_6619, groups = var_4943, pad = var_6621_pad_0, pad_type = var_6621_pad_type_0, strides = var_6617, weight = mid_block_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_397_cast)[name = tensor("op_6621_cast")]; + tensor inputs_199_cast = add(x = var_6621_cast, y = inputs_197_cast)[name = tensor("inputs_199_cast")]; + tensor var_6631 = const()[name = tensor("op_6631"), val = tensor([1])]; + tensor channels_mean_199_cast = reduce_mean(axes = var_6631, keep_dims = var_4938, x = inputs_199_cast)[name = tensor("channels_mean_199_cast")]; + tensor zero_mean_199_cast = sub(x = inputs_199_cast, y = channels_mean_199_cast)[name = tensor("zero_mean_199_cast")]; + tensor zero_mean_sq_199_cast = mul(x = zero_mean_199_cast, y = zero_mean_199_cast)[name = tensor("zero_mean_sq_199_cast")]; + tensor var_6635 = const()[name = tensor("op_6635"), val = tensor([1])]; + tensor var_6636_cast = reduce_mean(axes = var_6635, keep_dims = var_4938, x = zero_mean_sq_199_cast)[name = tensor("op_6636_cast")]; + tensor var_6637_to_fp16 = const()[name = tensor("op_6637_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6638_cast = add(x = var_6636_cast, y = var_6637_to_fp16)[name = tensor("op_6638_cast")]; + tensor denom_199_epsilon_0_to_fp16 = const()[name = tensor("denom_199_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_199_cast = rsqrt(epsilon = denom_199_epsilon_0_to_fp16, x = var_6638_cast)[name = tensor("denom_199_cast")]; + tensor out_199_cast = mul(x = zero_mean_199_cast, y = denom_199_cast)[name = tensor("out_199_cast")]; + tensor var_6642_to_fp16 = const()[name = tensor("op_6642_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(447412288)))]; + tensor var_6643_cast = add(x = out_199_cast, y = var_6642_to_fp16)[name = tensor("op_6643_cast")]; + tensor var_6645_to_fp16 = const()[name = tensor("op_6645_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(447414912)))]; + tensor hidden_states_263_cast = mul(x = var_6643_cast, y = var_6645_to_fp16)[name = tensor("hidden_states_263_cast")]; + tensor var_6652 = const()[name = tensor("op_6652"), val = tensor([1, 1])]; + tensor var_6654 = const()[name = tensor("op_6654"), val = tensor([1, 1])]; + tensor q_133_pad_type_0 = const()[name = tensor("q_133_pad_type_0"), val = tensor("custom")]; + tensor q_133_pad_0 = const()[name = tensor("q_133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
mid_block_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(447417536))), lut = tensor([-0x1.338p-5, -0x1.718p-7, 0x1.724p-7, 0x1.334p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_133_cast = conv(dilations = var_6654, groups = var_4943, pad = q_133_pad_0, pad_type = q_133_pad_type_0, strides = var_6652, weight = mid_block_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_263_cast)[name = tensor("q_133_cast")]; + tensor var_6658 = const()[name = tensor("op_6658"), val = tensor([1, 1])]; + tensor var_6660 = const()[name = tensor("op_6660"), val = tensor([1, 1])]; + tensor k_133_pad_type_0 = const()[name = tensor("k_133_pad_type_0"), val = tensor("custom")]; + tensor k_133_pad_0 = const()[name = tensor("k_133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(447827200))), lut = tensor([-0x1.318p-5, -0x1.6e8p-7, 0x1.734p-7, 0x1.328p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_133_cast = conv(dilations = var_6660, groups = var_4943, pad = k_133_pad_0, pad_type = k_133_pad_type_0, strides = var_6658, weight = mid_block_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_263_cast)[name = tensor("k_133_cast")]; + tensor var_6664 = const()[name = tensor("op_6664"), val = tensor([1, 1])]; + tensor var_6666 = const()[name = tensor("op_6666"), val = tensor([1, 1])]; + tensor v_133_pad_type_0 = const()[name = tensor("v_133_pad_type_0"), val = tensor("custom")]; + tensor v_133_pad_0 = const()[name = tensor("v_133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(448236864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(449056128))), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_133_cast = conv(dilations = var_6666, groups = var_4943, pad = v_133_pad_0, pad_type = v_133_pad_type_0, strides = var_6664, weight = mid_block_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_263_cast)[name = tensor("v_133_cast")]; + tensor var_6670 = const()[name = tensor("op_6670"), val = tensor([2, 20, 64, -1])]; + tensor var_6671_cast = reshape(shape = var_6670, x = q_133_cast)[name = tensor("op_6671_cast")]; + tensor var_6672 = const()[name = tensor("op_6672"), val = tensor([2, 20, 64, -1])]; + tensor var_6673_cast = reshape(shape = var_6672, x = k_133_cast)[name = tensor("op_6673_cast")]; + tensor var_6674 = const()[name = tensor("op_6674"), val = tensor([2, 20, 64, -1])]; + tensor var_6675_cast = reshape(shape = var_6674, x = v_133_cast)[name = tensor("op_6675_cast")]; + tensor attn_weights_265_transpose_x_0 = const()[name = tensor("attn_weights_265_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_265_transpose_y_0 = 
const()[name = tensor("attn_weights_265_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_265_cast = matmul(transpose_x = attn_weights_265_transpose_x_0, transpose_y = attn_weights_265_transpose_y_0, x = var_6671_cast, y = var_6673_cast)[name = tensor("attn_weights_265_cast")]; + tensor attn_weights_267_cast = mul(x = attn_weights_265_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_267_cast")]; + tensor var_6679_cast = softmax(axis = var_4927, x = attn_weights_267_cast)[name = tensor("op_6679_cast")]; + tensor attn_133_transpose_x_0 = const()[name = tensor("attn_133_transpose_x_0"), val = tensor(false)]; + tensor attn_133_transpose_y_0 = const()[name = tensor("attn_133_transpose_y_0"), val = tensor(true)]; + tensor attn_133_cast = matmul(transpose_x = attn_133_transpose_x_0, transpose_y = attn_133_transpose_y_0, x = var_6675_cast, y = var_6679_cast)[name = tensor("attn_133_cast")]; + tensor var_6683 = const()[name = tensor("op_6683"), val = tensor([2, 1280, 1, -1])]; + tensor input_399_cast = reshape(shape = var_6683, x = attn_133_cast)[name = tensor("input_399_cast")]; + tensor var_6688 = const()[name = tensor("op_6688"), val = tensor([1, 1])]; + tensor var_6690 = const()[name = tensor("op_6690"), val = tensor([1, 1])]; + tensor var_6692_pad_type_0 = const()[name = tensor("op_6692_pad_type_0"), val = tensor("custom")]; + tensor var_6692_pad_0 = const()[name = tensor("op_6692_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(449056256))), lut = tensor([-0x1.2b8p-5, -0x1.6ap-7, 0x1.664p-7, 0x1.2a8p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(449465920)))]; + tensor var_6692_cast = conv(bias = mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_6690, groups = var_4943, pad = var_6692_pad_0, pad_type = var_6692_pad_type_0, strides = var_6688, weight = mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_399_cast)[name = tensor("op_6692_cast")]; + tensor inputs_201_cast = add(x = var_6692_cast, y = inputs_199_cast)[name = tensor("inputs_201_cast")]; + tensor var_6696 = const()[name = tensor("op_6696"), val = tensor([1])]; + tensor channels_mean_201_cast = reduce_mean(axes = var_6696, keep_dims = var_4938, x = inputs_201_cast)[name = tensor("channels_mean_201_cast")]; + tensor zero_mean_201_cast = sub(x = inputs_201_cast, y = channels_mean_201_cast)[name = tensor("zero_mean_201_cast")]; + tensor zero_mean_sq_201_cast = mul(x = zero_mean_201_cast, y = zero_mean_201_cast)[name = tensor("zero_mean_sq_201_cast")]; + tensor var_6700 = const()[name = tensor("op_6700"), val = tensor([1])]; + tensor var_6701_cast = reduce_mean(axes = var_6700, keep_dims = var_4938, x = zero_mean_sq_201_cast)[name = tensor("op_6701_cast")]; + tensor var_6702_to_fp16 = const()[name = tensor("op_6702_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6703_cast = add(x = var_6701_cast, y = var_6702_to_fp16)[name = tensor("op_6703_cast")]; + tensor 
denom_201_epsilon_0_to_fp16 = const()[name = tensor("denom_201_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_201_cast = rsqrt(epsilon = denom_201_epsilon_0_to_fp16, x = var_6703_cast)[name = tensor("denom_201_cast")]; + tensor out_201_cast = mul(x = zero_mean_201_cast, y = denom_201_cast)[name = tensor("out_201_cast")]; + tensor var_6707_to_fp16 = const()[name = tensor("op_6707_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(449468544)))]; + tensor var_6708_cast = add(x = out_201_cast, y = var_6707_to_fp16)[name = tensor("op_6708_cast")]; + tensor var_6710_to_fp16 = const()[name = tensor("op_6710_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(449471168)))]; + tensor hidden_states_265_cast = mul(x = var_6708_cast, y = var_6710_to_fp16)[name = tensor("hidden_states_265_cast")]; + tensor var_6717 = const()[name = tensor("op_6717"), val = tensor([1, 1])]; + tensor var_6719 = const()[name = tensor("op_6719"), val = tensor([1, 1])]; + tensor q_135_pad_type_0 = const()[name = tensor("q_135_pad_type_0"), val = tensor("custom")]; + tensor q_135_pad_0 = const()[name = tensor("q_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(449473792))), lut = tensor([-0x1.5ccp-7, 0x1.5ccp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_135_cast = conv(dilations = var_6719, groups = var_4943, pad = q_135_pad_0, pad_type = q_135_pad_type_0, strides = var_6717, weight = mid_block_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_265_cast)[name = tensor("q_135_cast")]; + tensor var_6723 = const()[name = tensor("op_6723"), val = tensor([1, 1])]; + tensor var_6725 = const()[name = tensor("op_6725"), val = tensor([1, 1])]; + tensor k_135_pad_type_0 = const()[name = tensor("k_135_pad_type_0"), val = tensor("custom")]; + tensor k_135_pad_0 = const()[name = tensor("k_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(449678656))), lut = tensor([-0x1.aa4p-8, 0x1.abp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_135_cast = conv(dilations = var_6725, groups = var_4943, pad = k_135_pad_0, pad_type = k_135_pad_type_0, strides = var_6723, weight = mid_block_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_135_cast")]; + tensor var_6729 = const()[name = tensor("op_6729"), val = tensor([1, 1])]; + tensor var_6731 = const()[name = tensor("op_6731"), val = tensor([1, 1])]; + tensor v_135_pad_type_0 = const()[name = tensor("v_135_pad_type_0"), val = tensor("custom")]; + tensor v_135_pad_0 = const()[name = tensor("v_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450006400))), lut = tensor([-0x1.cdcp-8, 0x1.cd4p-8]), 
name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_135_cast = conv(dilations = var_6731, groups = var_4943, pad = v_135_pad_0, pad_type = v_135_pad_type_0, strides = var_6729, weight = mid_block_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_135_cast")]; + tensor var_6735 = const()[name = tensor("op_6735"), val = tensor([2, 20, 64, -1])]; + tensor var_6736_cast = reshape(shape = var_6735, x = q_135_cast)[name = tensor("op_6736_cast")]; + tensor var_6737 = const()[name = tensor("op_6737"), val = tensor([2, 20, 64, -1])]; + tensor var_6738_cast = reshape(shape = var_6737, x = k_135_cast)[name = tensor("op_6738_cast")]; + tensor var_6739 = const()[name = tensor("op_6739"), val = tensor([2, 20, 64, -1])]; + tensor var_6740_cast = reshape(shape = var_6739, x = v_135_cast)[name = tensor("op_6740_cast")]; + tensor attn_weights_269_transpose_x_0 = const()[name = tensor("attn_weights_269_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_269_transpose_y_0 = const()[name = tensor("attn_weights_269_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_269_cast = matmul(transpose_x = attn_weights_269_transpose_x_0, transpose_y = attn_weights_269_transpose_y_0, x = var_6736_cast, y = var_6738_cast)[name = tensor("attn_weights_269_cast")]; + tensor attn_weights_271_cast = mul(x = attn_weights_269_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_271_cast")]; + tensor var_6744_cast = softmax(axis = var_4927, x = attn_weights_271_cast)[name = tensor("op_6744_cast")]; + tensor attn_135_transpose_x_0 = const()[name = tensor("attn_135_transpose_x_0"), val = tensor(false)]; + tensor attn_135_transpose_y_0 = const()[name = tensor("attn_135_transpose_y_0"), val = tensor(true)]; + tensor attn_135_cast = matmul(transpose_x = attn_135_transpose_x_0, transpose_y = attn_135_transpose_y_0, x = var_6740_cast, y = var_6744_cast)[name = tensor("attn_135_cast")]; + tensor var_6748 = const()[name = tensor("op_6748"), val = tensor([2, 1280, 1, -1])]; + tensor input_401_cast = reshape(shape = var_6748, x = attn_135_cast)[name = tensor("input_401_cast")]; + tensor var_6753 = const()[name = tensor("op_6753"), val = tensor([1, 1])]; + tensor var_6755 = const()[name = tensor("op_6755"), val = tensor([1, 1])]; + tensor var_6757_pad_type_0 = const()[name = tensor("op_6757_pad_type_0"), val = tensor("custom")]; + tensor var_6757_pad_0 = const()[name = tensor("op_6757_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450334144))), lut = tensor([-0x1.48cp-8, 0x1.49p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450539008)))]; + tensor var_6757_cast = conv(bias = mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_6755, groups = var_4943, pad = var_6757_pad_0, pad_type = var_6757_pad_type_0, strides = var_6753, weight = 
mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_401_cast)[name = tensor("op_6757_cast")]; + tensor inputs_203_cast = add(x = var_6757_cast, y = inputs_201_cast)[name = tensor("inputs_203_cast")]; + tensor var_6761 = const()[name = tensor("op_6761"), val = tensor([1])]; + tensor channels_mean_203_cast = reduce_mean(axes = var_6761, keep_dims = var_4938, x = inputs_203_cast)[name = tensor("channels_mean_203_cast")]; + tensor zero_mean_203_cast = sub(x = inputs_203_cast, y = channels_mean_203_cast)[name = tensor("zero_mean_203_cast")]; + tensor zero_mean_sq_203_cast = mul(x = zero_mean_203_cast, y = zero_mean_203_cast)[name = tensor("zero_mean_sq_203_cast")]; + tensor var_6765 = const()[name = tensor("op_6765"), val = tensor([1])]; + tensor var_6766_cast = reduce_mean(axes = var_6765, keep_dims = var_4938, x = zero_mean_sq_203_cast)[name = tensor("op_6766_cast")]; + tensor var_6767_to_fp16 = const()[name = tensor("op_6767_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6768_cast = add(x = var_6766_cast, y = var_6767_to_fp16)[name = tensor("op_6768_cast")]; + tensor denom_203_epsilon_0_to_fp16 = const()[name = tensor("denom_203_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_203_cast = rsqrt(epsilon = denom_203_epsilon_0_to_fp16, x = var_6768_cast)[name = tensor("denom_203_cast")]; + tensor out_203_cast = mul(x = zero_mean_203_cast, y = denom_203_cast)[name = tensor("out_203_cast")]; + tensor var_6772_to_fp16 = const()[name = tensor("op_6772_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450541632)))]; + tensor var_6773_cast = add(x = out_203_cast, y = var_6772_to_fp16)[name = tensor("op_6773_cast")]; + tensor var_6775_to_fp16 = const()[name = tensor("op_6775_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450544256)))]; + tensor input_403_cast = mul(x = var_6773_cast, y = var_6775_to_fp16)[name = tensor("input_403_cast")]; + tensor var_6783 = const()[name = tensor("op_6783"), val = tensor([1, 1])]; + tensor var_6785 = const()[name = tensor("op_6785"), val = tensor([1, 1])]; + tensor var_6787_pad_type_0 = const()[name = tensor("op_6787_pad_type_0"), val = tensor("custom")]; + tensor var_6787_pad_0 = const()[name = tensor("op_6787_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450546880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(457100544))), name = tensor("mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(457100672)))]; + tensor var_6787_cast = conv(bias = mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_6785, groups = var_4943, pad = var_6787_pad_0, pad_type = var_6787_pad_type_0, strides = var_6783, weight = mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_403_cast)[name = tensor("op_6787_cast")]; + tensor var_6788_split_sizes_0 = const()[name = 
tensor("op_6788_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6788_axis_0 = const()[name = tensor("op_6788_axis_0"), val = tensor(1)]; + tensor var_6788_cast_0, tensor var_6788_cast_1 = split(axis = var_6788_axis_0, split_sizes = var_6788_split_sizes_0, x = var_6787_cast)[name = tensor("op_6788_cast")]; + tensor var_6790_mode_0 = const()[name = tensor("op_6790_mode_0"), val = tensor("EXACT")]; + tensor var_6790_cast = gelu(mode = var_6790_mode_0, x = var_6788_cast_1)[name = tensor("op_6790_cast")]; + tensor input_405_cast = mul(x = var_6788_cast_0, y = var_6790_cast)[name = tensor("input_405_cast")]; + tensor var_6794 = const()[name = tensor("op_6794"), val = tensor([1, 1])]; + tensor var_6796 = const()[name = tensor("op_6796"), val = tensor([1, 1])]; + tensor var_6798_pad_type_0 = const()[name = tensor("op_6798_pad_type_0"), val = tensor("custom")]; + tensor var_6798_pad_0 = const()[name = tensor("op_6798_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(457121216))), lut = tensor([-0x1.25p-5, -0x1.5d4p-7, 0x1.63p-7, 0x1.268p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(458759680)))]; + tensor var_6798_cast = conv(bias = mid_block_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_6796, groups = var_4943, pad = var_6798_pad_0, pad_type = var_6798_pad_type_0, strides = var_6794, weight = mid_block_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_405_cast)[name = tensor("op_6798_cast")]; + tensor hidden_states_269_cast = add(x = var_6798_cast, y = inputs_203_cast)[name = tensor("hidden_states_269_cast")]; + tensor var_6800 = const()[name = tensor("op_6800"), val = tensor([2, 1280, 32, 32])]; + tensor input_407_cast = reshape(shape = var_6800, x = hidden_states_269_cast)[name = tensor("input_407_cast")]; + tensor var_6804 = const()[name = tensor("op_6804"), val = tensor([1, 1])]; + tensor var_6806 = const()[name = tensor("op_6806"), val = tensor([1, 1])]; + tensor hidden_states_271_pad_type_0 = const()[name = tensor("hidden_states_271_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_271_pad_0 = const()[name = tensor("hidden_states_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(458762304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(459581568))), name = tensor("mid_block_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(459581696)))]; + tensor hidden_states_271_cast = conv(bias = mid_block_attentions_0_proj_out_bias_to_fp16, dilations = var_6806, groups = var_4943, pad = hidden_states_271_pad_0, pad_type = 
hidden_states_271_pad_type_0, strides = var_6804, weight = mid_block_attentions_0_proj_out_weight_to_fp16_palettized, x = input_407_cast)[name = tensor("hidden_states_271_cast")]; + tensor input_409_cast = add(x = hidden_states_271_cast, y = hidden_states_205_cast)[name = tensor("input_409_cast")]; + tensor reshape_76_shape_0 = const()[name = tensor("reshape_76_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_76_cast = reshape(shape = reshape_76_shape_0, x = input_409_cast)[name = tensor("reshape_76_cast")]; + tensor reduce_mean_57_axes_0 = const()[name = tensor("reduce_mean_57_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_57_keep_dims_0 = const()[name = tensor("reduce_mean_57_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_57_cast = reduce_mean(axes = reduce_mean_57_axes_0, keep_dims = reduce_mean_57_keep_dims_0, x = reshape_76_cast)[name = tensor("reduce_mean_57_cast")]; + tensor sub_38_cast = sub(x = reshape_76_cast, y = reduce_mean_57_cast)[name = tensor("sub_38_cast")]; + tensor square_19_cast = square(x = sub_38_cast)[name = tensor("square_19_cast")]; + tensor reduce_mean_59_axes_0 = const()[name = tensor("reduce_mean_59_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_59_keep_dims_0 = const()[name = tensor("reduce_mean_59_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_59_cast = reduce_mean(axes = reduce_mean_59_axes_0, keep_dims = reduce_mean_59_keep_dims_0, x = square_19_cast)[name = tensor("reduce_mean_59_cast")]; + tensor add_38_y_0_to_fp16 = const()[name = tensor("add_38_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_38_cast = add(x = reduce_mean_59_cast, y = add_38_y_0_to_fp16)[name = tensor("add_38_cast")]; + tensor sqrt_19_cast = sqrt(x = add_38_cast)[name = tensor("sqrt_19_cast")]; + tensor real_div_19_cast = real_div(x = sub_38_cast, y = sqrt_19_cast)[name = tensor("real_div_19_cast")]; + tensor reshape_77_shape_0 = const()[name = tensor("reshape_77_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_77_cast = reshape(shape = reshape_77_shape_0, x = real_div_19_cast)[name = tensor("reshape_77_cast")]; + tensor add_39_gamma_0_to_fp16 = const()[name = tensor("add_39_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(459584320)))]; + tensor add_39_beta_0_to_fp16 = const()[name = tensor("add_39_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(459586944)))]; + tensor add_39_epsilon_0_to_fp16 = const()[name = tensor("add_39_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_39_cast = batch_norm(beta = add_39_beta_0_to_fp16, epsilon = add_39_epsilon_0_to_fp16, gamma = add_39_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_77_cast)[name = tensor("add_39_cast")]; + tensor input_413_cast = silu(x = add_39_cast)[name = tensor("input_413_cast")]; + tensor var_6821 = const()[name = tensor("op_6821"), val = tensor([1, 1])]; + tensor var_6823 = const()[name = tensor("op_6823"), val = tensor([1, 1])]; + tensor hidden_states_273_pad_type_0 = const()[name = tensor("hidden_states_273_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_273_pad_0 = const()[name = tensor("hidden_states_273_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor mid_block_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(459589568))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(466962432))), name = tensor("mid_block_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor mid_block_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("mid_block_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(466962560)))]; + tensor hidden_states_273_cast = conv(bias = mid_block_resnets_1_conv1_bias_to_fp16, dilations = var_6823, groups = var_4943, pad = hidden_states_273_pad_0, pad_type = hidden_states_273_pad_type_0, strides = var_6821, weight = mid_block_resnets_1_conv1_weight_to_fp16_palettized, x = input_413_cast)[name = tensor("hidden_states_273_cast")]; + tensor var_6829 = const()[name = tensor("op_6829"), val = tensor([1, 1])]; + tensor var_6831 = const()[name = tensor("op_6831"), val = tensor([1, 1])]; + tensor temb_15_pad_type_0 = const()[name = tensor("temb_15_pad_type_0"), val = tensor("custom")]; + tensor temb_15_pad_0 = const()[name = tensor("temb_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(466965184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(467784448))), name = tensor("mid_block_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("mid_block_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(467784576)))]; + tensor temb_15_cast = conv(bias = mid_block_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_6831, groups = var_4943, pad = temb_15_pad_0, pad_type = temb_15_pad_type_0, strides = var_6829, weight = mid_block_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_15_cast")]; + tensor input_417_cast = add(x = hidden_states_273_cast, y = temb_15_cast)[name = tensor("input_417_cast")]; + tensor reshape_80_shape_0 = const()[name = tensor("reshape_80_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_80_cast = reshape(shape = reshape_80_shape_0, x = input_417_cast)[name = tensor("reshape_80_cast")]; + tensor reduce_mean_60_axes_0 = const()[name = tensor("reduce_mean_60_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_60_keep_dims_0 = const()[name = tensor("reduce_mean_60_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_60_cast = reduce_mean(axes = reduce_mean_60_axes_0, keep_dims = reduce_mean_60_keep_dims_0, x = reshape_80_cast)[name = tensor("reduce_mean_60_cast")]; + tensor sub_40_cast = sub(x = reshape_80_cast, y = reduce_mean_60_cast)[name = tensor("sub_40_cast")]; + tensor square_20_cast = square(x = sub_40_cast)[name = tensor("square_20_cast")]; + tensor reduce_mean_62_axes_0 = const()[name = tensor("reduce_mean_62_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_62_keep_dims_0 = const()[name = tensor("reduce_mean_62_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_62_cast = reduce_mean(axes = reduce_mean_62_axes_0, keep_dims = reduce_mean_62_keep_dims_0, x = square_20_cast)[name = tensor("reduce_mean_62_cast")]; + tensor add_40_y_0_to_fp16 = const()[name = tensor("add_40_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_40_cast = add(x = reduce_mean_62_cast, y = add_40_y_0_to_fp16)[name = 
tensor("add_40_cast")]; + tensor sqrt_20_cast = sqrt(x = add_40_cast)[name = tensor("sqrt_20_cast")]; + tensor real_div_20_cast = real_div(x = sub_40_cast, y = sqrt_20_cast)[name = tensor("real_div_20_cast")]; + tensor reshape_81_shape_0 = const()[name = tensor("reshape_81_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_81_cast = reshape(shape = reshape_81_shape_0, x = real_div_20_cast)[name = tensor("reshape_81_cast")]; + tensor add_41_gamma_0_to_fp16 = const()[name = tensor("add_41_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(467787200)))]; + tensor add_41_beta_0_to_fp16 = const()[name = tensor("add_41_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(467789824)))]; + tensor add_41_epsilon_0_to_fp16 = const()[name = tensor("add_41_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_41_cast = batch_norm(beta = add_41_beta_0_to_fp16, epsilon = add_41_epsilon_0_to_fp16, gamma = add_41_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_81_cast)[name = tensor("add_41_cast")]; + tensor input_421_cast = silu(x = add_41_cast)[name = tensor("input_421_cast")]; + tensor var_6841 = const()[name = tensor("op_6841"), val = tensor([1, 1])]; + tensor var_6843 = const()[name = tensor("op_6843"), val = tensor([1, 1])]; + tensor hidden_states_275_pad_type_0 = const()[name = tensor("hidden_states_275_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_275_pad_0 = const()[name = tensor("hidden_states_275_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor mid_block_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(467792448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(475165312))), name = tensor("mid_block_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor mid_block_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("mid_block_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(475165440)))]; + tensor hidden_states_275_cast = conv(bias = mid_block_resnets_1_conv2_bias_to_fp16, dilations = var_6843, groups = var_4943, pad = hidden_states_275_pad_0, pad_type = hidden_states_275_pad_type_0, strides = var_6841, weight = mid_block_resnets_1_conv2_weight_to_fp16_palettized, x = input_421_cast)[name = tensor("hidden_states_275_cast")]; + tensor hidden_states_277_cast = add(x = input_409_cast, y = hidden_states_275_cast)[name = tensor("hidden_states_277_cast")]; + tensor var_6849 = const()[name = tensor("op_6849"), val = tensor(3)]; + tensor var_6860 = const()[name = tensor("op_6860"), val = tensor(true)]; + tensor var_6865 = const()[name = tensor("op_6865"), val = tensor(1)]; + tensor input_423_interleave_0 = const()[name = tensor("input_423_interleave_0"), val = tensor(false)]; + tensor input_423_cast = concat(axis = var_6865, interleave = input_423_interleave_0, values = (hidden_states_277_cast, input_311_cast))[name = tensor("input_423_cast")]; + tensor reshape_84_shape_0 = const()[name = tensor("reshape_84_shape_0"), val = tensor([2, 32, 80, 32, 32])]; + tensor reshape_84_cast = reshape(shape = reshape_84_shape_0, x = input_423_cast)[name = tensor("reshape_84_cast")]; + tensor reduce_mean_63_axes_0 = const()[name = tensor("reduce_mean_63_axes_0"), val = tensor([2, 
3, 4])]; + tensor reduce_mean_63_keep_dims_0 = const()[name = tensor("reduce_mean_63_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_63_cast = reduce_mean(axes = reduce_mean_63_axes_0, keep_dims = reduce_mean_63_keep_dims_0, x = reshape_84_cast)[name = tensor("reduce_mean_63_cast")]; + tensor sub_42_cast = sub(x = reshape_84_cast, y = reduce_mean_63_cast)[name = tensor("sub_42_cast")]; + tensor square_21_cast = square(x = sub_42_cast)[name = tensor("square_21_cast")]; + tensor reduce_mean_65_axes_0 = const()[name = tensor("reduce_mean_65_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_65_keep_dims_0 = const()[name = tensor("reduce_mean_65_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_65_cast = reduce_mean(axes = reduce_mean_65_axes_0, keep_dims = reduce_mean_65_keep_dims_0, x = square_21_cast)[name = tensor("reduce_mean_65_cast")]; + tensor add_42_y_0_to_fp16 = const()[name = tensor("add_42_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_42_cast = add(x = reduce_mean_65_cast, y = add_42_y_0_to_fp16)[name = tensor("add_42_cast")]; + tensor sqrt_21_cast = sqrt(x = add_42_cast)[name = tensor("sqrt_21_cast")]; + tensor real_div_21_cast = real_div(x = sub_42_cast, y = sqrt_21_cast)[name = tensor("real_div_21_cast")]; + tensor reshape_85_shape_0 = const()[name = tensor("reshape_85_shape_0"), val = tensor([2, 2560, 32, 32])]; + tensor reshape_85_cast = reshape(shape = reshape_85_shape_0, x = real_div_21_cast)[name = tensor("reshape_85_cast")]; + tensor add_43_mean_0_to_fp16 = const()[name = tensor("add_43_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(475168064)))]; + tensor add_43_variance_0_to_fp16 = const()[name = tensor("add_43_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(475173248)))]; + tensor add_43_gamma_0_to_fp16 = const()[name = tensor("add_43_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(475178432)))]; + tensor add_43_beta_0_to_fp16 = const()[name = tensor("add_43_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(475183616)))]; + tensor add_43_epsilon_0_to_fp16 = const()[name = tensor("add_43_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_43_cast = batch_norm(beta = add_43_beta_0_to_fp16, epsilon = add_43_epsilon_0_to_fp16, gamma = add_43_gamma_0_to_fp16, mean = add_43_mean_0_to_fp16, variance = add_43_variance_0_to_fp16, x = reshape_85_cast)[name = tensor("add_43_cast")]; + tensor input_427_cast = silu(x = add_43_cast)[name = tensor("input_427_cast")]; + tensor var_6894 = const()[name = tensor("op_6894"), val = tensor([1, 1])]; + tensor var_6896 = const()[name = tensor("op_6896"), val = tensor([1, 1])]; + tensor hidden_states_279_pad_type_0 = const()[name = tensor("hidden_states_279_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_279_pad_0 = const()[name = tensor("hidden_states_279_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(475188800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(489934464))), name = tensor("up_blocks_0_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 3, 3])]; + tensor up_blocks_0_resnets_0_conv1_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(489934592)))]; + tensor hidden_states_279_cast = conv(bias = up_blocks_0_resnets_0_conv1_bias_to_fp16, dilations = var_6896, groups = var_6865, pad = hidden_states_279_pad_0, pad_type = hidden_states_279_pad_type_0, strides = var_6894, weight = up_blocks_0_resnets_0_conv1_weight_to_fp16_palettized, x = input_427_cast)[name = tensor("hidden_states_279_cast")]; + tensor var_6902 = const()[name = tensor("op_6902"), val = tensor([1, 1])]; + tensor var_6904 = const()[name = tensor("op_6904"), val = tensor([1, 1])]; + tensor temb_17_pad_type_0 = const()[name = tensor("temb_17_pad_type_0"), val = tensor("custom")]; + tensor temb_17_pad_0 = const()[name = tensor("temb_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(489937216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(490756480))), name = tensor("up_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(490756608)))]; + tensor temb_17_cast = conv(bias = up_blocks_0_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_6904, groups = var_6865, pad = temb_17_pad_0, pad_type = temb_17_pad_type_0, strides = var_6902, weight = up_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_17_cast")]; + tensor input_431_cast = add(x = hidden_states_279_cast, y = temb_17_cast)[name = tensor("input_431_cast")]; + tensor reshape_88_shape_0 = const()[name = tensor("reshape_88_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_88_cast = reshape(shape = reshape_88_shape_0, x = input_431_cast)[name = tensor("reshape_88_cast")]; + tensor reduce_mean_66_axes_0 = const()[name = tensor("reduce_mean_66_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_66_keep_dims_0 = const()[name = tensor("reduce_mean_66_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_66_cast = reduce_mean(axes = reduce_mean_66_axes_0, keep_dims = reduce_mean_66_keep_dims_0, x = reshape_88_cast)[name = tensor("reduce_mean_66_cast")]; + tensor sub_44_cast = sub(x = reshape_88_cast, y = reduce_mean_66_cast)[name = tensor("sub_44_cast")]; + tensor square_22_cast = square(x = sub_44_cast)[name = tensor("square_22_cast")]; + tensor reduce_mean_68_axes_0 = const()[name = tensor("reduce_mean_68_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_68_keep_dims_0 = const()[name = tensor("reduce_mean_68_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_68_cast = reduce_mean(axes = reduce_mean_68_axes_0, keep_dims = reduce_mean_68_keep_dims_0, x = square_22_cast)[name = tensor("reduce_mean_68_cast")]; + tensor add_44_y_0_to_fp16 = const()[name = tensor("add_44_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_44_cast = add(x = reduce_mean_68_cast, y = add_44_y_0_to_fp16)[name = tensor("add_44_cast")]; + tensor sqrt_22_cast = sqrt(x = add_44_cast)[name = tensor("sqrt_22_cast")]; + tensor real_div_22_cast = real_div(x = sub_44_cast, y = sqrt_22_cast)[name = tensor("real_div_22_cast")]; + tensor 
reshape_89_shape_0 = const()[name = tensor("reshape_89_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_89_cast = reshape(shape = reshape_89_shape_0, x = real_div_22_cast)[name = tensor("reshape_89_cast")]; + tensor add_45_gamma_0_to_fp16 = const()[name = tensor("add_45_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(490759232)))]; + tensor add_45_beta_0_to_fp16 = const()[name = tensor("add_45_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(490761856)))]; + tensor add_45_epsilon_0_to_fp16 = const()[name = tensor("add_45_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_45_cast = batch_norm(beta = add_45_beta_0_to_fp16, epsilon = add_45_epsilon_0_to_fp16, gamma = add_45_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_89_cast)[name = tensor("add_45_cast")]; + tensor input_435_cast = silu(x = add_45_cast)[name = tensor("input_435_cast")]; + tensor var_6914 = const()[name = tensor("op_6914"), val = tensor([1, 1])]; + tensor var_6916 = const()[name = tensor("op_6916"), val = tensor([1, 1])]; + tensor hidden_states_281_pad_type_0 = const()[name = tensor("hidden_states_281_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_281_pad_0 = const()[name = tensor("hidden_states_281_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(490764480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(498137344))), name = tensor("up_blocks_0_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor up_blocks_0_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(498137472)))]; + tensor hidden_states_281_cast = conv(bias = up_blocks_0_resnets_0_conv2_bias_to_fp16, dilations = var_6916, groups = var_6865, pad = hidden_states_281_pad_0, pad_type = hidden_states_281_pad_type_0, strides = var_6914, weight = up_blocks_0_resnets_0_conv2_weight_to_fp16_palettized, x = input_435_cast)[name = tensor("hidden_states_281_cast")]; + tensor var_6921 = const()[name = tensor("op_6921"), val = tensor([1, 1])]; + tensor var_6923 = const()[name = tensor("op_6923"), val = tensor([1, 1])]; + tensor x_5_pad_type_0 = const()[name = tensor("x_5_pad_type_0"), val = tensor("custom")]; + tensor x_5_pad_0 = const()[name = tensor("x_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_0_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(498140096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(499778560))), name = tensor("up_blocks_0_resnets_0_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 1, 1])]; + tensor up_blocks_0_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(499778688)))]; + tensor x_5_cast = conv(bias = up_blocks_0_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_6923, groups = var_6865, pad = x_5_pad_0, pad_type = x_5_pad_type_0, 
strides = var_6921, weight = up_blocks_0_resnets_0_conv_shortcut_weight_to_fp16_palettized, x = input_423_cast)[name = tensor("x_5_cast")]; + tensor hidden_states_283_cast = add(x = x_5_cast, y = hidden_states_281_cast)[name = tensor("hidden_states_283_cast")]; + tensor reshape_92_shape_0 = const()[name = tensor("reshape_92_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_92_cast = reshape(shape = reshape_92_shape_0, x = hidden_states_283_cast)[name = tensor("reshape_92_cast")]; + tensor reduce_mean_69_axes_0 = const()[name = tensor("reduce_mean_69_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_69_keep_dims_0 = const()[name = tensor("reduce_mean_69_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_69_cast = reduce_mean(axes = reduce_mean_69_axes_0, keep_dims = reduce_mean_69_keep_dims_0, x = reshape_92_cast)[name = tensor("reduce_mean_69_cast")]; + tensor sub_46_cast = sub(x = reshape_92_cast, y = reduce_mean_69_cast)[name = tensor("sub_46_cast")]; + tensor square_23_cast = square(x = sub_46_cast)[name = tensor("square_23_cast")]; + tensor reduce_mean_71_axes_0 = const()[name = tensor("reduce_mean_71_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_71_keep_dims_0 = const()[name = tensor("reduce_mean_71_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_71_cast = reduce_mean(axes = reduce_mean_71_axes_0, keep_dims = reduce_mean_71_keep_dims_0, x = square_23_cast)[name = tensor("reduce_mean_71_cast")]; + tensor add_46_y_0_to_fp16 = const()[name = tensor("add_46_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_46_cast = add(x = reduce_mean_71_cast, y = add_46_y_0_to_fp16)[name = tensor("add_46_cast")]; + tensor sqrt_23_cast = sqrt(x = add_46_cast)[name = tensor("sqrt_23_cast")]; + tensor real_div_23_cast = real_div(x = sub_46_cast, y = sqrt_23_cast)[name = tensor("real_div_23_cast")]; + tensor reshape_93_shape_0 = const()[name = tensor("reshape_93_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_93_cast = reshape(shape = reshape_93_shape_0, x = real_div_23_cast)[name = tensor("reshape_93_cast")]; + tensor add_47_gamma_0_to_fp16 = const()[name = tensor("add_47_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(499781312)))]; + tensor add_47_beta_0_to_fp16 = const()[name = tensor("add_47_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(499783936)))]; + tensor add_47_epsilon_0_to_fp16 = const()[name = tensor("add_47_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_47_cast = batch_norm(beta = add_47_beta_0_to_fp16, epsilon = add_47_epsilon_0_to_fp16, gamma = add_47_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_93_cast)[name = tensor("add_47_cast")]; + tensor var_6961 = const()[name = tensor("op_6961"), val = tensor([1, 1])]; + tensor var_6963 = const()[name = tensor("op_6963"), val = tensor([1, 1])]; + tensor hidden_states_285_pad_type_0 = const()[name = tensor("hidden_states_285_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_285_pad_0 = const()[name = tensor("hidden_states_285_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(499786560))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(500605824))), name = 
tensor("up_blocks_0_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor up_blocks_0_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(500605952)))];
+ tensor hidden_states_285_cast = conv(bias = up_blocks_0_attentions_0_proj_in_bias_to_fp16, dilations = var_6963, groups = var_6865, pad = hidden_states_285_pad_0, pad_type = hidden_states_285_pad_type_0, strides = var_6961, weight = up_blocks_0_attentions_0_proj_in_weight_to_fp16_palettized, x = add_47_cast)[name = tensor("hidden_states_285_cast")];
+ tensor var_6968 = const()[name = tensor("op_6968"), val = tensor([2, 1280, 1, 1024])];
+ tensor inputs_205_cast = reshape(shape = var_6968, x = hidden_states_285_cast)[name = tensor("inputs_205_cast")];
+ tensor var_6978 = const()[name = tensor("op_6978"), val = tensor([1])];
+ tensor channels_mean_205_cast = reduce_mean(axes = var_6978, keep_dims = var_6860, x = inputs_205_cast)[name = tensor("channels_mean_205_cast")];
+ tensor zero_mean_205_cast = sub(x = inputs_205_cast, y = channels_mean_205_cast)[name = tensor("zero_mean_205_cast")];
+ tensor zero_mean_sq_205_cast = mul(x = zero_mean_205_cast, y = zero_mean_205_cast)[name = tensor("zero_mean_sq_205_cast")];
+ tensor var_6982 = const()[name = tensor("op_6982"), val = tensor([1])];
+ tensor var_6983_cast = reduce_mean(axes = var_6982, keep_dims = var_6860, x = zero_mean_sq_205_cast)[name = tensor("op_6983_cast")];
+ tensor var_6984_to_fp16 = const()[name = tensor("op_6984_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor var_6985_cast = add(x = var_6983_cast, y = var_6984_to_fp16)[name = tensor("op_6985_cast")];
+ tensor denom_205_epsilon_0_to_fp16 = const()[name = tensor("denom_205_epsilon_0_to_fp16"), val = tensor(0x1p-24)];
+ tensor denom_205_cast = rsqrt(epsilon = denom_205_epsilon_0_to_fp16, x = var_6985_cast)[name = tensor("denom_205_cast")];
+ tensor out_205_cast = mul(x = zero_mean_205_cast, y = denom_205_cast)[name = tensor("out_205_cast")];
+ tensor var_6989_to_fp16 = const()[name = tensor("op_6989_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(500608576)))];
+ tensor var_6990_cast = add(x = out_205_cast, y = var_6989_to_fp16)[name = tensor("op_6990_cast")];
+ tensor var_6992_to_fp16 = const()[name = tensor("op_6992_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(500611200)))];
+ tensor hidden_states_287_cast = mul(x = var_6990_cast, y = var_6992_to_fp16)[name = tensor("hidden_states_287_cast")];
+ tensor var_6999 = const()[name = tensor("op_6999"), val = tensor([1, 1])];
+ tensor var_7001 = const()[name = tensor("op_7001"), val = tensor([1, 1])];
+ tensor q_137_pad_type_0 = const()[name = tensor("q_137_pad_type_0"), val = tensor("custom")];
+ tensor q_137_pad_0 = const()[name = tensor("q_137_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(500613824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(501433088))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor q_137_cast = conv(dilations = var_7001, groups = var_6865, pad = q_137_pad_0, pad_type = q_137_pad_type_0, strides = var_6999, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_287_cast)[name = tensor("q_137_cast")];
+ tensor var_7005 = const()[name = tensor("op_7005"), val = tensor([1, 1])];
+ tensor var_7007 = const()[name = tensor("op_7007"), val = tensor([1, 1])];
+ tensor k_137_pad_type_0 = const()[name = tensor("k_137_pad_type_0"), val = tensor("custom")];
+ tensor k_137_pad_0 = const()[name = tensor("k_137_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(501433216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(502252480))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor k_137_cast = conv(dilations = var_7007, groups = var_6865, pad = k_137_pad_0, pad_type = k_137_pad_type_0, strides = var_7005, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_287_cast)[name = tensor("k_137_cast")];
+ tensor var_7011 = const()[name = tensor("op_7011"), val = tensor([1, 1])];
+ tensor var_7013 = const()[name = tensor("op_7013"), val = tensor([1, 1])];
+ tensor v_137_pad_type_0 = const()[name = tensor("v_137_pad_type_0"), val = tensor("custom")];
+ tensor v_137_pad_0 = const()[name = tensor("v_137_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(502252608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(503071872))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor v_137_cast = conv(dilations = var_7013, groups = var_6865, pad = v_137_pad_0, pad_type = v_137_pad_type_0, strides = var_7011, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_287_cast)[name = tensor("v_137_cast")];
+ tensor var_7017 = const()[name = tensor("op_7017"), val = tensor([2, 20, 64, -1])];
+ tensor var_7018_cast = reshape(shape = var_7017, x = q_137_cast)[name = tensor("op_7018_cast")];
+ tensor var_7019 = const()[name = tensor("op_7019"), val = tensor([2, 20, 64, -1])];
+ tensor var_7020_cast = reshape(shape = var_7019, x = k_137_cast)[name = tensor("op_7020_cast")];
+ tensor var_7021 = const()[name = tensor("op_7021"), val = tensor([2, 20, 64, -1])];
+ tensor var_7022_cast = reshape(shape = var_7021, x = v_137_cast)[name = tensor("op_7022_cast")];
+ tensor attn_weights_273_transpose_x_0 = const()[name = tensor("attn_weights_273_transpose_x_0"), val = tensor(true)];
+ tensor attn_weights_273_transpose_y_0 = const()[name = tensor("attn_weights_273_transpose_y_0"), val = tensor(false)];
+ tensor attn_weights_273_cast = matmul(transpose_x = attn_weights_273_transpose_x_0, transpose_y = attn_weights_273_transpose_y_0, x = var_7018_cast, y = var_7020_cast)[name = tensor("attn_weights_273_cast")];
+ tensor var_6856_to_fp16 = const()[name = tensor("op_6856_to_fp16"), val = tensor(0x1p-3)];
+ tensor attn_weights_275_cast = mul(x = attn_weights_273_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_275_cast")];
+ tensor var_7026_cast = softmax(axis = var_6849, x = attn_weights_275_cast)[name = tensor("op_7026_cast")];
+ tensor attn_137_transpose_x_0 = const()[name = tensor("attn_137_transpose_x_0"), val = tensor(false)];
+ tensor attn_137_transpose_y_0 = const()[name = tensor("attn_137_transpose_y_0"), val = tensor(true)];
+ tensor attn_137_cast = matmul(transpose_x = attn_137_transpose_x_0, transpose_y = attn_137_transpose_y_0, x = var_7022_cast, y = var_7026_cast)[name = tensor("attn_137_cast")];
+ tensor var_7030 = const()[name = tensor("op_7030"), val = tensor([2, 1280, 1, -1])];
+ tensor input_439_cast = reshape(shape = var_7030, x = attn_137_cast)[name = tensor("input_439_cast")];
+ tensor var_7035 = const()[name = tensor("op_7035"), val = tensor([1, 1])];
+ tensor var_7037 = const()[name = tensor("op_7037"), val = tensor([1, 1])];
+ tensor var_7039_pad_type_0 = const()[name = tensor("op_7039_pad_type_0"), val = tensor("custom")];
+ tensor var_7039_pad_0 = const()[name = tensor("op_7039_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(503072000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(503891264))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(503891392)))];
+ tensor var_7039_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_7037, groups = var_6865, pad = var_7039_pad_0, pad_type = var_7039_pad_type_0, strides = var_7035, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_439_cast)[name = tensor("op_7039_cast")];
+ tensor inputs_207_cast = add(x = var_7039_cast, y = inputs_205_cast)[name = tensor("inputs_207_cast")];
+ tensor var_7043 = const()[name = tensor("op_7043"), val = tensor([1])];
+ tensor channels_mean_207_cast = reduce_mean(axes = var_7043, keep_dims = var_6860, x = inputs_207_cast)[name = tensor("channels_mean_207_cast")];
+ tensor zero_mean_207_cast = sub(x = inputs_207_cast, y = channels_mean_207_cast)[name = tensor("zero_mean_207_cast")];
+ tensor zero_mean_sq_207_cast = mul(x = zero_mean_207_cast, y = zero_mean_207_cast)[name = tensor("zero_mean_sq_207_cast")];
+ tensor var_7047 = const()[name = tensor("op_7047"), val = tensor([1])];
+ tensor var_7048_cast = reduce_mean(axes = var_7047, keep_dims = var_6860, x = zero_mean_sq_207_cast)[name = tensor("op_7048_cast")];
+ tensor var_7049_to_fp16 = const()[name = tensor("op_7049_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor var_7050_cast = add(x = var_7048_cast, y = var_7049_to_fp16)[name = tensor("op_7050_cast")];
+ tensor denom_207_epsilon_0_to_fp16 = const()[name = tensor("denom_207_epsilon_0_to_fp16"), val = tensor(0x1p-24)];
+ tensor denom_207_cast = rsqrt(epsilon = denom_207_epsilon_0_to_fp16, x = var_7050_cast)[name = tensor("denom_207_cast")];
+ tensor out_207_cast = mul(x = zero_mean_207_cast, y = denom_207_cast)[name = tensor("out_207_cast")];
+ tensor var_7054_to_fp16 = const()[name = tensor("op_7054_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(503894016)))];
+ tensor var_7055_cast = add(x = out_207_cast, y = var_7054_to_fp16)[name = tensor("op_7055_cast")];
+ tensor var_7057_to_fp16 = const()[name = tensor("op_7057_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(503896640)))];
+ tensor hidden_states_289_cast = mul(x = var_7055_cast, y = var_7057_to_fp16)[name = tensor("hidden_states_289_cast")];
+ tensor var_7064 = const()[name = tensor("op_7064"), val = tensor([1, 1])];
+ tensor var_7066 = const()[name = tensor("op_7066"), val = tensor([1, 1])];
+ tensor q_139_pad_type_0 = const()[name = tensor("q_139_pad_type_0"), val = tensor("custom")];
+ tensor q_139_pad_0 = const()[name = tensor("q_139_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(503899264))), lut = tensor([-0x1.f74p-7, 0x1.f98p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor q_139_cast = conv(dilations = var_7066, groups = var_6865, pad = q_139_pad_0, pad_type = q_139_pad_type_0, strides = var_7064, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_289_cast)[name = tensor("q_139_cast")];
+ tensor var_7070 = const()[name = tensor("op_7070"), val = tensor([1, 1])];
+ tensor var_7072 = const()[name = tensor("op_7072"), val = tensor([1, 1])];
+ tensor k_139_pad_type_0 = const()[name = tensor("k_139_pad_type_0"), val = tensor("custom")];
+ tensor k_139_pad_0 = const()[name = tensor("k_139_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(504104128))), lut = tensor([-0x1.d24p-7, 0x1.d68p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])];
+ tensor k_139_cast = conv(dilations = var_7072, groups = var_6865, pad = k_139_pad_0, pad_type = k_139_pad_type_0, strides = var_7070, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_139_cast")];
+ tensor var_7076 = const()[name = tensor("op_7076"), val = tensor([1, 1])];
+ tensor var_7078 = const()[name = tensor("op_7078"), val = tensor([1, 1])];
+ tensor v_139_pad_type_0 = const()[name = tensor("v_139_pad_type_0"), val = tensor("custom")];
+ tensor v_139_pad_0 = const()[name = tensor("v_139_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(504431872))), lut = tensor([-0x1.ca4p-7, 0x1.c88p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])];
+ tensor v_139_cast = conv(dilations = var_7078, groups = var_6865, pad = v_139_pad_0, pad_type = v_139_pad_type_0, strides = var_7076, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_139_cast")];
+ tensor var_7082 = const()[name = tensor("op_7082"), val = tensor([2, 20, 64, -1])];
+ tensor var_7083_cast = reshape(shape = var_7082, x = q_139_cast)[name = tensor("op_7083_cast")];
+ tensor var_7084 = const()[name = tensor("op_7084"), val = tensor([2, 20, 64, -1])];
+ tensor var_7085_cast = reshape(shape = var_7084, x = k_139_cast)[name = tensor("op_7085_cast")];
+ tensor var_7086 = const()[name = tensor("op_7086"), val = tensor([2, 20, 64, -1])];
+ tensor var_7087_cast = reshape(shape = var_7086, x = v_139_cast)[name = tensor("op_7087_cast")];
+ tensor attn_weights_277_transpose_x_0 = const()[name = tensor("attn_weights_277_transpose_x_0"), val = tensor(true)];
+ tensor attn_weights_277_transpose_y_0 = const()[name = tensor("attn_weights_277_transpose_y_0"), val = tensor(false)];
+ tensor attn_weights_277_cast = matmul(transpose_x = attn_weights_277_transpose_x_0, transpose_y = attn_weights_277_transpose_y_0, x = var_7083_cast, y = var_7085_cast)[name = tensor("attn_weights_277_cast")];
+ tensor attn_weights_279_cast = mul(x = attn_weights_277_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_279_cast")];
+ tensor var_7091_cast = softmax(axis = var_6849, x = attn_weights_279_cast)[name = tensor("op_7091_cast")];
+ tensor attn_139_transpose_x_0 = const()[name = tensor("attn_139_transpose_x_0"), val = tensor(false)];
+ tensor attn_139_transpose_y_0 = const()[name = tensor("attn_139_transpose_y_0"), val = tensor(true)];
+ tensor attn_139_cast = matmul(transpose_x = attn_139_transpose_x_0, transpose_y = attn_139_transpose_y_0, x = var_7087_cast, y = var_7091_cast)[name = tensor("attn_139_cast")];
+ tensor var_7095 = const()[name = tensor("op_7095"), val = tensor([2, 1280, 1, -1])];
+ tensor input_441_cast = reshape(shape = var_7095, x = attn_139_cast)[name = tensor("input_441_cast")];
+ tensor var_7100 = const()[name = tensor("op_7100"), val = tensor([1, 1])];
+ tensor var_7102 = const()[name = tensor("op_7102"), val = tensor([1, 1])];
+ tensor var_7104_pad_type_0 = const()[name = tensor("op_7104_pad_type_0"), val = tensor("custom")];
+ tensor var_7104_pad_0 = const()[name = tensor("op_7104_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(504759616))), lut = tensor([-0x1.f6cp-8, 0x1.f54p-8]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(504964480)))];
+ tensor var_7104_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_7102, groups = var_6865, pad = var_7104_pad_0, pad_type = var_7104_pad_type_0, strides = var_7100, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_441_cast)[name = tensor("op_7104_cast")];
+ tensor inputs_209_cast = add(x = var_7104_cast, y = inputs_207_cast)[name = tensor("inputs_209_cast")];
+ tensor var_7108 = const()[name = tensor("op_7108"), val = tensor([1])];
+ tensor channels_mean_209_cast = reduce_mean(axes = var_7108, keep_dims = var_6860, x = inputs_209_cast)[name = tensor("channels_mean_209_cast")];
+ tensor zero_mean_209_cast = sub(x = inputs_209_cast, y = channels_mean_209_cast)[name = tensor("zero_mean_209_cast")];
+ tensor zero_mean_sq_209_cast = mul(x = zero_mean_209_cast, y = zero_mean_209_cast)[name = tensor("zero_mean_sq_209_cast")];
+ tensor var_7112 = const()[name = tensor("op_7112"), val = tensor([1])];
+ tensor var_7113_cast = reduce_mean(axes = var_7112, keep_dims = var_6860, x = zero_mean_sq_209_cast)[name = tensor("op_7113_cast")];
+ tensor var_7114_to_fp16 = const()[name = tensor("op_7114_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor var_7115_cast = add(x = var_7113_cast, y = var_7114_to_fp16)[name = tensor("op_7115_cast")];
+ tensor denom_209_epsilon_0_to_fp16 = const()[name = tensor("denom_209_epsilon_0_to_fp16"), val = tensor(0x1p-24)];
+ tensor denom_209_cast = rsqrt(epsilon = denom_209_epsilon_0_to_fp16, x = var_7115_cast)[name = tensor("denom_209_cast")];
+ tensor out_209_cast = mul(x = zero_mean_209_cast, y = denom_209_cast)[name = tensor("out_209_cast")];
+ tensor var_7119_to_fp16 = const()[name = tensor("op_7119_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(504967104)))];
+ tensor var_7120_cast = add(x = out_209_cast, y = var_7119_to_fp16)[name = tensor("op_7120_cast")];
+ tensor var_7122_to_fp16 = const()[name = tensor("op_7122_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(504969728)))];
+ tensor input_443_cast = mul(x = var_7120_cast, y = var_7122_to_fp16)[name = tensor("input_443_cast")];
+ tensor var_7130 = const()[name = tensor("op_7130"), val = tensor([1, 1])];
+ tensor var_7132 = const()[name = tensor("op_7132"), val = tensor([1, 1])];
+ tensor var_7134_pad_type_0 = const()[name = tensor("op_7134_pad_type_0"), val = tensor("custom")];
+ tensor var_7134_pad_0 = const()[name = tensor("op_7134_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(504972352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(511526016))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(511526144)))];
+ tensor var_7134_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_7132, groups = var_6865, pad = var_7134_pad_0, pad_type = var_7134_pad_type_0, strides = var_7130, weight = up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_443_cast)[name = tensor("op_7134_cast")];
+ tensor var_7135_split_sizes_0 = const()[name = tensor("op_7135_split_sizes_0"), val = tensor([5120, 5120])];
+ tensor var_7135_axis_0 = const()[name = tensor("op_7135_axis_0"), val = tensor(1)];
+ tensor var_7135_cast_0, tensor var_7135_cast_1 = split(axis = var_7135_axis_0, split_sizes = var_7135_split_sizes_0, x = var_7134_cast)[name = tensor("op_7135_cast")];
+ tensor var_7137_mode_0 = const()[name = tensor("op_7137_mode_0"), val = tensor("EXACT")];
+ tensor var_7137_cast = gelu(mode = var_7137_mode_0, x = var_7135_cast_1)[name = tensor("op_7137_cast")];
+ tensor input_445_cast = mul(x = var_7135_cast_0, y = var_7137_cast)[name = tensor("input_445_cast")];
+ tensor var_7141 = const()[name = tensor("op_7141"), val = tensor([1, 1])];
+ tensor var_7143 = const()[name = tensor("op_7143"), val = tensor([1, 1])];
+ tensor var_7145_pad_type_0 = const()[name = tensor("op_7145_pad_type_0"), val = tensor("custom")];
+ tensor var_7145_pad_0 = const()[name = tensor("op_7145_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(511546688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(514823552))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(514823680)))];
+ tensor var_7145_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_7143, groups = var_6865, pad = var_7145_pad_0, pad_type = var_7145_pad_type_0, strides = var_7141, weight = up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_445_cast)[name = tensor("op_7145_cast")];
+ tensor inputs_211_cast = add(x = var_7145_cast, y = inputs_209_cast)[name = tensor("inputs_211_cast")];
+ tensor var_7155 = const()[name = tensor("op_7155"), val = tensor([1])];
+ tensor channels_mean_211_cast = reduce_mean(axes = var_7155, keep_dims = var_6860, x = inputs_211_cast)[name = tensor("channels_mean_211_cast")];
+ tensor zero_mean_211_cast = sub(x = inputs_211_cast, y = channels_mean_211_cast)[name = tensor("zero_mean_211_cast")];
+ tensor zero_mean_sq_211_cast = mul(x = zero_mean_211_cast, y = zero_mean_211_cast)[name = tensor("zero_mean_sq_211_cast")];
+ tensor var_7159 = const()[name = tensor("op_7159"), val = tensor([1])];
+ tensor var_7160_cast = reduce_mean(axes = var_7159, keep_dims = var_6860, x = zero_mean_sq_211_cast)[name = tensor("op_7160_cast")];
+ tensor var_7161_to_fp16 = const()[name = tensor("op_7161_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor var_7162_cast = add(x = var_7160_cast, y = var_7161_to_fp16)[name = tensor("op_7162_cast")];
+ tensor denom_211_epsilon_0_to_fp16 = const()[name = tensor("denom_211_epsilon_0_to_fp16"), val = tensor(0x1p-24)];
+ tensor denom_211_cast = rsqrt(epsilon = denom_211_epsilon_0_to_fp16, x = var_7162_cast)[name = tensor("denom_211_cast")];
+ tensor out_211_cast = mul(x = zero_mean_211_cast, y = denom_211_cast)[name = tensor("out_211_cast")];
+ tensor var_7166_to_fp16 = const()[name = tensor("op_7166_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(514826304)))];
+ tensor var_7167_cast = add(x = out_211_cast, y = var_7166_to_fp16)[name = tensor("op_7167_cast")];
+ tensor var_7169_to_fp16 = const()[name = tensor("op_7169_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(514828928)))];
+ tensor hidden_states_293_cast = mul(x = var_7167_cast, y = var_7169_to_fp16)[name = tensor("hidden_states_293_cast")];
+ tensor var_7176 = const()[name = tensor("op_7176"), val = tensor([1, 1])];
+ tensor var_7178 = const()[name = tensor("op_7178"), val = tensor([1, 1])];
+ tensor q_141_pad_type_0 = const()[name = tensor("q_141_pad_type_0"), val = tensor("custom")];
+ tensor q_141_pad_0 = const()[name = tensor("q_141_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(514831552))), lut = tensor([-0x1.5b4p-5, -0x1.a14p-7, 0x1.a2cp-7, 0x1.5cp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor q_141_cast = conv(dilations = var_7178, groups = var_6865, pad = q_141_pad_0, pad_type = q_141_pad_type_0, strides = var_7176, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_293_cast)[name = tensor("q_141_cast")];
+ tensor var_7182 = const()[name = tensor("op_7182"), val = tensor([1, 1])];
+ tensor var_7184 = const()[name = tensor("op_7184"), val = tensor([1, 1])];
+ tensor k_141_pad_type_0 = const()[name = tensor("k_141_pad_type_0"), val = tensor("custom")];
+ tensor k_141_pad_0 = const()[name = tensor("k_141_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(515241216))), lut = tensor([-0x1.5d8p-5, -0x1.a5cp-7, 0x1.a44p-7, 0x1.5ccp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor k_141_cast = conv(dilations = var_7184, groups = var_6865, pad = k_141_pad_0, pad_type = k_141_pad_type_0, strides = var_7182, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_293_cast)[name = tensor("k_141_cast")];
+ tensor var_7188 = const()[name = tensor("op_7188"), val = tensor([1, 1])];
+ tensor var_7190 = const()[name = tensor("op_7190"), val = tensor([1, 1])];
+ tensor v_141_pad_type_0 = const()[name = tensor("v_141_pad_type_0"), val = tensor("custom")];
+ tensor v_141_pad_0 = const()[name = tensor("v_141_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(515650880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(516470144))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor v_141_cast = conv(dilations = var_7190, groups = var_6865, pad = v_141_pad_0, pad_type = v_141_pad_type_0, strides = var_7188, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_293_cast)[name = tensor("v_141_cast")];
+ tensor var_7194 = const()[name = tensor("op_7194"), val = tensor([2, 20, 64, -1])];
+ tensor var_7195_cast = reshape(shape = var_7194, x = q_141_cast)[name = tensor("op_7195_cast")];
+ tensor var_7196 = const()[name = tensor("op_7196"), val = tensor([2, 20, 64, -1])];
+ tensor var_7197_cast = reshape(shape = var_7196, x = k_141_cast)[name = tensor("op_7197_cast")];
+ tensor var_7198 = const()[name = tensor("op_7198"), val = tensor([2, 20, 64, -1])];
+ tensor var_7199_cast = reshape(shape = var_7198, x = v_141_cast)[name = tensor("op_7199_cast")];
+ tensor attn_weights_281_transpose_x_0 = const()[name = tensor("attn_weights_281_transpose_x_0"), val = tensor(true)];
+ tensor attn_weights_281_transpose_y_0 = const()[name = tensor("attn_weights_281_transpose_y_0"), val = tensor(false)];
+ tensor attn_weights_281_cast = matmul(transpose_x = attn_weights_281_transpose_x_0, transpose_y = attn_weights_281_transpose_y_0, x = var_7195_cast, y = var_7197_cast)[name = tensor("attn_weights_281_cast")];
+ tensor attn_weights_283_cast = mul(x = attn_weights_281_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_283_cast")];
+ tensor var_7203_cast = softmax(axis = var_6849, x = attn_weights_283_cast)[name = tensor("op_7203_cast")];
+ tensor attn_141_transpose_x_0 = const()[name = tensor("attn_141_transpose_x_0"), val = tensor(false)];
+ tensor attn_141_transpose_y_0 = const()[name = tensor("attn_141_transpose_y_0"), val = tensor(true)];
+ tensor attn_141_cast = matmul(transpose_x = attn_141_transpose_x_0, transpose_y = attn_141_transpose_y_0, x = var_7199_cast, y = var_7203_cast)[name = tensor("attn_141_cast")];
+ tensor var_7207 = const()[name = tensor("op_7207"), val = tensor([2, 1280, 1, -1])];
+ tensor input_447_cast = reshape(shape = var_7207, x = attn_141_cast)[name = tensor("input_447_cast")];
+ tensor var_7212 = const()[name = tensor("op_7212"), val = tensor([1, 1])];
+ tensor var_7214 = const()[name = tensor("op_7214"), val = tensor([1, 1])];
+ tensor var_7216_pad_type_0 = const()[name = tensor("op_7216_pad_type_0"), val = tensor("custom")];
+ tensor var_7216_pad_0 = const()[name = tensor("op_7216_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(516470272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(517289536))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(517289664)))];
+ tensor var_7216_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_7214, groups = var_6865, pad = var_7216_pad_0, pad_type = var_7216_pad_type_0, strides = var_7212, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_447_cast)[name = tensor("op_7216_cast")];
+ tensor inputs_213_cast = add(x = var_7216_cast, y = inputs_211_cast)[name = tensor("inputs_213_cast")];
+ tensor var_7220 = const()[name = tensor("op_7220"), val = tensor([1])];
+ tensor channels_mean_213_cast = reduce_mean(axes = var_7220, keep_dims = var_6860, x = inputs_213_cast)[name = tensor("channels_mean_213_cast")];
+ tensor zero_mean_213_cast = sub(x = inputs_213_cast, y = channels_mean_213_cast)[name = tensor("zero_mean_213_cast")];
+ tensor zero_mean_sq_213_cast = mul(x = zero_mean_213_cast, y = zero_mean_213_cast)[name = tensor("zero_mean_sq_213_cast")];
+ tensor var_7224 = const()[name = tensor("op_7224"), val = tensor([1])];
+ tensor var_7225_cast = reduce_mean(axes = var_7224, keep_dims = var_6860, x = zero_mean_sq_213_cast)[name = tensor("op_7225_cast")];
+ tensor var_7226_to_fp16 = const()[name = tensor("op_7226_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor var_7227_cast = add(x = var_7225_cast, y = var_7226_to_fp16)[name = tensor("op_7227_cast")];
+ tensor denom_213_epsilon_0_to_fp16 = const()[name = tensor("denom_213_epsilon_0_to_fp16"), val = tensor(0x1p-24)];
+ tensor denom_213_cast = rsqrt(epsilon = denom_213_epsilon_0_to_fp16, x = var_7227_cast)[name = tensor("denom_213_cast")];
+ tensor out_213_cast = mul(x = zero_mean_213_cast, y = denom_213_cast)[name = tensor("out_213_cast")];
+ tensor var_7231_to_fp16 = const()[name = tensor("op_7231_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(517292288)))];
+ tensor var_7232_cast = add(x = out_213_cast, y = var_7231_to_fp16)[name = tensor("op_7232_cast")];
+ tensor var_7234_to_fp16 = const()[name = tensor("op_7234_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(517294912)))];
+ tensor hidden_states_295_cast = mul(x = var_7232_cast, y = var_7234_to_fp16)[name = tensor("hidden_states_295_cast")];
+ tensor var_7241 = const()[name = tensor("op_7241"), val = tensor([1, 1])];
+ tensor var_7243 = const()[name = tensor("op_7243"), val = tensor([1, 1])];
+ tensor q_143_pad_type_0 = const()[name = tensor("q_143_pad_type_0"), val = tensor("custom")];
+ tensor q_143_pad_0 = const()[name = tensor("q_143_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(517297536))), lut = tensor([-0x1.298p-6, 0x1.28cp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor q_143_cast = conv(dilations = var_7243, groups = var_6865, pad = q_143_pad_0, pad_type = q_143_pad_type_0, strides = var_7241, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_295_cast)[name = tensor("q_143_cast")];
+ tensor var_7247 = const()[name = tensor("op_7247"), val = tensor([1, 1])];
+ tensor var_7249 = const()[name = tensor("op_7249"), val = tensor([1, 1])];
+ tensor k_143_pad_type_0 = const()[name = tensor("k_143_pad_type_0"), val = tensor("custom")];
+ tensor k_143_pad_0 = const()[name = tensor("k_143_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(517502400))), lut = tensor([-0x1.0c4p-6, 0x1.0bp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])];
+ tensor k_143_cast = conv(dilations = var_7249, groups = var_6865, pad = k_143_pad_0, pad_type = k_143_pad_type_0, strides = var_7247, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_143_cast")];
+ tensor var_7253 = const()[name = tensor("op_7253"), val = tensor([1, 1])];
+ tensor var_7255 = const()[name = tensor("op_7255"), val = tensor([1, 1])];
+ tensor v_143_pad_type_0 = const()[name = tensor("v_143_pad_type_0"), val = tensor("custom")];
+ tensor v_143_pad_0 = const()[name = tensor("v_143_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(517830144))), lut = tensor([-0x1.09cp-6, 0x1.0ap-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])];
+ tensor v_143_cast = conv(dilations = var_7255, groups = var_6865, pad = v_143_pad_0, pad_type = v_143_pad_type_0, strides = var_7253, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_143_cast")];
+ tensor var_7259 = const()[name = tensor("op_7259"), val = tensor([2, 20, 64, -1])];
+ tensor var_7260_cast = reshape(shape = var_7259, x = q_143_cast)[name = tensor("op_7260_cast")];
+ tensor var_7261 = const()[name = tensor("op_7261"), val = tensor([2, 20, 64, -1])];
+ tensor var_7262_cast = reshape(shape = var_7261, x = k_143_cast)[name = tensor("op_7262_cast")];
+ tensor var_7263 = const()[name = tensor("op_7263"), val = tensor([2, 20, 64, -1])];
+ tensor var_7264_cast = reshape(shape = var_7263, x = v_143_cast)[name = tensor("op_7264_cast")];
+ tensor attn_weights_285_transpose_x_0 = const()[name = tensor("attn_weights_285_transpose_x_0"), val = tensor(true)];
+ tensor attn_weights_285_transpose_y_0 = const()[name = tensor("attn_weights_285_transpose_y_0"), val = tensor(false)];
+ tensor attn_weights_285_cast = matmul(transpose_x = attn_weights_285_transpose_x_0, transpose_y = attn_weights_285_transpose_y_0, x = var_7260_cast, y = var_7262_cast)[name = tensor("attn_weights_285_cast")];
+ tensor attn_weights_287_cast = mul(x = attn_weights_285_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_287_cast")];
+ tensor var_7268_cast = softmax(axis = var_6849, x = attn_weights_287_cast)[name = tensor("op_7268_cast")];
+ tensor attn_143_transpose_x_0 = const()[name = tensor("attn_143_transpose_x_0"), val = tensor(false)];
+ tensor attn_143_transpose_y_0 = const()[name = tensor("attn_143_transpose_y_0"), val = tensor(true)];
+ tensor attn_143_cast = matmul(transpose_x = attn_143_transpose_x_0, transpose_y = attn_143_transpose_y_0, x = var_7264_cast, y = var_7268_cast)[name = tensor("attn_143_cast")];
+ tensor var_7272 = const()[name = tensor("op_7272"), val = tensor([2, 1280, 1, -1])];
+ tensor input_449_cast = reshape(shape = var_7272, x = attn_143_cast)[name = tensor("input_449_cast")];
+ tensor var_7277 = const()[name = tensor("op_7277"), val = tensor([1, 1])];
+ tensor var_7279 = const()[name = tensor("op_7279"), val = tensor([1, 1])];
+ tensor var_7281_pad_type_0 = const()[name = tensor("op_7281_pad_type_0"), val = tensor("custom")];
+ tensor var_7281_pad_0 = const()[name = tensor("op_7281_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(518157888))), lut = tensor([-0x1.2fp-7, 0x1.2f4p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(518362752)))];
+ tensor var_7281_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_7279, groups = var_6865, pad = var_7281_pad_0, pad_type = var_7281_pad_type_0, strides = var_7277, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_449_cast)[name = tensor("op_7281_cast")];
+ tensor inputs_215_cast = add(x = var_7281_cast, y = inputs_213_cast)[name = tensor("inputs_215_cast")];
+ tensor var_7285 = const()[name = tensor("op_7285"), val = tensor([1])];
+ tensor channels_mean_215_cast = reduce_mean(axes = var_7285, keep_dims = var_6860, x = inputs_215_cast)[name = tensor("channels_mean_215_cast")];
+ tensor zero_mean_215_cast = sub(x = inputs_215_cast, y = channels_mean_215_cast)[name = tensor("zero_mean_215_cast")];
+ tensor zero_mean_sq_215_cast = mul(x = zero_mean_215_cast, y = zero_mean_215_cast)[name = tensor("zero_mean_sq_215_cast")];
+ tensor var_7289 = const()[name = tensor("op_7289"), val = tensor([1])];
+ tensor var_7290_cast = reduce_mean(axes = var_7289, keep_dims = var_6860, x = zero_mean_sq_215_cast)[name = tensor("op_7290_cast")];
+ tensor var_7291_to_fp16 = const()[name = tensor("op_7291_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor var_7292_cast = add(x = var_7290_cast, y = var_7291_to_fp16)[name = tensor("op_7292_cast")];
+ tensor denom_215_epsilon_0_to_fp16 = const()[name = tensor("denom_215_epsilon_0_to_fp16"), val = tensor(0x1p-24)];
+ tensor denom_215_cast = rsqrt(epsilon = denom_215_epsilon_0_to_fp16, x = var_7292_cast)[name = tensor("denom_215_cast")];
+ tensor out_215_cast = mul(x = zero_mean_215_cast, y = denom_215_cast)[name = tensor("out_215_cast")];
+ tensor var_7296_to_fp16 = const()[name = tensor("op_7296_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(518365376)))];
+ tensor var_7297_cast = add(x = out_215_cast, y = var_7296_to_fp16)[name = tensor("op_7297_cast")];
+ tensor var_7299_to_fp16 = const()[name = tensor("op_7299_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(518368000)))];
+ tensor input_451_cast = mul(x = var_7297_cast, y = var_7299_to_fp16)[name = tensor("input_451_cast")];
+ tensor var_7307 = const()[name = tensor("op_7307"), val = tensor([1, 1])];
+ tensor var_7309 = const()[name = tensor("op_7309"), val = tensor([1, 1])];
+ tensor var_7311_pad_type_0 = const()[name = tensor("op_7311_pad_type_0"), val = tensor("custom")];
+ tensor var_7311_pad_0 = const()[name = tensor("op_7311_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(518370624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(524924288))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(524924416)))];
+ tensor var_7311_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_7309, groups = var_6865, pad = var_7311_pad_0, pad_type = var_7311_pad_type_0, strides = var_7307, weight = up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_451_cast)[name = tensor("op_7311_cast")];
+ tensor var_7312_split_sizes_0 = const()[name = tensor("op_7312_split_sizes_0"), val = tensor([5120, 5120])];
+ tensor var_7312_axis_0 = const()[name = tensor("op_7312_axis_0"), val = tensor(1)];
+ tensor var_7312_cast_0, tensor var_7312_cast_1 = split(axis = var_7312_axis_0, split_sizes = var_7312_split_sizes_0, x = var_7311_cast)[name = tensor("op_7312_cast")];
+ tensor var_7314_mode_0 = const()[name = tensor("op_7314_mode_0"), val = tensor("EXACT")];
+ tensor var_7314_cast = gelu(mode = var_7314_mode_0, x = var_7312_cast_1)[name = tensor("op_7314_cast")];
+ tensor input_453_cast = mul(x = var_7312_cast_0, y = var_7314_cast)[name = tensor("input_453_cast")];
+ tensor var_7318 = const()[name = tensor("op_7318"), val = tensor([1, 1])];
+ tensor var_7320 = const()[name = tensor("op_7320"), val = tensor([1, 1])];
+ tensor var_7322_pad_type_0 = const()[name = tensor("op_7322_pad_type_0"), val = tensor("custom")];
+ tensor var_7322_pad_0 = const()[name = tensor("op_7322_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(524944960))), lut = tensor([-0x1.56cp-5, -0x1.9bcp-7, 0x1.994p-7, 0x1.56p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526583424)))];
+ tensor var_7322_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_7320, groups = var_6865, pad = var_7322_pad_0, pad_type = var_7322_pad_type_0, strides = var_7318, weight = up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_453_cast)[name = tensor("op_7322_cast")];
+ tensor inputs_217_cast = add(x = var_7322_cast, y = inputs_215_cast)[name = tensor("inputs_217_cast")];
+ tensor var_7332 = const()[name = tensor("op_7332"), val = tensor([1])];
+ tensor channels_mean_217_cast = reduce_mean(axes = var_7332, keep_dims = var_6860, x = inputs_217_cast)[name = tensor("channels_mean_217_cast")];
+ tensor zero_mean_217_cast = sub(x = inputs_217_cast, y = channels_mean_217_cast)[name = tensor("zero_mean_217_cast")];
+ tensor zero_mean_sq_217_cast = mul(x = zero_mean_217_cast, y = zero_mean_217_cast)[name = tensor("zero_mean_sq_217_cast")];
+ tensor var_7336 = const()[name = tensor("op_7336"), val = tensor([1])];
+ tensor var_7337_cast = reduce_mean(axes = var_7336, keep_dims = var_6860, x = zero_mean_sq_217_cast)[name = tensor("op_7337_cast")];
+ tensor var_7338_to_fp16 = const()[name = tensor("op_7338_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor var_7339_cast = add(x = var_7337_cast, y = var_7338_to_fp16)[name = tensor("op_7339_cast")];
+ tensor denom_217_epsilon_0_to_fp16 = const()[name = tensor("denom_217_epsilon_0_to_fp16"), val = tensor(0x1p-24)];
+ tensor denom_217_cast = rsqrt(epsilon = denom_217_epsilon_0_to_fp16, x = var_7339_cast)[name = tensor("denom_217_cast")];
+ tensor out_217_cast = mul(x = zero_mean_217_cast, y = denom_217_cast)[name = tensor("out_217_cast")];
+ tensor var_7343_to_fp16 = const()[name = tensor("op_7343_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526586048)))];
+ tensor var_7344_cast = add(x = out_217_cast, y = var_7343_to_fp16)[name = tensor("op_7344_cast")];
+ tensor var_7346_to_fp16 = const()[name = tensor("op_7346_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526588672)))];
+ tensor hidden_states_299_cast = mul(x = var_7344_cast, y = var_7346_to_fp16)[name = tensor("hidden_states_299_cast")];
+ tensor var_7353 = const()[name = tensor("op_7353"), val = tensor([1, 1])];
+ tensor var_7355 = const()[name = tensor("op_7355"), val = tensor([1, 1])];
+ tensor q_145_pad_type_0 = const()[name = tensor("q_145_pad_type_0"), val = tensor("custom")];
+ tensor q_145_pad_0 = const()[name = tensor("q_145_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526591296))), lut = tensor([-0x1.6p-5, -0x1.a58p-7, 0x1.ac4p-7, 0x1.62p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor q_145_cast = conv(dilations = var_7355, groups = var_6865, pad = q_145_pad_0, pad_type = q_145_pad_type_0, strides = var_7353, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_299_cast)[name = tensor("q_145_cast")];
+ tensor var_7359 = const()[name = tensor("op_7359"), val = tensor([1, 1])];
+ tensor var_7361 = const()[name = tensor("op_7361"), val = tensor([1, 1])];
+ tensor k_145_pad_type_0 = const()[name = tensor("k_145_pad_type_0"), val = tensor("custom")];
+ tensor k_145_pad_0 = const()[name = tensor("k_145_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(527000960))), lut = tensor([-0x1.628p-5, -0x1.aa4p-7, 0x1.abcp-7, 0x1.638p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor k_145_cast = conv(dilations = var_7361, groups = var_6865, pad = k_145_pad_0, pad_type = k_145_pad_type_0, strides = var_7359, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_299_cast)[name = tensor("k_145_cast")];
+ tensor var_7365 = const()[name = tensor("op_7365"), val = tensor([1, 1])];
+ tensor var_7367 = const()[name = tensor("op_7367"), val = tensor([1, 1])];
+ tensor v_145_pad_type_0 = const()[name = tensor("v_145_pad_type_0"), val = tensor("custom")];
+ tensor v_145_pad_0 = const()[name = tensor("v_145_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(527410624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(528229888))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor v_145_cast = conv(dilations = var_7367, groups = var_6865, pad = v_145_pad_0, pad_type = v_145_pad_type_0, strides = var_7365, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_299_cast)[name = tensor("v_145_cast")];
+ tensor var_7371 = const()[name = tensor("op_7371"), val = tensor([2, 20, 64, -1])];
+ tensor var_7372_cast = reshape(shape = var_7371, x = q_145_cast)[name = tensor("op_7372_cast")];
+ tensor var_7373 = const()[name = tensor("op_7373"), val = tensor([2, 20, 64, -1])];
+ tensor var_7374_cast = reshape(shape = var_7373, x = k_145_cast)[name = tensor("op_7374_cast")];
+ tensor var_7375 = const()[name = tensor("op_7375"), val = tensor([2, 20, 64, -1])];
+ tensor var_7376_cast = reshape(shape = var_7375, x = v_145_cast)[name = tensor("op_7376_cast")];
+ tensor attn_weights_289_transpose_x_0 = const()[name = tensor("attn_weights_289_transpose_x_0"), val = tensor(true)];
+ tensor attn_weights_289_transpose_y_0 = const()[name = tensor("attn_weights_289_transpose_y_0"), val = tensor(false)];
+ tensor attn_weights_289_cast = matmul(transpose_x = attn_weights_289_transpose_x_0, transpose_y = attn_weights_289_transpose_y_0, x = var_7372_cast, y = var_7374_cast)[name = tensor("attn_weights_289_cast")];
+ tensor attn_weights_291_cast = mul(x = attn_weights_289_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_291_cast")];
+ tensor var_7380_cast = softmax(axis = var_6849, x = attn_weights_291_cast)[name = tensor("op_7380_cast")];
+ tensor attn_145_transpose_x_0 = const()[name = tensor("attn_145_transpose_x_0"), val = tensor(false)];
+ tensor attn_145_transpose_y_0 = const()[name = tensor("attn_145_transpose_y_0"), val = tensor(true)];
+ tensor attn_145_cast = matmul(transpose_x = attn_145_transpose_x_0, transpose_y = attn_145_transpose_y_0, x = var_7376_cast, y = var_7380_cast)[name = tensor("attn_145_cast")];
+ tensor var_7384 = const()[name = tensor("op_7384"), val = tensor([2, 1280, 1, -1])];
+ tensor input_455_cast = reshape(shape = var_7384, x = attn_145_cast)[name = tensor("input_455_cast")];
+ tensor var_7389 = const()[name = tensor("op_7389"), val = tensor([1, 1])];
+ tensor var_7391 = const()[name = tensor("op_7391"), val = tensor([1, 1])];
+ tensor var_7393_pad_type_0 = const()[name = tensor("op_7393_pad_type_0"), val = tensor("custom")];
+ tensor var_7393_pad_0 = const()[name = tensor("op_7393_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(528230016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(529049280))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(529049408)))];
+ tensor var_7393_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_7391, groups = var_6865, pad = var_7393_pad_0, pad_type = var_7393_pad_type_0, strides = var_7389, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_455_cast)[name = tensor("op_7393_cast")];
+ tensor inputs_219_cast = add(x = var_7393_cast, y = inputs_217_cast)[name = tensor("inputs_219_cast")];
+ tensor var_7397 = const()[name = tensor("op_7397"), val = tensor([1])];
+ tensor channels_mean_219_cast = reduce_mean(axes = var_7397, keep_dims = var_6860, x = inputs_219_cast)[name = tensor("channels_mean_219_cast")];
+ tensor zero_mean_219_cast = sub(x = inputs_219_cast, y = channels_mean_219_cast)[name = tensor("zero_mean_219_cast")];
+ tensor zero_mean_sq_219_cast = mul(x = zero_mean_219_cast, y = zero_mean_219_cast)[name = tensor("zero_mean_sq_219_cast")];
+ tensor var_7401 = const()[name = tensor("op_7401"), val = tensor([1])];
+ tensor var_7402_cast = reduce_mean(axes = var_7401, keep_dims = var_6860, x = zero_mean_sq_219_cast)[name = tensor("op_7402_cast")];
+ tensor var_7403_to_fp16 = const()[name = tensor("op_7403_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor var_7404_cast = add(x = var_7402_cast, y = var_7403_to_fp16)[name = tensor("op_7404_cast")];
+ tensor denom_219_epsilon_0_to_fp16 = const()[name = tensor("denom_219_epsilon_0_to_fp16"), val = tensor(0x1p-24)];
+ tensor denom_219_cast = rsqrt(epsilon = denom_219_epsilon_0_to_fp16, x = var_7404_cast)[name = tensor("denom_219_cast")];
+ tensor out_219_cast = mul(x = zero_mean_219_cast, y = denom_219_cast)[name = tensor("out_219_cast")];
+ tensor var_7408_to_fp16 = const()[name = tensor("op_7408_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(529052032)))];
+ tensor var_7409_cast = add(x = out_219_cast, y = var_7408_to_fp16)[name = tensor("op_7409_cast")];
+ tensor var_7411_to_fp16 = const()[name = tensor("op_7411_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(529054656)))];
+ tensor hidden_states_301_cast = mul(x = var_7409_cast, y = var_7411_to_fp16)[name = tensor("hidden_states_301_cast")];
+ tensor var_7418 = const()[name = tensor("op_7418"), val = tensor([1, 1])];
+ tensor var_7420 = const()[name = tensor("op_7420"), val = tensor([1, 1])];
+ tensor q_147_pad_type_0 = const()[name = tensor("q_147_pad_type_0"), val = tensor("custom")];
+ tensor q_147_pad_0 = const()[name = tensor("q_147_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(529057280))), lut = tensor([-0x1.47cp-6, 0x1.48cp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor q_147_cast = conv(dilations = var_7420, groups = var_6865, pad = q_147_pad_0, pad_type = q_147_pad_type_0, strides = var_7418, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_301_cast)[name = tensor("q_147_cast")];
+ tensor var_7424 = const()[name = tensor("op_7424"), val = tensor([1, 1])];
+ tensor var_7426 = const()[name = tensor("op_7426"), val = tensor([1, 1])];
+ tensor k_147_pad_type_0 = const()[name = tensor("k_147_pad_type_0"), val = tensor("custom")];
+ tensor k_147_pad_0 = const()[name = tensor("k_147_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(529262144))), lut = tensor([-0x1.20cp-6, 0x1.20cp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])];
+ tensor k_147_cast = conv(dilations = var_7426, groups = var_6865, pad = k_147_pad_0, pad_type = k_147_pad_type_0, strides = var_7424, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_147_cast")];
+ tensor var_7430 = const()[name = tensor("op_7430"), val = tensor([1, 1])];
+ tensor var_7432 = const()[name = tensor("op_7432"), val = tensor([1, 1])];
+ tensor v_147_pad_type_0 = const()[name = tensor("v_147_pad_type_0"), val = tensor("custom")];
+ tensor v_147_pad_0 = const()[name = tensor("v_147_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(529589888))), lut = tensor([-0x1.25cp-5, -0x1.544p-7, 0x1.518p-7, 0x1.254p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])];
+ tensor v_147_cast = conv(dilations = var_7432, groups = var_6865, pad = v_147_pad_0, pad_type = v_147_pad_type_0, strides = var_7430, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_147_cast")];
+ tensor var_7436 = const()[name = tensor("op_7436"), val = tensor([2, 20, 64, -1])];
+ tensor var_7437_cast = reshape(shape = var_7436, x = q_147_cast)[name = tensor("op_7437_cast")];
+ tensor var_7438 = const()[name = tensor("op_7438"), val = tensor([2, 20, 64, -1])];
+ tensor var_7439_cast = reshape(shape = var_7438, x = k_147_cast)[name = tensor("op_7439_cast")];
+ tensor var_7440 = const()[name = tensor("op_7440"), val = tensor([2, 20, 64, -1])];
+ tensor var_7441_cast = reshape(shape = var_7440, x = v_147_cast)[name = tensor("op_7441_cast")];
+ tensor attn_weights_293_transpose_x_0 = const()[name = tensor("attn_weights_293_transpose_x_0"), val = tensor(true)];
+ tensor attn_weights_293_transpose_y_0 = const()[name = tensor("attn_weights_293_transpose_y_0"), val = tensor(false)];
+ tensor attn_weights_293_cast = matmul(transpose_x = attn_weights_293_transpose_x_0, transpose_y = attn_weights_293_transpose_y_0, x = var_7437_cast, y = var_7439_cast)[name = tensor("attn_weights_293_cast")];
+ tensor attn_weights_295_cast = mul(x = attn_weights_293_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_295_cast")];
+ tensor var_7445_cast = softmax(axis = var_6849, x = attn_weights_295_cast)[name = tensor("op_7445_cast")];
+ tensor attn_147_transpose_x_0 = const()[name = tensor("attn_147_transpose_x_0"), val = tensor(false)];
+ tensor attn_147_transpose_y_0 = const()[name = tensor("attn_147_transpose_y_0"), val = tensor(true)];
+ tensor attn_147_cast = matmul(transpose_x = attn_147_transpose_x_0, transpose_y = attn_147_transpose_y_0, x = var_7441_cast, y = var_7445_cast)[name = tensor("attn_147_cast")];
+ tensor var_7449 = const()[name = tensor("op_7449"), val = tensor([2, 1280, 1, -1])];
+ tensor input_457_cast = reshape(shape = var_7449, x = attn_147_cast)[name = tensor("input_457_cast")];
+ tensor var_7454 = const()[name = tensor("op_7454"), val = tensor([1, 1])];
+ tensor var_7456 = const()[name = tensor("op_7456"), val = tensor([1, 1])];
+ tensor var_7458_pad_type_0 = const()[name = tensor("op_7458_pad_type_0"), val = tensor("custom")];
+ tensor var_7458_pad_0 = const()[name = tensor("op_7458_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(530245312))), lut = tensor([-0x1.52cp-7, 0x1.548p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(530450176)))];
+ tensor var_7458_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_7456, groups = var_6865, pad = var_7458_pad_0, pad_type = var_7458_pad_type_0, strides = var_7454, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_457_cast)[name = tensor("op_7458_cast")];
+ tensor inputs_221_cast = add(x = var_7458_cast, y = inputs_219_cast)[name = tensor("inputs_221_cast")];
+ tensor var_7462 = const()[name = tensor("op_7462"), val = tensor([1])];
+ tensor channels_mean_221_cast = reduce_mean(axes = var_7462, keep_dims = var_6860, x = inputs_221_cast)[name = tensor("channels_mean_221_cast")];
+ tensor zero_mean_221_cast = sub(x = inputs_221_cast, y = channels_mean_221_cast)[name = tensor("zero_mean_221_cast")];
+ tensor zero_mean_sq_221_cast = mul(x = zero_mean_221_cast, y = zero_mean_221_cast)[name = tensor("zero_mean_sq_221_cast")];
+ tensor var_7466 = const()[name = tensor("op_7466"), val = tensor([1])];
+ tensor var_7467_cast = reduce_mean(axes = var_7466, keep_dims = var_6860, x = zero_mean_sq_221_cast)[name = tensor("op_7467_cast")];
+ tensor var_7468_to_fp16 = const()[name = tensor("op_7468_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor var_7469_cast = add(x = var_7467_cast, y = var_7468_to_fp16)[name = tensor("op_7469_cast")];
+ tensor denom_221_epsilon_0_to_fp16 = const()[name = tensor("denom_221_epsilon_0_to_fp16"), val = tensor(0x1p-24)];
+ tensor denom_221_cast = rsqrt(epsilon = denom_221_epsilon_0_to_fp16, x = var_7469_cast)[name = tensor("denom_221_cast")];
+ tensor out_221_cast = mul(x = zero_mean_221_cast, y = denom_221_cast)[name = tensor("out_221_cast")];
+ tensor var_7473_to_fp16 = const()[name = tensor("op_7473_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(530452800)))];
+ tensor var_7474_cast = add(x = out_221_cast, y = var_7473_to_fp16)[name = tensor("op_7474_cast")];
+ tensor var_7476_to_fp16 = const()[name = tensor("op_7476_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(530455424)))];
+ tensor input_459_cast = mul(x = var_7474_cast, y = var_7476_to_fp16)[name = tensor("input_459_cast")];
+ tensor var_7484 = const()[name = tensor("op_7484"), val = tensor([1, 1])];
+ tensor var_7486 = const()[name = tensor("op_7486"), val = tensor([1, 1])];
+ tensor var_7488_pad_type_0 = const()[name = tensor("op_7488_pad_type_0"), val = tensor("custom")];
+ tensor var_7488_pad_0 = const()[name = tensor("op_7488_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(530458048))), lut = tensor([-0x1.5a8p-5, -0x1.a08p-7, 0x1.9f4p-7, 0x1.5a4p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533734912)))];
+ tensor var_7488_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_7486, groups = var_6865, pad = var_7488_pad_0, pad_type = var_7488_pad_type_0, strides = var_7484, weight = up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_459_cast)[name = tensor("op_7488_cast")];
+ tensor var_7489_split_sizes_0 = const()[name = tensor("op_7489_split_sizes_0"), val = tensor([5120, 5120])];
+ tensor var_7489_axis_0 = const()[name = tensor("op_7489_axis_0"), val = tensor(1)];
+ tensor var_7489_cast_0, tensor var_7489_cast_1 = split(axis = var_7489_axis_0, split_sizes = var_7489_split_sizes_0, x = var_7488_cast)[name = tensor("op_7489_cast")];
+ tensor var_7491_mode_0 = const()[name = tensor("op_7491_mode_0"), val = tensor("EXACT")];
+ tensor var_7491_cast = gelu(mode = var_7491_mode_0, x = var_7489_cast_1)[name = tensor("op_7491_cast")];
+ tensor input_461_cast = mul(x = var_7489_cast_0, y = var_7491_cast)[name = tensor("input_461_cast")];
+ tensor var_7495 = const()[name = tensor("op_7495"), val = tensor([1, 1])];
+ tensor var_7497 = const()[name = tensor("op_7497"), val = tensor([1, 1])];
+ tensor var_7499_pad_type_0 = const()[name = tensor("op_7499_pad_type_0"), val = tensor("custom")];
+ tensor var_7499_pad_0 = const()[name = tensor("op_7499_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533755456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537032320))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537032448)))];
+ tensor var_7499_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_7497, groups = var_6865, pad = var_7499_pad_0, pad_type = var_7499_pad_type_0, strides = var_7495, weight = up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_461_cast)[name = tensor("op_7499_cast")];
+ tensor inputs_223_cast = add(x = var_7499_cast, y = inputs_221_cast)[name = tensor("inputs_223_cast")];
+ tensor var_7509 = const()[name = tensor("op_7509"), val = tensor([1])];
+ tensor channels_mean_223_cast = reduce_mean(axes = var_7509, keep_dims = var_6860, x = inputs_223_cast)[name = tensor("channels_mean_223_cast")];
+ tensor zero_mean_223_cast = sub(x = inputs_223_cast, y = channels_mean_223_cast)[name = tensor("zero_mean_223_cast")];
+ tensor zero_mean_sq_223_cast = mul(x = zero_mean_223_cast, y = zero_mean_223_cast)[name = tensor("zero_mean_sq_223_cast")];
+ tensor var_7513 = const()[name = tensor("op_7513"), val = tensor([1])];
+ tensor var_7514_cast = reduce_mean(axes = var_7513, keep_dims = var_6860, x = zero_mean_sq_223_cast)[name = tensor("op_7514_cast")];
+ tensor var_7515_to_fp16 = const()[name = tensor("op_7515_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor var_7516_cast = add(x = var_7514_cast, y = var_7515_to_fp16)[name = tensor("op_7516_cast")];
+ tensor denom_223_epsilon_0_to_fp16 = const()[name = tensor("denom_223_epsilon_0_to_fp16"), val = tensor(0x1p-24)];
+ tensor denom_223_cast = rsqrt(epsilon = denom_223_epsilon_0_to_fp16, x = var_7516_cast)[name = tensor("denom_223_cast")];
+ tensor out_223_cast = mul(x = zero_mean_223_cast, y = denom_223_cast)[name = tensor("out_223_cast")];
+ tensor var_7520_to_fp16 = const()[name = tensor("op_7520_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537035072)))];
+ tensor var_7521_cast = add(x = out_223_cast, y = var_7520_to_fp16)[name = tensor("op_7521_cast")];
+ tensor var_7523_to_fp16 = const()[name = tensor("op_7523_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537037696)))];
+ tensor hidden_states_305_cast = mul(x = var_7521_cast, y = var_7523_to_fp16)[name = tensor("hidden_states_305_cast")];
+ tensor var_7530 = const()[name = tensor("op_7530"), val = tensor([1, 1])];
+ tensor var_7532 = const()[name = tensor("op_7532"), val = tensor([1, 1])];
+ tensor q_149_pad_type_0 = const()[name = tensor("q_149_pad_type_0"), val = tensor("custom")];
+ tensor q_149_pad_0 = const()[name = tensor("q_149_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537040320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537859584))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor q_149_cast = conv(dilations = var_7532, groups = var_6865, pad = q_149_pad_0, pad_type = q_149_pad_type_0, strides = var_7530, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_305_cast)[name = tensor("q_149_cast")];
+ tensor var_7536 = const()[name = tensor("op_7536"), val = tensor([1, 1])];
+ tensor var_7538 = const()[name = tensor("op_7538"), val = tensor([1, 1])];
+ tensor k_149_pad_type_0 = const()[name = tensor("k_149_pad_type_0"), val = tensor("custom")];
+ tensor k_149_pad_0 = const()[name = tensor("k_149_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537859712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(538678976))), name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_149_cast = conv(dilations = var_7538, groups = var_6865, pad = k_149_pad_0, pad_type = k_149_pad_type_0, strides = var_7536, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_305_cast)[name = tensor("k_149_cast")]; + tensor var_7542 = const()[name = tensor("op_7542"), val = tensor([1, 1])]; + tensor var_7544 = const()[name = tensor("op_7544"), val = tensor([1, 1])]; + tensor v_149_pad_type_0 = const()[name = tensor("v_149_pad_type_0"), val = tensor("custom")]; + tensor v_149_pad_0 = const()[name = tensor("v_149_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(538679104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(539498368))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_149_cast = conv(dilations = var_7544, groups = var_6865, pad = v_149_pad_0, pad_type = v_149_pad_type_0, strides = var_7542, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_305_cast)[name = tensor("v_149_cast")]; + tensor var_7548 = const()[name = tensor("op_7548"), val = tensor([2, 20, 64, -1])]; + tensor var_7549_cast = reshape(shape = var_7548, x = q_149_cast)[name = tensor("op_7549_cast")]; + tensor var_7550 = const()[name = tensor("op_7550"), val = tensor([2, 20, 64, -1])]; + tensor var_7551_cast = reshape(shape = var_7550, x = k_149_cast)[name = tensor("op_7551_cast")]; + tensor var_7552 = const()[name = tensor("op_7552"), val = tensor([2, 20, 64, -1])]; + tensor var_7553_cast = reshape(shape = var_7552, x = v_149_cast)[name = tensor("op_7553_cast")]; + tensor attn_weights_297_transpose_x_0 = const()[name = tensor("attn_weights_297_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_297_transpose_y_0 = const()[name = tensor("attn_weights_297_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_297_cast = matmul(transpose_x = attn_weights_297_transpose_x_0, transpose_y = attn_weights_297_transpose_y_0, x = var_7549_cast, y = var_7551_cast)[name = tensor("attn_weights_297_cast")]; + tensor attn_weights_299_cast = mul(x = attn_weights_297_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_299_cast")]; + tensor var_7557_cast = softmax(axis = var_6849, x = attn_weights_299_cast)[name = tensor("op_7557_cast")]; + tensor attn_149_transpose_x_0 = const()[name = tensor("attn_149_transpose_x_0"), val = tensor(false)]; + tensor attn_149_transpose_y_0 = const()[name = tensor("attn_149_transpose_y_0"), val = tensor(true)]; + tensor attn_149_cast = matmul(transpose_x = attn_149_transpose_x_0, transpose_y = attn_149_transpose_y_0, x = var_7553_cast, y = var_7557_cast)[name = tensor("attn_149_cast")]; + tensor var_7561 = const()[name = tensor("op_7561"), val = tensor([2, 1280, 1, -1])]; + tensor input_463_cast = reshape(shape = var_7561, x = attn_149_cast)[name = tensor("input_463_cast")]; + tensor var_7566 = const()[name = tensor("op_7566"), val = tensor([1, 1])]; + tensor var_7568 = const()[name = tensor("op_7568"), val = tensor([1, 1])]; + tensor var_7570_pad_type_0 = const()[name = 
tensor("op_7570_pad_type_0"), val = tensor("custom")]; + tensor var_7570_pad_0 = const()[name = tensor("op_7570_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(539498496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(540317760))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(540317888)))]; + tensor var_7570_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_7568, groups = var_6865, pad = var_7570_pad_0, pad_type = var_7570_pad_type_0, strides = var_7566, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_463_cast)[name = tensor("op_7570_cast")]; + tensor inputs_225_cast = add(x = var_7570_cast, y = inputs_223_cast)[name = tensor("inputs_225_cast")]; + tensor var_7574 = const()[name = tensor("op_7574"), val = tensor([1])]; + tensor channels_mean_225_cast = reduce_mean(axes = var_7574, keep_dims = var_6860, x = inputs_225_cast)[name = tensor("channels_mean_225_cast")]; + tensor zero_mean_225_cast = sub(x = inputs_225_cast, y = channels_mean_225_cast)[name = tensor("zero_mean_225_cast")]; + tensor zero_mean_sq_225_cast = mul(x = zero_mean_225_cast, y = zero_mean_225_cast)[name = tensor("zero_mean_sq_225_cast")]; + tensor var_7578 = const()[name = tensor("op_7578"), val = tensor([1])]; + tensor var_7579_cast = reduce_mean(axes = var_7578, keep_dims = var_6860, x = zero_mean_sq_225_cast)[name = tensor("op_7579_cast")]; + tensor var_7580_to_fp16 = const()[name = tensor("op_7580_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7581_cast = add(x = var_7579_cast, y = var_7580_to_fp16)[name = tensor("op_7581_cast")]; + tensor denom_225_epsilon_0_to_fp16 = const()[name = tensor("denom_225_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_225_cast = rsqrt(epsilon = denom_225_epsilon_0_to_fp16, x = var_7581_cast)[name = tensor("denom_225_cast")]; + tensor out_225_cast = mul(x = zero_mean_225_cast, y = denom_225_cast)[name = tensor("out_225_cast")]; + tensor var_7585_to_fp16 = const()[name = tensor("op_7585_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(540320512)))]; + tensor var_7586_cast = add(x = out_225_cast, y = var_7585_to_fp16)[name = tensor("op_7586_cast")]; + tensor var_7588_to_fp16 = const()[name = tensor("op_7588_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(540323136)))]; + tensor hidden_states_307_cast = mul(x = var_7586_cast, y = var_7588_to_fp16)[name = tensor("hidden_states_307_cast")]; + tensor var_7595 = const()[name = tensor("op_7595"), val = tensor([1, 1])]; + tensor var_7597 = const()[name = tensor("op_7597"), val = tensor([1, 1])]; + tensor q_151_pad_type_0 = const()[name = tensor("q_151_pad_type_0"), val = tensor("custom")]; + tensor q_151_pad_0 = const()[name = tensor("q_151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(540325760))), lut = tensor([-0x1.3fcp-6, 0x1.3fcp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_151_cast = conv(dilations = var_7597, groups = var_6865, pad = q_151_pad_0, pad_type = q_151_pad_type_0, strides = var_7595, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_307_cast)[name = tensor("q_151_cast")]; + tensor var_7601 = const()[name = tensor("op_7601"), val = tensor([1, 1])]; + tensor var_7603 = const()[name = tensor("op_7603"), val = tensor([1, 1])]; + tensor k_151_pad_type_0 = const()[name = tensor("k_151_pad_type_0"), val = tensor("custom")]; + tensor k_151_pad_0 = const()[name = tensor("k_151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(540530624))), lut = tensor([-0x1.114p-6, 0x1.114p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_151_cast = conv(dilations = var_7603, groups = var_6865, pad = k_151_pad_0, pad_type = k_151_pad_type_0, strides = var_7601, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_151_cast")]; + tensor var_7607 = const()[name = tensor("op_7607"), val = tensor([1, 1])]; + tensor var_7609 = const()[name = tensor("op_7609"), val = tensor([1, 1])]; + tensor v_151_pad_type_0 = const()[name = tensor("v_151_pad_type_0"), val = tensor("custom")]; + tensor v_151_pad_0 = const()[name = tensor("v_151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(540858368))), lut = tensor([-0x1.24p-6, 0x1.228p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_151_cast = conv(dilations = var_7609, groups = var_6865, pad = v_151_pad_0, pad_type = v_151_pad_type_0, strides = var_7607, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_151_cast")]; + tensor var_7613 = const()[name = tensor("op_7613"), val = tensor([2, 20, 64, -1])]; + tensor var_7614_cast = reshape(shape = var_7613, x = q_151_cast)[name = tensor("op_7614_cast")]; + tensor var_7615 = const()[name = tensor("op_7615"), val = tensor([2, 20, 64, -1])]; + tensor var_7616_cast = reshape(shape = var_7615, x = k_151_cast)[name = tensor("op_7616_cast")]; + tensor var_7617 = const()[name = tensor("op_7617"), val = tensor([2, 20, 64, -1])]; + tensor var_7618_cast = reshape(shape = var_7617, x = v_151_cast)[name = tensor("op_7618_cast")]; + tensor attn_weights_301_transpose_x_0 = const()[name = tensor("attn_weights_301_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_301_transpose_y_0 = const()[name = tensor("attn_weights_301_transpose_y_0"), val = tensor(false)]; + tensor 
attn_weights_301_cast = matmul(transpose_x = attn_weights_301_transpose_x_0, transpose_y = attn_weights_301_transpose_y_0, x = var_7614_cast, y = var_7616_cast)[name = tensor("attn_weights_301_cast")]; + tensor attn_weights_303_cast = mul(x = attn_weights_301_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_303_cast")]; + tensor var_7622_cast = softmax(axis = var_6849, x = attn_weights_303_cast)[name = tensor("op_7622_cast")]; + tensor attn_151_transpose_x_0 = const()[name = tensor("attn_151_transpose_x_0"), val = tensor(false)]; + tensor attn_151_transpose_y_0 = const()[name = tensor("attn_151_transpose_y_0"), val = tensor(true)]; + tensor attn_151_cast = matmul(transpose_x = attn_151_transpose_x_0, transpose_y = attn_151_transpose_y_0, x = var_7618_cast, y = var_7622_cast)[name = tensor("attn_151_cast")]; + tensor var_7626 = const()[name = tensor("op_7626"), val = tensor([2, 1280, 1, -1])]; + tensor input_465_cast = reshape(shape = var_7626, x = attn_151_cast)[name = tensor("input_465_cast")]; + tensor var_7631 = const()[name = tensor("op_7631"), val = tensor([1, 1])]; + tensor var_7633 = const()[name = tensor("op_7633"), val = tensor([1, 1])]; + tensor var_7635_pad_type_0 = const()[name = tensor("op_7635_pad_type_0"), val = tensor("custom")]; + tensor var_7635_pad_0 = const()[name = tensor("op_7635_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(541186112))), lut = tensor([-0x1.574p-7, 0x1.56cp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(541390976)))]; + tensor var_7635_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_7633, groups = var_6865, pad = var_7635_pad_0, pad_type = var_7635_pad_type_0, strides = var_7631, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_465_cast)[name = tensor("op_7635_cast")]; + tensor inputs_227_cast = add(x = var_7635_cast, y = inputs_225_cast)[name = tensor("inputs_227_cast")]; + tensor var_7639 = const()[name = tensor("op_7639"), val = tensor([1])]; + tensor channels_mean_227_cast = reduce_mean(axes = var_7639, keep_dims = var_6860, x = inputs_227_cast)[name = tensor("channels_mean_227_cast")]; + tensor zero_mean_227_cast = sub(x = inputs_227_cast, y = channels_mean_227_cast)[name = tensor("zero_mean_227_cast")]; + tensor zero_mean_sq_227_cast = mul(x = zero_mean_227_cast, y = zero_mean_227_cast)[name = tensor("zero_mean_sq_227_cast")]; + tensor var_7643 = const()[name = tensor("op_7643"), val = tensor([1])]; + tensor var_7644_cast = reduce_mean(axes = var_7643, keep_dims = var_6860, x = zero_mean_sq_227_cast)[name = tensor("op_7644_cast")]; + tensor var_7645_to_fp16 = const()[name = tensor("op_7645_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7646_cast = add(x = var_7644_cast, y = var_7645_to_fp16)[name = tensor("op_7646_cast")]; + tensor denom_227_epsilon_0_to_fp16 = const()[name = tensor("denom_227_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor 
denom_227_cast = rsqrt(epsilon = denom_227_epsilon_0_to_fp16, x = var_7646_cast)[name = tensor("denom_227_cast")]; + tensor out_227_cast = mul(x = zero_mean_227_cast, y = denom_227_cast)[name = tensor("out_227_cast")]; + tensor var_7650_to_fp16 = const()[name = tensor("op_7650_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(541393600)))]; + tensor var_7651_cast = add(x = out_227_cast, y = var_7650_to_fp16)[name = tensor("op_7651_cast")]; + tensor var_7653_to_fp16 = const()[name = tensor("op_7653_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(541396224)))]; + tensor input_467_cast = mul(x = var_7651_cast, y = var_7653_to_fp16)[name = tensor("input_467_cast")]; + tensor var_7661 = const()[name = tensor("op_7661"), val = tensor([1, 1])]; + tensor var_7663 = const()[name = tensor("op_7663"), val = tensor([1, 1])]; + tensor var_7665_pad_type_0 = const()[name = tensor("op_7665_pad_type_0"), val = tensor("custom")]; + tensor var_7665_pad_0 = const()[name = tensor("op_7665_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(541398848))), lut = tensor([-0x1.608p-5, -0x1.a7cp-7, 0x1.a5cp-7, 0x1.6p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(544675712)))]; + tensor var_7665_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_7663, groups = var_6865, pad = var_7665_pad_0, pad_type = var_7665_pad_type_0, strides = var_7661, weight = up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_467_cast)[name = tensor("op_7665_cast")]; + tensor var_7666_split_sizes_0 = const()[name = tensor("op_7666_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7666_axis_0 = const()[name = tensor("op_7666_axis_0"), val = tensor(1)]; + tensor var_7666_cast_0, tensor var_7666_cast_1 = split(axis = var_7666_axis_0, split_sizes = var_7666_split_sizes_0, x = var_7665_cast)[name = tensor("op_7666_cast")]; + tensor var_7668_mode_0 = const()[name = tensor("op_7668_mode_0"), val = tensor("EXACT")]; + tensor var_7668_cast = gelu(mode = var_7668_mode_0, x = var_7666_cast_1)[name = tensor("op_7668_cast")]; + tensor input_469_cast = mul(x = var_7666_cast_0, y = var_7668_cast)[name = tensor("input_469_cast")]; + tensor var_7672 = const()[name = tensor("op_7672"), val = tensor([1, 1])]; + tensor var_7674 = const()[name = tensor("op_7674"), val = tensor([1, 1])]; + tensor var_7676_pad_type_0 = const()[name = tensor("op_7676_pad_type_0"), val = tensor("custom")]; + tensor var_7676_pad_0 = const()[name = tensor("op_7676_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(544696256))), lut = tensor([-0x1.5bcp-5, -0x1.a18p-7, 0x1.a0cp-7, 0x1.5cp-5]), name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(546334720)))]; + tensor var_7676_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_7674, groups = var_6865, pad = var_7676_pad_0, pad_type = var_7676_pad_type_0, strides = var_7672, weight = up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_469_cast)[name = tensor("op_7676_cast")]; + tensor inputs_229_cast = add(x = var_7676_cast, y = inputs_227_cast)[name = tensor("inputs_229_cast")]; + tensor var_7686 = const()[name = tensor("op_7686"), val = tensor([1])]; + tensor channels_mean_229_cast = reduce_mean(axes = var_7686, keep_dims = var_6860, x = inputs_229_cast)[name = tensor("channels_mean_229_cast")]; + tensor zero_mean_229_cast = sub(x = inputs_229_cast, y = channels_mean_229_cast)[name = tensor("zero_mean_229_cast")]; + tensor zero_mean_sq_229_cast = mul(x = zero_mean_229_cast, y = zero_mean_229_cast)[name = tensor("zero_mean_sq_229_cast")]; + tensor var_7690 = const()[name = tensor("op_7690"), val = tensor([1])]; + tensor var_7691_cast = reduce_mean(axes = var_7690, keep_dims = var_6860, x = zero_mean_sq_229_cast)[name = tensor("op_7691_cast")]; + tensor var_7692_to_fp16 = const()[name = tensor("op_7692_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7693_cast = add(x = var_7691_cast, y = var_7692_to_fp16)[name = tensor("op_7693_cast")]; + tensor denom_229_epsilon_0_to_fp16 = const()[name = tensor("denom_229_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_229_cast = rsqrt(epsilon = denom_229_epsilon_0_to_fp16, x = var_7693_cast)[name = tensor("denom_229_cast")]; + tensor out_229_cast = mul(x = zero_mean_229_cast, y = denom_229_cast)[name = tensor("out_229_cast")]; + tensor var_7697_to_fp16 = const()[name = tensor("op_7697_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(546337344)))]; + tensor var_7698_cast = add(x = out_229_cast, y = var_7697_to_fp16)[name = tensor("op_7698_cast")]; + tensor var_7700_to_fp16 = const()[name = tensor("op_7700_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(546339968)))]; + tensor hidden_states_311_cast = mul(x = var_7698_cast, y = var_7700_to_fp16)[name = tensor("hidden_states_311_cast")]; + tensor var_7707 = const()[name = tensor("op_7707"), val = tensor([1, 1])]; + tensor var_7709 = const()[name = tensor("op_7709"), val = tensor([1, 1])]; + tensor q_153_pad_type_0 = const()[name = tensor("q_153_pad_type_0"), val = tensor("custom")]; + tensor q_153_pad_0 = const()[name = tensor("q_153_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(546342592))), lut = tensor([-0x1.6ap-5, -0x1.b3cp-7, 0x1.b1p-7, 0x1.69p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_153_cast = conv(dilations = var_7709, groups = var_6865, pad = q_153_pad_0, pad_type = q_153_pad_type_0, strides = 
var_7707, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_311_cast)[name = tensor("q_153_cast")]; + tensor var_7713 = const()[name = tensor("op_7713"), val = tensor([1, 1])]; + tensor var_7715 = const()[name = tensor("op_7715"), val = tensor([1, 1])]; + tensor k_153_pad_type_0 = const()[name = tensor("k_153_pad_type_0"), val = tensor("custom")]; + tensor k_153_pad_0 = const()[name = tensor("k_153_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(546752256))), lut = tensor([-0x1.6ap-5, -0x1.b1cp-7, 0x1.b64p-7, 0x1.6bp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_153_cast = conv(dilations = var_7715, groups = var_6865, pad = k_153_pad_0, pad_type = k_153_pad_type_0, strides = var_7713, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_311_cast)[name = tensor("k_153_cast")]; + tensor var_7719 = const()[name = tensor("op_7719"), val = tensor([1, 1])]; + tensor var_7721 = const()[name = tensor("op_7721"), val = tensor([1, 1])]; + tensor v_153_pad_type_0 = const()[name = tensor("v_153_pad_type_0"), val = tensor("custom")]; + tensor v_153_pad_0 = const()[name = tensor("v_153_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(547161920))), lut = tensor([-0x1.66cp-5, -0x1.af8p-7, 0x1.b1p-7, 0x1.678p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_153_cast = conv(dilations = var_7721, groups = var_6865, pad = v_153_pad_0, pad_type = v_153_pad_type_0, strides = var_7719, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_311_cast)[name = tensor("v_153_cast")]; + tensor var_7725 = const()[name = tensor("op_7725"), val = tensor([2, 20, 64, -1])]; + tensor var_7726_cast = reshape(shape = var_7725, x = q_153_cast)[name = tensor("op_7726_cast")]; + tensor var_7727 = const()[name = tensor("op_7727"), val = tensor([2, 20, 64, -1])]; + tensor var_7728_cast = reshape(shape = var_7727, x = k_153_cast)[name = tensor("op_7728_cast")]; + tensor var_7729 = const()[name = tensor("op_7729"), val = tensor([2, 20, 64, -1])]; + tensor var_7730_cast = reshape(shape = var_7729, x = v_153_cast)[name = tensor("op_7730_cast")]; + tensor attn_weights_305_transpose_x_0 = const()[name = tensor("attn_weights_305_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_305_transpose_y_0 = const()[name = tensor("attn_weights_305_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_305_cast = matmul(transpose_x = attn_weights_305_transpose_x_0, transpose_y = attn_weights_305_transpose_y_0, x = var_7726_cast, y = var_7728_cast)[name = tensor("attn_weights_305_cast")]; + tensor attn_weights_307_cast = mul(x = attn_weights_305_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_307_cast")]; + tensor var_7734_cast = softmax(axis = var_6849, x = attn_weights_307_cast)[name = tensor("op_7734_cast")]; + tensor attn_153_transpose_x_0 = 
const()[name = tensor("attn_153_transpose_x_0"), val = tensor(false)]; + tensor attn_153_transpose_y_0 = const()[name = tensor("attn_153_transpose_y_0"), val = tensor(true)]; + tensor attn_153_cast = matmul(transpose_x = attn_153_transpose_x_0, transpose_y = attn_153_transpose_y_0, x = var_7730_cast, y = var_7734_cast)[name = tensor("attn_153_cast")]; + tensor var_7738 = const()[name = tensor("op_7738"), val = tensor([2, 1280, 1, -1])]; + tensor input_471_cast = reshape(shape = var_7738, x = attn_153_cast)[name = tensor("input_471_cast")]; + tensor var_7743 = const()[name = tensor("op_7743"), val = tensor([1, 1])]; + tensor var_7745 = const()[name = tensor("op_7745"), val = tensor([1, 1])]; + tensor var_7747_pad_type_0 = const()[name = tensor("op_7747_pad_type_0"), val = tensor("custom")]; + tensor var_7747_pad_0 = const()[name = tensor("op_7747_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(547571584))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(548390848))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(548390976)))]; + tensor var_7747_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_7745, groups = var_6865, pad = var_7747_pad_0, pad_type = var_7747_pad_type_0, strides = var_7743, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_471_cast)[name = tensor("op_7747_cast")]; + tensor inputs_231_cast = add(x = var_7747_cast, y = inputs_229_cast)[name = tensor("inputs_231_cast")]; + tensor var_7751 = const()[name = tensor("op_7751"), val = tensor([1])]; + tensor channels_mean_231_cast = reduce_mean(axes = var_7751, keep_dims = var_6860, x = inputs_231_cast)[name = tensor("channels_mean_231_cast")]; + tensor zero_mean_231_cast = sub(x = inputs_231_cast, y = channels_mean_231_cast)[name = tensor("zero_mean_231_cast")]; + tensor zero_mean_sq_231_cast = mul(x = zero_mean_231_cast, y = zero_mean_231_cast)[name = tensor("zero_mean_sq_231_cast")]; + tensor var_7755 = const()[name = tensor("op_7755"), val = tensor([1])]; + tensor var_7756_cast = reduce_mean(axes = var_7755, keep_dims = var_6860, x = zero_mean_sq_231_cast)[name = tensor("op_7756_cast")]; + tensor var_7757_to_fp16 = const()[name = tensor("op_7757_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7758_cast = add(x = var_7756_cast, y = var_7757_to_fp16)[name = tensor("op_7758_cast")]; + tensor denom_231_epsilon_0_to_fp16 = const()[name = tensor("denom_231_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_231_cast = rsqrt(epsilon = denom_231_epsilon_0_to_fp16, x = var_7758_cast)[name = tensor("denom_231_cast")]; + tensor out_231_cast = mul(x = zero_mean_231_cast, y = denom_231_cast)[name = tensor("out_231_cast")]; + tensor var_7762_to_fp16 = const()[name = tensor("op_7762_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(548393600)))]; + tensor var_7763_cast = 
add(x = out_231_cast, y = var_7762_to_fp16)[name = tensor("op_7763_cast")]; + tensor var_7765_to_fp16 = const()[name = tensor("op_7765_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(548396224)))]; + tensor hidden_states_313_cast = mul(x = var_7763_cast, y = var_7765_to_fp16)[name = tensor("hidden_states_313_cast")]; + tensor var_7772 = const()[name = tensor("op_7772"), val = tensor([1, 1])]; + tensor var_7774 = const()[name = tensor("op_7774"), val = tensor([1, 1])]; + tensor q_155_pad_type_0 = const()[name = tensor("q_155_pad_type_0"), val = tensor("custom")]; + tensor q_155_pad_0 = const()[name = tensor("q_155_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(548398848))), lut = tensor([-0x1.22cp-6, 0x1.234p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_155_cast = conv(dilations = var_7774, groups = var_6865, pad = q_155_pad_0, pad_type = q_155_pad_type_0, strides = var_7772, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_313_cast)[name = tensor("q_155_cast")]; + tensor var_7778 = const()[name = tensor("op_7778"), val = tensor([1, 1])]; + tensor var_7780 = const()[name = tensor("op_7780"), val = tensor([1, 1])]; + tensor k_155_pad_type_0 = const()[name = tensor("k_155_pad_type_0"), val = tensor("custom")]; + tensor k_155_pad_0 = const()[name = tensor("k_155_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(548603712))), lut = tensor([-0x1.da8p-7, 0x1.da8p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_155_cast = conv(dilations = var_7780, groups = var_6865, pad = k_155_pad_0, pad_type = k_155_pad_type_0, strides = var_7778, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_155_cast")]; + tensor var_7784 = const()[name = tensor("op_7784"), val = tensor([1, 1])]; + tensor var_7786 = const()[name = tensor("op_7786"), val = tensor([1, 1])]; + tensor v_155_pad_type_0 = const()[name = tensor("v_155_pad_type_0"), val = tensor("custom")]; + tensor v_155_pad_0 = const()[name = tensor("v_155_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(548931456))), lut = tensor([-0x1.14cp-6, 0x1.15p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_155_cast = conv(dilations = var_7786, groups = var_6865, pad = v_155_pad_0, pad_type = v_155_pad_type_0, strides = var_7784, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_155_cast")]; + tensor var_7790 = const()[name = tensor("op_7790"), val = tensor([2, 20, 64, -1])]; + tensor 
var_7791_cast = reshape(shape = var_7790, x = q_155_cast)[name = tensor("op_7791_cast")]; + tensor var_7792 = const()[name = tensor("op_7792"), val = tensor([2, 20, 64, -1])]; + tensor var_7793_cast = reshape(shape = var_7792, x = k_155_cast)[name = tensor("op_7793_cast")]; + tensor var_7794 = const()[name = tensor("op_7794"), val = tensor([2, 20, 64, -1])]; + tensor var_7795_cast = reshape(shape = var_7794, x = v_155_cast)[name = tensor("op_7795_cast")]; + tensor attn_weights_309_transpose_x_0 = const()[name = tensor("attn_weights_309_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_309_transpose_y_0 = const()[name = tensor("attn_weights_309_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_309_cast = matmul(transpose_x = attn_weights_309_transpose_x_0, transpose_y = attn_weights_309_transpose_y_0, x = var_7791_cast, y = var_7793_cast)[name = tensor("attn_weights_309_cast")]; + tensor attn_weights_311_cast = mul(x = attn_weights_309_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_311_cast")]; + tensor var_7799_cast = softmax(axis = var_6849, x = attn_weights_311_cast)[name = tensor("op_7799_cast")]; + tensor attn_155_transpose_x_0 = const()[name = tensor("attn_155_transpose_x_0"), val = tensor(false)]; + tensor attn_155_transpose_y_0 = const()[name = tensor("attn_155_transpose_y_0"), val = tensor(true)]; + tensor attn_155_cast = matmul(transpose_x = attn_155_transpose_x_0, transpose_y = attn_155_transpose_y_0, x = var_7795_cast, y = var_7799_cast)[name = tensor("attn_155_cast")]; + tensor var_7803 = const()[name = tensor("op_7803"), val = tensor([2, 1280, 1, -1])]; + tensor input_473_cast = reshape(shape = var_7803, x = attn_155_cast)[name = tensor("input_473_cast")]; + tensor var_7808 = const()[name = tensor("op_7808"), val = tensor([1, 1])]; + tensor var_7810 = const()[name = tensor("op_7810"), val = tensor([1, 1])]; + tensor var_7812_pad_type_0 = const()[name = tensor("op_7812_pad_type_0"), val = tensor("custom")]; + tensor var_7812_pad_0 = const()[name = tensor("op_7812_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(549259200))), lut = tensor([-0x1.4acp-7, 0x1.494p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(549464064)))]; + tensor var_7812_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_7810, groups = var_6865, pad = var_7812_pad_0, pad_type = var_7812_pad_type_0, strides = var_7808, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_473_cast)[name = tensor("op_7812_cast")]; + tensor inputs_233_cast = add(x = var_7812_cast, y = inputs_231_cast)[name = tensor("inputs_233_cast")]; + tensor var_7816 = const()[name = tensor("op_7816"), val = tensor([1])]; + tensor channels_mean_233_cast = reduce_mean(axes = var_7816, keep_dims = var_6860, x = inputs_233_cast)[name = tensor("channels_mean_233_cast")]; + tensor zero_mean_233_cast = sub(x = inputs_233_cast, y = 
channels_mean_233_cast)[name = tensor("zero_mean_233_cast")]; + tensor zero_mean_sq_233_cast = mul(x = zero_mean_233_cast, y = zero_mean_233_cast)[name = tensor("zero_mean_sq_233_cast")]; + tensor var_7820 = const()[name = tensor("op_7820"), val = tensor([1])]; + tensor var_7821_cast = reduce_mean(axes = var_7820, keep_dims = var_6860, x = zero_mean_sq_233_cast)[name = tensor("op_7821_cast")]; + tensor var_7822_to_fp16 = const()[name = tensor("op_7822_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7823_cast = add(x = var_7821_cast, y = var_7822_to_fp16)[name = tensor("op_7823_cast")]; + tensor denom_233_epsilon_0_to_fp16 = const()[name = tensor("denom_233_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_233_cast = rsqrt(epsilon = denom_233_epsilon_0_to_fp16, x = var_7823_cast)[name = tensor("denom_233_cast")]; + tensor out_233_cast = mul(x = zero_mean_233_cast, y = denom_233_cast)[name = tensor("out_233_cast")]; + tensor var_7827_to_fp16 = const()[name = tensor("op_7827_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(549466688)))]; + tensor var_7828_cast = add(x = out_233_cast, y = var_7827_to_fp16)[name = tensor("op_7828_cast")]; + tensor var_7830_to_fp16 = const()[name = tensor("op_7830_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(549469312)))]; + tensor input_475_cast = mul(x = var_7828_cast, y = var_7830_to_fp16)[name = tensor("input_475_cast")]; + tensor var_7838 = const()[name = tensor("op_7838"), val = tensor([1, 1])]; + tensor var_7840 = const()[name = tensor("op_7840"), val = tensor([1, 1])]; + tensor var_7842_pad_type_0 = const()[name = tensor("op_7842_pad_type_0"), val = tensor("custom")]; + tensor var_7842_pad_0 = const()[name = tensor("op_7842_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(549471936))), lut = tensor([-0x1.648p-5, -0x1.ac4p-7, 0x1.ac8p-7, 0x1.648p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(552748800)))]; + tensor var_7842_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_7840, groups = var_6865, pad = var_7842_pad_0, pad_type = var_7842_pad_type_0, strides = var_7838, weight = up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_475_cast)[name = tensor("op_7842_cast")]; + tensor var_7843_split_sizes_0 = const()[name = tensor("op_7843_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7843_axis_0 = const()[name = tensor("op_7843_axis_0"), val = tensor(1)]; + tensor var_7843_cast_0, tensor var_7843_cast_1 = split(axis = var_7843_axis_0, split_sizes = var_7843_split_sizes_0, x = var_7842_cast)[name = tensor("op_7843_cast")]; + tensor var_7845_mode_0 = const()[name = tensor("op_7845_mode_0"), val = tensor("EXACT")]; + tensor var_7845_cast = gelu(mode = var_7845_mode_0, x = var_7843_cast_1)[name = tensor("op_7845_cast")]; + tensor input_477_cast = mul(x = var_7843_cast_0, y = 
var_7845_cast)[name = tensor("input_477_cast")]; + tensor var_7849 = const()[name = tensor("op_7849"), val = tensor([1, 1])]; + tensor var_7851 = const()[name = tensor("op_7851"), val = tensor([1, 1])]; + tensor var_7853_pad_type_0 = const()[name = tensor("op_7853_pad_type_0"), val = tensor("custom")]; + tensor var_7853_pad_0 = const()[name = tensor("op_7853_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(552769344))), lut = tensor([-0x1.5ecp-5, -0x1.a4p-7, 0x1.a5p-7, 0x1.5fp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554407808)))]; + tensor var_7853_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_7851, groups = var_6865, pad = var_7853_pad_0, pad_type = var_7853_pad_type_0, strides = var_7849, weight = up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_477_cast)[name = tensor("op_7853_cast")]; + tensor inputs_235_cast = add(x = var_7853_cast, y = inputs_233_cast)[name = tensor("inputs_235_cast")]; + tensor var_7863 = const()[name = tensor("op_7863"), val = tensor([1])]; + tensor channels_mean_235_cast = reduce_mean(axes = var_7863, keep_dims = var_6860, x = inputs_235_cast)[name = tensor("channels_mean_235_cast")]; + tensor zero_mean_235_cast = sub(x = inputs_235_cast, y = channels_mean_235_cast)[name = tensor("zero_mean_235_cast")]; + tensor zero_mean_sq_235_cast = mul(x = zero_mean_235_cast, y = zero_mean_235_cast)[name = tensor("zero_mean_sq_235_cast")]; + tensor var_7867 = const()[name = tensor("op_7867"), val = tensor([1])]; + tensor var_7868_cast = reduce_mean(axes = var_7867, keep_dims = var_6860, x = zero_mean_sq_235_cast)[name = tensor("op_7868_cast")]; + tensor var_7869_to_fp16 = const()[name = tensor("op_7869_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7870_cast = add(x = var_7868_cast, y = var_7869_to_fp16)[name = tensor("op_7870_cast")]; + tensor denom_235_epsilon_0_to_fp16 = const()[name = tensor("denom_235_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_235_cast = rsqrt(epsilon = denom_235_epsilon_0_to_fp16, x = var_7870_cast)[name = tensor("denom_235_cast")]; + tensor out_235_cast = mul(x = zero_mean_235_cast, y = denom_235_cast)[name = tensor("out_235_cast")]; + tensor var_7874_to_fp16 = const()[name = tensor("op_7874_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554410432)))]; + tensor var_7875_cast = add(x = out_235_cast, y = var_7874_to_fp16)[name = tensor("op_7875_cast")]; + tensor var_7877_to_fp16 = const()[name = tensor("op_7877_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554413056)))]; + tensor hidden_states_317_cast = mul(x = var_7875_cast, y = var_7877_to_fp16)[name = tensor("hidden_states_317_cast")]; + tensor var_7884 = const()[name = tensor("op_7884"), val = tensor([1, 1])]; + tensor var_7886 = const()[name = tensor("op_7886"), val = tensor([1, 1])]; + tensor q_157_pad_type_0 = const()[name = 
tensor("q_157_pad_type_0"), val = tensor("custom")]; + tensor q_157_pad_0 = const()[name = tensor("q_157_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554415680))), lut = tensor([-0x1.7d4p-6, 0x1.7b4p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_157_cast = conv(dilations = var_7886, groups = var_6865, pad = q_157_pad_0, pad_type = q_157_pad_type_0, strides = var_7884, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_317_cast)[name = tensor("q_157_cast")]; + tensor var_7890 = const()[name = tensor("op_7890"), val = tensor([1, 1])]; + tensor var_7892 = const()[name = tensor("op_7892"), val = tensor([1, 1])]; + tensor k_157_pad_type_0 = const()[name = tensor("k_157_pad_type_0"), val = tensor("custom")]; + tensor k_157_pad_0 = const()[name = tensor("k_157_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554620544))), lut = tensor([-0x1.7e4p-6, 0x1.7ccp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_157_cast = conv(dilations = var_7892, groups = var_6865, pad = k_157_pad_0, pad_type = k_157_pad_type_0, strides = var_7890, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_317_cast)[name = tensor("k_157_cast")]; + tensor var_7896 = const()[name = tensor("op_7896"), val = tensor([1, 1])]; + tensor var_7898 = const()[name = tensor("op_7898"), val = tensor([1, 1])]; + tensor v_157_pad_type_0 = const()[name = tensor("v_157_pad_type_0"), val = tensor("custom")]; + tensor v_157_pad_0 = const()[name = tensor("v_157_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554825408))), lut = tensor([-0x1.658p-5, -0x1.afcp-7, 0x1.aa4p-7, 0x1.64p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_157_cast = conv(dilations = var_7898, groups = var_6865, pad = v_157_pad_0, pad_type = v_157_pad_type_0, strides = var_7896, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_317_cast)[name = tensor("v_157_cast")]; + tensor var_7902 = const()[name = tensor("op_7902"), val = tensor([2, 20, 64, -1])]; + tensor var_7903_cast = reshape(shape = var_7902, x = q_157_cast)[name = tensor("op_7903_cast")]; + tensor var_7904 = const()[name = tensor("op_7904"), val = tensor([2, 20, 64, -1])]; + tensor var_7905_cast = reshape(shape = var_7904, x = k_157_cast)[name = tensor("op_7905_cast")]; + tensor var_7906 = const()[name = tensor("op_7906"), val = tensor([2, 20, 64, -1])]; + tensor var_7907_cast = reshape(shape = var_7906, x = v_157_cast)[name = tensor("op_7907_cast")]; + tensor attn_weights_313_transpose_x_0 = const()[name = 
tensor("attn_weights_313_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_313_transpose_y_0 = const()[name = tensor("attn_weights_313_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_313_cast = matmul(transpose_x = attn_weights_313_transpose_x_0, transpose_y = attn_weights_313_transpose_y_0, x = var_7903_cast, y = var_7905_cast)[name = tensor("attn_weights_313_cast")]; + tensor attn_weights_315_cast = mul(x = attn_weights_313_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_315_cast")]; + tensor var_7911_cast = softmax(axis = var_6849, x = attn_weights_315_cast)[name = tensor("op_7911_cast")]; + tensor attn_157_transpose_x_0 = const()[name = tensor("attn_157_transpose_x_0"), val = tensor(false)]; + tensor attn_157_transpose_y_0 = const()[name = tensor("attn_157_transpose_y_0"), val = tensor(true)]; + tensor attn_157_cast = matmul(transpose_x = attn_157_transpose_x_0, transpose_y = attn_157_transpose_y_0, x = var_7907_cast, y = var_7911_cast)[name = tensor("attn_157_cast")]; + tensor var_7915 = const()[name = tensor("op_7915"), val = tensor([2, 1280, 1, -1])]; + tensor input_479_cast = reshape(shape = var_7915, x = attn_157_cast)[name = tensor("input_479_cast")]; + tensor var_7920 = const()[name = tensor("op_7920"), val = tensor([1, 1])]; + tensor var_7922 = const()[name = tensor("op_7922"), val = tensor([1, 1])]; + tensor var_7924_pad_type_0 = const()[name = tensor("op_7924_pad_type_0"), val = tensor("custom")]; + tensor var_7924_pad_0 = const()[name = tensor("op_7924_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(555235072))), lut = tensor([-0x1.61p-5, -0x1.a5p-7, 0x1.ac8p-7, 0x1.62cp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(555644736)))]; + tensor var_7924_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_7922, groups = var_6865, pad = var_7924_pad_0, pad_type = var_7924_pad_type_0, strides = var_7920, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_479_cast)[name = tensor("op_7924_cast")]; + tensor inputs_237_cast = add(x = var_7924_cast, y = inputs_235_cast)[name = tensor("inputs_237_cast")]; + tensor var_7928 = const()[name = tensor("op_7928"), val = tensor([1])]; + tensor channels_mean_237_cast = reduce_mean(axes = var_7928, keep_dims = var_6860, x = inputs_237_cast)[name = tensor("channels_mean_237_cast")]; + tensor zero_mean_237_cast = sub(x = inputs_237_cast, y = channels_mean_237_cast)[name = tensor("zero_mean_237_cast")]; + tensor zero_mean_sq_237_cast = mul(x = zero_mean_237_cast, y = zero_mean_237_cast)[name = tensor("zero_mean_sq_237_cast")]; + tensor var_7932 = const()[name = tensor("op_7932"), val = tensor([1])]; + tensor var_7933_cast = reduce_mean(axes = var_7932, keep_dims = var_6860, x = zero_mean_sq_237_cast)[name = tensor("op_7933_cast")]; + tensor var_7934_to_fp16 = const()[name = tensor("op_7934_to_fp16"), val = tensor(0x1.5p-17)]; + tensor 
var_7935_cast = add(x = var_7933_cast, y = var_7934_to_fp16)[name = tensor("op_7935_cast")]; + tensor denom_237_epsilon_0_to_fp16 = const()[name = tensor("denom_237_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_237_cast = rsqrt(epsilon = denom_237_epsilon_0_to_fp16, x = var_7935_cast)[name = tensor("denom_237_cast")]; + tensor out_237_cast = mul(x = zero_mean_237_cast, y = denom_237_cast)[name = tensor("out_237_cast")]; + tensor var_7939_to_fp16 = const()[name = tensor("op_7939_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(555647360)))]; + tensor var_7940_cast = add(x = out_237_cast, y = var_7939_to_fp16)[name = tensor("op_7940_cast")]; + tensor var_7942_to_fp16 = const()[name = tensor("op_7942_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(555649984)))]; + tensor hidden_states_319_cast = mul(x = var_7940_cast, y = var_7942_to_fp16)[name = tensor("hidden_states_319_cast")]; + tensor var_7949 = const()[name = tensor("op_7949"), val = tensor([1, 1])]; + tensor var_7951 = const()[name = tensor("op_7951"), val = tensor([1, 1])]; + tensor q_159_pad_type_0 = const()[name = tensor("q_159_pad_type_0"), val = tensor("custom")]; + tensor q_159_pad_0 = const()[name = tensor("q_159_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(555652608))), lut = tensor([-0x1.144p-6, 0x1.148p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_159_cast = conv(dilations = var_7951, groups = var_6865, pad = q_159_pad_0, pad_type = q_159_pad_type_0, strides = var_7949, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_319_cast)[name = tensor("q_159_cast")]; + tensor var_7955 = const()[name = tensor("op_7955"), val = tensor([1, 1])]; + tensor var_7957 = const()[name = tensor("op_7957"), val = tensor([1, 1])]; + tensor k_159_pad_type_0 = const()[name = tensor("k_159_pad_type_0"), val = tensor("custom")]; + tensor k_159_pad_0 = const()[name = tensor("k_159_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(555857472))), lut = tensor([-0x1.b5cp-7, 0x1.b6p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_159_cast = conv(dilations = var_7957, groups = var_6865, pad = k_159_pad_0, pad_type = k_159_pad_type_0, strides = var_7955, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_159_cast")]; + tensor var_7961 = const()[name = tensor("op_7961"), val = tensor([1, 1])]; + tensor var_7963 = const()[name = tensor("op_7963"), val = tensor([1, 1])]; + tensor v_159_pad_type_0 = const()[name = tensor("v_159_pad_type_0"), val = tensor("custom")]; + tensor v_159_pad_0 = const()[name = tensor("v_159_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(556185216))), lut = tensor([-0x1.0cp-6, 0x1.0c8p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_159_cast = conv(dilations = var_7963, groups = var_6865, pad = v_159_pad_0, pad_type = v_159_pad_type_0, strides = var_7961, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_159_cast")]; + tensor var_7967 = const()[name = tensor("op_7967"), val = tensor([2, 20, 64, -1])]; + tensor var_7968_cast = reshape(shape = var_7967, x = q_159_cast)[name = tensor("op_7968_cast")]; + tensor var_7969 = const()[name = tensor("op_7969"), val = tensor([2, 20, 64, -1])]; + tensor var_7970_cast = reshape(shape = var_7969, x = k_159_cast)[name = tensor("op_7970_cast")]; + tensor var_7971 = const()[name = tensor("op_7971"), val = tensor([2, 20, 64, -1])]; + tensor var_7972_cast = reshape(shape = var_7971, x = v_159_cast)[name = tensor("op_7972_cast")]; + tensor attn_weights_317_transpose_x_0 = const()[name = tensor("attn_weights_317_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_317_transpose_y_0 = const()[name = tensor("attn_weights_317_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_317_cast = matmul(transpose_x = attn_weights_317_transpose_x_0, transpose_y = attn_weights_317_transpose_y_0, x = var_7968_cast, y = var_7970_cast)[name = tensor("attn_weights_317_cast")]; + tensor attn_weights_319_cast = mul(x = attn_weights_317_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_319_cast")]; + tensor var_7976_cast = softmax(axis = var_6849, x = attn_weights_319_cast)[name = tensor("op_7976_cast")]; + tensor attn_159_transpose_x_0 = const()[name = tensor("attn_159_transpose_x_0"), val = tensor(false)]; + tensor attn_159_transpose_y_0 = const()[name = tensor("attn_159_transpose_y_0"), val = tensor(true)]; + tensor attn_159_cast = matmul(transpose_x = attn_159_transpose_x_0, transpose_y = attn_159_transpose_y_0, x = var_7972_cast, y = var_7976_cast)[name = tensor("attn_159_cast")]; + tensor var_7980 = const()[name = tensor("op_7980"), val = tensor([2, 1280, 1, -1])]; + tensor input_481_cast = reshape(shape = var_7980, x = attn_159_cast)[name = tensor("input_481_cast")]; + tensor var_7985 = const()[name = tensor("op_7985"), val = tensor([1, 1])]; + tensor var_7987 = const()[name = tensor("op_7987"), val = tensor([1, 1])]; + tensor var_7989_pad_type_0 = const()[name = tensor("op_7989_pad_type_0"), val = tensor("custom")]; + tensor var_7989_pad_0 = const()[name = tensor("op_7989_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(556512960))), lut = tensor([-0x1.46p-7, 0x1.45cp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(556717824)))]; + tensor var_7989_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_7987, groups = 
var_6865, pad = var_7989_pad_0, pad_type = var_7989_pad_type_0, strides = var_7985, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_481_cast)[name = tensor("op_7989_cast")]; + tensor inputs_239_cast = add(x = var_7989_cast, y = inputs_237_cast)[name = tensor("inputs_239_cast")]; + tensor var_7993 = const()[name = tensor("op_7993"), val = tensor([1])]; + tensor channels_mean_239_cast = reduce_mean(axes = var_7993, keep_dims = var_6860, x = inputs_239_cast)[name = tensor("channels_mean_239_cast")]; + tensor zero_mean_239_cast = sub(x = inputs_239_cast, y = channels_mean_239_cast)[name = tensor("zero_mean_239_cast")]; + tensor zero_mean_sq_239_cast = mul(x = zero_mean_239_cast, y = zero_mean_239_cast)[name = tensor("zero_mean_sq_239_cast")]; + tensor var_7997 = const()[name = tensor("op_7997"), val = tensor([1])]; + tensor var_7998_cast = reduce_mean(axes = var_7997, keep_dims = var_6860, x = zero_mean_sq_239_cast)[name = tensor("op_7998_cast")]; + tensor var_7999_to_fp16 = const()[name = tensor("op_7999_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8000_cast = add(x = var_7998_cast, y = var_7999_to_fp16)[name = tensor("op_8000_cast")]; + tensor denom_239_epsilon_0_to_fp16 = const()[name = tensor("denom_239_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_239_cast = rsqrt(epsilon = denom_239_epsilon_0_to_fp16, x = var_8000_cast)[name = tensor("denom_239_cast")]; + tensor out_239_cast = mul(x = zero_mean_239_cast, y = denom_239_cast)[name = tensor("out_239_cast")]; + tensor var_8004_to_fp16 = const()[name = tensor("op_8004_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(556720448)))]; + tensor var_8005_cast = add(x = out_239_cast, y = var_8004_to_fp16)[name = tensor("op_8005_cast")]; + tensor var_8007_to_fp16 = const()[name = tensor("op_8007_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(556723072)))]; + tensor input_483_cast = mul(x = var_8005_cast, y = var_8007_to_fp16)[name = tensor("input_483_cast")]; + tensor var_8015 = const()[name = tensor("op_8015"), val = tensor([1, 1])]; + tensor var_8017 = const()[name = tensor("op_8017"), val = tensor([1, 1])]; + tensor var_8019_pad_type_0 = const()[name = tensor("op_8019_pad_type_0"), val = tensor("custom")]; + tensor var_8019_pad_0 = const()[name = tensor("op_8019_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(556725696))), lut = tensor([-0x1.694p-5, -0x1.b1cp-7, 0x1.b24p-7, 0x1.69p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(560002560)))]; + tensor var_8019_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_8017, groups = var_6865, pad = var_8019_pad_0, pad_type = var_8019_pad_type_0, strides = var_8015, weight = up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_483_cast)[name = tensor("op_8019_cast")]; + 
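The `constexpr_lut_to_dense` ops above are where this repo's mixed-bit palettization shows up in the compiled UNet: each palettized weight stores only per-element indices into a small `lut` of fp16 centroids, so a 2-entry table costs 1 bit per weight and a 4-entry table 2 bits, and the dense tensor is rebuilt when the model loads. A minimal NumPy sketch of that expansion follows; the on-disk index packing inside `weight.bin` is internal to Core ML, so the MSB-first packing and the `lut_to_dense` helper are illustrative assumptions, not the actual runtime implementation.

```python
import numpy as np

def lut_to_dense(packed: np.ndarray, lut: np.ndarray, shape: tuple) -> np.ndarray:
    """Expand bit-packed palette indices into a dense fp16 weight tensor.

    Illustrative only: assumes MSB-first bit packing, which is an
    assumption here rather than the documented weight.bin layout.
    """
    nbits = int(np.log2(len(lut)))    # 2-entry LUT -> 1 bit/weight, 4-entry -> 2 bits
    n = int(np.prod(shape))
    # One row of nbits bits per weight element.
    bits = np.unpackbits(packed)[: n * nbits].reshape(n, nbits)
    # Re-fold each group of nbits bits into an integer palette index.
    idx = np.packbits(bits, axis=1, bitorder="big").ravel() >> (8 - nbits)
    return lut[idx].astype(np.float16).reshape(shape)

# Example using the 2-bit LUT of the ff_net_0_proj layer above
# (real shape is (10240, 1280, 1, 1); a toy shape keeps this cheap to run).
lut = np.array([float.fromhex(h) for h in
                ("-0x1.694p-5", "-0x1.b1cp-7", "0x1.b24p-7", "0x1.69p-5")])
shape = (4, 8, 1, 1)
packed = np.random.randint(0, 256, size=(int(np.prod(shape)) * 2 + 7) // 8,
                           dtype=np.uint8)
dense = lut_to_dense(packed, lut, shape)
assert dense.shape == shape
assert set(np.unique(dense)) <= set(lut.astype(np.float16))
```

Note how layers in this section mix table sizes (some attention projections use 2-entry LUTs while other projections and the feed-forward convolutions use 4-entry ones): that per-layer variation is the bit mixing the recipe names refer to.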
tensor var_8020_split_sizes_0 = const()[name = tensor("op_8020_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8020_axis_0 = const()[name = tensor("op_8020_axis_0"), val = tensor(1)]; + tensor var_8020_cast_0, tensor var_8020_cast_1 = split(axis = var_8020_axis_0, split_sizes = var_8020_split_sizes_0, x = var_8019_cast)[name = tensor("op_8020_cast")]; + tensor var_8022_mode_0 = const()[name = tensor("op_8022_mode_0"), val = tensor("EXACT")]; + tensor var_8022_cast = gelu(mode = var_8022_mode_0, x = var_8020_cast_1)[name = tensor("op_8022_cast")]; + tensor input_485_cast = mul(x = var_8020_cast_0, y = var_8022_cast)[name = tensor("input_485_cast")]; + tensor var_8026 = const()[name = tensor("op_8026"), val = tensor([1, 1])]; + tensor var_8028 = const()[name = tensor("op_8028"), val = tensor([1, 1])]; + tensor var_8030_pad_type_0 = const()[name = tensor("op_8030_pad_type_0"), val = tensor("custom")]; + tensor var_8030_pad_0 = const()[name = tensor("op_8030_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(560023104))), lut = tensor([-0x1.618p-5, -0x1.a7p-7, 0x1.aap-7, 0x1.624p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(561661568)))]; + tensor var_8030_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_8028, groups = var_6865, pad = var_8030_pad_0, pad_type = var_8030_pad_type_0, strides = var_8026, weight = up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_485_cast)[name = tensor("op_8030_cast")]; + tensor inputs_241_cast = add(x = var_8030_cast, y = inputs_239_cast)[name = tensor("inputs_241_cast")]; + tensor var_8040 = const()[name = tensor("op_8040"), val = tensor([1])]; + tensor channels_mean_241_cast = reduce_mean(axes = var_8040, keep_dims = var_6860, x = inputs_241_cast)[name = tensor("channels_mean_241_cast")]; + tensor zero_mean_241_cast = sub(x = inputs_241_cast, y = channels_mean_241_cast)[name = tensor("zero_mean_241_cast")]; + tensor zero_mean_sq_241_cast = mul(x = zero_mean_241_cast, y = zero_mean_241_cast)[name = tensor("zero_mean_sq_241_cast")]; + tensor var_8044 = const()[name = tensor("op_8044"), val = tensor([1])]; + tensor var_8045_cast = reduce_mean(axes = var_8044, keep_dims = var_6860, x = zero_mean_sq_241_cast)[name = tensor("op_8045_cast")]; + tensor var_8046_to_fp16 = const()[name = tensor("op_8046_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8047_cast = add(x = var_8045_cast, y = var_8046_to_fp16)[name = tensor("op_8047_cast")]; + tensor denom_241_epsilon_0_to_fp16 = const()[name = tensor("denom_241_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_241_cast = rsqrt(epsilon = denom_241_epsilon_0_to_fp16, x = var_8047_cast)[name = tensor("denom_241_cast")]; + tensor out_241_cast = mul(x = zero_mean_241_cast, y = denom_241_cast)[name = tensor("out_241_cast")]; + tensor var_8051_to_fp16 = const()[name = tensor("op_8051_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset 
= tensor(561664192)))]; + tensor var_8052_cast = add(x = out_241_cast, y = var_8051_to_fp16)[name = tensor("op_8052_cast")]; + tensor var_8054_to_fp16 = const()[name = tensor("op_8054_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(561666816)))]; + tensor hidden_states_323_cast = mul(x = var_8052_cast, y = var_8054_to_fp16)[name = tensor("hidden_states_323_cast")]; + tensor var_8061 = const()[name = tensor("op_8061"), val = tensor([1, 1])]; + tensor var_8063 = const()[name = tensor("op_8063"), val = tensor([1, 1])]; + tensor q_161_pad_type_0 = const()[name = tensor("q_161_pad_type_0"), val = tensor("custom")]; + tensor q_161_pad_0 = const()[name = tensor("q_161_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(561669440))), lut = tensor([-0x1.7a8p-6, 0x1.7cp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_161_cast = conv(dilations = var_8063, groups = var_6865, pad = q_161_pad_0, pad_type = q_161_pad_type_0, strides = var_8061, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_323_cast)[name = tensor("q_161_cast")]; + tensor var_8067 = const()[name = tensor("op_8067"), val = tensor([1, 1])]; + tensor var_8069 = const()[name = tensor("op_8069"), val = tensor([1, 1])]; + tensor k_161_pad_type_0 = const()[name = tensor("k_161_pad_type_0"), val = tensor("custom")]; + tensor k_161_pad_0 = const()[name = tensor("k_161_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(561874304))), lut = tensor([-0x1.7b8p-6, 0x1.7bcp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_161_cast = conv(dilations = var_8069, groups = var_6865, pad = k_161_pad_0, pad_type = k_161_pad_type_0, strides = var_8067, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_323_cast)[name = tensor("k_161_cast")]; + tensor var_8073 = const()[name = tensor("op_8073"), val = tensor([1, 1])]; + tensor var_8075 = const()[name = tensor("op_8075"), val = tensor([1, 1])]; + tensor v_161_pad_type_0 = const()[name = tensor("v_161_pad_type_0"), val = tensor("custom")]; + tensor v_161_pad_0 = const()[name = tensor("v_161_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(562079168))), lut = tensor([-0x1.5c4p-5, -0x1.a0cp-7, 0x1.a84p-7, 0x1.5e4p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_161_cast = conv(dilations = var_8075, groups = var_6865, pad = v_161_pad_0, pad_type = v_161_pad_type_0, strides = var_8073, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_323_cast)[name = tensor("v_161_cast")]; + tensor var_8079 = 
const()[name = tensor("op_8079"), val = tensor([2, 20, 64, -1])]; + tensor var_8080_cast = reshape(shape = var_8079, x = q_161_cast)[name = tensor("op_8080_cast")]; + tensor var_8081 = const()[name = tensor("op_8081"), val = tensor([2, 20, 64, -1])]; + tensor var_8082_cast = reshape(shape = var_8081, x = k_161_cast)[name = tensor("op_8082_cast")]; + tensor var_8083 = const()[name = tensor("op_8083"), val = tensor([2, 20, 64, -1])]; + tensor var_8084_cast = reshape(shape = var_8083, x = v_161_cast)[name = tensor("op_8084_cast")]; + tensor attn_weights_321_transpose_x_0 = const()[name = tensor("attn_weights_321_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_321_transpose_y_0 = const()[name = tensor("attn_weights_321_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_321_cast = matmul(transpose_x = attn_weights_321_transpose_x_0, transpose_y = attn_weights_321_transpose_y_0, x = var_8080_cast, y = var_8082_cast)[name = tensor("attn_weights_321_cast")]; + tensor attn_weights_323_cast = mul(x = attn_weights_321_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_323_cast")]; + tensor var_8088_cast = softmax(axis = var_6849, x = attn_weights_323_cast)[name = tensor("op_8088_cast")]; + tensor attn_161_transpose_x_0 = const()[name = tensor("attn_161_transpose_x_0"), val = tensor(false)]; + tensor attn_161_transpose_y_0 = const()[name = tensor("attn_161_transpose_y_0"), val = tensor(true)]; + tensor attn_161_cast = matmul(transpose_x = attn_161_transpose_x_0, transpose_y = attn_161_transpose_y_0, x = var_8084_cast, y = var_8088_cast)[name = tensor("attn_161_cast")]; + tensor var_8092 = const()[name = tensor("op_8092"), val = tensor([2, 1280, 1, -1])]; + tensor input_487_cast = reshape(shape = var_8092, x = attn_161_cast)[name = tensor("input_487_cast")]; + tensor var_8097 = const()[name = tensor("op_8097"), val = tensor([1, 1])]; + tensor var_8099 = const()[name = tensor("op_8099"), val = tensor([1, 1])]; + tensor var_8101_pad_type_0 = const()[name = tensor("op_8101_pad_type_0"), val = tensor("custom")]; + tensor var_8101_pad_0 = const()[name = tensor("op_8101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(562488832))), lut = tensor([-0x1.5a4p-5, -0x1.9fcp-7, 0x1.a1cp-7, 0x1.5acp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(562898496)))]; + tensor var_8101_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_8099, groups = var_6865, pad = var_8101_pad_0, pad_type = var_8101_pad_type_0, strides = var_8097, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_487_cast)[name = tensor("op_8101_cast")]; + tensor inputs_243_cast = add(x = var_8101_cast, y = inputs_241_cast)[name = tensor("inputs_243_cast")]; + tensor var_8105 = const()[name = tensor("op_8105"), val = tensor([1])]; + tensor channels_mean_243_cast = reduce_mean(axes = var_8105, keep_dims = var_6860, x = inputs_243_cast)[name = 
tensor("channels_mean_243_cast")]; + tensor zero_mean_243_cast = sub(x = inputs_243_cast, y = channels_mean_243_cast)[name = tensor("zero_mean_243_cast")]; + tensor zero_mean_sq_243_cast = mul(x = zero_mean_243_cast, y = zero_mean_243_cast)[name = tensor("zero_mean_sq_243_cast")]; + tensor var_8109 = const()[name = tensor("op_8109"), val = tensor([1])]; + tensor var_8110_cast = reduce_mean(axes = var_8109, keep_dims = var_6860, x = zero_mean_sq_243_cast)[name = tensor("op_8110_cast")]; + tensor var_8111_to_fp16 = const()[name = tensor("op_8111_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8112_cast = add(x = var_8110_cast, y = var_8111_to_fp16)[name = tensor("op_8112_cast")]; + tensor denom_243_epsilon_0_to_fp16 = const()[name = tensor("denom_243_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_243_cast = rsqrt(epsilon = denom_243_epsilon_0_to_fp16, x = var_8112_cast)[name = tensor("denom_243_cast")]; + tensor out_243_cast = mul(x = zero_mean_243_cast, y = denom_243_cast)[name = tensor("out_243_cast")]; + tensor var_8116_to_fp16 = const()[name = tensor("op_8116_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(562901120)))]; + tensor var_8117_cast = add(x = out_243_cast, y = var_8116_to_fp16)[name = tensor("op_8117_cast")]; + tensor var_8119_to_fp16 = const()[name = tensor("op_8119_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(562903744)))]; + tensor hidden_states_325_cast = mul(x = var_8117_cast, y = var_8119_to_fp16)[name = tensor("hidden_states_325_cast")]; + tensor var_8126 = const()[name = tensor("op_8126"), val = tensor([1, 1])]; + tensor var_8128 = const()[name = tensor("op_8128"), val = tensor([1, 1])]; + tensor q_163_pad_type_0 = const()[name = tensor("q_163_pad_type_0"), val = tensor("custom")]; + tensor q_163_pad_0 = const()[name = tensor("q_163_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(562906368))), lut = tensor([-0x1.f4cp-7, 0x1.f54p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_163_cast = conv(dilations = var_8128, groups = var_6865, pad = q_163_pad_0, pad_type = q_163_pad_type_0, strides = var_8126, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_325_cast)[name = tensor("q_163_cast")]; + tensor var_8132 = const()[name = tensor("op_8132"), val = tensor([1, 1])]; + tensor var_8134 = const()[name = tensor("op_8134"), val = tensor([1, 1])]; + tensor k_163_pad_type_0 = const()[name = tensor("k_163_pad_type_0"), val = tensor("custom")]; + tensor k_163_pad_0 = const()[name = tensor("k_163_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(563111232))), lut = tensor([-0x1.7dcp-7, 0x1.7dp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_163_cast = conv(dilations = var_8134, groups = var_6865, pad = k_163_pad_0, pad_type = k_163_pad_type_0, strides = var_8132, weight = 
up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_163_cast")]; + tensor var_8138 = const()[name = tensor("op_8138"), val = tensor([1, 1])]; + tensor var_8140 = const()[name = tensor("op_8140"), val = tensor([1, 1])]; + tensor v_163_pad_type_0 = const()[name = tensor("v_163_pad_type_0"), val = tensor("custom")]; + tensor v_163_pad_0 = const()[name = tensor("v_163_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(563438976))), lut = tensor([-0x1.ed4p-7, 0x1.ea4p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_163_cast = conv(dilations = var_8140, groups = var_6865, pad = v_163_pad_0, pad_type = v_163_pad_type_0, strides = var_8138, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_163_cast")]; + tensor var_8144 = const()[name = tensor("op_8144"), val = tensor([2, 20, 64, -1])]; + tensor var_8145_cast = reshape(shape = var_8144, x = q_163_cast)[name = tensor("op_8145_cast")]; + tensor var_8146 = const()[name = tensor("op_8146"), val = tensor([2, 20, 64, -1])]; + tensor var_8147_cast = reshape(shape = var_8146, x = k_163_cast)[name = tensor("op_8147_cast")]; + tensor var_8148 = const()[name = tensor("op_8148"), val = tensor([2, 20, 64, -1])]; + tensor var_8149_cast = reshape(shape = var_8148, x = v_163_cast)[name = tensor("op_8149_cast")]; + tensor attn_weights_325_transpose_x_0 = const()[name = tensor("attn_weights_325_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_325_transpose_y_0 = const()[name = tensor("attn_weights_325_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_325_cast = matmul(transpose_x = attn_weights_325_transpose_x_0, transpose_y = attn_weights_325_transpose_y_0, x = var_8145_cast, y = var_8147_cast)[name = tensor("attn_weights_325_cast")]; + tensor attn_weights_327_cast = mul(x = attn_weights_325_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_327_cast")]; + tensor var_8153_cast = softmax(axis = var_6849, x = attn_weights_327_cast)[name = tensor("op_8153_cast")]; + tensor attn_163_transpose_x_0 = const()[name = tensor("attn_163_transpose_x_0"), val = tensor(false)]; + tensor attn_163_transpose_y_0 = const()[name = tensor("attn_163_transpose_y_0"), val = tensor(true)]; + tensor attn_163_cast = matmul(transpose_x = attn_163_transpose_x_0, transpose_y = attn_163_transpose_y_0, x = var_8149_cast, y = var_8153_cast)[name = tensor("attn_163_cast")]; + tensor var_8157 = const()[name = tensor("op_8157"), val = tensor([2, 1280, 1, -1])]; + tensor input_489_cast = reshape(shape = var_8157, x = attn_163_cast)[name = tensor("input_489_cast")]; + tensor var_8162 = const()[name = tensor("op_8162"), val = tensor([1, 1])]; + tensor var_8164 = const()[name = tensor("op_8164"), val = tensor([1, 1])]; + tensor var_8166_pad_type_0 = const()[name = tensor("op_8166_pad_type_0"), val = tensor("custom")]; + tensor var_8166_pad_0 = const()[name = tensor("op_8166_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(563766720))), lut = tensor([-0x1.2bp-7, 0x1.2b4p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(563971584)))]; + tensor var_8166_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_8164, groups = var_6865, pad = var_8166_pad_0, pad_type = var_8166_pad_type_0, strides = var_8162, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_489_cast)[name = tensor("op_8166_cast")]; + tensor inputs_245_cast = add(x = var_8166_cast, y = inputs_243_cast)[name = tensor("inputs_245_cast")]; + tensor var_8170 = const()[name = tensor("op_8170"), val = tensor([1])]; + tensor channels_mean_245_cast = reduce_mean(axes = var_8170, keep_dims = var_6860, x = inputs_245_cast)[name = tensor("channels_mean_245_cast")]; + tensor zero_mean_245_cast = sub(x = inputs_245_cast, y = channels_mean_245_cast)[name = tensor("zero_mean_245_cast")]; + tensor zero_mean_sq_245_cast = mul(x = zero_mean_245_cast, y = zero_mean_245_cast)[name = tensor("zero_mean_sq_245_cast")]; + tensor var_8174 = const()[name = tensor("op_8174"), val = tensor([1])]; + tensor var_8175_cast = reduce_mean(axes = var_8174, keep_dims = var_6860, x = zero_mean_sq_245_cast)[name = tensor("op_8175_cast")]; + tensor var_8176_to_fp16 = const()[name = tensor("op_8176_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8177_cast = add(x = var_8175_cast, y = var_8176_to_fp16)[name = tensor("op_8177_cast")]; + tensor denom_245_epsilon_0_to_fp16 = const()[name = tensor("denom_245_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_245_cast = rsqrt(epsilon = denom_245_epsilon_0_to_fp16, x = var_8177_cast)[name = tensor("denom_245_cast")]; + tensor out_245_cast = mul(x = zero_mean_245_cast, y = denom_245_cast)[name = tensor("out_245_cast")]; + tensor var_8181_to_fp16 = const()[name = tensor("op_8181_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(563974208)))]; + tensor var_8182_cast = add(x = out_245_cast, y = var_8181_to_fp16)[name = tensor("op_8182_cast")]; + tensor var_8184_to_fp16 = const()[name = tensor("op_8184_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(563976832)))]; + tensor input_491_cast = mul(x = var_8182_cast, y = var_8184_to_fp16)[name = tensor("input_491_cast")]; + tensor var_8192 = const()[name = tensor("op_8192"), val = tensor([1, 1])]; + tensor var_8194 = const()[name = tensor("op_8194"), val = tensor([1, 1])]; + tensor var_8196_pad_type_0 = const()[name = tensor("op_8196_pad_type_0"), val = tensor("custom")]; + tensor var_8196_pad_0 = const()[name = tensor("op_8196_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(563979456))), lut = tensor([-0x1.6d8p-5, -0x1.b7p-7, 0x1.b7cp-7, 0x1.6d8p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor 
up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567256320)))]; + tensor var_8196_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_8194, groups = var_6865, pad = var_8196_pad_0, pad_type = var_8196_pad_type_0, strides = var_8192, weight = up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_491_cast)[name = tensor("op_8196_cast")]; + tensor var_8197_split_sizes_0 = const()[name = tensor("op_8197_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8197_axis_0 = const()[name = tensor("op_8197_axis_0"), val = tensor(1)]; + tensor var_8197_cast_0, tensor var_8197_cast_1 = split(axis = var_8197_axis_0, split_sizes = var_8197_split_sizes_0, x = var_8196_cast)[name = tensor("op_8197_cast")]; + tensor var_8199_mode_0 = const()[name = tensor("op_8199_mode_0"), val = tensor("EXACT")]; + tensor var_8199_cast = gelu(mode = var_8199_mode_0, x = var_8197_cast_1)[name = tensor("op_8199_cast")]; + tensor input_493_cast = mul(x = var_8197_cast_0, y = var_8199_cast)[name = tensor("input_493_cast")]; + tensor var_8203 = const()[name = tensor("op_8203"), val = tensor([1, 1])]; + tensor var_8205 = const()[name = tensor("op_8205"), val = tensor([1, 1])]; + tensor var_8207_pad_type_0 = const()[name = tensor("op_8207_pad_type_0"), val = tensor("custom")]; + tensor var_8207_pad_0 = const()[name = tensor("op_8207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567276864))), lut = tensor([-0x1.64cp-5, -0x1.accp-7, 0x1.aa8p-7, 0x1.648p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(568915328)))]; + tensor var_8207_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_8205, groups = var_6865, pad = var_8207_pad_0, pad_type = var_8207_pad_type_0, strides = var_8203, weight = up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_493_cast)[name = tensor("op_8207_cast")]; + tensor inputs_247_cast = add(x = var_8207_cast, y = inputs_245_cast)[name = tensor("inputs_247_cast")]; + tensor var_8217 = const()[name = tensor("op_8217"), val = tensor([1])]; + tensor channels_mean_247_cast = reduce_mean(axes = var_8217, keep_dims = var_6860, x = inputs_247_cast)[name = tensor("channels_mean_247_cast")]; + tensor zero_mean_247_cast = sub(x = inputs_247_cast, y = channels_mean_247_cast)[name = tensor("zero_mean_247_cast")]; + tensor zero_mean_sq_247_cast = mul(x = zero_mean_247_cast, y = zero_mean_247_cast)[name = tensor("zero_mean_sq_247_cast")]; + tensor var_8221 = const()[name = tensor("op_8221"), val = tensor([1])]; + tensor var_8222_cast = reduce_mean(axes = var_8221, keep_dims = var_6860, x = zero_mean_sq_247_cast)[name = tensor("op_8222_cast")]; + tensor var_8223_to_fp16 = 
const()[name = tensor("op_8223_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8224_cast = add(x = var_8222_cast, y = var_8223_to_fp16)[name = tensor("op_8224_cast")]; + tensor denom_247_epsilon_0_to_fp16 = const()[name = tensor("denom_247_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_247_cast = rsqrt(epsilon = denom_247_epsilon_0_to_fp16, x = var_8224_cast)[name = tensor("denom_247_cast")]; + tensor out_247_cast = mul(x = zero_mean_247_cast, y = denom_247_cast)[name = tensor("out_247_cast")]; + tensor var_8228_to_fp16 = const()[name = tensor("op_8228_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(568917952)))]; + tensor var_8229_cast = add(x = out_247_cast, y = var_8228_to_fp16)[name = tensor("op_8229_cast")]; + tensor var_8231_to_fp16 = const()[name = tensor("op_8231_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(568920576)))]; + tensor hidden_states_329_cast = mul(x = var_8229_cast, y = var_8231_to_fp16)[name = tensor("hidden_states_329_cast")]; + tensor var_8238 = const()[name = tensor("op_8238"), val = tensor([1, 1])]; + tensor var_8240 = const()[name = tensor("op_8240"), val = tensor([1, 1])]; + tensor q_165_pad_type_0 = const()[name = tensor("q_165_pad_type_0"), val = tensor("custom")]; + tensor q_165_pad_0 = const()[name = tensor("q_165_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(568923200))), lut = tensor([-0x1.7d8p-6, 0x1.7c8p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_165_cast = conv(dilations = var_8240, groups = var_6865, pad = q_165_pad_0, pad_type = q_165_pad_type_0, strides = var_8238, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_329_cast)[name = tensor("q_165_cast")]; + tensor var_8244 = const()[name = tensor("op_8244"), val = tensor([1, 1])]; + tensor var_8246 = const()[name = tensor("op_8246"), val = tensor([1, 1])]; + tensor k_165_pad_type_0 = const()[name = tensor("k_165_pad_type_0"), val = tensor("custom")]; + tensor k_165_pad_0 = const()[name = tensor("k_165_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569128064))), lut = tensor([-0x1.7c8p-6, 0x1.7e8p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_165_cast = conv(dilations = var_8246, groups = var_6865, pad = k_165_pad_0, pad_type = k_165_pad_type_0, strides = var_8244, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_329_cast)[name = tensor("k_165_cast")]; + tensor var_8250 = const()[name = tensor("op_8250"), val = tensor([1, 1])]; + tensor var_8252 = const()[name = tensor("op_8252"), val = tensor([1, 1])]; + tensor v_165_pad_type_0 = const()[name = tensor("v_165_pad_type_0"), val = tensor("custom")]; + tensor v_165_pad_0 = const()[name = tensor("v_165_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569332928))), lut = tensor([-0x1.5e4p-5, -0x1.a6p-7, 0x1.a6cp-7, 0x1.5ecp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_165_cast = conv(dilations = var_8252, groups = var_6865, pad = v_165_pad_0, pad_type = v_165_pad_type_0, strides = var_8250, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_329_cast)[name = tensor("v_165_cast")]; + tensor var_8256 = const()[name = tensor("op_8256"), val = tensor([2, 20, 64, -1])]; + tensor var_8257_cast = reshape(shape = var_8256, x = q_165_cast)[name = tensor("op_8257_cast")]; + tensor var_8258 = const()[name = tensor("op_8258"), val = tensor([2, 20, 64, -1])]; + tensor var_8259_cast = reshape(shape = var_8258, x = k_165_cast)[name = tensor("op_8259_cast")]; + tensor var_8260 = const()[name = tensor("op_8260"), val = tensor([2, 20, 64, -1])]; + tensor var_8261_cast = reshape(shape = var_8260, x = v_165_cast)[name = tensor("op_8261_cast")]; + tensor attn_weights_329_transpose_x_0 = const()[name = tensor("attn_weights_329_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_329_transpose_y_0 = const()[name = tensor("attn_weights_329_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_329_cast = matmul(transpose_x = attn_weights_329_transpose_x_0, transpose_y = attn_weights_329_transpose_y_0, x = var_8257_cast, y = var_8259_cast)[name = tensor("attn_weights_329_cast")]; + tensor attn_weights_331_cast = mul(x = attn_weights_329_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_331_cast")]; + tensor var_8265_cast = softmax(axis = var_6849, x = attn_weights_331_cast)[name = tensor("op_8265_cast")]; + tensor attn_165_transpose_x_0 = const()[name = tensor("attn_165_transpose_x_0"), val = tensor(false)]; + tensor attn_165_transpose_y_0 = const()[name = tensor("attn_165_transpose_y_0"), val = tensor(true)]; + tensor attn_165_cast = matmul(transpose_x = attn_165_transpose_x_0, transpose_y = attn_165_transpose_y_0, x = var_8261_cast, y = var_8265_cast)[name = tensor("attn_165_cast")]; + tensor var_8269 = const()[name = tensor("op_8269"), val = tensor([2, 1280, 1, -1])]; + tensor input_495_cast = reshape(shape = var_8269, x = attn_165_cast)[name = tensor("input_495_cast")]; + tensor var_8274 = const()[name = tensor("op_8274"), val = tensor([1, 1])]; + tensor var_8276 = const()[name = tensor("op_8276"), val = tensor([1, 1])]; + tensor var_8278_pad_type_0 = const()[name = tensor("op_8278_pad_type_0"), val = tensor("custom")]; + tensor var_8278_pad_0 = const()[name = tensor("op_8278_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569742592))), lut = tensor([-0x1.5a4p-5, -0x1.a08p-7, 0x1.ap-7, 0x1.5a4p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(570152256)))]; + tensor var_8278_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_8276, groups = var_6865, pad = var_8278_pad_0, pad_type = var_8278_pad_type_0, strides = var_8274, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_495_cast)[name = tensor("op_8278_cast")]; + tensor inputs_249_cast = add(x = var_8278_cast, y = inputs_247_cast)[name = tensor("inputs_249_cast")]; + tensor var_8282 = const()[name = tensor("op_8282"), val = tensor([1])]; + tensor channels_mean_249_cast = reduce_mean(axes = var_8282, keep_dims = var_6860, x = inputs_249_cast)[name = tensor("channels_mean_249_cast")]; + tensor zero_mean_249_cast = sub(x = inputs_249_cast, y = channels_mean_249_cast)[name = tensor("zero_mean_249_cast")]; + tensor zero_mean_sq_249_cast = mul(x = zero_mean_249_cast, y = zero_mean_249_cast)[name = tensor("zero_mean_sq_249_cast")]; + tensor var_8286 = const()[name = tensor("op_8286"), val = tensor([1])]; + tensor var_8287_cast = reduce_mean(axes = var_8286, keep_dims = var_6860, x = zero_mean_sq_249_cast)[name = tensor("op_8287_cast")]; + tensor var_8288_to_fp16 = const()[name = tensor("op_8288_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8289_cast = add(x = var_8287_cast, y = var_8288_to_fp16)[name = tensor("op_8289_cast")]; + tensor denom_249_epsilon_0_to_fp16 = const()[name = tensor("denom_249_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_249_cast = rsqrt(epsilon = denom_249_epsilon_0_to_fp16, x = var_8289_cast)[name = tensor("denom_249_cast")]; + tensor out_249_cast = mul(x = zero_mean_249_cast, y = denom_249_cast)[name = tensor("out_249_cast")]; + tensor var_8293_to_fp16 = const()[name = tensor("op_8293_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(570154880)))]; + tensor var_8294_cast = add(x = out_249_cast, y = var_8293_to_fp16)[name = tensor("op_8294_cast")]; + tensor var_8296_to_fp16 = const()[name = tensor("op_8296_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(570157504)))]; + tensor hidden_states_331_cast = mul(x = var_8294_cast, y = var_8296_to_fp16)[name = tensor("hidden_states_331_cast")]; + tensor var_8303 = const()[name = tensor("op_8303"), val = tensor([1, 1])]; + tensor var_8305 = const()[name = tensor("op_8305"), val = tensor([1, 1])]; + tensor q_167_pad_type_0 = const()[name = tensor("q_167_pad_type_0"), val = tensor("custom")]; + tensor q_167_pad_0 = const()[name = tensor("q_167_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(570160128))), lut = tensor([-0x1.d44p-7, 0x1.d3cp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_167_cast = conv(dilations = var_8305, groups = var_6865, pad = q_167_pad_0, pad_type = q_167_pad_type_0, strides = var_8303, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_331_cast)[name = tensor("q_167_cast")]; + tensor var_8309 = const()[name = tensor("op_8309"), val = tensor([1, 1])]; + tensor var_8311 = const()[name = tensor("op_8311"), val = tensor([1, 1])]; + tensor k_167_pad_type_0 = 
const()[name = tensor("k_167_pad_type_0"), val = tensor("custom")]; + tensor k_167_pad_0 = const()[name = tensor("k_167_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(570364992))), lut = tensor([-0x1.59cp-7, 0x1.59p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_167_cast = conv(dilations = var_8311, groups = var_6865, pad = k_167_pad_0, pad_type = k_167_pad_type_0, strides = var_8309, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_167_cast")]; + tensor var_8315 = const()[name = tensor("op_8315"), val = tensor([1, 1])]; + tensor var_8317 = const()[name = tensor("op_8317"), val = tensor([1, 1])]; + tensor v_167_pad_type_0 = const()[name = tensor("v_167_pad_type_0"), val = tensor("custom")]; + tensor v_167_pad_0 = const()[name = tensor("v_167_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(570692736))), lut = tensor([-0x1.c78p-7, 0x1.c88p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_167_cast = conv(dilations = var_8317, groups = var_6865, pad = v_167_pad_0, pad_type = v_167_pad_type_0, strides = var_8315, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_167_cast")]; + tensor var_8321 = const()[name = tensor("op_8321"), val = tensor([2, 20, 64, -1])]; + tensor var_8322_cast = reshape(shape = var_8321, x = q_167_cast)[name = tensor("op_8322_cast")]; + tensor var_8323 = const()[name = tensor("op_8323"), val = tensor([2, 20, 64, -1])]; + tensor var_8324_cast = reshape(shape = var_8323, x = k_167_cast)[name = tensor("op_8324_cast")]; + tensor var_8325 = const()[name = tensor("op_8325"), val = tensor([2, 20, 64, -1])]; + tensor var_8326_cast = reshape(shape = var_8325, x = v_167_cast)[name = tensor("op_8326_cast")]; + tensor attn_weights_333_transpose_x_0 = const()[name = tensor("attn_weights_333_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_333_transpose_y_0 = const()[name = tensor("attn_weights_333_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_333_cast = matmul(transpose_x = attn_weights_333_transpose_x_0, transpose_y = attn_weights_333_transpose_y_0, x = var_8322_cast, y = var_8324_cast)[name = tensor("attn_weights_333_cast")]; + tensor attn_weights_335_cast = mul(x = attn_weights_333_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_335_cast")]; + tensor var_8330_cast = softmax(axis = var_6849, x = attn_weights_335_cast)[name = tensor("op_8330_cast")]; + tensor attn_167_transpose_x_0 = const()[name = tensor("attn_167_transpose_x_0"), val = tensor(false)]; + tensor attn_167_transpose_y_0 = const()[name = tensor("attn_167_transpose_y_0"), val = tensor(true)]; + tensor attn_167_cast = matmul(transpose_x = attn_167_transpose_x_0, transpose_y = attn_167_transpose_y_0, x = var_8326_cast, y = var_8330_cast)[name = tensor("attn_167_cast")]; + tensor var_8334 = const()[name = 
tensor("op_8334"), val = tensor([2, 1280, 1, -1])]; + tensor input_497_cast = reshape(shape = var_8334, x = attn_167_cast)[name = tensor("input_497_cast")]; + tensor var_8339 = const()[name = tensor("op_8339"), val = tensor([1, 1])]; + tensor var_8341 = const()[name = tensor("op_8341"), val = tensor([1, 1])]; + tensor var_8343_pad_type_0 = const()[name = tensor("op_8343_pad_type_0"), val = tensor("custom")]; + tensor var_8343_pad_0 = const()[name = tensor("op_8343_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(571020480))), lut = tensor([-0x1.1a4p-7, 0x1.194p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(571225344)))]; + tensor var_8343_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_8341, groups = var_6865, pad = var_8343_pad_0, pad_type = var_8343_pad_type_0, strides = var_8339, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_497_cast)[name = tensor("op_8343_cast")]; + tensor inputs_251_cast = add(x = var_8343_cast, y = inputs_249_cast)[name = tensor("inputs_251_cast")]; + tensor var_8347 = const()[name = tensor("op_8347"), val = tensor([1])]; + tensor channels_mean_251_cast = reduce_mean(axes = var_8347, keep_dims = var_6860, x = inputs_251_cast)[name = tensor("channels_mean_251_cast")]; + tensor zero_mean_251_cast = sub(x = inputs_251_cast, y = channels_mean_251_cast)[name = tensor("zero_mean_251_cast")]; + tensor zero_mean_sq_251_cast = mul(x = zero_mean_251_cast, y = zero_mean_251_cast)[name = tensor("zero_mean_sq_251_cast")]; + tensor var_8351 = const()[name = tensor("op_8351"), val = tensor([1])]; + tensor var_8352_cast = reduce_mean(axes = var_8351, keep_dims = var_6860, x = zero_mean_sq_251_cast)[name = tensor("op_8352_cast")]; + tensor var_8353_to_fp16 = const()[name = tensor("op_8353_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8354_cast = add(x = var_8352_cast, y = var_8353_to_fp16)[name = tensor("op_8354_cast")]; + tensor denom_251_epsilon_0_to_fp16 = const()[name = tensor("denom_251_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_251_cast = rsqrt(epsilon = denom_251_epsilon_0_to_fp16, x = var_8354_cast)[name = tensor("denom_251_cast")]; + tensor out_251_cast = mul(x = zero_mean_251_cast, y = denom_251_cast)[name = tensor("out_251_cast")]; + tensor var_8358_to_fp16 = const()[name = tensor("op_8358_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(571227968)))]; + tensor var_8359_cast = add(x = out_251_cast, y = var_8358_to_fp16)[name = tensor("op_8359_cast")]; + tensor var_8361_to_fp16 = const()[name = tensor("op_8361_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(571230592)))]; + tensor input_499_cast = mul(x = var_8359_cast, y = var_8361_to_fp16)[name = tensor("input_499_cast")]; + tensor var_8369 = const()[name = tensor("op_8369"), val = tensor([1, 1])]; + tensor var_8371 = 
const()[name = tensor("op_8371"), val = tensor([1, 1])]; + tensor var_8373_pad_type_0 = const()[name = tensor("op_8373_pad_type_0"), val = tensor("custom")]; + tensor var_8373_pad_0 = const()[name = tensor("op_8373_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(571233216))), lut = tensor([-0x1.71p-5, -0x1.bc8p-7, 0x1.ba4p-7, 0x1.704p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574510080)))]; + tensor var_8373_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_8371, groups = var_6865, pad = var_8373_pad_0, pad_type = var_8373_pad_type_0, strides = var_8369, weight = up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_499_cast)[name = tensor("op_8373_cast")]; + tensor var_8374_split_sizes_0 = const()[name = tensor("op_8374_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8374_axis_0 = const()[name = tensor("op_8374_axis_0"), val = tensor(1)]; + tensor var_8374_cast_0, tensor var_8374_cast_1 = split(axis = var_8374_axis_0, split_sizes = var_8374_split_sizes_0, x = var_8373_cast)[name = tensor("op_8374_cast")]; + tensor var_8376_mode_0 = const()[name = tensor("op_8376_mode_0"), val = tensor("EXACT")]; + tensor var_8376_cast = gelu(mode = var_8376_mode_0, x = var_8374_cast_1)[name = tensor("op_8376_cast")]; + tensor input_501_cast = mul(x = var_8374_cast_0, y = var_8376_cast)[name = tensor("input_501_cast")]; + tensor var_8380 = const()[name = tensor("op_8380"), val = tensor([1, 1])]; + tensor var_8382 = const()[name = tensor("op_8382"), val = tensor([1, 1])]; + tensor var_8384_pad_type_0 = const()[name = tensor("op_8384_pad_type_0"), val = tensor("custom")]; + tensor var_8384_pad_0 = const()[name = tensor("op_8384_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574530624))), lut = tensor([-0x1.664p-5, -0x1.ae8p-7, 0x1.accp-7, 0x1.65cp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(576169088)))]; + tensor var_8384_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_8382, groups = var_6865, pad = var_8384_pad_0, pad_type = var_8384_pad_type_0, strides = var_8380, weight = up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_501_cast)[name = tensor("op_8384_cast")]; + tensor inputs_253_cast = add(x = var_8384_cast, y = inputs_251_cast)[name = tensor("inputs_253_cast")]; + tensor var_8394 = 
const()[name = tensor("op_8394"), val = tensor([1])]; + tensor channels_mean_253_cast = reduce_mean(axes = var_8394, keep_dims = var_6860, x = inputs_253_cast)[name = tensor("channels_mean_253_cast")]; + tensor zero_mean_253_cast = sub(x = inputs_253_cast, y = channels_mean_253_cast)[name = tensor("zero_mean_253_cast")]; + tensor zero_mean_sq_253_cast = mul(x = zero_mean_253_cast, y = zero_mean_253_cast)[name = tensor("zero_mean_sq_253_cast")]; + tensor var_8398 = const()[name = tensor("op_8398"), val = tensor([1])]; + tensor var_8399_cast = reduce_mean(axes = var_8398, keep_dims = var_6860, x = zero_mean_sq_253_cast)[name = tensor("op_8399_cast")]; + tensor var_8400_to_fp16 = const()[name = tensor("op_8400_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8401_cast = add(x = var_8399_cast, y = var_8400_to_fp16)[name = tensor("op_8401_cast")]; + tensor denom_253_epsilon_0_to_fp16 = const()[name = tensor("denom_253_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_253_cast = rsqrt(epsilon = denom_253_epsilon_0_to_fp16, x = var_8401_cast)[name = tensor("denom_253_cast")]; + tensor out_253_cast = mul(x = zero_mean_253_cast, y = denom_253_cast)[name = tensor("out_253_cast")]; + tensor var_8405_to_fp16 = const()[name = tensor("op_8405_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(576171712)))]; + tensor var_8406_cast = add(x = out_253_cast, y = var_8405_to_fp16)[name = tensor("op_8406_cast")]; + tensor var_8408_to_fp16 = const()[name = tensor("op_8408_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(576174336)))]; + tensor hidden_states_335_cast = mul(x = var_8406_cast, y = var_8408_to_fp16)[name = tensor("hidden_states_335_cast")]; + tensor var_8415 = const()[name = tensor("op_8415"), val = tensor([1, 1])]; + tensor var_8417 = const()[name = tensor("op_8417"), val = tensor([1, 1])]; + tensor q_169_pad_type_0 = const()[name = tensor("q_169_pad_type_0"), val = tensor("custom")]; + tensor q_169_pad_0 = const()[name = tensor("q_169_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(576176960))), lut = tensor([-0x1.804p-6, 0x1.7fcp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_169_cast = conv(dilations = var_8417, groups = var_6865, pad = q_169_pad_0, pad_type = q_169_pad_type_0, strides = var_8415, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_335_cast)[name = tensor("q_169_cast")]; + tensor var_8421 = const()[name = tensor("op_8421"), val = tensor([1, 1])]; + tensor var_8423 = const()[name = tensor("op_8423"), val = tensor([1, 1])]; + tensor k_169_pad_type_0 = const()[name = tensor("k_169_pad_type_0"), val = tensor("custom")]; + tensor k_169_pad_0 = const()[name = tensor("k_169_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(576381824))), lut = tensor([-0x1.804p-6, 0x1.804p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor 
k_169_cast = conv(dilations = var_8423, groups = var_6865, pad = k_169_pad_0, pad_type = k_169_pad_type_0, strides = var_8421, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_335_cast)[name = tensor("k_169_cast")]; + tensor var_8427 = const()[name = tensor("op_8427"), val = tensor([1, 1])]; + tensor var_8429 = const()[name = tensor("op_8429"), val = tensor([1, 1])]; + tensor v_169_pad_type_0 = const()[name = tensor("v_169_pad_type_0"), val = tensor("custom")]; + tensor v_169_pad_0 = const()[name = tensor("v_169_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(576586688))), lut = tensor([-0x1.5c8p-5, -0x1.a3cp-7, 0x1.a34p-7, 0x1.5c8p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_169_cast = conv(dilations = var_8429, groups = var_6865, pad = v_169_pad_0, pad_type = v_169_pad_type_0, strides = var_8427, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_335_cast)[name = tensor("v_169_cast")]; + tensor var_8433 = const()[name = tensor("op_8433"), val = tensor([2, 20, 64, -1])]; + tensor var_8434_cast = reshape(shape = var_8433, x = q_169_cast)[name = tensor("op_8434_cast")]; + tensor var_8435 = const()[name = tensor("op_8435"), val = tensor([2, 20, 64, -1])]; + tensor var_8436_cast = reshape(shape = var_8435, x = k_169_cast)[name = tensor("op_8436_cast")]; + tensor var_8437 = const()[name = tensor("op_8437"), val = tensor([2, 20, 64, -1])]; + tensor var_8438_cast = reshape(shape = var_8437, x = v_169_cast)[name = tensor("op_8438_cast")]; + tensor attn_weights_337_transpose_x_0 = const()[name = tensor("attn_weights_337_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_337_transpose_y_0 = const()[name = tensor("attn_weights_337_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_337_cast = matmul(transpose_x = attn_weights_337_transpose_x_0, transpose_y = attn_weights_337_transpose_y_0, x = var_8434_cast, y = var_8436_cast)[name = tensor("attn_weights_337_cast")]; + tensor attn_weights_339_cast = mul(x = attn_weights_337_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_339_cast")]; + tensor var_8442_cast = softmax(axis = var_6849, x = attn_weights_339_cast)[name = tensor("op_8442_cast")]; + tensor attn_169_transpose_x_0 = const()[name = tensor("attn_169_transpose_x_0"), val = tensor(false)]; + tensor attn_169_transpose_y_0 = const()[name = tensor("attn_169_transpose_y_0"), val = tensor(true)]; + tensor attn_169_cast = matmul(transpose_x = attn_169_transpose_x_0, transpose_y = attn_169_transpose_y_0, x = var_8438_cast, y = var_8442_cast)[name = tensor("attn_169_cast")]; + tensor var_8446 = const()[name = tensor("op_8446"), val = tensor([2, 1280, 1, -1])]; + tensor input_503_cast = reshape(shape = var_8446, x = attn_169_cast)[name = tensor("input_503_cast")]; + tensor var_8451 = const()[name = tensor("op_8451"), val = tensor([1, 1])]; + tensor var_8453 = const()[name = tensor("op_8453"), val = tensor([1, 1])]; + tensor var_8455_pad_type_0 = const()[name = tensor("op_8455_pad_type_0"), val = tensor("custom")]; + tensor var_8455_pad_0 = const()[name = tensor("op_8455_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(576996352))), lut = tensor([-0x1.564p-5, -0x1.9e8p-7, 0x1.984p-7, 0x1.54cp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(577406016)))]; + tensor var_8455_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_8453, groups = var_6865, pad = var_8455_pad_0, pad_type = var_8455_pad_type_0, strides = var_8451, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_503_cast)[name = tensor("op_8455_cast")]; + tensor inputs_255_cast = add(x = var_8455_cast, y = inputs_253_cast)[name = tensor("inputs_255_cast")]; + tensor var_8459 = const()[name = tensor("op_8459"), val = tensor([1])]; + tensor channels_mean_255_cast = reduce_mean(axes = var_8459, keep_dims = var_6860, x = inputs_255_cast)[name = tensor("channels_mean_255_cast")]; + tensor zero_mean_255_cast = sub(x = inputs_255_cast, y = channels_mean_255_cast)[name = tensor("zero_mean_255_cast")]; + tensor zero_mean_sq_255_cast = mul(x = zero_mean_255_cast, y = zero_mean_255_cast)[name = tensor("zero_mean_sq_255_cast")]; + tensor var_8463 = const()[name = tensor("op_8463"), val = tensor([1])]; + tensor var_8464_cast = reduce_mean(axes = var_8463, keep_dims = var_6860, x = zero_mean_sq_255_cast)[name = tensor("op_8464_cast")]; + tensor var_8465_to_fp16 = const()[name = tensor("op_8465_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8466_cast = add(x = var_8464_cast, y = var_8465_to_fp16)[name = tensor("op_8466_cast")]; + tensor denom_255_epsilon_0_to_fp16 = const()[name = tensor("denom_255_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_255_cast = rsqrt(epsilon = denom_255_epsilon_0_to_fp16, x = var_8466_cast)[name = tensor("denom_255_cast")]; + tensor out_255_cast = mul(x = zero_mean_255_cast, y = denom_255_cast)[name = tensor("out_255_cast")]; + tensor var_8470_to_fp16 = const()[name = tensor("op_8470_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(577408640)))]; + tensor var_8471_cast = add(x = out_255_cast, y = var_8470_to_fp16)[name = tensor("op_8471_cast")]; + tensor var_8473_to_fp16 = const()[name = tensor("op_8473_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(577411264)))]; + tensor hidden_states_337_cast = mul(x = var_8471_cast, y = var_8473_to_fp16)[name = tensor("hidden_states_337_cast")]; + tensor var_8480 = const()[name = tensor("op_8480"), val = tensor([1, 1])]; + tensor var_8482 = const()[name = tensor("op_8482"), val = tensor([1, 1])]; + tensor q_171_pad_type_0 = const()[name = tensor("q_171_pad_type_0"), val = tensor("custom")]; + tensor q_171_pad_0 = const()[name = tensor("q_171_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(577413888))), lut = 
tensor([-0x1.b48p-7, 0x1.b54p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_171_cast = conv(dilations = var_8482, groups = var_6865, pad = q_171_pad_0, pad_type = q_171_pad_type_0, strides = var_8480, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_337_cast)[name = tensor("q_171_cast")]; + tensor var_8486 = const()[name = tensor("op_8486"), val = tensor([1, 1])]; + tensor var_8488 = const()[name = tensor("op_8488"), val = tensor([1, 1])]; + tensor k_171_pad_type_0 = const()[name = tensor("k_171_pad_type_0"), val = tensor("custom")]; + tensor k_171_pad_0 = const()[name = tensor("k_171_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(577618752))), lut = tensor([-0x1.38p-7, 0x1.37p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_171_cast = conv(dilations = var_8488, groups = var_6865, pad = k_171_pad_0, pad_type = k_171_pad_type_0, strides = var_8486, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_171_cast")]; + tensor var_8492 = const()[name = tensor("op_8492"), val = tensor([1, 1])]; + tensor var_8494 = const()[name = tensor("op_8494"), val = tensor([1, 1])]; + tensor v_171_pad_type_0 = const()[name = tensor("v_171_pad_type_0"), val = tensor("custom")]; + tensor v_171_pad_0 = const()[name = tensor("v_171_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(577946496))), lut = tensor([-0x1.a1p-7, 0x1.a28p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_171_cast = conv(dilations = var_8494, groups = var_6865, pad = v_171_pad_0, pad_type = v_171_pad_type_0, strides = var_8492, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_171_cast")]; + tensor var_8498 = const()[name = tensor("op_8498"), val = tensor([2, 20, 64, -1])]; + tensor var_8499_cast = reshape(shape = var_8498, x = q_171_cast)[name = tensor("op_8499_cast")]; + tensor var_8500 = const()[name = tensor("op_8500"), val = tensor([2, 20, 64, -1])]; + tensor var_8501_cast = reshape(shape = var_8500, x = k_171_cast)[name = tensor("op_8501_cast")]; + tensor var_8502 = const()[name = tensor("op_8502"), val = tensor([2, 20, 64, -1])]; + tensor var_8503_cast = reshape(shape = var_8502, x = v_171_cast)[name = tensor("op_8503_cast")]; + tensor attn_weights_341_transpose_x_0 = const()[name = tensor("attn_weights_341_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_341_transpose_y_0 = const()[name = tensor("attn_weights_341_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_341_cast = matmul(transpose_x = attn_weights_341_transpose_x_0, transpose_y = attn_weights_341_transpose_y_0, x = var_8499_cast, y = var_8501_cast)[name = tensor("attn_weights_341_cast")]; + tensor attn_weights_343_cast = 
mul(x = attn_weights_341_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_343_cast")]; + tensor var_8507_cast = softmax(axis = var_6849, x = attn_weights_343_cast)[name = tensor("op_8507_cast")]; + tensor attn_171_transpose_x_0 = const()[name = tensor("attn_171_transpose_x_0"), val = tensor(false)]; + tensor attn_171_transpose_y_0 = const()[name = tensor("attn_171_transpose_y_0"), val = tensor(true)]; + tensor attn_171_cast = matmul(transpose_x = attn_171_transpose_x_0, transpose_y = attn_171_transpose_y_0, x = var_8503_cast, y = var_8507_cast)[name = tensor("attn_171_cast")]; + tensor var_8511 = const()[name = tensor("op_8511"), val = tensor([2, 1280, 1, -1])]; + tensor input_505_cast = reshape(shape = var_8511, x = attn_171_cast)[name = tensor("input_505_cast")]; + tensor var_8516 = const()[name = tensor("op_8516"), val = tensor([1, 1])]; + tensor var_8518 = const()[name = tensor("op_8518"), val = tensor([1, 1])]; + tensor var_8520_pad_type_0 = const()[name = tensor("op_8520_pad_type_0"), val = tensor("custom")]; + tensor var_8520_pad_0 = const()[name = tensor("op_8520_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(578274240))), lut = tensor([-0x1.048p-7, 0x1.04cp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(578479104)))]; + tensor var_8520_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_8518, groups = var_6865, pad = var_8520_pad_0, pad_type = var_8520_pad_type_0, strides = var_8516, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_505_cast)[name = tensor("op_8520_cast")]; + tensor inputs_257_cast = add(x = var_8520_cast, y = inputs_255_cast)[name = tensor("inputs_257_cast")]; + tensor var_8524 = const()[name = tensor("op_8524"), val = tensor([1])]; + tensor channels_mean_257_cast = reduce_mean(axes = var_8524, keep_dims = var_6860, x = inputs_257_cast)[name = tensor("channels_mean_257_cast")]; + tensor zero_mean_257_cast = sub(x = inputs_257_cast, y = channels_mean_257_cast)[name = tensor("zero_mean_257_cast")]; + tensor zero_mean_sq_257_cast = mul(x = zero_mean_257_cast, y = zero_mean_257_cast)[name = tensor("zero_mean_sq_257_cast")]; + tensor var_8528 = const()[name = tensor("op_8528"), val = tensor([1])]; + tensor var_8529_cast = reduce_mean(axes = var_8528, keep_dims = var_6860, x = zero_mean_sq_257_cast)[name = tensor("op_8529_cast")]; + tensor var_8530_to_fp16 = const()[name = tensor("op_8530_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8531_cast = add(x = var_8529_cast, y = var_8530_to_fp16)[name = tensor("op_8531_cast")]; + tensor denom_257_epsilon_0_to_fp16 = const()[name = tensor("denom_257_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_257_cast = rsqrt(epsilon = denom_257_epsilon_0_to_fp16, x = var_8531_cast)[name = tensor("denom_257_cast")]; + tensor out_257_cast = mul(x = zero_mean_257_cast, y = denom_257_cast)[name = tensor("out_257_cast")]; + tensor 
var_8535_to_fp16 = const()[name = tensor("op_8535_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(578481728)))]; + tensor var_8536_cast = add(x = out_257_cast, y = var_8535_to_fp16)[name = tensor("op_8536_cast")]; + tensor var_8538_to_fp16 = const()[name = tensor("op_8538_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(578484352)))]; + tensor input_507_cast = mul(x = var_8536_cast, y = var_8538_to_fp16)[name = tensor("input_507_cast")]; + tensor var_8546 = const()[name = tensor("op_8546"), val = tensor([1, 1])]; + tensor var_8548 = const()[name = tensor("op_8548"), val = tensor([1, 1])]; + tensor var_8550_pad_type_0 = const()[name = tensor("op_8550_pad_type_0"), val = tensor("custom")]; + tensor var_8550_pad_0 = const()[name = tensor("op_8550_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(578486976))), lut = tensor([-0x1.718p-5, -0x1.bccp-7, 0x1.bcp-7, 0x1.714p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(581763840)))]; + tensor var_8550_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_8548, groups = var_6865, pad = var_8550_pad_0, pad_type = var_8550_pad_type_0, strides = var_8546, weight = up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_507_cast)[name = tensor("op_8550_cast")]; + tensor var_8551_split_sizes_0 = const()[name = tensor("op_8551_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8551_axis_0 = const()[name = tensor("op_8551_axis_0"), val = tensor(1)]; + tensor var_8551_cast_0, tensor var_8551_cast_1 = split(axis = var_8551_axis_0, split_sizes = var_8551_split_sizes_0, x = var_8550_cast)[name = tensor("op_8551_cast")]; + tensor var_8553_mode_0 = const()[name = tensor("op_8553_mode_0"), val = tensor("EXACT")]; + tensor var_8553_cast = gelu(mode = var_8553_mode_0, x = var_8551_cast_1)[name = tensor("op_8553_cast")]; + tensor input_509_cast = mul(x = var_8551_cast_0, y = var_8553_cast)[name = tensor("input_509_cast")]; + tensor var_8557 = const()[name = tensor("op_8557"), val = tensor([1, 1])]; + tensor var_8559 = const()[name = tensor("op_8559"), val = tensor([1, 1])]; + tensor var_8561_pad_type_0 = const()[name = tensor("op_8561_pad_type_0"), val = tensor("custom")]; + tensor var_8561_pad_0 = const()[name = tensor("op_8561_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(581784384))), lut = tensor([-0x1.65p-5, -0x1.ac8p-7, 0x1.accp-7, 0x1.654p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(583422848)))]; + tensor var_8561_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_8559, groups = var_6865, pad = var_8561_pad_0, pad_type = var_8561_pad_type_0, strides = var_8557, weight = up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_509_cast)[name = tensor("op_8561_cast")]; + tensor inputs_259_cast = add(x = var_8561_cast, y = inputs_257_cast)[name = tensor("inputs_259_cast")]; + tensor var_8571 = const()[name = tensor("op_8571"), val = tensor([1])]; + tensor channels_mean_259_cast = reduce_mean(axes = var_8571, keep_dims = var_6860, x = inputs_259_cast)[name = tensor("channels_mean_259_cast")]; + tensor zero_mean_259_cast = sub(x = inputs_259_cast, y = channels_mean_259_cast)[name = tensor("zero_mean_259_cast")]; + tensor zero_mean_sq_259_cast = mul(x = zero_mean_259_cast, y = zero_mean_259_cast)[name = tensor("zero_mean_sq_259_cast")]; + tensor var_8575 = const()[name = tensor("op_8575"), val = tensor([1])]; + tensor var_8576_cast = reduce_mean(axes = var_8575, keep_dims = var_6860, x = zero_mean_sq_259_cast)[name = tensor("op_8576_cast")]; + tensor var_8577_to_fp16 = const()[name = tensor("op_8577_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8578_cast = add(x = var_8576_cast, y = var_8577_to_fp16)[name = tensor("op_8578_cast")]; + tensor denom_259_epsilon_0_to_fp16 = const()[name = tensor("denom_259_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_259_cast = rsqrt(epsilon = denom_259_epsilon_0_to_fp16, x = var_8578_cast)[name = tensor("denom_259_cast")]; + tensor out_259_cast = mul(x = zero_mean_259_cast, y = denom_259_cast)[name = tensor("out_259_cast")]; + tensor var_8582_to_fp16 = const()[name = tensor("op_8582_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(583425472)))]; + tensor var_8583_cast = add(x = out_259_cast, y = var_8582_to_fp16)[name = tensor("op_8583_cast")]; + tensor var_8585_to_fp16 = const()[name = tensor("op_8585_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(583428096)))]; + tensor hidden_states_341_cast = mul(x = var_8583_cast, y = var_8585_to_fp16)[name = tensor("hidden_states_341_cast")]; + tensor var_8592 = const()[name = tensor("op_8592"), val = tensor([1, 1])]; + tensor var_8594 = const()[name = tensor("op_8594"), val = tensor([1, 1])]; + tensor q_173_pad_type_0 = const()[name = tensor("q_173_pad_type_0"), val = tensor("custom")]; + tensor q_173_pad_0 = const()[name = tensor("q_173_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(583430720))), lut = tensor([-0x1.6b4p-5, -0x1.b6cp-7, 0x1.b3cp-7, 0x1.6a4p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_173_cast = conv(dilations = var_8594, groups = var_6865, pad = q_173_pad_0, pad_type = q_173_pad_type_0, strides = var_8592, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_341_cast)[name = tensor("q_173_cast")]; + tensor var_8598 = const()[name = tensor("op_8598"), val = 
tensor([1, 1])]; + tensor var_8600 = const()[name = tensor("op_8600"), val = tensor([1, 1])]; + tensor k_173_pad_type_0 = const()[name = tensor("k_173_pad_type_0"), val = tensor("custom")]; + tensor k_173_pad_0 = const()[name = tensor("k_173_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(583840384))), lut = tensor([-0x1.81p-6, 0x1.82p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_173_cast = conv(dilations = var_8600, groups = var_6865, pad = k_173_pad_0, pad_type = k_173_pad_type_0, strides = var_8598, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_341_cast)[name = tensor("k_173_cast")]; + tensor var_8604 = const()[name = tensor("op_8604"), val = tensor([1, 1])]; + tensor var_8606 = const()[name = tensor("op_8606"), val = tensor([1, 1])]; + tensor v_173_pad_type_0 = const()[name = tensor("v_173_pad_type_0"), val = tensor("custom")]; + tensor v_173_pad_0 = const()[name = tensor("v_173_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(584045248))), lut = tensor([-0x1.554p-5, -0x1.998p-7, 0x1.9f8p-7, 0x1.56cp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_173_cast = conv(dilations = var_8606, groups = var_6865, pad = v_173_pad_0, pad_type = v_173_pad_type_0, strides = var_8604, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_341_cast)[name = tensor("v_173_cast")]; + tensor var_8610 = const()[name = tensor("op_8610"), val = tensor([2, 20, 64, -1])]; + tensor var_8611_cast = reshape(shape = var_8610, x = q_173_cast)[name = tensor("op_8611_cast")]; + tensor var_8612 = const()[name = tensor("op_8612"), val = tensor([2, 20, 64, -1])]; + tensor var_8613_cast = reshape(shape = var_8612, x = k_173_cast)[name = tensor("op_8613_cast")]; + tensor var_8614 = const()[name = tensor("op_8614"), val = tensor([2, 20, 64, -1])]; + tensor var_8615_cast = reshape(shape = var_8614, x = v_173_cast)[name = tensor("op_8615_cast")]; + tensor attn_weights_345_transpose_x_0 = const()[name = tensor("attn_weights_345_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_345_transpose_y_0 = const()[name = tensor("attn_weights_345_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_345_cast = matmul(transpose_x = attn_weights_345_transpose_x_0, transpose_y = attn_weights_345_transpose_y_0, x = var_8611_cast, y = var_8613_cast)[name = tensor("attn_weights_345_cast")]; + tensor attn_weights_347_cast = mul(x = attn_weights_345_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_347_cast")]; + tensor var_8619_cast = softmax(axis = var_6849, x = attn_weights_347_cast)[name = tensor("op_8619_cast")]; + tensor attn_173_transpose_x_0 = const()[name = tensor("attn_173_transpose_x_0"), val = tensor(false)]; + tensor attn_173_transpose_y_0 = const()[name = tensor("attn_173_transpose_y_0"), val = tensor(true)]; + tensor attn_173_cast = matmul(transpose_x = attn_173_transpose_x_0, 
transpose_y = attn_173_transpose_y_0, x = var_8615_cast, y = var_8619_cast)[name = tensor("attn_173_cast")]; + tensor var_8623 = const()[name = tensor("op_8623"), val = tensor([2, 1280, 1, -1])]; + tensor input_511_cast = reshape(shape = var_8623, x = attn_173_cast)[name = tensor("input_511_cast")]; + tensor var_8628 = const()[name = tensor("op_8628"), val = tensor([1, 1])]; + tensor var_8630 = const()[name = tensor("op_8630"), val = tensor([1, 1])]; + tensor var_8632_pad_type_0 = const()[name = tensor("op_8632_pad_type_0"), val = tensor("custom")]; + tensor var_8632_pad_0 = const()[name = tensor("op_8632_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(584454912))), lut = tensor([-0x1.47p-5, -0x1.8cp-7, 0x1.88p-7, 0x1.46p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(584864576)))]; + tensor var_8632_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_8630, groups = var_6865, pad = var_8632_pad_0, pad_type = var_8632_pad_type_0, strides = var_8628, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_511_cast)[name = tensor("op_8632_cast")]; + tensor inputs_261_cast = add(x = var_8632_cast, y = inputs_259_cast)[name = tensor("inputs_261_cast")]; + tensor var_8636 = const()[name = tensor("op_8636"), val = tensor([1])]; + tensor channels_mean_261_cast = reduce_mean(axes = var_8636, keep_dims = var_6860, x = inputs_261_cast)[name = tensor("channels_mean_261_cast")]; + tensor zero_mean_261_cast = sub(x = inputs_261_cast, y = channels_mean_261_cast)[name = tensor("zero_mean_261_cast")]; + tensor zero_mean_sq_261_cast = mul(x = zero_mean_261_cast, y = zero_mean_261_cast)[name = tensor("zero_mean_sq_261_cast")]; + tensor var_8640 = const()[name = tensor("op_8640"), val = tensor([1])]; + tensor var_8641_cast = reduce_mean(axes = var_8640, keep_dims = var_6860, x = zero_mean_sq_261_cast)[name = tensor("op_8641_cast")]; + tensor var_8642_to_fp16 = const()[name = tensor("op_8642_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8643_cast = add(x = var_8641_cast, y = var_8642_to_fp16)[name = tensor("op_8643_cast")]; + tensor denom_261_epsilon_0_to_fp16 = const()[name = tensor("denom_261_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_261_cast = rsqrt(epsilon = denom_261_epsilon_0_to_fp16, x = var_8643_cast)[name = tensor("denom_261_cast")]; + tensor out_261_cast = mul(x = zero_mean_261_cast, y = denom_261_cast)[name = tensor("out_261_cast")]; + tensor var_8647_to_fp16 = const()[name = tensor("op_8647_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(584867200)))]; + tensor var_8648_cast = add(x = out_261_cast, y = var_8647_to_fp16)[name = tensor("op_8648_cast")]; + tensor var_8650_to_fp16 = const()[name = tensor("op_8650_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(584869824)))]; + tensor hidden_states_343_cast = 
mul(x = var_8648_cast, y = var_8650_to_fp16)[name = tensor("hidden_states_343_cast")]; + tensor var_8657 = const()[name = tensor("op_8657"), val = tensor([1, 1])]; + tensor var_8659 = const()[name = tensor("op_8659"), val = tensor([1, 1])]; + tensor q_175_pad_type_0 = const()[name = tensor("q_175_pad_type_0"), val = tensor("custom")]; + tensor q_175_pad_0 = const()[name = tensor("q_175_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(584872448))), lut = tensor([-0x1.944p-7, 0x1.958p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_175_cast = conv(dilations = var_8659, groups = var_6865, pad = q_175_pad_0, pad_type = q_175_pad_type_0, strides = var_8657, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_343_cast)[name = tensor("q_175_cast")]; + tensor var_8663 = const()[name = tensor("op_8663"), val = tensor([1, 1])]; + tensor var_8665 = const()[name = tensor("op_8665"), val = tensor([1, 1])]; + tensor k_175_pad_type_0 = const()[name = tensor("k_175_pad_type_0"), val = tensor("custom")]; + tensor k_175_pad_0 = const()[name = tensor("k_175_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(585077312))), lut = tensor([-0x1.128p-7, 0x1.124p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_175_cast = conv(dilations = var_8665, groups = var_6865, pad = k_175_pad_0, pad_type = k_175_pad_type_0, strides = var_8663, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_175_cast")]; + tensor var_8669 = const()[name = tensor("op_8669"), val = tensor([1, 1])]; + tensor var_8671 = const()[name = tensor("op_8671"), val = tensor([1, 1])]; + tensor v_175_pad_type_0 = const()[name = tensor("v_175_pad_type_0"), val = tensor("custom")]; + tensor v_175_pad_0 = const()[name = tensor("v_175_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(585405056))), lut = tensor([-0x1.57p-7, 0x1.57cp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_175_cast = conv(dilations = var_8671, groups = var_6865, pad = v_175_pad_0, pad_type = v_175_pad_type_0, strides = var_8669, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_175_cast")]; + tensor var_8675 = const()[name = tensor("op_8675"), val = tensor([2, 20, 64, -1])]; + tensor var_8676_cast = reshape(shape = var_8675, x = q_175_cast)[name = tensor("op_8676_cast")]; + tensor var_8677 = const()[name = tensor("op_8677"), val = tensor([2, 20, 64, -1])]; + tensor var_8678_cast = reshape(shape = var_8677, x = k_175_cast)[name = tensor("op_8678_cast")]; + tensor 
var_8679 = const()[name = tensor("op_8679"), val = tensor([2, 20, 64, -1])]; + tensor var_8680_cast = reshape(shape = var_8679, x = v_175_cast)[name = tensor("op_8680_cast")]; + tensor attn_weights_349_transpose_x_0 = const()[name = tensor("attn_weights_349_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_349_transpose_y_0 = const()[name = tensor("attn_weights_349_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_349_cast = matmul(transpose_x = attn_weights_349_transpose_x_0, transpose_y = attn_weights_349_transpose_y_0, x = var_8676_cast, y = var_8678_cast)[name = tensor("attn_weights_349_cast")]; + tensor attn_weights_351_cast = mul(x = attn_weights_349_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_351_cast")]; + tensor var_8684_cast = softmax(axis = var_6849, x = attn_weights_351_cast)[name = tensor("op_8684_cast")]; + tensor attn_175_transpose_x_0 = const()[name = tensor("attn_175_transpose_x_0"), val = tensor(false)]; + tensor attn_175_transpose_y_0 = const()[name = tensor("attn_175_transpose_y_0"), val = tensor(true)]; + tensor attn_175_cast = matmul(transpose_x = attn_175_transpose_x_0, transpose_y = attn_175_transpose_y_0, x = var_8680_cast, y = var_8684_cast)[name = tensor("attn_175_cast")]; + tensor var_8688 = const()[name = tensor("op_8688"), val = tensor([2, 1280, 1, -1])]; + tensor input_513_cast = reshape(shape = var_8688, x = attn_175_cast)[name = tensor("input_513_cast")]; + tensor var_8693 = const()[name = tensor("op_8693"), val = tensor([1, 1])]; + tensor var_8695 = const()[name = tensor("op_8695"), val = tensor([1, 1])]; + tensor var_8697_pad_type_0 = const()[name = tensor("op_8697_pad_type_0"), val = tensor("custom")]; + tensor var_8697_pad_0 = const()[name = tensor("op_8697_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(585732800))), lut = tensor([-0x1.bep-8, 0x1.bdcp-8]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(585937664)))]; + tensor var_8697_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_8695, groups = var_6865, pad = var_8697_pad_0, pad_type = var_8697_pad_type_0, strides = var_8693, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_513_cast)[name = tensor("op_8697_cast")]; + tensor inputs_263_cast = add(x = var_8697_cast, y = inputs_261_cast)[name = tensor("inputs_263_cast")]; + tensor var_8701 = const()[name = tensor("op_8701"), val = tensor([1])]; + tensor channels_mean_263_cast = reduce_mean(axes = var_8701, keep_dims = var_6860, x = inputs_263_cast)[name = tensor("channels_mean_263_cast")]; + tensor zero_mean_263_cast = sub(x = inputs_263_cast, y = channels_mean_263_cast)[name = tensor("zero_mean_263_cast")]; + tensor zero_mean_sq_263_cast = mul(x = zero_mean_263_cast, y = zero_mean_263_cast)[name = tensor("zero_mean_sq_263_cast")]; + tensor var_8705 = const()[name = tensor("op_8705"), val = tensor([1])]; + tensor var_8706_cast = 
reduce_mean(axes = var_8705, keep_dims = var_6860, x = zero_mean_sq_263_cast)[name = tensor("op_8706_cast")]; + tensor var_8707_to_fp16 = const()[name = tensor("op_8707_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8708_cast = add(x = var_8706_cast, y = var_8707_to_fp16)[name = tensor("op_8708_cast")]; + tensor denom_263_epsilon_0_to_fp16 = const()[name = tensor("denom_263_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_263_cast = rsqrt(epsilon = denom_263_epsilon_0_to_fp16, x = var_8708_cast)[name = tensor("denom_263_cast")]; + tensor out_263_cast = mul(x = zero_mean_263_cast, y = denom_263_cast)[name = tensor("out_263_cast")]; + tensor var_8712_to_fp16 = const()[name = tensor("op_8712_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(585940288)))]; + tensor var_8713_cast = add(x = out_263_cast, y = var_8712_to_fp16)[name = tensor("op_8713_cast")]; + tensor var_8715_to_fp16 = const()[name = tensor("op_8715_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(585942912)))]; + tensor input_515_cast = mul(x = var_8713_cast, y = var_8715_to_fp16)[name = tensor("input_515_cast")]; + tensor var_8723 = const()[name = tensor("op_8723"), val = tensor([1, 1])]; + tensor var_8725 = const()[name = tensor("op_8725"), val = tensor([1, 1])]; + tensor var_8727_pad_type_0 = const()[name = tensor("op_8727_pad_type_0"), val = tensor("custom")]; + tensor var_8727_pad_0 = const()[name = tensor("op_8727_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(585945536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(592499200))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(592499328)))]; + tensor var_8727_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_8725, groups = var_6865, pad = var_8727_pad_0, pad_type = var_8727_pad_type_0, strides = var_8723, weight = up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_515_cast)[name = tensor("op_8727_cast")]; + tensor var_8728_split_sizes_0 = const()[name = tensor("op_8728_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8728_axis_0 = const()[name = tensor("op_8728_axis_0"), val = tensor(1)]; + tensor var_8728_cast_0, tensor var_8728_cast_1 = split(axis = var_8728_axis_0, split_sizes = var_8728_split_sizes_0, x = var_8727_cast)[name = tensor("op_8728_cast")]; + tensor var_8730_mode_0 = const()[name = tensor("op_8730_mode_0"), val = tensor("EXACT")]; + tensor var_8730_cast = gelu(mode = var_8730_mode_0, x = var_8728_cast_1)[name = tensor("op_8730_cast")]; + tensor input_517_cast = mul(x = var_8728_cast_0, y = var_8730_cast)[name = tensor("input_517_cast")]; + tensor var_8734 = const()[name = tensor("op_8734"), val = tensor([1, 1])]; + tensor var_8736 = const()[name = tensor("op_8736"), val = tensor([1, 1])]; + tensor var_8738_pad_type_0 = const()[name = 
tensor("op_8738_pad_type_0"), val = tensor("custom")]; + tensor var_8738_pad_0 = const()[name = tensor("op_8738_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(592519872))), lut = tensor([-0x1.60cp-5, -0x1.a84p-7, 0x1.a58p-7, 0x1.6p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(594158336)))]; + tensor var_8738_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_8736, groups = var_6865, pad = var_8738_pad_0, pad_type = var_8738_pad_type_0, strides = var_8734, weight = up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_517_cast)[name = tensor("op_8738_cast")]; + tensor hidden_states_347_cast = add(x = var_8738_cast, y = inputs_263_cast)[name = tensor("hidden_states_347_cast")]; + tensor var_8740 = const()[name = tensor("op_8740"), val = tensor([2, 1280, 32, 32])]; + tensor input_519_cast = reshape(shape = var_8740, x = hidden_states_347_cast)[name = tensor("input_519_cast")]; + tensor var_8744 = const()[name = tensor("op_8744"), val = tensor([1, 1])]; + tensor var_8746 = const()[name = tensor("op_8746"), val = tensor([1, 1])]; + tensor hidden_states_349_pad_type_0 = const()[name = tensor("hidden_states_349_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_349_pad_0 = const()[name = tensor("hidden_states_349_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(594160960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(594980224))), name = tensor("up_blocks_0_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(594980352)))]; + tensor hidden_states_349_cast = conv(bias = up_blocks_0_attentions_0_proj_out_bias_to_fp16, dilations = var_8746, groups = var_6865, pad = hidden_states_349_pad_0, pad_type = hidden_states_349_pad_type_0, strides = var_8744, weight = up_blocks_0_attentions_0_proj_out_weight_to_fp16_palettized, x = input_519_cast)[name = tensor("hidden_states_349_cast")]; + tensor hidden_states_351_cast = add(x = hidden_states_349_cast, y = hidden_states_283_cast)[name = tensor("hidden_states_351_cast")]; + tensor input_521_interleave_0 = const()[name = tensor("input_521_interleave_0"), val = tensor(false)]; + tensor input_521_cast = concat(axis = var_6865, interleave = input_521_interleave_0, values = (hidden_states_351_cast, input_213_cast))[name = tensor("input_521_cast")]; + tensor reshape_96_shape_0 = const()[name = tensor("reshape_96_shape_0"), val = tensor([2, 32, 80, 32, 32])]; + tensor reshape_96_cast = reshape(shape = reshape_96_shape_0, x = input_521_cast)[name = 
tensor("reshape_96_cast")]; + tensor reduce_mean_72_axes_0 = const()[name = tensor("reduce_mean_72_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_72_keep_dims_0 = const()[name = tensor("reduce_mean_72_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_72_cast = reduce_mean(axes = reduce_mean_72_axes_0, keep_dims = reduce_mean_72_keep_dims_0, x = reshape_96_cast)[name = tensor("reduce_mean_72_cast")]; + tensor sub_48_cast = sub(x = reshape_96_cast, y = reduce_mean_72_cast)[name = tensor("sub_48_cast")]; + tensor square_24_cast = square(x = sub_48_cast)[name = tensor("square_24_cast")]; + tensor reduce_mean_74_axes_0 = const()[name = tensor("reduce_mean_74_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_74_keep_dims_0 = const()[name = tensor("reduce_mean_74_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_74_cast = reduce_mean(axes = reduce_mean_74_axes_0, keep_dims = reduce_mean_74_keep_dims_0, x = square_24_cast)[name = tensor("reduce_mean_74_cast")]; + tensor add_48_y_0_to_fp16 = const()[name = tensor("add_48_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_48_cast = add(x = reduce_mean_74_cast, y = add_48_y_0_to_fp16)[name = tensor("add_48_cast")]; + tensor sqrt_24_cast = sqrt(x = add_48_cast)[name = tensor("sqrt_24_cast")]; + tensor real_div_24_cast = real_div(x = sub_48_cast, y = sqrt_24_cast)[name = tensor("real_div_24_cast")]; + tensor reshape_97_shape_0 = const()[name = tensor("reshape_97_shape_0"), val = tensor([2, 2560, 32, 32])]; + tensor reshape_97_cast = reshape(shape = reshape_97_shape_0, x = real_div_24_cast)[name = tensor("reshape_97_cast")]; + tensor add_49_gamma_0_to_fp16 = const()[name = tensor("add_49_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(594982976)))]; + tensor add_49_beta_0_to_fp16 = const()[name = tensor("add_49_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(594988160)))]; + tensor add_49_epsilon_0_to_fp16 = const()[name = tensor("add_49_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_49_cast = batch_norm(beta = add_49_beta_0_to_fp16, epsilon = add_49_epsilon_0_to_fp16, gamma = add_49_gamma_0_to_fp16, mean = add_43_mean_0_to_fp16, variance = add_43_variance_0_to_fp16, x = reshape_97_cast)[name = tensor("add_49_cast")]; + tensor input_525_cast = silu(x = add_49_cast)[name = tensor("input_525_cast")]; + tensor var_8764 = const()[name = tensor("op_8764"), val = tensor([1, 1])]; + tensor var_8766 = const()[name = tensor("op_8766"), val = tensor([1, 1])]; + tensor hidden_states_353_pad_type_0 = const()[name = tensor("hidden_states_353_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_353_pad_0 = const()[name = tensor("hidden_states_353_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(594993344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(609739008))), name = tensor("up_blocks_0_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 3, 3])]; + tensor up_blocks_0_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(609739136)))]; + tensor hidden_states_353_cast = conv(bias = up_blocks_0_resnets_1_conv1_bias_to_fp16, dilations = 
var_8766, groups = var_6865, pad = hidden_states_353_pad_0, pad_type = hidden_states_353_pad_type_0, strides = var_8764, weight = up_blocks_0_resnets_1_conv1_weight_to_fp16_palettized, x = input_525_cast)[name = tensor("hidden_states_353_cast")]; + tensor var_8772 = const()[name = tensor("op_8772"), val = tensor([1, 1])]; + tensor var_8774 = const()[name = tensor("op_8774"), val = tensor([1, 1])]; + tensor temb_19_pad_type_0 = const()[name = tensor("temb_19_pad_type_0"), val = tensor("custom")]; + tensor temb_19_pad_0 = const()[name = tensor("temb_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(609741760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(610561024))), name = tensor("up_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(610561152)))]; + tensor temb_19_cast = conv(bias = up_blocks_0_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_8774, groups = var_6865, pad = temb_19_pad_0, pad_type = temb_19_pad_type_0, strides = var_8772, weight = up_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_19_cast")]; + tensor input_529_cast = add(x = hidden_states_353_cast, y = temb_19_cast)[name = tensor("input_529_cast")]; + tensor reshape_100_shape_0 = const()[name = tensor("reshape_100_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_100_cast = reshape(shape = reshape_100_shape_0, x = input_529_cast)[name = tensor("reshape_100_cast")]; + tensor reduce_mean_75_axes_0 = const()[name = tensor("reduce_mean_75_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_75_keep_dims_0 = const()[name = tensor("reduce_mean_75_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_75_cast = reduce_mean(axes = reduce_mean_75_axes_0, keep_dims = reduce_mean_75_keep_dims_0, x = reshape_100_cast)[name = tensor("reduce_mean_75_cast")]; + tensor sub_50_cast = sub(x = reshape_100_cast, y = reduce_mean_75_cast)[name = tensor("sub_50_cast")]; + tensor square_25_cast = square(x = sub_50_cast)[name = tensor("square_25_cast")]; + tensor reduce_mean_77_axes_0 = const()[name = tensor("reduce_mean_77_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_77_keep_dims_0 = const()[name = tensor("reduce_mean_77_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_77_cast = reduce_mean(axes = reduce_mean_77_axes_0, keep_dims = reduce_mean_77_keep_dims_0, x = square_25_cast)[name = tensor("reduce_mean_77_cast")]; + tensor add_50_y_0_to_fp16 = const()[name = tensor("add_50_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_50_cast = add(x = reduce_mean_77_cast, y = add_50_y_0_to_fp16)[name = tensor("add_50_cast")]; + tensor sqrt_25_cast = sqrt(x = add_50_cast)[name = tensor("sqrt_25_cast")]; + tensor real_div_25_cast = real_div(x = sub_50_cast, y = sqrt_25_cast)[name = tensor("real_div_25_cast")]; + tensor reshape_101_shape_0 = const()[name = tensor("reshape_101_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_101_cast = reshape(shape = reshape_101_shape_0, x = real_div_25_cast)[name = tensor("reshape_101_cast")]; + tensor add_51_gamma_0_to_fp16 = 
const()[name = tensor("add_51_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(610563776)))]; + tensor add_51_beta_0_to_fp16 = const()[name = tensor("add_51_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(610566400)))]; + tensor add_51_epsilon_0_to_fp16 = const()[name = tensor("add_51_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_51_cast = batch_norm(beta = add_51_beta_0_to_fp16, epsilon = add_51_epsilon_0_to_fp16, gamma = add_51_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_101_cast)[name = tensor("add_51_cast")]; + tensor input_533_cast = silu(x = add_51_cast)[name = tensor("input_533_cast")]; + tensor var_8784 = const()[name = tensor("op_8784"), val = tensor([1, 1])]; + tensor var_8786 = const()[name = tensor("op_8786"), val = tensor([1, 1])]; + tensor hidden_states_355_pad_type_0 = const()[name = tensor("hidden_states_355_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_355_pad_0 = const()[name = tensor("hidden_states_355_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(610569024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(617941888))), name = tensor("up_blocks_0_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor up_blocks_0_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(617942016)))]; + tensor hidden_states_355_cast = conv(bias = up_blocks_0_resnets_1_conv2_bias_to_fp16, dilations = var_8786, groups = var_6865, pad = hidden_states_355_pad_0, pad_type = hidden_states_355_pad_type_0, strides = var_8784, weight = up_blocks_0_resnets_1_conv2_weight_to_fp16_palettized, x = input_533_cast)[name = tensor("hidden_states_355_cast")]; + tensor var_8791 = const()[name = tensor("op_8791"), val = tensor([1, 1])]; + tensor var_8793 = const()[name = tensor("op_8793"), val = tensor([1, 1])]; + tensor x_7_pad_type_0 = const()[name = tensor("x_7_pad_type_0"), val = tensor("custom")]; + tensor x_7_pad_0 = const()[name = tensor("x_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_1_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(617944640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(620402304))), name = tensor("up_blocks_0_resnets_1_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 1, 1])]; + tensor up_blocks_0_resnets_1_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_1_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(620402496)))]; + tensor x_7_cast = conv(bias = up_blocks_0_resnets_1_conv_shortcut_bias_to_fp16, dilations = var_8793, groups = var_6865, pad = x_7_pad_0, pad_type = x_7_pad_type_0, strides = var_8791, weight = up_blocks_0_resnets_1_conv_shortcut_weight_to_fp16_palettized, x = input_521_cast)[name = tensor("x_7_cast")]; + tensor hidden_states_357_cast = add(x = x_7_cast, y = hidden_states_355_cast)[name = 
tensor("hidden_states_357_cast")]; + tensor reshape_104_shape_0 = const()[name = tensor("reshape_104_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_104_cast = reshape(shape = reshape_104_shape_0, x = hidden_states_357_cast)[name = tensor("reshape_104_cast")]; + tensor reduce_mean_78_axes_0 = const()[name = tensor("reduce_mean_78_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_78_keep_dims_0 = const()[name = tensor("reduce_mean_78_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_78_cast = reduce_mean(axes = reduce_mean_78_axes_0, keep_dims = reduce_mean_78_keep_dims_0, x = reshape_104_cast)[name = tensor("reduce_mean_78_cast")]; + tensor sub_52_cast = sub(x = reshape_104_cast, y = reduce_mean_78_cast)[name = tensor("sub_52_cast")]; + tensor square_26_cast = square(x = sub_52_cast)[name = tensor("square_26_cast")]; + tensor reduce_mean_80_axes_0 = const()[name = tensor("reduce_mean_80_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_80_keep_dims_0 = const()[name = tensor("reduce_mean_80_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_80_cast = reduce_mean(axes = reduce_mean_80_axes_0, keep_dims = reduce_mean_80_keep_dims_0, x = square_26_cast)[name = tensor("reduce_mean_80_cast")]; + tensor add_52_y_0_to_fp16 = const()[name = tensor("add_52_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_52_cast = add(x = reduce_mean_80_cast, y = add_52_y_0_to_fp16)[name = tensor("add_52_cast")]; + tensor sqrt_26_cast = sqrt(x = add_52_cast)[name = tensor("sqrt_26_cast")]; + tensor real_div_26_cast = real_div(x = sub_52_cast, y = sqrt_26_cast)[name = tensor("real_div_26_cast")]; + tensor reshape_105_shape_0 = const()[name = tensor("reshape_105_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_105_cast = reshape(shape = reshape_105_shape_0, x = real_div_26_cast)[name = tensor("reshape_105_cast")]; + tensor add_53_gamma_0_to_fp16 = const()[name = tensor("add_53_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(620405120)))]; + tensor add_53_beta_0_to_fp16 = const()[name = tensor("add_53_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(620407744)))]; + tensor add_53_epsilon_0_to_fp16 = const()[name = tensor("add_53_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_53_cast = batch_norm(beta = add_53_beta_0_to_fp16, epsilon = add_53_epsilon_0_to_fp16, gamma = add_53_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_105_cast)[name = tensor("add_53_cast")]; + tensor var_8831 = const()[name = tensor("op_8831"), val = tensor([1, 1])]; + tensor var_8833 = const()[name = tensor("op_8833"), val = tensor([1, 1])]; + tensor hidden_states_359_pad_type_0 = const()[name = tensor("hidden_states_359_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_359_pad_0 = const()[name = tensor("hidden_states_359_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(620410368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(621639232))), name = tensor("up_blocks_0_attentions_1_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_proj_in_bias_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(621639424)))]; + tensor hidden_states_359_cast = conv(bias = up_blocks_0_attentions_1_proj_in_bias_to_fp16, dilations = var_8833, groups = var_6865, pad = hidden_states_359_pad_0, pad_type = hidden_states_359_pad_type_0, strides = var_8831, weight = up_blocks_0_attentions_1_proj_in_weight_to_fp16_palettized, x = add_53_cast)[name = tensor("hidden_states_359_cast")]; + tensor var_8838 = const()[name = tensor("op_8838"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_265_cast = reshape(shape = var_8838, x = hidden_states_359_cast)[name = tensor("inputs_265_cast")]; + tensor var_8848 = const()[name = tensor("op_8848"), val = tensor([1])]; + tensor channels_mean_265_cast = reduce_mean(axes = var_8848, keep_dims = var_6860, x = inputs_265_cast)[name = tensor("channels_mean_265_cast")]; + tensor zero_mean_265_cast = sub(x = inputs_265_cast, y = channels_mean_265_cast)[name = tensor("zero_mean_265_cast")]; + tensor zero_mean_sq_265_cast = mul(x = zero_mean_265_cast, y = zero_mean_265_cast)[name = tensor("zero_mean_sq_265_cast")]; + tensor var_8852 = const()[name = tensor("op_8852"), val = tensor([1])]; + tensor var_8853_cast = reduce_mean(axes = var_8852, keep_dims = var_6860, x = zero_mean_sq_265_cast)[name = tensor("op_8853_cast")]; + tensor var_8854_to_fp16 = const()[name = tensor("op_8854_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8855_cast = add(x = var_8853_cast, y = var_8854_to_fp16)[name = tensor("op_8855_cast")]; + tensor denom_265_epsilon_0_to_fp16 = const()[name = tensor("denom_265_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_265_cast = rsqrt(epsilon = denom_265_epsilon_0_to_fp16, x = var_8855_cast)[name = tensor("denom_265_cast")]; + tensor out_265_cast = mul(x = zero_mean_265_cast, y = denom_265_cast)[name = tensor("out_265_cast")]; + tensor var_8859_to_fp16 = const()[name = tensor("op_8859_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(621642048)))]; + tensor var_8860_cast = add(x = out_265_cast, y = var_8859_to_fp16)[name = tensor("op_8860_cast")]; + tensor var_8862_to_fp16 = const()[name = tensor("op_8862_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(621644672)))]; + tensor hidden_states_361_cast = mul(x = var_8860_cast, y = var_8862_to_fp16)[name = tensor("hidden_states_361_cast")]; + tensor var_8869 = const()[name = tensor("op_8869"), val = tensor([1, 1])]; + tensor var_8871 = const()[name = tensor("op_8871"), val = tensor([1, 1])]; + tensor q_177_pad_type_0 = const()[name = tensor("q_177_pad_type_0"), val = tensor("custom")]; + tensor q_177_pad_0 = const()[name = tensor("q_177_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(621647296))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(622466560))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_177_cast = conv(dilations = var_8871, groups = var_6865, pad = q_177_pad_0, pad_type = q_177_pad_type_0, strides = var_8869, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_361_cast)[name = tensor("q_177_cast")]; + tensor var_8875 = 
const()[name = tensor("op_8875"), val = tensor([1, 1])]; + tensor var_8877 = const()[name = tensor("op_8877"), val = tensor([1, 1])]; + tensor k_177_pad_type_0 = const()[name = tensor("k_177_pad_type_0"), val = tensor("custom")]; + tensor k_177_pad_0 = const()[name = tensor("k_177_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(622466688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(623285952))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_177_cast = conv(dilations = var_8877, groups = var_6865, pad = k_177_pad_0, pad_type = k_177_pad_type_0, strides = var_8875, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_361_cast)[name = tensor("k_177_cast")]; + tensor var_8881 = const()[name = tensor("op_8881"), val = tensor([1, 1])]; + tensor var_8883 = const()[name = tensor("op_8883"), val = tensor([1, 1])]; + tensor v_177_pad_type_0 = const()[name = tensor("v_177_pad_type_0"), val = tensor("custom")]; + tensor v_177_pad_0 = const()[name = tensor("v_177_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(623286080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(624105344))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_177_cast = conv(dilations = var_8883, groups = var_6865, pad = v_177_pad_0, pad_type = v_177_pad_type_0, strides = var_8881, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_361_cast)[name = tensor("v_177_cast")]; + tensor var_8887 = const()[name = tensor("op_8887"), val = tensor([2, 20, 64, -1])]; + tensor var_8888_cast = reshape(shape = var_8887, x = q_177_cast)[name = tensor("op_8888_cast")]; + tensor var_8889 = const()[name = tensor("op_8889"), val = tensor([2, 20, 64, -1])]; + tensor var_8890_cast = reshape(shape = var_8889, x = k_177_cast)[name = tensor("op_8890_cast")]; + tensor var_8891 = const()[name = tensor("op_8891"), val = tensor([2, 20, 64, -1])]; + tensor var_8892_cast = reshape(shape = var_8891, x = v_177_cast)[name = tensor("op_8892_cast")]; + tensor attn_weights_353_transpose_x_0 = const()[name = tensor("attn_weights_353_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_353_transpose_y_0 = const()[name = tensor("attn_weights_353_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_353_cast = matmul(transpose_x = attn_weights_353_transpose_x_0, transpose_y = attn_weights_353_transpose_y_0, x = var_8888_cast, y = var_8890_cast)[name = tensor("attn_weights_353_cast")]; + tensor attn_weights_355_cast = mul(x = attn_weights_353_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_355_cast")]; + tensor var_8896_cast = softmax(axis = var_6849, x = attn_weights_355_cast)[name = tensor("op_8896_cast")]; + tensor attn_177_transpose_x_0 = const()[name = tensor("attn_177_transpose_x_0"), val = tensor(false)]; + tensor attn_177_transpose_y_0 = const()[name 
= tensor("attn_177_transpose_y_0"), val = tensor(true)]; + tensor attn_177_cast = matmul(transpose_x = attn_177_transpose_x_0, transpose_y = attn_177_transpose_y_0, x = var_8892_cast, y = var_8896_cast)[name = tensor("attn_177_cast")]; + tensor var_8900 = const()[name = tensor("op_8900"), val = tensor([2, 1280, 1, -1])]; + tensor input_537_cast = reshape(shape = var_8900, x = attn_177_cast)[name = tensor("input_537_cast")]; + tensor var_8905 = const()[name = tensor("op_8905"), val = tensor([1, 1])]; + tensor var_8907 = const()[name = tensor("op_8907"), val = tensor([1, 1])]; + tensor var_8909_pad_type_0 = const()[name = tensor("op_8909_pad_type_0"), val = tensor("custom")]; + tensor var_8909_pad_0 = const()[name = tensor("op_8909_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(624105472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(625334336))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(625334528)))]; + tensor var_8909_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_8907, groups = var_6865, pad = var_8909_pad_0, pad_type = var_8909_pad_type_0, strides = var_8905, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_537_cast)[name = tensor("op_8909_cast")]; + tensor inputs_267_cast = add(x = var_8909_cast, y = inputs_265_cast)[name = tensor("inputs_267_cast")]; + tensor var_8913 = const()[name = tensor("op_8913"), val = tensor([1])]; + tensor channels_mean_267_cast = reduce_mean(axes = var_8913, keep_dims = var_6860, x = inputs_267_cast)[name = tensor("channels_mean_267_cast")]; + tensor zero_mean_267_cast = sub(x = inputs_267_cast, y = channels_mean_267_cast)[name = tensor("zero_mean_267_cast")]; + tensor zero_mean_sq_267_cast = mul(x = zero_mean_267_cast, y = zero_mean_267_cast)[name = tensor("zero_mean_sq_267_cast")]; + tensor var_8917 = const()[name = tensor("op_8917"), val = tensor([1])]; + tensor var_8918_cast = reduce_mean(axes = var_8917, keep_dims = var_6860, x = zero_mean_sq_267_cast)[name = tensor("op_8918_cast")]; + tensor var_8919_to_fp16 = const()[name = tensor("op_8919_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8920_cast = add(x = var_8918_cast, y = var_8919_to_fp16)[name = tensor("op_8920_cast")]; + tensor denom_267_epsilon_0_to_fp16 = const()[name = tensor("denom_267_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_267_cast = rsqrt(epsilon = denom_267_epsilon_0_to_fp16, x = var_8920_cast)[name = tensor("denom_267_cast")]; + tensor out_267_cast = mul(x = zero_mean_267_cast, y = denom_267_cast)[name = tensor("out_267_cast")]; + tensor var_8924_to_fp16 = const()[name = tensor("op_8924_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(625337152)))]; + tensor var_8925_cast = add(x = out_267_cast, y = var_8924_to_fp16)[name = tensor("op_8925_cast")]; + tensor var_8927_to_fp16 = const()[name = 
tensor("op_8927_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(625339776)))]; + tensor hidden_states_363_cast = mul(x = var_8925_cast, y = var_8927_to_fp16)[name = tensor("hidden_states_363_cast")]; + tensor var_8934 = const()[name = tensor("op_8934"), val = tensor([1, 1])]; + tensor var_8936 = const()[name = tensor("op_8936"), val = tensor([1, 1])]; + tensor q_179_pad_type_0 = const()[name = tensor("q_179_pad_type_0"), val = tensor("custom")]; + tensor q_179_pad_0 = const()[name = tensor("q_179_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(625342400))), lut = tensor([-0x1.d34p-6, -0x1.1dp-7, 0x1.1c8p-7, 0x1.d38p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_179_cast = conv(dilations = var_8936, groups = var_6865, pad = q_179_pad_0, pad_type = q_179_pad_type_0, strides = var_8934, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_363_cast)[name = tensor("q_179_cast")]; + tensor var_8940 = const()[name = tensor("op_8940"), val = tensor([1, 1])]; + tensor var_8942 = const()[name = tensor("op_8942"), val = tensor([1, 1])]; + tensor k_179_pad_type_0 = const()[name = tensor("k_179_pad_type_0"), val = tensor("custom")]; + tensor k_179_pad_0 = const()[name = tensor("k_179_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(625752064))), lut = tensor([-0x1.b84p-6, -0x1.05p-7, 0x1.07p-7, 0x1.b9cp-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_179_cast = conv(dilations = var_8942, groups = var_6865, pad = k_179_pad_0, pad_type = k_179_pad_type_0, strides = var_8940, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_179_cast")]; + tensor var_8946 = const()[name = tensor("op_8946"), val = tensor([1, 1])]; + tensor var_8948 = const()[name = tensor("op_8948"), val = tensor([1, 1])]; + tensor v_179_pad_type_0 = const()[name = tensor("v_179_pad_type_0"), val = tensor("custom")]; + tensor v_179_pad_0 = const()[name = tensor("v_179_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(626407488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(627718272))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_179_cast = conv(dilations = var_8948, groups = var_6865, pad = v_179_pad_0, pad_type = v_179_pad_type_0, strides = var_8946, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_179_cast")]; + tensor var_8952 = const()[name = tensor("op_8952"), val = tensor([2, 20, 64, -1])]; + tensor var_8953_cast 
= reshape(shape = var_8952, x = q_179_cast)[name = tensor("op_8953_cast")]; + tensor var_8954 = const()[name = tensor("op_8954"), val = tensor([2, 20, 64, -1])]; + tensor var_8955_cast = reshape(shape = var_8954, x = k_179_cast)[name = tensor("op_8955_cast")]; + tensor var_8956 = const()[name = tensor("op_8956"), val = tensor([2, 20, 64, -1])]; + tensor var_8957_cast = reshape(shape = var_8956, x = v_179_cast)[name = tensor("op_8957_cast")]; + tensor attn_weights_357_transpose_x_0 = const()[name = tensor("attn_weights_357_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_357_transpose_y_0 = const()[name = tensor("attn_weights_357_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_357_cast = matmul(transpose_x = attn_weights_357_transpose_x_0, transpose_y = attn_weights_357_transpose_y_0, x = var_8953_cast, y = var_8955_cast)[name = tensor("attn_weights_357_cast")]; + tensor attn_weights_359_cast = mul(x = attn_weights_357_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_359_cast")]; + tensor var_8961_cast = softmax(axis = var_6849, x = attn_weights_359_cast)[name = tensor("op_8961_cast")]; + tensor attn_179_transpose_x_0 = const()[name = tensor("attn_179_transpose_x_0"), val = tensor(false)]; + tensor attn_179_transpose_y_0 = const()[name = tensor("attn_179_transpose_y_0"), val = tensor(true)]; + tensor attn_179_cast = matmul(transpose_x = attn_179_transpose_x_0, transpose_y = attn_179_transpose_y_0, x = var_8957_cast, y = var_8961_cast)[name = tensor("attn_179_cast")]; + tensor var_8965 = const()[name = tensor("op_8965"), val = tensor([2, 1280, 1, -1])]; + tensor input_539_cast = reshape(shape = var_8965, x = attn_179_cast)[name = tensor("input_539_cast")]; + tensor var_8970 = const()[name = tensor("op_8970"), val = tensor([1, 1])]; + tensor var_8972 = const()[name = tensor("op_8972"), val = tensor([1, 1])]; + tensor var_8974_pad_type_0 = const()[name = tensor("op_8974_pad_type_0"), val = tensor("custom")]; + tensor var_8974_pad_0 = const()[name = tensor("op_8974_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(627718400))), lut = tensor([-0x1.dc4p-7, -0x1.1bcp-8, 0x1.1e8p-8, 0x1.de8p-7]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(628128064)))]; + tensor var_8974_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_8972, groups = var_6865, pad = var_8974_pad_0, pad_type = var_8974_pad_type_0, strides = var_8970, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_539_cast)[name = tensor("op_8974_cast")]; + tensor inputs_269_cast = add(x = var_8974_cast, y = inputs_267_cast)[name = tensor("inputs_269_cast")]; + tensor var_8978 = const()[name = tensor("op_8978"), val = tensor([1])]; + tensor channels_mean_269_cast = reduce_mean(axes = var_8978, keep_dims = var_6860, x = inputs_269_cast)[name = tensor("channels_mean_269_cast")]; + tensor zero_mean_269_cast = sub(x = inputs_269_cast, y = 
channels_mean_269_cast)[name = tensor("zero_mean_269_cast")]; + tensor zero_mean_sq_269_cast = mul(x = zero_mean_269_cast, y = zero_mean_269_cast)[name = tensor("zero_mean_sq_269_cast")]; + tensor var_8982 = const()[name = tensor("op_8982"), val = tensor([1])]; + tensor var_8983_cast = reduce_mean(axes = var_8982, keep_dims = var_6860, x = zero_mean_sq_269_cast)[name = tensor("op_8983_cast")]; + tensor var_8984_to_fp16 = const()[name = tensor("op_8984_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8985_cast = add(x = var_8983_cast, y = var_8984_to_fp16)[name = tensor("op_8985_cast")]; + tensor denom_269_epsilon_0_to_fp16 = const()[name = tensor("denom_269_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_269_cast = rsqrt(epsilon = denom_269_epsilon_0_to_fp16, x = var_8985_cast)[name = tensor("denom_269_cast")]; + tensor out_269_cast = mul(x = zero_mean_269_cast, y = denom_269_cast)[name = tensor("out_269_cast")]; + tensor var_8989_to_fp16 = const()[name = tensor("op_8989_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(628130688)))]; + tensor var_8990_cast = add(x = out_269_cast, y = var_8989_to_fp16)[name = tensor("op_8990_cast")]; + tensor var_8992_to_fp16 = const()[name = tensor("op_8992_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(628133312)))]; + tensor input_541_cast = mul(x = var_8990_cast, y = var_8992_to_fp16)[name = tensor("input_541_cast")]; + tensor var_9000 = const()[name = tensor("op_9000"), val = tensor([1, 1])]; + tensor var_9002 = const()[name = tensor("op_9002"), val = tensor([1, 1])]; + tensor var_9004_pad_type_0 = const()[name = tensor("op_9004_pad_type_0"), val = tensor("custom")]; + tensor var_9004_pad_0 = const()[name = tensor("op_9004_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(628135936))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(634689600))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(634689728)))]; + tensor var_9004_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_9002, groups = var_6865, pad = var_9004_pad_0, pad_type = var_9004_pad_type_0, strides = var_9000, weight = up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_541_cast)[name = tensor("op_9004_cast")]; + tensor var_9005_split_sizes_0 = const()[name = tensor("op_9005_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9005_axis_0 = const()[name = tensor("op_9005_axis_0"), val = tensor(1)]; + tensor var_9005_cast_0, tensor var_9005_cast_1 = split(axis = var_9005_axis_0, split_sizes = var_9005_split_sizes_0, x = var_9004_cast)[name = tensor("op_9005_cast")]; + tensor var_9007_mode_0 = const()[name = tensor("op_9007_mode_0"), val = tensor("EXACT")]; + tensor var_9007_cast = gelu(mode = var_9007_mode_0, x = var_9005_cast_1)[name = tensor("op_9007_cast")]; + tensor 
input_543_cast = mul(x = var_9005_cast_0, y = var_9007_cast)[name = tensor("input_543_cast")]; + tensor var_9011 = const()[name = tensor("op_9011"), val = tensor([1, 1])]; + tensor var_9013 = const()[name = tensor("op_9013"), val = tensor([1, 1])]; + tensor var_9015_pad_type_0 = const()[name = tensor("op_9015_pad_type_0"), val = tensor("custom")]; + tensor var_9015_pad_0 = const()[name = tensor("op_9015_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(634710272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637987136))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637987264)))]; + tensor var_9015_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_9013, groups = var_6865, pad = var_9015_pad_0, pad_type = var_9015_pad_type_0, strides = var_9011, weight = up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_543_cast)[name = tensor("op_9015_cast")]; + tensor inputs_271_cast = add(x = var_9015_cast, y = inputs_269_cast)[name = tensor("inputs_271_cast")]; + tensor var_9025 = const()[name = tensor("op_9025"), val = tensor([1])]; + tensor channels_mean_271_cast = reduce_mean(axes = var_9025, keep_dims = var_6860, x = inputs_271_cast)[name = tensor("channels_mean_271_cast")]; + tensor zero_mean_271_cast = sub(x = inputs_271_cast, y = channels_mean_271_cast)[name = tensor("zero_mean_271_cast")]; + tensor zero_mean_sq_271_cast = mul(x = zero_mean_271_cast, y = zero_mean_271_cast)[name = tensor("zero_mean_sq_271_cast")]; + tensor var_9029 = const()[name = tensor("op_9029"), val = tensor([1])]; + tensor var_9030_cast = reduce_mean(axes = var_9029, keep_dims = var_6860, x = zero_mean_sq_271_cast)[name = tensor("op_9030_cast")]; + tensor var_9031_to_fp16 = const()[name = tensor("op_9031_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9032_cast = add(x = var_9030_cast, y = var_9031_to_fp16)[name = tensor("op_9032_cast")]; + tensor denom_271_epsilon_0_to_fp16 = const()[name = tensor("denom_271_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_271_cast = rsqrt(epsilon = denom_271_epsilon_0_to_fp16, x = var_9032_cast)[name = tensor("denom_271_cast")]; + tensor out_271_cast = mul(x = zero_mean_271_cast, y = denom_271_cast)[name = tensor("out_271_cast")]; + tensor var_9036_to_fp16 = const()[name = tensor("op_9036_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637989888)))]; + tensor var_9037_cast = add(x = out_271_cast, y = var_9036_to_fp16)[name = tensor("op_9037_cast")]; + tensor var_9039_to_fp16 = const()[name = tensor("op_9039_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637992512)))]; + tensor hidden_states_367_cast = mul(x = var_9037_cast, y = var_9039_to_fp16)[name = tensor("hidden_states_367_cast")]; + tensor var_9046 = const()[name = tensor("op_9046"), val = tensor([1, 1])]; + tensor var_9048 = const()[name = 
tensor("op_9048"), val = tensor([1, 1])]; + tensor q_181_pad_type_0 = const()[name = tensor("q_181_pad_type_0"), val = tensor("custom")]; + tensor q_181_pad_0 = const()[name = tensor("q_181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637995136))), lut = tensor([-0x1.5c4p-5, -0x1.a2p-7, 0x1.a38p-7, 0x1.5d8p-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_181_cast = conv(dilations = var_9048, groups = var_6865, pad = q_181_pad_0, pad_type = q_181_pad_type_0, strides = var_9046, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_367_cast)[name = tensor("q_181_cast")]; + tensor var_9052 = const()[name = tensor("op_9052"), val = tensor([1, 1])]; + tensor var_9054 = const()[name = tensor("op_9054"), val = tensor([1, 1])]; + tensor k_181_pad_type_0 = const()[name = tensor("k_181_pad_type_0"), val = tensor("custom")]; + tensor k_181_pad_0 = const()[name = tensor("k_181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(638404800))), lut = tensor([-0x1.5e4p-5, -0x1.a6cp-7, 0x1.9f8p-7, 0x1.5c8p-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_181_cast = conv(dilations = var_9054, groups = var_6865, pad = k_181_pad_0, pad_type = k_181_pad_type_0, strides = var_9052, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_367_cast)[name = tensor("k_181_cast")]; + tensor var_9058 = const()[name = tensor("op_9058"), val = tensor([1, 1])]; + tensor var_9060 = const()[name = tensor("op_9060"), val = tensor([1, 1])]; + tensor v_181_pad_type_0 = const()[name = tensor("v_181_pad_type_0"), val = tensor("custom")]; + tensor v_181_pad_0 = const()[name = tensor("v_181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(638814464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(639633728))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_181_cast = conv(dilations = var_9060, groups = var_6865, pad = v_181_pad_0, pad_type = v_181_pad_type_0, strides = var_9058, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_367_cast)[name = tensor("v_181_cast")]; + tensor var_9064 = const()[name = tensor("op_9064"), val = tensor([2, 20, 64, -1])]; + tensor var_9065_cast = reshape(shape = var_9064, x = q_181_cast)[name = tensor("op_9065_cast")]; + tensor var_9066 = const()[name = tensor("op_9066"), val = tensor([2, 20, 64, -1])]; + tensor var_9067_cast = reshape(shape = var_9066, x = k_181_cast)[name = tensor("op_9067_cast")]; + tensor var_9068 = const()[name = tensor("op_9068"), val = tensor([2, 20, 64, -1])]; + tensor 
var_9069_cast = reshape(shape = var_9068, x = v_181_cast)[name = tensor("op_9069_cast")]; + tensor attn_weights_361_transpose_x_0 = const()[name = tensor("attn_weights_361_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_361_transpose_y_0 = const()[name = tensor("attn_weights_361_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_361_cast = matmul(transpose_x = attn_weights_361_transpose_x_0, transpose_y = attn_weights_361_transpose_y_0, x = var_9065_cast, y = var_9067_cast)[name = tensor("attn_weights_361_cast")]; + tensor attn_weights_363_cast = mul(x = attn_weights_361_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_363_cast")]; + tensor var_9073_cast = softmax(axis = var_6849, x = attn_weights_363_cast)[name = tensor("op_9073_cast")]; + tensor attn_181_transpose_x_0 = const()[name = tensor("attn_181_transpose_x_0"), val = tensor(false)]; + tensor attn_181_transpose_y_0 = const()[name = tensor("attn_181_transpose_y_0"), val = tensor(true)]; + tensor attn_181_cast = matmul(transpose_x = attn_181_transpose_x_0, transpose_y = attn_181_transpose_y_0, x = var_9069_cast, y = var_9073_cast)[name = tensor("attn_181_cast")]; + tensor var_9077 = const()[name = tensor("op_9077"), val = tensor([2, 1280, 1, -1])]; + tensor input_545_cast = reshape(shape = var_9077, x = attn_181_cast)[name = tensor("input_545_cast")]; + tensor var_9082 = const()[name = tensor("op_9082"), val = tensor([1, 1])]; + tensor var_9084 = const()[name = tensor("op_9084"), val = tensor([1, 1])]; + tensor var_9086_pad_type_0 = const()[name = tensor("op_9086_pad_type_0"), val = tensor("custom")]; + tensor var_9086_pad_0 = const()[name = tensor("op_9086_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(639633856))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(640453120))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(640453248)))]; + tensor var_9086_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_9084, groups = var_6865, pad = var_9086_pad_0, pad_type = var_9086_pad_type_0, strides = var_9082, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_545_cast)[name = tensor("op_9086_cast")]; + tensor inputs_273_cast = add(x = var_9086_cast, y = inputs_271_cast)[name = tensor("inputs_273_cast")]; + tensor var_9090 = const()[name = tensor("op_9090"), val = tensor([1])]; + tensor channels_mean_273_cast = reduce_mean(axes = var_9090, keep_dims = var_6860, x = inputs_273_cast)[name = tensor("channels_mean_273_cast")]; + tensor zero_mean_273_cast = sub(x = inputs_273_cast, y = channels_mean_273_cast)[name = tensor("zero_mean_273_cast")]; + tensor zero_mean_sq_273_cast = mul(x = zero_mean_273_cast, y = zero_mean_273_cast)[name = tensor("zero_mean_sq_273_cast")]; + tensor var_9094 = const()[name = tensor("op_9094"), val = tensor([1])]; + tensor var_9095_cast = reduce_mean(axes = var_9094, keep_dims 
= var_6860, x = zero_mean_sq_273_cast)[name = tensor("op_9095_cast")]; + tensor var_9096_to_fp16 = const()[name = tensor("op_9096_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9097_cast = add(x = var_9095_cast, y = var_9096_to_fp16)[name = tensor("op_9097_cast")]; + tensor denom_273_epsilon_0_to_fp16 = const()[name = tensor("denom_273_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_273_cast = rsqrt(epsilon = denom_273_epsilon_0_to_fp16, x = var_9097_cast)[name = tensor("denom_273_cast")]; + tensor out_273_cast = mul(x = zero_mean_273_cast, y = denom_273_cast)[name = tensor("out_273_cast")]; + tensor var_9101_to_fp16 = const()[name = tensor("op_9101_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(640455872)))]; + tensor var_9102_cast = add(x = out_273_cast, y = var_9101_to_fp16)[name = tensor("op_9102_cast")]; + tensor var_9104_to_fp16 = const()[name = tensor("op_9104_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(640458496)))]; + tensor hidden_states_369_cast = mul(x = var_9102_cast, y = var_9104_to_fp16)[name = tensor("hidden_states_369_cast")]; + tensor var_9111 = const()[name = tensor("op_9111"), val = tensor([1, 1])]; + tensor var_9113 = const()[name = tensor("op_9113"), val = tensor([1, 1])]; + tensor q_183_pad_type_0 = const()[name = tensor("q_183_pad_type_0"), val = tensor("custom")]; + tensor q_183_pad_0 = const()[name = tensor("q_183_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(640461120))), lut = tensor([-0x1.2bp-5, -0x1.64cp-7, 0x1.60cp-7, 0x1.2ap-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_183_cast = conv(dilations = var_9113, groups = var_6865, pad = q_183_pad_0, pad_type = q_183_pad_type_0, strides = var_9111, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_369_cast)[name = tensor("q_183_cast")]; + tensor var_9117 = const()[name = tensor("op_9117"), val = tensor([1, 1])]; + tensor var_9119 = const()[name = tensor("op_9119"), val = tensor([1, 1])]; + tensor k_183_pad_type_0 = const()[name = tensor("k_183_pad_type_0"), val = tensor("custom")]; + tensor k_183_pad_0 = const()[name = tensor("k_183_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(640870784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(642181568))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_183_cast = conv(dilations = var_9119, groups = var_6865, pad = k_183_pad_0, pad_type = k_183_pad_type_0, strides = var_9117, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_183_cast")]; + tensor var_9123 = const()[name = tensor("op_9123"), val = tensor([1, 1])]; + tensor var_9125 = const()[name = tensor("op_9125"), val = tensor([1, 1])]; + tensor v_183_pad_type_0 = const()[name = tensor("v_183_pad_type_0"), val = 
tensor("custom")]; + tensor v_183_pad_0 = const()[name = tensor("v_183_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(642181696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(643492480))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_183_cast = conv(dilations = var_9125, groups = var_6865, pad = v_183_pad_0, pad_type = v_183_pad_type_0, strides = var_9123, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_183_cast")]; + tensor var_9129 = const()[name = tensor("op_9129"), val = tensor([2, 20, 64, -1])]; + tensor var_9130_cast = reshape(shape = var_9129, x = q_183_cast)[name = tensor("op_9130_cast")]; + tensor var_9131 = const()[name = tensor("op_9131"), val = tensor([2, 20, 64, -1])]; + tensor var_9132_cast = reshape(shape = var_9131, x = k_183_cast)[name = tensor("op_9132_cast")]; + tensor var_9133 = const()[name = tensor("op_9133"), val = tensor([2, 20, 64, -1])]; + tensor var_9134_cast = reshape(shape = var_9133, x = v_183_cast)[name = tensor("op_9134_cast")]; + tensor attn_weights_365_transpose_x_0 = const()[name = tensor("attn_weights_365_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_365_transpose_y_0 = const()[name = tensor("attn_weights_365_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_365_cast = matmul(transpose_x = attn_weights_365_transpose_x_0, transpose_y = attn_weights_365_transpose_y_0, x = var_9130_cast, y = var_9132_cast)[name = tensor("attn_weights_365_cast")]; + tensor attn_weights_367_cast = mul(x = attn_weights_365_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_367_cast")]; + tensor var_9138_cast = softmax(axis = var_6849, x = attn_weights_367_cast)[name = tensor("op_9138_cast")]; + tensor attn_183_transpose_x_0 = const()[name = tensor("attn_183_transpose_x_0"), val = tensor(false)]; + tensor attn_183_transpose_y_0 = const()[name = tensor("attn_183_transpose_y_0"), val = tensor(true)]; + tensor attn_183_cast = matmul(transpose_x = attn_183_transpose_x_0, transpose_y = attn_183_transpose_y_0, x = var_9134_cast, y = var_9138_cast)[name = tensor("attn_183_cast")]; + tensor var_9142 = const()[name = tensor("op_9142"), val = tensor([2, 1280, 1, -1])]; + tensor input_547_cast = reshape(shape = var_9142, x = attn_183_cast)[name = tensor("input_547_cast")]; + tensor var_9147 = const()[name = tensor("op_9147"), val = tensor([1, 1])]; + tensor var_9149 = const()[name = tensor("op_9149"), val = tensor([1, 1])]; + tensor var_9151_pad_type_0 = const()[name = tensor("op_9151_pad_type_0"), val = tensor("custom")]; + tensor var_9151_pad_0 = const()[name = tensor("op_9151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(643492608))), lut = tensor([-0x1.26p-6, -0x1.5fp-8, 0x1.5fp-8, 0x1.264p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = 
const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(643902272)))]; + tensor var_9151_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_9149, groups = var_6865, pad = var_9151_pad_0, pad_type = var_9151_pad_type_0, strides = var_9147, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_547_cast)[name = tensor("op_9151_cast")]; + tensor inputs_275_cast = add(x = var_9151_cast, y = inputs_273_cast)[name = tensor("inputs_275_cast")]; + tensor var_9155 = const()[name = tensor("op_9155"), val = tensor([1])]; + tensor channels_mean_275_cast = reduce_mean(axes = var_9155, keep_dims = var_6860, x = inputs_275_cast)[name = tensor("channels_mean_275_cast")]; + tensor zero_mean_275_cast = sub(x = inputs_275_cast, y = channels_mean_275_cast)[name = tensor("zero_mean_275_cast")]; + tensor zero_mean_sq_275_cast = mul(x = zero_mean_275_cast, y = zero_mean_275_cast)[name = tensor("zero_mean_sq_275_cast")]; + tensor var_9159 = const()[name = tensor("op_9159"), val = tensor([1])]; + tensor var_9160_cast = reduce_mean(axes = var_9159, keep_dims = var_6860, x = zero_mean_sq_275_cast)[name = tensor("op_9160_cast")]; + tensor var_9161_to_fp16 = const()[name = tensor("op_9161_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9162_cast = add(x = var_9160_cast, y = var_9161_to_fp16)[name = tensor("op_9162_cast")]; + tensor denom_275_epsilon_0_to_fp16 = const()[name = tensor("denom_275_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_275_cast = rsqrt(epsilon = denom_275_epsilon_0_to_fp16, x = var_9162_cast)[name = tensor("denom_275_cast")]; + tensor out_275_cast = mul(x = zero_mean_275_cast, y = denom_275_cast)[name = tensor("out_275_cast")]; + tensor var_9166_to_fp16 = const()[name = tensor("op_9166_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(643904896)))]; + tensor var_9167_cast = add(x = out_275_cast, y = var_9166_to_fp16)[name = tensor("op_9167_cast")]; + tensor var_9169_to_fp16 = const()[name = tensor("op_9169_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(643907520)))]; + tensor input_549_cast = mul(x = var_9167_cast, y = var_9169_to_fp16)[name = tensor("input_549_cast")]; + tensor var_9177 = const()[name = tensor("op_9177"), val = tensor([1, 1])]; + tensor var_9179 = const()[name = tensor("op_9179"), val = tensor([1, 1])]; + tensor var_9181_pad_type_0 = const()[name = tensor("op_9181_pad_type_0"), val = tensor("custom")]; + tensor var_9181_pad_0 = const()[name = tensor("op_9181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(643910144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(653740608))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(653740800)))]; + tensor var_9181_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_9179, groups = var_6865, pad = var_9181_pad_0, pad_type = var_9181_pad_type_0, strides = var_9177, weight = up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_549_cast)[name = tensor("op_9181_cast")]; + tensor var_9182_split_sizes_0 = const()[name = tensor("op_9182_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9182_axis_0 = const()[name = tensor("op_9182_axis_0"), val = tensor(1)]; + tensor var_9182_cast_0, tensor var_9182_cast_1 = split(axis = var_9182_axis_0, split_sizes = var_9182_split_sizes_0, x = var_9181_cast)[name = tensor("op_9182_cast")]; + tensor var_9184_mode_0 = const()[name = tensor("op_9184_mode_0"), val = tensor("EXACT")]; + tensor var_9184_cast = gelu(mode = var_9184_mode_0, x = var_9182_cast_1)[name = tensor("op_9184_cast")]; + tensor input_551_cast = mul(x = var_9182_cast_0, y = var_9184_cast)[name = tensor("input_551_cast")]; + tensor var_9188 = const()[name = tensor("op_9188"), val = tensor([1, 1])]; + tensor var_9190 = const()[name = tensor("op_9190"), val = tensor([1, 1])]; + tensor var_9192_pad_type_0 = const()[name = tensor("op_9192_pad_type_0"), val = tensor("custom")]; + tensor var_9192_pad_0 = const()[name = tensor("op_9192_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(653761344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657038208))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657038336)))]; + tensor var_9192_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_9190, groups = var_6865, pad = var_9192_pad_0, pad_type = var_9192_pad_type_0, strides = var_9188, weight = up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_551_cast)[name = tensor("op_9192_cast")]; + tensor inputs_277_cast = add(x = var_9192_cast, y = inputs_275_cast)[name = tensor("inputs_277_cast")]; + tensor var_9202 = const()[name = tensor("op_9202"), val = tensor([1])]; + tensor channels_mean_277_cast = reduce_mean(axes = var_9202, keep_dims = var_6860, x = inputs_277_cast)[name = tensor("channels_mean_277_cast")]; + tensor zero_mean_277_cast = sub(x = inputs_277_cast, y = channels_mean_277_cast)[name = tensor("zero_mean_277_cast")]; + tensor zero_mean_sq_277_cast = mul(x = zero_mean_277_cast, y = zero_mean_277_cast)[name = tensor("zero_mean_sq_277_cast")]; + tensor var_9206 = const()[name = tensor("op_9206"), val = tensor([1])]; + tensor var_9207_cast = reduce_mean(axes = var_9206, keep_dims = var_6860, x = zero_mean_sq_277_cast)[name = tensor("op_9207_cast")]; + tensor var_9208_to_fp16 = const()[name = tensor("op_9208_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9209_cast = add(x = var_9207_cast, y = var_9208_to_fp16)[name = tensor("op_9209_cast")]; + tensor denom_277_epsilon_0_to_fp16 = const()[name = 
tensor("denom_277_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_277_cast = rsqrt(epsilon = denom_277_epsilon_0_to_fp16, x = var_9209_cast)[name = tensor("denom_277_cast")]; + tensor out_277_cast = mul(x = zero_mean_277_cast, y = denom_277_cast)[name = tensor("out_277_cast")]; + tensor var_9213_to_fp16 = const()[name = tensor("op_9213_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657040960)))]; + tensor var_9214_cast = add(x = out_277_cast, y = var_9213_to_fp16)[name = tensor("op_9214_cast")]; + tensor var_9216_to_fp16 = const()[name = tensor("op_9216_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657043584)))]; + tensor hidden_states_373_cast = mul(x = var_9214_cast, y = var_9216_to_fp16)[name = tensor("hidden_states_373_cast")]; + tensor var_9223 = const()[name = tensor("op_9223"), val = tensor([1, 1])]; + tensor var_9225 = const()[name = tensor("op_9225"), val = tensor([1, 1])]; + tensor q_185_pad_type_0 = const()[name = tensor("q_185_pad_type_0"), val = tensor("custom")]; + tensor q_185_pad_0 = const()[name = tensor("q_185_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657046208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657865472))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_185_cast = conv(dilations = var_9225, groups = var_6865, pad = q_185_pad_0, pad_type = q_185_pad_type_0, strides = var_9223, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_373_cast)[name = tensor("q_185_cast")]; + tensor var_9229 = const()[name = tensor("op_9229"), val = tensor([1, 1])]; + tensor var_9231 = const()[name = tensor("op_9231"), val = tensor([1, 1])]; + tensor k_185_pad_type_0 = const()[name = tensor("k_185_pad_type_0"), val = tensor("custom")]; + tensor k_185_pad_0 = const()[name = tensor("k_185_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657865600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(658684864))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_185_cast = conv(dilations = var_9231, groups = var_6865, pad = k_185_pad_0, pad_type = k_185_pad_type_0, strides = var_9229, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_373_cast)[name = tensor("k_185_cast")]; + tensor var_9235 = const()[name = tensor("op_9235"), val = tensor([1, 1])]; + tensor var_9237 = const()[name = tensor("op_9237"), val = tensor([1, 1])]; + tensor v_185_pad_type_0 = const()[name = tensor("v_185_pad_type_0"), val = tensor("custom")]; + tensor v_185_pad_0 = const()[name = tensor("v_185_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(658684992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(659504256))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_185_cast = conv(dilations = var_9237, groups = var_6865, pad = v_185_pad_0, pad_type = v_185_pad_type_0, strides = var_9235, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_373_cast)[name = tensor("v_185_cast")]; + tensor var_9241 = const()[name = tensor("op_9241"), val = tensor([2, 20, 64, -1])]; + tensor var_9242_cast = reshape(shape = var_9241, x = q_185_cast)[name = tensor("op_9242_cast")]; + tensor var_9243 = const()[name = tensor("op_9243"), val = tensor([2, 20, 64, -1])]; + tensor var_9244_cast = reshape(shape = var_9243, x = k_185_cast)[name = tensor("op_9244_cast")]; + tensor var_9245 = const()[name = tensor("op_9245"), val = tensor([2, 20, 64, -1])]; + tensor var_9246_cast = reshape(shape = var_9245, x = v_185_cast)[name = tensor("op_9246_cast")]; + tensor attn_weights_369_transpose_x_0 = const()[name = tensor("attn_weights_369_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_369_transpose_y_0 = const()[name = tensor("attn_weights_369_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_369_cast = matmul(transpose_x = attn_weights_369_transpose_x_0, transpose_y = attn_weights_369_transpose_y_0, x = var_9242_cast, y = var_9244_cast)[name = tensor("attn_weights_369_cast")]; + tensor attn_weights_371_cast = mul(x = attn_weights_369_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_371_cast")]; + tensor var_9250_cast = softmax(axis = var_6849, x = attn_weights_371_cast)[name = tensor("op_9250_cast")]; + tensor attn_185_transpose_x_0 = const()[name = tensor("attn_185_transpose_x_0"), val = tensor(false)]; + tensor attn_185_transpose_y_0 = const()[name = tensor("attn_185_transpose_y_0"), val = tensor(true)]; + tensor attn_185_cast = matmul(transpose_x = attn_185_transpose_x_0, transpose_y = attn_185_transpose_y_0, x = var_9246_cast, y = var_9250_cast)[name = tensor("attn_185_cast")]; + tensor var_9254 = const()[name = tensor("op_9254"), val = tensor([2, 1280, 1, -1])]; + tensor input_553_cast = reshape(shape = var_9254, x = attn_185_cast)[name = tensor("input_553_cast")]; + tensor var_9259 = const()[name = tensor("op_9259"), val = tensor([1, 1])]; + tensor var_9261 = const()[name = tensor("op_9261"), val = tensor([1, 1])]; + tensor var_9263_pad_type_0 = const()[name = tensor("op_9263_pad_type_0"), val = tensor("custom")]; + tensor var_9263_pad_0 = const()[name = tensor("op_9263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(659504384))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(660323648))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(660323776)))]; + tensor var_9263_cast = 
conv(bias = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_9261, groups = var_6865, pad = var_9263_pad_0, pad_type = var_9263_pad_type_0, strides = var_9259, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_553_cast)[name = tensor("op_9263_cast")]; + tensor inputs_279_cast = add(x = var_9263_cast, y = inputs_277_cast)[name = tensor("inputs_279_cast")]; + tensor var_9267 = const()[name = tensor("op_9267"), val = tensor([1])]; + tensor channels_mean_279_cast = reduce_mean(axes = var_9267, keep_dims = var_6860, x = inputs_279_cast)[name = tensor("channels_mean_279_cast")]; + tensor zero_mean_279_cast = sub(x = inputs_279_cast, y = channels_mean_279_cast)[name = tensor("zero_mean_279_cast")]; + tensor zero_mean_sq_279_cast = mul(x = zero_mean_279_cast, y = zero_mean_279_cast)[name = tensor("zero_mean_sq_279_cast")]; + tensor var_9271 = const()[name = tensor("op_9271"), val = tensor([1])]; + tensor var_9272_cast = reduce_mean(axes = var_9271, keep_dims = var_6860, x = zero_mean_sq_279_cast)[name = tensor("op_9272_cast")]; + tensor var_9273_to_fp16 = const()[name = tensor("op_9273_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9274_cast = add(x = var_9272_cast, y = var_9273_to_fp16)[name = tensor("op_9274_cast")]; + tensor denom_279_epsilon_0_to_fp16 = const()[name = tensor("denom_279_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_279_cast = rsqrt(epsilon = denom_279_epsilon_0_to_fp16, x = var_9274_cast)[name = tensor("denom_279_cast")]; + tensor out_279_cast = mul(x = zero_mean_279_cast, y = denom_279_cast)[name = tensor("out_279_cast")]; + tensor var_9278_to_fp16 = const()[name = tensor("op_9278_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(660326400)))]; + tensor var_9279_cast = add(x = out_279_cast, y = var_9278_to_fp16)[name = tensor("op_9279_cast")]; + tensor var_9281_to_fp16 = const()[name = tensor("op_9281_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(660329024)))]; + tensor hidden_states_375_cast = mul(x = var_9279_cast, y = var_9281_to_fp16)[name = tensor("hidden_states_375_cast")]; + tensor var_9288 = const()[name = tensor("op_9288"), val = tensor([1, 1])]; + tensor var_9290 = const()[name = tensor("op_9290"), val = tensor([1, 1])]; + tensor q_187_pad_type_0 = const()[name = tensor("q_187_pad_type_0"), val = tensor("custom")]; + tensor q_187_pad_0 = const()[name = tensor("q_187_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(660331648))), lut = tensor([-0x1.348p-5, -0x1.6fcp-7, 0x1.71cp-7, 0x1.354p-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_187_cast = conv(dilations = var_9290, groups = var_6865, pad = q_187_pad_0, pad_type = q_187_pad_type_0, strides = var_9288, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_375_cast)[name = tensor("q_187_cast")]; + tensor var_9294 = const()[name = tensor("op_9294"), val = tensor([1, 1])]; + tensor var_9296 = const()[name = tensor("op_9296"), val = tensor([1, 1])]; + tensor k_187_pad_type_0 = const()[name = tensor("k_187_pad_type_0"), val = tensor("custom")]; + 
tensor k_187_pad_0 = const()[name = tensor("k_187_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(660741312))), lut = tensor([-0x1.13cp-5, -0x1.41cp-7, 0x1.4ap-7, 0x1.154p-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_187_cast = conv(dilations = var_9296, groups = var_6865, pad = k_187_pad_0, pad_type = k_187_pad_type_0, strides = var_9294, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_187_cast")]; + tensor var_9300 = const()[name = tensor("op_9300"), val = tensor([1, 1])]; + tensor var_9302 = const()[name = tensor("op_9302"), val = tensor([1, 1])]; + tensor v_187_pad_type_0 = const()[name = tensor("v_187_pad_type_0"), val = tensor("custom")]; + tensor v_187_pad_0 = const()[name = tensor("v_187_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(661396736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(662707520))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_187_cast = conv(dilations = var_9302, groups = var_6865, pad = v_187_pad_0, pad_type = v_187_pad_type_0, strides = var_9300, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_187_cast")]; + tensor var_9306 = const()[name = tensor("op_9306"), val = tensor([2, 20, 64, -1])]; + tensor var_9307_cast = reshape(shape = var_9306, x = q_187_cast)[name = tensor("op_9307_cast")]; + tensor var_9308 = const()[name = tensor("op_9308"), val = tensor([2, 20, 64, -1])]; + tensor var_9309_cast = reshape(shape = var_9308, x = k_187_cast)[name = tensor("op_9309_cast")]; + tensor var_9310 = const()[name = tensor("op_9310"), val = tensor([2, 20, 64, -1])]; + tensor var_9311_cast = reshape(shape = var_9310, x = v_187_cast)[name = tensor("op_9311_cast")]; + tensor attn_weights_373_transpose_x_0 = const()[name = tensor("attn_weights_373_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_373_transpose_y_0 = const()[name = tensor("attn_weights_373_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_373_cast = matmul(transpose_x = attn_weights_373_transpose_x_0, transpose_y = attn_weights_373_transpose_y_0, x = var_9307_cast, y = var_9309_cast)[name = tensor("attn_weights_373_cast")]; + tensor attn_weights_375_cast = mul(x = attn_weights_373_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_375_cast")]; + tensor var_9315_cast = softmax(axis = var_6849, x = attn_weights_375_cast)[name = tensor("op_9315_cast")]; + tensor attn_187_transpose_x_0 = const()[name = tensor("attn_187_transpose_x_0"), val = tensor(false)]; + tensor attn_187_transpose_y_0 = const()[name = tensor("attn_187_transpose_y_0"), val = tensor(true)]; + tensor attn_187_cast = matmul(transpose_x = attn_187_transpose_x_0, transpose_y = attn_187_transpose_y_0, x = var_9311_cast, y = var_9315_cast)[name = tensor("attn_187_cast")]; + tensor var_9319 = const()[name 
= tensor("op_9319"), val = tensor([2, 1280, 1, -1])]; + tensor input_555_cast = reshape(shape = var_9319, x = attn_187_cast)[name = tensor("input_555_cast")]; + tensor var_9324 = const()[name = tensor("op_9324"), val = tensor([1, 1])]; + tensor var_9326 = const()[name = tensor("op_9326"), val = tensor([1, 1])]; + tensor var_9328_pad_type_0 = const()[name = tensor("op_9328_pad_type_0"), val = tensor("custom")]; + tensor var_9328_pad_0 = const()[name = tensor("op_9328_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(662707648))), lut = tensor([-0x1.394p-6, -0x1.76cp-8, 0x1.754p-8, 0x1.39p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(663117312)))]; + tensor var_9328_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_9326, groups = var_6865, pad = var_9328_pad_0, pad_type = var_9328_pad_type_0, strides = var_9324, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_555_cast)[name = tensor("op_9328_cast")]; + tensor inputs_281_cast = add(x = var_9328_cast, y = inputs_279_cast)[name = tensor("inputs_281_cast")]; + tensor var_9332 = const()[name = tensor("op_9332"), val = tensor([1])]; + tensor channels_mean_281_cast = reduce_mean(axes = var_9332, keep_dims = var_6860, x = inputs_281_cast)[name = tensor("channels_mean_281_cast")]; + tensor zero_mean_281_cast = sub(x = inputs_281_cast, y = channels_mean_281_cast)[name = tensor("zero_mean_281_cast")]; + tensor zero_mean_sq_281_cast = mul(x = zero_mean_281_cast, y = zero_mean_281_cast)[name = tensor("zero_mean_sq_281_cast")]; + tensor var_9336 = const()[name = tensor("op_9336"), val = tensor([1])]; + tensor var_9337_cast = reduce_mean(axes = var_9336, keep_dims = var_6860, x = zero_mean_sq_281_cast)[name = tensor("op_9337_cast")]; + tensor var_9338_to_fp16 = const()[name = tensor("op_9338_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9339_cast = add(x = var_9337_cast, y = var_9338_to_fp16)[name = tensor("op_9339_cast")]; + tensor denom_281_epsilon_0_to_fp16 = const()[name = tensor("denom_281_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_281_cast = rsqrt(epsilon = denom_281_epsilon_0_to_fp16, x = var_9339_cast)[name = tensor("denom_281_cast")]; + tensor out_281_cast = mul(x = zero_mean_281_cast, y = denom_281_cast)[name = tensor("out_281_cast")]; + tensor var_9343_to_fp16 = const()[name = tensor("op_9343_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(663119936)))]; + tensor var_9344_cast = add(x = out_281_cast, y = var_9343_to_fp16)[name = tensor("op_9344_cast")]; + tensor var_9346_to_fp16 = const()[name = tensor("op_9346_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(663122560)))]; + tensor input_557_cast = mul(x = var_9344_cast, y = var_9346_to_fp16)[name = tensor("input_557_cast")]; + tensor var_9354 = const()[name = tensor("op_9354"), val = tensor([1, 
1])]; + tensor var_9356 = const()[name = tensor("op_9356"), val = tensor([1, 1])]; + tensor var_9358_pad_type_0 = const()[name = tensor("op_9358_pad_type_0"), val = tensor("custom")]; + tensor var_9358_pad_0 = const()[name = tensor("op_9358_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(663125184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(672955648))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(672955840)))]; + tensor var_9358_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_9356, groups = var_6865, pad = var_9358_pad_0, pad_type = var_9358_pad_type_0, strides = var_9354, weight = up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_557_cast)[name = tensor("op_9358_cast")]; + tensor var_9359_split_sizes_0 = const()[name = tensor("op_9359_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9359_axis_0 = const()[name = tensor("op_9359_axis_0"), val = tensor(1)]; + tensor var_9359_cast_0, tensor var_9359_cast_1 = split(axis = var_9359_axis_0, split_sizes = var_9359_split_sizes_0, x = var_9358_cast)[name = tensor("op_9359_cast")]; + tensor var_9361_mode_0 = const()[name = tensor("op_9361_mode_0"), val = tensor("EXACT")]; + tensor var_9361_cast = gelu(mode = var_9361_mode_0, x = var_9359_cast_1)[name = tensor("op_9361_cast")]; + tensor input_559_cast = mul(x = var_9359_cast_0, y = var_9361_cast)[name = tensor("input_559_cast")]; + tensor var_9365 = const()[name = tensor("op_9365"), val = tensor([1, 1])]; + tensor var_9367 = const()[name = tensor("op_9367"), val = tensor([1, 1])]; + tensor var_9369_pad_type_0 = const()[name = tensor("op_9369_pad_type_0"), val = tensor("custom")]; + tensor var_9369_pad_0 = const()[name = tensor("op_9369_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(672976384))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676253248))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676253376)))]; + tensor var_9369_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_9367, groups = var_6865, pad = var_9369_pad_0, pad_type = var_9369_pad_type_0, strides = var_9365, weight = up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_559_cast)[name = tensor("op_9369_cast")]; + tensor inputs_283_cast = 
add(x = var_9369_cast, y = inputs_281_cast)[name = tensor("inputs_283_cast")]; + tensor var_9379 = const()[name = tensor("op_9379"), val = tensor([1])]; + tensor channels_mean_283_cast = reduce_mean(axes = var_9379, keep_dims = var_6860, x = inputs_283_cast)[name = tensor("channels_mean_283_cast")]; + tensor zero_mean_283_cast = sub(x = inputs_283_cast, y = channels_mean_283_cast)[name = tensor("zero_mean_283_cast")]; + tensor zero_mean_sq_283_cast = mul(x = zero_mean_283_cast, y = zero_mean_283_cast)[name = tensor("zero_mean_sq_283_cast")]; + tensor var_9383 = const()[name = tensor("op_9383"), val = tensor([1])]; + tensor var_9384_cast = reduce_mean(axes = var_9383, keep_dims = var_6860, x = zero_mean_sq_283_cast)[name = tensor("op_9384_cast")]; + tensor var_9385_to_fp16 = const()[name = tensor("op_9385_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9386_cast = add(x = var_9384_cast, y = var_9385_to_fp16)[name = tensor("op_9386_cast")]; + tensor denom_283_epsilon_0_to_fp16 = const()[name = tensor("denom_283_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_283_cast = rsqrt(epsilon = denom_283_epsilon_0_to_fp16, x = var_9386_cast)[name = tensor("denom_283_cast")]; + tensor out_283_cast = mul(x = zero_mean_283_cast, y = denom_283_cast)[name = tensor("out_283_cast")]; + tensor var_9390_to_fp16 = const()[name = tensor("op_9390_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676256000)))]; + tensor var_9391_cast = add(x = out_283_cast, y = var_9390_to_fp16)[name = tensor("op_9391_cast")]; + tensor var_9393_to_fp16 = const()[name = tensor("op_9393_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676258624)))]; + tensor hidden_states_379_cast = mul(x = var_9391_cast, y = var_9393_to_fp16)[name = tensor("hidden_states_379_cast")]; + tensor var_9400 = const()[name = tensor("op_9400"), val = tensor([1, 1])]; + tensor var_9402 = const()[name = tensor("op_9402"), val = tensor([1, 1])]; + tensor q_189_pad_type_0 = const()[name = tensor("q_189_pad_type_0"), val = tensor("custom")]; + tensor q_189_pad_0 = const()[name = tensor("q_189_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676261248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(677080512))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_189_cast = conv(dilations = var_9402, groups = var_6865, pad = q_189_pad_0, pad_type = q_189_pad_type_0, strides = var_9400, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_379_cast)[name = tensor("q_189_cast")]; + tensor var_9406 = const()[name = tensor("op_9406"), val = tensor([1, 1])]; + tensor var_9408 = const()[name = tensor("op_9408"), val = tensor([1, 1])]; + tensor k_189_pad_type_0 = const()[name = tensor("k_189_pad_type_0"), val = tensor("custom")]; + tensor k_189_pad_0 = const()[name = tensor("k_189_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(677080640))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(677899904))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_189_cast = conv(dilations = var_9408, groups = var_6865, pad = k_189_pad_0, pad_type = k_189_pad_type_0, strides = var_9406, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_379_cast)[name = tensor("k_189_cast")]; + tensor var_9412 = const()[name = tensor("op_9412"), val = tensor([1, 1])]; + tensor var_9414 = const()[name = tensor("op_9414"), val = tensor([1, 1])]; + tensor v_189_pad_type_0 = const()[name = tensor("v_189_pad_type_0"), val = tensor("custom")]; + tensor v_189_pad_0 = const()[name = tensor("v_189_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(677900032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(678719296))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_189_cast = conv(dilations = var_9414, groups = var_6865, pad = v_189_pad_0, pad_type = v_189_pad_type_0, strides = var_9412, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_379_cast)[name = tensor("v_189_cast")]; + tensor var_9418 = const()[name = tensor("op_9418"), val = tensor([2, 20, 64, -1])]; + tensor var_9419_cast = reshape(shape = var_9418, x = q_189_cast)[name = tensor("op_9419_cast")]; + tensor var_9420 = const()[name = tensor("op_9420"), val = tensor([2, 20, 64, -1])]; + tensor var_9421_cast = reshape(shape = var_9420, x = k_189_cast)[name = tensor("op_9421_cast")]; + tensor var_9422 = const()[name = tensor("op_9422"), val = tensor([2, 20, 64, -1])]; + tensor var_9423_cast = reshape(shape = var_9422, x = v_189_cast)[name = tensor("op_9423_cast")]; + tensor attn_weights_377_transpose_x_0 = const()[name = tensor("attn_weights_377_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_377_transpose_y_0 = const()[name = tensor("attn_weights_377_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_377_cast = matmul(transpose_x = attn_weights_377_transpose_x_0, transpose_y = attn_weights_377_transpose_y_0, x = var_9419_cast, y = var_9421_cast)[name = tensor("attn_weights_377_cast")]; + tensor attn_weights_379_cast = mul(x = attn_weights_377_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_379_cast")]; + tensor var_9427_cast = softmax(axis = var_6849, x = attn_weights_379_cast)[name = tensor("op_9427_cast")]; + tensor attn_189_transpose_x_0 = const()[name = tensor("attn_189_transpose_x_0"), val = tensor(false)]; + tensor attn_189_transpose_y_0 = const()[name = tensor("attn_189_transpose_y_0"), val = tensor(true)]; + tensor attn_189_cast = matmul(transpose_x = attn_189_transpose_x_0, transpose_y = attn_189_transpose_y_0, x = var_9423_cast, y = var_9427_cast)[name = tensor("attn_189_cast")]; + tensor var_9431 = const()[name = tensor("op_9431"), val = tensor([2, 1280, 1, -1])]; + tensor input_561_cast = reshape(shape = var_9431, x = attn_189_cast)[name = tensor("input_561_cast")]; + tensor var_9436 = const()[name = tensor("op_9436"), val = tensor([1, 1])]; + tensor var_9438 = const()[name = tensor("op_9438"), val = 
tensor([1, 1])]; + tensor var_9440_pad_type_0 = const()[name = tensor("op_9440_pad_type_0"), val = tensor("custom")]; + tensor var_9440_pad_0 = const()[name = tensor("op_9440_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(678719424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(679538688))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(679538816)))]; + tensor var_9440_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_9438, groups = var_6865, pad = var_9440_pad_0, pad_type = var_9440_pad_type_0, strides = var_9436, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_561_cast)[name = tensor("op_9440_cast")]; + tensor inputs_285_cast = add(x = var_9440_cast, y = inputs_283_cast)[name = tensor("inputs_285_cast")]; + tensor var_9444 = const()[name = tensor("op_9444"), val = tensor([1])]; + tensor channels_mean_285_cast = reduce_mean(axes = var_9444, keep_dims = var_6860, x = inputs_285_cast)[name = tensor("channels_mean_285_cast")]; + tensor zero_mean_285_cast = sub(x = inputs_285_cast, y = channels_mean_285_cast)[name = tensor("zero_mean_285_cast")]; + tensor zero_mean_sq_285_cast = mul(x = zero_mean_285_cast, y = zero_mean_285_cast)[name = tensor("zero_mean_sq_285_cast")]; + tensor var_9448 = const()[name = tensor("op_9448"), val = tensor([1])]; + tensor var_9449_cast = reduce_mean(axes = var_9448, keep_dims = var_6860, x = zero_mean_sq_285_cast)[name = tensor("op_9449_cast")]; + tensor var_9450_to_fp16 = const()[name = tensor("op_9450_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9451_cast = add(x = var_9449_cast, y = var_9450_to_fp16)[name = tensor("op_9451_cast")]; + tensor denom_285_epsilon_0_to_fp16 = const()[name = tensor("denom_285_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_285_cast = rsqrt(epsilon = denom_285_epsilon_0_to_fp16, x = var_9451_cast)[name = tensor("denom_285_cast")]; + tensor out_285_cast = mul(x = zero_mean_285_cast, y = denom_285_cast)[name = tensor("out_285_cast")]; + tensor var_9455_to_fp16 = const()[name = tensor("op_9455_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(679541440)))]; + tensor var_9456_cast = add(x = out_285_cast, y = var_9455_to_fp16)[name = tensor("op_9456_cast")]; + tensor var_9458_to_fp16 = const()[name = tensor("op_9458_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(679544064)))]; + tensor hidden_states_381_cast = mul(x = var_9456_cast, y = var_9458_to_fp16)[name = tensor("hidden_states_381_cast")]; + tensor var_9465 = const()[name = tensor("op_9465"), val = tensor([1, 1])]; + tensor var_9467 = const()[name = tensor("op_9467"), val = tensor([1, 1])]; + tensor q_191_pad_type_0 = const()[name = tensor("q_191_pad_type_0"), val = tensor("custom")]; + tensor q_191_pad_0 = const()[name = tensor("q_191_pad_0"), val 
= tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(679546688))), lut = tensor([-0x1.42cp-5, -0x1.84p-7, 0x1.774p-7, 0x1.3fp-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_191_cast = conv(dilations = var_9467, groups = var_6865, pad = q_191_pad_0, pad_type = q_191_pad_type_0, strides = var_9465, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_381_cast)[name = tensor("q_191_cast")]; + tensor var_9471 = const()[name = tensor("op_9471"), val = tensor([1, 1])]; + tensor var_9473 = const()[name = tensor("op_9473"), val = tensor([1, 1])]; + tensor k_191_pad_type_0 = const()[name = tensor("k_191_pad_type_0"), val = tensor("custom")]; + tensor k_191_pad_0 = const()[name = tensor("k_191_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(679956352))), lut = tensor([-0x1.1b4p-5, -0x1.4bp-7, 0x1.4d4p-7, 0x1.1cp-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_191_cast = conv(dilations = var_9473, groups = var_6865, pad = k_191_pad_0, pad_type = k_191_pad_type_0, strides = var_9471, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_191_cast")]; + tensor var_9477 = const()[name = tensor("op_9477"), val = tensor([1, 1])]; + tensor var_9479 = const()[name = tensor("op_9479"), val = tensor([1, 1])]; + tensor v_191_pad_type_0 = const()[name = tensor("v_191_pad_type_0"), val = tensor("custom")]; + tensor v_191_pad_0 = const()[name = tensor("v_191_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(680611776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(681922560))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_191_cast = conv(dilations = var_9479, groups = var_6865, pad = v_191_pad_0, pad_type = v_191_pad_type_0, strides = var_9477, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_191_cast")]; + tensor var_9483 = const()[name = tensor("op_9483"), val = tensor([2, 20, 64, -1])]; + tensor var_9484_cast = reshape(shape = var_9483, x = q_191_cast)[name = tensor("op_9484_cast")]; + tensor var_9485 = const()[name = tensor("op_9485"), val = tensor([2, 20, 64, -1])]; + tensor var_9486_cast = reshape(shape = var_9485, x = k_191_cast)[name = tensor("op_9486_cast")]; + tensor var_9487 = const()[name = tensor("op_9487"), val = tensor([2, 20, 64, -1])]; + tensor var_9488_cast = reshape(shape = var_9487, x = v_191_cast)[name = tensor("op_9488_cast")]; + tensor attn_weights_381_transpose_x_0 = const()[name = tensor("attn_weights_381_transpose_x_0"), val = tensor(true)]; 
+ tensor attn_weights_381_transpose_y_0 = const()[name = tensor("attn_weights_381_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_381_cast = matmul(transpose_x = attn_weights_381_transpose_x_0, transpose_y = attn_weights_381_transpose_y_0, x = var_9484_cast, y = var_9486_cast)[name = tensor("attn_weights_381_cast")]; + tensor attn_weights_383_cast = mul(x = attn_weights_381_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_383_cast")]; + tensor var_9492_cast = softmax(axis = var_6849, x = attn_weights_383_cast)[name = tensor("op_9492_cast")]; + tensor attn_191_transpose_x_0 = const()[name = tensor("attn_191_transpose_x_0"), val = tensor(false)]; + tensor attn_191_transpose_y_0 = const()[name = tensor("attn_191_transpose_y_0"), val = tensor(true)]; + tensor attn_191_cast = matmul(transpose_x = attn_191_transpose_x_0, transpose_y = attn_191_transpose_y_0, x = var_9488_cast, y = var_9492_cast)[name = tensor("attn_191_cast")]; + tensor var_9496 = const()[name = tensor("op_9496"), val = tensor([2, 1280, 1, -1])]; + tensor input_563_cast = reshape(shape = var_9496, x = attn_191_cast)[name = tensor("input_563_cast")]; + tensor var_9501 = const()[name = tensor("op_9501"), val = tensor([1, 1])]; + tensor var_9503 = const()[name = tensor("op_9503"), val = tensor([1, 1])]; + tensor var_9505_pad_type_0 = const()[name = tensor("op_9505_pad_type_0"), val = tensor("custom")]; + tensor var_9505_pad_0 = const()[name = tensor("op_9505_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(681922688))), lut = tensor([-0x1.47p-6, -0x1.864p-8, 0x1.868p-8, 0x1.474p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(682332352)))]; + tensor var_9505_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_9503, groups = var_6865, pad = var_9505_pad_0, pad_type = var_9505_pad_type_0, strides = var_9501, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_563_cast)[name = tensor("op_9505_cast")]; + tensor inputs_287_cast = add(x = var_9505_cast, y = inputs_285_cast)[name = tensor("inputs_287_cast")]; + tensor var_9509 = const()[name = tensor("op_9509"), val = tensor([1])]; + tensor channels_mean_287_cast = reduce_mean(axes = var_9509, keep_dims = var_6860, x = inputs_287_cast)[name = tensor("channels_mean_287_cast")]; + tensor zero_mean_287_cast = sub(x = inputs_287_cast, y = channels_mean_287_cast)[name = tensor("zero_mean_287_cast")]; + tensor zero_mean_sq_287_cast = mul(x = zero_mean_287_cast, y = zero_mean_287_cast)[name = tensor("zero_mean_sq_287_cast")]; + tensor var_9513 = const()[name = tensor("op_9513"), val = tensor([1])]; + tensor var_9514_cast = reduce_mean(axes = var_9513, keep_dims = var_6860, x = zero_mean_sq_287_cast)[name = tensor("op_9514_cast")]; + tensor var_9515_to_fp16 = const()[name = tensor("op_9515_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9516_cast = add(x = var_9514_cast, y = var_9515_to_fp16)[name = 
tensor("op_9516_cast")]; + tensor denom_287_epsilon_0_to_fp16 = const()[name = tensor("denom_287_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_287_cast = rsqrt(epsilon = denom_287_epsilon_0_to_fp16, x = var_9516_cast)[name = tensor("denom_287_cast")]; + tensor out_287_cast = mul(x = zero_mean_287_cast, y = denom_287_cast)[name = tensor("out_287_cast")]; + tensor var_9520_to_fp16 = const()[name = tensor("op_9520_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(682334976)))]; + tensor var_9521_cast = add(x = out_287_cast, y = var_9520_to_fp16)[name = tensor("op_9521_cast")]; + tensor var_9523_to_fp16 = const()[name = tensor("op_9523_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(682337600)))]; + tensor input_565_cast = mul(x = var_9521_cast, y = var_9523_to_fp16)[name = tensor("input_565_cast")]; + tensor var_9531 = const()[name = tensor("op_9531"), val = tensor([1, 1])]; + tensor var_9533 = const()[name = tensor("op_9533"), val = tensor([1, 1])]; + tensor var_9535_pad_type_0 = const()[name = tensor("op_9535_pad_type_0"), val = tensor("custom")]; + tensor var_9535_pad_0 = const()[name = tensor("op_9535_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(682340224))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(692170688))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(692170880)))]; + tensor var_9535_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_9533, groups = var_6865, pad = var_9535_pad_0, pad_type = var_9535_pad_type_0, strides = var_9531, weight = up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_565_cast)[name = tensor("op_9535_cast")]; + tensor var_9536_split_sizes_0 = const()[name = tensor("op_9536_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9536_axis_0 = const()[name = tensor("op_9536_axis_0"), val = tensor(1)]; + tensor var_9536_cast_0, tensor var_9536_cast_1 = split(axis = var_9536_axis_0, split_sizes = var_9536_split_sizes_0, x = var_9535_cast)[name = tensor("op_9536_cast")]; + tensor var_9538_mode_0 = const()[name = tensor("op_9538_mode_0"), val = tensor("EXACT")]; + tensor var_9538_cast = gelu(mode = var_9538_mode_0, x = var_9536_cast_1)[name = tensor("op_9538_cast")]; + tensor input_567_cast = mul(x = var_9536_cast_0, y = var_9538_cast)[name = tensor("input_567_cast")]; + tensor var_9542 = const()[name = tensor("op_9542"), val = tensor([1, 1])]; + tensor var_9544 = const()[name = tensor("op_9544"), val = tensor([1, 1])]; + tensor var_9546_pad_type_0 = const()[name = tensor("op_9546_pad_type_0"), val = tensor("custom")]; + tensor var_9546_pad_0 = const()[name = tensor("op_9546_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(692191424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(695468288))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(695468416)))]; + tensor var_9546_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_9544, groups = var_6865, pad = var_9546_pad_0, pad_type = var_9546_pad_type_0, strides = var_9542, weight = up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_567_cast)[name = tensor("op_9546_cast")]; + tensor inputs_289_cast = add(x = var_9546_cast, y = inputs_287_cast)[name = tensor("inputs_289_cast")]; + tensor var_9556 = const()[name = tensor("op_9556"), val = tensor([1])]; + tensor channels_mean_289_cast = reduce_mean(axes = var_9556, keep_dims = var_6860, x = inputs_289_cast)[name = tensor("channels_mean_289_cast")]; + tensor zero_mean_289_cast = sub(x = inputs_289_cast, y = channels_mean_289_cast)[name = tensor("zero_mean_289_cast")]; + tensor zero_mean_sq_289_cast = mul(x = zero_mean_289_cast, y = zero_mean_289_cast)[name = tensor("zero_mean_sq_289_cast")]; + tensor var_9560 = const()[name = tensor("op_9560"), val = tensor([1])]; + tensor var_9561_cast = reduce_mean(axes = var_9560, keep_dims = var_6860, x = zero_mean_sq_289_cast)[name = tensor("op_9561_cast")]; + tensor var_9562_to_fp16 = const()[name = tensor("op_9562_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9563_cast = add(x = var_9561_cast, y = var_9562_to_fp16)[name = tensor("op_9563_cast")]; + tensor denom_289_epsilon_0_to_fp16 = const()[name = tensor("denom_289_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_289_cast = rsqrt(epsilon = denom_289_epsilon_0_to_fp16, x = var_9563_cast)[name = tensor("denom_289_cast")]; + tensor out_289_cast = mul(x = zero_mean_289_cast, y = denom_289_cast)[name = tensor("out_289_cast")]; + tensor var_9567_to_fp16 = const()[name = tensor("op_9567_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(695471040)))]; + tensor var_9568_cast = add(x = out_289_cast, y = var_9567_to_fp16)[name = tensor("op_9568_cast")]; + tensor var_9570_to_fp16 = const()[name = tensor("op_9570_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(695473664)))]; + tensor hidden_states_385_cast = mul(x = var_9568_cast, y = var_9570_to_fp16)[name = tensor("hidden_states_385_cast")]; + tensor var_9577 = const()[name = tensor("op_9577"), val = tensor([1, 1])]; + tensor var_9579 = const()[name = tensor("op_9579"), val = tensor([1, 1])]; + tensor q_193_pad_type_0 = const()[name = tensor("q_193_pad_type_0"), val = tensor("custom")]; + tensor q_193_pad_0 = const()[name = tensor("q_193_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(695476288))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(696295552))), name = 
tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_193_cast = conv(dilations = var_9579, groups = var_6865, pad = q_193_pad_0, pad_type = q_193_pad_type_0, strides = var_9577, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_385_cast)[name = tensor("q_193_cast")]; + tensor var_9583 = const()[name = tensor("op_9583"), val = tensor([1, 1])]; + tensor var_9585 = const()[name = tensor("op_9585"), val = tensor([1, 1])]; + tensor k_193_pad_type_0 = const()[name = tensor("k_193_pad_type_0"), val = tensor("custom")]; + tensor k_193_pad_0 = const()[name = tensor("k_193_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(696295680))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(697114944))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_193_cast = conv(dilations = var_9585, groups = var_6865, pad = k_193_pad_0, pad_type = k_193_pad_type_0, strides = var_9583, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_385_cast)[name = tensor("k_193_cast")]; + tensor var_9589 = const()[name = tensor("op_9589"), val = tensor([1, 1])]; + tensor var_9591 = const()[name = tensor("op_9591"), val = tensor([1, 1])]; + tensor v_193_pad_type_0 = const()[name = tensor("v_193_pad_type_0"), val = tensor("custom")]; + tensor v_193_pad_0 = const()[name = tensor("v_193_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(697115072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(697934336))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_193_cast = conv(dilations = var_9591, groups = var_6865, pad = v_193_pad_0, pad_type = v_193_pad_type_0, strides = var_9589, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_385_cast)[name = tensor("v_193_cast")]; + tensor var_9595 = const()[name = tensor("op_9595"), val = tensor([2, 20, 64, -1])]; + tensor var_9596_cast = reshape(shape = var_9595, x = q_193_cast)[name = tensor("op_9596_cast")]; + tensor var_9597 = const()[name = tensor("op_9597"), val = tensor([2, 20, 64, -1])]; + tensor var_9598_cast = reshape(shape = var_9597, x = k_193_cast)[name = tensor("op_9598_cast")]; + tensor var_9599 = const()[name = tensor("op_9599"), val = tensor([2, 20, 64, -1])]; + tensor var_9600_cast = reshape(shape = var_9599, x = v_193_cast)[name = tensor("op_9600_cast")]; + tensor attn_weights_385_transpose_x_0 = const()[name = tensor("attn_weights_385_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_385_transpose_y_0 = const()[name = tensor("attn_weights_385_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_385_cast = matmul(transpose_x = attn_weights_385_transpose_x_0, transpose_y = attn_weights_385_transpose_y_0, x = var_9596_cast, y = 
var_9598_cast)[name = tensor("attn_weights_385_cast")]; + tensor attn_weights_387_cast = mul(x = attn_weights_385_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_387_cast")]; + tensor var_9604_cast = softmax(axis = var_6849, x = attn_weights_387_cast)[name = tensor("op_9604_cast")]; + tensor attn_193_transpose_x_0 = const()[name = tensor("attn_193_transpose_x_0"), val = tensor(false)]; + tensor attn_193_transpose_y_0 = const()[name = tensor("attn_193_transpose_y_0"), val = tensor(true)]; + tensor attn_193_cast = matmul(transpose_x = attn_193_transpose_x_0, transpose_y = attn_193_transpose_y_0, x = var_9600_cast, y = var_9604_cast)[name = tensor("attn_193_cast")]; + tensor var_9608 = const()[name = tensor("op_9608"), val = tensor([2, 1280, 1, -1])]; + tensor input_569_cast = reshape(shape = var_9608, x = attn_193_cast)[name = tensor("input_569_cast")]; + tensor var_9613 = const()[name = tensor("op_9613"), val = tensor([1, 1])]; + tensor var_9615 = const()[name = tensor("op_9615"), val = tensor([1, 1])]; + tensor var_9617_pad_type_0 = const()[name = tensor("op_9617_pad_type_0"), val = tensor("custom")]; + tensor var_9617_pad_0 = const()[name = tensor("op_9617_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(697934464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(698753728))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(698753856)))]; + tensor var_9617_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_9615, groups = var_6865, pad = var_9617_pad_0, pad_type = var_9617_pad_type_0, strides = var_9613, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_569_cast)[name = tensor("op_9617_cast")]; + tensor inputs_291_cast = add(x = var_9617_cast, y = inputs_289_cast)[name = tensor("inputs_291_cast")]; + tensor var_9621 = const()[name = tensor("op_9621"), val = tensor([1])]; + tensor channels_mean_291_cast = reduce_mean(axes = var_9621, keep_dims = var_6860, x = inputs_291_cast)[name = tensor("channels_mean_291_cast")]; + tensor zero_mean_291_cast = sub(x = inputs_291_cast, y = channels_mean_291_cast)[name = tensor("zero_mean_291_cast")]; + tensor zero_mean_sq_291_cast = mul(x = zero_mean_291_cast, y = zero_mean_291_cast)[name = tensor("zero_mean_sq_291_cast")]; + tensor var_9625 = const()[name = tensor("op_9625"), val = tensor([1])]; + tensor var_9626_cast = reduce_mean(axes = var_9625, keep_dims = var_6860, x = zero_mean_sq_291_cast)[name = tensor("op_9626_cast")]; + tensor var_9627_to_fp16 = const()[name = tensor("op_9627_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9628_cast = add(x = var_9626_cast, y = var_9627_to_fp16)[name = tensor("op_9628_cast")]; + tensor denom_291_epsilon_0_to_fp16 = const()[name = tensor("denom_291_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_291_cast = rsqrt(epsilon = denom_291_epsilon_0_to_fp16, x = var_9628_cast)[name = 
tensor("denom_291_cast")]; + tensor out_291_cast = mul(x = zero_mean_291_cast, y = denom_291_cast)[name = tensor("out_291_cast")]; + tensor var_9632_to_fp16 = const()[name = tensor("op_9632_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(698756480)))]; + tensor var_9633_cast = add(x = out_291_cast, y = var_9632_to_fp16)[name = tensor("op_9633_cast")]; + tensor var_9635_to_fp16 = const()[name = tensor("op_9635_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(698759104)))]; + tensor hidden_states_387_cast = mul(x = var_9633_cast, y = var_9635_to_fp16)[name = tensor("hidden_states_387_cast")]; + tensor var_9642 = const()[name = tensor("op_9642"), val = tensor([1, 1])]; + tensor var_9644 = const()[name = tensor("op_9644"), val = tensor([1, 1])]; + tensor q_195_pad_type_0 = const()[name = tensor("q_195_pad_type_0"), val = tensor("custom")]; + tensor q_195_pad_0 = const()[name = tensor("q_195_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(698761728))), lut = tensor([-0x1.264p-5, -0x1.604p-7, 0x1.658p-7, 0x1.278p-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_195_cast = conv(dilations = var_9644, groups = var_6865, pad = q_195_pad_0, pad_type = q_195_pad_type_0, strides = var_9642, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_387_cast)[name = tensor("q_195_cast")]; + tensor var_9648 = const()[name = tensor("op_9648"), val = tensor([1, 1])]; + tensor var_9650 = const()[name = tensor("op_9650"), val = tensor([1, 1])]; + tensor k_195_pad_type_0 = const()[name = tensor("k_195_pad_type_0"), val = tensor("custom")]; + tensor k_195_pad_0 = const()[name = tensor("k_195_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(699171392))), lut = tensor([-0x1.fa4p-6, -0x1.2dcp-7, 0x1.274p-7, 0x1.f7p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_195_cast = conv(dilations = var_9650, groups = var_6865, pad = k_195_pad_0, pad_type = k_195_pad_type_0, strides = var_9648, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_195_cast")]; + tensor var_9654 = const()[name = tensor("op_9654"), val = tensor([1, 1])]; + tensor var_9656 = const()[name = tensor("op_9656"), val = tensor([1, 1])]; + tensor v_195_pad_type_0 = const()[name = tensor("v_195_pad_type_0"), val = tensor("custom")]; + tensor v_195_pad_0 = const()[name = tensor("v_195_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(699826816))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(701137600))), name = 
tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_195_cast = conv(dilations = var_9656, groups = var_6865, pad = v_195_pad_0, pad_type = v_195_pad_type_0, strides = var_9654, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_195_cast")]; + tensor var_9660 = const()[name = tensor("op_9660"), val = tensor([2, 20, 64, -1])]; + tensor var_9661_cast = reshape(shape = var_9660, x = q_195_cast)[name = tensor("op_9661_cast")]; + tensor var_9662 = const()[name = tensor("op_9662"), val = tensor([2, 20, 64, -1])]; + tensor var_9663_cast = reshape(shape = var_9662, x = k_195_cast)[name = tensor("op_9663_cast")]; + tensor var_9664 = const()[name = tensor("op_9664"), val = tensor([2, 20, 64, -1])]; + tensor var_9665_cast = reshape(shape = var_9664, x = v_195_cast)[name = tensor("op_9665_cast")]; + tensor attn_weights_389_transpose_x_0 = const()[name = tensor("attn_weights_389_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_389_transpose_y_0 = const()[name = tensor("attn_weights_389_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_389_cast = matmul(transpose_x = attn_weights_389_transpose_x_0, transpose_y = attn_weights_389_transpose_y_0, x = var_9661_cast, y = var_9663_cast)[name = tensor("attn_weights_389_cast")]; + tensor attn_weights_391_cast = mul(x = attn_weights_389_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_391_cast")]; + tensor var_9669_cast = softmax(axis = var_6849, x = attn_weights_391_cast)[name = tensor("op_9669_cast")]; + tensor attn_195_transpose_x_0 = const()[name = tensor("attn_195_transpose_x_0"), val = tensor(false)]; + tensor attn_195_transpose_y_0 = const()[name = tensor("attn_195_transpose_y_0"), val = tensor(true)]; + tensor attn_195_cast = matmul(transpose_x = attn_195_transpose_x_0, transpose_y = attn_195_transpose_y_0, x = var_9665_cast, y = var_9669_cast)[name = tensor("attn_195_cast")]; + tensor var_9673 = const()[name = tensor("op_9673"), val = tensor([2, 1280, 1, -1])]; + tensor input_571_cast = reshape(shape = var_9673, x = attn_195_cast)[name = tensor("input_571_cast")]; + tensor var_9678 = const()[name = tensor("op_9678"), val = tensor([1, 1])]; + tensor var_9680 = const()[name = tensor("op_9680"), val = tensor([1, 1])]; + tensor var_9682_pad_type_0 = const()[name = tensor("op_9682_pad_type_0"), val = tensor("custom")]; + tensor var_9682_pad_0 = const()[name = tensor("op_9682_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(701137728))), lut = tensor([-0x1.3a8p-6, -0x1.79p-8, 0x1.798p-8, 0x1.3bp-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(701547392)))]; + tensor var_9682_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_9680, groups = var_6865, pad = var_9682_pad_0, pad_type = var_9682_pad_type_0, strides = var_9678, weight = 
up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_571_cast)[name = tensor("op_9682_cast")]; + tensor inputs_293_cast = add(x = var_9682_cast, y = inputs_291_cast)[name = tensor("inputs_293_cast")]; + tensor var_9686 = const()[name = tensor("op_9686"), val = tensor([1])]; + tensor channels_mean_293_cast = reduce_mean(axes = var_9686, keep_dims = var_6860, x = inputs_293_cast)[name = tensor("channels_mean_293_cast")]; + tensor zero_mean_293_cast = sub(x = inputs_293_cast, y = channels_mean_293_cast)[name = tensor("zero_mean_293_cast")]; + tensor zero_mean_sq_293_cast = mul(x = zero_mean_293_cast, y = zero_mean_293_cast)[name = tensor("zero_mean_sq_293_cast")]; + tensor var_9690 = const()[name = tensor("op_9690"), val = tensor([1])]; + tensor var_9691_cast = reduce_mean(axes = var_9690, keep_dims = var_6860, x = zero_mean_sq_293_cast)[name = tensor("op_9691_cast")]; + tensor var_9692_to_fp16 = const()[name = tensor("op_9692_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9693_cast = add(x = var_9691_cast, y = var_9692_to_fp16)[name = tensor("op_9693_cast")]; + tensor denom_293_epsilon_0_to_fp16 = const()[name = tensor("denom_293_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_293_cast = rsqrt(epsilon = denom_293_epsilon_0_to_fp16, x = var_9693_cast)[name = tensor("denom_293_cast")]; + tensor out_293_cast = mul(x = zero_mean_293_cast, y = denom_293_cast)[name = tensor("out_293_cast")]; + tensor var_9697_to_fp16 = const()[name = tensor("op_9697_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(701550016)))]; + tensor var_9698_cast = add(x = out_293_cast, y = var_9697_to_fp16)[name = tensor("op_9698_cast")]; + tensor var_9700_to_fp16 = const()[name = tensor("op_9700_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(701552640)))]; + tensor input_573_cast = mul(x = var_9698_cast, y = var_9700_to_fp16)[name = tensor("input_573_cast")]; + tensor var_9708 = const()[name = tensor("op_9708"), val = tensor([1, 1])]; + tensor var_9710 = const()[name = tensor("op_9710"), val = tensor([1, 1])]; + tensor var_9712_pad_type_0 = const()[name = tensor("op_9712_pad_type_0"), val = tensor("custom")]; + tensor var_9712_pad_0 = const()[name = tensor("op_9712_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(701555264))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(708108928))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(708109056)))]; + tensor var_9712_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_9710, groups = var_6865, pad = var_9712_pad_0, pad_type = var_9712_pad_type_0, strides = var_9708, weight = up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_573_cast)[name = tensor("op_9712_cast")]; + tensor var_9713_split_sizes_0 = const()[name = 
tensor("op_9713_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9713_axis_0 = const()[name = tensor("op_9713_axis_0"), val = tensor(1)]; + tensor var_9713_cast_0, tensor var_9713_cast_1 = split(axis = var_9713_axis_0, split_sizes = var_9713_split_sizes_0, x = var_9712_cast)[name = tensor("op_9713_cast")]; + tensor var_9715_mode_0 = const()[name = tensor("op_9715_mode_0"), val = tensor("EXACT")]; + tensor var_9715_cast = gelu(mode = var_9715_mode_0, x = var_9713_cast_1)[name = tensor("op_9715_cast")]; + tensor input_575_cast = mul(x = var_9713_cast_0, y = var_9715_cast)[name = tensor("input_575_cast")]; + tensor var_9719 = const()[name = tensor("op_9719"), val = tensor([1, 1])]; + tensor var_9721 = const()[name = tensor("op_9721"), val = tensor([1, 1])]; + tensor var_9723_pad_type_0 = const()[name = tensor("op_9723_pad_type_0"), val = tensor("custom")]; + tensor var_9723_pad_0 = const()[name = tensor("op_9723_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(708129600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(711406464))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(711406592)))]; + tensor var_9723_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_9721, groups = var_6865, pad = var_9723_pad_0, pad_type = var_9723_pad_type_0, strides = var_9719, weight = up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_575_cast)[name = tensor("op_9723_cast")]; + tensor inputs_295_cast = add(x = var_9723_cast, y = inputs_293_cast)[name = tensor("inputs_295_cast")]; + tensor var_9733 = const()[name = tensor("op_9733"), val = tensor([1])]; + tensor channels_mean_295_cast = reduce_mean(axes = var_9733, keep_dims = var_6860, x = inputs_295_cast)[name = tensor("channels_mean_295_cast")]; + tensor zero_mean_295_cast = sub(x = inputs_295_cast, y = channels_mean_295_cast)[name = tensor("zero_mean_295_cast")]; + tensor zero_mean_sq_295_cast = mul(x = zero_mean_295_cast, y = zero_mean_295_cast)[name = tensor("zero_mean_sq_295_cast")]; + tensor var_9737 = const()[name = tensor("op_9737"), val = tensor([1])]; + tensor var_9738_cast = reduce_mean(axes = var_9737, keep_dims = var_6860, x = zero_mean_sq_295_cast)[name = tensor("op_9738_cast")]; + tensor var_9739_to_fp16 = const()[name = tensor("op_9739_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9740_cast = add(x = var_9738_cast, y = var_9739_to_fp16)[name = tensor("op_9740_cast")]; + tensor denom_295_epsilon_0_to_fp16 = const()[name = tensor("denom_295_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_295_cast = rsqrt(epsilon = denom_295_epsilon_0_to_fp16, x = var_9740_cast)[name = tensor("denom_295_cast")]; + tensor out_295_cast = mul(x = zero_mean_295_cast, y = denom_295_cast)[name = tensor("out_295_cast")]; + tensor var_9744_to_fp16 = const()[name = tensor("op_9744_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(711409216)))]; + tensor var_9745_cast = add(x = out_295_cast, y = var_9744_to_fp16)[name = tensor("op_9745_cast")]; + tensor var_9747_to_fp16 = const()[name = tensor("op_9747_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(711411840)))]; + tensor hidden_states_391_cast = mul(x = var_9745_cast, y = var_9747_to_fp16)[name = tensor("hidden_states_391_cast")]; + tensor var_9754 = const()[name = tensor("op_9754"), val = tensor([1, 1])]; + tensor var_9756 = const()[name = tensor("op_9756"), val = tensor([1, 1])]; + tensor q_197_pad_type_0 = const()[name = tensor("q_197_pad_type_0"), val = tensor("custom")]; + tensor q_197_pad_0 = const()[name = tensor("q_197_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(711414464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(712233728))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_197_cast = conv(dilations = var_9756, groups = var_6865, pad = q_197_pad_0, pad_type = q_197_pad_type_0, strides = var_9754, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_391_cast)[name = tensor("q_197_cast")]; + tensor var_9760 = const()[name = tensor("op_9760"), val = tensor([1, 1])]; + tensor var_9762 = const()[name = tensor("op_9762"), val = tensor([1, 1])]; + tensor k_197_pad_type_0 = const()[name = tensor("k_197_pad_type_0"), val = tensor("custom")]; + tensor k_197_pad_0 = const()[name = tensor("k_197_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(712233856))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(713053120))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_197_cast = conv(dilations = var_9762, groups = var_6865, pad = k_197_pad_0, pad_type = k_197_pad_type_0, strides = var_9760, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_391_cast)[name = tensor("k_197_cast")]; + tensor var_9766 = const()[name = tensor("op_9766"), val = tensor([1, 1])]; + tensor var_9768 = const()[name = tensor("op_9768"), val = tensor([1, 1])]; + tensor v_197_pad_type_0 = const()[name = tensor("v_197_pad_type_0"), val = tensor("custom")]; + tensor v_197_pad_0 = const()[name = tensor("v_197_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(713053248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(713872512))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_197_cast = conv(dilations = var_9768, groups = var_6865, pad = v_197_pad_0, pad_type = v_197_pad_type_0, strides = var_9766, weight = 
up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_391_cast)[name = tensor("v_197_cast")]; + tensor var_9772 = const()[name = tensor("op_9772"), val = tensor([2, 20, 64, -1])]; + tensor var_9773_cast = reshape(shape = var_9772, x = q_197_cast)[name = tensor("op_9773_cast")]; + tensor var_9774 = const()[name = tensor("op_9774"), val = tensor([2, 20, 64, -1])]; + tensor var_9775_cast = reshape(shape = var_9774, x = k_197_cast)[name = tensor("op_9775_cast")]; + tensor var_9776 = const()[name = tensor("op_9776"), val = tensor([2, 20, 64, -1])]; + tensor var_9777_cast = reshape(shape = var_9776, x = v_197_cast)[name = tensor("op_9777_cast")]; + tensor attn_weights_393_transpose_x_0 = const()[name = tensor("attn_weights_393_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_393_transpose_y_0 = const()[name = tensor("attn_weights_393_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_393_cast = matmul(transpose_x = attn_weights_393_transpose_x_0, transpose_y = attn_weights_393_transpose_y_0, x = var_9773_cast, y = var_9775_cast)[name = tensor("attn_weights_393_cast")]; + tensor attn_weights_395_cast = mul(x = attn_weights_393_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_395_cast")]; + tensor var_9781_cast = softmax(axis = var_6849, x = attn_weights_395_cast)[name = tensor("op_9781_cast")]; + tensor attn_197_transpose_x_0 = const()[name = tensor("attn_197_transpose_x_0"), val = tensor(false)]; + tensor attn_197_transpose_y_0 = const()[name = tensor("attn_197_transpose_y_0"), val = tensor(true)]; + tensor attn_197_cast = matmul(transpose_x = attn_197_transpose_x_0, transpose_y = attn_197_transpose_y_0, x = var_9777_cast, y = var_9781_cast)[name = tensor("attn_197_cast")]; + tensor var_9785 = const()[name = tensor("op_9785"), val = tensor([2, 1280, 1, -1])]; + tensor input_577_cast = reshape(shape = var_9785, x = attn_197_cast)[name = tensor("input_577_cast")]; + tensor var_9790 = const()[name = tensor("op_9790"), val = tensor([1, 1])]; + tensor var_9792 = const()[name = tensor("op_9792"), val = tensor([1, 1])]; + tensor var_9794_pad_type_0 = const()[name = tensor("op_9794_pad_type_0"), val = tensor("custom")]; + tensor var_9794_pad_0 = const()[name = tensor("op_9794_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(713872640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(714691904))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(714692032)))]; + tensor var_9794_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_9792, groups = var_6865, pad = var_9794_pad_0, pad_type = var_9794_pad_type_0, strides = var_9790, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_577_cast)[name = tensor("op_9794_cast")]; + tensor inputs_297_cast = add(x = var_9794_cast, y = inputs_295_cast)[name = tensor("inputs_297_cast")]; 
+ tensor var_9798 = const()[name = tensor("op_9798"), val = tensor([1])]; + tensor channels_mean_297_cast = reduce_mean(axes = var_9798, keep_dims = var_6860, x = inputs_297_cast)[name = tensor("channels_mean_297_cast")]; + tensor zero_mean_297_cast = sub(x = inputs_297_cast, y = channels_mean_297_cast)[name = tensor("zero_mean_297_cast")]; + tensor zero_mean_sq_297_cast = mul(x = zero_mean_297_cast, y = zero_mean_297_cast)[name = tensor("zero_mean_sq_297_cast")]; + tensor var_9802 = const()[name = tensor("op_9802"), val = tensor([1])]; + tensor var_9803_cast = reduce_mean(axes = var_9802, keep_dims = var_6860, x = zero_mean_sq_297_cast)[name = tensor("op_9803_cast")]; + tensor var_9804_to_fp16 = const()[name = tensor("op_9804_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9805_cast = add(x = var_9803_cast, y = var_9804_to_fp16)[name = tensor("op_9805_cast")]; + tensor denom_297_epsilon_0_to_fp16 = const()[name = tensor("denom_297_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_297_cast = rsqrt(epsilon = denom_297_epsilon_0_to_fp16, x = var_9805_cast)[name = tensor("denom_297_cast")]; + tensor out_297_cast = mul(x = zero_mean_297_cast, y = denom_297_cast)[name = tensor("out_297_cast")]; + tensor var_9809_to_fp16 = const()[name = tensor("op_9809_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(714694656)))]; + tensor var_9810_cast = add(x = out_297_cast, y = var_9809_to_fp16)[name = tensor("op_9810_cast")]; + tensor var_9812_to_fp16 = const()[name = tensor("op_9812_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(714697280)))]; + tensor hidden_states_393_cast = mul(x = var_9810_cast, y = var_9812_to_fp16)[name = tensor("hidden_states_393_cast")]; + tensor var_9819 = const()[name = tensor("op_9819"), val = tensor([1, 1])]; + tensor var_9821 = const()[name = tensor("op_9821"), val = tensor([1, 1])]; + tensor q_199_pad_type_0 = const()[name = tensor("q_199_pad_type_0"), val = tensor("custom")]; + tensor q_199_pad_0 = const()[name = tensor("q_199_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(714699904))), lut = tensor([-0x1.164p-5, -0x1.4ecp-7, 0x1.5p-7, 0x1.16p-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_199_cast = conv(dilations = var_9821, groups = var_6865, pad = q_199_pad_0, pad_type = q_199_pad_type_0, strides = var_9819, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_393_cast)[name = tensor("q_199_cast")]; + tensor var_9825 = const()[name = tensor("op_9825"), val = tensor([1, 1])]; + tensor var_9827 = const()[name = tensor("op_9827"), val = tensor([1, 1])]; + tensor k_199_pad_type_0 = const()[name = tensor("k_199_pad_type_0"), val = tensor("custom")]; + tensor k_199_pad_0 = const()[name = tensor("k_199_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(715109568))), lut = tensor([-0x1.cfcp-6, -0x1.12p-7, 0x1.0f8p-7, 0x1.cep-6]), name = 
tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_199_cast = conv(dilations = var_9827, groups = var_6865, pad = k_199_pad_0, pad_type = k_199_pad_type_0, strides = var_9825, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_199_cast")]; + tensor var_9831 = const()[name = tensor("op_9831"), val = tensor([1, 1])]; + tensor var_9833 = const()[name = tensor("op_9833"), val = tensor([1, 1])]; + tensor v_199_pad_type_0 = const()[name = tensor("v_199_pad_type_0"), val = tensor("custom")]; + tensor v_199_pad_0 = const()[name = tensor("v_199_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(715764992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717075776))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_199_cast = conv(dilations = var_9833, groups = var_6865, pad = v_199_pad_0, pad_type = v_199_pad_type_0, strides = var_9831, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_199_cast")]; + tensor var_9837 = const()[name = tensor("op_9837"), val = tensor([2, 20, 64, -1])]; + tensor var_9838_cast = reshape(shape = var_9837, x = q_199_cast)[name = tensor("op_9838_cast")]; + tensor var_9839 = const()[name = tensor("op_9839"), val = tensor([2, 20, 64, -1])]; + tensor var_9840_cast = reshape(shape = var_9839, x = k_199_cast)[name = tensor("op_9840_cast")]; + tensor var_9841 = const()[name = tensor("op_9841"), val = tensor([2, 20, 64, -1])]; + tensor var_9842_cast = reshape(shape = var_9841, x = v_199_cast)[name = tensor("op_9842_cast")]; + tensor attn_weights_397_transpose_x_0 = const()[name = tensor("attn_weights_397_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_397_transpose_y_0 = const()[name = tensor("attn_weights_397_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_397_cast = matmul(transpose_x = attn_weights_397_transpose_x_0, transpose_y = attn_weights_397_transpose_y_0, x = var_9838_cast, y = var_9840_cast)[name = tensor("attn_weights_397_cast")]; + tensor attn_weights_399_cast = mul(x = attn_weights_397_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_399_cast")]; + tensor var_9846_cast = softmax(axis = var_6849, x = attn_weights_399_cast)[name = tensor("op_9846_cast")]; + tensor attn_199_transpose_x_0 = const()[name = tensor("attn_199_transpose_x_0"), val = tensor(false)]; + tensor attn_199_transpose_y_0 = const()[name = tensor("attn_199_transpose_y_0"), val = tensor(true)]; + tensor attn_199_cast = matmul(transpose_x = attn_199_transpose_x_0, transpose_y = attn_199_transpose_y_0, x = var_9842_cast, y = var_9846_cast)[name = tensor("attn_199_cast")]; + tensor var_9850 = const()[name = tensor("op_9850"), val = tensor([2, 1280, 1, -1])]; + tensor input_579_cast = reshape(shape = var_9850, x = attn_199_cast)[name = tensor("input_579_cast")]; + tensor var_9855 = const()[name = tensor("op_9855"), val = tensor([1, 1])]; + tensor var_9857 = const()[name = tensor("op_9857"), val = tensor([1, 1])]; + tensor var_9859_pad_type_0 = const()[name = 
tensor("op_9859_pad_type_0"), val = tensor("custom")]; + tensor var_9859_pad_0 = const()[name = tensor("op_9859_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717075904))), lut = tensor([-0x1.2b8p-6, -0x1.664p-8, 0x1.644p-8, 0x1.2b4p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717485568)))]; + tensor var_9859_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_9857, groups = var_6865, pad = var_9859_pad_0, pad_type = var_9859_pad_type_0, strides = var_9855, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_579_cast)[name = tensor("op_9859_cast")]; + tensor inputs_299_cast = add(x = var_9859_cast, y = inputs_297_cast)[name = tensor("inputs_299_cast")]; + tensor var_9863 = const()[name = tensor("op_9863"), val = tensor([1])]; + tensor channels_mean_299_cast = reduce_mean(axes = var_9863, keep_dims = var_6860, x = inputs_299_cast)[name = tensor("channels_mean_299_cast")]; + tensor zero_mean_299_cast = sub(x = inputs_299_cast, y = channels_mean_299_cast)[name = tensor("zero_mean_299_cast")]; + tensor zero_mean_sq_299_cast = mul(x = zero_mean_299_cast, y = zero_mean_299_cast)[name = tensor("zero_mean_sq_299_cast")]; + tensor var_9867 = const()[name = tensor("op_9867"), val = tensor([1])]; + tensor var_9868_cast = reduce_mean(axes = var_9867, keep_dims = var_6860, x = zero_mean_sq_299_cast)[name = tensor("op_9868_cast")]; + tensor var_9869_to_fp16 = const()[name = tensor("op_9869_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9870_cast = add(x = var_9868_cast, y = var_9869_to_fp16)[name = tensor("op_9870_cast")]; + tensor denom_299_epsilon_0_to_fp16 = const()[name = tensor("denom_299_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_299_cast = rsqrt(epsilon = denom_299_epsilon_0_to_fp16, x = var_9870_cast)[name = tensor("denom_299_cast")]; + tensor out_299_cast = mul(x = zero_mean_299_cast, y = denom_299_cast)[name = tensor("out_299_cast")]; + tensor var_9874_to_fp16 = const()[name = tensor("op_9874_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717488192)))]; + tensor var_9875_cast = add(x = out_299_cast, y = var_9874_to_fp16)[name = tensor("op_9875_cast")]; + tensor var_9877_to_fp16 = const()[name = tensor("op_9877_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717490816)))]; + tensor input_581_cast = mul(x = var_9875_cast, y = var_9877_to_fp16)[name = tensor("input_581_cast")]; + tensor var_9885 = const()[name = tensor("op_9885"), val = tensor([1, 1])]; + tensor var_9887 = const()[name = tensor("op_9887"), val = tensor([1, 1])]; + tensor var_9889_pad_type_0 = const()[name = tensor("op_9889_pad_type_0"), val = tensor("custom")]; + tensor var_9889_pad_0 = const()[name = tensor("op_9889_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717493440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(724047104))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(724047232)))]; + tensor var_9889_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_9887, groups = var_6865, pad = var_9889_pad_0, pad_type = var_9889_pad_type_0, strides = var_9885, weight = up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_581_cast)[name = tensor("op_9889_cast")]; + tensor var_9890_split_sizes_0 = const()[name = tensor("op_9890_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9890_axis_0 = const()[name = tensor("op_9890_axis_0"), val = tensor(1)]; + tensor var_9890_cast_0, tensor var_9890_cast_1 = split(axis = var_9890_axis_0, split_sizes = var_9890_split_sizes_0, x = var_9889_cast)[name = tensor("op_9890_cast")]; + tensor var_9892_mode_0 = const()[name = tensor("op_9892_mode_0"), val = tensor("EXACT")]; + tensor var_9892_cast = gelu(mode = var_9892_mode_0, x = var_9890_cast_1)[name = tensor("op_9892_cast")]; + tensor input_583_cast = mul(x = var_9890_cast_0, y = var_9892_cast)[name = tensor("input_583_cast")]; + tensor var_9896 = const()[name = tensor("op_9896"), val = tensor([1, 1])]; + tensor var_9898 = const()[name = tensor("op_9898"), val = tensor([1, 1])]; + tensor var_9900_pad_type_0 = const()[name = tensor("op_9900_pad_type_0"), val = tensor("custom")]; + tensor var_9900_pad_0 = const()[name = tensor("op_9900_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(724067776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(727344640))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(727344768)))]; + tensor var_9900_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_9898, groups = var_6865, pad = var_9900_pad_0, pad_type = var_9900_pad_type_0, strides = var_9896, weight = up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_583_cast)[name = tensor("op_9900_cast")]; + tensor inputs_301_cast = add(x = var_9900_cast, y = inputs_299_cast)[name = tensor("inputs_301_cast")]; + tensor var_9910 = const()[name = tensor("op_9910"), val = tensor([1])]; + tensor channels_mean_301_cast = reduce_mean(axes = var_9910, keep_dims = var_6860, x = inputs_301_cast)[name = 
tensor("channels_mean_301_cast")]; + tensor zero_mean_301_cast = sub(x = inputs_301_cast, y = channels_mean_301_cast)[name = tensor("zero_mean_301_cast")]; + tensor zero_mean_sq_301_cast = mul(x = zero_mean_301_cast, y = zero_mean_301_cast)[name = tensor("zero_mean_sq_301_cast")]; + tensor var_9914 = const()[name = tensor("op_9914"), val = tensor([1])]; + tensor var_9915_cast = reduce_mean(axes = var_9914, keep_dims = var_6860, x = zero_mean_sq_301_cast)[name = tensor("op_9915_cast")]; + tensor var_9916_to_fp16 = const()[name = tensor("op_9916_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9917_cast = add(x = var_9915_cast, y = var_9916_to_fp16)[name = tensor("op_9917_cast")]; + tensor denom_301_epsilon_0_to_fp16 = const()[name = tensor("denom_301_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_301_cast = rsqrt(epsilon = denom_301_epsilon_0_to_fp16, x = var_9917_cast)[name = tensor("denom_301_cast")]; + tensor out_301_cast = mul(x = zero_mean_301_cast, y = denom_301_cast)[name = tensor("out_301_cast")]; + tensor var_9921_to_fp16 = const()[name = tensor("op_9921_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(727347392)))]; + tensor var_9922_cast = add(x = out_301_cast, y = var_9921_to_fp16)[name = tensor("op_9922_cast")]; + tensor var_9924_to_fp16 = const()[name = tensor("op_9924_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(727350016)))]; + tensor hidden_states_397_cast = mul(x = var_9922_cast, y = var_9924_to_fp16)[name = tensor("hidden_states_397_cast")]; + tensor var_9931 = const()[name = tensor("op_9931"), val = tensor([1, 1])]; + tensor var_9933 = const()[name = tensor("op_9933"), val = tensor([1, 1])]; + tensor q_201_pad_type_0 = const()[name = tensor("q_201_pad_type_0"), val = tensor("custom")]; + tensor q_201_pad_0 = const()[name = tensor("q_201_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(727352640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(728171904))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_201_cast = conv(dilations = var_9933, groups = var_6865, pad = q_201_pad_0, pad_type = q_201_pad_type_0, strides = var_9931, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_397_cast)[name = tensor("q_201_cast")]; + tensor var_9937 = const()[name = tensor("op_9937"), val = tensor([1, 1])]; + tensor var_9939 = const()[name = tensor("op_9939"), val = tensor([1, 1])]; + tensor k_201_pad_type_0 = const()[name = tensor("k_201_pad_type_0"), val = tensor("custom")]; + tensor k_201_pad_0 = const()[name = tensor("k_201_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(728172032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(728991296))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_201_cast = conv(dilations = var_9939, groups = 
var_6865, pad = k_201_pad_0, pad_type = k_201_pad_type_0, strides = var_9937, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_397_cast)[name = tensor("k_201_cast")]; + tensor var_9943 = const()[name = tensor("op_9943"), val = tensor([1, 1])]; + tensor var_9945 = const()[name = tensor("op_9945"), val = tensor([1, 1])]; + tensor v_201_pad_type_0 = const()[name = tensor("v_201_pad_type_0"), val = tensor("custom")]; + tensor v_201_pad_0 = const()[name = tensor("v_201_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(728991424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(729810688))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_201_cast = conv(dilations = var_9945, groups = var_6865, pad = v_201_pad_0, pad_type = v_201_pad_type_0, strides = var_9943, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_397_cast)[name = tensor("v_201_cast")]; + tensor var_9949 = const()[name = tensor("op_9949"), val = tensor([2, 20, 64, -1])]; + tensor var_9950_cast = reshape(shape = var_9949, x = q_201_cast)[name = tensor("op_9950_cast")]; + tensor var_9951 = const()[name = tensor("op_9951"), val = tensor([2, 20, 64, -1])]; + tensor var_9952_cast = reshape(shape = var_9951, x = k_201_cast)[name = tensor("op_9952_cast")]; + tensor var_9953 = const()[name = tensor("op_9953"), val = tensor([2, 20, 64, -1])]; + tensor var_9954_cast = reshape(shape = var_9953, x = v_201_cast)[name = tensor("op_9954_cast")]; + tensor attn_weights_401_transpose_x_0 = const()[name = tensor("attn_weights_401_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_401_transpose_y_0 = const()[name = tensor("attn_weights_401_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_401_cast = matmul(transpose_x = attn_weights_401_transpose_x_0, transpose_y = attn_weights_401_transpose_y_0, x = var_9950_cast, y = var_9952_cast)[name = tensor("attn_weights_401_cast")]; + tensor attn_weights_403_cast = mul(x = attn_weights_401_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_403_cast")]; + tensor var_9958_cast = softmax(axis = var_6849, x = attn_weights_403_cast)[name = tensor("op_9958_cast")]; + tensor attn_201_transpose_x_0 = const()[name = tensor("attn_201_transpose_x_0"), val = tensor(false)]; + tensor attn_201_transpose_y_0 = const()[name = tensor("attn_201_transpose_y_0"), val = tensor(true)]; + tensor attn_201_cast = matmul(transpose_x = attn_201_transpose_x_0, transpose_y = attn_201_transpose_y_0, x = var_9954_cast, y = var_9958_cast)[name = tensor("attn_201_cast")]; + tensor var_9962 = const()[name = tensor("op_9962"), val = tensor([2, 1280, 1, -1])]; + tensor input_585_cast = reshape(shape = var_9962, x = attn_201_cast)[name = tensor("input_585_cast")]; + tensor var_9967 = const()[name = tensor("op_9967"), val = tensor([1, 1])]; + tensor var_9969 = const()[name = tensor("op_9969"), val = tensor([1, 1])]; + tensor var_9971_pad_type_0 = const()[name = tensor("op_9971_pad_type_0"), val = tensor("custom")]; + tensor var_9971_pad_0 = const()[name = tensor("op_9971_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(729810816))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(730630080))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(730630208)))]; + tensor var_9971_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_9969, groups = var_6865, pad = var_9971_pad_0, pad_type = var_9971_pad_type_0, strides = var_9967, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_585_cast)[name = tensor("op_9971_cast")]; + tensor inputs_303_cast = add(x = var_9971_cast, y = inputs_301_cast)[name = tensor("inputs_303_cast")]; + tensor var_9975 = const()[name = tensor("op_9975"), val = tensor([1])]; + tensor channels_mean_303_cast = reduce_mean(axes = var_9975, keep_dims = var_6860, x = inputs_303_cast)[name = tensor("channels_mean_303_cast")]; + tensor zero_mean_303_cast = sub(x = inputs_303_cast, y = channels_mean_303_cast)[name = tensor("zero_mean_303_cast")]; + tensor zero_mean_sq_303_cast = mul(x = zero_mean_303_cast, y = zero_mean_303_cast)[name = tensor("zero_mean_sq_303_cast")]; + tensor var_9979 = const()[name = tensor("op_9979"), val = tensor([1])]; + tensor var_9980_cast = reduce_mean(axes = var_9979, keep_dims = var_6860, x = zero_mean_sq_303_cast)[name = tensor("op_9980_cast")]; + tensor var_9981_to_fp16 = const()[name = tensor("op_9981_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9982_cast = add(x = var_9980_cast, y = var_9981_to_fp16)[name = tensor("op_9982_cast")]; + tensor denom_303_epsilon_0_to_fp16 = const()[name = tensor("denom_303_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_303_cast = rsqrt(epsilon = denom_303_epsilon_0_to_fp16, x = var_9982_cast)[name = tensor("denom_303_cast")]; + tensor out_303_cast = mul(x = zero_mean_303_cast, y = denom_303_cast)[name = tensor("out_303_cast")]; + tensor var_9986_to_fp16 = const()[name = tensor("op_9986_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(730632832)))]; + tensor var_9987_cast = add(x = out_303_cast, y = var_9986_to_fp16)[name = tensor("op_9987_cast")]; + tensor var_9989_to_fp16 = const()[name = tensor("op_9989_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(730635456)))]; + tensor hidden_states_399_cast = mul(x = var_9987_cast, y = var_9989_to_fp16)[name = tensor("hidden_states_399_cast")]; + tensor var_9996 = const()[name = tensor("op_9996"), val = tensor([1, 1])]; + tensor var_9998 = const()[name = tensor("op_9998"), val = tensor([1, 1])]; + tensor q_203_pad_type_0 = const()[name = tensor("q_203_pad_type_0"), val = tensor("custom")]; + tensor q_203_pad_0 = const()[name = tensor("q_203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(730638080))), lut = tensor([-0x1.048p-5, -0x1.3ap-7, 0x1.3cp-7, 0x1.05p-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_203_cast = conv(dilations = var_9998, groups = var_6865, pad = q_203_pad_0, pad_type = q_203_pad_type_0, strides = var_9996, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_399_cast)[name = tensor("q_203_cast")]; + tensor var_10002 = const()[name = tensor("op_10002"), val = tensor([1, 1])]; + tensor var_10004 = const()[name = tensor("op_10004"), val = tensor([1, 1])]; + tensor k_203_pad_type_0 = const()[name = tensor("k_203_pad_type_0"), val = tensor("custom")]; + tensor k_203_pad_0 = const()[name = tensor("k_203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(731047744))), lut = tensor([-0x1.ac4p-6, -0x1.f84p-8, 0x1.f88p-8, 0x1.ac4p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_203_cast = conv(dilations = var_10004, groups = var_6865, pad = k_203_pad_0, pad_type = k_203_pad_type_0, strides = var_10002, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_203_cast")]; + tensor var_10008 = const()[name = tensor("op_10008"), val = tensor([1, 1])]; + tensor var_10010 = const()[name = tensor("op_10010"), val = tensor([1, 1])]; + tensor v_203_pad_type_0 = const()[name = tensor("v_203_pad_type_0"), val = tensor("custom")]; + tensor v_203_pad_0 = const()[name = tensor("v_203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(731703168))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(733013952))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_203_cast = conv(dilations = var_10010, groups = var_6865, pad = v_203_pad_0, pad_type = v_203_pad_type_0, strides = var_10008, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_203_cast")]; + tensor var_10014 = const()[name = tensor("op_10014"), val = tensor([2, 20, 64, -1])]; + tensor var_10015_cast = reshape(shape = var_10014, x = q_203_cast)[name = tensor("op_10015_cast")]; + tensor var_10016 = const()[name = tensor("op_10016"), val = tensor([2, 20, 64, -1])]; + tensor var_10017_cast = reshape(shape = var_10016, x = k_203_cast)[name = tensor("op_10017_cast")]; + tensor var_10018 = const()[name = tensor("op_10018"), val = tensor([2, 20, 64, -1])]; + tensor var_10019_cast = reshape(shape = var_10018, x = v_203_cast)[name = tensor("op_10019_cast")]; + tensor attn_weights_405_transpose_x_0 = const()[name = tensor("attn_weights_405_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_405_transpose_y_0 = const()[name = tensor("attn_weights_405_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_405_cast = matmul(transpose_x = 
attn_weights_405_transpose_x_0, transpose_y = attn_weights_405_transpose_y_0, x = var_10015_cast, y = var_10017_cast)[name = tensor("attn_weights_405_cast")]; + tensor attn_weights_407_cast = mul(x = attn_weights_405_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_407_cast")]; + tensor var_10023_cast = softmax(axis = var_6849, x = attn_weights_407_cast)[name = tensor("op_10023_cast")]; + tensor attn_203_transpose_x_0 = const()[name = tensor("attn_203_transpose_x_0"), val = tensor(false)]; + tensor attn_203_transpose_y_0 = const()[name = tensor("attn_203_transpose_y_0"), val = tensor(true)]; + tensor attn_203_cast = matmul(transpose_x = attn_203_transpose_x_0, transpose_y = attn_203_transpose_y_0, x = var_10019_cast, y = var_10023_cast)[name = tensor("attn_203_cast")]; + tensor var_10027 = const()[name = tensor("op_10027"), val = tensor([2, 1280, 1, -1])]; + tensor input_587_cast = reshape(shape = var_10027, x = attn_203_cast)[name = tensor("input_587_cast")]; + tensor var_10032 = const()[name = tensor("op_10032"), val = tensor([1, 1])]; + tensor var_10034 = const()[name = tensor("op_10034"), val = tensor([1, 1])]; + tensor var_10036_pad_type_0 = const()[name = tensor("op_10036_pad_type_0"), val = tensor("custom")]; + tensor var_10036_pad_0 = const()[name = tensor("op_10036_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(733014080))), lut = tensor([-0x1.1bcp-6, -0x1.55p-8, 0x1.52p-8, 0x1.1acp-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(733423744)))]; + tensor var_10036_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_10034, groups = var_6865, pad = var_10036_pad_0, pad_type = var_10036_pad_type_0, strides = var_10032, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_587_cast)[name = tensor("op_10036_cast")]; + tensor inputs_305_cast = add(x = var_10036_cast, y = inputs_303_cast)[name = tensor("inputs_305_cast")]; + tensor var_10040 = const()[name = tensor("op_10040"), val = tensor([1])]; + tensor channels_mean_305_cast = reduce_mean(axes = var_10040, keep_dims = var_6860, x = inputs_305_cast)[name = tensor("channels_mean_305_cast")]; + tensor zero_mean_305_cast = sub(x = inputs_305_cast, y = channels_mean_305_cast)[name = tensor("zero_mean_305_cast")]; + tensor zero_mean_sq_305_cast = mul(x = zero_mean_305_cast, y = zero_mean_305_cast)[name = tensor("zero_mean_sq_305_cast")]; + tensor var_10044 = const()[name = tensor("op_10044"), val = tensor([1])]; + tensor var_10045_cast = reduce_mean(axes = var_10044, keep_dims = var_6860, x = zero_mean_sq_305_cast)[name = tensor("op_10045_cast")]; + tensor var_10046_to_fp16 = const()[name = tensor("op_10046_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10047_cast = add(x = var_10045_cast, y = var_10046_to_fp16)[name = tensor("op_10047_cast")]; + tensor denom_305_epsilon_0_to_fp16 = const()[name = tensor("denom_305_epsilon_0_to_fp16"), val = 
tensor(0x1p-24)]; + tensor denom_305_cast = rsqrt(epsilon = denom_305_epsilon_0_to_fp16, x = var_10047_cast)[name = tensor("denom_305_cast")]; + tensor out_305_cast = mul(x = zero_mean_305_cast, y = denom_305_cast)[name = tensor("out_305_cast")]; + tensor var_10051_to_fp16 = const()[name = tensor("op_10051_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(733426368)))]; + tensor var_10052_cast = add(x = out_305_cast, y = var_10051_to_fp16)[name = tensor("op_10052_cast")]; + tensor var_10054_to_fp16 = const()[name = tensor("op_10054_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(733428992)))]; + tensor input_589_cast = mul(x = var_10052_cast, y = var_10054_to_fp16)[name = tensor("input_589_cast")]; + tensor var_10062 = const()[name = tensor("op_10062"), val = tensor([1, 1])]; + tensor var_10064 = const()[name = tensor("op_10064"), val = tensor([1, 1])]; + tensor var_10066_pad_type_0 = const()[name = tensor("op_10066_pad_type_0"), val = tensor("custom")]; + tensor var_10066_pad_0 = const()[name = tensor("op_10066_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(733431616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(743262080))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(743262272)))]; + tensor var_10066_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_10064, groups = var_6865, pad = var_10066_pad_0, pad_type = var_10066_pad_type_0, strides = var_10062, weight = up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_589_cast)[name = tensor("op_10066_cast")]; + tensor var_10067_split_sizes_0 = const()[name = tensor("op_10067_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10067_axis_0 = const()[name = tensor("op_10067_axis_0"), val = tensor(1)]; + tensor var_10067_cast_0, tensor var_10067_cast_1 = split(axis = var_10067_axis_0, split_sizes = var_10067_split_sizes_0, x = var_10066_cast)[name = tensor("op_10067_cast")]; + tensor var_10069_mode_0 = const()[name = tensor("op_10069_mode_0"), val = tensor("EXACT")]; + tensor var_10069_cast = gelu(mode = var_10069_mode_0, x = var_10067_cast_1)[name = tensor("op_10069_cast")]; + tensor input_591_cast = mul(x = var_10067_cast_0, y = var_10069_cast)[name = tensor("input_591_cast")]; + tensor var_10073 = const()[name = tensor("op_10073"), val = tensor([1, 1])]; + tensor var_10075 = const()[name = tensor("op_10075"), val = tensor([1, 1])]; + tensor var_10077_pad_type_0 = const()[name = tensor("op_10077_pad_type_0"), val = tensor("custom")]; + tensor var_10077_pad_0 = const()[name = tensor("op_10077_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(743282816))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(746559680))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(746559808)))]; + tensor var_10077_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_10075, groups = var_6865, pad = var_10077_pad_0, pad_type = var_10077_pad_type_0, strides = var_10073, weight = up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_591_cast)[name = tensor("op_10077_cast")]; + tensor inputs_307_cast = add(x = var_10077_cast, y = inputs_305_cast)[name = tensor("inputs_307_cast")]; + tensor var_10087 = const()[name = tensor("op_10087"), val = tensor([1])]; + tensor channels_mean_307_cast = reduce_mean(axes = var_10087, keep_dims = var_6860, x = inputs_307_cast)[name = tensor("channels_mean_307_cast")]; + tensor zero_mean_307_cast = sub(x = inputs_307_cast, y = channels_mean_307_cast)[name = tensor("zero_mean_307_cast")]; + tensor zero_mean_sq_307_cast = mul(x = zero_mean_307_cast, y = zero_mean_307_cast)[name = tensor("zero_mean_sq_307_cast")]; + tensor var_10091 = const()[name = tensor("op_10091"), val = tensor([1])]; + tensor var_10092_cast = reduce_mean(axes = var_10091, keep_dims = var_6860, x = zero_mean_sq_307_cast)[name = tensor("op_10092_cast")]; + tensor var_10093_to_fp16 = const()[name = tensor("op_10093_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10094_cast = add(x = var_10092_cast, y = var_10093_to_fp16)[name = tensor("op_10094_cast")]; + tensor denom_307_epsilon_0_to_fp16 = const()[name = tensor("denom_307_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_307_cast = rsqrt(epsilon = denom_307_epsilon_0_to_fp16, x = var_10094_cast)[name = tensor("denom_307_cast")]; + tensor out_307_cast = mul(x = zero_mean_307_cast, y = denom_307_cast)[name = tensor("out_307_cast")]; + tensor var_10098_to_fp16 = const()[name = tensor("op_10098_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(746562432)))]; + tensor var_10099_cast = add(x = out_307_cast, y = var_10098_to_fp16)[name = tensor("op_10099_cast")]; + tensor var_10101_to_fp16 = const()[name = tensor("op_10101_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(746565056)))]; + tensor hidden_states_403_cast = mul(x = var_10099_cast, y = var_10101_to_fp16)[name = tensor("hidden_states_403_cast")]; + tensor var_10108 = const()[name = tensor("op_10108"), val = tensor([1, 1])]; + tensor var_10110 = const()[name = tensor("op_10110"), val = tensor([1, 1])]; + tensor q_205_pad_type_0 = const()[name = tensor("q_205_pad_type_0"), val = tensor("custom")]; + tensor q_205_pad_0 = const()[name = tensor("q_205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(746567680))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(747386944))), name = 
tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_205_cast = conv(dilations = var_10110, groups = var_6865, pad = q_205_pad_0, pad_type = q_205_pad_type_0, strides = var_10108, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_403_cast)[name = tensor("q_205_cast")]; + tensor var_10114 = const()[name = tensor("op_10114"), val = tensor([1, 1])]; + tensor var_10116 = const()[name = tensor("op_10116"), val = tensor([1, 1])]; + tensor k_205_pad_type_0 = const()[name = tensor("k_205_pad_type_0"), val = tensor("custom")]; + tensor k_205_pad_0 = const()[name = tensor("k_205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(747387072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748206336))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_205_cast = conv(dilations = var_10116, groups = var_6865, pad = k_205_pad_0, pad_type = k_205_pad_type_0, strides = var_10114, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_403_cast)[name = tensor("k_205_cast")]; + tensor var_10120 = const()[name = tensor("op_10120"), val = tensor([1, 1])]; + tensor var_10122 = const()[name = tensor("op_10122"), val = tensor([1, 1])]; + tensor v_205_pad_type_0 = const()[name = tensor("v_205_pad_type_0"), val = tensor("custom")]; + tensor v_205_pad_0 = const()[name = tensor("v_205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748206464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(749025728))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_205_cast = conv(dilations = var_10122, groups = var_6865, pad = v_205_pad_0, pad_type = v_205_pad_type_0, strides = var_10120, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_403_cast)[name = tensor("v_205_cast")]; + tensor var_10126 = const()[name = tensor("op_10126"), val = tensor([2, 20, 64, -1])]; + tensor var_10127_cast = reshape(shape = var_10126, x = q_205_cast)[name = tensor("op_10127_cast")]; + tensor var_10128 = const()[name = tensor("op_10128"), val = tensor([2, 20, 64, -1])]; + tensor var_10129_cast = reshape(shape = var_10128, x = k_205_cast)[name = tensor("op_10129_cast")]; + tensor var_10130 = const()[name = tensor("op_10130"), val = tensor([2, 20, 64, -1])]; + tensor var_10131_cast = reshape(shape = var_10130, x = v_205_cast)[name = tensor("op_10131_cast")]; + tensor attn_weights_409_transpose_x_0 = const()[name = tensor("attn_weights_409_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_409_transpose_y_0 = const()[name = tensor("attn_weights_409_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_409_cast = matmul(transpose_x = attn_weights_409_transpose_x_0, transpose_y = attn_weights_409_transpose_y_0, 
x = var_10127_cast, y = var_10129_cast)[name = tensor("attn_weights_409_cast")]; + tensor attn_weights_411_cast = mul(x = attn_weights_409_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_411_cast")]; + tensor var_10135_cast = softmax(axis = var_6849, x = attn_weights_411_cast)[name = tensor("op_10135_cast")]; + tensor attn_205_transpose_x_0 = const()[name = tensor("attn_205_transpose_x_0"), val = tensor(false)]; + tensor attn_205_transpose_y_0 = const()[name = tensor("attn_205_transpose_y_0"), val = tensor(true)]; + tensor attn_205_cast = matmul(transpose_x = attn_205_transpose_x_0, transpose_y = attn_205_transpose_y_0, x = var_10131_cast, y = var_10135_cast)[name = tensor("attn_205_cast")]; + tensor var_10139 = const()[name = tensor("op_10139"), val = tensor([2, 1280, 1, -1])]; + tensor input_593_cast = reshape(shape = var_10139, x = attn_205_cast)[name = tensor("input_593_cast")]; + tensor var_10144 = const()[name = tensor("op_10144"), val = tensor([1, 1])]; + tensor var_10146 = const()[name = tensor("op_10146"), val = tensor([1, 1])]; + tensor var_10148_pad_type_0 = const()[name = tensor("op_10148_pad_type_0"), val = tensor("custom")]; + tensor var_10148_pad_0 = const()[name = tensor("op_10148_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(749025856))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(749845120))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(749845248)))]; + tensor var_10148_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_10146, groups = var_6865, pad = var_10148_pad_0, pad_type = var_10148_pad_type_0, strides = var_10144, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_593_cast)[name = tensor("op_10148_cast")]; + tensor inputs_309_cast = add(x = var_10148_cast, y = inputs_307_cast)[name = tensor("inputs_309_cast")]; + tensor var_10152 = const()[name = tensor("op_10152"), val = tensor([1])]; + tensor channels_mean_309_cast = reduce_mean(axes = var_10152, keep_dims = var_6860, x = inputs_309_cast)[name = tensor("channels_mean_309_cast")]; + tensor zero_mean_309_cast = sub(x = inputs_309_cast, y = channels_mean_309_cast)[name = tensor("zero_mean_309_cast")]; + tensor zero_mean_sq_309_cast = mul(x = zero_mean_309_cast, y = zero_mean_309_cast)[name = tensor("zero_mean_sq_309_cast")]; + tensor var_10156 = const()[name = tensor("op_10156"), val = tensor([1])]; + tensor var_10157_cast = reduce_mean(axes = var_10156, keep_dims = var_6860, x = zero_mean_sq_309_cast)[name = tensor("op_10157_cast")]; + tensor var_10158_to_fp16 = const()[name = tensor("op_10158_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10159_cast = add(x = var_10157_cast, y = var_10158_to_fp16)[name = tensor("op_10159_cast")]; + tensor denom_309_epsilon_0_to_fp16 = const()[name = tensor("denom_309_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_309_cast = 
rsqrt(epsilon = denom_309_epsilon_0_to_fp16, x = var_10159_cast)[name = tensor("denom_309_cast")]; + tensor out_309_cast = mul(x = zero_mean_309_cast, y = denom_309_cast)[name = tensor("out_309_cast")]; + tensor var_10163_to_fp16 = const()[name = tensor("op_10163_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(749847872)))]; + tensor var_10164_cast = add(x = out_309_cast, y = var_10163_to_fp16)[name = tensor("op_10164_cast")]; + tensor var_10166_to_fp16 = const()[name = tensor("op_10166_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(749850496)))]; + tensor hidden_states_405_cast = mul(x = var_10164_cast, y = var_10166_to_fp16)[name = tensor("hidden_states_405_cast")]; + tensor var_10173 = const()[name = tensor("op_10173"), val = tensor([1, 1])]; + tensor var_10175 = const()[name = tensor("op_10175"), val = tensor([1, 1])]; + tensor q_207_pad_type_0 = const()[name = tensor("q_207_pad_type_0"), val = tensor("custom")]; + tensor q_207_pad_0 = const()[name = tensor("q_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(749853120))), lut = tensor([-0x1.f98p-7, 0x1.f8cp-7]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_207_cast = conv(dilations = var_10175, groups = var_6865, pad = q_207_pad_0, pad_type = q_207_pad_type_0, strides = var_10173, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_405_cast)[name = tensor("q_207_cast")]; + tensor var_10179 = const()[name = tensor("op_10179"), val = tensor([1, 1])]; + tensor var_10181 = const()[name = tensor("op_10181"), val = tensor([1, 1])]; + tensor k_207_pad_type_0 = const()[name = tensor("k_207_pad_type_0"), val = tensor("custom")]; + tensor k_207_pad_0 = const()[name = tensor("k_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(750057984))), lut = tensor([-0x1.664p-6, -0x1.a54p-8, 0x1.b0cp-8, 0x1.69p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_207_cast = conv(dilations = var_10181, groups = var_6865, pad = k_207_pad_0, pad_type = k_207_pad_type_0, strides = var_10179, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_207_cast")]; + tensor var_10185 = const()[name = tensor("op_10185"), val = tensor([1, 1])]; + tensor var_10187 = const()[name = tensor("op_10187"), val = tensor([1, 1])]; + tensor v_207_pad_type_0 = const()[name = tensor("v_207_pad_type_0"), val = tensor("custom")]; + tensor v_207_pad_0 = const()[name = tensor("v_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(750713408))), lut = tensor([-0x1.cf4p-6, -0x1.0acp-7, 0x1.09p-7, 0x1.ce4p-6]), name = 
tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_207_cast = conv(dilations = var_10187, groups = var_6865, pad = v_207_pad_0, pad_type = v_207_pad_type_0, strides = var_10185, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_207_cast")]; + tensor var_10191 = const()[name = tensor("op_10191"), val = tensor([2, 20, 64, -1])]; + tensor var_10192_cast = reshape(shape = var_10191, x = q_207_cast)[name = tensor("op_10192_cast")]; + tensor var_10193 = const()[name = tensor("op_10193"), val = tensor([2, 20, 64, -1])]; + tensor var_10194_cast = reshape(shape = var_10193, x = k_207_cast)[name = tensor("op_10194_cast")]; + tensor var_10195 = const()[name = tensor("op_10195"), val = tensor([2, 20, 64, -1])]; + tensor var_10196_cast = reshape(shape = var_10195, x = v_207_cast)[name = tensor("op_10196_cast")]; + tensor attn_weights_413_transpose_x_0 = const()[name = tensor("attn_weights_413_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_413_transpose_y_0 = const()[name = tensor("attn_weights_413_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_413_cast = matmul(transpose_x = attn_weights_413_transpose_x_0, transpose_y = attn_weights_413_transpose_y_0, x = var_10192_cast, y = var_10194_cast)[name = tensor("attn_weights_413_cast")]; + tensor attn_weights_415_cast = mul(x = attn_weights_413_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_415_cast")]; + tensor var_10200_cast = softmax(axis = var_6849, x = attn_weights_415_cast)[name = tensor("op_10200_cast")]; + tensor attn_207_transpose_x_0 = const()[name = tensor("attn_207_transpose_x_0"), val = tensor(false)]; + tensor attn_207_transpose_y_0 = const()[name = tensor("attn_207_transpose_y_0"), val = tensor(true)]; + tensor attn_207_cast = matmul(transpose_x = attn_207_transpose_x_0, transpose_y = attn_207_transpose_y_0, x = var_10196_cast, y = var_10200_cast)[name = tensor("attn_207_cast")]; + tensor var_10204 = const()[name = tensor("op_10204"), val = tensor([2, 1280, 1, -1])]; + tensor input_595_cast = reshape(shape = var_10204, x = attn_207_cast)[name = tensor("input_595_cast")]; + tensor var_10209 = const()[name = tensor("op_10209"), val = tensor([1, 1])]; + tensor var_10211 = const()[name = tensor("op_10211"), val = tensor([1, 1])]; + tensor var_10213_pad_type_0 = const()[name = tensor("op_10213_pad_type_0"), val = tensor("custom")]; + tensor var_10213_pad_0 = const()[name = tensor("op_10213_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(751368832))), lut = tensor([-0x1.09p-6, -0x1.3ep-8, 0x1.3b8p-8, 0x1.084p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(751778496)))]; + tensor var_10213_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_10211, groups = var_6865, pad = var_10213_pad_0, pad_type = 
var_10213_pad_type_0, strides = var_10209, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_595_cast)[name = tensor("op_10213_cast")]; + tensor inputs_311_cast = add(x = var_10213_cast, y = inputs_309_cast)[name = tensor("inputs_311_cast")]; + tensor var_10217 = const()[name = tensor("op_10217"), val = tensor([1])]; + tensor channels_mean_311_cast = reduce_mean(axes = var_10217, keep_dims = var_6860, x = inputs_311_cast)[name = tensor("channels_mean_311_cast")]; + tensor zero_mean_311_cast = sub(x = inputs_311_cast, y = channels_mean_311_cast)[name = tensor("zero_mean_311_cast")]; + tensor zero_mean_sq_311_cast = mul(x = zero_mean_311_cast, y = zero_mean_311_cast)[name = tensor("zero_mean_sq_311_cast")]; + tensor var_10221 = const()[name = tensor("op_10221"), val = tensor([1])]; + tensor var_10222_cast = reduce_mean(axes = var_10221, keep_dims = var_6860, x = zero_mean_sq_311_cast)[name = tensor("op_10222_cast")]; + tensor var_10223_to_fp16 = const()[name = tensor("op_10223_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10224_cast = add(x = var_10222_cast, y = var_10223_to_fp16)[name = tensor("op_10224_cast")]; + tensor denom_311_epsilon_0_to_fp16 = const()[name = tensor("denom_311_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_311_cast = rsqrt(epsilon = denom_311_epsilon_0_to_fp16, x = var_10224_cast)[name = tensor("denom_311_cast")]; + tensor out_311_cast = mul(x = zero_mean_311_cast, y = denom_311_cast)[name = tensor("out_311_cast")]; + tensor var_10228_to_fp16 = const()[name = tensor("op_10228_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(751781120)))]; + tensor var_10229_cast = add(x = out_311_cast, y = var_10228_to_fp16)[name = tensor("op_10229_cast")]; + tensor var_10231_to_fp16 = const()[name = tensor("op_10231_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(751783744)))]; + tensor input_597_cast = mul(x = var_10229_cast, y = var_10231_to_fp16)[name = tensor("input_597_cast")]; + tensor var_10239 = const()[name = tensor("op_10239"), val = tensor([1, 1])]; + tensor var_10241 = const()[name = tensor("op_10241"), val = tensor([1, 1])]; + tensor var_10243_pad_type_0 = const()[name = tensor("op_10243_pad_type_0"), val = tensor("custom")]; + tensor var_10243_pad_0 = const()[name = tensor("op_10243_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(751786368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(761616832))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(761617024)))]; + tensor var_10243_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_10241, groups = var_6865, pad = var_10243_pad_0, pad_type = var_10243_pad_type_0, strides = var_10239, weight = up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = 
input_597_cast)[name = tensor("op_10243_cast")]; + tensor var_10244_split_sizes_0 = const()[name = tensor("op_10244_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10244_axis_0 = const()[name = tensor("op_10244_axis_0"), val = tensor(1)]; + tensor var_10244_cast_0, tensor var_10244_cast_1 = split(axis = var_10244_axis_0, split_sizes = var_10244_split_sizes_0, x = var_10243_cast)[name = tensor("op_10244_cast")]; + tensor var_10246_mode_0 = const()[name = tensor("op_10246_mode_0"), val = tensor("EXACT")]; + tensor var_10246_cast = gelu(mode = var_10246_mode_0, x = var_10244_cast_1)[name = tensor("op_10246_cast")]; + tensor input_599_cast = mul(x = var_10244_cast_0, y = var_10246_cast)[name = tensor("input_599_cast")]; + tensor var_10250 = const()[name = tensor("op_10250"), val = tensor([1, 1])]; + tensor var_10252 = const()[name = tensor("op_10252"), val = tensor([1, 1])]; + tensor var_10254_pad_type_0 = const()[name = tensor("op_10254_pad_type_0"), val = tensor("custom")]; + tensor var_10254_pad_0 = const()[name = tensor("op_10254_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(761637568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(764914432))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(764914560)))]; + tensor var_10254_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_10252, groups = var_6865, pad = var_10254_pad_0, pad_type = var_10254_pad_type_0, strides = var_10250, weight = up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_599_cast)[name = tensor("op_10254_cast")]; + tensor inputs_313_cast = add(x = var_10254_cast, y = inputs_311_cast)[name = tensor("inputs_313_cast")]; + tensor var_10264 = const()[name = tensor("op_10264"), val = tensor([1])]; + tensor channels_mean_313_cast = reduce_mean(axes = var_10264, keep_dims = var_6860, x = inputs_313_cast)[name = tensor("channels_mean_313_cast")]; + tensor zero_mean_313_cast = sub(x = inputs_313_cast, y = channels_mean_313_cast)[name = tensor("zero_mean_313_cast")]; + tensor zero_mean_sq_313_cast = mul(x = zero_mean_313_cast, y = zero_mean_313_cast)[name = tensor("zero_mean_sq_313_cast")]; + tensor var_10268 = const()[name = tensor("op_10268"), val = tensor([1])]; + tensor var_10269_cast = reduce_mean(axes = var_10268, keep_dims = var_6860, x = zero_mean_sq_313_cast)[name = tensor("op_10269_cast")]; + tensor var_10270_to_fp16 = const()[name = tensor("op_10270_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10271_cast = add(x = var_10269_cast, y = var_10270_to_fp16)[name = tensor("op_10271_cast")]; + tensor denom_313_epsilon_0_to_fp16 = const()[name = tensor("denom_313_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_313_cast = rsqrt(epsilon = denom_313_epsilon_0_to_fp16, x = var_10271_cast)[name = tensor("denom_313_cast")]; + tensor out_313_cast = mul(x = zero_mean_313_cast, y = denom_313_cast)[name = tensor("out_313_cast")]; + tensor 
var_10275_to_fp16 = const()[name = tensor("op_10275_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(764917184)))];
+ tensor var_10276_cast = add(x = out_313_cast, y = var_10275_to_fp16)[name = tensor("op_10276_cast")];
+ tensor var_10278_to_fp16 = const()[name = tensor("op_10278_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(764919808)))];
+ tensor hidden_states_409_cast = mul(x = var_10276_cast, y = var_10278_to_fp16)[name = tensor("hidden_states_409_cast")];
+ tensor var_10285 = const()[name = tensor("op_10285"), val = tensor([1, 1])];
+ tensor var_10287 = const()[name = tensor("op_10287"), val = tensor([1, 1])];
+ tensor q_209_pad_type_0 = const()[name = tensor("q_209_pad_type_0"), val = tensor("custom")];
+ tensor q_209_pad_0 = const()[name = tensor("q_209_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(764922432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(765741696))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor q_209_cast = conv(dilations = var_10287, groups = var_6865, pad = q_209_pad_0, pad_type = q_209_pad_type_0, strides = var_10285, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_409_cast)[name = tensor("q_209_cast")];
+ tensor var_10291 = const()[name = tensor("op_10291"), val = tensor([1, 1])];
+ tensor var_10293 = const()[name = tensor("op_10293"), val = tensor([1, 1])];
+ tensor k_209_pad_type_0 = const()[name = tensor("k_209_pad_type_0"), val = tensor("custom")];
+ tensor k_209_pad_0 = const()[name = tensor("k_209_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(765741824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(766561088))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor k_209_cast = conv(dilations = var_10293, groups = var_6865, pad = k_209_pad_0, pad_type = k_209_pad_type_0, strides = var_10291, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_409_cast)[name = tensor("k_209_cast")];
+ tensor var_10297 = const()[name = tensor("op_10297"), val = tensor([1, 1])];
+ tensor var_10299 = const()[name = tensor("op_10299"), val = tensor([1, 1])];
+ tensor v_209_pad_type_0 = const()[name = tensor("v_209_pad_type_0"), val = tensor("custom")];
+ tensor v_209_pad_0 = const()[name = tensor("v_209_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(766561216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(767380480))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor v_209_cast = conv(dilations = var_10299, groups = var_6865, pad = v_209_pad_0, pad_type = v_209_pad_type_0, strides = var_10297, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_409_cast)[name = tensor("v_209_cast")];
+ tensor var_10303 = const()[name = tensor("op_10303"), val = tensor([2, 20, 64, -1])];
+ tensor var_10304_cast = reshape(shape = var_10303, x = q_209_cast)[name = tensor("op_10304_cast")];
+ tensor var_10305 = const()[name = tensor("op_10305"), val = tensor([2, 20, 64, -1])];
+ tensor var_10306_cast = reshape(shape = var_10305, x = k_209_cast)[name = tensor("op_10306_cast")];
+ tensor var_10307 = const()[name = tensor("op_10307"), val = tensor([2, 20, 64, -1])];
+ tensor var_10308_cast = reshape(shape = var_10307, x = v_209_cast)[name = tensor("op_10308_cast")];
+ tensor attn_weights_417_transpose_x_0 = const()[name = tensor("attn_weights_417_transpose_x_0"), val = tensor(true)];
+ tensor attn_weights_417_transpose_y_0 = const()[name = tensor("attn_weights_417_transpose_y_0"), val = tensor(false)];
+ tensor attn_weights_417_cast = matmul(transpose_x = attn_weights_417_transpose_x_0, transpose_y = attn_weights_417_transpose_y_0, x = var_10304_cast, y = var_10306_cast)[name = tensor("attn_weights_417_cast")];
+ tensor attn_weights_419_cast = mul(x = attn_weights_417_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_419_cast")];
+ tensor var_10312_cast = softmax(axis = var_6849, x = attn_weights_419_cast)[name = tensor("op_10312_cast")];
+ tensor attn_209_transpose_x_0 = const()[name = tensor("attn_209_transpose_x_0"), val = tensor(false)];
+ tensor attn_209_transpose_y_0 = const()[name = tensor("attn_209_transpose_y_0"), val = tensor(true)];
+ tensor attn_209_cast = matmul(transpose_x = attn_209_transpose_x_0, transpose_y = attn_209_transpose_y_0, x = var_10308_cast, y = var_10312_cast)[name = tensor("attn_209_cast")];
+ tensor var_10316 = const()[name = tensor("op_10316"), val = tensor([2, 1280, 1, -1])];
+ tensor input_601_cast = reshape(shape = var_10316, x = attn_209_cast)[name = tensor("input_601_cast")];
+ tensor var_10321 = const()[name = tensor("op_10321"), val = tensor([1, 1])];
+ tensor var_10323 = const()[name = tensor("op_10323"), val = tensor([1, 1])];
+ tensor var_10325_pad_type_0 = const()[name = tensor("op_10325_pad_type_0"), val = tensor("custom")];
+ tensor var_10325_pad_0 = const()[name = tensor("op_10325_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(767380608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(768199872))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(768200000)))];
+ tensor var_10325_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_10323, groups = var_6865, pad = var_10325_pad_0, pad_type = var_10325_pad_type_0, strides = var_10321, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_601_cast)[name = tensor("op_10325_cast")];
+ tensor inputs_315_cast = add(x = var_10325_cast, y = inputs_313_cast)[name = tensor("inputs_315_cast")];
+ tensor var_10329 = const()[name = tensor("op_10329"), val = tensor([1])];
+ tensor channels_mean_315_cast = reduce_mean(axes = var_10329, keep_dims = var_6860, x = inputs_315_cast)[name = tensor("channels_mean_315_cast")];
+ tensor zero_mean_315_cast = sub(x = inputs_315_cast, y = channels_mean_315_cast)[name = tensor("zero_mean_315_cast")];
+ tensor zero_mean_sq_315_cast = mul(x = zero_mean_315_cast, y = zero_mean_315_cast)[name = tensor("zero_mean_sq_315_cast")];
+ tensor var_10333 = const()[name = tensor("op_10333"), val = tensor([1])];
+ tensor var_10334_cast = reduce_mean(axes = var_10333, keep_dims = var_6860, x = zero_mean_sq_315_cast)[name = tensor("op_10334_cast")];
+ tensor var_10335_to_fp16 = const()[name = tensor("op_10335_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor var_10336_cast = add(x = var_10334_cast, y = var_10335_to_fp16)[name = tensor("op_10336_cast")];
+ tensor denom_315_epsilon_0_to_fp16 = const()[name = tensor("denom_315_epsilon_0_to_fp16"), val = tensor(0x1p-24)];
+ tensor denom_315_cast = rsqrt(epsilon = denom_315_epsilon_0_to_fp16, x = var_10336_cast)[name = tensor("denom_315_cast")];
+ tensor out_315_cast = mul(x = zero_mean_315_cast, y = denom_315_cast)[name = tensor("out_315_cast")];
+ tensor var_10340_to_fp16 = const()[name = tensor("op_10340_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(768202624)))];
+ tensor var_10341_cast = add(x = out_315_cast, y = var_10340_to_fp16)[name = tensor("op_10341_cast")];
+ tensor var_10343_to_fp16 = const()[name = tensor("op_10343_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(768205248)))];
+ tensor hidden_states_411_cast = mul(x = var_10341_cast, y = var_10343_to_fp16)[name = tensor("hidden_states_411_cast")];
+ tensor var_10350 = const()[name = tensor("op_10350"), val = tensor([1, 1])];
+ tensor var_10352 = const()[name = tensor("op_10352"), val = tensor([1, 1])];
+ tensor q_211_pad_type_0 = const()[name = tensor("q_211_pad_type_0"), val = tensor("custom")];
+ tensor q_211_pad_0 = const()[name = tensor("q_211_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(768207872))), lut = tensor([-0x1.a5p-6, -0x1.048p-7, 0x1.054p-7, 0x1.a54p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor q_211_cast = conv(dilations = var_10352, groups = var_6865, pad = q_211_pad_0, pad_type = q_211_pad_type_0, strides = var_10350, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_411_cast)[name = tensor("q_211_cast")];
+ tensor var_10356 = const()[name = tensor("op_10356"), val = tensor([1, 1])];
+ tensor var_10358 = const()[name = tensor("op_10358"), val = tensor([1, 1])];
+ tensor k_211_pad_type_0 = const()[name = tensor("k_211_pad_type_0"), val = tensor("custom")];
+ tensor k_211_pad_0 = const()[name = tensor("k_211_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(768617536))), lut = tensor([-0x1.404p-6, -0x1.808p-8, 0x1.7ccp-8, 0x1.3f4p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])];
+ tensor k_211_cast = conv(dilations = var_10358, groups = var_6865, pad = k_211_pad_0, pad_type = k_211_pad_type_0, strides = var_10356, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_211_cast")];
+ tensor var_10362 = const()[name = tensor("op_10362"), val = tensor([1, 1])];
+ tensor var_10364 = const()[name = tensor("op_10364"), val = tensor([1, 1])];
+ tensor v_211_pad_type_0 = const()[name = tensor("v_211_pad_type_0"), val = tensor("custom")];
+ tensor v_211_pad_0 = const()[name = tensor("v_211_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769272960))), lut = tensor([-0x1.9f8p-6, -0x1.e58p-8, 0x1.e5cp-8, 0x1.a04p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])];
+ tensor v_211_cast = conv(dilations = var_10364, groups = var_6865, pad = v_211_pad_0, pad_type = v_211_pad_type_0, strides = var_10362, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_211_cast")];
+ tensor var_10368 = const()[name = tensor("op_10368"), val = tensor([2, 20, 64, -1])];
+ tensor var_10369_cast = reshape(shape = var_10368, x = q_211_cast)[name = tensor("op_10369_cast")];
+ tensor var_10370 = const()[name = tensor("op_10370"), val = tensor([2, 20, 64, -1])];
+ tensor var_10371_cast = reshape(shape = var_10370, x = k_211_cast)[name = tensor("op_10371_cast")];
+ tensor var_10372 = const()[name = tensor("op_10372"), val = tensor([2, 20, 64, -1])];
+ tensor var_10373_cast = reshape(shape = var_10372, x = v_211_cast)[name = tensor("op_10373_cast")];
+ tensor attn_weights_421_transpose_x_0 = const()[name = tensor("attn_weights_421_transpose_x_0"), val = tensor(true)];
+ tensor attn_weights_421_transpose_y_0 = const()[name = tensor("attn_weights_421_transpose_y_0"), val = tensor(false)];
+ tensor attn_weights_421_cast = matmul(transpose_x = attn_weights_421_transpose_x_0, transpose_y = attn_weights_421_transpose_y_0, x = var_10369_cast, y = var_10371_cast)[name = tensor("attn_weights_421_cast")];
+ tensor attn_weights_423_cast = mul(x = attn_weights_421_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_423_cast")];
+ tensor var_10377_cast = softmax(axis = var_6849, x = attn_weights_423_cast)[name = tensor("op_10377_cast")];
+ tensor attn_211_transpose_x_0 = const()[name = tensor("attn_211_transpose_x_0"), val = tensor(false)];
+ tensor attn_211_transpose_y_0 = const()[name = tensor("attn_211_transpose_y_0"), val = tensor(true)];
+ tensor attn_211_cast = matmul(transpose_x = attn_211_transpose_x_0, transpose_y = attn_211_transpose_y_0, x = var_10373_cast, y = var_10377_cast)[name = tensor("attn_211_cast")];
+ tensor var_10381 = const()[name = tensor("op_10381"), val = tensor([2, 1280, 1, -1])];
+ tensor input_603_cast = reshape(shape = var_10381, x = attn_211_cast)[name = tensor("input_603_cast")];
+ tensor var_10386 = const()[name = tensor("op_10386"), val = tensor([1, 1])];
+ tensor var_10388 = const()[name = tensor("op_10388"), val = tensor([1, 1])];
+ tensor var_10390_pad_type_0 = const()[name = tensor("op_10390_pad_type_0"), val = tensor("custom")];
+ tensor var_10390_pad_0 = const()[name = tensor("op_10390_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769928384))), lut = tensor([-0x1.f0cp-7, -0x1.2bp-8, 0x1.2b8p-8, 0x1.f08p-7]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(770338048)))];
+ tensor var_10390_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_10388, groups = var_6865, pad = var_10390_pad_0, pad_type = var_10390_pad_type_0, strides = var_10386, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_603_cast)[name = tensor("op_10390_cast")];
+ tensor inputs_317_cast = add(x = var_10390_cast, y = inputs_315_cast)[name = tensor("inputs_317_cast")];
+ tensor var_10394 = const()[name = tensor("op_10394"), val = tensor([1])];
+ tensor channels_mean_317_cast = reduce_mean(axes = var_10394, keep_dims = var_6860, x = inputs_317_cast)[name = tensor("channels_mean_317_cast")];
+ tensor zero_mean_317_cast = sub(x = inputs_317_cast, y = channels_mean_317_cast)[name = tensor("zero_mean_317_cast")];
+ tensor zero_mean_sq_317_cast = mul(x = zero_mean_317_cast, y = zero_mean_317_cast)[name = tensor("zero_mean_sq_317_cast")];
+ tensor var_10398 = const()[name = tensor("op_10398"), val = tensor([1])];
+ tensor var_10399_cast = reduce_mean(axes = var_10398, keep_dims = var_6860, x = zero_mean_sq_317_cast)[name = tensor("op_10399_cast")];
+ tensor var_10400_to_fp16 = const()[name = tensor("op_10400_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor var_10401_cast = add(x = var_10399_cast, y = var_10400_to_fp16)[name = tensor("op_10401_cast")];
+ tensor denom_317_epsilon_0_to_fp16 = const()[name = tensor("denom_317_epsilon_0_to_fp16"), val = tensor(0x1p-24)];
+ tensor denom_317_cast = rsqrt(epsilon = denom_317_epsilon_0_to_fp16, x = var_10401_cast)[name = tensor("denom_317_cast")];
+ tensor out_317_cast = mul(x = zero_mean_317_cast, y = denom_317_cast)[name = tensor("out_317_cast")];
+ tensor var_10405_to_fp16 = const()[name = tensor("op_10405_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(770340672)))];
+ tensor var_10406_cast = add(x = out_317_cast, y = var_10405_to_fp16)[name = tensor("op_10406_cast")];
+ tensor var_10408_to_fp16 = const()[name = tensor("op_10408_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(770343296)))];
+ tensor input_605_cast = mul(x = var_10406_cast, y = var_10408_to_fp16)[name = tensor("input_605_cast")];
+ tensor var_10416 = const()[name = tensor("op_10416"), val = tensor([1, 1])];
+ tensor var_10418 = const()[name = tensor("op_10418"), val = tensor([1, 1])];
+ tensor var_10420_pad_type_0 = const()[name = tensor("op_10420_pad_type_0"), val = tensor("custom")];
+ tensor var_10420_pad_0 = const()[name = tensor("op_10420_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(770345920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(780176384))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(780176576)))];
+ tensor var_10420_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_10418, groups = var_6865, pad = var_10420_pad_0, pad_type = var_10420_pad_type_0, strides = var_10416, weight = up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_605_cast)[name = tensor("op_10420_cast")];
+ tensor var_10421_split_sizes_0 = const()[name = tensor("op_10421_split_sizes_0"), val = tensor([5120, 5120])];
+ tensor var_10421_axis_0 = const()[name = tensor("op_10421_axis_0"), val = tensor(1)];
+ tensor var_10421_cast_0, tensor var_10421_cast_1 = split(axis = var_10421_axis_0, split_sizes = var_10421_split_sizes_0, x = var_10420_cast)[name = tensor("op_10421_cast")];
+ tensor var_10423_mode_0 = const()[name = tensor("op_10423_mode_0"), val = tensor("EXACT")];
+ tensor var_10423_cast = gelu(mode = var_10423_mode_0, x = var_10421_cast_1)[name = tensor("op_10423_cast")];
+ tensor input_607_cast = mul(x = var_10421_cast_0, y = var_10423_cast)[name = tensor("input_607_cast")];
+ tensor var_10427 = const()[name = tensor("op_10427"), val = tensor([1, 1])];
+ tensor var_10429 = const()[name = tensor("op_10429"), val = tensor([1, 1])];
+ tensor var_10431_pad_type_0 = const()[name = tensor("op_10431_pad_type_0"), val = tensor("custom")];
+ tensor var_10431_pad_0 = const()[name = tensor("op_10431_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(780197120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(783473984))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(783474112)))];
+ tensor var_10431_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_10429, groups = var_6865, pad = var_10431_pad_0, pad_type = var_10431_pad_type_0, strides = var_10427, weight = up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_607_cast)[name = tensor("op_10431_cast")];
+ tensor inputs_319_cast = add(x = var_10431_cast, y = inputs_317_cast)[name = tensor("inputs_319_cast")];
+ tensor var_10441 = const()[name = tensor("op_10441"), val = tensor([1])];
+ tensor channels_mean_319_cast = reduce_mean(axes = var_10441, keep_dims = var_6860, x = inputs_319_cast)[name = tensor("channels_mean_319_cast")];
+ tensor zero_mean_319_cast = sub(x = inputs_319_cast, y = channels_mean_319_cast)[name = tensor("zero_mean_319_cast")];
+ tensor zero_mean_sq_319_cast = mul(x = zero_mean_319_cast, y = zero_mean_319_cast)[name = tensor("zero_mean_sq_319_cast")];
+ tensor var_10445 = const()[name = tensor("op_10445"), val = tensor([1])];
+ tensor var_10446_cast = reduce_mean(axes = var_10445, keep_dims = var_6860, x = zero_mean_sq_319_cast)[name = tensor("op_10446_cast")];
+ tensor var_10447_to_fp16 = const()[name = tensor("op_10447_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor var_10448_cast = add(x = var_10446_cast, y = var_10447_to_fp16)[name = tensor("op_10448_cast")];
+ tensor denom_319_epsilon_0_to_fp16 = const()[name = tensor("denom_319_epsilon_0_to_fp16"), val = tensor(0x1p-24)];
+ tensor denom_319_cast = rsqrt(epsilon = denom_319_epsilon_0_to_fp16, x = var_10448_cast)[name = tensor("denom_319_cast")];
+ tensor out_319_cast = mul(x = zero_mean_319_cast, y = denom_319_cast)[name = tensor("out_319_cast")];
+ tensor var_10452_to_fp16 = const()[name = tensor("op_10452_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(783476736)))];
+ tensor var_10453_cast = add(x = out_319_cast, y = var_10452_to_fp16)[name = tensor("op_10453_cast")];
+ tensor var_10455_to_fp16 = const()[name = tensor("op_10455_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(783479360)))];
+ tensor hidden_states_415_cast = mul(x = var_10453_cast, y = var_10455_to_fp16)[name = tensor("hidden_states_415_cast")];
+ tensor var_10462 = const()[name = tensor("op_10462"), val = tensor([1, 1])];
+ tensor var_10464 = const()[name = tensor("op_10464"), val = tensor([1, 1])];
+ tensor q_213_pad_type_0 = const()[name = tensor("q_213_pad_type_0"), val = tensor("custom")];
+ tensor q_213_pad_0 = const()[name = tensor("q_213_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(783481984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(784301248))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor q_213_cast = conv(dilations = var_10464, groups = var_6865, pad = q_213_pad_0, pad_type = q_213_pad_type_0, strides = var_10462, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_415_cast)[name = tensor("q_213_cast")];
+ tensor var_10468 = const()[name = tensor("op_10468"), val = tensor([1, 1])];
+ tensor var_10470 = const()[name = tensor("op_10470"), val = tensor([1, 1])];
+ tensor k_213_pad_type_0 = const()[name = tensor("k_213_pad_type_0"), val = tensor("custom")];
+ tensor k_213_pad_0 = const()[name = tensor("k_213_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(784301376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(785120640))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor k_213_cast = conv(dilations = var_10470, groups = var_6865, pad = k_213_pad_0, pad_type = k_213_pad_type_0, strides = var_10468, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_415_cast)[name = tensor("k_213_cast")];
+ tensor var_10474 = const()[name = tensor("op_10474"), val = tensor([1, 1])];
+ tensor var_10476 = const()[name = tensor("op_10476"), val = tensor([1, 1])];
+ tensor v_213_pad_type_0 = const()[name = tensor("v_213_pad_type_0"), val = tensor("custom")];
+ tensor v_213_pad_0 = const()[name = tensor("v_213_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(785120768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(785940032))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor v_213_cast = conv(dilations = var_10476, groups = var_6865, pad = v_213_pad_0, pad_type = v_213_pad_type_0, strides = var_10474, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_415_cast)[name = tensor("v_213_cast")];
+ tensor var_10480 = const()[name = tensor("op_10480"), val = tensor([2, 20, 64, -1])];
+ tensor var_10481_cast = reshape(shape = var_10480, x = q_213_cast)[name = tensor("op_10481_cast")];
+ tensor var_10482 = const()[name = tensor("op_10482"), val = tensor([2, 20, 64, -1])];
+ tensor var_10483_cast = reshape(shape = var_10482, x = k_213_cast)[name = tensor("op_10483_cast")];
+ tensor var_10484 = const()[name = tensor("op_10484"), val = tensor([2, 20, 64, -1])];
+ tensor var_10485_cast = reshape(shape = var_10484, x = v_213_cast)[name = tensor("op_10485_cast")];
+ tensor attn_weights_425_transpose_x_0 = const()[name = tensor("attn_weights_425_transpose_x_0"), val = tensor(true)];
+ tensor attn_weights_425_transpose_y_0 = const()[name = tensor("attn_weights_425_transpose_y_0"), val = tensor(false)];
+ tensor attn_weights_425_cast = matmul(transpose_x = attn_weights_425_transpose_x_0, transpose_y = attn_weights_425_transpose_y_0, x = var_10481_cast, y = var_10483_cast)[name = tensor("attn_weights_425_cast")];
+ tensor attn_weights_427_cast = mul(x = attn_weights_425_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_427_cast")];
+ tensor var_10489_cast = softmax(axis = var_6849, x = attn_weights_427_cast)[name = tensor("op_10489_cast")];
+ tensor attn_213_transpose_x_0 = const()[name = tensor("attn_213_transpose_x_0"), val = tensor(false)];
+ tensor attn_213_transpose_y_0 = const()[name = tensor("attn_213_transpose_y_0"), val = tensor(true)];
+ tensor attn_213_cast = matmul(transpose_x = attn_213_transpose_x_0, transpose_y = attn_213_transpose_y_0, x = var_10485_cast, y = var_10489_cast)[name = tensor("attn_213_cast")];
+ tensor var_10493 = const()[name = tensor("op_10493"), val = tensor([2, 1280, 1, -1])];
+ tensor input_609_cast = reshape(shape = var_10493, x = attn_213_cast)[name = tensor("input_609_cast")];
+ tensor var_10498 = const()[name = tensor("op_10498"), val = tensor([1, 1])];
+ tensor var_10500 = const()[name = tensor("op_10500"), val = tensor([1, 1])];
+ tensor var_10502_pad_type_0 = const()[name = tensor("op_10502_pad_type_0"), val = tensor("custom")];
+ tensor var_10502_pad_0 = const()[name = tensor("op_10502_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(785940160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(786759424))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(786759552)))];
+ tensor var_10502_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_10500, groups = var_6865, pad = var_10502_pad_0, pad_type = var_10502_pad_type_0, strides = var_10498, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_609_cast)[name = tensor("op_10502_cast")];
+ tensor inputs_321_cast = add(x = var_10502_cast, y = inputs_319_cast)[name = tensor("inputs_321_cast")];
+ tensor var_10506 = const()[name = tensor("op_10506"), val = tensor([1])];
+ tensor channels_mean_321_cast = reduce_mean(axes = var_10506, keep_dims = var_6860, x = inputs_321_cast)[name = tensor("channels_mean_321_cast")];
+ tensor zero_mean_321_cast = sub(x = inputs_321_cast, y = channels_mean_321_cast)[name = tensor("zero_mean_321_cast")];
+ tensor zero_mean_sq_321_cast = mul(x = zero_mean_321_cast, y = zero_mean_321_cast)[name = tensor("zero_mean_sq_321_cast")];
+ tensor var_10510 = const()[name = tensor("op_10510"), val = tensor([1])];
+ tensor var_10511_cast = reduce_mean(axes = var_10510, keep_dims = var_6860, x = zero_mean_sq_321_cast)[name = tensor("op_10511_cast")];
+ tensor var_10512_to_fp16 = const()[name = tensor("op_10512_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor var_10513_cast = add(x = var_10511_cast, y = var_10512_to_fp16)[name = tensor("op_10513_cast")];
+ tensor denom_321_epsilon_0_to_fp16 = const()[name = tensor("denom_321_epsilon_0_to_fp16"), val = tensor(0x1p-24)];
+ tensor denom_321_cast = rsqrt(epsilon = denom_321_epsilon_0_to_fp16, x = var_10513_cast)[name = tensor("denom_321_cast")];
+ tensor out_321_cast = mul(x = zero_mean_321_cast, y = denom_321_cast)[name = tensor("out_321_cast")];
+ tensor var_10517_to_fp16 = const()[name = tensor("op_10517_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(786762176)))];
+ tensor var_10518_cast = add(x = out_321_cast, y = var_10517_to_fp16)[name = tensor("op_10518_cast")];
+ tensor var_10520_to_fp16 = const()[name = tensor("op_10520_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(786764800)))];
+ tensor hidden_states_417_cast = mul(x = var_10518_cast, y = var_10520_to_fp16)[name = tensor("hidden_states_417_cast")];
+ tensor var_10527 = const()[name = tensor("op_10527"), val = tensor([1, 1])];
+ tensor var_10529 = const()[name = tensor("op_10529"), val = tensor([1, 1])];
+ tensor q_215_pad_type_0 = const()[name = tensor("q_215_pad_type_0"), val = tensor("custom")];
+ tensor q_215_pad_0 = const()[name = tensor("q_215_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(786767424))), lut = tensor([-0x1.8bcp-6, -0x1.eep-8, 0x1.ef4p-8, 0x1.8c4p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor q_215_cast = conv(dilations = var_10529, groups = var_6865, pad = q_215_pad_0, pad_type = q_215_pad_type_0, strides = var_10527, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_417_cast)[name = tensor("q_215_cast")];
+ tensor var_10533 = const()[name = tensor("op_10533"), val = tensor([1, 1])];
+ tensor var_10535 = const()[name = tensor("op_10535"), val = tensor([1, 1])];
+ tensor k_215_pad_type_0 = const()[name = tensor("k_215_pad_type_0"), val = tensor("custom")];
+ tensor k_215_pad_0 = const()[name = tensor("k_215_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(787177088))), lut = tensor([-0x1.214p-6, -0x1.5d8p-8, 0x1.59cp-8, 0x1.20cp-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])];
+ tensor k_215_cast = conv(dilations = var_10535, groups = var_6865, pad = k_215_pad_0, pad_type = k_215_pad_type_0, strides = var_10533, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_215_cast")];
+ tensor var_10539 = const()[name = tensor("op_10539"), val = tensor([1, 1])];
+ tensor var_10541 = const()[name = tensor("op_10541"), val = tensor([1, 1])];
+ tensor v_215_pad_type_0 = const()[name = tensor("v_215_pad_type_0"), val = tensor("custom")];
+ tensor v_215_pad_0 = const()[name = tensor("v_215_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(787832512))), lut = tensor([-0x1.45cp-6, -0x1.8p-8, 0x1.7fp-8, 0x1.454p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])];
+ tensor v_215_cast = conv(dilations = var_10541, groups = var_6865, pad = v_215_pad_0, pad_type = v_215_pad_type_0, strides = var_10539, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_215_cast")];
+ tensor var_10545 = const()[name = tensor("op_10545"), val = tensor([2, 20, 64, -1])];
+ tensor var_10546_cast = reshape(shape = var_10545, x = q_215_cast)[name = tensor("op_10546_cast")];
+ tensor var_10547 = const()[name = tensor("op_10547"), val = tensor([2, 20, 64, -1])];
+ tensor var_10548_cast = reshape(shape = var_10547, x = k_215_cast)[name = tensor("op_10548_cast")];
+ tensor var_10549 = const()[name = tensor("op_10549"), val = tensor([2, 20, 64, -1])];
+ tensor var_10550_cast = reshape(shape = var_10549, x = v_215_cast)[name = tensor("op_10550_cast")];
+ tensor attn_weights_429_transpose_x_0 = const()[name = tensor("attn_weights_429_transpose_x_0"), val = tensor(true)];
+ tensor attn_weights_429_transpose_y_0 = const()[name = tensor("attn_weights_429_transpose_y_0"), val = tensor(false)];
+ tensor attn_weights_429_cast = matmul(transpose_x = attn_weights_429_transpose_x_0, transpose_y = attn_weights_429_transpose_y_0, x = var_10546_cast, y = var_10548_cast)[name = tensor("attn_weights_429_cast")];
+ tensor attn_weights_431_cast = mul(x = attn_weights_429_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_431_cast")];
+ tensor var_10554_cast = softmax(axis = var_6849, x = attn_weights_431_cast)[name = tensor("op_10554_cast")];
+ tensor attn_215_transpose_x_0 = const()[name = tensor("attn_215_transpose_x_0"), val = tensor(false)];
+ tensor attn_215_transpose_y_0 = const()[name = tensor("attn_215_transpose_y_0"), val = tensor(true)];
+ tensor attn_215_cast = matmul(transpose_x = attn_215_transpose_x_0, transpose_y = attn_215_transpose_y_0, x = var_10550_cast, y = var_10554_cast)[name = tensor("attn_215_cast")];
+ tensor var_10558 = const()[name = tensor("op_10558"), val = tensor([2, 1280, 1, -1])];
+ tensor input_611_cast = reshape(shape = var_10558, x = attn_215_cast)[name = tensor("input_611_cast")];
+ tensor var_10563 = const()[name = tensor("op_10563"), val = tensor([1, 1])];
+ tensor var_10565 = const()[name = tensor("op_10565"), val = tensor([1, 1])];
+ tensor var_10567_pad_type_0 = const()[name = tensor("op_10567_pad_type_0"), val = tensor("custom")];
+ tensor var_10567_pad_0 = const()[name = tensor("op_10567_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788487936))), lut = tensor([-0x1.c1p-8, 0x1.c24p-8]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788692800)))];
+ tensor var_10567_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_10565, groups = var_6865, pad = var_10567_pad_0, pad_type = var_10567_pad_type_0, strides = var_10563, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_611_cast)[name = tensor("op_10567_cast")];
+ tensor inputs_323_cast = add(x = var_10567_cast, y = inputs_321_cast)[name = tensor("inputs_323_cast")];
+ tensor var_10571 = const()[name = tensor("op_10571"), val = tensor([1])];
+ tensor channels_mean_323_cast = reduce_mean(axes = var_10571, keep_dims = var_6860, x = inputs_323_cast)[name = tensor("channels_mean_323_cast")];
+ tensor zero_mean_323_cast = sub(x = inputs_323_cast, y = channels_mean_323_cast)[name = tensor("zero_mean_323_cast")];
+ tensor zero_mean_sq_323_cast = mul(x = zero_mean_323_cast, y = zero_mean_323_cast)[name = tensor("zero_mean_sq_323_cast")];
+ tensor var_10575 = const()[name = tensor("op_10575"), val = tensor([1])];
+ tensor var_10576_cast = reduce_mean(axes = var_10575, keep_dims = var_6860, x = zero_mean_sq_323_cast)[name = tensor("op_10576_cast")];
+ tensor var_10577_to_fp16 = const()[name = tensor("op_10577_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor var_10578_cast = add(x = var_10576_cast, y = var_10577_to_fp16)[name = tensor("op_10578_cast")];
+ tensor denom_323_epsilon_0_to_fp16 = const()[name = tensor("denom_323_epsilon_0_to_fp16"), val = tensor(0x1p-24)];
+ tensor denom_323_cast = rsqrt(epsilon = denom_323_epsilon_0_to_fp16, x = var_10578_cast)[name = tensor("denom_323_cast")];
+ tensor out_323_cast = mul(x = zero_mean_323_cast, y = denom_323_cast)[name = tensor("out_323_cast")];
+ tensor var_10582_to_fp16 = const()[name = tensor("op_10582_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788695424)))];
+ tensor var_10583_cast = add(x = out_323_cast, y = var_10582_to_fp16)[name = tensor("op_10583_cast")];
+ tensor var_10585_to_fp16 = const()[name = tensor("op_10585_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788698048)))];
+ tensor input_613_cast = mul(x = var_10583_cast, y = var_10585_to_fp16)[name = tensor("input_613_cast")];
+ tensor var_10593 = const()[name = tensor("op_10593"), val = tensor([1, 1])];
+ tensor var_10595 = const()[name = tensor("op_10595"), val = tensor([1, 1])];
+ tensor var_10597_pad_type_0 = const()[name = tensor("op_10597_pad_type_0"), val = tensor("custom")];
+ tensor var_10597_pad_0 = const()[name = tensor("op_10597_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788700672))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(798531136))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(798531328)))];
+ tensor var_10597_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_10595, groups = var_6865, pad = var_10597_pad_0, pad_type = var_10597_pad_type_0, strides = var_10593, weight = up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_613_cast)[name = tensor("op_10597_cast")];
+ tensor var_10598_split_sizes_0 = const()[name = tensor("op_10598_split_sizes_0"), val = tensor([5120, 5120])];
+ tensor var_10598_axis_0 = const()[name = tensor("op_10598_axis_0"), val = tensor(1)];
+ tensor var_10598_cast_0, tensor var_10598_cast_1 = split(axis = var_10598_axis_0, split_sizes = var_10598_split_sizes_0, x = var_10597_cast)[name = tensor("op_10598_cast")];
+ tensor var_10600_mode_0 = const()[name = tensor("op_10600_mode_0"), val = tensor("EXACT")];
+ tensor var_10600_cast = gelu(mode = var_10600_mode_0, x = var_10598_cast_1)[name = tensor("op_10600_cast")];
+ tensor input_615_cast = mul(x = var_10598_cast_0, y = var_10600_cast)[name = tensor("input_615_cast")];
+ tensor var_10604 = const()[name = tensor("op_10604"), val = tensor([1, 1])];
+ tensor var_10606 = const()[name = tensor("op_10606"), val = tensor([1, 1])];
+ tensor var_10608_pad_type_0 = const()[name = tensor("op_10608_pad_type_0"), val = tensor("custom")];
+ tensor var_10608_pad_0 = const()[name = tensor("op_10608_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(798551872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(803467136))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])];
+ tensor up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(803467328)))];
+ tensor var_10608_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_10606, groups = var_6865, pad = var_10608_pad_0, pad_type = var_10608_pad_type_0, strides = var_10604, weight = up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_615_cast)[name = tensor("op_10608_cast")];
+ tensor hidden_states_421_cast = add(x = var_10608_cast, y = inputs_323_cast)[name = tensor("hidden_states_421_cast")];
+ tensor var_10610 = const()[name = tensor("op_10610"), val = tensor([2, 1280, 32, 32])];
+ tensor input_617_cast = reshape(shape = var_10610, x = hidden_states_421_cast)[name = tensor("input_617_cast")];
+ tensor var_10614 = const()[name = tensor("op_10614"), val = tensor([1, 1])];
+ tensor var_10616 = const()[name = tensor("op_10616"), val = tensor([1, 1])];
+ tensor hidden_states_423_pad_type_0 = const()[name = tensor("hidden_states_423_pad_type_0"), val = tensor("custom")];
+ tensor hidden_states_423_pad_0 = const()[name = tensor("hidden_states_423_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_1_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(803469952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(804698816))), name = tensor("up_blocks_0_attentions_1_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor up_blocks_0_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(804699008)))];
+ tensor hidden_states_423_cast = conv(bias = up_blocks_0_attentions_1_proj_out_bias_to_fp16, dilations = var_10616, groups = var_6865, pad = hidden_states_423_pad_0, pad_type = hidden_states_423_pad_type_0, strides = var_10614, weight = up_blocks_0_attentions_1_proj_out_weight_to_fp16_palettized, x = input_617_cast)[name = tensor("hidden_states_423_cast")];
+ tensor hidden_states_425_cast = add(x = hidden_states_423_cast, y = hidden_states_357_cast)[name = tensor("hidden_states_425_cast")];
+ tensor input_619_interleave_0 = const()[name = tensor("input_619_interleave_0"), val = tensor(false)];
+ tensor input_619_cast = concat(axis = var_6865, interleave = input_619_interleave_0, values = (hidden_states_425_cast, input_115_cast))[name = tensor("input_619_cast")];
+ tensor reshape_108_shape_0 = const()[name = tensor("reshape_108_shape_0"), val = tensor([2, 32, 60, 32, 32])];
+ tensor reshape_108_cast = reshape(shape = reshape_108_shape_0, x = input_619_cast)[name = tensor("reshape_108_cast")];
+ tensor reduce_mean_81_axes_0 = const()[name = tensor("reduce_mean_81_axes_0"), val = tensor([2, 3, 4])];
+ tensor reduce_mean_81_keep_dims_0 = const()[name = tensor("reduce_mean_81_keep_dims_0"), val = tensor(true)];
+ tensor reduce_mean_81_cast = reduce_mean(axes = reduce_mean_81_axes_0, keep_dims = reduce_mean_81_keep_dims_0, x = reshape_108_cast)[name = tensor("reduce_mean_81_cast")];
+ tensor sub_54_cast = sub(x = reshape_108_cast, y = reduce_mean_81_cast)[name = tensor("sub_54_cast")];
+ tensor square_27_cast = square(x = sub_54_cast)[name = tensor("square_27_cast")];
+ tensor reduce_mean_83_axes_0 = const()[name = tensor("reduce_mean_83_axes_0"), val = tensor([2, 3, 4])];
+ tensor reduce_mean_83_keep_dims_0 = const()[name = tensor("reduce_mean_83_keep_dims_0"), val = tensor(true)];
+ tensor reduce_mean_83_cast = reduce_mean(axes = reduce_mean_83_axes_0, keep_dims = reduce_mean_83_keep_dims_0, x = square_27_cast)[name = tensor("reduce_mean_83_cast")];
+ tensor add_54_y_0_to_fp16 = const()[name = tensor("add_54_y_0_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor add_54_cast = add(x = reduce_mean_83_cast, y = add_54_y_0_to_fp16)[name = tensor("add_54_cast")];
+ tensor sqrt_27_cast = sqrt(x = add_54_cast)[name = tensor("sqrt_27_cast")];
+ tensor real_div_27_cast = real_div(x = sub_54_cast, y = sqrt_27_cast)[name = tensor("real_div_27_cast")];
+ tensor reshape_109_shape_0 = const()[name = tensor("reshape_109_shape_0"), val = tensor([2, 1920, 32, 32])];
+ tensor reshape_109_cast = reshape(shape = reshape_109_shape_0, x = real_div_27_cast)[name = tensor("reshape_109_cast")];
+ tensor add_55_mean_0_to_fp16 = const()[name = tensor("add_55_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(804701632)))];
+ tensor add_55_variance_0_to_fp16 = const()[name = tensor("add_55_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(804705536)))];
+ tensor add_55_gamma_0_to_fp16 = const()[name = tensor("add_55_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(804709440)))];
+ tensor add_55_beta_0_to_fp16 = const()[name = tensor("add_55_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(804713344)))];
+ tensor add_55_epsilon_0_to_fp16 = const()[name = tensor("add_55_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor add_55_cast = batch_norm(beta = add_55_beta_0_to_fp16, epsilon = add_55_epsilon_0_to_fp16, gamma = add_55_gamma_0_to_fp16, mean = add_55_mean_0_to_fp16, variance = add_55_variance_0_to_fp16, x = reshape_109_cast)[name = tensor("add_55_cast")];
+ tensor input_623_cast = silu(x = add_55_cast)[name = tensor("input_623_cast")];
+ tensor var_10634 = const()[name = tensor("op_10634"), val = tensor([1, 1])];
+ tensor var_10636 = const()[name = tensor("op_10636"), val = tensor([1, 1])];
+ tensor hidden_states_427_pad_type_0 = const()[name = tensor("hidden_states_427_pad_type_0"), val = tensor("custom")];
+ tensor hidden_states_427_pad_0 = const()[name = tensor("hidden_states_427_pad_0"), val = tensor([1, 1, 1, 1])];
+ tensor up_blocks_0_resnets_2_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(804717248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(815776512))), name = tensor("up_blocks_0_resnets_2_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1920, 3, 3])];
+ tensor up_blocks_0_resnets_2_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_2_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(815776640)))];
+ tensor hidden_states_427_cast = conv(bias = up_blocks_0_resnets_2_conv1_bias_to_fp16, dilations = var_10636, groups = var_6865, pad = hidden_states_427_pad_0, pad_type = hidden_states_427_pad_type_0, strides = var_10634, weight = up_blocks_0_resnets_2_conv1_weight_to_fp16_palettized, x = input_623_cast)[name = tensor("hidden_states_427_cast")];
+ tensor var_10642 = const()[name = tensor("op_10642"), val = tensor([1, 1])];
+ tensor var_10644 = const()[name = tensor("op_10644"), val = tensor([1, 1])];
+ tensor temb_21_pad_type_0 = const()[name = tensor("temb_21_pad_type_0"), val = tensor("custom")];
+ tensor temb_21_pad_0 = const()[name = tensor("temb_21_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_resnets_2_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(815779264))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(816598528))), name = tensor("up_blocks_0_resnets_2_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor up_blocks_0_resnets_2_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_2_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(816598656)))];
+ tensor temb_21_cast = conv(bias = up_blocks_0_resnets_2_time_emb_proj_bias_to_fp16, dilations = var_10644, groups = var_6865, pad = temb_21_pad_0, pad_type = temb_21_pad_type_0, strides = var_10642, weight = up_blocks_0_resnets_2_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_21_cast")];
+ tensor input_627_cast = add(x = hidden_states_427_cast, y = temb_21_cast)[name = tensor("input_627_cast")];
+ tensor reshape_112_shape_0 = const()[name = tensor("reshape_112_shape_0"), val = tensor([2, 32, 40, 32, 32])];
+ tensor reshape_112_cast = reshape(shape = reshape_112_shape_0, x = input_627_cast)[name = tensor("reshape_112_cast")];
+ tensor reduce_mean_84_axes_0 = const()[name = tensor("reduce_mean_84_axes_0"), val = tensor([2, 3, 4])];
+ tensor reduce_mean_84_keep_dims_0 = const()[name = tensor("reduce_mean_84_keep_dims_0"), val = tensor(true)];
+ tensor reduce_mean_84_cast = reduce_mean(axes = reduce_mean_84_axes_0, keep_dims = reduce_mean_84_keep_dims_0, x = reshape_112_cast)[name = tensor("reduce_mean_84_cast")];
+ tensor sub_56_cast = sub(x = reshape_112_cast, y = reduce_mean_84_cast)[name = tensor("sub_56_cast")];
+ tensor square_28_cast = square(x = sub_56_cast)[name = tensor("square_28_cast")];
+ tensor reduce_mean_86_axes_0 = const()[name = tensor("reduce_mean_86_axes_0"), val = tensor([2, 3, 4])];
+ tensor reduce_mean_86_keep_dims_0 = const()[name = tensor("reduce_mean_86_keep_dims_0"), val = tensor(true)];
+ tensor reduce_mean_86_cast = reduce_mean(axes = reduce_mean_86_axes_0, keep_dims = reduce_mean_86_keep_dims_0, x = square_28_cast)[name = tensor("reduce_mean_86_cast")];
+ tensor add_56_y_0_to_fp16 = const()[name = tensor("add_56_y_0_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor add_56_cast = add(x = reduce_mean_86_cast, y = add_56_y_0_to_fp16)[name = tensor("add_56_cast")];
+ tensor sqrt_28_cast = sqrt(x = add_56_cast)[name = tensor("sqrt_28_cast")];
+ tensor real_div_28_cast = real_div(x = sub_56_cast, y = sqrt_28_cast)[name = tensor("real_div_28_cast")];
+ tensor reshape_113_shape_0 = const()[name = tensor("reshape_113_shape_0"), val = tensor([2, 1280, 32, 32])];
+ tensor reshape_113_cast = reshape(shape = reshape_113_shape_0, x = real_div_28_cast)[name = tensor("reshape_113_cast")];
+ tensor add_57_gamma_0_to_fp16 = const()[name = tensor("add_57_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(816601280)))];
+ tensor add_57_beta_0_to_fp16 = const()[name = tensor("add_57_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(816603904)))];
+ tensor add_57_epsilon_0_to_fp16 = const()[name = tensor("add_57_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor add_57_cast = batch_norm(beta = add_57_beta_0_to_fp16, epsilon = add_57_epsilon_0_to_fp16, gamma = add_57_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_113_cast)[name = tensor("add_57_cast")];
+ tensor input_631_cast = silu(x = add_57_cast)[name = tensor("input_631_cast")];
+ tensor var_10654 = const()[name = tensor("op_10654"), val = tensor([1, 1])];
+ tensor var_10656 = const()[name = tensor("op_10656"), val = tensor([1, 1])];
+ tensor hidden_states_429_pad_type_0 = const()[name = tensor("hidden_states_429_pad_type_0"), val = tensor("custom")];
+ tensor hidden_states_429_pad_0 = const()[name = tensor("hidden_states_429_pad_0"), val = tensor([1, 1, 1, 1])];
+ tensor up_blocks_0_resnets_2_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(816606528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(823979392))), name = tensor("up_blocks_0_resnets_2_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])];
+ tensor up_blocks_0_resnets_2_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_2_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(823979520)))];
+ tensor hidden_states_429_cast = conv(bias = up_blocks_0_resnets_2_conv2_bias_to_fp16, dilations = var_10656, groups = var_6865, pad = hidden_states_429_pad_0, pad_type = hidden_states_429_pad_type_0, strides = var_10654, weight = up_blocks_0_resnets_2_conv2_weight_to_fp16_palettized, x = input_631_cast)[name = tensor("hidden_states_429_cast")];
+ tensor var_10661 = const()[name = tensor("op_10661"), val = tensor([1, 1])];
+ tensor var_10663 = const()[name = tensor("op_10663"), val = tensor([1, 1])];
+ tensor x_9_pad_type_0 = const()[name = tensor("x_9_pad_type_0"), val = tensor("custom")];
+ tensor x_9_pad_0 = const()[name = tensor("x_9_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_resnets_2_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(823982144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(826439808))), name = tensor("up_blocks_0_resnets_2_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([1280, 1920, 1, 1])];
+ tensor up_blocks_0_resnets_2_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_2_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(826440384)))];
+ tensor x_9_cast = conv(bias = up_blocks_0_resnets_2_conv_shortcut_bias_to_fp16, dilations = var_10663, groups = var_6865, pad = x_9_pad_0, pad_type = x_9_pad_type_0, strides = var_10661, weight = up_blocks_0_resnets_2_conv_shortcut_weight_to_fp16_palettized, x = input_619_cast)[name = tensor("x_9_cast")];
+ tensor hidden_states_431_cast = add(x = x_9_cast, y = hidden_states_429_cast)[name = tensor("hidden_states_431_cast")];
+ tensor reshape_116_shape_0 = const()[name = tensor("reshape_116_shape_0"), val = tensor([2, 32, 40, 32, 32])];
+ tensor reshape_116_cast = reshape(shape = reshape_116_shape_0, x = hidden_states_431_cast)[name = tensor("reshape_116_cast")];
+ tensor reduce_mean_87_axes_0 = const()[name = tensor("reduce_mean_87_axes_0"), val = tensor([2, 3, 4])];
+ tensor reduce_mean_87_keep_dims_0 = const()[name = tensor("reduce_mean_87_keep_dims_0"), val = tensor(true)];
+ tensor reduce_mean_87_cast = reduce_mean(axes = reduce_mean_87_axes_0, keep_dims = reduce_mean_87_keep_dims_0, x = reshape_116_cast)[name = tensor("reduce_mean_87_cast")];
+ tensor sub_58_cast = sub(x = reshape_116_cast, y = reduce_mean_87_cast)[name = tensor("sub_58_cast")];
+ tensor square_29_cast = square(x = sub_58_cast)[name = tensor("square_29_cast")];
+ tensor reduce_mean_89_axes_0 = const()[name = tensor("reduce_mean_89_axes_0"), val = tensor([2, 3, 4])];
+ tensor reduce_mean_89_keep_dims_0 = const()[name = tensor("reduce_mean_89_keep_dims_0"), val = tensor(true)];
+ tensor reduce_mean_89_cast = reduce_mean(axes = reduce_mean_89_axes_0, keep_dims = reduce_mean_89_keep_dims_0, x = square_29_cast)[name = tensor("reduce_mean_89_cast")];
+ tensor add_58_y_0_to_fp16 = const()[name = tensor("add_58_y_0_to_fp16"), val = tensor(0x1.1p-20)];
+ tensor add_58_cast = add(x = reduce_mean_89_cast, y = add_58_y_0_to_fp16)[name = tensor("add_58_cast")];
+ tensor sqrt_29_cast = sqrt(x = add_58_cast)[name = tensor("sqrt_29_cast")];
+ tensor real_div_29_cast = real_div(x = sub_58_cast, y = sqrt_29_cast)[name = tensor("real_div_29_cast")];
+ tensor reshape_117_shape_0 = const()[name = tensor("reshape_117_shape_0"), val = tensor([2, 1280, 32, 32])];
+ tensor reshape_117_cast = reshape(shape = reshape_117_shape_0, x = real_div_29_cast)[name = tensor("reshape_117_cast")];
+ tensor add_59_gamma_0_to_fp16 = const()[name = tensor("add_59_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(826443008)))];
+ tensor add_59_beta_0_to_fp16 = const()[name = tensor("add_59_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(826445632)))];
+ tensor add_59_epsilon_0_to_fp16 = const()[name = tensor("add_59_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor add_59_cast = batch_norm(beta = add_59_beta_0_to_fp16, epsilon = add_59_epsilon_0_to_fp16, gamma = add_59_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_117_cast)[name = tensor("add_59_cast")];
+ tensor var_10701 = const()[name = tensor("op_10701"), val = tensor([1, 1])];
+ tensor var_10703 = const()[name = tensor("op_10703"), val = tensor([1, 1])];
+ tensor hidden_states_433_pad_type_0 = const()[name = tensor("hidden_states_433_pad_type_0"), val = tensor("custom")];
+ tensor hidden_states_433_pad_0 = const()[name = tensor("hidden_states_433_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_2_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(826448256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(827677120))), name = tensor("up_blocks_0_attentions_2_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor up_blocks_0_attentions_2_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(827677312)))];
+ tensor hidden_states_433_cast = conv(bias = up_blocks_0_attentions_2_proj_in_bias_to_fp16, dilations = var_10703, groups = var_6865, pad = hidden_states_433_pad_0, pad_type = hidden_states_433_pad_type_0, strides = var_10701, weight = up_blocks_0_attentions_2_proj_in_weight_to_fp16_palettized, x = add_59_cast)[name = tensor("hidden_states_433_cast")];
+ tensor var_10708 = const()[name = tensor("op_10708"), val = tensor([2, 1280, 1, 1024])];
+ tensor inputs_325_cast = reshape(shape = var_10708, x = hidden_states_433_cast)[name = tensor("inputs_325_cast")];
+ tensor var_10718 = const()[name = tensor("op_10718"), val = tensor([1])];
+ tensor channels_mean_325_cast = reduce_mean(axes = var_10718, keep_dims = var_6860, x = inputs_325_cast)[name = tensor("channels_mean_325_cast")];
+ tensor zero_mean_325_cast = sub(x = inputs_325_cast, y = channels_mean_325_cast)[name = tensor("zero_mean_325_cast")];
+ tensor zero_mean_sq_325_cast = mul(x = zero_mean_325_cast, y = zero_mean_325_cast)[name = tensor("zero_mean_sq_325_cast")];
+ tensor var_10722 = const()[name = tensor("op_10722"), val = tensor([1])];
+ tensor var_10723_cast = reduce_mean(axes = var_10722, keep_dims = var_6860, x = zero_mean_sq_325_cast)[name = tensor("op_10723_cast")];
+ tensor var_10724_to_fp16 = const()[name = tensor("op_10724_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor var_10725_cast = add(x = var_10723_cast, y = var_10724_to_fp16)[name = tensor("op_10725_cast")];
+ tensor denom_325_epsilon_0_to_fp16 = const()[name = tensor("denom_325_epsilon_0_to_fp16"), val = tensor(0x1p-24)];
+ tensor denom_325_cast = rsqrt(epsilon = denom_325_epsilon_0_to_fp16, x = var_10725_cast)[name = tensor("denom_325_cast")];
+ tensor out_325_cast = mul(x = zero_mean_325_cast, y = denom_325_cast)[name = tensor("out_325_cast")];
+ tensor var_10729_to_fp16 = const()[name = tensor("op_10729_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(827679936)))];
+ tensor var_10730_cast = add(x = out_325_cast, y = var_10729_to_fp16)[name = tensor("op_10730_cast")];
+ tensor var_10732_to_fp16 = const()[name = tensor("op_10732_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(827682560)))];
+ tensor hidden_states_435_cast = mul(x = var_10730_cast, y = var_10732_to_fp16)[name = tensor("hidden_states_435_cast")];
+ tensor var_10739 = const()[name = tensor("op_10739"), val = tensor([1, 1])];
+ tensor var_10741 = const()[name = tensor("op_10741"), val = tensor([1, 1])];
+ tensor q_217_pad_type_0 = const()[name = tensor("q_217_pad_type_0"), val = tensor("custom")];
+ tensor q_217_pad_0 = const()[name = tensor("q_217_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(827685184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(828504448))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor q_217_cast = conv(dilations = var_10741, groups = var_6865, pad = q_217_pad_0, pad_type = q_217_pad_type_0, strides = var_10739, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_435_cast)[name = tensor("q_217_cast")];
+ tensor var_10745 = const()[name = tensor("op_10745"), val = tensor([1, 1])];
+ tensor var_10747 = const()[name = tensor("op_10747"), val = tensor([1, 1])];
+ tensor k_217_pad_type_0 = const()[name = tensor("k_217_pad_type_0"), val = tensor("custom")];
+ tensor k_217_pad_0 = const()[name = tensor("k_217_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(828504576))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(829323840))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor k_217_cast = conv(dilations = var_10747, groups = var_6865, pad = k_217_pad_0, pad_type = k_217_pad_type_0, strides = var_10745, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_435_cast)[name = tensor("k_217_cast")];
+ tensor var_10751 = const()[name = tensor("op_10751"), val = tensor([1, 1])];
+ tensor var_10753 = const()[name = tensor("op_10753"), val = tensor([1, 1])];
+ tensor v_217_pad_type_0 = const()[name = tensor("v_217_pad_type_0"), val = tensor("custom")];
+ tensor v_217_pad_0 = const()[name = tensor("v_217_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(829323968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(830143232))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor v_217_cast = conv(dilations = var_10753, groups = var_6865, pad = v_217_pad_0, pad_type = v_217_pad_type_0, strides = var_10751, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_435_cast)[name = tensor("v_217_cast")];
+ tensor var_10757 = const()[name = tensor("op_10757"), val = tensor([2, 20, 64, -1])];
+ tensor var_10758_cast = reshape(shape = var_10757, x = q_217_cast)[name = tensor("op_10758_cast")];
+ tensor var_10759 = const()[name = tensor("op_10759"), val = tensor([2, 20, 64, -1])];
+ tensor var_10760_cast = reshape(shape = var_10759, x = k_217_cast)[name = tensor("op_10760_cast")];
+ tensor var_10761 = const()[name = tensor("op_10761"), val = tensor([2, 20, 64, -1])];
+ tensor var_10762_cast = reshape(shape = var_10761, x = v_217_cast)[name = tensor("op_10762_cast")];
+ tensor attn_weights_433_transpose_x_0 = const()[name = tensor("attn_weights_433_transpose_x_0"), val = tensor(true)];
+ tensor attn_weights_433_transpose_y_0 = const()[name = tensor("attn_weights_433_transpose_y_0"), val = tensor(false)];
+ tensor attn_weights_433_cast = matmul(transpose_x = attn_weights_433_transpose_x_0, transpose_y = attn_weights_433_transpose_y_0, x = var_10758_cast, y = var_10760_cast)[name = tensor("attn_weights_433_cast")];
+ tensor attn_weights_435_cast = mul(x = attn_weights_433_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_435_cast")];
+ tensor var_10766_cast = softmax(axis = var_6849, x = attn_weights_435_cast)[name = tensor("op_10766_cast")];
+ tensor attn_217_transpose_x_0 = const()[name = tensor("attn_217_transpose_x_0"), val = tensor(false)];
+ tensor attn_217_transpose_y_0 = const()[name = tensor("attn_217_transpose_y_0"), val = tensor(true)];
+ tensor attn_217_cast = matmul(transpose_x = attn_217_transpose_x_0, transpose_y = attn_217_transpose_y_0, x = var_10762_cast, y = var_10766_cast)[name = tensor("attn_217_cast")];
+ tensor var_10770 = const()[name = tensor("op_10770"), val = tensor([2, 1280, 1, -1])];
+ tensor input_635_cast = reshape(shape = var_10770, x = attn_217_cast)[name = tensor("input_635_cast")];
+ tensor var_10775 = const()[name = tensor("op_10775"), val = tensor([1, 1])];
+ tensor var_10777 = const()[name = tensor("op_10777"), val = tensor([1, 1])];
+ tensor var_10779_pad_type_0 = const()[name = tensor("op_10779_pad_type_0"), val = tensor("custom")];
+ tensor var_10779_pad_0 = const()[name = tensor("op_10779_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(830143360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(831372224))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(831372416)))];
+ tensor var_10779_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_10777, groups = var_6865, pad = var_10779_pad_0, pad_type = var_10779_pad_type_0, strides = var_10775, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_635_cast)[name = tensor("op_10779_cast")];
+ tensor inputs_327_cast = add(x = var_10779_cast, y = inputs_325_cast)[name = tensor("inputs_327_cast")];
+ tensor var_10783 = const()[name = tensor("op_10783"), val = tensor([1])];
+ tensor channels_mean_327_cast = reduce_mean(axes = var_10783, keep_dims = var_6860, x = inputs_327_cast)[name = tensor("channels_mean_327_cast")];
+ tensor zero_mean_327_cast = sub(x = inputs_327_cast, y = channels_mean_327_cast)[name = tensor("zero_mean_327_cast")];
+ tensor zero_mean_sq_327_cast = mul(x = zero_mean_327_cast, y = zero_mean_327_cast)[name = tensor("zero_mean_sq_327_cast")];
+ tensor var_10787 = const()[name = tensor("op_10787"), val = tensor([1])];
+ tensor var_10788_cast = reduce_mean(axes = var_10787, keep_dims = var_6860, x = zero_mean_sq_327_cast)[name = tensor("op_10788_cast")];
+ tensor var_10789_to_fp16 = const()[name = tensor("op_10789_to_fp16"), val = tensor(0x1.5p-17)];
+ tensor var_10790_cast = add(x = var_10788_cast, y = var_10789_to_fp16)[name = tensor("op_10790_cast")];
+ tensor denom_327_epsilon_0_to_fp16 = const()[name = tensor("denom_327_epsilon_0_to_fp16"), val = tensor(0x1p-24)];
+ tensor denom_327_cast = rsqrt(epsilon = denom_327_epsilon_0_to_fp16, x = var_10790_cast)[name = tensor("denom_327_cast")];
+ tensor out_327_cast = mul(x = zero_mean_327_cast, y = denom_327_cast)[name = tensor("out_327_cast")];
+ tensor var_10794_to_fp16 = const()[name = tensor("op_10794_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(831375040)))];
+ tensor var_10795_cast = add(x = out_327_cast, y = var_10794_to_fp16)[name = tensor("op_10795_cast")];
+ tensor var_10797_to_fp16 = const()[name = tensor("op_10797_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(831377664)))];
+ tensor hidden_states_437_cast = mul(x = var_10795_cast, y = var_10797_to_fp16)[name = tensor("hidden_states_437_cast")];
+ tensor var_10804 = const()[name = tensor("op_10804"), val = tensor([1, 1])];
+ tensor var_10806 = const()[name = tensor("op_10806"), val = tensor([1, 1])];
+ tensor q_219_pad_type_0 = const()[name = tensor("q_219_pad_type_0"), val = tensor("custom")];
+ tensor q_219_pad_0 = const()[name = tensor("q_219_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(831380288))), lut = tensor([-0x1.e68p-7, 0x1.e5cp-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])];
+ tensor q_219_cast = conv(dilations = var_10806, groups = var_6865, pad = q_219_pad_0, pad_type = q_219_pad_type_0, strides = var_10804, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_437_cast)[name = tensor("q_219_cast")];
+ tensor var_10810 = const()[name = tensor("op_10810"), val = tensor([1, 1])];
+ tensor var_10812 = const()[name = tensor("op_10812"), val = tensor([1, 1])];
+ tensor k_219_pad_type_0 = const()[name = tensor("k_219_pad_type_0"), val = tensor("custom")];
+ tensor k_219_pad_0 = const()[name = tensor("k_219_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(831585152))), lut = tensor([-0x1.a8p-6, -0x1.ef8p-8, 0x1.f2cp-8, 0x1.a9p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])];
+ tensor k_219_cast = conv(dilations = var_10812, groups = var_6865, pad = k_219_pad_0, pad_type = k_219_pad_type_0, strides = var_10810, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_219_cast")];
+ tensor var_10816 = const()[name = tensor("op_10816"), val = tensor([1, 1])];
+ tensor var_10818 = const()[name = tensor("op_10818"), val = tensor([1, 1])];
+ tensor v_219_pad_type_0 = const()[name = tensor("v_219_pad_type_0"), val = tensor("custom")];
+ tensor v_219_pad_0 = const()[name = tensor("v_219_pad_0"), val = tensor([0, 0, 0, 0])];
+ tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(832240576))), lut = tensor([-0x1.6c4p-6, -0x1.994p-8, 0x1.9bcp-8, 0x1.6cp-6]), name = 
tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_219_cast = conv(dilations = var_10818, groups = var_6865, pad = v_219_pad_0, pad_type = v_219_pad_type_0, strides = var_10816, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_219_cast")]; + tensor var_10822 = const()[name = tensor("op_10822"), val = tensor([2, 20, 64, -1])]; + tensor var_10823_cast = reshape(shape = var_10822, x = q_219_cast)[name = tensor("op_10823_cast")]; + tensor var_10824 = const()[name = tensor("op_10824"), val = tensor([2, 20, 64, -1])]; + tensor var_10825_cast = reshape(shape = var_10824, x = k_219_cast)[name = tensor("op_10825_cast")]; + tensor var_10826 = const()[name = tensor("op_10826"), val = tensor([2, 20, 64, -1])]; + tensor var_10827_cast = reshape(shape = var_10826, x = v_219_cast)[name = tensor("op_10827_cast")]; + tensor attn_weights_437_transpose_x_0 = const()[name = tensor("attn_weights_437_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_437_transpose_y_0 = const()[name = tensor("attn_weights_437_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_437_cast = matmul(transpose_x = attn_weights_437_transpose_x_0, transpose_y = attn_weights_437_transpose_y_0, x = var_10823_cast, y = var_10825_cast)[name = tensor("attn_weights_437_cast")]; + tensor attn_weights_439_cast = mul(x = attn_weights_437_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_439_cast")]; + tensor var_10831_cast = softmax(axis = var_6849, x = attn_weights_439_cast)[name = tensor("op_10831_cast")]; + tensor attn_219_transpose_x_0 = const()[name = tensor("attn_219_transpose_x_0"), val = tensor(false)]; + tensor attn_219_transpose_y_0 = const()[name = tensor("attn_219_transpose_y_0"), val = tensor(true)]; + tensor attn_219_cast = matmul(transpose_x = attn_219_transpose_x_0, transpose_y = attn_219_transpose_y_0, x = var_10827_cast, y = var_10831_cast)[name = tensor("attn_219_cast")]; + tensor var_10835 = const()[name = tensor("op_10835"), val = tensor([2, 1280, 1, -1])]; + tensor input_637_cast = reshape(shape = var_10835, x = attn_219_cast)[name = tensor("input_637_cast")]; + tensor var_10840 = const()[name = tensor("op_10840"), val = tensor([1, 1])]; + tensor var_10842 = const()[name = tensor("op_10842"), val = tensor([1, 1])]; + tensor var_10844_pad_type_0 = const()[name = tensor("op_10844_pad_type_0"), val = tensor("custom")]; + tensor var_10844_pad_0 = const()[name = tensor("op_10844_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(832896000))), lut = tensor([-0x1.48cp-7, -0x1.828p-9, 0x1.83cp-9, 0x1.49cp-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(833305664)))]; + tensor var_10844_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_10842, groups = var_6865, pad = var_10844_pad_0, pad_type = 
var_10844_pad_type_0, strides = var_10840, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_637_cast)[name = tensor("op_10844_cast")]; + tensor inputs_329_cast = add(x = var_10844_cast, y = inputs_327_cast)[name = tensor("inputs_329_cast")]; + tensor var_10848 = const()[name = tensor("op_10848"), val = tensor([1])]; + tensor channels_mean_329_cast = reduce_mean(axes = var_10848, keep_dims = var_6860, x = inputs_329_cast)[name = tensor("channels_mean_329_cast")]; + tensor zero_mean_329_cast = sub(x = inputs_329_cast, y = channels_mean_329_cast)[name = tensor("zero_mean_329_cast")]; + tensor zero_mean_sq_329_cast = mul(x = zero_mean_329_cast, y = zero_mean_329_cast)[name = tensor("zero_mean_sq_329_cast")]; + tensor var_10852 = const()[name = tensor("op_10852"), val = tensor([1])]; + tensor var_10853_cast = reduce_mean(axes = var_10852, keep_dims = var_6860, x = zero_mean_sq_329_cast)[name = tensor("op_10853_cast")]; + tensor var_10854_to_fp16 = const()[name = tensor("op_10854_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10855_cast = add(x = var_10853_cast, y = var_10854_to_fp16)[name = tensor("op_10855_cast")]; + tensor denom_329_epsilon_0_to_fp16 = const()[name = tensor("denom_329_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_329_cast = rsqrt(epsilon = denom_329_epsilon_0_to_fp16, x = var_10855_cast)[name = tensor("denom_329_cast")]; + tensor out_329_cast = mul(x = zero_mean_329_cast, y = denom_329_cast)[name = tensor("out_329_cast")]; + tensor var_10859_to_fp16 = const()[name = tensor("op_10859_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(833308288)))]; + tensor var_10860_cast = add(x = out_329_cast, y = var_10859_to_fp16)[name = tensor("op_10860_cast")]; + tensor var_10862_to_fp16 = const()[name = tensor("op_10862_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(833310912)))]; + tensor input_639_cast = mul(x = var_10860_cast, y = var_10862_to_fp16)[name = tensor("input_639_cast")]; + tensor var_10870 = const()[name = tensor("op_10870"), val = tensor([1, 1])]; + tensor var_10872 = const()[name = tensor("op_10872"), val = tensor([1, 1])]; + tensor var_10874_pad_type_0 = const()[name = tensor("op_10874_pad_type_0"), val = tensor("custom")]; + tensor var_10874_pad_0 = const()[name = tensor("op_10874_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(833313536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(839867200))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(839867328)))]; + tensor var_10874_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_10872, groups = var_6865, pad = var_10874_pad_0, pad_type = var_10874_pad_type_0, strides = var_10870, weight = up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = 
input_639_cast)[name = tensor("op_10874_cast")]; + tensor var_10875_split_sizes_0 = const()[name = tensor("op_10875_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10875_axis_0 = const()[name = tensor("op_10875_axis_0"), val = tensor(1)]; + tensor var_10875_cast_0, tensor var_10875_cast_1 = split(axis = var_10875_axis_0, split_sizes = var_10875_split_sizes_0, x = var_10874_cast)[name = tensor("op_10875_cast")]; + tensor var_10877_mode_0 = const()[name = tensor("op_10877_mode_0"), val = tensor("EXACT")]; + tensor var_10877_cast = gelu(mode = var_10877_mode_0, x = var_10875_cast_1)[name = tensor("op_10877_cast")]; + tensor input_641_cast = mul(x = var_10875_cast_0, y = var_10877_cast)[name = tensor("input_641_cast")]; + tensor var_10881 = const()[name = tensor("op_10881"), val = tensor([1, 1])]; + tensor var_10883 = const()[name = tensor("op_10883"), val = tensor([1, 1])]; + tensor var_10885_pad_type_0 = const()[name = tensor("op_10885_pad_type_0"), val = tensor("custom")]; + tensor var_10885_pad_0 = const()[name = tensor("op_10885_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(839887872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844803136))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844803328)))]; + tensor var_10885_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_10883, groups = var_6865, pad = var_10885_pad_0, pad_type = var_10885_pad_type_0, strides = var_10881, weight = up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_641_cast)[name = tensor("op_10885_cast")]; + tensor inputs_331_cast = add(x = var_10885_cast, y = inputs_329_cast)[name = tensor("inputs_331_cast")]; + tensor var_10895 = const()[name = tensor("op_10895"), val = tensor([1])]; + tensor channels_mean_331_cast = reduce_mean(axes = var_10895, keep_dims = var_6860, x = inputs_331_cast)[name = tensor("channels_mean_331_cast")]; + tensor zero_mean_331_cast = sub(x = inputs_331_cast, y = channels_mean_331_cast)[name = tensor("zero_mean_331_cast")]; + tensor zero_mean_sq_331_cast = mul(x = zero_mean_331_cast, y = zero_mean_331_cast)[name = tensor("zero_mean_sq_331_cast")]; + tensor var_10899 = const()[name = tensor("op_10899"), val = tensor([1])]; + tensor var_10900_cast = reduce_mean(axes = var_10899, keep_dims = var_6860, x = zero_mean_sq_331_cast)[name = tensor("op_10900_cast")]; + tensor var_10901_to_fp16 = const()[name = tensor("op_10901_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10902_cast = add(x = var_10900_cast, y = var_10901_to_fp16)[name = tensor("op_10902_cast")]; + tensor denom_331_epsilon_0_to_fp16 = const()[name = tensor("denom_331_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_331_cast = rsqrt(epsilon = denom_331_epsilon_0_to_fp16, x = var_10902_cast)[name = tensor("denom_331_cast")]; + tensor out_331_cast = mul(x = zero_mean_331_cast, y = denom_331_cast)[name = tensor("out_331_cast")]; + tensor 
var_10906_to_fp16 = const()[name = tensor("op_10906_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844805952)))]; + tensor var_10907_cast = add(x = out_331_cast, y = var_10906_to_fp16)[name = tensor("op_10907_cast")]; + tensor var_10909_to_fp16 = const()[name = tensor("op_10909_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844808576)))]; + tensor hidden_states_441_cast = mul(x = var_10907_cast, y = var_10909_to_fp16)[name = tensor("hidden_states_441_cast")]; + tensor var_10916 = const()[name = tensor("op_10916"), val = tensor([1, 1])]; + tensor var_10918 = const()[name = tensor("op_10918"), val = tensor([1, 1])]; + tensor q_221_pad_type_0 = const()[name = tensor("q_221_pad_type_0"), val = tensor("custom")]; + tensor q_221_pad_0 = const()[name = tensor("q_221_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844811200))), lut = tensor([-0x1.6b4p-5, -0x1.b1p-7, 0x1.b7cp-7, 0x1.6c4p-5]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_221_cast = conv(dilations = var_10918, groups = var_6865, pad = q_221_pad_0, pad_type = q_221_pad_type_0, strides = var_10916, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_441_cast)[name = tensor("q_221_cast")]; + tensor var_10922 = const()[name = tensor("op_10922"), val = tensor([1, 1])]; + tensor var_10924 = const()[name = tensor("op_10924"), val = tensor([1, 1])]; + tensor k_221_pad_type_0 = const()[name = tensor("k_221_pad_type_0"), val = tensor("custom")]; + tensor k_221_pad_0 = const()[name = tensor("k_221_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(845220864))), lut = tensor([-0x1.6ecp-5, -0x1.ba8p-7, 0x1.b4p-7, 0x1.6c8p-5]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_221_cast = conv(dilations = var_10924, groups = var_6865, pad = k_221_pad_0, pad_type = k_221_pad_type_0, strides = var_10922, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_441_cast)[name = tensor("k_221_cast")]; + tensor var_10928 = const()[name = tensor("op_10928"), val = tensor([1, 1])]; + tensor var_10930 = const()[name = tensor("op_10930"), val = tensor([1, 1])]; + tensor v_221_pad_type_0 = const()[name = tensor("v_221_pad_type_0"), val = tensor("custom")]; + tensor v_221_pad_0 = const()[name = tensor("v_221_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(845630528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(846449792))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_221_cast = conv(dilations = var_10930, groups = 
var_6865, pad = v_221_pad_0, pad_type = v_221_pad_type_0, strides = var_10928, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_441_cast)[name = tensor("v_221_cast")]; + tensor var_10934 = const()[name = tensor("op_10934"), val = tensor([2, 20, 64, -1])]; + tensor var_10935_cast = reshape(shape = var_10934, x = q_221_cast)[name = tensor("op_10935_cast")]; + tensor var_10936 = const()[name = tensor("op_10936"), val = tensor([2, 20, 64, -1])]; + tensor var_10937_cast = reshape(shape = var_10936, x = k_221_cast)[name = tensor("op_10937_cast")]; + tensor var_10938 = const()[name = tensor("op_10938"), val = tensor([2, 20, 64, -1])]; + tensor var_10939_cast = reshape(shape = var_10938, x = v_221_cast)[name = tensor("op_10939_cast")]; + tensor attn_weights_441_transpose_x_0 = const()[name = tensor("attn_weights_441_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_441_transpose_y_0 = const()[name = tensor("attn_weights_441_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_441_cast = matmul(transpose_x = attn_weights_441_transpose_x_0, transpose_y = attn_weights_441_transpose_y_0, x = var_10935_cast, y = var_10937_cast)[name = tensor("attn_weights_441_cast")]; + tensor attn_weights_443_cast = mul(x = attn_weights_441_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_443_cast")]; + tensor var_10943_cast = softmax(axis = var_6849, x = attn_weights_443_cast)[name = tensor("op_10943_cast")]; + tensor attn_221_transpose_x_0 = const()[name = tensor("attn_221_transpose_x_0"), val = tensor(false)]; + tensor attn_221_transpose_y_0 = const()[name = tensor("attn_221_transpose_y_0"), val = tensor(true)]; + tensor attn_221_cast = matmul(transpose_x = attn_221_transpose_x_0, transpose_y = attn_221_transpose_y_0, x = var_10939_cast, y = var_10943_cast)[name = tensor("attn_221_cast")]; + tensor var_10947 = const()[name = tensor("op_10947"), val = tensor([2, 1280, 1, -1])]; + tensor input_643_cast = reshape(shape = var_10947, x = attn_221_cast)[name = tensor("input_643_cast")]; + tensor var_10952 = const()[name = tensor("op_10952"), val = tensor([1, 1])]; + tensor var_10954 = const()[name = tensor("op_10954"), val = tensor([1, 1])]; + tensor var_10956_pad_type_0 = const()[name = tensor("op_10956_pad_type_0"), val = tensor("custom")]; + tensor var_10956_pad_0 = const()[name = tensor("op_10956_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(846449920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847269184))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847269312)))]; + tensor var_10956_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_10954, groups = var_6865, pad = var_10956_pad_0, pad_type = var_10956_pad_type_0, strides = var_10952, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_643_cast)[name = 
tensor("op_10956_cast")]; + tensor inputs_333_cast = add(x = var_10956_cast, y = inputs_331_cast)[name = tensor("inputs_333_cast")]; + tensor var_10960 = const()[name = tensor("op_10960"), val = tensor([1])]; + tensor channels_mean_333_cast = reduce_mean(axes = var_10960, keep_dims = var_6860, x = inputs_333_cast)[name = tensor("channels_mean_333_cast")]; + tensor zero_mean_333_cast = sub(x = inputs_333_cast, y = channels_mean_333_cast)[name = tensor("zero_mean_333_cast")]; + tensor zero_mean_sq_333_cast = mul(x = zero_mean_333_cast, y = zero_mean_333_cast)[name = tensor("zero_mean_sq_333_cast")]; + tensor var_10964 = const()[name = tensor("op_10964"), val = tensor([1])]; + tensor var_10965_cast = reduce_mean(axes = var_10964, keep_dims = var_6860, x = zero_mean_sq_333_cast)[name = tensor("op_10965_cast")]; + tensor var_10966_to_fp16 = const()[name = tensor("op_10966_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10967_cast = add(x = var_10965_cast, y = var_10966_to_fp16)[name = tensor("op_10967_cast")]; + tensor denom_333_epsilon_0_to_fp16 = const()[name = tensor("denom_333_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_333_cast = rsqrt(epsilon = denom_333_epsilon_0_to_fp16, x = var_10967_cast)[name = tensor("denom_333_cast")]; + tensor out_333_cast = mul(x = zero_mean_333_cast, y = denom_333_cast)[name = tensor("out_333_cast")]; + tensor var_10971_to_fp16 = const()[name = tensor("op_10971_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847271936)))]; + tensor var_10972_cast = add(x = out_333_cast, y = var_10971_to_fp16)[name = tensor("op_10972_cast")]; + tensor var_10974_to_fp16 = const()[name = tensor("op_10974_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847274560)))]; + tensor hidden_states_443_cast = mul(x = var_10972_cast, y = var_10974_to_fp16)[name = tensor("hidden_states_443_cast")]; + tensor var_10981 = const()[name = tensor("op_10981"), val = tensor([1, 1])]; + tensor var_10983 = const()[name = tensor("op_10983"), val = tensor([1, 1])]; + tensor q_223_pad_type_0 = const()[name = tensor("q_223_pad_type_0"), val = tensor("custom")]; + tensor q_223_pad_0 = const()[name = tensor("q_223_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847277184))), lut = tensor([-0x1.308p-6, 0x1.2f8p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_223_cast = conv(dilations = var_10983, groups = var_6865, pad = q_223_pad_0, pad_type = q_223_pad_type_0, strides = var_10981, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_443_cast)[name = tensor("q_223_cast")]; + tensor var_10987 = const()[name = tensor("op_10987"), val = tensor([1, 1])]; + tensor var_10989 = const()[name = tensor("op_10989"), val = tensor([1, 1])]; + tensor k_223_pad_type_0 = const()[name = tensor("k_223_pad_type_0"), val = tensor("custom")]; + tensor k_223_pad_0 = const()[name = tensor("k_223_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847482048))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(848792832))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_223_cast = conv(dilations = var_10989, groups = var_6865, pad = k_223_pad_0, pad_type = k_223_pad_type_0, strides = var_10987, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_223_cast")]; + tensor var_10993 = const()[name = tensor("op_10993"), val = tensor([1, 1])]; + tensor var_10995 = const()[name = tensor("op_10995"), val = tensor([1, 1])]; + tensor v_223_pad_type_0 = const()[name = tensor("v_223_pad_type_0"), val = tensor("custom")]; + tensor v_223_pad_0 = const()[name = tensor("v_223_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(848792960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(850103744))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_223_cast = conv(dilations = var_10995, groups = var_6865, pad = v_223_pad_0, pad_type = v_223_pad_type_0, strides = var_10993, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_223_cast")]; + tensor var_10999 = const()[name = tensor("op_10999"), val = tensor([2, 20, 64, -1])]; + tensor var_11000_cast = reshape(shape = var_10999, x = q_223_cast)[name = tensor("op_11000_cast")]; + tensor var_11001 = const()[name = tensor("op_11001"), val = tensor([2, 20, 64, -1])]; + tensor var_11002_cast = reshape(shape = var_11001, x = k_223_cast)[name = tensor("op_11002_cast")]; + tensor var_11003 = const()[name = tensor("op_11003"), val = tensor([2, 20, 64, -1])]; + tensor var_11004_cast = reshape(shape = var_11003, x = v_223_cast)[name = tensor("op_11004_cast")]; + tensor attn_weights_445_transpose_x_0 = const()[name = tensor("attn_weights_445_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_445_transpose_y_0 = const()[name = tensor("attn_weights_445_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_445_cast = matmul(transpose_x = attn_weights_445_transpose_x_0, transpose_y = attn_weights_445_transpose_y_0, x = var_11000_cast, y = var_11002_cast)[name = tensor("attn_weights_445_cast")]; + tensor attn_weights_447_cast = mul(x = attn_weights_445_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_447_cast")]; + tensor var_11008_cast = softmax(axis = var_6849, x = attn_weights_447_cast)[name = tensor("op_11008_cast")]; + tensor attn_223_transpose_x_0 = const()[name = tensor("attn_223_transpose_x_0"), val = tensor(false)]; + tensor attn_223_transpose_y_0 = const()[name = tensor("attn_223_transpose_y_0"), val = tensor(true)]; + tensor attn_223_cast = matmul(transpose_x = attn_223_transpose_x_0, transpose_y = attn_223_transpose_y_0, x = var_11004_cast, y = var_11008_cast)[name = tensor("attn_223_cast")]; + tensor var_11012 = const()[name = tensor("op_11012"), val = tensor([2, 1280, 1, -1])]; + tensor input_645_cast = reshape(shape = var_11012, x = attn_223_cast)[name = tensor("input_645_cast")]; + tensor var_11017 = const()[name = tensor("op_11017"), val = tensor([1, 1])]; + tensor 
var_11019 = const()[name = tensor("op_11019"), val = tensor([1, 1])]; + tensor var_11021_pad_type_0 = const()[name = tensor("op_11021_pad_type_0"), val = tensor("custom")]; + tensor var_11021_pad_0 = const()[name = tensor("op_11021_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(850103872))), lut = tensor([-0x1.014p-6, -0x1.224p-8, 0x1.2p-8, 0x1.00cp-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(850513536)))]; + tensor var_11021_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_11019, groups = var_6865, pad = var_11021_pad_0, pad_type = var_11021_pad_type_0, strides = var_11017, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_645_cast)[name = tensor("op_11021_cast")]; + tensor inputs_335_cast = add(x = var_11021_cast, y = inputs_333_cast)[name = tensor("inputs_335_cast")]; + tensor var_11025 = const()[name = tensor("op_11025"), val = tensor([1])]; + tensor channels_mean_335_cast = reduce_mean(axes = var_11025, keep_dims = var_6860, x = inputs_335_cast)[name = tensor("channels_mean_335_cast")]; + tensor zero_mean_335_cast = sub(x = inputs_335_cast, y = channels_mean_335_cast)[name = tensor("zero_mean_335_cast")]; + tensor zero_mean_sq_335_cast = mul(x = zero_mean_335_cast, y = zero_mean_335_cast)[name = tensor("zero_mean_sq_335_cast")]; + tensor var_11029 = const()[name = tensor("op_11029"), val = tensor([1])]; + tensor var_11030_cast = reduce_mean(axes = var_11029, keep_dims = var_6860, x = zero_mean_sq_335_cast)[name = tensor("op_11030_cast")]; + tensor var_11031_to_fp16 = const()[name = tensor("op_11031_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11032_cast = add(x = var_11030_cast, y = var_11031_to_fp16)[name = tensor("op_11032_cast")]; + tensor denom_335_epsilon_0_to_fp16 = const()[name = tensor("denom_335_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_335_cast = rsqrt(epsilon = denom_335_epsilon_0_to_fp16, x = var_11032_cast)[name = tensor("denom_335_cast")]; + tensor out_335_cast = mul(x = zero_mean_335_cast, y = denom_335_cast)[name = tensor("out_335_cast")]; + tensor var_11036_to_fp16 = const()[name = tensor("op_11036_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(850516160)))]; + tensor var_11037_cast = add(x = out_335_cast, y = var_11036_to_fp16)[name = tensor("op_11037_cast")]; + tensor var_11039_to_fp16 = const()[name = tensor("op_11039_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(850518784)))]; + tensor input_647_cast = mul(x = var_11037_cast, y = var_11039_to_fp16)[name = tensor("input_647_cast")]; + tensor var_11047 = const()[name = tensor("op_11047"), val = tensor([1, 1])]; + tensor var_11049 = const()[name = tensor("op_11049"), val = tensor([1, 1])]; + tensor var_11051_pad_type_0 = const()[name = tensor("op_11051_pad_type_0"), val = tensor("custom")]; + tensor 
var_11051_pad_0 = const()[name = tensor("op_11051_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(850521408))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(857075072))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(857075200)))]; + tensor var_11051_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_11049, groups = var_6865, pad = var_11051_pad_0, pad_type = var_11051_pad_type_0, strides = var_11047, weight = up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_647_cast)[name = tensor("op_11051_cast")]; + tensor var_11052_split_sizes_0 = const()[name = tensor("op_11052_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11052_axis_0 = const()[name = tensor("op_11052_axis_0"), val = tensor(1)]; + tensor var_11052_cast_0, tensor var_11052_cast_1 = split(axis = var_11052_axis_0, split_sizes = var_11052_split_sizes_0, x = var_11051_cast)[name = tensor("op_11052_cast")]; + tensor var_11054_mode_0 = const()[name = tensor("op_11054_mode_0"), val = tensor("EXACT")]; + tensor var_11054_cast = gelu(mode = var_11054_mode_0, x = var_11052_cast_1)[name = tensor("op_11054_cast")]; + tensor input_649_cast = mul(x = var_11052_cast_0, y = var_11054_cast)[name = tensor("input_649_cast")]; + tensor var_11058 = const()[name = tensor("op_11058"), val = tensor([1, 1])]; + tensor var_11060 = const()[name = tensor("op_11060"), val = tensor([1, 1])]; + tensor var_11062_pad_type_0 = const()[name = tensor("op_11062_pad_type_0"), val = tensor("custom")]; + tensor var_11062_pad_0 = const()[name = tensor("op_11062_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(857095744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(862011008))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(862011200)))]; + tensor var_11062_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_11060, groups = var_6865, pad = var_11062_pad_0, pad_type = var_11062_pad_type_0, strides = var_11058, weight = up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_649_cast)[name = tensor("op_11062_cast")]; + tensor inputs_337_cast = add(x = var_11062_cast, y = inputs_335_cast)[name = tensor("inputs_337_cast")]; + tensor var_11072 = const()[name = tensor("op_11072"), val = 
tensor([1])]; + tensor channels_mean_337_cast = reduce_mean(axes = var_11072, keep_dims = var_6860, x = inputs_337_cast)[name = tensor("channels_mean_337_cast")]; + tensor zero_mean_337_cast = sub(x = inputs_337_cast, y = channels_mean_337_cast)[name = tensor("zero_mean_337_cast")]; + tensor zero_mean_sq_337_cast = mul(x = zero_mean_337_cast, y = zero_mean_337_cast)[name = tensor("zero_mean_sq_337_cast")]; + tensor var_11076 = const()[name = tensor("op_11076"), val = tensor([1])]; + tensor var_11077_cast = reduce_mean(axes = var_11076, keep_dims = var_6860, x = zero_mean_sq_337_cast)[name = tensor("op_11077_cast")]; + tensor var_11078_to_fp16 = const()[name = tensor("op_11078_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11079_cast = add(x = var_11077_cast, y = var_11078_to_fp16)[name = tensor("op_11079_cast")]; + tensor denom_337_epsilon_0_to_fp16 = const()[name = tensor("denom_337_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_337_cast = rsqrt(epsilon = denom_337_epsilon_0_to_fp16, x = var_11079_cast)[name = tensor("denom_337_cast")]; + tensor out_337_cast = mul(x = zero_mean_337_cast, y = denom_337_cast)[name = tensor("out_337_cast")]; + tensor var_11083_to_fp16 = const()[name = tensor("op_11083_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(862013824)))]; + tensor var_11084_cast = add(x = out_337_cast, y = var_11083_to_fp16)[name = tensor("op_11084_cast")]; + tensor var_11086_to_fp16 = const()[name = tensor("op_11086_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(862016448)))]; + tensor hidden_states_447_cast = mul(x = var_11084_cast, y = var_11086_to_fp16)[name = tensor("hidden_states_447_cast")]; + tensor var_11093 = const()[name = tensor("op_11093"), val = tensor([1, 1])]; + tensor var_11095 = const()[name = tensor("op_11095"), val = tensor([1, 1])]; + tensor q_225_pad_type_0 = const()[name = tensor("q_225_pad_type_0"), val = tensor("custom")]; + tensor q_225_pad_0 = const()[name = tensor("q_225_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(862019072))), lut = tensor([-0x1.714p-5, -0x1.b9cp-7, 0x1.bd8p-7, 0x1.728p-5]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_225_cast = conv(dilations = var_11095, groups = var_6865, pad = q_225_pad_0, pad_type = q_225_pad_type_0, strides = var_11093, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_447_cast)[name = tensor("q_225_cast")]; + tensor var_11099 = const()[name = tensor("op_11099"), val = tensor([1, 1])]; + tensor var_11101 = const()[name = tensor("op_11101"), val = tensor([1, 1])]; + tensor k_225_pad_type_0 = const()[name = tensor("k_225_pad_type_0"), val = tensor("custom")]; + tensor k_225_pad_0 = const()[name = tensor("k_225_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(862428736))), lut = tensor([-0x1.72cp-5, -0x1.bd8p-7, 0x1.bf4p-7, 0x1.738p-5]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = 
tensor([1280, 1280, 1, 1])]; + tensor k_225_cast = conv(dilations = var_11101, groups = var_6865, pad = k_225_pad_0, pad_type = k_225_pad_type_0, strides = var_11099, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_447_cast)[name = tensor("k_225_cast")]; + tensor var_11105 = const()[name = tensor("op_11105"), val = tensor([1, 1])]; + tensor var_11107 = const()[name = tensor("op_11107"), val = tensor([1, 1])]; + tensor v_225_pad_type_0 = const()[name = tensor("v_225_pad_type_0"), val = tensor("custom")]; + tensor v_225_pad_0 = const()[name = tensor("v_225_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(862838400))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(863657664))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_225_cast = conv(dilations = var_11107, groups = var_6865, pad = v_225_pad_0, pad_type = v_225_pad_type_0, strides = var_11105, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_447_cast)[name = tensor("v_225_cast")]; + tensor var_11111 = const()[name = tensor("op_11111"), val = tensor([2, 20, 64, -1])]; + tensor var_11112_cast = reshape(shape = var_11111, x = q_225_cast)[name = tensor("op_11112_cast")]; + tensor var_11113 = const()[name = tensor("op_11113"), val = tensor([2, 20, 64, -1])]; + tensor var_11114_cast = reshape(shape = var_11113, x = k_225_cast)[name = tensor("op_11114_cast")]; + tensor var_11115 = const()[name = tensor("op_11115"), val = tensor([2, 20, 64, -1])]; + tensor var_11116_cast = reshape(shape = var_11115, x = v_225_cast)[name = tensor("op_11116_cast")]; + tensor attn_weights_449_transpose_x_0 = const()[name = tensor("attn_weights_449_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_449_transpose_y_0 = const()[name = tensor("attn_weights_449_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_449_cast = matmul(transpose_x = attn_weights_449_transpose_x_0, transpose_y = attn_weights_449_transpose_y_0, x = var_11112_cast, y = var_11114_cast)[name = tensor("attn_weights_449_cast")]; + tensor attn_weights_451_cast = mul(x = attn_weights_449_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_451_cast")]; + tensor var_11120_cast = softmax(axis = var_6849, x = attn_weights_451_cast)[name = tensor("op_11120_cast")]; + tensor attn_225_transpose_x_0 = const()[name = tensor("attn_225_transpose_x_0"), val = tensor(false)]; + tensor attn_225_transpose_y_0 = const()[name = tensor("attn_225_transpose_y_0"), val = tensor(true)]; + tensor attn_225_cast = matmul(transpose_x = attn_225_transpose_x_0, transpose_y = attn_225_transpose_y_0, x = var_11116_cast, y = var_11120_cast)[name = tensor("attn_225_cast")]; + tensor var_11124 = const()[name = tensor("op_11124"), val = tensor([2, 1280, 1, -1])]; + tensor input_651_cast = reshape(shape = var_11124, x = attn_225_cast)[name = tensor("input_651_cast")]; + tensor var_11129 = const()[name = tensor("op_11129"), val = tensor([1, 1])]; + tensor var_11131 = const()[name = tensor("op_11131"), val = tensor([1, 1])]; + tensor var_11133_pad_type_0 = const()[name = tensor("op_11133_pad_type_0"), val = tensor("custom")]; + tensor var_11133_pad_0 = 
const()[name = tensor("op_11133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(863657792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(864477056))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(864477184)))]; + tensor var_11133_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_11131, groups = var_6865, pad = var_11133_pad_0, pad_type = var_11133_pad_type_0, strides = var_11129, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_651_cast)[name = tensor("op_11133_cast")]; + tensor inputs_339_cast = add(x = var_11133_cast, y = inputs_337_cast)[name = tensor("inputs_339_cast")]; + tensor var_11137 = const()[name = tensor("op_11137"), val = tensor([1])]; + tensor channels_mean_339_cast = reduce_mean(axes = var_11137, keep_dims = var_6860, x = inputs_339_cast)[name = tensor("channels_mean_339_cast")]; + tensor zero_mean_339_cast = sub(x = inputs_339_cast, y = channels_mean_339_cast)[name = tensor("zero_mean_339_cast")]; + tensor zero_mean_sq_339_cast = mul(x = zero_mean_339_cast, y = zero_mean_339_cast)[name = tensor("zero_mean_sq_339_cast")]; + tensor var_11141 = const()[name = tensor("op_11141"), val = tensor([1])]; + tensor var_11142_cast = reduce_mean(axes = var_11141, keep_dims = var_6860, x = zero_mean_sq_339_cast)[name = tensor("op_11142_cast")]; + tensor var_11143_to_fp16 = const()[name = tensor("op_11143_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11144_cast = add(x = var_11142_cast, y = var_11143_to_fp16)[name = tensor("op_11144_cast")]; + tensor denom_339_epsilon_0_to_fp16 = const()[name = tensor("denom_339_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_339_cast = rsqrt(epsilon = denom_339_epsilon_0_to_fp16, x = var_11144_cast)[name = tensor("denom_339_cast")]; + tensor out_339_cast = mul(x = zero_mean_339_cast, y = denom_339_cast)[name = tensor("out_339_cast")]; + tensor var_11148_to_fp16 = const()[name = tensor("op_11148_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(864479808)))]; + tensor var_11149_cast = add(x = out_339_cast, y = var_11148_to_fp16)[name = tensor("op_11149_cast")]; + tensor var_11151_to_fp16 = const()[name = tensor("op_11151_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(864482432)))]; + tensor hidden_states_449_cast = mul(x = var_11149_cast, y = var_11151_to_fp16)[name = tensor("hidden_states_449_cast")]; + tensor var_11158 = const()[name = tensor("op_11158"), val = tensor([1, 1])]; + tensor var_11160 = const()[name = tensor("op_11160"), val = tensor([1, 1])]; + tensor q_227_pad_type_0 = const()[name = tensor("q_227_pad_type_0"), val = tensor("custom")]; + tensor q_227_pad_0 = const()[name = tensor("q_227_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(864485056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(865304320))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_227_cast = conv(dilations = var_11160, groups = var_6865, pad = q_227_pad_0, pad_type = q_227_pad_type_0, strides = var_11158, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_449_cast)[name = tensor("q_227_cast")]; + tensor var_11164 = const()[name = tensor("op_11164"), val = tensor([1, 1])]; + tensor var_11166 = const()[name = tensor("op_11166"), val = tensor([1, 1])]; + tensor k_227_pad_type_0 = const()[name = tensor("k_227_pad_type_0"), val = tensor("custom")]; + tensor k_227_pad_0 = const()[name = tensor("k_227_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(865304448))), lut = tensor([-0x1.fdcp-6, -0x1.22cp-7, 0x1.24cp-7, 0x1.ff4p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_227_cast = conv(dilations = var_11166, groups = var_6865, pad = k_227_pad_0, pad_type = k_227_pad_type_0, strides = var_11164, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_227_cast")]; + tensor var_11170 = const()[name = tensor("op_11170"), val = tensor([1, 1])]; + tensor var_11172 = const()[name = tensor("op_11172"), val = tensor([1, 1])]; + tensor v_227_pad_type_0 = const()[name = tensor("v_227_pad_type_0"), val = tensor("custom")]; + tensor v_227_pad_0 = const()[name = tensor("v_227_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(865959872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(867270656))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_227_cast = conv(dilations = var_11172, groups = var_6865, pad = v_227_pad_0, pad_type = v_227_pad_type_0, strides = var_11170, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_227_cast")]; + tensor var_11176 = const()[name = tensor("op_11176"), val = tensor([2, 20, 64, -1])]; + tensor var_11177_cast = reshape(shape = var_11176, x = q_227_cast)[name = tensor("op_11177_cast")]; + tensor var_11178 = const()[name = tensor("op_11178"), val = tensor([2, 20, 64, -1])]; + tensor var_11179_cast = reshape(shape = var_11178, x = k_227_cast)[name = tensor("op_11179_cast")]; + tensor var_11180 = const()[name = tensor("op_11180"), val = tensor([2, 20, 64, -1])]; + tensor var_11181_cast = reshape(shape = var_11180, x = v_227_cast)[name = tensor("op_11181_cast")]; + tensor attn_weights_453_transpose_x_0 = const()[name = 
tensor("attn_weights_453_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_453_transpose_y_0 = const()[name = tensor("attn_weights_453_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_453_cast = matmul(transpose_x = attn_weights_453_transpose_x_0, transpose_y = attn_weights_453_transpose_y_0, x = var_11177_cast, y = var_11179_cast)[name = tensor("attn_weights_453_cast")]; + tensor attn_weights_455_cast = mul(x = attn_weights_453_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_455_cast")]; + tensor var_11185_cast = softmax(axis = var_6849, x = attn_weights_455_cast)[name = tensor("op_11185_cast")]; + tensor attn_227_transpose_x_0 = const()[name = tensor("attn_227_transpose_x_0"), val = tensor(false)]; + tensor attn_227_transpose_y_0 = const()[name = tensor("attn_227_transpose_y_0"), val = tensor(true)]; + tensor attn_227_cast = matmul(transpose_x = attn_227_transpose_x_0, transpose_y = attn_227_transpose_y_0, x = var_11181_cast, y = var_11185_cast)[name = tensor("attn_227_cast")]; + tensor var_11189 = const()[name = tensor("op_11189"), val = tensor([2, 1280, 1, -1])]; + tensor input_653_cast = reshape(shape = var_11189, x = attn_227_cast)[name = tensor("input_653_cast")]; + tensor var_11194 = const()[name = tensor("op_11194"), val = tensor([1, 1])]; + tensor var_11196 = const()[name = tensor("op_11196"), val = tensor([1, 1])]; + tensor var_11198_pad_type_0 = const()[name = tensor("op_11198_pad_type_0"), val = tensor("custom")]; + tensor var_11198_pad_0 = const()[name = tensor("op_11198_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(867270784))), lut = tensor([-0x1.11cp-6, -0x1.354p-8, 0x1.34p-8, 0x1.118p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(867680448)))]; + tensor var_11198_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_11196, groups = var_6865, pad = var_11198_pad_0, pad_type = var_11198_pad_type_0, strides = var_11194, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_653_cast)[name = tensor("op_11198_cast")]; + tensor inputs_341_cast = add(x = var_11198_cast, y = inputs_339_cast)[name = tensor("inputs_341_cast")]; + tensor var_11202 = const()[name = tensor("op_11202"), val = tensor([1])]; + tensor channels_mean_341_cast = reduce_mean(axes = var_11202, keep_dims = var_6860, x = inputs_341_cast)[name = tensor("channels_mean_341_cast")]; + tensor zero_mean_341_cast = sub(x = inputs_341_cast, y = channels_mean_341_cast)[name = tensor("zero_mean_341_cast")]; + tensor zero_mean_sq_341_cast = mul(x = zero_mean_341_cast, y = zero_mean_341_cast)[name = tensor("zero_mean_sq_341_cast")]; + tensor var_11206 = const()[name = tensor("op_11206"), val = tensor([1])]; + tensor var_11207_cast = reduce_mean(axes = var_11206, keep_dims = var_6860, x = zero_mean_sq_341_cast)[name = tensor("op_11207_cast")]; + tensor var_11208_to_fp16 = const()[name = tensor("op_11208_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor var_11209_cast = add(x = var_11207_cast, y = var_11208_to_fp16)[name = tensor("op_11209_cast")]; + tensor denom_341_epsilon_0_to_fp16 = const()[name = tensor("denom_341_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_341_cast = rsqrt(epsilon = denom_341_epsilon_0_to_fp16, x = var_11209_cast)[name = tensor("denom_341_cast")]; + tensor out_341_cast = mul(x = zero_mean_341_cast, y = denom_341_cast)[name = tensor("out_341_cast")]; + tensor var_11213_to_fp16 = const()[name = tensor("op_11213_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(867683072)))]; + tensor var_11214_cast = add(x = out_341_cast, y = var_11213_to_fp16)[name = tensor("op_11214_cast")]; + tensor var_11216_to_fp16 = const()[name = tensor("op_11216_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(867685696)))]; + tensor input_655_cast = mul(x = var_11214_cast, y = var_11216_to_fp16)[name = tensor("input_655_cast")]; + tensor var_11224 = const()[name = tensor("op_11224"), val = tensor([1, 1])]; + tensor var_11226 = const()[name = tensor("op_11226"), val = tensor([1, 1])]; + tensor var_11228_pad_type_0 = const()[name = tensor("op_11228_pad_type_0"), val = tensor("custom")]; + tensor var_11228_pad_0 = const()[name = tensor("op_11228_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(867688320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(874241984))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(874242112)))]; + tensor var_11228_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_11226, groups = var_6865, pad = var_11228_pad_0, pad_type = var_11228_pad_type_0, strides = var_11224, weight = up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_655_cast)[name = tensor("op_11228_cast")]; + tensor var_11229_split_sizes_0 = const()[name = tensor("op_11229_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11229_axis_0 = const()[name = tensor("op_11229_axis_0"), val = tensor(1)]; + tensor var_11229_cast_0, tensor var_11229_cast_1 = split(axis = var_11229_axis_0, split_sizes = var_11229_split_sizes_0, x = var_11228_cast)[name = tensor("op_11229_cast")]; + tensor var_11231_mode_0 = const()[name = tensor("op_11231_mode_0"), val = tensor("EXACT")]; + tensor var_11231_cast = gelu(mode = var_11231_mode_0, x = var_11229_cast_1)[name = tensor("op_11231_cast")]; + tensor input_657_cast = mul(x = var_11229_cast_0, y = var_11231_cast)[name = tensor("input_657_cast")]; + tensor var_11235 = const()[name = tensor("op_11235"), val = tensor([1, 1])]; + tensor var_11237 = const()[name = tensor("op_11237"), val = tensor([1, 1])]; + tensor var_11239_pad_type_0 = const()[name = tensor("op_11239_pad_type_0"), val = tensor("custom")]; + tensor var_11239_pad_0 = const()[name = tensor("op_11239_pad_0"), val = 
tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(874262656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(877539520))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(877539648)))]; + tensor var_11239_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_11237, groups = var_6865, pad = var_11239_pad_0, pad_type = var_11239_pad_type_0, strides = var_11235, weight = up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_657_cast)[name = tensor("op_11239_cast")]; + tensor inputs_343_cast = add(x = var_11239_cast, y = inputs_341_cast)[name = tensor("inputs_343_cast")]; + tensor var_11249 = const()[name = tensor("op_11249"), val = tensor([1])]; + tensor channels_mean_343_cast = reduce_mean(axes = var_11249, keep_dims = var_6860, x = inputs_343_cast)[name = tensor("channels_mean_343_cast")]; + tensor zero_mean_343_cast = sub(x = inputs_343_cast, y = channels_mean_343_cast)[name = tensor("zero_mean_343_cast")]; + tensor zero_mean_sq_343_cast = mul(x = zero_mean_343_cast, y = zero_mean_343_cast)[name = tensor("zero_mean_sq_343_cast")]; + tensor var_11253 = const()[name = tensor("op_11253"), val = tensor([1])]; + tensor var_11254_cast = reduce_mean(axes = var_11253, keep_dims = var_6860, x = zero_mean_sq_343_cast)[name = tensor("op_11254_cast")]; + tensor var_11255_to_fp16 = const()[name = tensor("op_11255_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11256_cast = add(x = var_11254_cast, y = var_11255_to_fp16)[name = tensor("op_11256_cast")]; + tensor denom_343_epsilon_0_to_fp16 = const()[name = tensor("denom_343_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_343_cast = rsqrt(epsilon = denom_343_epsilon_0_to_fp16, x = var_11256_cast)[name = tensor("denom_343_cast")]; + tensor out_343_cast = mul(x = zero_mean_343_cast, y = denom_343_cast)[name = tensor("out_343_cast")]; + tensor var_11260_to_fp16 = const()[name = tensor("op_11260_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(877542272)))]; + tensor var_11261_cast = add(x = out_343_cast, y = var_11260_to_fp16)[name = tensor("op_11261_cast")]; + tensor var_11263_to_fp16 = const()[name = tensor("op_11263_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(877544896)))]; + tensor hidden_states_453_cast = mul(x = var_11261_cast, y = var_11263_to_fp16)[name = tensor("hidden_states_453_cast")]; + tensor var_11270 = const()[name = tensor("op_11270"), val = tensor([1, 1])]; + tensor var_11272 = const()[name = tensor("op_11272"), val = tensor([1, 1])]; + tensor q_229_pad_type_0 = const()[name = tensor("q_229_pad_type_0"), val = tensor("custom")]; + tensor q_229_pad_0 = const()[name = tensor("q_229_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(877547520))), lut = tensor([-0x1.744p-5, -0x1.bbcp-7, 0x1.c54p-7, 0x1.764p-5]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_229_cast = conv(dilations = var_11272, groups = var_6865, pad = q_229_pad_0, pad_type = q_229_pad_type_0, strides = var_11270, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_453_cast)[name = tensor("q_229_cast")]; + tensor var_11276 = const()[name = tensor("op_11276"), val = tensor([1, 1])]; + tensor var_11278 = const()[name = tensor("op_11278"), val = tensor([1, 1])]; + tensor k_229_pad_type_0 = const()[name = tensor("k_229_pad_type_0"), val = tensor("custom")]; + tensor k_229_pad_0 = const()[name = tensor("k_229_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(877957184))), lut = tensor([-0x1.768p-5, -0x1.c34p-7, 0x1.c08p-7, 0x1.764p-5]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_229_cast = conv(dilations = var_11278, groups = var_6865, pad = k_229_pad_0, pad_type = k_229_pad_type_0, strides = var_11276, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_453_cast)[name = tensor("k_229_cast")]; + tensor var_11282 = const()[name = tensor("op_11282"), val = tensor([1, 1])]; + tensor var_11284 = const()[name = tensor("op_11284"), val = tensor([1, 1])]; + tensor v_229_pad_type_0 = const()[name = tensor("v_229_pad_type_0"), val = tensor("custom")]; + tensor v_229_pad_0 = const()[name = tensor("v_229_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(878366848))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(879186112))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_229_cast = conv(dilations = var_11284, groups = var_6865, pad = v_229_pad_0, pad_type = v_229_pad_type_0, strides = var_11282, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_453_cast)[name = tensor("v_229_cast")]; + tensor var_11288 = const()[name = tensor("op_11288"), val = tensor([2, 20, 64, -1])]; + tensor var_11289_cast = reshape(shape = var_11288, x = q_229_cast)[name = tensor("op_11289_cast")]; + tensor var_11290 = const()[name = tensor("op_11290"), val = tensor([2, 20, 64, -1])]; + tensor var_11291_cast = reshape(shape = var_11290, x = k_229_cast)[name = tensor("op_11291_cast")]; + tensor var_11292 = const()[name = tensor("op_11292"), val = tensor([2, 20, 64, -1])]; + tensor var_11293_cast = reshape(shape = var_11292, x = v_229_cast)[name = tensor("op_11293_cast")]; + tensor attn_weights_457_transpose_x_0 = const()[name = tensor("attn_weights_457_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_457_transpose_y_0 = const()[name = tensor("attn_weights_457_transpose_y_0"), val = tensor(false)]; + tensor 
attn_weights_457_cast = matmul(transpose_x = attn_weights_457_transpose_x_0, transpose_y = attn_weights_457_transpose_y_0, x = var_11289_cast, y = var_11291_cast)[name = tensor("attn_weights_457_cast")]; + tensor attn_weights_459_cast = mul(x = attn_weights_457_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_459_cast")]; + tensor var_11297_cast = softmax(axis = var_6849, x = attn_weights_459_cast)[name = tensor("op_11297_cast")]; + tensor attn_229_transpose_x_0 = const()[name = tensor("attn_229_transpose_x_0"), val = tensor(false)]; + tensor attn_229_transpose_y_0 = const()[name = tensor("attn_229_transpose_y_0"), val = tensor(true)]; + tensor attn_229_cast = matmul(transpose_x = attn_229_transpose_x_0, transpose_y = attn_229_transpose_y_0, x = var_11293_cast, y = var_11297_cast)[name = tensor("attn_229_cast")]; + tensor var_11301 = const()[name = tensor("op_11301"), val = tensor([2, 1280, 1, -1])]; + tensor input_659_cast = reshape(shape = var_11301, x = attn_229_cast)[name = tensor("input_659_cast")]; + tensor var_11306 = const()[name = tensor("op_11306"), val = tensor([1, 1])]; + tensor var_11308 = const()[name = tensor("op_11308"), val = tensor([1, 1])]; + tensor var_11310_pad_type_0 = const()[name = tensor("op_11310_pad_type_0"), val = tensor("custom")]; + tensor var_11310_pad_0 = const()[name = tensor("op_11310_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(879186240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(880415104))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(880415296)))]; + tensor var_11310_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_11308, groups = var_6865, pad = var_11310_pad_0, pad_type = var_11310_pad_type_0, strides = var_11306, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_659_cast)[name = tensor("op_11310_cast")]; + tensor inputs_345_cast = add(x = var_11310_cast, y = inputs_343_cast)[name = tensor("inputs_345_cast")]; + tensor var_11314 = const()[name = tensor("op_11314"), val = tensor([1])]; + tensor channels_mean_345_cast = reduce_mean(axes = var_11314, keep_dims = var_6860, x = inputs_345_cast)[name = tensor("channels_mean_345_cast")]; + tensor zero_mean_345_cast = sub(x = inputs_345_cast, y = channels_mean_345_cast)[name = tensor("zero_mean_345_cast")]; + tensor zero_mean_sq_345_cast = mul(x = zero_mean_345_cast, y = zero_mean_345_cast)[name = tensor("zero_mean_sq_345_cast")]; + tensor var_11318 = const()[name = tensor("op_11318"), val = tensor([1])]; + tensor var_11319_cast = reduce_mean(axes = var_11318, keep_dims = var_6860, x = zero_mean_sq_345_cast)[name = tensor("op_11319_cast")]; + tensor var_11320_to_fp16 = const()[name = tensor("op_11320_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11321_cast = add(x = var_11319_cast, y = var_11320_to_fp16)[name = tensor("op_11321_cast")]; + tensor 
denom_345_epsilon_0_to_fp16 = const()[name = tensor("denom_345_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_345_cast = rsqrt(epsilon = denom_345_epsilon_0_to_fp16, x = var_11321_cast)[name = tensor("denom_345_cast")]; + tensor out_345_cast = mul(x = zero_mean_345_cast, y = denom_345_cast)[name = tensor("out_345_cast")]; + tensor var_11325_to_fp16 = const()[name = tensor("op_11325_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(880417920)))]; + tensor var_11326_cast = add(x = out_345_cast, y = var_11325_to_fp16)[name = tensor("op_11326_cast")]; + tensor var_11328_to_fp16 = const()[name = tensor("op_11328_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(880420544)))]; + tensor hidden_states_455_cast = mul(x = var_11326_cast, y = var_11328_to_fp16)[name = tensor("hidden_states_455_cast")]; + tensor var_11335 = const()[name = tensor("op_11335"), val = tensor([1, 1])]; + tensor var_11337 = const()[name = tensor("op_11337"), val = tensor([1, 1])]; + tensor q_231_pad_type_0 = const()[name = tensor("q_231_pad_type_0"), val = tensor("custom")]; + tensor q_231_pad_0 = const()[name = tensor("q_231_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(880423168))), lut = tensor([-0x1.0bp-6, 0x1.0a4p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_231_cast = conv(dilations = var_11337, groups = var_6865, pad = q_231_pad_0, pad_type = q_231_pad_type_0, strides = var_11335, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_455_cast)[name = tensor("q_231_cast")]; + tensor var_11341 = const()[name = tensor("op_11341"), val = tensor([1, 1])]; + tensor var_11343 = const()[name = tensor("op_11343"), val = tensor([1, 1])]; + tensor k_231_pad_type_0 = const()[name = tensor("k_231_pad_type_0"), val = tensor("custom")]; + tensor k_231_pad_0 = const()[name = tensor("k_231_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(880628032))), lut = tensor([-0x1.b3cp-6, -0x1.f7cp-8, 0x1.fb8p-8, 0x1.b6p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_231_cast = conv(dilations = var_11343, groups = var_6865, pad = k_231_pad_0, pad_type = k_231_pad_type_0, strides = var_11341, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_231_cast")]; + tensor var_11347 = const()[name = tensor("op_11347"), val = tensor([1, 1])]; + tensor var_11349 = const()[name = tensor("op_11349"), val = tensor([1, 1])]; + tensor v_231_pad_type_0 = const()[name = tensor("v_231_pad_type_0"), val = tensor("custom")]; + tensor v_231_pad_0 = const()[name = tensor("v_231_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(881283456))), lut = tensor([-0x1.ce4p-6, -0x1.fa4p-8, 0x1.f78p-8, 0x1.cd8p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_231_cast = conv(dilations = var_11349, groups = var_6865, pad = v_231_pad_0, pad_type = v_231_pad_type_0, strides = var_11347, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_231_cast")]; + tensor var_11353 = const()[name = tensor("op_11353"), val = tensor([2, 20, 64, -1])]; + tensor var_11354_cast = reshape(shape = var_11353, x = q_231_cast)[name = tensor("op_11354_cast")]; + tensor var_11355 = const()[name = tensor("op_11355"), val = tensor([2, 20, 64, -1])]; + tensor var_11356_cast = reshape(shape = var_11355, x = k_231_cast)[name = tensor("op_11356_cast")]; + tensor var_11357 = const()[name = tensor("op_11357"), val = tensor([2, 20, 64, -1])]; + tensor var_11358_cast = reshape(shape = var_11357, x = v_231_cast)[name = tensor("op_11358_cast")]; + tensor attn_weights_461_transpose_x_0 = const()[name = tensor("attn_weights_461_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_461_transpose_y_0 = const()[name = tensor("attn_weights_461_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_461_cast = matmul(transpose_x = attn_weights_461_transpose_x_0, transpose_y = attn_weights_461_transpose_y_0, x = var_11354_cast, y = var_11356_cast)[name = tensor("attn_weights_461_cast")]; + tensor attn_weights_463_cast = mul(x = attn_weights_461_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_463_cast")]; + tensor var_11362_cast = softmax(axis = var_6849, x = attn_weights_463_cast)[name = tensor("op_11362_cast")]; + tensor attn_231_transpose_x_0 = const()[name = tensor("attn_231_transpose_x_0"), val = tensor(false)]; + tensor attn_231_transpose_y_0 = const()[name = tensor("attn_231_transpose_y_0"), val = tensor(true)]; + tensor attn_231_cast = matmul(transpose_x = attn_231_transpose_x_0, transpose_y = attn_231_transpose_y_0, x = var_11358_cast, y = var_11362_cast)[name = tensor("attn_231_cast")]; + tensor var_11366 = const()[name = tensor("op_11366"), val = tensor([2, 1280, 1, -1])]; + tensor input_661_cast = reshape(shape = var_11366, x = attn_231_cast)[name = tensor("input_661_cast")]; + tensor var_11371 = const()[name = tensor("op_11371"), val = tensor([1, 1])]; + tensor var_11373 = const()[name = tensor("op_11373"), val = tensor([1, 1])]; + tensor var_11375_pad_type_0 = const()[name = tensor("op_11375_pad_type_0"), val = tensor("custom")]; + tensor var_11375_pad_0 = const()[name = tensor("op_11375_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(881938880))), lut = tensor([-0x1.bap-7, -0x1.008p-8, 0x1.008p-8, 0x1.bbp-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(882348544)))]; + tensor var_11375_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, 
dilations = var_11373, groups = var_6865, pad = var_11375_pad_0, pad_type = var_11375_pad_type_0, strides = var_11371, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_661_cast)[name = tensor("op_11375_cast")]; + tensor inputs_347_cast = add(x = var_11375_cast, y = inputs_345_cast)[name = tensor("inputs_347_cast")]; + tensor var_11379 = const()[name = tensor("op_11379"), val = tensor([1])]; + tensor channels_mean_347_cast = reduce_mean(axes = var_11379, keep_dims = var_6860, x = inputs_347_cast)[name = tensor("channels_mean_347_cast")]; + tensor zero_mean_347_cast = sub(x = inputs_347_cast, y = channels_mean_347_cast)[name = tensor("zero_mean_347_cast")]; + tensor zero_mean_sq_347_cast = mul(x = zero_mean_347_cast, y = zero_mean_347_cast)[name = tensor("zero_mean_sq_347_cast")]; + tensor var_11383 = const()[name = tensor("op_11383"), val = tensor([1])]; + tensor var_11384_cast = reduce_mean(axes = var_11383, keep_dims = var_6860, x = zero_mean_sq_347_cast)[name = tensor("op_11384_cast")]; + tensor var_11385_to_fp16 = const()[name = tensor("op_11385_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11386_cast = add(x = var_11384_cast, y = var_11385_to_fp16)[name = tensor("op_11386_cast")]; + tensor denom_347_epsilon_0_to_fp16 = const()[name = tensor("denom_347_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_347_cast = rsqrt(epsilon = denom_347_epsilon_0_to_fp16, x = var_11386_cast)[name = tensor("denom_347_cast")]; + tensor out_347_cast = mul(x = zero_mean_347_cast, y = denom_347_cast)[name = tensor("out_347_cast")]; + tensor var_11390_to_fp16 = const()[name = tensor("op_11390_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(882351168)))]; + tensor var_11391_cast = add(x = out_347_cast, y = var_11390_to_fp16)[name = tensor("op_11391_cast")]; + tensor var_11393_to_fp16 = const()[name = tensor("op_11393_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(882353792)))]; + tensor input_663_cast = mul(x = var_11391_cast, y = var_11393_to_fp16)[name = tensor("input_663_cast")]; + tensor var_11401 = const()[name = tensor("op_11401"), val = tensor([1, 1])]; + tensor var_11403 = const()[name = tensor("op_11403"), val = tensor([1, 1])]; + tensor var_11405_pad_type_0 = const()[name = tensor("op_11405_pad_type_0"), val = tensor("custom")]; + tensor var_11405_pad_0 = const()[name = tensor("op_11405_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(882356416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(888910080))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(888910208)))]; + tensor var_11405_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_11403, groups = var_6865, pad = var_11405_pad_0, pad_type = var_11405_pad_type_0, strides = var_11401, weight = 
up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_663_cast)[name = tensor("op_11405_cast")]; + tensor var_11406_split_sizes_0 = const()[name = tensor("op_11406_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11406_axis_0 = const()[name = tensor("op_11406_axis_0"), val = tensor(1)]; + tensor var_11406_cast_0, tensor var_11406_cast_1 = split(axis = var_11406_axis_0, split_sizes = var_11406_split_sizes_0, x = var_11405_cast)[name = tensor("op_11406_cast")]; + tensor var_11408_mode_0 = const()[name = tensor("op_11408_mode_0"), val = tensor("EXACT")]; + tensor var_11408_cast = gelu(mode = var_11408_mode_0, x = var_11406_cast_1)[name = tensor("op_11408_cast")]; + tensor input_665_cast = mul(x = var_11406_cast_0, y = var_11408_cast)[name = tensor("input_665_cast")]; + tensor var_11412 = const()[name = tensor("op_11412"), val = tensor([1, 1])]; + tensor var_11414 = const()[name = tensor("op_11414"), val = tensor([1, 1])]; + tensor var_11416_pad_type_0 = const()[name = tensor("op_11416_pad_type_0"), val = tensor("custom")]; + tensor var_11416_pad_0 = const()[name = tensor("op_11416_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(888930752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(892207616))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(892207744)))]; + tensor var_11416_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_11414, groups = var_6865, pad = var_11416_pad_0, pad_type = var_11416_pad_type_0, strides = var_11412, weight = up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_665_cast)[name = tensor("op_11416_cast")]; + tensor inputs_349_cast = add(x = var_11416_cast, y = inputs_347_cast)[name = tensor("inputs_349_cast")]; + tensor var_11426 = const()[name = tensor("op_11426"), val = tensor([1])]; + tensor channels_mean_349_cast = reduce_mean(axes = var_11426, keep_dims = var_6860, x = inputs_349_cast)[name = tensor("channels_mean_349_cast")]; + tensor zero_mean_349_cast = sub(x = inputs_349_cast, y = channels_mean_349_cast)[name = tensor("zero_mean_349_cast")]; + tensor zero_mean_sq_349_cast = mul(x = zero_mean_349_cast, y = zero_mean_349_cast)[name = tensor("zero_mean_sq_349_cast")]; + tensor var_11430 = const()[name = tensor("op_11430"), val = tensor([1])]; + tensor var_11431_cast = reduce_mean(axes = var_11430, keep_dims = var_6860, x = zero_mean_sq_349_cast)[name = tensor("op_11431_cast")]; + tensor var_11432_to_fp16 = const()[name = tensor("op_11432_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11433_cast = add(x = var_11431_cast, y = var_11432_to_fp16)[name = tensor("op_11433_cast")]; + tensor denom_349_epsilon_0_to_fp16 = const()[name = tensor("denom_349_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_349_cast = rsqrt(epsilon = denom_349_epsilon_0_to_fp16, x = var_11433_cast)[name = tensor("denom_349_cast")]; + tensor out_349_cast = 
mul(x = zero_mean_349_cast, y = denom_349_cast)[name = tensor("out_349_cast")]; + tensor var_11437_to_fp16 = const()[name = tensor("op_11437_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(892210368)))]; + tensor var_11438_cast = add(x = out_349_cast, y = var_11437_to_fp16)[name = tensor("op_11438_cast")]; + tensor var_11440_to_fp16 = const()[name = tensor("op_11440_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(892212992)))]; + tensor hidden_states_459_cast = mul(x = var_11438_cast, y = var_11440_to_fp16)[name = tensor("hidden_states_459_cast")]; + tensor var_11447 = const()[name = tensor("op_11447"), val = tensor([1, 1])]; + tensor var_11449 = const()[name = tensor("op_11449"), val = tensor([1, 1])]; + tensor q_233_pad_type_0 = const()[name = tensor("q_233_pad_type_0"), val = tensor("custom")]; + tensor q_233_pad_0 = const()[name = tensor("q_233_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(892215616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893034880))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_233_cast = conv(dilations = var_11449, groups = var_6865, pad = q_233_pad_0, pad_type = q_233_pad_type_0, strides = var_11447, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_459_cast)[name = tensor("q_233_cast")]; + tensor var_11453 = const()[name = tensor("op_11453"), val = tensor([1, 1])]; + tensor var_11455 = const()[name = tensor("op_11455"), val = tensor([1, 1])]; + tensor k_233_pad_type_0 = const()[name = tensor("k_233_pad_type_0"), val = tensor("custom")]; + tensor k_233_pad_0 = const()[name = tensor("k_233_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893035008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893854272))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_233_cast = conv(dilations = var_11455, groups = var_6865, pad = k_233_pad_0, pad_type = k_233_pad_type_0, strides = var_11453, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_459_cast)[name = tensor("k_233_cast")]; + tensor var_11459 = const()[name = tensor("op_11459"), val = tensor([1, 1])]; + tensor var_11461 = const()[name = tensor("op_11461"), val = tensor([1, 1])]; + tensor v_233_pad_type_0 = const()[name = tensor("v_233_pad_type_0"), val = tensor("custom")]; + tensor v_233_pad_0 = const()[name = tensor("v_233_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893854400))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(894673664))), name = 
tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_233_cast = conv(dilations = var_11461, groups = var_6865, pad = v_233_pad_0, pad_type = v_233_pad_type_0, strides = var_11459, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_459_cast)[name = tensor("v_233_cast")]; + tensor var_11465 = const()[name = tensor("op_11465"), val = tensor([2, 20, 64, -1])]; + tensor var_11466_cast = reshape(shape = var_11465, x = q_233_cast)[name = tensor("op_11466_cast")]; + tensor var_11467 = const()[name = tensor("op_11467"), val = tensor([2, 20, 64, -1])]; + tensor var_11468_cast = reshape(shape = var_11467, x = k_233_cast)[name = tensor("op_11468_cast")]; + tensor var_11469 = const()[name = tensor("op_11469"), val = tensor([2, 20, 64, -1])]; + tensor var_11470_cast = reshape(shape = var_11469, x = v_233_cast)[name = tensor("op_11470_cast")]; + tensor attn_weights_465_transpose_x_0 = const()[name = tensor("attn_weights_465_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_465_transpose_y_0 = const()[name = tensor("attn_weights_465_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_465_cast = matmul(transpose_x = attn_weights_465_transpose_x_0, transpose_y = attn_weights_465_transpose_y_0, x = var_11466_cast, y = var_11468_cast)[name = tensor("attn_weights_465_cast")]; + tensor attn_weights_467_cast = mul(x = attn_weights_465_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_467_cast")]; + tensor var_11474_cast = softmax(axis = var_6849, x = attn_weights_467_cast)[name = tensor("op_11474_cast")]; + tensor attn_233_transpose_x_0 = const()[name = tensor("attn_233_transpose_x_0"), val = tensor(false)]; + tensor attn_233_transpose_y_0 = const()[name = tensor("attn_233_transpose_y_0"), val = tensor(true)]; + tensor attn_233_cast = matmul(transpose_x = attn_233_transpose_x_0, transpose_y = attn_233_transpose_y_0, x = var_11470_cast, y = var_11474_cast)[name = tensor("attn_233_cast")]; + tensor var_11478 = const()[name = tensor("op_11478"), val = tensor([2, 1280, 1, -1])]; + tensor input_667_cast = reshape(shape = var_11478, x = attn_233_cast)[name = tensor("input_667_cast")]; + tensor var_11483 = const()[name = tensor("op_11483"), val = tensor([1, 1])]; + tensor var_11485 = const()[name = tensor("op_11485"), val = tensor([1, 1])]; + tensor var_11487_pad_type_0 = const()[name = tensor("op_11487_pad_type_0"), val = tensor("custom")]; + tensor var_11487_pad_0 = const()[name = tensor("op_11487_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(894673792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(895902656))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(895902848)))]; + tensor var_11487_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_11485, groups = var_6865, pad = 
var_11487_pad_0, pad_type = var_11487_pad_type_0, strides = var_11483, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_667_cast)[name = tensor("op_11487_cast")]; + tensor inputs_351_cast = add(x = var_11487_cast, y = inputs_349_cast)[name = tensor("inputs_351_cast")]; + tensor var_11491 = const()[name = tensor("op_11491"), val = tensor([1])]; + tensor channels_mean_351_cast = reduce_mean(axes = var_11491, keep_dims = var_6860, x = inputs_351_cast)[name = tensor("channels_mean_351_cast")]; + tensor zero_mean_351_cast = sub(x = inputs_351_cast, y = channels_mean_351_cast)[name = tensor("zero_mean_351_cast")]; + tensor zero_mean_sq_351_cast = mul(x = zero_mean_351_cast, y = zero_mean_351_cast)[name = tensor("zero_mean_sq_351_cast")]; + tensor var_11495 = const()[name = tensor("op_11495"), val = tensor([1])]; + tensor var_11496_cast = reduce_mean(axes = var_11495, keep_dims = var_6860, x = zero_mean_sq_351_cast)[name = tensor("op_11496_cast")]; + tensor var_11497_to_fp16 = const()[name = tensor("op_11497_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11498_cast = add(x = var_11496_cast, y = var_11497_to_fp16)[name = tensor("op_11498_cast")]; + tensor denom_351_epsilon_0_to_fp16 = const()[name = tensor("denom_351_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_351_cast = rsqrt(epsilon = denom_351_epsilon_0_to_fp16, x = var_11498_cast)[name = tensor("denom_351_cast")]; + tensor out_351_cast = mul(x = zero_mean_351_cast, y = denom_351_cast)[name = tensor("out_351_cast")]; + tensor var_11502_to_fp16 = const()[name = tensor("op_11502_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(895905472)))]; + tensor var_11503_cast = add(x = out_351_cast, y = var_11502_to_fp16)[name = tensor("op_11503_cast")]; + tensor var_11505_to_fp16 = const()[name = tensor("op_11505_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(895908096)))]; + tensor hidden_states_461_cast = mul(x = var_11503_cast, y = var_11505_to_fp16)[name = tensor("hidden_states_461_cast")]; + tensor var_11512 = const()[name = tensor("op_11512"), val = tensor([1, 1])]; + tensor var_11514 = const()[name = tensor("op_11514"), val = tensor([1, 1])]; + tensor q_235_pad_type_0 = const()[name = tensor("q_235_pad_type_0"), val = tensor("custom")]; + tensor q_235_pad_0 = const()[name = tensor("q_235_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(895910720))), lut = tensor([-0x1.eb4p-7, 0x1.eacp-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_235_cast = conv(dilations = var_11514, groups = var_6865, pad = q_235_pad_0, pad_type = q_235_pad_type_0, strides = var_11512, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_461_cast)[name = tensor("q_235_cast")]; + tensor var_11518 = const()[name = tensor("op_11518"), val = tensor([1, 1])]; + tensor var_11520 = const()[name = tensor("op_11520"), val = tensor([1, 1])]; + tensor k_235_pad_type_0 = const()[name = tensor("k_235_pad_type_0"), val = tensor("custom")]; + tensor k_235_pad_0 = const()[name = tensor("k_235_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(896115584))), lut = tensor([-0x1.718p-6, -0x1.b18p-8, 0x1.b68p-8, 0x1.728p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_235_cast = conv(dilations = var_11520, groups = var_6865, pad = k_235_pad_0, pad_type = k_235_pad_type_0, strides = var_11518, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_235_cast")]; + tensor var_11524 = const()[name = tensor("op_11524"), val = tensor([1, 1])]; + tensor var_11526 = const()[name = tensor("op_11526"), val = tensor([1, 1])]; + tensor v_235_pad_type_0 = const()[name = tensor("v_235_pad_type_0"), val = tensor("custom")]; + tensor v_235_pad_0 = const()[name = tensor("v_235_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(896771008))), lut = tensor([-0x1.a6p-6, -0x1.d14p-8, 0x1.cccp-8, 0x1.a48p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_235_cast = conv(dilations = var_11526, groups = var_6865, pad = v_235_pad_0, pad_type = v_235_pad_type_0, strides = var_11524, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_235_cast")]; + tensor var_11530 = const()[name = tensor("op_11530"), val = tensor([2, 20, 64, -1])]; + tensor var_11531_cast = reshape(shape = var_11530, x = q_235_cast)[name = tensor("op_11531_cast")]; + tensor var_11532 = const()[name = tensor("op_11532"), val = tensor([2, 20, 64, -1])]; + tensor var_11533_cast = reshape(shape = var_11532, x = k_235_cast)[name = tensor("op_11533_cast")]; + tensor var_11534 = const()[name = tensor("op_11534"), val = tensor([2, 20, 64, -1])]; + tensor var_11535_cast = reshape(shape = var_11534, x = v_235_cast)[name = tensor("op_11535_cast")]; + tensor attn_weights_469_transpose_x_0 = const()[name = tensor("attn_weights_469_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_469_transpose_y_0 = const()[name = tensor("attn_weights_469_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_469_cast = matmul(transpose_x = attn_weights_469_transpose_x_0, transpose_y = attn_weights_469_transpose_y_0, x = var_11531_cast, y = var_11533_cast)[name = tensor("attn_weights_469_cast")]; + tensor attn_weights_471_cast = mul(x = attn_weights_469_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_471_cast")]; + tensor var_11539_cast = softmax(axis = var_6849, x = attn_weights_471_cast)[name = tensor("op_11539_cast")]; + tensor attn_235_transpose_x_0 = const()[name = tensor("attn_235_transpose_x_0"), val = tensor(false)]; + tensor attn_235_transpose_y_0 = const()[name = tensor("attn_235_transpose_y_0"), val = tensor(true)]; + tensor attn_235_cast = matmul(transpose_x = attn_235_transpose_x_0, transpose_y = attn_235_transpose_y_0, x = var_11535_cast, y = var_11539_cast)[name = tensor("attn_235_cast")]; + tensor var_11543 = const()[name = tensor("op_11543"), val = tensor([2, 1280, 1, -1])]; + tensor input_669_cast = reshape(shape = 
var_11543, x = attn_235_cast)[name = tensor("input_669_cast")]; + tensor var_11548 = const()[name = tensor("op_11548"), val = tensor([1, 1])]; + tensor var_11550 = const()[name = tensor("op_11550"), val = tensor([1, 1])]; + tensor var_11552_pad_type_0 = const()[name = tensor("op_11552_pad_type_0"), val = tensor("custom")]; + tensor var_11552_pad_0 = const()[name = tensor("op_11552_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(897426432))), lut = tensor([-0x1.8a8p-7, -0x1.d14p-9, 0x1.cf4p-9, 0x1.8a4p-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(897836096)))]; + tensor var_11552_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_11550, groups = var_6865, pad = var_11552_pad_0, pad_type = var_11552_pad_type_0, strides = var_11548, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_669_cast)[name = tensor("op_11552_cast")]; + tensor inputs_353_cast = add(x = var_11552_cast, y = inputs_351_cast)[name = tensor("inputs_353_cast")]; + tensor var_11556 = const()[name = tensor("op_11556"), val = tensor([1])]; + tensor channels_mean_353_cast = reduce_mean(axes = var_11556, keep_dims = var_6860, x = inputs_353_cast)[name = tensor("channels_mean_353_cast")]; + tensor zero_mean_353_cast = sub(x = inputs_353_cast, y = channels_mean_353_cast)[name = tensor("zero_mean_353_cast")]; + tensor zero_mean_sq_353_cast = mul(x = zero_mean_353_cast, y = zero_mean_353_cast)[name = tensor("zero_mean_sq_353_cast")]; + tensor var_11560 = const()[name = tensor("op_11560"), val = tensor([1])]; + tensor var_11561_cast = reduce_mean(axes = var_11560, keep_dims = var_6860, x = zero_mean_sq_353_cast)[name = tensor("op_11561_cast")]; + tensor var_11562_to_fp16 = const()[name = tensor("op_11562_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11563_cast = add(x = var_11561_cast, y = var_11562_to_fp16)[name = tensor("op_11563_cast")]; + tensor denom_353_epsilon_0_to_fp16 = const()[name = tensor("denom_353_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_353_cast = rsqrt(epsilon = denom_353_epsilon_0_to_fp16, x = var_11563_cast)[name = tensor("denom_353_cast")]; + tensor out_353_cast = mul(x = zero_mean_353_cast, y = denom_353_cast)[name = tensor("out_353_cast")]; + tensor var_11567_to_fp16 = const()[name = tensor("op_11567_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(897838720)))]; + tensor var_11568_cast = add(x = out_353_cast, y = var_11567_to_fp16)[name = tensor("op_11568_cast")]; + tensor var_11570_to_fp16 = const()[name = tensor("op_11570_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(897841344)))]; + tensor input_671_cast = mul(x = var_11568_cast, y = var_11570_to_fp16)[name = tensor("input_671_cast")]; + tensor var_11578 = const()[name = tensor("op_11578"), val = tensor([1, 1])]; + tensor var_11580 = const()[name = 
tensor("op_11580"), val = tensor([1, 1])]; + tensor var_11582_pad_type_0 = const()[name = tensor("op_11582_pad_type_0"), val = tensor("custom")]; + tensor var_11582_pad_0 = const()[name = tensor("op_11582_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(897843968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(904397632))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(904397760)))]; + tensor var_11582_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_11580, groups = var_6865, pad = var_11582_pad_0, pad_type = var_11582_pad_type_0, strides = var_11578, weight = up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_671_cast)[name = tensor("op_11582_cast")]; + tensor var_11583_split_sizes_0 = const()[name = tensor("op_11583_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11583_axis_0 = const()[name = tensor("op_11583_axis_0"), val = tensor(1)]; + tensor var_11583_cast_0, tensor var_11583_cast_1 = split(axis = var_11583_axis_0, split_sizes = var_11583_split_sizes_0, x = var_11582_cast)[name = tensor("op_11583_cast")]; + tensor var_11585_mode_0 = const()[name = tensor("op_11585_mode_0"), val = tensor("EXACT")]; + tensor var_11585_cast = gelu(mode = var_11585_mode_0, x = var_11583_cast_1)[name = tensor("op_11585_cast")]; + tensor input_673_cast = mul(x = var_11583_cast_0, y = var_11585_cast)[name = tensor("input_673_cast")]; + tensor var_11589 = const()[name = tensor("op_11589"), val = tensor([1, 1])]; + tensor var_11591 = const()[name = tensor("op_11591"), val = tensor([1, 1])]; + tensor var_11593_pad_type_0 = const()[name = tensor("op_11593_pad_type_0"), val = tensor("custom")]; + tensor var_11593_pad_0 = const()[name = tensor("op_11593_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(904418304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(907695168))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(907695296)))]; + tensor var_11593_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_11591, groups = var_6865, pad = var_11593_pad_0, pad_type = var_11593_pad_type_0, strides = var_11589, weight = up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_673_cast)[name = tensor("op_11593_cast")]; + tensor inputs_355_cast = 
add(x = var_11593_cast, y = inputs_353_cast)[name = tensor("inputs_355_cast")]; + tensor var_11603 = const()[name = tensor("op_11603"), val = tensor([1])]; + tensor channels_mean_355_cast = reduce_mean(axes = var_11603, keep_dims = var_6860, x = inputs_355_cast)[name = tensor("channels_mean_355_cast")]; + tensor zero_mean_355_cast = sub(x = inputs_355_cast, y = channels_mean_355_cast)[name = tensor("zero_mean_355_cast")]; + tensor zero_mean_sq_355_cast = mul(x = zero_mean_355_cast, y = zero_mean_355_cast)[name = tensor("zero_mean_sq_355_cast")]; + tensor var_11607 = const()[name = tensor("op_11607"), val = tensor([1])]; + tensor var_11608_cast = reduce_mean(axes = var_11607, keep_dims = var_6860, x = zero_mean_sq_355_cast)[name = tensor("op_11608_cast")]; + tensor var_11609_to_fp16 = const()[name = tensor("op_11609_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11610_cast = add(x = var_11608_cast, y = var_11609_to_fp16)[name = tensor("op_11610_cast")]; + tensor denom_355_epsilon_0_to_fp16 = const()[name = tensor("denom_355_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_355_cast = rsqrt(epsilon = denom_355_epsilon_0_to_fp16, x = var_11610_cast)[name = tensor("denom_355_cast")]; + tensor out_355_cast = mul(x = zero_mean_355_cast, y = denom_355_cast)[name = tensor("out_355_cast")]; + tensor var_11614_to_fp16 = const()[name = tensor("op_11614_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(907697920)))]; + tensor var_11615_cast = add(x = out_355_cast, y = var_11614_to_fp16)[name = tensor("op_11615_cast")]; + tensor var_11617_to_fp16 = const()[name = tensor("op_11617_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(907700544)))]; + tensor hidden_states_465_cast = mul(x = var_11615_cast, y = var_11617_to_fp16)[name = tensor("hidden_states_465_cast")]; + tensor var_11624 = const()[name = tensor("op_11624"), val = tensor([1, 1])]; + tensor var_11626 = const()[name = tensor("op_11626"), val = tensor([1, 1])]; + tensor q_237_pad_type_0 = const()[name = tensor("q_237_pad_type_0"), val = tensor("custom")]; + tensor q_237_pad_0 = const()[name = tensor("q_237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(907703168))), lut = tensor([-0x1.7c4p-5, -0x1.c9p-7, 0x1.ca8p-7, 0x1.7d4p-5]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_237_cast = conv(dilations = var_11626, groups = var_6865, pad = q_237_pad_0, pad_type = q_237_pad_type_0, strides = var_11624, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_465_cast)[name = tensor("q_237_cast")]; + tensor var_11630 = const()[name = tensor("op_11630"), val = tensor([1, 1])]; + tensor var_11632 = const()[name = tensor("op_11632"), val = tensor([1, 1])]; + tensor k_237_pad_type_0 = const()[name = tensor("k_237_pad_type_0"), val = tensor("custom")]; + tensor k_237_pad_0 = const()[name = tensor("k_237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908112832))), lut = tensor([-0x1.7d4p-5, 
-0x1.cbp-7, 0x1.c9p-7, 0x1.7d8p-5]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_237_cast = conv(dilations = var_11632, groups = var_6865, pad = k_237_pad_0, pad_type = k_237_pad_type_0, strides = var_11630, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_465_cast)[name = tensor("k_237_cast")]; + tensor var_11636 = const()[name = tensor("op_11636"), val = tensor([1, 1])]; + tensor var_11638 = const()[name = tensor("op_11638"), val = tensor([1, 1])]; + tensor v_237_pad_type_0 = const()[name = tensor("v_237_pad_type_0"), val = tensor("custom")]; + tensor v_237_pad_0 = const()[name = tensor("v_237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908522496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(909341760))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_237_cast = conv(dilations = var_11638, groups = var_6865, pad = v_237_pad_0, pad_type = v_237_pad_type_0, strides = var_11636, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_465_cast)[name = tensor("v_237_cast")]; + tensor var_11642 = const()[name = tensor("op_11642"), val = tensor([2, 20, 64, -1])]; + tensor var_11643_cast = reshape(shape = var_11642, x = q_237_cast)[name = tensor("op_11643_cast")]; + tensor var_11644 = const()[name = tensor("op_11644"), val = tensor([2, 20, 64, -1])]; + tensor var_11645_cast = reshape(shape = var_11644, x = k_237_cast)[name = tensor("op_11645_cast")]; + tensor var_11646 = const()[name = tensor("op_11646"), val = tensor([2, 20, 64, -1])]; + tensor var_11647_cast = reshape(shape = var_11646, x = v_237_cast)[name = tensor("op_11647_cast")]; + tensor attn_weights_473_transpose_x_0 = const()[name = tensor("attn_weights_473_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_473_transpose_y_0 = const()[name = tensor("attn_weights_473_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_473_cast = matmul(transpose_x = attn_weights_473_transpose_x_0, transpose_y = attn_weights_473_transpose_y_0, x = var_11643_cast, y = var_11645_cast)[name = tensor("attn_weights_473_cast")]; + tensor attn_weights_475_cast = mul(x = attn_weights_473_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_475_cast")]; + tensor var_11651_cast = softmax(axis = var_6849, x = attn_weights_475_cast)[name = tensor("op_11651_cast")]; + tensor attn_237_transpose_x_0 = const()[name = tensor("attn_237_transpose_x_0"), val = tensor(false)]; + tensor attn_237_transpose_y_0 = const()[name = tensor("attn_237_transpose_y_0"), val = tensor(true)]; + tensor attn_237_cast = matmul(transpose_x = attn_237_transpose_x_0, transpose_y = attn_237_transpose_y_0, x = var_11647_cast, y = var_11651_cast)[name = tensor("attn_237_cast")]; + tensor var_11655 = const()[name = tensor("op_11655"), val = tensor([2, 1280, 1, -1])]; + tensor input_675_cast = reshape(shape = var_11655, x = attn_237_cast)[name = tensor("input_675_cast")]; + tensor var_11660 = const()[name = tensor("op_11660"), val = tensor([1, 1])]; + tensor var_11662 = const()[name = tensor("op_11662"), val = 
tensor([1, 1])]; + tensor var_11664_pad_type_0 = const()[name = tensor("op_11664_pad_type_0"), val = tensor("custom")]; + tensor var_11664_pad_0 = const()[name = tensor("op_11664_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(909341888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(910161152))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(910161280)))]; + tensor var_11664_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_11662, groups = var_6865, pad = var_11664_pad_0, pad_type = var_11664_pad_type_0, strides = var_11660, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_675_cast)[name = tensor("op_11664_cast")]; + tensor inputs_357_cast = add(x = var_11664_cast, y = inputs_355_cast)[name = tensor("inputs_357_cast")]; + tensor var_11668 = const()[name = tensor("op_11668"), val = tensor([1])]; + tensor channels_mean_357_cast = reduce_mean(axes = var_11668, keep_dims = var_6860, x = inputs_357_cast)[name = tensor("channels_mean_357_cast")]; + tensor zero_mean_357_cast = sub(x = inputs_357_cast, y = channels_mean_357_cast)[name = tensor("zero_mean_357_cast")]; + tensor zero_mean_sq_357_cast = mul(x = zero_mean_357_cast, y = zero_mean_357_cast)[name = tensor("zero_mean_sq_357_cast")]; + tensor var_11672 = const()[name = tensor("op_11672"), val = tensor([1])]; + tensor var_11673_cast = reduce_mean(axes = var_11672, keep_dims = var_6860, x = zero_mean_sq_357_cast)[name = tensor("op_11673_cast")]; + tensor var_11674_to_fp16 = const()[name = tensor("op_11674_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11675_cast = add(x = var_11673_cast, y = var_11674_to_fp16)[name = tensor("op_11675_cast")]; + tensor denom_357_epsilon_0_to_fp16 = const()[name = tensor("denom_357_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_357_cast = rsqrt(epsilon = denom_357_epsilon_0_to_fp16, x = var_11675_cast)[name = tensor("denom_357_cast")]; + tensor out_357_cast = mul(x = zero_mean_357_cast, y = denom_357_cast)[name = tensor("out_357_cast")]; + tensor var_11679_to_fp16 = const()[name = tensor("op_11679_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(910163904)))]; + tensor var_11680_cast = add(x = out_357_cast, y = var_11679_to_fp16)[name = tensor("op_11680_cast")]; + tensor var_11682_to_fp16 = const()[name = tensor("op_11682_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(910166528)))]; + tensor hidden_states_467_cast = mul(x = var_11680_cast, y = var_11682_to_fp16)[name = tensor("hidden_states_467_cast")]; + tensor var_11689 = const()[name = tensor("op_11689"), val = tensor([1, 1])]; + tensor var_11691 = const()[name = tensor("op_11691"), val = tensor([1, 1])]; + tensor q_239_pad_type_0 = const()[name = tensor("q_239_pad_type_0"), val = tensor("custom")]; + tensor q_239_pad_0 = 
const()[name = tensor("q_239_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(910169152))), lut = tensor([-0x1.d3cp-7, 0x1.d34p-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_239_cast = conv(dilations = var_11691, groups = var_6865, pad = q_239_pad_0, pad_type = q_239_pad_type_0, strides = var_11689, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_467_cast)[name = tensor("q_239_cast")]; + tensor var_11695 = const()[name = tensor("op_11695"), val = tensor([1, 1])]; + tensor var_11697 = const()[name = tensor("op_11697"), val = tensor([1, 1])]; + tensor k_239_pad_type_0 = const()[name = tensor("k_239_pad_type_0"), val = tensor("custom")]; + tensor k_239_pad_0 = const()[name = tensor("k_239_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(910374016))), lut = tensor([-0x1.50cp-6, -0x1.90cp-8, 0x1.8e4p-8, 0x1.504p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_239_cast = conv(dilations = var_11697, groups = var_6865, pad = k_239_pad_0, pad_type = k_239_pad_type_0, strides = var_11695, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_239_cast")]; + tensor var_11701 = const()[name = tensor("op_11701"), val = tensor([1, 1])]; + tensor var_11703 = const()[name = tensor("op_11703"), val = tensor([1, 1])]; + tensor v_239_pad_type_0 = const()[name = tensor("v_239_pad_type_0"), val = tensor("custom")]; + tensor v_239_pad_0 = const()[name = tensor("v_239_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(911029440))), lut = tensor([-0x1.80cp-6, -0x1.ac8p-8, 0x1.b34p-8, 0x1.82cp-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_239_cast = conv(dilations = var_11703, groups = var_6865, pad = v_239_pad_0, pad_type = v_239_pad_type_0, strides = var_11701, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_239_cast")]; + tensor var_11707 = const()[name = tensor("op_11707"), val = tensor([2, 20, 64, -1])]; + tensor var_11708_cast = reshape(shape = var_11707, x = q_239_cast)[name = tensor("op_11708_cast")]; + tensor var_11709 = const()[name = tensor("op_11709"), val = tensor([2, 20, 64, -1])]; + tensor var_11710_cast = reshape(shape = var_11709, x = k_239_cast)[name = tensor("op_11710_cast")]; + tensor var_11711 = const()[name = tensor("op_11711"), val = tensor([2, 20, 64, -1])]; + tensor var_11712_cast = reshape(shape = var_11711, x = v_239_cast)[name = tensor("op_11712_cast")]; + tensor attn_weights_477_transpose_x_0 = const()[name = tensor("attn_weights_477_transpose_x_0"), val = 
tensor(true)]; + tensor attn_weights_477_transpose_y_0 = const()[name = tensor("attn_weights_477_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_477_cast = matmul(transpose_x = attn_weights_477_transpose_x_0, transpose_y = attn_weights_477_transpose_y_0, x = var_11708_cast, y = var_11710_cast)[name = tensor("attn_weights_477_cast")]; + tensor attn_weights_479_cast = mul(x = attn_weights_477_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_479_cast")]; + tensor var_11716_cast = softmax(axis = var_6849, x = attn_weights_479_cast)[name = tensor("op_11716_cast")]; + tensor attn_239_transpose_x_0 = const()[name = tensor("attn_239_transpose_x_0"), val = tensor(false)]; + tensor attn_239_transpose_y_0 = const()[name = tensor("attn_239_transpose_y_0"), val = tensor(true)]; + tensor attn_239_cast = matmul(transpose_x = attn_239_transpose_x_0, transpose_y = attn_239_transpose_y_0, x = var_11712_cast, y = var_11716_cast)[name = tensor("attn_239_cast")]; + tensor var_11720 = const()[name = tensor("op_11720"), val = tensor([2, 1280, 1, -1])]; + tensor input_677_cast = reshape(shape = var_11720, x = attn_239_cast)[name = tensor("input_677_cast")]; + tensor var_11725 = const()[name = tensor("op_11725"), val = tensor([1, 1])]; + tensor var_11727 = const()[name = tensor("op_11727"), val = tensor([1, 1])]; + tensor var_11729_pad_type_0 = const()[name = tensor("op_11729_pad_type_0"), val = tensor("custom")]; + tensor var_11729_pad_0 = const()[name = tensor("op_11729_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(911684864))), lut = tensor([-0x1.7e8p-8, 0x1.7fp-8]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(911889728)))]; + tensor var_11729_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_11727, groups = var_6865, pad = var_11729_pad_0, pad_type = var_11729_pad_type_0, strides = var_11725, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_677_cast)[name = tensor("op_11729_cast")]; + tensor inputs_359_cast = add(x = var_11729_cast, y = inputs_357_cast)[name = tensor("inputs_359_cast")]; + tensor var_11733 = const()[name = tensor("op_11733"), val = tensor([1])]; + tensor channels_mean_359_cast = reduce_mean(axes = var_11733, keep_dims = var_6860, x = inputs_359_cast)[name = tensor("channels_mean_359_cast")]; + tensor zero_mean_359_cast = sub(x = inputs_359_cast, y = channels_mean_359_cast)[name = tensor("zero_mean_359_cast")]; + tensor zero_mean_sq_359_cast = mul(x = zero_mean_359_cast, y = zero_mean_359_cast)[name = tensor("zero_mean_sq_359_cast")]; + tensor var_11737 = const()[name = tensor("op_11737"), val = tensor([1])]; + tensor var_11738_cast = reduce_mean(axes = var_11737, keep_dims = var_6860, x = zero_mean_sq_359_cast)[name = tensor("op_11738_cast")]; + tensor var_11739_to_fp16 = const()[name = tensor("op_11739_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11740_cast = add(x = var_11738_cast, y = 
var_11739_to_fp16)[name = tensor("op_11740_cast")]; + tensor denom_359_epsilon_0_to_fp16 = const()[name = tensor("denom_359_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_359_cast = rsqrt(epsilon = denom_359_epsilon_0_to_fp16, x = var_11740_cast)[name = tensor("denom_359_cast")]; + tensor out_359_cast = mul(x = zero_mean_359_cast, y = denom_359_cast)[name = tensor("out_359_cast")]; + tensor var_11744_to_fp16 = const()[name = tensor("op_11744_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(911892352)))]; + tensor var_11745_cast = add(x = out_359_cast, y = var_11744_to_fp16)[name = tensor("op_11745_cast")]; + tensor var_11747_to_fp16 = const()[name = tensor("op_11747_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(911894976)))]; + tensor input_679_cast = mul(x = var_11745_cast, y = var_11747_to_fp16)[name = tensor("input_679_cast")]; + tensor var_11755 = const()[name = tensor("op_11755"), val = tensor([1, 1])]; + tensor var_11757 = const()[name = tensor("op_11757"), val = tensor([1, 1])]; + tensor var_11759_pad_type_0 = const()[name = tensor("op_11759_pad_type_0"), val = tensor("custom")]; + tensor var_11759_pad_0 = const()[name = tensor("op_11759_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(911897600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(918451264))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(918451392)))]; + tensor var_11759_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_11757, groups = var_6865, pad = var_11759_pad_0, pad_type = var_11759_pad_type_0, strides = var_11755, weight = up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_679_cast)[name = tensor("op_11759_cast")]; + tensor var_11760_split_sizes_0 = const()[name = tensor("op_11760_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11760_axis_0 = const()[name = tensor("op_11760_axis_0"), val = tensor(1)]; + tensor var_11760_cast_0, tensor var_11760_cast_1 = split(axis = var_11760_axis_0, split_sizes = var_11760_split_sizes_0, x = var_11759_cast)[name = tensor("op_11760_cast")]; + tensor var_11762_mode_0 = const()[name = tensor("op_11762_mode_0"), val = tensor("EXACT")]; + tensor var_11762_cast = gelu(mode = var_11762_mode_0, x = var_11760_cast_1)[name = tensor("op_11762_cast")]; + tensor input_681_cast = mul(x = var_11760_cast_0, y = var_11762_cast)[name = tensor("input_681_cast")]; + tensor var_11766 = const()[name = tensor("op_11766"), val = tensor([1, 1])]; + tensor var_11768 = const()[name = tensor("op_11768"), val = tensor([1, 1])]; + tensor var_11770_pad_type_0 = const()[name = tensor("op_11770_pad_type_0"), val = tensor("custom")]; + tensor var_11770_pad_0 = const()[name = tensor("op_11770_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(918471936))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(921748800))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(921748928)))]; + tensor var_11770_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_11768, groups = var_6865, pad = var_11770_pad_0, pad_type = var_11770_pad_type_0, strides = var_11766, weight = up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_681_cast)[name = tensor("op_11770_cast")]; + tensor inputs_361_cast = add(x = var_11770_cast, y = inputs_359_cast)[name = tensor("inputs_361_cast")]; + tensor var_11780 = const()[name = tensor("op_11780"), val = tensor([1])]; + tensor channels_mean_361_cast = reduce_mean(axes = var_11780, keep_dims = var_6860, x = inputs_361_cast)[name = tensor("channels_mean_361_cast")]; + tensor zero_mean_361_cast = sub(x = inputs_361_cast, y = channels_mean_361_cast)[name = tensor("zero_mean_361_cast")]; + tensor zero_mean_sq_361_cast = mul(x = zero_mean_361_cast, y = zero_mean_361_cast)[name = tensor("zero_mean_sq_361_cast")]; + tensor var_11784 = const()[name = tensor("op_11784"), val = tensor([1])]; + tensor var_11785_cast = reduce_mean(axes = var_11784, keep_dims = var_6860, x = zero_mean_sq_361_cast)[name = tensor("op_11785_cast")]; + tensor var_11786_to_fp16 = const()[name = tensor("op_11786_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11787_cast = add(x = var_11785_cast, y = var_11786_to_fp16)[name = tensor("op_11787_cast")]; + tensor denom_361_epsilon_0_to_fp16 = const()[name = tensor("denom_361_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_361_cast = rsqrt(epsilon = denom_361_epsilon_0_to_fp16, x = var_11787_cast)[name = tensor("denom_361_cast")]; + tensor out_361_cast = mul(x = zero_mean_361_cast, y = denom_361_cast)[name = tensor("out_361_cast")]; + tensor var_11791_to_fp16 = const()[name = tensor("op_11791_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(921751552)))]; + tensor var_11792_cast = add(x = out_361_cast, y = var_11791_to_fp16)[name = tensor("op_11792_cast")]; + tensor var_11794_to_fp16 = const()[name = tensor("op_11794_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(921754176)))]; + tensor hidden_states_471_cast = mul(x = var_11792_cast, y = var_11794_to_fp16)[name = tensor("hidden_states_471_cast")]; + tensor var_11801 = const()[name = tensor("op_11801"), val = tensor([1, 1])]; + tensor var_11803 = const()[name = tensor("op_11803"), val = tensor([1, 1])]; + tensor q_241_pad_type_0 = const()[name = tensor("q_241_pad_type_0"), val = tensor("custom")]; + tensor q_241_pad_0 = const()[name = tensor("q_241_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(921756800))), lut = tensor([-0x1.7cp-5, -0x1.c7cp-7, 0x1.cc4p-7, 0x1.7ccp-5]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_241_cast = conv(dilations = var_11803, groups = var_6865, pad = q_241_pad_0, pad_type = q_241_pad_type_0, strides = var_11801, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_471_cast)[name = tensor("q_241_cast")]; + tensor var_11807 = const()[name = tensor("op_11807"), val = tensor([1, 1])]; + tensor var_11809 = const()[name = tensor("op_11809"), val = tensor([1, 1])]; + tensor k_241_pad_type_0 = const()[name = tensor("k_241_pad_type_0"), val = tensor("custom")]; + tensor k_241_pad_0 = const()[name = tensor("k_241_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(922166464))), lut = tensor([-0x1.7c4p-5, -0x1.c78p-7, 0x1.cccp-7, 0x1.7d8p-5]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_241_cast = conv(dilations = var_11809, groups = var_6865, pad = k_241_pad_0, pad_type = k_241_pad_type_0, strides = var_11807, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_471_cast)[name = tensor("k_241_cast")]; + tensor var_11813 = const()[name = tensor("op_11813"), val = tensor([1, 1])]; + tensor var_11815 = const()[name = tensor("op_11815"), val = tensor([1, 1])]; + tensor v_241_pad_type_0 = const()[name = tensor("v_241_pad_type_0"), val = tensor("custom")]; + tensor v_241_pad_0 = const()[name = tensor("v_241_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(922576128))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(923395392))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_241_cast = conv(dilations = var_11815, groups = var_6865, pad = v_241_pad_0, pad_type = v_241_pad_type_0, strides = var_11813, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_471_cast)[name = tensor("v_241_cast")]; + tensor var_11819 = const()[name = tensor("op_11819"), val = tensor([2, 20, 64, -1])]; + tensor var_11820_cast = reshape(shape = var_11819, x = q_241_cast)[name = tensor("op_11820_cast")]; + tensor var_11821 = const()[name = tensor("op_11821"), val = tensor([2, 20, 64, -1])]; + tensor var_11822_cast = reshape(shape = var_11821, x = k_241_cast)[name = tensor("op_11822_cast")]; + tensor var_11823 = const()[name = tensor("op_11823"), val = tensor([2, 20, 64, -1])]; + tensor var_11824_cast = reshape(shape = var_11823, x = v_241_cast)[name = tensor("op_11824_cast")]; + tensor attn_weights_481_transpose_x_0 = const()[name = tensor("attn_weights_481_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_481_transpose_y_0 = const()[name = tensor("attn_weights_481_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_481_cast = matmul(transpose_x = 
attn_weights_481_transpose_x_0, transpose_y = attn_weights_481_transpose_y_0, x = var_11820_cast, y = var_11822_cast)[name = tensor("attn_weights_481_cast")]; + tensor attn_weights_483_cast = mul(x = attn_weights_481_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_483_cast")]; + tensor var_11828_cast = softmax(axis = var_6849, x = attn_weights_483_cast)[name = tensor("op_11828_cast")]; + tensor attn_241_transpose_x_0 = const()[name = tensor("attn_241_transpose_x_0"), val = tensor(false)]; + tensor attn_241_transpose_y_0 = const()[name = tensor("attn_241_transpose_y_0"), val = tensor(true)]; + tensor attn_241_cast = matmul(transpose_x = attn_241_transpose_x_0, transpose_y = attn_241_transpose_y_0, x = var_11824_cast, y = var_11828_cast)[name = tensor("attn_241_cast")]; + tensor var_11832 = const()[name = tensor("op_11832"), val = tensor([2, 1280, 1, -1])]; + tensor input_683_cast = reshape(shape = var_11832, x = attn_241_cast)[name = tensor("input_683_cast")]; + tensor var_11837 = const()[name = tensor("op_11837"), val = tensor([1, 1])]; + tensor var_11839 = const()[name = tensor("op_11839"), val = tensor([1, 1])]; + tensor var_11841_pad_type_0 = const()[name = tensor("op_11841_pad_type_0"), val = tensor("custom")]; + tensor var_11841_pad_0 = const()[name = tensor("op_11841_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(923395520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(924214784))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(924214912)))]; + tensor var_11841_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_11839, groups = var_6865, pad = var_11841_pad_0, pad_type = var_11841_pad_type_0, strides = var_11837, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_683_cast)[name = tensor("op_11841_cast")]; + tensor inputs_363_cast = add(x = var_11841_cast, y = inputs_361_cast)[name = tensor("inputs_363_cast")]; + tensor var_11845 = const()[name = tensor("op_11845"), val = tensor([1])]; + tensor channels_mean_363_cast = reduce_mean(axes = var_11845, keep_dims = var_6860, x = inputs_363_cast)[name = tensor("channels_mean_363_cast")]; + tensor zero_mean_363_cast = sub(x = inputs_363_cast, y = channels_mean_363_cast)[name = tensor("zero_mean_363_cast")]; + tensor zero_mean_sq_363_cast = mul(x = zero_mean_363_cast, y = zero_mean_363_cast)[name = tensor("zero_mean_sq_363_cast")]; + tensor var_11849 = const()[name = tensor("op_11849"), val = tensor([1])]; + tensor var_11850_cast = reduce_mean(axes = var_11849, keep_dims = var_6860, x = zero_mean_sq_363_cast)[name = tensor("op_11850_cast")]; + tensor var_11851_to_fp16 = const()[name = tensor("op_11851_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11852_cast = add(x = var_11850_cast, y = var_11851_to_fp16)[name = tensor("op_11852_cast")]; + tensor denom_363_epsilon_0_to_fp16 = const()[name = 
tensor("denom_363_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_363_cast = rsqrt(epsilon = denom_363_epsilon_0_to_fp16, x = var_11852_cast)[name = tensor("denom_363_cast")]; + tensor out_363_cast = mul(x = zero_mean_363_cast, y = denom_363_cast)[name = tensor("out_363_cast")]; + tensor var_11856_to_fp16 = const()[name = tensor("op_11856_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(924217536)))]; + tensor var_11857_cast = add(x = out_363_cast, y = var_11856_to_fp16)[name = tensor("op_11857_cast")]; + tensor var_11859_to_fp16 = const()[name = tensor("op_11859_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(924220160)))]; + tensor hidden_states_473_cast = mul(x = var_11857_cast, y = var_11859_to_fp16)[name = tensor("hidden_states_473_cast")]; + tensor var_11866 = const()[name = tensor("op_11866"), val = tensor([1, 1])]; + tensor var_11868 = const()[name = tensor("op_11868"), val = tensor([1, 1])]; + tensor q_243_pad_type_0 = const()[name = tensor("q_243_pad_type_0"), val = tensor("custom")]; + tensor q_243_pad_0 = const()[name = tensor("q_243_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(924222784))), lut = tensor([-0x1.aep-7, 0x1.af4p-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_243_cast = conv(dilations = var_11868, groups = var_6865, pad = q_243_pad_0, pad_type = q_243_pad_type_0, strides = var_11866, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_473_cast)[name = tensor("q_243_cast")]; + tensor var_11872 = const()[name = tensor("op_11872"), val = tensor([1, 1])]; + tensor var_11874 = const()[name = tensor("op_11874"), val = tensor([1, 1])]; + tensor k_243_pad_type_0 = const()[name = tensor("k_243_pad_type_0"), val = tensor("custom")]; + tensor k_243_pad_0 = const()[name = tensor("k_243_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(924427648))), lut = tensor([-0x1.304p-7, 0x1.31p-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_243_cast = conv(dilations = var_11874, groups = var_6865, pad = k_243_pad_0, pad_type = k_243_pad_type_0, strides = var_11872, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_243_cast")]; + tensor var_11878 = const()[name = tensor("op_11878"), val = tensor([1, 1])]; + tensor var_11880 = const()[name = tensor("op_11880"), val = tensor([1, 1])]; + tensor v_243_pad_type_0 = const()[name = tensor("v_243_pad_type_0"), val = tensor("custom")]; + tensor v_243_pad_0 = const()[name = tensor("v_243_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(924755392))), lut = tensor([-0x1.444p-7, 0x1.428p-7]), name = 
tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_243_cast = conv(dilations = var_11880, groups = var_6865, pad = v_243_pad_0, pad_type = v_243_pad_type_0, strides = var_11878, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_243_cast")]; + tensor var_11884 = const()[name = tensor("op_11884"), val = tensor([2, 20, 64, -1])]; + tensor var_11885_cast = reshape(shape = var_11884, x = q_243_cast)[name = tensor("op_11885_cast")]; + tensor var_11886 = const()[name = tensor("op_11886"), val = tensor([2, 20, 64, -1])]; + tensor var_11887_cast = reshape(shape = var_11886, x = k_243_cast)[name = tensor("op_11887_cast")]; + tensor var_11888 = const()[name = tensor("op_11888"), val = tensor([2, 20, 64, -1])]; + tensor var_11889_cast = reshape(shape = var_11888, x = v_243_cast)[name = tensor("op_11889_cast")]; + tensor attn_weights_485_transpose_x_0 = const()[name = tensor("attn_weights_485_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_485_transpose_y_0 = const()[name = tensor("attn_weights_485_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_485_cast = matmul(transpose_x = attn_weights_485_transpose_x_0, transpose_y = attn_weights_485_transpose_y_0, x = var_11885_cast, y = var_11887_cast)[name = tensor("attn_weights_485_cast")]; + tensor attn_weights_487_cast = mul(x = attn_weights_485_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_487_cast")]; + tensor var_11893_cast = softmax(axis = var_6849, x = attn_weights_487_cast)[name = tensor("op_11893_cast")]; + tensor attn_243_transpose_x_0 = const()[name = tensor("attn_243_transpose_x_0"), val = tensor(false)]; + tensor attn_243_transpose_y_0 = const()[name = tensor("attn_243_transpose_y_0"), val = tensor(true)]; + tensor attn_243_cast = matmul(transpose_x = attn_243_transpose_x_0, transpose_y = attn_243_transpose_y_0, x = var_11889_cast, y = var_11893_cast)[name = tensor("attn_243_cast")]; + tensor var_11897 = const()[name = tensor("op_11897"), val = tensor([2, 1280, 1, -1])]; + tensor input_685_cast = reshape(shape = var_11897, x = attn_243_cast)[name = tensor("input_685_cast")]; + tensor var_11902 = const()[name = tensor("op_11902"), val = tensor([1, 1])]; + tensor var_11904 = const()[name = tensor("op_11904"), val = tensor([1, 1])]; + tensor var_11906_pad_type_0 = const()[name = tensor("op_11906_pad_type_0"), val = tensor("custom")]; + tensor var_11906_pad_0 = const()[name = tensor("op_11906_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(925083136))), lut = tensor([-0x1.58p-8, 0x1.578p-8]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(925288000)))]; + tensor var_11906_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_11904, groups = var_6865, pad = var_11906_pad_0, pad_type = var_11906_pad_type_0, strides = var_11902, 
weight = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_685_cast)[name = tensor("op_11906_cast")]; + tensor inputs_365_cast = add(x = var_11906_cast, y = inputs_363_cast)[name = tensor("inputs_365_cast")]; + tensor var_11910 = const()[name = tensor("op_11910"), val = tensor([1])]; + tensor channels_mean_365_cast = reduce_mean(axes = var_11910, keep_dims = var_6860, x = inputs_365_cast)[name = tensor("channels_mean_365_cast")]; + tensor zero_mean_365_cast = sub(x = inputs_365_cast, y = channels_mean_365_cast)[name = tensor("zero_mean_365_cast")]; + tensor zero_mean_sq_365_cast = mul(x = zero_mean_365_cast, y = zero_mean_365_cast)[name = tensor("zero_mean_sq_365_cast")]; + tensor var_11914 = const()[name = tensor("op_11914"), val = tensor([1])]; + tensor var_11915_cast = reduce_mean(axes = var_11914, keep_dims = var_6860, x = zero_mean_sq_365_cast)[name = tensor("op_11915_cast")]; + tensor var_11916_to_fp16 = const()[name = tensor("op_11916_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11917_cast = add(x = var_11915_cast, y = var_11916_to_fp16)[name = tensor("op_11917_cast")]; + tensor denom_365_epsilon_0_to_fp16 = const()[name = tensor("denom_365_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_365_cast = rsqrt(epsilon = denom_365_epsilon_0_to_fp16, x = var_11917_cast)[name = tensor("denom_365_cast")]; + tensor out_365_cast = mul(x = zero_mean_365_cast, y = denom_365_cast)[name = tensor("out_365_cast")]; + tensor var_11921_to_fp16 = const()[name = tensor("op_11921_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(925290624)))]; + tensor var_11922_cast = add(x = out_365_cast, y = var_11921_to_fp16)[name = tensor("op_11922_cast")]; + tensor var_11924_to_fp16 = const()[name = tensor("op_11924_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(925293248)))]; + tensor input_687_cast = mul(x = var_11922_cast, y = var_11924_to_fp16)[name = tensor("input_687_cast")]; + tensor var_11932 = const()[name = tensor("op_11932"), val = tensor([1, 1])]; + tensor var_11934 = const()[name = tensor("op_11934"), val = tensor([1, 1])]; + tensor var_11936_pad_type_0 = const()[name = tensor("op_11936_pad_type_0"), val = tensor("custom")]; + tensor var_11936_pad_0 = const()[name = tensor("op_11936_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(925295872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(931849536))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(931849664)))]; + tensor var_11936_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_11934, groups = var_6865, pad = var_11936_pad_0, pad_type = var_11936_pad_type_0, strides = var_11932, weight = up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_687_cast)[name = tensor("op_11936_cast")]; + tensor 
var_11937_split_sizes_0 = const()[name = tensor("op_11937_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11937_axis_0 = const()[name = tensor("op_11937_axis_0"), val = tensor(1)]; + tensor var_11937_cast_0, tensor var_11937_cast_1 = split(axis = var_11937_axis_0, split_sizes = var_11937_split_sizes_0, x = var_11936_cast)[name = tensor("op_11937_cast")]; + tensor var_11939_mode_0 = const()[name = tensor("op_11939_mode_0"), val = tensor("EXACT")]; + tensor var_11939_cast = gelu(mode = var_11939_mode_0, x = var_11937_cast_1)[name = tensor("op_11939_cast")]; + tensor input_689_cast = mul(x = var_11937_cast_0, y = var_11939_cast)[name = tensor("input_689_cast")]; + tensor var_11943 = const()[name = tensor("op_11943"), val = tensor([1, 1])]; + tensor var_11945 = const()[name = tensor("op_11945"), val = tensor([1, 1])]; + tensor var_11947_pad_type_0 = const()[name = tensor("op_11947_pad_type_0"), val = tensor("custom")]; + tensor var_11947_pad_0 = const()[name = tensor("op_11947_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(931870208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935147072))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935147200)))]; + tensor var_11947_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_11945, groups = var_6865, pad = var_11947_pad_0, pad_type = var_11947_pad_type_0, strides = var_11943, weight = up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_689_cast)[name = tensor("op_11947_cast")]; + tensor inputs_367_cast = add(x = var_11947_cast, y = inputs_365_cast)[name = tensor("inputs_367_cast")]; + tensor var_11957 = const()[name = tensor("op_11957"), val = tensor([1])]; + tensor channels_mean_367_cast = reduce_mean(axes = var_11957, keep_dims = var_6860, x = inputs_367_cast)[name = tensor("channels_mean_367_cast")]; + tensor zero_mean_367_cast = sub(x = inputs_367_cast, y = channels_mean_367_cast)[name = tensor("zero_mean_367_cast")]; + tensor zero_mean_sq_367_cast = mul(x = zero_mean_367_cast, y = zero_mean_367_cast)[name = tensor("zero_mean_sq_367_cast")]; + tensor var_11961 = const()[name = tensor("op_11961"), val = tensor([1])]; + tensor var_11962_cast = reduce_mean(axes = var_11961, keep_dims = var_6860, x = zero_mean_sq_367_cast)[name = tensor("op_11962_cast")]; + tensor var_11963_to_fp16 = const()[name = tensor("op_11963_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11964_cast = add(x = var_11962_cast, y = var_11963_to_fp16)[name = tensor("op_11964_cast")]; + tensor denom_367_epsilon_0_to_fp16 = const()[name = tensor("denom_367_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_367_cast = rsqrt(epsilon = denom_367_epsilon_0_to_fp16, x = var_11964_cast)[name = tensor("denom_367_cast")]; + tensor out_367_cast = mul(x = zero_mean_367_cast, y = denom_367_cast)[name = tensor("out_367_cast")]; + tensor var_11968_to_fp16 = const()[name = 
tensor("op_11968_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935149824)))]; + tensor var_11969_cast = add(x = out_367_cast, y = var_11968_to_fp16)[name = tensor("op_11969_cast")]; + tensor var_11971_to_fp16 = const()[name = tensor("op_11971_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935152448)))]; + tensor hidden_states_477_cast = mul(x = var_11969_cast, y = var_11971_to_fp16)[name = tensor("hidden_states_477_cast")]; + tensor var_11978 = const()[name = tensor("op_11978"), val = tensor([1, 1])]; + tensor var_11980 = const()[name = tensor("op_11980"), val = tensor([1, 1])]; + tensor q_245_pad_type_0 = const()[name = tensor("q_245_pad_type_0"), val = tensor("custom")]; + tensor q_245_pad_0 = const()[name = tensor("q_245_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935155072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935974336))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_245_cast = conv(dilations = var_11980, groups = var_6865, pad = q_245_pad_0, pad_type = q_245_pad_type_0, strides = var_11978, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_477_cast)[name = tensor("q_245_cast")]; + tensor var_11984 = const()[name = tensor("op_11984"), val = tensor([1, 1])]; + tensor var_11986 = const()[name = tensor("op_11986"), val = tensor([1, 1])]; + tensor k_245_pad_type_0 = const()[name = tensor("k_245_pad_type_0"), val = tensor("custom")]; + tensor k_245_pad_0 = const()[name = tensor("k_245_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935974464))), lut = tensor([-0x1.758p-5, -0x1.bf4p-7, 0x1.c1p-7, 0x1.764p-5]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_245_cast = conv(dilations = var_11986, groups = var_6865, pad = k_245_pad_0, pad_type = k_245_pad_type_0, strides = var_11984, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_477_cast)[name = tensor("k_245_cast")]; + tensor var_11990 = const()[name = tensor("op_11990"), val = tensor([1, 1])]; + tensor var_11992 = const()[name = tensor("op_11992"), val = tensor([1, 1])]; + tensor v_245_pad_type_0 = const()[name = tensor("v_245_pad_type_0"), val = tensor("custom")]; + tensor v_245_pad_0 = const()[name = tensor("v_245_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(936384128))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(937203392))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_245_cast = conv(dilations = var_11992, groups = 
var_6865, pad = v_245_pad_0, pad_type = v_245_pad_type_0, strides = var_11990, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_477_cast)[name = tensor("v_245_cast")]; + tensor var_11996 = const()[name = tensor("op_11996"), val = tensor([2, 20, 64, -1])]; + tensor var_11997_cast = reshape(shape = var_11996, x = q_245_cast)[name = tensor("op_11997_cast")]; + tensor var_11998 = const()[name = tensor("op_11998"), val = tensor([2, 20, 64, -1])]; + tensor var_11999_cast = reshape(shape = var_11998, x = k_245_cast)[name = tensor("op_11999_cast")]; + tensor var_12000 = const()[name = tensor("op_12000"), val = tensor([2, 20, 64, -1])]; + tensor var_12001_cast = reshape(shape = var_12000, x = v_245_cast)[name = tensor("op_12001_cast")]; + tensor attn_weights_489_transpose_x_0 = const()[name = tensor("attn_weights_489_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_489_transpose_y_0 = const()[name = tensor("attn_weights_489_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_489_cast = matmul(transpose_x = attn_weights_489_transpose_x_0, transpose_y = attn_weights_489_transpose_y_0, x = var_11997_cast, y = var_11999_cast)[name = tensor("attn_weights_489_cast")]; + tensor attn_weights_491_cast = mul(x = attn_weights_489_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_491_cast")]; + tensor var_12005_cast = softmax(axis = var_6849, x = attn_weights_491_cast)[name = tensor("op_12005_cast")]; + tensor attn_245_transpose_x_0 = const()[name = tensor("attn_245_transpose_x_0"), val = tensor(false)]; + tensor attn_245_transpose_y_0 = const()[name = tensor("attn_245_transpose_y_0"), val = tensor(true)]; + tensor attn_245_cast = matmul(transpose_x = attn_245_transpose_x_0, transpose_y = attn_245_transpose_y_0, x = var_12001_cast, y = var_12005_cast)[name = tensor("attn_245_cast")]; + tensor var_12009 = const()[name = tensor("op_12009"), val = tensor([2, 1280, 1, -1])]; + tensor input_691_cast = reshape(shape = var_12009, x = attn_245_cast)[name = tensor("input_691_cast")]; + tensor var_12014 = const()[name = tensor("op_12014"), val = tensor([1, 1])]; + tensor var_12016 = const()[name = tensor("op_12016"), val = tensor([1, 1])]; + tensor var_12018_pad_type_0 = const()[name = tensor("op_12018_pad_type_0"), val = tensor("custom")]; + tensor var_12018_pad_0 = const()[name = tensor("op_12018_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(937203520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(938432384))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(938432576)))]; + tensor var_12018_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_12016, groups = var_6865, pad = var_12018_pad_0, pad_type = var_12018_pad_type_0, strides = var_12014, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_691_cast)[name = 
tensor("op_12018_cast")]; + tensor inputs_369_cast = add(x = var_12018_cast, y = inputs_367_cast)[name = tensor("inputs_369_cast")]; + tensor var_12022 = const()[name = tensor("op_12022"), val = tensor([1])]; + tensor channels_mean_369_cast = reduce_mean(axes = var_12022, keep_dims = var_6860, x = inputs_369_cast)[name = tensor("channels_mean_369_cast")]; + tensor zero_mean_369_cast = sub(x = inputs_369_cast, y = channels_mean_369_cast)[name = tensor("zero_mean_369_cast")]; + tensor zero_mean_sq_369_cast = mul(x = zero_mean_369_cast, y = zero_mean_369_cast)[name = tensor("zero_mean_sq_369_cast")]; + tensor var_12026 = const()[name = tensor("op_12026"), val = tensor([1])]; + tensor var_12027_cast = reduce_mean(axes = var_12026, keep_dims = var_6860, x = zero_mean_sq_369_cast)[name = tensor("op_12027_cast")]; + tensor var_12028_to_fp16 = const()[name = tensor("op_12028_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12029_cast = add(x = var_12027_cast, y = var_12028_to_fp16)[name = tensor("op_12029_cast")]; + tensor denom_369_epsilon_0_to_fp16 = const()[name = tensor("denom_369_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_369_cast = rsqrt(epsilon = denom_369_epsilon_0_to_fp16, x = var_12029_cast)[name = tensor("denom_369_cast")]; + tensor out_369_cast = mul(x = zero_mean_369_cast, y = denom_369_cast)[name = tensor("out_369_cast")]; + tensor var_12033_to_fp16 = const()[name = tensor("op_12033_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(938435200)))]; + tensor var_12034_cast = add(x = out_369_cast, y = var_12033_to_fp16)[name = tensor("op_12034_cast")]; + tensor var_12036_to_fp16 = const()[name = tensor("op_12036_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(938437824)))]; + tensor hidden_states_479_cast = mul(x = var_12034_cast, y = var_12036_to_fp16)[name = tensor("hidden_states_479_cast")]; + tensor var_12043 = const()[name = tensor("op_12043"), val = tensor([1, 1])]; + tensor var_12045 = const()[name = tensor("op_12045"), val = tensor([1, 1])]; + tensor q_247_pad_type_0 = const()[name = tensor("q_247_pad_type_0"), val = tensor("custom")]; + tensor q_247_pad_0 = const()[name = tensor("q_247_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(938440448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(939259712))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_247_cast = conv(dilations = var_12045, groups = var_6865, pad = q_247_pad_0, pad_type = q_247_pad_type_0, strides = var_12043, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_479_cast)[name = tensor("q_247_cast")]; + tensor var_12049 = const()[name = tensor("op_12049"), val = tensor([1, 1])]; + tensor var_12051 = const()[name = tensor("op_12051"), val = tensor([1, 1])]; + tensor k_247_pad_type_0 = const()[name = tensor("k_247_pad_type_0"), val = tensor("custom")]; + tensor k_247_pad_0 = const()[name = tensor("k_247_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(939259840))), lut = tensor([-0x1.224p-7, 0x1.23p-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_247_cast = conv(dilations = var_12051, groups = var_6865, pad = k_247_pad_0, pad_type = k_247_pad_type_0, strides = var_12049, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_247_cast")]; + tensor var_12055 = const()[name = tensor("op_12055"), val = tensor([1, 1])]; + tensor var_12057 = const()[name = tensor("op_12057"), val = tensor([1, 1])]; + tensor v_247_pad_type_0 = const()[name = tensor("v_247_pad_type_0"), val = tensor("custom")]; + tensor v_247_pad_0 = const()[name = tensor("v_247_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(939587584))), lut = tensor([-0x1.3fcp-7, 0x1.3f4p-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_247_cast = conv(dilations = var_12057, groups = var_6865, pad = v_247_pad_0, pad_type = v_247_pad_type_0, strides = var_12055, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_247_cast")]; + tensor var_12061 = const()[name = tensor("op_12061"), val = tensor([2, 20, 64, -1])]; + tensor var_12062_cast = reshape(shape = var_12061, x = q_247_cast)[name = tensor("op_12062_cast")]; + tensor var_12063 = const()[name = tensor("op_12063"), val = tensor([2, 20, 64, -1])]; + tensor var_12064_cast = reshape(shape = var_12063, x = k_247_cast)[name = tensor("op_12064_cast")]; + tensor var_12065 = const()[name = tensor("op_12065"), val = tensor([2, 20, 64, -1])]; + tensor var_12066_cast = reshape(shape = var_12065, x = v_247_cast)[name = tensor("op_12066_cast")]; + tensor attn_weights_493_transpose_x_0 = const()[name = tensor("attn_weights_493_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_493_transpose_y_0 = const()[name = tensor("attn_weights_493_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_493_cast = matmul(transpose_x = attn_weights_493_transpose_x_0, transpose_y = attn_weights_493_transpose_y_0, x = var_12062_cast, y = var_12064_cast)[name = tensor("attn_weights_493_cast")]; + tensor attn_weights_495_cast = mul(x = attn_weights_493_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_495_cast")]; + tensor var_12070_cast = softmax(axis = var_6849, x = attn_weights_495_cast)[name = tensor("op_12070_cast")]; + tensor attn_247_transpose_x_0 = const()[name = tensor("attn_247_transpose_x_0"), val = tensor(false)]; + tensor attn_247_transpose_y_0 = const()[name = tensor("attn_247_transpose_y_0"), val = tensor(true)]; + tensor attn_247_cast = matmul(transpose_x = attn_247_transpose_x_0, transpose_y = attn_247_transpose_y_0, x = var_12066_cast, y = var_12070_cast)[name = tensor("attn_247_cast")]; + tensor var_12074 = const()[name = tensor("op_12074"), val = tensor([2, 1280, 1, -1])]; + tensor input_693_cast = reshape(shape = var_12074, x = attn_247_cast)[name = tensor("input_693_cast")]; + tensor var_12079 = const()[name = tensor("op_12079"), val = tensor([1, 1])]; + tensor var_12081 = const()[name = tensor("op_12081"), 
val = tensor([1, 1])]; + tensor var_12083_pad_type_0 = const()[name = tensor("op_12083_pad_type_0"), val = tensor("custom")]; + tensor var_12083_pad_0 = const()[name = tensor("op_12083_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(939915328))), lut = tensor([-0x1.614p-8, 0x1.6p-8]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(940120192)))]; + tensor var_12083_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_12081, groups = var_6865, pad = var_12083_pad_0, pad_type = var_12083_pad_type_0, strides = var_12079, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_693_cast)[name = tensor("op_12083_cast")]; + tensor inputs_371_cast = add(x = var_12083_cast, y = inputs_369_cast)[name = tensor("inputs_371_cast")]; + tensor var_12087 = const()[name = tensor("op_12087"), val = tensor([1])]; + tensor channels_mean_371_cast = reduce_mean(axes = var_12087, keep_dims = var_6860, x = inputs_371_cast)[name = tensor("channels_mean_371_cast")]; + tensor zero_mean_371_cast = sub(x = inputs_371_cast, y = channels_mean_371_cast)[name = tensor("zero_mean_371_cast")]; + tensor zero_mean_sq_371_cast = mul(x = zero_mean_371_cast, y = zero_mean_371_cast)[name = tensor("zero_mean_sq_371_cast")]; + tensor var_12091 = const()[name = tensor("op_12091"), val = tensor([1])]; + tensor var_12092_cast = reduce_mean(axes = var_12091, keep_dims = var_6860, x = zero_mean_sq_371_cast)[name = tensor("op_12092_cast")]; + tensor var_12093_to_fp16 = const()[name = tensor("op_12093_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12094_cast = add(x = var_12092_cast, y = var_12093_to_fp16)[name = tensor("op_12094_cast")]; + tensor denom_371_epsilon_0_to_fp16 = const()[name = tensor("denom_371_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_371_cast = rsqrt(epsilon = denom_371_epsilon_0_to_fp16, x = var_12094_cast)[name = tensor("denom_371_cast")]; + tensor out_371_cast = mul(x = zero_mean_371_cast, y = denom_371_cast)[name = tensor("out_371_cast")]; + tensor var_12098_to_fp16 = const()[name = tensor("op_12098_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(940122816)))]; + tensor var_12099_cast = add(x = out_371_cast, y = var_12098_to_fp16)[name = tensor("op_12099_cast")]; + tensor var_12101_to_fp16 = const()[name = tensor("op_12101_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(940125440)))]; + tensor input_695_cast = mul(x = var_12099_cast, y = var_12101_to_fp16)[name = tensor("input_695_cast")]; + tensor var_12109 = const()[name = tensor("op_12109"), val = tensor([1, 1])]; + tensor var_12111 = const()[name = tensor("op_12111"), val = tensor([1, 1])]; + tensor var_12113_pad_type_0 = const()[name = tensor("op_12113_pad_type_0"), val = tensor("custom")]; + tensor var_12113_pad_0 = const()[name = tensor("op_12113_pad_0"), val = tensor([0, 0, 0, 
0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(940128064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(946681728))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(946681856)))]; + tensor var_12113_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_12111, groups = var_6865, pad = var_12113_pad_0, pad_type = var_12113_pad_type_0, strides = var_12109, weight = up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_695_cast)[name = tensor("op_12113_cast")]; + tensor var_12114_split_sizes_0 = const()[name = tensor("op_12114_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_12114_axis_0 = const()[name = tensor("op_12114_axis_0"), val = tensor(1)]; + tensor var_12114_cast_0, tensor var_12114_cast_1 = split(axis = var_12114_axis_0, split_sizes = var_12114_split_sizes_0, x = var_12113_cast)[name = tensor("op_12114_cast")]; + tensor var_12116_mode_0 = const()[name = tensor("op_12116_mode_0"), val = tensor("EXACT")]; + tensor var_12116_cast = gelu(mode = var_12116_mode_0, x = var_12114_cast_1)[name = tensor("op_12116_cast")]; + tensor input_697_cast = mul(x = var_12114_cast_0, y = var_12116_cast)[name = tensor("input_697_cast")]; + tensor var_12120 = const()[name = tensor("op_12120"), val = tensor([1, 1])]; + tensor var_12122 = const()[name = tensor("op_12122"), val = tensor([1, 1])]; + tensor var_12124_pad_type_0 = const()[name = tensor("op_12124_pad_type_0"), val = tensor("custom")]; + tensor var_12124_pad_0 = const()[name = tensor("op_12124_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(946702400))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(949979264))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(949979392)))]; + tensor var_12124_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_12122, groups = var_6865, pad = var_12124_pad_0, pad_type = var_12124_pad_type_0, strides = var_12120, weight = up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_697_cast)[name = tensor("op_12124_cast")]; + tensor inputs_373_cast = add(x = var_12124_cast, y = inputs_371_cast)[name = tensor("inputs_373_cast")]; + tensor var_12134 = const()[name = tensor("op_12134"), val = tensor([1])]; + tensor channels_mean_373_cast = reduce_mean(axes = var_12134, keep_dims = 
var_6860, x = inputs_373_cast)[name = tensor("channels_mean_373_cast")]; + tensor zero_mean_373_cast = sub(x = inputs_373_cast, y = channels_mean_373_cast)[name = tensor("zero_mean_373_cast")]; + tensor zero_mean_sq_373_cast = mul(x = zero_mean_373_cast, y = zero_mean_373_cast)[name = tensor("zero_mean_sq_373_cast")]; + tensor var_12138 = const()[name = tensor("op_12138"), val = tensor([1])]; + tensor var_12139_cast = reduce_mean(axes = var_12138, keep_dims = var_6860, x = zero_mean_sq_373_cast)[name = tensor("op_12139_cast")]; + tensor var_12140_to_fp16 = const()[name = tensor("op_12140_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12141_cast = add(x = var_12139_cast, y = var_12140_to_fp16)[name = tensor("op_12141_cast")]; + tensor denom_373_epsilon_0_to_fp16 = const()[name = tensor("denom_373_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_373_cast = rsqrt(epsilon = denom_373_epsilon_0_to_fp16, x = var_12141_cast)[name = tensor("denom_373_cast")]; + tensor out_373_cast = mul(x = zero_mean_373_cast, y = denom_373_cast)[name = tensor("out_373_cast")]; + tensor var_12145_to_fp16 = const()[name = tensor("op_12145_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(949982016)))]; + tensor var_12146_cast = add(x = out_373_cast, y = var_12145_to_fp16)[name = tensor("op_12146_cast")]; + tensor var_12148_to_fp16 = const()[name = tensor("op_12148_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(949984640)))]; + tensor hidden_states_483_cast = mul(x = var_12146_cast, y = var_12148_to_fp16)[name = tensor("hidden_states_483_cast")]; + tensor var_12155 = const()[name = tensor("op_12155"), val = tensor([1, 1])]; + tensor var_12157 = const()[name = tensor("op_12157"), val = tensor([1, 1])]; + tensor q_249_pad_type_0 = const()[name = tensor("q_249_pad_type_0"), val = tensor("custom")]; + tensor q_249_pad_0 = const()[name = tensor("q_249_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(949987264))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(950806528))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_249_cast = conv(dilations = var_12157, groups = var_6865, pad = q_249_pad_0, pad_type = q_249_pad_type_0, strides = var_12155, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_483_cast)[name = tensor("q_249_cast")]; + tensor var_12161 = const()[name = tensor("op_12161"), val = tensor([1, 1])]; + tensor var_12163 = const()[name = tensor("op_12163"), val = tensor([1, 1])]; + tensor k_249_pad_type_0 = const()[name = tensor("k_249_pad_type_0"), val = tensor("custom")]; + tensor k_249_pad_0 = const()[name = tensor("k_249_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(950806656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(951625920))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 
1, 1])]; + tensor k_249_cast = conv(dilations = var_12163, groups = var_6865, pad = k_249_pad_0, pad_type = k_249_pad_type_0, strides = var_12161, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_483_cast)[name = tensor("k_249_cast")]; + tensor var_12167 = const()[name = tensor("op_12167"), val = tensor([1, 1])]; + tensor var_12169 = const()[name = tensor("op_12169"), val = tensor([1, 1])]; + tensor v_249_pad_type_0 = const()[name = tensor("v_249_pad_type_0"), val = tensor("custom")]; + tensor v_249_pad_0 = const()[name = tensor("v_249_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(951626048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(952445312))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_249_cast = conv(dilations = var_12169, groups = var_6865, pad = v_249_pad_0, pad_type = v_249_pad_type_0, strides = var_12167, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_483_cast)[name = tensor("v_249_cast")]; + tensor var_12173 = const()[name = tensor("op_12173"), val = tensor([2, 20, 64, -1])]; + tensor var_12174_cast = reshape(shape = var_12173, x = q_249_cast)[name = tensor("op_12174_cast")]; + tensor var_12175 = const()[name = tensor("op_12175"), val = tensor([2, 20, 64, -1])]; + tensor var_12176_cast = reshape(shape = var_12175, x = k_249_cast)[name = tensor("op_12176_cast")]; + tensor var_12177 = const()[name = tensor("op_12177"), val = tensor([2, 20, 64, -1])]; + tensor var_12178_cast = reshape(shape = var_12177, x = v_249_cast)[name = tensor("op_12178_cast")]; + tensor attn_weights_497_transpose_x_0 = const()[name = tensor("attn_weights_497_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_497_transpose_y_0 = const()[name = tensor("attn_weights_497_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_497_cast = matmul(transpose_x = attn_weights_497_transpose_x_0, transpose_y = attn_weights_497_transpose_y_0, x = var_12174_cast, y = var_12176_cast)[name = tensor("attn_weights_497_cast")]; + tensor attn_weights_499_cast = mul(x = attn_weights_497_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_499_cast")]; + tensor var_12182_cast = softmax(axis = var_6849, x = attn_weights_499_cast)[name = tensor("op_12182_cast")]; + tensor attn_249_transpose_x_0 = const()[name = tensor("attn_249_transpose_x_0"), val = tensor(false)]; + tensor attn_249_transpose_y_0 = const()[name = tensor("attn_249_transpose_y_0"), val = tensor(true)]; + tensor attn_249_cast = matmul(transpose_x = attn_249_transpose_x_0, transpose_y = attn_249_transpose_y_0, x = var_12178_cast, y = var_12182_cast)[name = tensor("attn_249_cast")]; + tensor var_12186 = const()[name = tensor("op_12186"), val = tensor([2, 1280, 1, -1])]; + tensor input_699_cast = reshape(shape = var_12186, x = attn_249_cast)[name = tensor("input_699_cast")]; + tensor var_12191 = const()[name = tensor("op_12191"), val = tensor([1, 1])]; + tensor var_12193 = const()[name = tensor("op_12193"), val = tensor([1, 1])]; + tensor var_12195_pad_type_0 = const()[name = tensor("op_12195_pad_type_0"), val = tensor("custom")]; + tensor var_12195_pad_0 = const()[name = 
tensor("op_12195_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(952445440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953264704))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953264832)))]; + tensor var_12195_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_12193, groups = var_6865, pad = var_12195_pad_0, pad_type = var_12195_pad_type_0, strides = var_12191, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_699_cast)[name = tensor("op_12195_cast")]; + tensor inputs_375_cast = add(x = var_12195_cast, y = inputs_373_cast)[name = tensor("inputs_375_cast")]; + tensor var_12199 = const()[name = tensor("op_12199"), val = tensor([1])]; + tensor channels_mean_375_cast = reduce_mean(axes = var_12199, keep_dims = var_6860, x = inputs_375_cast)[name = tensor("channels_mean_375_cast")]; + tensor zero_mean_375_cast = sub(x = inputs_375_cast, y = channels_mean_375_cast)[name = tensor("zero_mean_375_cast")]; + tensor zero_mean_sq_375_cast = mul(x = zero_mean_375_cast, y = zero_mean_375_cast)[name = tensor("zero_mean_sq_375_cast")]; + tensor var_12203 = const()[name = tensor("op_12203"), val = tensor([1])]; + tensor var_12204_cast = reduce_mean(axes = var_12203, keep_dims = var_6860, x = zero_mean_sq_375_cast)[name = tensor("op_12204_cast")]; + tensor var_12205_to_fp16 = const()[name = tensor("op_12205_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12206_cast = add(x = var_12204_cast, y = var_12205_to_fp16)[name = tensor("op_12206_cast")]; + tensor denom_375_epsilon_0_to_fp16 = const()[name = tensor("denom_375_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_375_cast = rsqrt(epsilon = denom_375_epsilon_0_to_fp16, x = var_12206_cast)[name = tensor("denom_375_cast")]; + tensor out_375_cast = mul(x = zero_mean_375_cast, y = denom_375_cast)[name = tensor("out_375_cast")]; + tensor var_12210_to_fp16 = const()[name = tensor("op_12210_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953267456)))]; + tensor var_12211_cast = add(x = out_375_cast, y = var_12210_to_fp16)[name = tensor("op_12211_cast")]; + tensor var_12213_to_fp16 = const()[name = tensor("op_12213_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953270080)))]; + tensor hidden_states_485_cast = mul(x = var_12211_cast, y = var_12213_to_fp16)[name = tensor("hidden_states_485_cast")]; + tensor var_12220 = const()[name = tensor("op_12220"), val = tensor([1, 1])]; + tensor var_12222 = const()[name = tensor("op_12222"), val = tensor([1, 1])]; + tensor q_251_pad_type_0 = const()[name = tensor("q_251_pad_type_0"), val = tensor("custom")]; + tensor q_251_pad_0 = const()[name = tensor("q_251_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953272704))), lut = tensor([-0x1.94cp-7, 0x1.948p-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_251_cast = conv(dilations = var_12222, groups = var_6865, pad = q_251_pad_0, pad_type = q_251_pad_type_0, strides = var_12220, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_485_cast)[name = tensor("q_251_cast")]; + tensor var_12226 = const()[name = tensor("op_12226"), val = tensor([1, 1])]; + tensor var_12228 = const()[name = tensor("op_12228"), val = tensor([1, 1])]; + tensor k_251_pad_type_0 = const()[name = tensor("k_251_pad_type_0"), val = tensor("custom")]; + tensor k_251_pad_0 = const()[name = tensor("k_251_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953477568))), lut = tensor([-0x1.0c8p-7, 0x1.0b8p-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_251_cast = conv(dilations = var_12228, groups = var_6865, pad = k_251_pad_0, pad_type = k_251_pad_type_0, strides = var_12226, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_251_cast")]; + tensor var_12232 = const()[name = tensor("op_12232"), val = tensor([1, 1])]; + tensor var_12234 = const()[name = tensor("op_12234"), val = tensor([1, 1])]; + tensor v_251_pad_type_0 = const()[name = tensor("v_251_pad_type_0"), val = tensor("custom")]; + tensor v_251_pad_0 = const()[name = tensor("v_251_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953805312))), lut = tensor([-0x1.1c4p-7, 0x1.1c4p-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_251_cast = conv(dilations = var_12234, groups = var_6865, pad = v_251_pad_0, pad_type = v_251_pad_type_0, strides = var_12232, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_251_cast")]; + tensor var_12238 = const()[name = tensor("op_12238"), val = tensor([2, 20, 64, -1])]; + tensor var_12239_cast = reshape(shape = var_12238, x = q_251_cast)[name = tensor("op_12239_cast")]; + tensor var_12240 = const()[name = tensor("op_12240"), val = tensor([2, 20, 64, -1])]; + tensor var_12241_cast = reshape(shape = var_12240, x = k_251_cast)[name = tensor("op_12241_cast")]; + tensor var_12242 = const()[name = tensor("op_12242"), val = tensor([2, 20, 64, -1])]; + tensor var_12243_cast = reshape(shape = var_12242, x = v_251_cast)[name = tensor("op_12243_cast")]; + tensor attn_weights_501_transpose_x_0 = const()[name = tensor("attn_weights_501_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_501_transpose_y_0 = const()[name = tensor("attn_weights_501_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_501_cast = matmul(transpose_x = 
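This is the cross-attention half of the block (attn2): q is projected from the image hidden states, while k and v are 1×1 convolutions over encoder_hidden_states with weight shape [1280, 2048, 1, 1]; the 2048 input width matches SDXL's concatenated text-encoder hidden sizes (768 + 1280). Note the 2-entry LUTs inlined above: these projections are palettized at 1 bit per weight. A shape-level sketch (sequence lengths are illustrative; 77 is the usual CLIP context length):

```python
import numpy as np

def conv1x1(x, w):
    """1x1 conv as a matmul: x (B, Cin, 1, S), w (Cout, Cin, 1, 1)."""
    return np.einsum('oi,bixs->boxs', w[:, :, 0, 0], x)

rng = np.random.default_rng(0)
hidden = rng.standard_normal((2, 1280, 1, 1024)).astype(np.float32)
text = rng.standard_normal((2, 2048, 1, 77)).astype(np.float32)
w_q = rng.standard_normal((1280, 1280, 1, 1)).astype(np.float32)
w_k = rng.standard_normal((1280, 2048, 1, 1)).astype(np.float32)
w_v = rng.standard_normal((1280, 2048, 1, 1)).astype(np.float32)

# attn2: q from image tokens, k and v from the text-encoder states,
# followed by the same 20-head reshape / matmul / softmax as attn1.
q, k, v = conv1x1(hidden, w_q), conv1x1(text, w_k), conv1x1(text, w_v)
```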
attn_weights_501_transpose_x_0, transpose_y = attn_weights_501_transpose_y_0, x = var_12239_cast, y = var_12241_cast)[name = tensor("attn_weights_501_cast")]; + tensor attn_weights_503_cast = mul(x = attn_weights_501_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_503_cast")]; + tensor var_12247_cast = softmax(axis = var_6849, x = attn_weights_503_cast)[name = tensor("op_12247_cast")]; + tensor attn_251_transpose_x_0 = const()[name = tensor("attn_251_transpose_x_0"), val = tensor(false)]; + tensor attn_251_transpose_y_0 = const()[name = tensor("attn_251_transpose_y_0"), val = tensor(true)]; + tensor attn_251_cast = matmul(transpose_x = attn_251_transpose_x_0, transpose_y = attn_251_transpose_y_0, x = var_12243_cast, y = var_12247_cast)[name = tensor("attn_251_cast")]; + tensor var_12251 = const()[name = tensor("op_12251"), val = tensor([2, 1280, 1, -1])]; + tensor input_701_cast = reshape(shape = var_12251, x = attn_251_cast)[name = tensor("input_701_cast")]; + tensor var_12256 = const()[name = tensor("op_12256"), val = tensor([1, 1])]; + tensor var_12258 = const()[name = tensor("op_12258"), val = tensor([1, 1])]; + tensor var_12260_pad_type_0 = const()[name = tensor("op_12260_pad_type_0"), val = tensor("custom")]; + tensor var_12260_pad_0 = const()[name = tensor("op_12260_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(954133056))), lut = tensor([-0x1.478p-8, 0x1.488p-8]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(954337920)))]; + tensor var_12260_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_12258, groups = var_6865, pad = var_12260_pad_0, pad_type = var_12260_pad_type_0, strides = var_12256, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_701_cast)[name = tensor("op_12260_cast")]; + tensor inputs_377_cast = add(x = var_12260_cast, y = inputs_375_cast)[name = tensor("inputs_377_cast")]; + tensor var_12264 = const()[name = tensor("op_12264"), val = tensor([1])]; + tensor channels_mean_377_cast = reduce_mean(axes = var_12264, keep_dims = var_6860, x = inputs_377_cast)[name = tensor("channels_mean_377_cast")]; + tensor zero_mean_377_cast = sub(x = inputs_377_cast, y = channels_mean_377_cast)[name = tensor("zero_mean_377_cast")]; + tensor zero_mean_sq_377_cast = mul(x = zero_mean_377_cast, y = zero_mean_377_cast)[name = tensor("zero_mean_sq_377_cast")]; + tensor var_12268 = const()[name = tensor("op_12268"), val = tensor([1])]; + tensor var_12269_cast = reduce_mean(axes = var_12268, keep_dims = var_6860, x = zero_mean_sq_377_cast)[name = tensor("op_12269_cast")]; + tensor var_12270_to_fp16 = const()[name = tensor("op_12270_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12271_cast = add(x = var_12269_cast, y = var_12270_to_fp16)[name = tensor("op_12271_cast")]; + tensor denom_377_epsilon_0_to_fp16 = const()[name = tensor("denom_377_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor 
denom_377_cast = rsqrt(epsilon = denom_377_epsilon_0_to_fp16, x = var_12271_cast)[name = tensor("denom_377_cast")]; + tensor out_377_cast = mul(x = zero_mean_377_cast, y = denom_377_cast)[name = tensor("out_377_cast")]; + tensor var_12275_to_fp16 = const()[name = tensor("op_12275_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(954340544)))]; + tensor var_12276_cast = add(x = out_377_cast, y = var_12275_to_fp16)[name = tensor("op_12276_cast")]; + tensor var_12278_to_fp16 = const()[name = tensor("op_12278_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(954343168)))]; + tensor input_703_cast = mul(x = var_12276_cast, y = var_12278_to_fp16)[name = tensor("input_703_cast")]; + tensor var_12286 = const()[name = tensor("op_12286"), val = tensor([1, 1])]; + tensor var_12288 = const()[name = tensor("op_12288"), val = tensor([1, 1])]; + tensor var_12290_pad_type_0 = const()[name = tensor("op_12290_pad_type_0"), val = tensor("custom")]; + tensor var_12290_pad_0 = const()[name = tensor("op_12290_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(954345792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(960899456))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(960899584)))]; + tensor var_12290_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_12288, groups = var_6865, pad = var_12290_pad_0, pad_type = var_12290_pad_type_0, strides = var_12286, weight = up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_703_cast)[name = tensor("op_12290_cast")]; + tensor var_12291_split_sizes_0 = const()[name = tensor("op_12291_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_12291_axis_0 = const()[name = tensor("op_12291_axis_0"), val = tensor(1)]; + tensor var_12291_cast_0, tensor var_12291_cast_1 = split(axis = var_12291_axis_0, split_sizes = var_12291_split_sizes_0, x = var_12290_cast)[name = tensor("op_12291_cast")]; + tensor var_12293_mode_0 = const()[name = tensor("op_12293_mode_0"), val = tensor("EXACT")]; + tensor var_12293_cast = gelu(mode = var_12293_mode_0, x = var_12291_cast_1)[name = tensor("op_12293_cast")]; + tensor input_705_cast = mul(x = var_12291_cast_0, y = var_12293_cast)[name = tensor("input_705_cast")]; + tensor var_12297 = const()[name = tensor("op_12297"), val = tensor([1, 1])]; + tensor var_12299 = const()[name = tensor("op_12299"), val = tensor([1, 1])]; + tensor var_12301_pad_type_0 = const()[name = tensor("op_12301_pad_type_0"), val = tensor("custom")]; + tensor var_12301_pad_0 = const()[name = tensor("op_12301_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(960920128))), lut = 
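The feed-forward above is a GEGLU: net_0_proj expands 1280 → 10240 channels, split cuts the result into two 5120-channel halves along axis 1, an exact GELU is applied to the second half, and the two halves are multiplied before the 5120 → 1280 net_2 projection. A minimal NumPy sketch:

```python
import math
import numpy as np

def gelu_exact(x):
    """gelu(mode="EXACT"): x * Phi(x), Phi the standard normal CDF."""
    return x * 0.5 * (1.0 + np.vectorize(math.erf)(x / math.sqrt(2.0)))

def geglu(h):
    """h: (B, 10240, 1, S), the output of net_0_proj above."""
    a, b = np.split(h, 2, axis=1)     # two (B, 5120, 1, S) halves
    return a * gelu_exact(b)          # gate: mul(cast_0, gelu(cast_1))
```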
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(964196992))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(964197120)))]; + tensor var_12301_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_12299, groups = var_6865, pad = var_12301_pad_0, pad_type = var_12301_pad_type_0, strides = var_12297, weight = up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_705_cast)[name = tensor("op_12301_cast")]; + tensor inputs_379_cast = add(x = var_12301_cast, y = inputs_377_cast)[name = tensor("inputs_379_cast")]; + tensor var_12311 = const()[name = tensor("op_12311"), val = tensor([1])]; + tensor channels_mean_379_cast = reduce_mean(axes = var_12311, keep_dims = var_6860, x = inputs_379_cast)[name = tensor("channels_mean_379_cast")]; + tensor zero_mean_379_cast = sub(x = inputs_379_cast, y = channels_mean_379_cast)[name = tensor("zero_mean_379_cast")]; + tensor zero_mean_sq_379_cast = mul(x = zero_mean_379_cast, y = zero_mean_379_cast)[name = tensor("zero_mean_sq_379_cast")]; + tensor var_12315 = const()[name = tensor("op_12315"), val = tensor([1])]; + tensor var_12316_cast = reduce_mean(axes = var_12315, keep_dims = var_6860, x = zero_mean_sq_379_cast)[name = tensor("op_12316_cast")]; + tensor var_12317_to_fp16 = const()[name = tensor("op_12317_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12318_cast = add(x = var_12316_cast, y = var_12317_to_fp16)[name = tensor("op_12318_cast")]; + tensor denom_379_epsilon_0_to_fp16 = const()[name = tensor("denom_379_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_379_cast = rsqrt(epsilon = denom_379_epsilon_0_to_fp16, x = var_12318_cast)[name = tensor("denom_379_cast")]; + tensor out_379_cast = mul(x = zero_mean_379_cast, y = denom_379_cast)[name = tensor("out_379_cast")]; + tensor var_12322_to_fp16 = const()[name = tensor("op_12322_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(964199744)))]; + tensor var_12323_cast = add(x = out_379_cast, y = var_12322_to_fp16)[name = tensor("op_12323_cast")]; + tensor var_12325_to_fp16 = const()[name = tensor("op_12325_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(964202368)))]; + tensor hidden_states_489_cast = mul(x = var_12323_cast, y = var_12325_to_fp16)[name = tensor("hidden_states_489_cast")]; + tensor var_12332 = const()[name = tensor("op_12332"), val = tensor([1, 1])]; + tensor var_12334 = const()[name = tensor("op_12334"), val = tensor([1, 1])]; + tensor q_253_pad_type_0 = const()[name = tensor("q_253_pad_type_0"), val = tensor("custom")]; + tensor q_253_pad_0 = const()[name = tensor("q_253_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(964204992))), lut = tensor([-0x1.6ap-5, -0x1.b24p-7, 0x1.b08p-7, 0x1.69p-5]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 
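The palette size varies layer by layer, which is the "mixed-bit" part of the recipe: the attn2 projections above use 2-entry (1-bit) LUTs, while this attn1.to_q uses a 4-entry (2-bit) LUT. Assuming consecutive blobs are packed back-to-back in weight.bin, the gap between neighbouring index offsets should approximate the packed index payload; a quick check against two layers from this section (the ~64 extra bytes are presumably per-blob framing):

```python
# attn2.to_q above: 2-entry LUT (1 bit), weight 1280x1280
idx_bytes = 953477568 - 953272704         # = 204864
print(idx_bytes, 1280 * 1280 * 1 // 8)    # 204864 vs 204800

# blocks_9.attn1.to_q: 4-entry LUT (2 bits), weight 1280x1280
idx_bytes = 964614656 - 964204992         # = 409664
print(idx_bytes, 1280 * 1280 * 2 // 8)    # 409664 vs 409600
```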
1, 1])]; + tensor q_253_cast = conv(dilations = var_12334, groups = var_6865, pad = q_253_pad_0, pad_type = q_253_pad_type_0, strides = var_12332, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_489_cast)[name = tensor("q_253_cast")]; + tensor var_12338 = const()[name = tensor("op_12338"), val = tensor([1, 1])]; + tensor var_12340 = const()[name = tensor("op_12340"), val = tensor([1, 1])]; + tensor k_253_pad_type_0 = const()[name = tensor("k_253_pad_type_0"), val = tensor("custom")]; + tensor k_253_pad_0 = const()[name = tensor("k_253_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(964614656))), lut = tensor([-0x1.694p-5, -0x1.b0cp-7, 0x1.b24p-7, 0x1.694p-5]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_253_cast = conv(dilations = var_12340, groups = var_6865, pad = k_253_pad_0, pad_type = k_253_pad_type_0, strides = var_12338, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_489_cast)[name = tensor("k_253_cast")]; + tensor var_12344 = const()[name = tensor("op_12344"), val = tensor([1, 1])]; + tensor var_12346 = const()[name = tensor("op_12346"), val = tensor([1, 1])]; + tensor v_253_pad_type_0 = const()[name = tensor("v_253_pad_type_0"), val = tensor("custom")]; + tensor v_253_pad_0 = const()[name = tensor("v_253_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(965024320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(965843584))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_253_cast = conv(dilations = var_12346, groups = var_6865, pad = v_253_pad_0, pad_type = v_253_pad_type_0, strides = var_12344, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_489_cast)[name = tensor("v_253_cast")]; + tensor var_12350 = const()[name = tensor("op_12350"), val = tensor([2, 20, 64, -1])]; + tensor var_12351_cast = reshape(shape = var_12350, x = q_253_cast)[name = tensor("op_12351_cast")]; + tensor var_12352 = const()[name = tensor("op_12352"), val = tensor([2, 20, 64, -1])]; + tensor var_12353_cast = reshape(shape = var_12352, x = k_253_cast)[name = tensor("op_12353_cast")]; + tensor var_12354 = const()[name = tensor("op_12354"), val = tensor([2, 20, 64, -1])]; + tensor var_12355_cast = reshape(shape = var_12354, x = v_253_cast)[name = tensor("op_12355_cast")]; + tensor attn_weights_505_transpose_x_0 = const()[name = tensor("attn_weights_505_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_505_transpose_y_0 = const()[name = tensor("attn_weights_505_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_505_cast = matmul(transpose_x = attn_weights_505_transpose_x_0, transpose_y = attn_weights_505_transpose_y_0, x = var_12351_cast, y = var_12353_cast)[name = tensor("attn_weights_505_cast")]; + tensor attn_weights_507_cast = mul(x = attn_weights_505_cast, y = 
var_6856_to_fp16)[name = tensor("attn_weights_507_cast")]; + tensor var_12359_cast = softmax(axis = var_6849, x = attn_weights_507_cast)[name = tensor("op_12359_cast")]; + tensor attn_253_transpose_x_0 = const()[name = tensor("attn_253_transpose_x_0"), val = tensor(false)]; + tensor attn_253_transpose_y_0 = const()[name = tensor("attn_253_transpose_y_0"), val = tensor(true)]; + tensor attn_253_cast = matmul(transpose_x = attn_253_transpose_x_0, transpose_y = attn_253_transpose_y_0, x = var_12355_cast, y = var_12359_cast)[name = tensor("attn_253_cast")]; + tensor var_12363 = const()[name = tensor("op_12363"), val = tensor([2, 1280, 1, -1])]; + tensor input_707_cast = reshape(shape = var_12363, x = attn_253_cast)[name = tensor("input_707_cast")]; + tensor var_12368 = const()[name = tensor("op_12368"), val = tensor([1, 1])]; + tensor var_12370 = const()[name = tensor("op_12370"), val = tensor([1, 1])]; + tensor var_12372_pad_type_0 = const()[name = tensor("op_12372_pad_type_0"), val = tensor("custom")]; + tensor var_12372_pad_0 = const()[name = tensor("op_12372_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(965843712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(966662976))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(966663104)))]; + tensor var_12372_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_12370, groups = var_6865, pad = var_12372_pad_0, pad_type = var_12372_pad_type_0, strides = var_12368, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_707_cast)[name = tensor("op_12372_cast")]; + tensor inputs_381_cast = add(x = var_12372_cast, y = inputs_379_cast)[name = tensor("inputs_381_cast")]; + tensor var_12376 = const()[name = tensor("op_12376"), val = tensor([1])]; + tensor channels_mean_381_cast = reduce_mean(axes = var_12376, keep_dims = var_6860, x = inputs_381_cast)[name = tensor("channels_mean_381_cast")]; + tensor zero_mean_381_cast = sub(x = inputs_381_cast, y = channels_mean_381_cast)[name = tensor("zero_mean_381_cast")]; + tensor zero_mean_sq_381_cast = mul(x = zero_mean_381_cast, y = zero_mean_381_cast)[name = tensor("zero_mean_sq_381_cast")]; + tensor var_12380 = const()[name = tensor("op_12380"), val = tensor([1])]; + tensor var_12381_cast = reduce_mean(axes = var_12380, keep_dims = var_6860, x = zero_mean_sq_381_cast)[name = tensor("op_12381_cast")]; + tensor var_12382_to_fp16 = const()[name = tensor("op_12382_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12383_cast = add(x = var_12381_cast, y = var_12382_to_fp16)[name = tensor("op_12383_cast")]; + tensor denom_381_epsilon_0_to_fp16 = const()[name = tensor("denom_381_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_381_cast = rsqrt(epsilon = denom_381_epsilon_0_to_fp16, x = var_12383_cast)[name = tensor("denom_381_cast")]; + tensor out_381_cast = mul(x = zero_mean_381_cast, y = 
denom_381_cast)[name = tensor("out_381_cast")]; + tensor var_12387_to_fp16 = const()[name = tensor("op_12387_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(966665728)))]; + tensor var_12388_cast = add(x = out_381_cast, y = var_12387_to_fp16)[name = tensor("op_12388_cast")]; + tensor var_12390_to_fp16 = const()[name = tensor("op_12390_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(966668352)))]; + tensor hidden_states_491_cast = mul(x = var_12388_cast, y = var_12390_to_fp16)[name = tensor("hidden_states_491_cast")]; + tensor var_12397 = const()[name = tensor("op_12397"), val = tensor([1, 1])]; + tensor var_12399 = const()[name = tensor("op_12399"), val = tensor([1, 1])]; + tensor q_255_pad_type_0 = const()[name = tensor("q_255_pad_type_0"), val = tensor("custom")]; + tensor q_255_pad_0 = const()[name = tensor("q_255_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(966670976))), lut = tensor([-0x1.96p-7, 0x1.958p-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_255_cast = conv(dilations = var_12399, groups = var_6865, pad = q_255_pad_0, pad_type = q_255_pad_type_0, strides = var_12397, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_491_cast)[name = tensor("q_255_cast")]; + tensor var_12403 = const()[name = tensor("op_12403"), val = tensor([1, 1])]; + tensor var_12405 = const()[name = tensor("op_12405"), val = tensor([1, 1])]; + tensor k_255_pad_type_0 = const()[name = tensor("k_255_pad_type_0"), val = tensor("custom")]; + tensor k_255_pad_0 = const()[name = tensor("k_255_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(966875840))), lut = tensor([-0x1.fdcp-8, 0x1.fep-8]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_255_cast = conv(dilations = var_12405, groups = var_6865, pad = k_255_pad_0, pad_type = k_255_pad_type_0, strides = var_12403, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_255_cast")]; + tensor var_12409 = const()[name = tensor("op_12409"), val = tensor([1, 1])]; + tensor var_12411 = const()[name = tensor("op_12411"), val = tensor([1, 1])]; + tensor v_255_pad_type_0 = const()[name = tensor("v_255_pad_type_0"), val = tensor("custom")]; + tensor v_255_pad_0 = const()[name = tensor("v_255_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(967203584))), lut = tensor([-0x1.facp-8, 0x1.fbp-8]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_255_cast = conv(dilations = var_12411, groups = var_6865, pad = v_255_pad_0, pad_type = v_255_pad_type_0, 
strides = var_12409, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_255_cast")]; + tensor var_12415 = const()[name = tensor("op_12415"), val = tensor([2, 20, 64, -1])]; + tensor var_12416_cast = reshape(shape = var_12415, x = q_255_cast)[name = tensor("op_12416_cast")]; + tensor var_12417 = const()[name = tensor("op_12417"), val = tensor([2, 20, 64, -1])]; + tensor var_12418_cast = reshape(shape = var_12417, x = k_255_cast)[name = tensor("op_12418_cast")]; + tensor var_12419 = const()[name = tensor("op_12419"), val = tensor([2, 20, 64, -1])]; + tensor var_12420_cast = reshape(shape = var_12419, x = v_255_cast)[name = tensor("op_12420_cast")]; + tensor attn_weights_509_transpose_x_0 = const()[name = tensor("attn_weights_509_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_509_transpose_y_0 = const()[name = tensor("attn_weights_509_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_509_cast = matmul(transpose_x = attn_weights_509_transpose_x_0, transpose_y = attn_weights_509_transpose_y_0, x = var_12416_cast, y = var_12418_cast)[name = tensor("attn_weights_509_cast")]; + tensor attn_weights_511_cast = mul(x = attn_weights_509_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_511_cast")]; + tensor var_12424_cast = softmax(axis = var_6849, x = attn_weights_511_cast)[name = tensor("op_12424_cast")]; + tensor attn_255_transpose_x_0 = const()[name = tensor("attn_255_transpose_x_0"), val = tensor(false)]; + tensor attn_255_transpose_y_0 = const()[name = tensor("attn_255_transpose_y_0"), val = tensor(true)]; + tensor attn_255_cast = matmul(transpose_x = attn_255_transpose_x_0, transpose_y = attn_255_transpose_y_0, x = var_12420_cast, y = var_12424_cast)[name = tensor("attn_255_cast")]; + tensor var_12428 = const()[name = tensor("op_12428"), val = tensor([2, 1280, 1, -1])]; + tensor input_709_cast = reshape(shape = var_12428, x = attn_255_cast)[name = tensor("input_709_cast")]; + tensor var_12433 = const()[name = tensor("op_12433"), val = tensor([1, 1])]; + tensor var_12435 = const()[name = tensor("op_12435"), val = tensor([1, 1])]; + tensor var_12437_pad_type_0 = const()[name = tensor("op_12437_pad_type_0"), val = tensor("custom")]; + tensor var_12437_pad_0 = const()[name = tensor("op_12437_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(967531328))), lut = tensor([-0x1.414p-8, 0x1.42p-8]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(967736192)))]; + tensor var_12437_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_12435, groups = var_6865, pad = var_12437_pad_0, pad_type = var_12437_pad_type_0, strides = var_12433, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_709_cast)[name = tensor("op_12437_cast")]; + tensor inputs_383_cast = add(x = var_12437_cast, y = inputs_381_cast)[name = 
tensor("inputs_383_cast")]; + tensor var_12441 = const()[name = tensor("op_12441"), val = tensor([1])]; + tensor channels_mean_383_cast = reduce_mean(axes = var_12441, keep_dims = var_6860, x = inputs_383_cast)[name = tensor("channels_mean_383_cast")]; + tensor zero_mean_383_cast = sub(x = inputs_383_cast, y = channels_mean_383_cast)[name = tensor("zero_mean_383_cast")]; + tensor zero_mean_sq_383_cast = mul(x = zero_mean_383_cast, y = zero_mean_383_cast)[name = tensor("zero_mean_sq_383_cast")]; + tensor var_12445 = const()[name = tensor("op_12445"), val = tensor([1])]; + tensor var_12446_cast = reduce_mean(axes = var_12445, keep_dims = var_6860, x = zero_mean_sq_383_cast)[name = tensor("op_12446_cast")]; + tensor var_12447_to_fp16 = const()[name = tensor("op_12447_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12448_cast = add(x = var_12446_cast, y = var_12447_to_fp16)[name = tensor("op_12448_cast")]; + tensor denom_383_epsilon_0_to_fp16 = const()[name = tensor("denom_383_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_383_cast = rsqrt(epsilon = denom_383_epsilon_0_to_fp16, x = var_12448_cast)[name = tensor("denom_383_cast")]; + tensor out_383_cast = mul(x = zero_mean_383_cast, y = denom_383_cast)[name = tensor("out_383_cast")]; + tensor var_12452_to_fp16 = const()[name = tensor("op_12452_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(967738816)))]; + tensor var_12453_cast = add(x = out_383_cast, y = var_12452_to_fp16)[name = tensor("op_12453_cast")]; + tensor var_12455_to_fp16 = const()[name = tensor("op_12455_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(967741440)))]; + tensor input_711_cast = mul(x = var_12453_cast, y = var_12455_to_fp16)[name = tensor("input_711_cast")]; + tensor var_12463 = const()[name = tensor("op_12463"), val = tensor([1, 1])]; + tensor var_12465 = const()[name = tensor("op_12465"), val = tensor([1, 1])]; + tensor var_12467_pad_type_0 = const()[name = tensor("op_12467_pad_type_0"), val = tensor("custom")]; + tensor var_12467_pad_0 = const()[name = tensor("op_12467_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(967744064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(977574528))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(977574720)))]; + tensor var_12467_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_12465, groups = var_6865, pad = var_12467_pad_0, pad_type = var_12467_pad_type_0, strides = var_12463, weight = up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_711_cast)[name = tensor("op_12467_cast")]; + tensor var_12468_split_sizes_0 = const()[name = tensor("op_12468_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_12468_axis_0 = const()[name = tensor("op_12468_axis_0"), val = tensor(1)]; + tensor var_12468_cast_0, tensor 
var_12468_cast_1 = split(axis = var_12468_axis_0, split_sizes = var_12468_split_sizes_0, x = var_12467_cast)[name = tensor("op_12468_cast")]; + tensor var_12470_mode_0 = const()[name = tensor("op_12470_mode_0"), val = tensor("EXACT")]; + tensor var_12470_cast = gelu(mode = var_12470_mode_0, x = var_12468_cast_1)[name = tensor("op_12470_cast")]; + tensor input_713_cast = mul(x = var_12468_cast_0, y = var_12470_cast)[name = tensor("input_713_cast")]; + tensor var_12474 = const()[name = tensor("op_12474"), val = tensor([1, 1])]; + tensor var_12476 = const()[name = tensor("op_12476"), val = tensor([1, 1])]; + tensor var_12478_pad_type_0 = const()[name = tensor("op_12478_pad_type_0"), val = tensor("custom")]; + tensor var_12478_pad_0 = const()[name = tensor("op_12478_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(977595264))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(982510528))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(982510720)))]; + tensor var_12478_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_12476, groups = var_6865, pad = var_12478_pad_0, pad_type = var_12478_pad_type_0, strides = var_12474, weight = up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_713_cast)[name = tensor("op_12478_cast")]; + tensor hidden_states_495_cast = add(x = var_12478_cast, y = inputs_383_cast)[name = tensor("hidden_states_495_cast")]; + tensor var_12480 = const()[name = tensor("op_12480"), val = tensor([2, 1280, 32, 32])]; + tensor input_715_cast = reshape(shape = var_12480, x = hidden_states_495_cast)[name = tensor("input_715_cast")]; + tensor var_12484 = const()[name = tensor("op_12484"), val = tensor([1, 1])]; + tensor var_12486 = const()[name = tensor("op_12486"), val = tensor([1, 1])]; + tensor hidden_states_497_pad_type_0 = const()[name = tensor("hidden_states_497_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_497_pad_0 = const()[name = tensor("hidden_states_497_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(982513344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(983742208))), name = tensor("up_blocks_0_attentions_2_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(983742400)))]; + tensor hidden_states_497_cast = conv(bias = up_blocks_0_attentions_2_proj_out_bias_to_fp16, dilations = var_12486, groups = var_6865, pad = hidden_states_497_pad_0, pad_type = hidden_states_497_pad_type_0, strides = var_12484, weight = 
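hidden_states_495 closes the last transformer block of this stage; the sequence layout is then reshaped back to a 32×32 feature map and passed through the 1×1 proj_out convolution before the stage-level residual. A NumPy sketch of that epilogue (names are illustrative):

```python
import numpy as np

def transformer_stage_epilogue(tokens, w_proj, b_proj, residual):
    """tokens: (2, 1280, 1, 1024) sequence layout; residual: the tensor
    saved before proj_in (hidden_states_431 in the graph)."""
    h = tokens.reshape(2, 1280, 32, 32)          # input_715_cast
    w = w_proj[:, :, 0, 0]                       # 1x1 conv == matmul
    h = np.einsum('oi,bihw->bohw', w, h) + b_proj.reshape(1, -1, 1, 1)
    return h + residual                          # input_717_cast
```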
up_blocks_0_attentions_2_proj_out_weight_to_fp16_palettized, x = input_715_cast)[name = tensor("hidden_states_497_cast")]; + tensor input_717_cast = add(x = hidden_states_497_cast, y = hidden_states_431_cast)[name = tensor("input_717_cast")]; + tensor input_719_scale_factor_height_0 = const()[name = tensor("input_719_scale_factor_height_0"), val = tensor(0x1p+1)]; + tensor input_719_scale_factor_width_0 = const()[name = tensor("input_719_scale_factor_width_0"), val = tensor(0x1p+1)]; + tensor input_719_cast = upsample_nearest_neighbor(scale_factor_height = input_719_scale_factor_height_0, scale_factor_width = input_719_scale_factor_width_0, x = input_717_cast)[name = tensor("input_719_cast")]; + tensor var_12495 = const()[name = tensor("op_12495"), val = tensor([1, 1])]; + tensor var_12497 = const()[name = tensor("op_12497"), val = tensor([1, 1])]; + tensor hidden_states_499_pad_type_0 = const()[name = tensor("hidden_states_499_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_499_pad_0 = const()[name = tensor("hidden_states_499_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_upsamplers_0_conv_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(983745024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(998490688))), name = tensor("up_blocks_0_upsamplers_0_conv_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor up_blocks_0_upsamplers_0_conv_bias_to_fp16 = const()[name = tensor("up_blocks_0_upsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(998491264)))]; + tensor hidden_states_499_cast = conv(bias = up_blocks_0_upsamplers_0_conv_bias_to_fp16, dilations = var_12497, groups = var_6865, pad = hidden_states_499_pad_0, pad_type = hidden_states_499_pad_type_0, strides = var_12495, weight = up_blocks_0_upsamplers_0_conv_weight_to_fp16_palettized, x = input_719_cast)[name = tensor("hidden_states_499_cast")]; + tensor var_12502 = const()[name = tensor("op_12502"), val = tensor(3)]; + tensor var_12513 = const()[name = tensor("op_12513"), val = tensor(true)]; + tensor var_12518 = const()[name = tensor("op_12518"), val = tensor(1)]; + tensor input_721_interleave_0 = const()[name = tensor("input_721_interleave_0"), val = tensor(false)]; + tensor input_721_cast = concat(axis = var_12518, interleave = input_721_interleave_0, values = (hidden_states_499_cast, input_113_cast))[name = tensor("input_721_cast")]; + tensor reshape_120_shape_0 = const()[name = tensor("reshape_120_shape_0"), val = tensor([2, 32, 60, 64, 64])]; + tensor reshape_120_cast = reshape(shape = reshape_120_shape_0, x = input_721_cast)[name = tensor("reshape_120_cast")]; + tensor reduce_mean_90_axes_0 = const()[name = tensor("reduce_mean_90_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_90_keep_dims_0 = const()[name = tensor("reduce_mean_90_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_90_cast = reduce_mean(axes = reduce_mean_90_axes_0, keep_dims = reduce_mean_90_keep_dims_0, x = reshape_120_cast)[name = tensor("reduce_mean_90_cast")]; + tensor sub_60_cast = sub(x = reshape_120_cast, y = reduce_mean_90_cast)[name = tensor("sub_60_cast")]; + tensor square_30_cast = square(x = sub_60_cast)[name = tensor("square_30_cast")]; + tensor reduce_mean_92_axes_0 = const()[name = tensor("reduce_mean_92_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_92_keep_dims_0 = 
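Between up_blocks_0 and up_blocks_1 the decoder doubles its resolution: a nearest-neighbor upsample (scale_factor 0x1p+1 = 2.0), a palettized 3×3 convolution, then a channel concat with the saved down-block feature (input_113_cast), giving the 1280 + 640 = 1920 channels that the following group norm reshapes into 32 groups of 60. A NumPy sketch with a naive 3×3 convolution:

```python
import numpy as np

def conv2d_3x3(x, w, b):
    """Naive pad-1 3x3 convolution, NCHW layout."""
    B, C, H, W = x.shape
    xp = np.pad(x, ((0, 0), (0, 0), (1, 1), (1, 1)))
    out = np.zeros((B, w.shape[0], H, W), dtype=x.dtype)
    for i in range(3):
        for j in range(3):
            out += np.einsum('oc,bchw->bohw', w[:, :, i, j],
                             xp[:, :, i:i + H, j:j + W])
    return out + b.reshape(1, -1, 1, 1)

def upsample_and_concat(x, skip, w, b):
    """x: (2, 1280, 32, 32); skip: (2, 640, 64, 64) down-block feature."""
    x = x.repeat(2, axis=2).repeat(2, axis=3)    # nearest-neighbor, 2x
    x = conv2d_3x3(x, w, b)                      # hidden_states_499_cast
    return np.concatenate([x, skip], axis=1)     # input_721_cast, 1920 ch
```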
const()[name = tensor("reduce_mean_92_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_92_cast = reduce_mean(axes = reduce_mean_92_axes_0, keep_dims = reduce_mean_92_keep_dims_0, x = square_30_cast)[name = tensor("reduce_mean_92_cast")]; + tensor add_60_y_0_to_fp16 = const()[name = tensor("add_60_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_60_cast = add(x = reduce_mean_92_cast, y = add_60_y_0_to_fp16)[name = tensor("add_60_cast")]; + tensor sqrt_30_cast = sqrt(x = add_60_cast)[name = tensor("sqrt_30_cast")]; + tensor real_div_30_cast = real_div(x = sub_60_cast, y = sqrt_30_cast)[name = tensor("real_div_30_cast")]; + tensor reshape_121_shape_0 = const()[name = tensor("reshape_121_shape_0"), val = tensor([2, 1920, 64, 64])]; + tensor reshape_121_cast = reshape(shape = reshape_121_shape_0, x = real_div_30_cast)[name = tensor("reshape_121_cast")]; + tensor add_61_gamma_0_to_fp16 = const()[name = tensor("add_61_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(998493888)))]; + tensor add_61_beta_0_to_fp16 = const()[name = tensor("add_61_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(998497792)))]; + tensor add_61_epsilon_0_to_fp16 = const()[name = tensor("add_61_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_61_cast = batch_norm(beta = add_61_beta_0_to_fp16, epsilon = add_61_epsilon_0_to_fp16, gamma = add_61_gamma_0_to_fp16, mean = add_55_mean_0_to_fp16, variance = add_55_variance_0_to_fp16, x = reshape_121_cast)[name = tensor("add_61_cast")]; + tensor input_725_cast = silu(x = add_61_cast)[name = tensor("input_725_cast")]; + tensor var_12547 = const()[name = tensor("op_12547"), val = tensor([1, 1])]; + tensor var_12549 = const()[name = tensor("op_12549"), val = tensor([1, 1])]; + tensor hidden_states_501_pad_type_0 = const()[name = tensor("hidden_states_501_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_501_pad_0 = const()[name = tensor("hidden_states_501_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(998501696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1006796160))), name = tensor("up_blocks_1_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([640, 1920, 3, 3])]; + tensor up_blocks_1_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1006796352)))]; + tensor hidden_states_501_cast = conv(bias = up_blocks_1_resnets_0_conv1_bias_to_fp16, dilations = var_12549, groups = var_12518, pad = hidden_states_501_pad_0, pad_type = hidden_states_501_pad_type_0, strides = var_12547, weight = up_blocks_1_resnets_0_conv1_weight_to_fp16_palettized, x = input_725_cast)[name = tensor("hidden_states_501_cast")]; + tensor var_12555 = const()[name = tensor("op_12555"), val = tensor([1, 1])]; + tensor var_12557 = const()[name = tensor("op_12557"), val = tensor([1, 1])]; + tensor temb_23_pad_type_0 = const()[name = tensor("temb_23_pad_type_0"), val = tensor("custom")]; + tensor temb_23_pad_0 = const()[name = tensor("temb_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1006797696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1007412160))), name = tensor("up_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor up_blocks_1_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1007412352)))]; + tensor temb_23_cast = conv(bias = up_blocks_1_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_12557, groups = var_12518, pad = temb_23_pad_0, pad_type = temb_23_pad_type_0, strides = var_12555, weight = up_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_23_cast")]; + tensor input_729_cast = add(x = hidden_states_501_cast, y = temb_23_cast)[name = tensor("input_729_cast")]; + tensor reshape_124_shape_0 = const()[name = tensor("reshape_124_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_124_cast = reshape(shape = reshape_124_shape_0, x = input_729_cast)[name = tensor("reshape_124_cast")]; + tensor reduce_mean_93_axes_0 = const()[name = tensor("reduce_mean_93_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_93_keep_dims_0 = const()[name = tensor("reduce_mean_93_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_93_cast = reduce_mean(axes = reduce_mean_93_axes_0, keep_dims = reduce_mean_93_keep_dims_0, x = reshape_124_cast)[name = tensor("reduce_mean_93_cast")]; + tensor sub_62_cast = sub(x = reshape_124_cast, y = reduce_mean_93_cast)[name = tensor("sub_62_cast")]; + tensor square_31_cast = square(x = sub_62_cast)[name = tensor("square_31_cast")]; + tensor reduce_mean_95_axes_0 = const()[name = tensor("reduce_mean_95_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_95_keep_dims_0 = const()[name = tensor("reduce_mean_95_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_95_cast = reduce_mean(axes = reduce_mean_95_axes_0, keep_dims = reduce_mean_95_keep_dims_0, x = square_31_cast)[name = tensor("reduce_mean_95_cast")]; + tensor add_62_y_0_to_fp16 = const()[name = tensor("add_62_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_62_cast = add(x = reduce_mean_95_cast, y = add_62_y_0_to_fp16)[name = tensor("add_62_cast")]; + tensor sqrt_31_cast = sqrt(x = add_62_cast)[name = tensor("sqrt_31_cast")]; + tensor real_div_31_cast = real_div(x = sub_62_cast, y = sqrt_31_cast)[name = tensor("real_div_31_cast")]; + tensor reshape_125_shape_0 = const()[name = tensor("reshape_125_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_125_cast = reshape(shape = reshape_125_shape_0, x = real_div_31_cast)[name = tensor("reshape_125_cast")]; + tensor add_63_gamma_0_to_fp16 = const()[name = tensor("add_63_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1007413696)))]; + tensor add_63_beta_0_to_fp16 = const()[name = tensor("add_63_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1007415040)))]; + tensor add_63_epsilon_0_to_fp16 = const()[name = tensor("add_63_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_63_cast = batch_norm(beta = add_63_beta_0_to_fp16, epsilon = add_63_epsilon_0_to_fp16, gamma = add_63_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_125_cast)[name = tensor("add_63_cast")]; + tensor 
input_733_cast = silu(x = add_63_cast)[name = tensor("input_733_cast")]; + tensor var_12567 = const()[name = tensor("op_12567"), val = tensor([1, 1])]; + tensor var_12569 = const()[name = tensor("op_12569"), val = tensor([1, 1])]; + tensor hidden_states_503_pad_type_0 = const()[name = tensor("hidden_states_503_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_503_pad_0 = const()[name = tensor("hidden_states_503_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1007416384))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1010181248))), name = tensor("up_blocks_1_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor up_blocks_1_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1010181440)))]; + tensor hidden_states_503_cast = conv(bias = up_blocks_1_resnets_0_conv2_bias_to_fp16, dilations = var_12569, groups = var_12518, pad = hidden_states_503_pad_0, pad_type = hidden_states_503_pad_type_0, strides = var_12567, weight = up_blocks_1_resnets_0_conv2_weight_to_fp16_palettized, x = input_733_cast)[name = tensor("hidden_states_503_cast")]; + tensor var_12574 = const()[name = tensor("op_12574"), val = tensor([1, 1])]; + tensor var_12576 = const()[name = tensor("op_12576"), val = tensor([1, 1])]; + tensor x_11_pad_type_0 = const()[name = tensor("x_11_pad_type_0"), val = tensor("custom")]; + tensor x_11_pad_0 = const()[name = tensor("x_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1010182784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1011411648))), name = tensor("up_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([640, 1920, 1, 1])]; + tensor up_blocks_1_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1011412224)))]; + tensor x_11_cast = conv(bias = up_blocks_1_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_12576, groups = var_12518, pad = x_11_pad_0, pad_type = x_11_pad_type_0, strides = var_12574, weight = up_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized, x = input_721_cast)[name = tensor("x_11_cast")]; + tensor hidden_states_505_cast = add(x = x_11_cast, y = hidden_states_503_cast)[name = tensor("hidden_states_505_cast")]; + tensor reshape_128_shape_0 = const()[name = tensor("reshape_128_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_128_cast = reshape(shape = reshape_128_shape_0, x = hidden_states_505_cast)[name = tensor("reshape_128_cast")]; + tensor reduce_mean_96_axes_0 = const()[name = tensor("reduce_mean_96_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_96_keep_dims_0 = const()[name = tensor("reduce_mean_96_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_96_cast = reduce_mean(axes = reduce_mean_96_axes_0, keep_dims = reduce_mean_96_keep_dims_0, x = reshape_128_cast)[name = tensor("reduce_mean_96_cast")]; + tensor sub_64_cast = sub(x = 
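Because this ResNet block's input is the 1920-channel concat while its convolutions produce 640 channels, the residual path needs the 1×1 conv_shortcut ([640, 1920, 1, 1] above) before the add. A sketch of that residual join:

```python
import numpy as np

def resnet_residual(x_in, h, w_sc, b_sc):
    """x_in: (2, 1920, 64, 64) block input (input_721_cast);
    h: (2, 640, 64, 64) conv2 output (hidden_states_503_cast)."""
    skip = np.einsum('oi,bihw->bohw', w_sc[:, :, 0, 0], x_in)  # 1920 -> 640
    skip += b_sc.reshape(1, -1, 1, 1)
    return skip + h                                   # hidden_states_505
```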
reshape_128_cast, y = reduce_mean_96_cast)[name = tensor("sub_64_cast")]; + tensor square_32_cast = square(x = sub_64_cast)[name = tensor("square_32_cast")]; + tensor reduce_mean_98_axes_0 = const()[name = tensor("reduce_mean_98_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_98_keep_dims_0 = const()[name = tensor("reduce_mean_98_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_98_cast = reduce_mean(axes = reduce_mean_98_axes_0, keep_dims = reduce_mean_98_keep_dims_0, x = square_32_cast)[name = tensor("reduce_mean_98_cast")]; + tensor add_64_y_0_to_fp16 = const()[name = tensor("add_64_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_64_cast = add(x = reduce_mean_98_cast, y = add_64_y_0_to_fp16)[name = tensor("add_64_cast")]; + tensor sqrt_32_cast = sqrt(x = add_64_cast)[name = tensor("sqrt_32_cast")]; + tensor real_div_32_cast = real_div(x = sub_64_cast, y = sqrt_32_cast)[name = tensor("real_div_32_cast")]; + tensor reshape_129_shape_0 = const()[name = tensor("reshape_129_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_129_cast = reshape(shape = reshape_129_shape_0, x = real_div_32_cast)[name = tensor("reshape_129_cast")]; + tensor add_65_gamma_0_to_fp16 = const()[name = tensor("add_65_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1011413568)))]; + tensor add_65_beta_0_to_fp16 = const()[name = tensor("add_65_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1011414912)))]; + tensor add_65_epsilon_0_to_fp16 = const()[name = tensor("add_65_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_65_cast = batch_norm(beta = add_65_beta_0_to_fp16, epsilon = add_65_epsilon_0_to_fp16, gamma = add_65_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_129_cast)[name = tensor("add_65_cast")]; + tensor var_12598 = const()[name = tensor("op_12598"), val = tensor([1, 1])]; + tensor var_12600 = const()[name = tensor("op_12600"), val = tensor([1, 1])]; + tensor hidden_states_507_pad_type_0 = const()[name = tensor("hidden_states_507_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_507_pad_0 = const()[name = tensor("hidden_states_507_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1011416256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1011825920))), name = tensor("up_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1011826496)))]; + tensor hidden_states_507_cast = conv(bias = up_blocks_1_attentions_0_proj_in_bias_to_fp16, dilations = var_12600, groups = var_12518, pad = hidden_states_507_pad_0, pad_type = hidden_states_507_pad_type_0, strides = var_12598, weight = up_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized, x = add_65_cast)[name = tensor("hidden_states_507_cast")]; + tensor var_12605 = const()[name = tensor("op_12605"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_385_cast = reshape(shape = var_12605, x = hidden_states_507_cast)[name = tensor("inputs_385_cast")]; + tensor var_12615 = const()[name =
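Entering the next transformer stage mirrors the epilogue seen earlier: group norm, a 1×1 proj_in, then a flatten of the (2, 640, 64, 64) map into a sequence layout so attention can treat the 64×64 grid as 4096 tokens:

```python
import numpy as np

feature_map = np.zeros((2, 640, 64, 64), dtype=np.float16)  # placeholder
tokens = feature_map.reshape(2, 640, 1, 64 * 64)            # inputs_385_cast
assert tokens.shape == (2, 640, 1, 4096)
```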
tensor("op_12615"), val = tensor([1])]; + tensor channels_mean_385_cast = reduce_mean(axes = var_12615, keep_dims = var_12513, x = inputs_385_cast)[name = tensor("channels_mean_385_cast")]; + tensor zero_mean_385_cast = sub(x = inputs_385_cast, y = channels_mean_385_cast)[name = tensor("zero_mean_385_cast")]; + tensor zero_mean_sq_385_cast = mul(x = zero_mean_385_cast, y = zero_mean_385_cast)[name = tensor("zero_mean_sq_385_cast")]; + tensor var_12619 = const()[name = tensor("op_12619"), val = tensor([1])]; + tensor var_12620_cast = reduce_mean(axes = var_12619, keep_dims = var_12513, x = zero_mean_sq_385_cast)[name = tensor("op_12620_cast")]; + tensor var_12621_to_fp16 = const()[name = tensor("op_12621_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12622_cast = add(x = var_12620_cast, y = var_12621_to_fp16)[name = tensor("op_12622_cast")]; + tensor denom_385_epsilon_0_to_fp16 = const()[name = tensor("denom_385_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_385_cast = rsqrt(epsilon = denom_385_epsilon_0_to_fp16, x = var_12622_cast)[name = tensor("denom_385_cast")]; + tensor out_385_cast = mul(x = zero_mean_385_cast, y = denom_385_cast)[name = tensor("out_385_cast")]; + tensor var_12626_to_fp16 = const()[name = tensor("op_12626_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1011827840)))]; + tensor var_12627_cast = add(x = out_385_cast, y = var_12626_to_fp16)[name = tensor("op_12627_cast")]; + tensor var_12629_to_fp16 = const()[name = tensor("op_12629_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1011829184)))]; + tensor hidden_states_509_cast = mul(x = var_12627_cast, y = var_12629_to_fp16)[name = tensor("hidden_states_509_cast")]; + tensor var_12636 = const()[name = tensor("op_12636"), val = tensor([1, 1])]; + tensor var_12638 = const()[name = tensor("op_12638"), val = tensor([1, 1])]; + tensor q_257_pad_type_0 = const()[name = tensor("q_257_pad_type_0"), val = tensor("custom")]; + tensor q_257_pad_0 = const()[name = tensor("q_257_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1011830528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1012137792))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_257_cast = conv(dilations = var_12638, groups = var_12518, pad = q_257_pad_0, pad_type = q_257_pad_type_0, strides = var_12636, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_509_cast)[name = tensor("q_257_cast")]; + tensor var_12642 = const()[name = tensor("op_12642"), val = tensor([1, 1])]; + tensor var_12644 = const()[name = tensor("op_12644"), val = tensor([1, 1])]; + tensor k_257_pad_type_0 = const()[name = tensor("k_257_pad_type_0"), val = tensor("custom")]; + tensor k_257_pad_0 = const()[name = tensor("k_257_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1012137984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1012445248))), name = 
tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_257_cast = conv(dilations = var_12644, groups = var_12518, pad = k_257_pad_0, pad_type = k_257_pad_type_0, strides = var_12642, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_509_cast)[name = tensor("k_257_cast")]; + tensor var_12648 = const()[name = tensor("op_12648"), val = tensor([1, 1])]; + tensor var_12650 = const()[name = tensor("op_12650"), val = tensor([1, 1])]; + tensor v_257_pad_type_0 = const()[name = tensor("v_257_pad_type_0"), val = tensor("custom")]; + tensor v_257_pad_0 = const()[name = tensor("v_257_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1012445440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1012752704))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_257_cast = conv(dilations = var_12650, groups = var_12518, pad = v_257_pad_0, pad_type = v_257_pad_type_0, strides = var_12648, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_509_cast)[name = tensor("v_257_cast")]; + tensor var_12654 = const()[name = tensor("op_12654"), val = tensor([2, 10, 64, -1])]; + tensor var_12655_cast = reshape(shape = var_12654, x = q_257_cast)[name = tensor("op_12655_cast")]; + tensor var_12656 = const()[name = tensor("op_12656"), val = tensor([2, 10, 64, -1])]; + tensor var_12657_cast = reshape(shape = var_12656, x = k_257_cast)[name = tensor("op_12657_cast")]; + tensor var_12658 = const()[name = tensor("op_12658"), val = tensor([2, 10, 64, -1])]; + tensor var_12659_cast = reshape(shape = var_12658, x = v_257_cast)[name = tensor("op_12659_cast")]; + tensor attn_weights_513_transpose_x_0 = const()[name = tensor("attn_weights_513_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_513_transpose_y_0 = const()[name = tensor("attn_weights_513_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_513_cast = matmul(transpose_x = attn_weights_513_transpose_x_0, transpose_y = attn_weights_513_transpose_y_0, x = var_12655_cast, y = var_12657_cast)[name = tensor("attn_weights_513_cast")]; + tensor var_12509_to_fp16 = const()[name = tensor("op_12509_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_515_cast = mul(x = attn_weights_513_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_515_cast")]; + tensor var_12663_cast = softmax(axis = var_12502, x = attn_weights_515_cast)[name = tensor("op_12663_cast")]; + tensor attn_257_transpose_x_0 = const()[name = tensor("attn_257_transpose_x_0"), val = tensor(false)]; + tensor attn_257_transpose_y_0 = const()[name = tensor("attn_257_transpose_y_0"), val = tensor(true)]; + tensor attn_257_cast = matmul(transpose_x = attn_257_transpose_x_0, transpose_y = attn_257_transpose_y_0, x = var_12659_cast, y = var_12663_cast)[name = tensor("attn_257_cast")]; + tensor var_12667 = const()[name = tensor("op_12667"), val = tensor([2, 640, 1, -1])]; + tensor input_737_cast = reshape(shape = var_12667, x = attn_257_cast)[name = tensor("input_737_cast")]; + tensor var_12672 = const()[name = tensor("op_12672"), val = tensor([1, 1])]; + tensor 
var_12674 = const()[name = tensor("op_12674"), val = tensor([1, 1])]; + tensor var_12676_pad_type_0 = const()[name = tensor("op_12676_pad_type_0"), val = tensor("custom")]; + tensor var_12676_pad_0 = const()[name = tensor("op_12676_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1012752896))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1013162560))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1013163136)))]; + tensor var_12676_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_12674, groups = var_12518, pad = var_12676_pad_0, pad_type = var_12676_pad_type_0, strides = var_12672, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_737_cast)[name = tensor("op_12676_cast")]; + tensor inputs_387_cast = add(x = var_12676_cast, y = inputs_385_cast)[name = tensor("inputs_387_cast")]; + tensor var_12680 = const()[name = tensor("op_12680"), val = tensor([1])]; + tensor channels_mean_387_cast = reduce_mean(axes = var_12680, keep_dims = var_12513, x = inputs_387_cast)[name = tensor("channels_mean_387_cast")]; + tensor zero_mean_387_cast = sub(x = inputs_387_cast, y = channels_mean_387_cast)[name = tensor("zero_mean_387_cast")]; + tensor zero_mean_sq_387_cast = mul(x = zero_mean_387_cast, y = zero_mean_387_cast)[name = tensor("zero_mean_sq_387_cast")]; + tensor var_12684 = const()[name = tensor("op_12684"), val = tensor([1])]; + tensor var_12685_cast = reduce_mean(axes = var_12684, keep_dims = var_12513, x = zero_mean_sq_387_cast)[name = tensor("op_12685_cast")]; + tensor var_12686_to_fp16 = const()[name = tensor("op_12686_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12687_cast = add(x = var_12685_cast, y = var_12686_to_fp16)[name = tensor("op_12687_cast")]; + tensor denom_387_epsilon_0_to_fp16 = const()[name = tensor("denom_387_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_387_cast = rsqrt(epsilon = denom_387_epsilon_0_to_fp16, x = var_12687_cast)[name = tensor("denom_387_cast")]; + tensor out_387_cast = mul(x = zero_mean_387_cast, y = denom_387_cast)[name = tensor("out_387_cast")]; + tensor var_12691_to_fp16 = const()[name = tensor("op_12691_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1013164480)))]; + tensor var_12692_cast = add(x = out_387_cast, y = var_12691_to_fp16)[name = tensor("op_12692_cast")]; + tensor var_12694_to_fp16 = const()[name = tensor("op_12694_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1013165824)))]; + tensor hidden_states_511_cast = mul(x = var_12692_cast, y = var_12694_to_fp16)[name = tensor("hidden_states_511_cast")]; + tensor var_12701 = const()[name = tensor("op_12701"), val = tensor([1, 1])]; + tensor var_12703 = const()[name = tensor("op_12703"), val = tensor([1, 1])]; + tensor q_259_pad_type_0 = const()[name = 
tensor("q_259_pad_type_0"), val = tensor("custom")]; + tensor q_259_pad_0 = const()[name = tensor("q_259_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1013167168))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1013474432))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_259_cast = conv(dilations = var_12703, groups = var_12518, pad = q_259_pad_0, pad_type = q_259_pad_type_0, strides = var_12701, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_511_cast)[name = tensor("q_259_cast")]; + tensor var_12707 = const()[name = tensor("op_12707"), val = tensor([1, 1])]; + tensor var_12709 = const()[name = tensor("op_12709"), val = tensor([1, 1])]; + tensor k_259_pad_type_0 = const()[name = tensor("k_259_pad_type_0"), val = tensor("custom")]; + tensor k_259_pad_0 = const()[name = tensor("k_259_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1013474624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1014130048))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_259_cast = conv(dilations = var_12709, groups = var_12518, pad = k_259_pad_0, pad_type = k_259_pad_type_0, strides = var_12707, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_259_cast")]; + tensor var_12713 = const()[name = tensor("op_12713"), val = tensor([1, 1])]; + tensor var_12715 = const()[name = tensor("op_12715"), val = tensor([1, 1])]; + tensor v_259_pad_type_0 = const()[name = tensor("v_259_pad_type_0"), val = tensor("custom")]; + tensor v_259_pad_0 = const()[name = tensor("v_259_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1014130176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1014785600))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_259_cast = conv(dilations = var_12715, groups = var_12518, pad = v_259_pad_0, pad_type = v_259_pad_type_0, strides = var_12713, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_259_cast")]; + tensor var_12719 = const()[name = tensor("op_12719"), val = tensor([2, 10, 64, -1])]; + tensor var_12720_cast = reshape(shape = var_12719, x = q_259_cast)[name = tensor("op_12720_cast")]; + tensor var_12721 = const()[name = tensor("op_12721"), val = tensor([2, 10, 64, -1])]; + tensor var_12722_cast = reshape(shape = var_12721, x = k_259_cast)[name = tensor("op_12722_cast")]; + tensor var_12723 = const()[name = tensor("op_12723"), val = tensor([2, 10, 64, 
-1])]; + tensor var_12724_cast = reshape(shape = var_12723, x = v_259_cast)[name = tensor("op_12724_cast")]; + tensor attn_weights_517_transpose_x_0 = const()[name = tensor("attn_weights_517_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_517_transpose_y_0 = const()[name = tensor("attn_weights_517_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_517_cast = matmul(transpose_x = attn_weights_517_transpose_x_0, transpose_y = attn_weights_517_transpose_y_0, x = var_12720_cast, y = var_12722_cast)[name = tensor("attn_weights_517_cast")]; + tensor attn_weights_519_cast = mul(x = attn_weights_517_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_519_cast")]; + tensor var_12728_cast = softmax(axis = var_12502, x = attn_weights_519_cast)[name = tensor("op_12728_cast")]; + tensor attn_259_transpose_x_0 = const()[name = tensor("attn_259_transpose_x_0"), val = tensor(false)]; + tensor attn_259_transpose_y_0 = const()[name = tensor("attn_259_transpose_y_0"), val = tensor(true)]; + tensor attn_259_cast = matmul(transpose_x = attn_259_transpose_x_0, transpose_y = attn_259_transpose_y_0, x = var_12724_cast, y = var_12728_cast)[name = tensor("attn_259_cast")]; + tensor var_12732 = const()[name = tensor("op_12732"), val = tensor([2, 640, 1, -1])]; + tensor input_739_cast = reshape(shape = var_12732, x = attn_259_cast)[name = tensor("input_739_cast")]; + tensor var_12737 = const()[name = tensor("op_12737"), val = tensor([1, 1])]; + tensor var_12739 = const()[name = tensor("op_12739"), val = tensor([1, 1])]; + tensor var_12741_pad_type_0 = const()[name = tensor("op_12741_pad_type_0"), val = tensor("custom")]; + tensor var_12741_pad_0 = const()[name = tensor("op_12741_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1014785728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1014990592))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1014990720)))]; + tensor var_12741_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_12739, groups = var_12518, pad = var_12741_pad_0, pad_type = var_12741_pad_type_0, strides = var_12737, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_739_cast)[name = tensor("op_12741_cast")]; + tensor inputs_389_cast = add(x = var_12741_cast, y = inputs_387_cast)[name = tensor("inputs_389_cast")]; + tensor var_12745 = const()[name = tensor("op_12745"), val = tensor([1])]; + tensor channels_mean_389_cast = reduce_mean(axes = var_12745, keep_dims = var_12513, x = inputs_389_cast)[name = tensor("channels_mean_389_cast")]; + tensor zero_mean_389_cast = sub(x = inputs_389_cast, y = channels_mean_389_cast)[name = tensor("zero_mean_389_cast")]; + tensor zero_mean_sq_389_cast = mul(x = zero_mean_389_cast, y = zero_mean_389_cast)[name = tensor("zero_mean_sq_389_cast")]; + tensor var_12749 = const()[name = tensor("op_12749"), val = tensor([1])]; + tensor 
var_12750_cast = reduce_mean(axes = var_12749, keep_dims = var_12513, x = zero_mean_sq_389_cast)[name = tensor("op_12750_cast")]; + tensor var_12751_to_fp16 = const()[name = tensor("op_12751_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12752_cast = add(x = var_12750_cast, y = var_12751_to_fp16)[name = tensor("op_12752_cast")]; + tensor denom_389_epsilon_0_to_fp16 = const()[name = tensor("denom_389_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_389_cast = rsqrt(epsilon = denom_389_epsilon_0_to_fp16, x = var_12752_cast)[name = tensor("denom_389_cast")]; + tensor out_389_cast = mul(x = zero_mean_389_cast, y = denom_389_cast)[name = tensor("out_389_cast")]; + tensor var_12756_to_fp16 = const()[name = tensor("op_12756_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1014992064)))]; + tensor var_12757_cast = add(x = out_389_cast, y = var_12756_to_fp16)[name = tensor("op_12757_cast")]; + tensor var_12759_to_fp16 = const()[name = tensor("op_12759_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1014993408)))]; + tensor input_741_cast = mul(x = var_12757_cast, y = var_12759_to_fp16)[name = tensor("input_741_cast")]; + tensor var_12767 = const()[name = tensor("op_12767"), val = tensor([1, 1])]; + tensor var_12769 = const()[name = tensor("op_12769"), val = tensor([1, 1])]; + tensor var_12771_pad_type_0 = const()[name = tensor("op_12771_pad_type_0"), val = tensor("custom")]; + tensor var_12771_pad_0 = const()[name = tensor("op_12771_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1014994752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1018271616))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1018272192)))]; + tensor var_12771_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_12769, groups = var_12518, pad = var_12771_pad_0, pad_type = var_12771_pad_type_0, strides = var_12767, weight = up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_741_cast)[name = tensor("op_12771_cast")]; + tensor var_12772_split_sizes_0 = const()[name = tensor("op_12772_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_12772_axis_0 = const()[name = tensor("op_12772_axis_0"), val = tensor(1)]; + tensor var_12772_cast_0, tensor var_12772_cast_1 = split(axis = var_12772_axis_0, split_sizes = var_12772_split_sizes_0, x = var_12771_cast)[name = tensor("op_12772_cast")]; + tensor var_12774_mode_0 = const()[name = tensor("op_12774_mode_0"), val = tensor("EXACT")]; + tensor var_12774_cast = gelu(mode = var_12774_mode_0, x = var_12772_cast_1)[name = tensor("op_12774_cast")]; + tensor input_743_cast = mul(x = var_12772_cast_0, y = var_12774_cast)[name = tensor("input_743_cast")]; + tensor var_12778 = const()[name = tensor("op_12778"), val = tensor([1, 1])]; + tensor var_12780 = const()[name = 
tensor("op_12780"), val = tensor([1, 1])]; + tensor var_12782_pad_type_0 = const()[name = tensor("op_12782_pad_type_0"), val = tensor("custom")]; + tensor var_12782_pad_0 = const()[name = tensor("op_12782_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1018282496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1019920960))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1019921536)))]; + tensor var_12782_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_12780, groups = var_12518, pad = var_12782_pad_0, pad_type = var_12782_pad_type_0, strides = var_12778, weight = up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_743_cast)[name = tensor("op_12782_cast")]; + tensor inputs_391_cast = add(x = var_12782_cast, y = inputs_389_cast)[name = tensor("inputs_391_cast")]; + tensor var_12792 = const()[name = tensor("op_12792"), val = tensor([1])]; + tensor channels_mean_391_cast = reduce_mean(axes = var_12792, keep_dims = var_12513, x = inputs_391_cast)[name = tensor("channels_mean_391_cast")]; + tensor zero_mean_391_cast = sub(x = inputs_391_cast, y = channels_mean_391_cast)[name = tensor("zero_mean_391_cast")]; + tensor zero_mean_sq_391_cast = mul(x = zero_mean_391_cast, y = zero_mean_391_cast)[name = tensor("zero_mean_sq_391_cast")]; + tensor var_12796 = const()[name = tensor("op_12796"), val = tensor([1])]; + tensor var_12797_cast = reduce_mean(axes = var_12796, keep_dims = var_12513, x = zero_mean_sq_391_cast)[name = tensor("op_12797_cast")]; + tensor var_12798_to_fp16 = const()[name = tensor("op_12798_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12799_cast = add(x = var_12797_cast, y = var_12798_to_fp16)[name = tensor("op_12799_cast")]; + tensor denom_391_epsilon_0_to_fp16 = const()[name = tensor("denom_391_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_391_cast = rsqrt(epsilon = denom_391_epsilon_0_to_fp16, x = var_12799_cast)[name = tensor("denom_391_cast")]; + tensor out_391_cast = mul(x = zero_mean_391_cast, y = denom_391_cast)[name = tensor("out_391_cast")]; + tensor var_12803_to_fp16 = const()[name = tensor("op_12803_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1019922880)))]; + tensor var_12804_cast = add(x = out_391_cast, y = var_12803_to_fp16)[name = tensor("op_12804_cast")]; + tensor var_12806_to_fp16 = const()[name = tensor("op_12806_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1019924224)))]; + tensor hidden_states_515_cast = mul(x = var_12804_cast, y = var_12806_to_fp16)[name = tensor("hidden_states_515_cast")]; + tensor var_12813 = const()[name = tensor("op_12813"), val = tensor([1, 1])]; + tensor var_12815 = const()[name = tensor("op_12815"), val = tensor([1, 1])]; + tensor q_261_pad_type_0 = const()[name = tensor("q_261_pad_type_0"), val = tensor("custom")]; + tensor q_261_pad_0 = 
const()[name = tensor("q_261_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1019925568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1020232832))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_261_cast = conv(dilations = var_12815, groups = var_12518, pad = q_261_pad_0, pad_type = q_261_pad_type_0, strides = var_12813, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_515_cast)[name = tensor("q_261_cast")]; + tensor var_12819 = const()[name = tensor("op_12819"), val = tensor([1, 1])]; + tensor var_12821 = const()[name = tensor("op_12821"), val = tensor([1, 1])]; + tensor k_261_pad_type_0 = const()[name = tensor("k_261_pad_type_0"), val = tensor("custom")]; + tensor k_261_pad_0 = const()[name = tensor("k_261_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1020233024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1020540288))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_261_cast = conv(dilations = var_12821, groups = var_12518, pad = k_261_pad_0, pad_type = k_261_pad_type_0, strides = var_12819, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_515_cast)[name = tensor("k_261_cast")]; + tensor var_12825 = const()[name = tensor("op_12825"), val = tensor([1, 1])]; + tensor var_12827 = const()[name = tensor("op_12827"), val = tensor([1, 1])]; + tensor v_261_pad_type_0 = const()[name = tensor("v_261_pad_type_0"), val = tensor("custom")]; + tensor v_261_pad_0 = const()[name = tensor("v_261_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1020540480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1020847744))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_261_cast = conv(dilations = var_12827, groups = var_12518, pad = v_261_pad_0, pad_type = v_261_pad_type_0, strides = var_12825, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_515_cast)[name = tensor("v_261_cast")]; + tensor var_12831 = const()[name = tensor("op_12831"), val = tensor([2, 10, 64, -1])]; + tensor var_12832_cast = reshape(shape = var_12831, x = q_261_cast)[name = tensor("op_12832_cast")]; + tensor var_12833 = const()[name = tensor("op_12833"), val = tensor([2, 10, 64, -1])]; + tensor var_12834_cast = reshape(shape = var_12833, x = k_261_cast)[name = tensor("op_12834_cast")]; + tensor var_12835 = const()[name = tensor("op_12835"), val = tensor([2, 10, 64, -1])]; + tensor var_12836_cast = reshape(shape = var_12835, x = 
v_261_cast)[name = tensor("op_12836_cast")]; + tensor attn_weights_521_transpose_x_0 = const()[name = tensor("attn_weights_521_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_521_transpose_y_0 = const()[name = tensor("attn_weights_521_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_521_cast = matmul(transpose_x = attn_weights_521_transpose_x_0, transpose_y = attn_weights_521_transpose_y_0, x = var_12832_cast, y = var_12834_cast)[name = tensor("attn_weights_521_cast")]; + tensor attn_weights_523_cast = mul(x = attn_weights_521_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_523_cast")]; + tensor var_12840_cast = softmax(axis = var_12502, x = attn_weights_523_cast)[name = tensor("op_12840_cast")]; + tensor attn_261_transpose_x_0 = const()[name = tensor("attn_261_transpose_x_0"), val = tensor(false)]; + tensor attn_261_transpose_y_0 = const()[name = tensor("attn_261_transpose_y_0"), val = tensor(true)]; + tensor attn_261_cast = matmul(transpose_x = attn_261_transpose_x_0, transpose_y = attn_261_transpose_y_0, x = var_12836_cast, y = var_12840_cast)[name = tensor("attn_261_cast")]; + tensor var_12844 = const()[name = tensor("op_12844"), val = tensor([2, 640, 1, -1])]; + tensor input_745_cast = reshape(shape = var_12844, x = attn_261_cast)[name = tensor("input_745_cast")]; + tensor var_12849 = const()[name = tensor("op_12849"), val = tensor([1, 1])]; + tensor var_12851 = const()[name = tensor("op_12851"), val = tensor([1, 1])]; + tensor var_12853_pad_type_0 = const()[name = tensor("op_12853_pad_type_0"), val = tensor("custom")]; + tensor var_12853_pad_0 = const()[name = tensor("op_12853_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1020847936))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021257600))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021258176)))]; + tensor var_12853_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_12851, groups = var_12518, pad = var_12853_pad_0, pad_type = var_12853_pad_type_0, strides = var_12849, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_745_cast)[name = tensor("op_12853_cast")]; + tensor inputs_393_cast = add(x = var_12853_cast, y = inputs_391_cast)[name = tensor("inputs_393_cast")]; + tensor var_12857 = const()[name = tensor("op_12857"), val = tensor([1])]; + tensor channels_mean_393_cast = reduce_mean(axes = var_12857, keep_dims = var_12513, x = inputs_393_cast)[name = tensor("channels_mean_393_cast")]; + tensor zero_mean_393_cast = sub(x = inputs_393_cast, y = channels_mean_393_cast)[name = tensor("zero_mean_393_cast")]; + tensor zero_mean_sq_393_cast = mul(x = zero_mean_393_cast, y = zero_mean_393_cast)[name = tensor("zero_mean_sq_393_cast")]; + tensor var_12861 = const()[name = tensor("op_12861"), val = tensor([1])]; + tensor var_12862_cast = reduce_mean(axes = var_12861, keep_dims = 
var_12513, x = zero_mean_sq_393_cast)[name = tensor("op_12862_cast")]; + tensor var_12863_to_fp16 = const()[name = tensor("op_12863_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12864_cast = add(x = var_12862_cast, y = var_12863_to_fp16)[name = tensor("op_12864_cast")]; + tensor denom_393_epsilon_0_to_fp16 = const()[name = tensor("denom_393_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_393_cast = rsqrt(epsilon = denom_393_epsilon_0_to_fp16, x = var_12864_cast)[name = tensor("denom_393_cast")]; + tensor out_393_cast = mul(x = zero_mean_393_cast, y = denom_393_cast)[name = tensor("out_393_cast")]; + tensor var_12868_to_fp16 = const()[name = tensor("op_12868_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021259520)))]; + tensor var_12869_cast = add(x = out_393_cast, y = var_12868_to_fp16)[name = tensor("op_12869_cast")]; + tensor var_12871_to_fp16 = const()[name = tensor("op_12871_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021260864)))]; + tensor hidden_states_517_cast = mul(x = var_12869_cast, y = var_12871_to_fp16)[name = tensor("hidden_states_517_cast")]; + tensor var_12878 = const()[name = tensor("op_12878"), val = tensor([1, 1])]; + tensor var_12880 = const()[name = tensor("op_12880"), val = tensor([1, 1])]; + tensor q_263_pad_type_0 = const()[name = tensor("q_263_pad_type_0"), val = tensor("custom")]; + tensor q_263_pad_0 = const()[name = tensor("q_263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021262208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021467072))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_263_cast = conv(dilations = var_12880, groups = var_12518, pad = q_263_pad_0, pad_type = q_263_pad_type_0, strides = var_12878, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_517_cast)[name = tensor("q_263_cast")]; + tensor var_12884 = const()[name = tensor("op_12884"), val = tensor([1, 1])]; + tensor var_12886 = const()[name = tensor("op_12886"), val = tensor([1, 1])]; + tensor k_263_pad_type_0 = const()[name = tensor("k_263_pad_type_0"), val = tensor("custom")]; + tensor k_263_pad_0 = const()[name = tensor("k_263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021467200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1022122624))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_263_cast = conv(dilations = var_12886, groups = var_12518, pad = k_263_pad_0, pad_type = k_263_pad_type_0, strides = var_12884, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_263_cast")]; + tensor var_12890 = const()[name = tensor("op_12890"), val = tensor([1, 1])]; + tensor var_12892 = const()[name = tensor("op_12892"), val = tensor([1, 1])]; + 
tensor v_263_pad_type_0 = const()[name = tensor("v_263_pad_type_0"), val = tensor("custom")]; + tensor v_263_pad_0 = const()[name = tensor("v_263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1022122752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1022778176))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_263_cast = conv(dilations = var_12892, groups = var_12518, pad = v_263_pad_0, pad_type = v_263_pad_type_0, strides = var_12890, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_263_cast")]; + tensor var_12896 = const()[name = tensor("op_12896"), val = tensor([2, 10, 64, -1])]; + tensor var_12897_cast = reshape(shape = var_12896, x = q_263_cast)[name = tensor("op_12897_cast")]; + tensor var_12898 = const()[name = tensor("op_12898"), val = tensor([2, 10, 64, -1])]; + tensor var_12899_cast = reshape(shape = var_12898, x = k_263_cast)[name = tensor("op_12899_cast")]; + tensor var_12900 = const()[name = tensor("op_12900"), val = tensor([2, 10, 64, -1])]; + tensor var_12901_cast = reshape(shape = var_12900, x = v_263_cast)[name = tensor("op_12901_cast")]; + tensor attn_weights_525_transpose_x_0 = const()[name = tensor("attn_weights_525_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_525_transpose_y_0 = const()[name = tensor("attn_weights_525_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_525_cast = matmul(transpose_x = attn_weights_525_transpose_x_0, transpose_y = attn_weights_525_transpose_y_0, x = var_12897_cast, y = var_12899_cast)[name = tensor("attn_weights_525_cast")]; + tensor attn_weights_527_cast = mul(x = attn_weights_525_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_527_cast")]; + tensor var_12905_cast = softmax(axis = var_12502, x = attn_weights_527_cast)[name = tensor("op_12905_cast")]; + tensor attn_263_transpose_x_0 = const()[name = tensor("attn_263_transpose_x_0"), val = tensor(false)]; + tensor attn_263_transpose_y_0 = const()[name = tensor("attn_263_transpose_y_0"), val = tensor(true)]; + tensor attn_263_cast = matmul(transpose_x = attn_263_transpose_x_0, transpose_y = attn_263_transpose_y_0, x = var_12901_cast, y = var_12905_cast)[name = tensor("attn_263_cast")]; + tensor var_12909 = const()[name = tensor("op_12909"), val = tensor([2, 640, 1, -1])]; + tensor input_747_cast = reshape(shape = var_12909, x = attn_263_cast)[name = tensor("input_747_cast")]; + tensor var_12914 = const()[name = tensor("op_12914"), val = tensor([1, 1])]; + tensor var_12916 = const()[name = tensor("op_12916"), val = tensor([1, 1])]; + tensor var_12918_pad_type_0 = const()[name = tensor("op_12918_pad_type_0"), val = tensor("custom")]; + tensor var_12918_pad_0 = const()[name = tensor("op_12918_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1022778304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1022983168))), name = 
tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1022983296)))]; + tensor var_12918_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_12916, groups = var_12518, pad = var_12918_pad_0, pad_type = var_12918_pad_type_0, strides = var_12914, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_747_cast)[name = tensor("op_12918_cast")]; + tensor inputs_395_cast = add(x = var_12918_cast, y = inputs_393_cast)[name = tensor("inputs_395_cast")]; + tensor var_12922 = const()[name = tensor("op_12922"), val = tensor([1])]; + tensor channels_mean_395_cast = reduce_mean(axes = var_12922, keep_dims = var_12513, x = inputs_395_cast)[name = tensor("channels_mean_395_cast")]; + tensor zero_mean_395_cast = sub(x = inputs_395_cast, y = channels_mean_395_cast)[name = tensor("zero_mean_395_cast")]; + tensor zero_mean_sq_395_cast = mul(x = zero_mean_395_cast, y = zero_mean_395_cast)[name = tensor("zero_mean_sq_395_cast")]; + tensor var_12926 = const()[name = tensor("op_12926"), val = tensor([1])]; + tensor var_12927_cast = reduce_mean(axes = var_12926, keep_dims = var_12513, x = zero_mean_sq_395_cast)[name = tensor("op_12927_cast")]; + tensor var_12928_to_fp16 = const()[name = tensor("op_12928_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12929_cast = add(x = var_12927_cast, y = var_12928_to_fp16)[name = tensor("op_12929_cast")]; + tensor denom_395_epsilon_0_to_fp16 = const()[name = tensor("denom_395_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_395_cast = rsqrt(epsilon = denom_395_epsilon_0_to_fp16, x = var_12929_cast)[name = tensor("denom_395_cast")]; + tensor out_395_cast = mul(x = zero_mean_395_cast, y = denom_395_cast)[name = tensor("out_395_cast")]; + tensor var_12933_to_fp16 = const()[name = tensor("op_12933_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1022984640)))]; + tensor var_12934_cast = add(x = out_395_cast, y = var_12933_to_fp16)[name = tensor("op_12934_cast")]; + tensor var_12936_to_fp16 = const()[name = tensor("op_12936_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1022985984)))]; + tensor input_749_cast = mul(x = var_12934_cast, y = var_12936_to_fp16)[name = tensor("input_749_cast")]; + tensor var_12944 = const()[name = tensor("op_12944"), val = tensor([1, 1])]; + tensor var_12946 = const()[name = tensor("op_12946"), val = tensor([1, 1])]; + tensor var_12948_pad_type_0 = const()[name = tensor("op_12948_pad_type_0"), val = tensor("custom")]; + tensor var_12948_pad_0 = const()[name = tensor("op_12948_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1022987328))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1025444992))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor 
up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1025445184)))]; + tensor var_12948_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_12946, groups = var_12518, pad = var_12948_pad_0, pad_type = var_12948_pad_type_0, strides = var_12944, weight = up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_749_cast)[name = tensor("op_12948_cast")]; + tensor var_12949_split_sizes_0 = const()[name = tensor("op_12949_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_12949_axis_0 = const()[name = tensor("op_12949_axis_0"), val = tensor(1)]; + tensor var_12949_cast_0, tensor var_12949_cast_1 = split(axis = var_12949_axis_0, split_sizes = var_12949_split_sizes_0, x = var_12948_cast)[name = tensor("op_12949_cast")]; + tensor var_12951_mode_0 = const()[name = tensor("op_12951_mode_0"), val = tensor("EXACT")]; + tensor var_12951_cast = gelu(mode = var_12951_mode_0, x = var_12949_cast_1)[name = tensor("op_12951_cast")]; + tensor input_751_cast = mul(x = var_12949_cast_0, y = var_12951_cast)[name = tensor("input_751_cast")]; + tensor var_12955 = const()[name = tensor("op_12955"), val = tensor([1, 1])]; + tensor var_12957 = const()[name = tensor("op_12957"), val = tensor([1, 1])]; + tensor var_12959_pad_type_0 = const()[name = tensor("op_12959_pad_type_0"), val = tensor("custom")]; + tensor var_12959_pad_0 = const()[name = tensor("op_12959_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1025455488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1026684352))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1026684544)))]; + tensor var_12959_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_12957, groups = var_12518, pad = var_12959_pad_0, pad_type = var_12959_pad_type_0, strides = var_12955, weight = up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_751_cast)[name = tensor("op_12959_cast")]; + tensor hidden_states_521_cast = add(x = var_12959_cast, y = inputs_395_cast)[name = tensor("hidden_states_521_cast")]; + tensor var_12961 = const()[name = tensor("op_12961"), val = tensor([2, 640, 64, 64])]; + tensor input_753_cast = reshape(shape = var_12961, x = hidden_states_521_cast)[name = tensor("input_753_cast")]; + tensor var_12965 = const()[name = tensor("op_12965"), val = tensor([1, 1])]; + tensor var_12967 = const()[name = tensor("op_12967"), val = tensor([1, 1])]; + tensor hidden_states_523_pad_type_0 = const()[name = tensor("hidden_states_523_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_523_pad_0 = const()[name = tensor("hidden_states_523_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1026685888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1026993152))), name = tensor("up_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1026993344)))]; + tensor hidden_states_523_cast = conv(bias = up_blocks_1_attentions_0_proj_out_bias_to_fp16, dilations = var_12967, groups = var_12518, pad = hidden_states_523_pad_0, pad_type = hidden_states_523_pad_type_0, strides = var_12965, weight = up_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized, x = input_753_cast)[name = tensor("hidden_states_523_cast")]; + tensor hidden_states_525_cast = add(x = hidden_states_523_cast, y = hidden_states_505_cast)[name = tensor("hidden_states_525_cast")]; + tensor input_755_interleave_0 = const()[name = tensor("input_755_interleave_0"), val = tensor(false)]; + tensor input_755_cast = concat(axis = var_12518, interleave = input_755_interleave_0, values = (hidden_states_525_cast, input_79_cast))[name = tensor("input_755_cast")]; + tensor reshape_132_shape_0 = const()[name = tensor("reshape_132_shape_0"), val = tensor([2, 32, 40, 64, 64])]; + tensor reshape_132_cast = reshape(shape = reshape_132_shape_0, x = input_755_cast)[name = tensor("reshape_132_cast")]; + tensor reduce_mean_99_axes_0 = const()[name = tensor("reduce_mean_99_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_99_keep_dims_0 = const()[name = tensor("reduce_mean_99_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_99_cast = reduce_mean(axes = reduce_mean_99_axes_0, keep_dims = reduce_mean_99_keep_dims_0, x = reshape_132_cast)[name = tensor("reduce_mean_99_cast")]; + tensor sub_66_cast = sub(x = reshape_132_cast, y = reduce_mean_99_cast)[name = tensor("sub_66_cast")]; + tensor square_33_cast = square(x = sub_66_cast)[name = tensor("square_33_cast")]; + tensor reduce_mean_101_axes_0 = const()[name = tensor("reduce_mean_101_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_101_keep_dims_0 = const()[name = tensor("reduce_mean_101_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_101_cast = reduce_mean(axes = reduce_mean_101_axes_0, keep_dims = reduce_mean_101_keep_dims_0, x = square_33_cast)[name = tensor("reduce_mean_101_cast")]; + tensor add_66_y_0_to_fp16 = const()[name = tensor("add_66_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_66_cast = add(x = reduce_mean_101_cast, y = add_66_y_0_to_fp16)[name = tensor("add_66_cast")]; + tensor sqrt_33_cast = sqrt(x = add_66_cast)[name = tensor("sqrt_33_cast")]; + tensor real_div_33_cast = real_div(x = sub_66_cast, y = sqrt_33_cast)[name = tensor("real_div_33_cast")]; + tensor reshape_133_shape_0 = const()[name = tensor("reshape_133_shape_0"), val = tensor([2, 1280, 64, 64])]; + tensor reshape_133_cast = reshape(shape = reshape_133_shape_0, x = real_div_33_cast)[name = tensor("reshape_133_cast")]; + tensor add_67_gamma_0_to_fp16 = const()[name = tensor("add_67_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1026994688)))]; + tensor add_67_beta_0_to_fp16 = const()[name = tensor("add_67_beta_0_to_fp16"), val = tensor(BLOBFILE(path 
= tensor("@model_path/weights/weight.bin"), offset = tensor(1026997312)))]; + tensor add_67_epsilon_0_to_fp16 = const()[name = tensor("add_67_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_67_cast = batch_norm(beta = add_67_beta_0_to_fp16, epsilon = add_67_epsilon_0_to_fp16, gamma = add_67_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_133_cast)[name = tensor("add_67_cast")]; + tensor input_759_cast = silu(x = add_67_cast)[name = tensor("input_759_cast")]; + tensor var_12985 = const()[name = tensor("op_12985"), val = tensor([1, 1])]; + tensor var_12987 = const()[name = tensor("op_12987"), val = tensor([1, 1])]; + tensor hidden_states_527_pad_type_0 = const()[name = tensor("hidden_states_527_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_527_pad_0 = const()[name = tensor("hidden_states_527_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1026999936))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1034372800))), name = tensor("up_blocks_1_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([640, 1280, 3, 3])]; + tensor up_blocks_1_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1034373376)))]; + tensor hidden_states_527_cast = conv(bias = up_blocks_1_resnets_1_conv1_bias_to_fp16, dilations = var_12987, groups = var_12518, pad = hidden_states_527_pad_0, pad_type = hidden_states_527_pad_type_0, strides = var_12985, weight = up_blocks_1_resnets_1_conv1_weight_to_fp16_palettized, x = input_759_cast)[name = tensor("hidden_states_527_cast")]; + tensor var_12993 = const()[name = tensor("op_12993"), val = tensor([1, 1])]; + tensor var_12995 = const()[name = tensor("op_12995"), val = tensor([1, 1])]; + tensor temb_25_pad_type_0 = const()[name = tensor("temb_25_pad_type_0"), val = tensor("custom")]; + tensor temb_25_pad_0 = const()[name = tensor("temb_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1034374720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1034989184))), name = tensor("up_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor up_blocks_1_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1034989376)))]; + tensor temb_25_cast = conv(bias = up_blocks_1_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_12995, groups = var_12518, pad = temb_25_pad_0, pad_type = temb_25_pad_type_0, strides = var_12993, weight = up_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_25_cast")]; + tensor input_763_cast = add(x = hidden_states_527_cast, y = temb_25_cast)[name = tensor("input_763_cast")]; + tensor reshape_136_shape_0 = const()[name = tensor("reshape_136_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_136_cast = reshape(shape = reshape_136_shape_0, x = input_763_cast)[name = 
tensor("reshape_136_cast")]; + tensor reduce_mean_102_axes_0 = const()[name = tensor("reduce_mean_102_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_102_keep_dims_0 = const()[name = tensor("reduce_mean_102_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_102_cast = reduce_mean(axes = reduce_mean_102_axes_0, keep_dims = reduce_mean_102_keep_dims_0, x = reshape_136_cast)[name = tensor("reduce_mean_102_cast")]; + tensor sub_68_cast = sub(x = reshape_136_cast, y = reduce_mean_102_cast)[name = tensor("sub_68_cast")]; + tensor square_34_cast = square(x = sub_68_cast)[name = tensor("square_34_cast")]; + tensor reduce_mean_104_axes_0 = const()[name = tensor("reduce_mean_104_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_104_keep_dims_0 = const()[name = tensor("reduce_mean_104_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_104_cast = reduce_mean(axes = reduce_mean_104_axes_0, keep_dims = reduce_mean_104_keep_dims_0, x = square_34_cast)[name = tensor("reduce_mean_104_cast")]; + tensor add_68_y_0_to_fp16 = const()[name = tensor("add_68_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_68_cast = add(x = reduce_mean_104_cast, y = add_68_y_0_to_fp16)[name = tensor("add_68_cast")]; + tensor sqrt_34_cast = sqrt(x = add_68_cast)[name = tensor("sqrt_34_cast")]; + tensor real_div_34_cast = real_div(x = sub_68_cast, y = sqrt_34_cast)[name = tensor("real_div_34_cast")]; + tensor reshape_137_shape_0 = const()[name = tensor("reshape_137_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_137_cast = reshape(shape = reshape_137_shape_0, x = real_div_34_cast)[name = tensor("reshape_137_cast")]; + tensor add_69_gamma_0_to_fp16 = const()[name = tensor("add_69_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1034990720)))]; + tensor add_69_beta_0_to_fp16 = const()[name = tensor("add_69_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1034992064)))]; + tensor add_69_epsilon_0_to_fp16 = const()[name = tensor("add_69_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_69_cast = batch_norm(beta = add_69_beta_0_to_fp16, epsilon = add_69_epsilon_0_to_fp16, gamma = add_69_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_137_cast)[name = tensor("add_69_cast")]; + tensor input_767_cast = silu(x = add_69_cast)[name = tensor("input_767_cast")]; + tensor var_13005 = const()[name = tensor("op_13005"), val = tensor([1, 1])]; + tensor var_13007 = const()[name = tensor("op_13007"), val = tensor([1, 1])]; + tensor hidden_states_529_pad_type_0 = const()[name = tensor("hidden_states_529_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_529_pad_0 = const()[name = tensor("hidden_states_529_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1034993408))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1038679872))), name = tensor("up_blocks_1_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor up_blocks_1_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1038680448)))]; + tensor hidden_states_529_cast = conv(bias = 
up_blocks_1_resnets_1_conv2_bias_to_fp16, dilations = var_13007, groups = var_12518, pad = hidden_states_529_pad_0, pad_type = hidden_states_529_pad_type_0, strides = var_13005, weight = up_blocks_1_resnets_1_conv2_weight_to_fp16_palettized, x = input_767_cast)[name = tensor("hidden_states_529_cast")]; + tensor var_13012 = const()[name = tensor("op_13012"), val = tensor([1, 1])]; + tensor var_13014 = const()[name = tensor("op_13014"), val = tensor([1, 1])]; + tensor x_13_pad_type_0 = const()[name = tensor("x_13_pad_type_0"), val = tensor("custom")]; + tensor x_13_pad_0 = const()[name = tensor("x_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_1_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1038681792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039501056))), name = tensor("up_blocks_1_resnets_1_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor up_blocks_1_resnets_1_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039501632)))]; + tensor x_13_cast = conv(bias = up_blocks_1_resnets_1_conv_shortcut_bias_to_fp16, dilations = var_13014, groups = var_12518, pad = x_13_pad_0, pad_type = x_13_pad_type_0, strides = var_13012, weight = up_blocks_1_resnets_1_conv_shortcut_weight_to_fp16_palettized, x = input_755_cast)[name = tensor("x_13_cast")]; + tensor hidden_states_531_cast = add(x = x_13_cast, y = hidden_states_529_cast)[name = tensor("hidden_states_531_cast")]; + tensor reshape_140_shape_0 = const()[name = tensor("reshape_140_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_140_cast = reshape(shape = reshape_140_shape_0, x = hidden_states_531_cast)[name = tensor("reshape_140_cast")]; + tensor reduce_mean_105_axes_0 = const()[name = tensor("reduce_mean_105_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_105_keep_dims_0 = const()[name = tensor("reduce_mean_105_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_105_cast = reduce_mean(axes = reduce_mean_105_axes_0, keep_dims = reduce_mean_105_keep_dims_0, x = reshape_140_cast)[name = tensor("reduce_mean_105_cast")]; + tensor sub_70_cast = sub(x = reshape_140_cast, y = reduce_mean_105_cast)[name = tensor("sub_70_cast")]; + tensor square_35_cast = square(x = sub_70_cast)[name = tensor("square_35_cast")]; + tensor reduce_mean_107_axes_0 = const()[name = tensor("reduce_mean_107_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_107_keep_dims_0 = const()[name = tensor("reduce_mean_107_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_107_cast = reduce_mean(axes = reduce_mean_107_axes_0, keep_dims = reduce_mean_107_keep_dims_0, x = square_35_cast)[name = tensor("reduce_mean_107_cast")]; + tensor add_70_y_0_to_fp16 = const()[name = tensor("add_70_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_70_cast = add(x = reduce_mean_107_cast, y = add_70_y_0_to_fp16)[name = tensor("add_70_cast")]; + tensor sqrt_35_cast = sqrt(x = add_70_cast)[name = tensor("sqrt_35_cast")]; + tensor real_div_35_cast = real_div(x = sub_70_cast, y = sqrt_35_cast)[name = tensor("real_div_35_cast")]; + tensor reshape_141_shape_0 = const()[name = tensor("reshape_141_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_141_cast = reshape(shape = reshape_141_shape_0, x = 
real_div_35_cast)[name = tensor("reshape_141_cast")]; + tensor add_71_gamma_0_to_fp16 = const()[name = tensor("add_71_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039502976)))]; + tensor add_71_beta_0_to_fp16 = const()[name = tensor("add_71_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039504320)))]; + tensor add_71_epsilon_0_to_fp16 = const()[name = tensor("add_71_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_71_cast = batch_norm(beta = add_71_beta_0_to_fp16, epsilon = add_71_epsilon_0_to_fp16, gamma = add_71_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_141_cast)[name = tensor("add_71_cast")]; + tensor var_13036 = const()[name = tensor("op_13036"), val = tensor([1, 1])]; + tensor var_13038 = const()[name = tensor("op_13038"), val = tensor([1, 1])]; + tensor hidden_states_533_pad_type_0 = const()[name = tensor("hidden_states_533_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_533_pad_0 = const()[name = tensor("hidden_states_533_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039505664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039915328))), name = tensor("up_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039915904)))]; + tensor hidden_states_533_cast = conv(bias = up_blocks_1_attentions_1_proj_in_bias_to_fp16, dilations = var_13038, groups = var_12518, pad = hidden_states_533_pad_0, pad_type = hidden_states_533_pad_type_0, strides = var_13036, weight = up_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized, x = add_71_cast)[name = tensor("hidden_states_533_cast")]; + tensor var_13043 = const()[name = tensor("op_13043"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_397_cast = reshape(shape = var_13043, x = hidden_states_533_cast)[name = tensor("inputs_397_cast")]; + tensor var_13053 = const()[name = tensor("op_13053"), val = tensor([1])]; + tensor channels_mean_397_cast = reduce_mean(axes = var_13053, keep_dims = var_12513, x = inputs_397_cast)[name = tensor("channels_mean_397_cast")]; + tensor zero_mean_397_cast = sub(x = inputs_397_cast, y = channels_mean_397_cast)[name = tensor("zero_mean_397_cast")]; + tensor zero_mean_sq_397_cast = mul(x = zero_mean_397_cast, y = zero_mean_397_cast)[name = tensor("zero_mean_sq_397_cast")]; + tensor var_13057 = const()[name = tensor("op_13057"), val = tensor([1])]; + tensor var_13058_cast = reduce_mean(axes = var_13057, keep_dims = var_12513, x = zero_mean_sq_397_cast)[name = tensor("op_13058_cast")]; + tensor var_13059_to_fp16 = const()[name = tensor("op_13059_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13060_cast = add(x = var_13058_cast, y = var_13059_to_fp16)[name = tensor("op_13060_cast")]; + tensor denom_397_epsilon_0_to_fp16 = const()[name = tensor("denom_397_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_397_cast = rsqrt(epsilon = denom_397_epsilon_0_to_fp16, x = var_13060_cast)[name = tensor("denom_397_cast")]; + tensor out_397_cast = 
mul(x = zero_mean_397_cast, y = denom_397_cast)[name = tensor("out_397_cast")]; + tensor var_13064_to_fp16 = const()[name = tensor("op_13064_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039917248)))]; + tensor var_13065_cast = add(x = out_397_cast, y = var_13064_to_fp16)[name = tensor("op_13065_cast")]; + tensor var_13067_to_fp16 = const()[name = tensor("op_13067_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039918592)))]; + tensor hidden_states_535_cast = mul(x = var_13065_cast, y = var_13067_to_fp16)[name = tensor("hidden_states_535_cast")]; + tensor var_13074 = const()[name = tensor("op_13074"), val = tensor([1, 1])]; + tensor var_13076 = const()[name = tensor("op_13076"), val = tensor([1, 1])]; + tensor q_265_pad_type_0 = const()[name = tensor("q_265_pad_type_0"), val = tensor("custom")]; + tensor q_265_pad_0 = const()[name = tensor("q_265_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039919936))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040227200))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_265_cast = conv(dilations = var_13076, groups = var_12518, pad = q_265_pad_0, pad_type = q_265_pad_type_0, strides = var_13074, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_535_cast)[name = tensor("q_265_cast")]; + tensor var_13080 = const()[name = tensor("op_13080"), val = tensor([1, 1])]; + tensor var_13082 = const()[name = tensor("op_13082"), val = tensor([1, 1])]; + tensor k_265_pad_type_0 = const()[name = tensor("k_265_pad_type_0"), val = tensor("custom")]; + tensor k_265_pad_0 = const()[name = tensor("k_265_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040227392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040534656))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_265_cast = conv(dilations = var_13082, groups = var_12518, pad = k_265_pad_0, pad_type = k_265_pad_type_0, strides = var_13080, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_535_cast)[name = tensor("k_265_cast")]; + tensor var_13086 = const()[name = tensor("op_13086"), val = tensor([1, 1])]; + tensor var_13088 = const()[name = tensor("op_13088"), val = tensor([1, 1])]; + tensor v_265_pad_type_0 = const()[name = tensor("v_265_pad_type_0"), val = tensor("custom")]; + tensor v_265_pad_0 = const()[name = tensor("v_265_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040534848))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040944512))), name = 
tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_265_cast = conv(dilations = var_13088, groups = var_12518, pad = v_265_pad_0, pad_type = v_265_pad_type_0, strides = var_13086, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_535_cast)[name = tensor("v_265_cast")]; + tensor var_13092 = const()[name = tensor("op_13092"), val = tensor([2, 10, 64, -1])]; + tensor var_13093_cast = reshape(shape = var_13092, x = q_265_cast)[name = tensor("op_13093_cast")]; + tensor var_13094 = const()[name = tensor("op_13094"), val = tensor([2, 10, 64, -1])]; + tensor var_13095_cast = reshape(shape = var_13094, x = k_265_cast)[name = tensor("op_13095_cast")]; + tensor var_13096 = const()[name = tensor("op_13096"), val = tensor([2, 10, 64, -1])]; + tensor var_13097_cast = reshape(shape = var_13096, x = v_265_cast)[name = tensor("op_13097_cast")]; + tensor attn_weights_529_transpose_x_0 = const()[name = tensor("attn_weights_529_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_529_transpose_y_0 = const()[name = tensor("attn_weights_529_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_529_cast = matmul(transpose_x = attn_weights_529_transpose_x_0, transpose_y = attn_weights_529_transpose_y_0, x = var_13093_cast, y = var_13095_cast)[name = tensor("attn_weights_529_cast")]; + tensor attn_weights_531_cast = mul(x = attn_weights_529_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_531_cast")]; + tensor var_13101_cast = softmax(axis = var_12502, x = attn_weights_531_cast)[name = tensor("op_13101_cast")]; + tensor attn_265_transpose_x_0 = const()[name = tensor("attn_265_transpose_x_0"), val = tensor(false)]; + tensor attn_265_transpose_y_0 = const()[name = tensor("attn_265_transpose_y_0"), val = tensor(true)]; + tensor attn_265_cast = matmul(transpose_x = attn_265_transpose_x_0, transpose_y = attn_265_transpose_y_0, x = var_13097_cast, y = var_13101_cast)[name = tensor("attn_265_cast")]; + tensor var_13105 = const()[name = tensor("op_13105"), val = tensor([2, 640, 1, -1])]; + tensor input_771_cast = reshape(shape = var_13105, x = attn_265_cast)[name = tensor("input_771_cast")]; + tensor var_13110 = const()[name = tensor("op_13110"), val = tensor([1, 1])]; + tensor var_13112 = const()[name = tensor("op_13112"), val = tensor([1, 1])]; + tensor var_13114_pad_type_0 = const()[name = tensor("op_13114_pad_type_0"), val = tensor("custom")]; + tensor var_13114_pad_0 = const()[name = tensor("op_13114_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040945088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1041354752))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1041355328)))]; + tensor var_13114_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_13112, groups = var_12518, pad = 
var_13114_pad_0, pad_type = var_13114_pad_type_0, strides = var_13110, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_771_cast)[name = tensor("op_13114_cast")]; + tensor inputs_399_cast = add(x = var_13114_cast, y = inputs_397_cast)[name = tensor("inputs_399_cast")]; + tensor var_13118 = const()[name = tensor("op_13118"), val = tensor([1])]; + tensor channels_mean_399_cast = reduce_mean(axes = var_13118, keep_dims = var_12513, x = inputs_399_cast)[name = tensor("channels_mean_399_cast")]; + tensor zero_mean_399_cast = sub(x = inputs_399_cast, y = channels_mean_399_cast)[name = tensor("zero_mean_399_cast")]; + tensor zero_mean_sq_399_cast = mul(x = zero_mean_399_cast, y = zero_mean_399_cast)[name = tensor("zero_mean_sq_399_cast")]; + tensor var_13122 = const()[name = tensor("op_13122"), val = tensor([1])]; + tensor var_13123_cast = reduce_mean(axes = var_13122, keep_dims = var_12513, x = zero_mean_sq_399_cast)[name = tensor("op_13123_cast")]; + tensor var_13124_to_fp16 = const()[name = tensor("op_13124_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13125_cast = add(x = var_13123_cast, y = var_13124_to_fp16)[name = tensor("op_13125_cast")]; + tensor denom_399_epsilon_0_to_fp16 = const()[name = tensor("denom_399_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_399_cast = rsqrt(epsilon = denom_399_epsilon_0_to_fp16, x = var_13125_cast)[name = tensor("denom_399_cast")]; + tensor out_399_cast = mul(x = zero_mean_399_cast, y = denom_399_cast)[name = tensor("out_399_cast")]; + tensor var_13129_to_fp16 = const()[name = tensor("op_13129_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1041356672)))]; + tensor var_13130_cast = add(x = out_399_cast, y = var_13129_to_fp16)[name = tensor("op_13130_cast")]; + tensor var_13132_to_fp16 = const()[name = tensor("op_13132_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1041358016)))]; + tensor hidden_states_537_cast = mul(x = var_13130_cast, y = var_13132_to_fp16)[name = tensor("hidden_states_537_cast")]; + tensor var_13139 = const()[name = tensor("op_13139"), val = tensor([1, 1])]; + tensor var_13141 = const()[name = tensor("op_13141"), val = tensor([1, 1])]; + tensor q_267_pad_type_0 = const()[name = tensor("q_267_pad_type_0"), val = tensor("custom")]; + tensor q_267_pad_0 = const()[name = tensor("q_267_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1041359360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1041564224))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_267_cast = conv(dilations = var_13141, groups = var_12518, pad = q_267_pad_0, pad_type = q_267_pad_type_0, strides = var_13139, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_537_cast)[name = tensor("q_267_cast")]; + tensor var_13145 = const()[name = tensor("op_13145"), val = tensor([1, 1])]; + tensor var_13147 = const()[name = tensor("op_13147"), val = tensor([1, 1])]; + tensor k_267_pad_type_0 = const()[name = tensor("k_267_pad_type_0"), val = tensor("custom")]; + tensor k_267_pad_0 = const()[name = tensor("k_267_pad_0"), 
val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1041564352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1042547456))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_267_cast = conv(dilations = var_13147, groups = var_12518, pad = k_267_pad_0, pad_type = k_267_pad_type_0, strides = var_13145, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_267_cast")]; + tensor var_13151 = const()[name = tensor("op_13151"), val = tensor([1, 1])]; + tensor var_13153 = const()[name = tensor("op_13153"), val = tensor([1, 1])]; + tensor v_267_pad_type_0 = const()[name = tensor("v_267_pad_type_0"), val = tensor("custom")]; + tensor v_267_pad_0 = const()[name = tensor("v_267_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1042547648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1043203072))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_267_cast = conv(dilations = var_13153, groups = var_12518, pad = v_267_pad_0, pad_type = v_267_pad_type_0, strides = var_13151, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_267_cast")]; + tensor var_13157 = const()[name = tensor("op_13157"), val = tensor([2, 10, 64, -1])]; + tensor var_13158_cast = reshape(shape = var_13157, x = q_267_cast)[name = tensor("op_13158_cast")]; + tensor var_13159 = const()[name = tensor("op_13159"), val = tensor([2, 10, 64, -1])]; + tensor var_13160_cast = reshape(shape = var_13159, x = k_267_cast)[name = tensor("op_13160_cast")]; + tensor var_13161 = const()[name = tensor("op_13161"), val = tensor([2, 10, 64, -1])]; + tensor var_13162_cast = reshape(shape = var_13161, x = v_267_cast)[name = tensor("op_13162_cast")]; + tensor attn_weights_533_transpose_x_0 = const()[name = tensor("attn_weights_533_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_533_transpose_y_0 = const()[name = tensor("attn_weights_533_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_533_cast = matmul(transpose_x = attn_weights_533_transpose_x_0, transpose_y = attn_weights_533_transpose_y_0, x = var_13158_cast, y = var_13160_cast)[name = tensor("attn_weights_533_cast")]; + tensor attn_weights_535_cast = mul(x = attn_weights_533_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_535_cast")]; + tensor var_13166_cast = softmax(axis = var_12502, x = attn_weights_535_cast)[name = tensor("op_13166_cast")]; + tensor attn_267_transpose_x_0 = const()[name = tensor("attn_267_transpose_x_0"), val = tensor(false)]; + tensor attn_267_transpose_y_0 = const()[name = tensor("attn_267_transpose_y_0"), val = tensor(true)]; + tensor attn_267_cast = matmul(transpose_x = attn_267_transpose_x_0, transpose_y = attn_267_transpose_y_0, x = var_13162_cast, y = var_13166_cast)[name = tensor("attn_267_cast")]; + tensor var_13170 = 
const()[name = tensor("op_13170"), val = tensor([2, 640, 1, -1])]; + tensor input_773_cast = reshape(shape = var_13170, x = attn_267_cast)[name = tensor("input_773_cast")]; + tensor var_13175 = const()[name = tensor("op_13175"), val = tensor([1, 1])]; + tensor var_13177 = const()[name = tensor("op_13177"), val = tensor([1, 1])]; + tensor var_13179_pad_type_0 = const()[name = tensor("op_13179_pad_type_0"), val = tensor("custom")]; + tensor var_13179_pad_0 = const()[name = tensor("op_13179_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1043203200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1043510464))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1043510656)))]; + tensor var_13179_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_13177, groups = var_12518, pad = var_13179_pad_0, pad_type = var_13179_pad_type_0, strides = var_13175, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_773_cast)[name = tensor("op_13179_cast")]; + tensor inputs_401_cast = add(x = var_13179_cast, y = inputs_399_cast)[name = tensor("inputs_401_cast")]; + tensor var_13183 = const()[name = tensor("op_13183"), val = tensor([1])]; + tensor channels_mean_401_cast = reduce_mean(axes = var_13183, keep_dims = var_12513, x = inputs_401_cast)[name = tensor("channels_mean_401_cast")]; + tensor zero_mean_401_cast = sub(x = inputs_401_cast, y = channels_mean_401_cast)[name = tensor("zero_mean_401_cast")]; + tensor zero_mean_sq_401_cast = mul(x = zero_mean_401_cast, y = zero_mean_401_cast)[name = tensor("zero_mean_sq_401_cast")]; + tensor var_13187 = const()[name = tensor("op_13187"), val = tensor([1])]; + tensor var_13188_cast = reduce_mean(axes = var_13187, keep_dims = var_12513, x = zero_mean_sq_401_cast)[name = tensor("op_13188_cast")]; + tensor var_13189_to_fp16 = const()[name = tensor("op_13189_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13190_cast = add(x = var_13188_cast, y = var_13189_to_fp16)[name = tensor("op_13190_cast")]; + tensor denom_401_epsilon_0_to_fp16 = const()[name = tensor("denom_401_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_401_cast = rsqrt(epsilon = denom_401_epsilon_0_to_fp16, x = var_13190_cast)[name = tensor("denom_401_cast")]; + tensor out_401_cast = mul(x = zero_mean_401_cast, y = denom_401_cast)[name = tensor("out_401_cast")]; + tensor var_13194_to_fp16 = const()[name = tensor("op_13194_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1043512000)))]; + tensor var_13195_cast = add(x = out_401_cast, y = var_13194_to_fp16)[name = tensor("op_13195_cast")]; + tensor var_13197_to_fp16 = const()[name = tensor("op_13197_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1043513344)))]; + tensor input_775_cast = mul(x = var_13195_cast, y = var_13197_to_fp16)[name = 
tensor("input_775_cast")]; + tensor var_13205 = const()[name = tensor("op_13205"), val = tensor([1, 1])]; + tensor var_13207 = const()[name = tensor("op_13207"), val = tensor([1, 1])]; + tensor var_13209_pad_type_0 = const()[name = tensor("op_13209_pad_type_0"), val = tensor("custom")]; + tensor var_13209_pad_0 = const()[name = tensor("op_13209_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1043514688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1046791552))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1046792128)))]; + tensor var_13209_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_13207, groups = var_12518, pad = var_13209_pad_0, pad_type = var_13209_pad_type_0, strides = var_13205, weight = up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_775_cast)[name = tensor("op_13209_cast")]; + tensor var_13210_split_sizes_0 = const()[name = tensor("op_13210_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_13210_axis_0 = const()[name = tensor("op_13210_axis_0"), val = tensor(1)]; + tensor var_13210_cast_0, tensor var_13210_cast_1 = split(axis = var_13210_axis_0, split_sizes = var_13210_split_sizes_0, x = var_13209_cast)[name = tensor("op_13210_cast")]; + tensor var_13212_mode_0 = const()[name = tensor("op_13212_mode_0"), val = tensor("EXACT")]; + tensor var_13212_cast = gelu(mode = var_13212_mode_0, x = var_13210_cast_1)[name = tensor("op_13212_cast")]; + tensor input_777_cast = mul(x = var_13210_cast_0, y = var_13212_cast)[name = tensor("input_777_cast")]; + tensor var_13216 = const()[name = tensor("op_13216"), val = tensor([1, 1])]; + tensor var_13218 = const()[name = tensor("op_13218"), val = tensor([1, 1])]; + tensor var_13220_pad_type_0 = const()[name = tensor("op_13220_pad_type_0"), val = tensor("custom")]; + tensor var_13220_pad_0 = const()[name = tensor("op_13220_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1046802432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1048440896))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1048441472)))]; + tensor var_13220_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_13218, groups = var_12518, pad = var_13220_pad_0, pad_type = var_13220_pad_type_0, strides = var_13216, weight = 
up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_777_cast)[name = tensor("op_13220_cast")]; + tensor inputs_403_cast = add(x = var_13220_cast, y = inputs_401_cast)[name = tensor("inputs_403_cast")]; + tensor var_13230 = const()[name = tensor("op_13230"), val = tensor([1])]; + tensor channels_mean_403_cast = reduce_mean(axes = var_13230, keep_dims = var_12513, x = inputs_403_cast)[name = tensor("channels_mean_403_cast")]; + tensor zero_mean_403_cast = sub(x = inputs_403_cast, y = channels_mean_403_cast)[name = tensor("zero_mean_403_cast")]; + tensor zero_mean_sq_403_cast = mul(x = zero_mean_403_cast, y = zero_mean_403_cast)[name = tensor("zero_mean_sq_403_cast")]; + tensor var_13234 = const()[name = tensor("op_13234"), val = tensor([1])]; + tensor var_13235_cast = reduce_mean(axes = var_13234, keep_dims = var_12513, x = zero_mean_sq_403_cast)[name = tensor("op_13235_cast")]; + tensor var_13236_to_fp16 = const()[name = tensor("op_13236_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13237_cast = add(x = var_13235_cast, y = var_13236_to_fp16)[name = tensor("op_13237_cast")]; + tensor denom_403_epsilon_0_to_fp16 = const()[name = tensor("denom_403_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_403_cast = rsqrt(epsilon = denom_403_epsilon_0_to_fp16, x = var_13237_cast)[name = tensor("denom_403_cast")]; + tensor out_403_cast = mul(x = zero_mean_403_cast, y = denom_403_cast)[name = tensor("out_403_cast")]; + tensor var_13241_to_fp16 = const()[name = tensor("op_13241_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1048442816)))]; + tensor var_13242_cast = add(x = out_403_cast, y = var_13241_to_fp16)[name = tensor("op_13242_cast")]; + tensor var_13244_to_fp16 = const()[name = tensor("op_13244_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1048444160)))]; + tensor hidden_states_541_cast = mul(x = var_13242_cast, y = var_13244_to_fp16)[name = tensor("hidden_states_541_cast")]; + tensor var_13251 = const()[name = tensor("op_13251"), val = tensor([1, 1])]; + tensor var_13253 = const()[name = tensor("op_13253"), val = tensor([1, 1])]; + tensor q_269_pad_type_0 = const()[name = tensor("q_269_pad_type_0"), val = tensor("custom")]; + tensor q_269_pad_0 = const()[name = tensor("q_269_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1048445504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1048752768))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_269_cast = conv(dilations = var_13253, groups = var_12518, pad = q_269_pad_0, pad_type = q_269_pad_type_0, strides = var_13251, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_541_cast)[name = tensor("q_269_cast")]; + tensor var_13257 = const()[name = tensor("op_13257"), val = tensor([1, 1])]; + tensor var_13259 = const()[name = tensor("op_13259"), val = tensor([1, 1])]; + tensor k_269_pad_type_0 = const()[name = tensor("k_269_pad_type_0"), val = tensor("custom")]; + tensor k_269_pad_0 = const()[name = tensor("k_269_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1048752960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1049060224))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_269_cast = conv(dilations = var_13259, groups = var_12518, pad = k_269_pad_0, pad_type = k_269_pad_type_0, strides = var_13257, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_541_cast)[name = tensor("k_269_cast")]; + tensor var_13263 = const()[name = tensor("op_13263"), val = tensor([1, 1])]; + tensor var_13265 = const()[name = tensor("op_13265"), val = tensor([1, 1])]; + tensor v_269_pad_type_0 = const()[name = tensor("v_269_pad_type_0"), val = tensor("custom")]; + tensor v_269_pad_0 = const()[name = tensor("v_269_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1049060416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1049470080))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_269_cast = conv(dilations = var_13265, groups = var_12518, pad = v_269_pad_0, pad_type = v_269_pad_type_0, strides = var_13263, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_541_cast)[name = tensor("v_269_cast")]; + tensor var_13269 = const()[name = tensor("op_13269"), val = tensor([2, 10, 64, -1])]; + tensor var_13270_cast = reshape(shape = var_13269, x = q_269_cast)[name = tensor("op_13270_cast")]; + tensor var_13271 = const()[name = tensor("op_13271"), val = tensor([2, 10, 64, -1])]; + tensor var_13272_cast = reshape(shape = var_13271, x = k_269_cast)[name = tensor("op_13272_cast")]; + tensor var_13273 = const()[name = tensor("op_13273"), val = tensor([2, 10, 64, -1])]; + tensor var_13274_cast = reshape(shape = var_13273, x = v_269_cast)[name = tensor("op_13274_cast")]; + tensor attn_weights_537_transpose_x_0 = const()[name = tensor("attn_weights_537_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_537_transpose_y_0 = const()[name = tensor("attn_weights_537_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_537_cast = matmul(transpose_x = attn_weights_537_transpose_x_0, transpose_y = attn_weights_537_transpose_y_0, x = var_13270_cast, y = var_13272_cast)[name = tensor("attn_weights_537_cast")]; + tensor attn_weights_539_cast = mul(x = attn_weights_537_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_539_cast")]; + tensor var_13278_cast = softmax(axis = var_12502, x = attn_weights_539_cast)[name = tensor("op_13278_cast")]; + tensor attn_269_transpose_x_0 = const()[name = tensor("attn_269_transpose_x_0"), val = tensor(false)]; + tensor attn_269_transpose_y_0 = const()[name = tensor("attn_269_transpose_y_0"), val = tensor(true)]; + tensor attn_269_cast = matmul(transpose_x = attn_269_transpose_x_0, transpose_y = attn_269_transpose_y_0, x = var_13274_cast, y = var_13278_cast)[name = tensor("attn_269_cast")]; + tensor var_13282 = const()[name = tensor("op_13282"), val 
= tensor([2, 640, 1, -1])]; + tensor input_779_cast = reshape(shape = var_13282, x = attn_269_cast)[name = tensor("input_779_cast")]; + tensor var_13287 = const()[name = tensor("op_13287"), val = tensor([1, 1])]; + tensor var_13289 = const()[name = tensor("op_13289"), val = tensor([1, 1])]; + tensor var_13291_pad_type_0 = const()[name = tensor("op_13291_pad_type_0"), val = tensor("custom")]; + tensor var_13291_pad_0 = const()[name = tensor("op_13291_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1049470656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1049880320))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1049880896)))]; + tensor var_13291_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_13289, groups = var_12518, pad = var_13291_pad_0, pad_type = var_13291_pad_type_0, strides = var_13287, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_779_cast)[name = tensor("op_13291_cast")]; + tensor inputs_405_cast = add(x = var_13291_cast, y = inputs_403_cast)[name = tensor("inputs_405_cast")]; + tensor var_13295 = const()[name = tensor("op_13295"), val = tensor([1])]; + tensor channels_mean_405_cast = reduce_mean(axes = var_13295, keep_dims = var_12513, x = inputs_405_cast)[name = tensor("channels_mean_405_cast")]; + tensor zero_mean_405_cast = sub(x = inputs_405_cast, y = channels_mean_405_cast)[name = tensor("zero_mean_405_cast")]; + tensor zero_mean_sq_405_cast = mul(x = zero_mean_405_cast, y = zero_mean_405_cast)[name = tensor("zero_mean_sq_405_cast")]; + tensor var_13299 = const()[name = tensor("op_13299"), val = tensor([1])]; + tensor var_13300_cast = reduce_mean(axes = var_13299, keep_dims = var_12513, x = zero_mean_sq_405_cast)[name = tensor("op_13300_cast")]; + tensor var_13301_to_fp16 = const()[name = tensor("op_13301_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13302_cast = add(x = var_13300_cast, y = var_13301_to_fp16)[name = tensor("op_13302_cast")]; + tensor denom_405_epsilon_0_to_fp16 = const()[name = tensor("denom_405_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_405_cast = rsqrt(epsilon = denom_405_epsilon_0_to_fp16, x = var_13302_cast)[name = tensor("denom_405_cast")]; + tensor out_405_cast = mul(x = zero_mean_405_cast, y = denom_405_cast)[name = tensor("out_405_cast")]; + tensor var_13306_to_fp16 = const()[name = tensor("op_13306_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1049882240)))]; + tensor var_13307_cast = add(x = out_405_cast, y = var_13306_to_fp16)[name = tensor("op_13307_cast")]; + tensor var_13309_to_fp16 = const()[name = tensor("op_13309_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1049883584)))]; + tensor hidden_states_543_cast = mul(x = var_13307_cast, y = var_13309_to_fp16)[name = 
tensor("hidden_states_543_cast")]; + tensor var_13316 = const()[name = tensor("op_13316"), val = tensor([1, 1])]; + tensor var_13318 = const()[name = tensor("op_13318"), val = tensor([1, 1])]; + tensor q_271_pad_type_0 = const()[name = tensor("q_271_pad_type_0"), val = tensor("custom")]; + tensor q_271_pad_0 = const()[name = tensor("q_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1049884928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1050089792))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_271_cast = conv(dilations = var_13318, groups = var_12518, pad = q_271_pad_0, pad_type = q_271_pad_type_0, strides = var_13316, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_543_cast)[name = tensor("q_271_cast")]; + tensor var_13322 = const()[name = tensor("op_13322"), val = tensor([1, 1])]; + tensor var_13324 = const()[name = tensor("op_13324"), val = tensor([1, 1])]; + tensor k_271_pad_type_0 = const()[name = tensor("k_271_pad_type_0"), val = tensor("custom")]; + tensor k_271_pad_0 = const()[name = tensor("k_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1050089920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1050745344))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_271_cast = conv(dilations = var_13324, groups = var_12518, pad = k_271_pad_0, pad_type = k_271_pad_type_0, strides = var_13322, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_271_cast")]; + tensor var_13328 = const()[name = tensor("op_13328"), val = tensor([1, 1])]; + tensor var_13330 = const()[name = tensor("op_13330"), val = tensor([1, 1])]; + tensor v_271_pad_type_0 = const()[name = tensor("v_271_pad_type_0"), val = tensor("custom")]; + tensor v_271_pad_0 = const()[name = tensor("v_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1050745472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1051400896))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_271_cast = conv(dilations = var_13330, groups = var_12518, pad = v_271_pad_0, pad_type = v_271_pad_type_0, strides = var_13328, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_271_cast")]; + tensor var_13334 = const()[name = tensor("op_13334"), val = tensor([2, 10, 64, -1])]; + tensor var_13335_cast = reshape(shape = var_13334, x = q_271_cast)[name = tensor("op_13335_cast")]; + tensor var_13336 = const()[name = 
tensor("op_13336"), val = tensor([2, 10, 64, -1])]; + tensor var_13337_cast = reshape(shape = var_13336, x = k_271_cast)[name = tensor("op_13337_cast")]; + tensor var_13338 = const()[name = tensor("op_13338"), val = tensor([2, 10, 64, -1])]; + tensor var_13339_cast = reshape(shape = var_13338, x = v_271_cast)[name = tensor("op_13339_cast")]; + tensor attn_weights_541_transpose_x_0 = const()[name = tensor("attn_weights_541_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_541_transpose_y_0 = const()[name = tensor("attn_weights_541_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_541_cast = matmul(transpose_x = attn_weights_541_transpose_x_0, transpose_y = attn_weights_541_transpose_y_0, x = var_13335_cast, y = var_13337_cast)[name = tensor("attn_weights_541_cast")]; + tensor attn_weights_543_cast = mul(x = attn_weights_541_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_543_cast")]; + tensor var_13343_cast = softmax(axis = var_12502, x = attn_weights_543_cast)[name = tensor("op_13343_cast")]; + tensor attn_271_transpose_x_0 = const()[name = tensor("attn_271_transpose_x_0"), val = tensor(false)]; + tensor attn_271_transpose_y_0 = const()[name = tensor("attn_271_transpose_y_0"), val = tensor(true)]; + tensor attn_271_cast = matmul(transpose_x = attn_271_transpose_x_0, transpose_y = attn_271_transpose_y_0, x = var_13339_cast, y = var_13343_cast)[name = tensor("attn_271_cast")]; + tensor var_13347 = const()[name = tensor("op_13347"), val = tensor([2, 640, 1, -1])]; + tensor input_781_cast = reshape(shape = var_13347, x = attn_271_cast)[name = tensor("input_781_cast")]; + tensor var_13352 = const()[name = tensor("op_13352"), val = tensor([1, 1])]; + tensor var_13354 = const()[name = tensor("op_13354"), val = tensor([1, 1])]; + tensor var_13356_pad_type_0 = const()[name = tensor("op_13356_pad_type_0"), val = tensor("custom")]; + tensor var_13356_pad_0 = const()[name = tensor("op_13356_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1051401024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1051708288))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1051708480)))]; + tensor var_13356_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_13354, groups = var_12518, pad = var_13356_pad_0, pad_type = var_13356_pad_type_0, strides = var_13352, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_781_cast)[name = tensor("op_13356_cast")]; + tensor inputs_407_cast = add(x = var_13356_cast, y = inputs_405_cast)[name = tensor("inputs_407_cast")]; + tensor var_13360 = const()[name = tensor("op_13360"), val = tensor([1])]; + tensor channels_mean_407_cast = reduce_mean(axes = var_13360, keep_dims = var_12513, x = inputs_407_cast)[name = tensor("channels_mean_407_cast")]; + tensor zero_mean_407_cast = sub(x = inputs_407_cast, y = channels_mean_407_cast)[name = 
tensor("zero_mean_407_cast")]; + tensor zero_mean_sq_407_cast = mul(x = zero_mean_407_cast, y = zero_mean_407_cast)[name = tensor("zero_mean_sq_407_cast")]; + tensor var_13364 = const()[name = tensor("op_13364"), val = tensor([1])]; + tensor var_13365_cast = reduce_mean(axes = var_13364, keep_dims = var_12513, x = zero_mean_sq_407_cast)[name = tensor("op_13365_cast")]; + tensor var_13366_to_fp16 = const()[name = tensor("op_13366_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13367_cast = add(x = var_13365_cast, y = var_13366_to_fp16)[name = tensor("op_13367_cast")]; + tensor denom_407_epsilon_0_to_fp16 = const()[name = tensor("denom_407_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_407_cast = rsqrt(epsilon = denom_407_epsilon_0_to_fp16, x = var_13367_cast)[name = tensor("denom_407_cast")]; + tensor out_407_cast = mul(x = zero_mean_407_cast, y = denom_407_cast)[name = tensor("out_407_cast")]; + tensor var_13371_to_fp16 = const()[name = tensor("op_13371_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1051709824)))]; + tensor var_13372_cast = add(x = out_407_cast, y = var_13371_to_fp16)[name = tensor("op_13372_cast")]; + tensor var_13374_to_fp16 = const()[name = tensor("op_13374_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1051711168)))]; + tensor input_783_cast = mul(x = var_13372_cast, y = var_13374_to_fp16)[name = tensor("input_783_cast")]; + tensor var_13382 = const()[name = tensor("op_13382"), val = tensor([1, 1])]; + tensor var_13384 = const()[name = tensor("op_13384"), val = tensor([1, 1])]; + tensor var_13386_pad_type_0 = const()[name = tensor("op_13386_pad_type_0"), val = tensor("custom")]; + tensor var_13386_pad_0 = const()[name = tensor("op_13386_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1051712512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1054989376))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1054989952)))]; + tensor var_13386_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_13384, groups = var_12518, pad = var_13386_pad_0, pad_type = var_13386_pad_type_0, strides = var_13382, weight = up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_783_cast)[name = tensor("op_13386_cast")]; + tensor var_13387_split_sizes_0 = const()[name = tensor("op_13387_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_13387_axis_0 = const()[name = tensor("op_13387_axis_0"), val = tensor(1)]; + tensor var_13387_cast_0, tensor var_13387_cast_1 = split(axis = var_13387_axis_0, split_sizes = var_13387_split_sizes_0, x = var_13386_cast)[name = tensor("op_13387_cast")]; + tensor var_13389_mode_0 = const()[name = tensor("op_13389_mode_0"), val = tensor("EXACT")]; + tensor var_13389_cast = gelu(mode = var_13389_mode_0, x = var_13387_cast_1)[name = 
tensor("op_13389_cast")]; + tensor input_785_cast = mul(x = var_13387_cast_0, y = var_13389_cast)[name = tensor("input_785_cast")]; + tensor var_13393 = const()[name = tensor("op_13393"), val = tensor([1, 1])]; + tensor var_13395 = const()[name = tensor("op_13395"), val = tensor([1, 1])]; + tensor var_13397_pad_type_0 = const()[name = tensor("op_13397_pad_type_0"), val = tensor("custom")]; + tensor var_13397_pad_0 = const()[name = tensor("op_13397_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1055000256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1056229120))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1056229312)))]; + tensor var_13397_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_13395, groups = var_12518, pad = var_13397_pad_0, pad_type = var_13397_pad_type_0, strides = var_13393, weight = up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_785_cast)[name = tensor("op_13397_cast")]; + tensor hidden_states_547_cast = add(x = var_13397_cast, y = inputs_407_cast)[name = tensor("hidden_states_547_cast")]; + tensor var_13399 = const()[name = tensor("op_13399"), val = tensor([2, 640, 64, 64])]; + tensor input_787_cast = reshape(shape = var_13399, x = hidden_states_547_cast)[name = tensor("input_787_cast")]; + tensor var_13403 = const()[name = tensor("op_13403"), val = tensor([1, 1])]; + tensor var_13405 = const()[name = tensor("op_13405"), val = tensor([1, 1])]; + tensor hidden_states_549_pad_type_0 = const()[name = tensor("hidden_states_549_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_549_pad_0 = const()[name = tensor("hidden_states_549_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1056230656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1056640320))), name = tensor("up_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1056640896)))]; + tensor hidden_states_549_cast = conv(bias = up_blocks_1_attentions_1_proj_out_bias_to_fp16, dilations = var_13405, groups = var_12518, pad = hidden_states_549_pad_0, pad_type = hidden_states_549_pad_type_0, strides = var_13403, weight = up_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized, x = input_787_cast)[name = tensor("hidden_states_549_cast")]; + tensor hidden_states_551_cast = add(x = hidden_states_549_cast, y = hidden_states_531_cast)[name = tensor("hidden_states_551_cast")]; + tensor input_789_interleave_0 = const()[name = tensor("input_789_interleave_0"), val = 
tensor(false)]; + tensor input_789_cast = concat(axis = var_12518, interleave = input_789_interleave_0, values = (hidden_states_551_cast, input_45_cast))[name = tensor("input_789_cast")]; + tensor reshape_144_shape_0 = const()[name = tensor("reshape_144_shape_0"), val = tensor([2, 32, 30, 64, 64])]; + tensor reshape_144_cast = reshape(shape = reshape_144_shape_0, x = input_789_cast)[name = tensor("reshape_144_cast")]; + tensor reduce_mean_108_axes_0 = const()[name = tensor("reduce_mean_108_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_108_keep_dims_0 = const()[name = tensor("reduce_mean_108_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_108_cast = reduce_mean(axes = reduce_mean_108_axes_0, keep_dims = reduce_mean_108_keep_dims_0, x = reshape_144_cast)[name = tensor("reduce_mean_108_cast")]; + tensor sub_72_cast = sub(x = reshape_144_cast, y = reduce_mean_108_cast)[name = tensor("sub_72_cast")]; + tensor square_36_cast = square(x = sub_72_cast)[name = tensor("square_36_cast")]; + tensor reduce_mean_110_axes_0 = const()[name = tensor("reduce_mean_110_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_110_keep_dims_0 = const()[name = tensor("reduce_mean_110_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_110_cast = reduce_mean(axes = reduce_mean_110_axes_0, keep_dims = reduce_mean_110_keep_dims_0, x = square_36_cast)[name = tensor("reduce_mean_110_cast")]; + tensor add_72_y_0_to_fp16 = const()[name = tensor("add_72_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_72_cast = add(x = reduce_mean_110_cast, y = add_72_y_0_to_fp16)[name = tensor("add_72_cast")]; + tensor sqrt_36_cast = sqrt(x = add_72_cast)[name = tensor("sqrt_36_cast")]; + tensor real_div_36_cast = real_div(x = sub_72_cast, y = sqrt_36_cast)[name = tensor("real_div_36_cast")]; + tensor reshape_145_shape_0 = const()[name = tensor("reshape_145_shape_0"), val = tensor([2, 960, 64, 64])]; + tensor reshape_145_cast = reshape(shape = reshape_145_shape_0, x = real_div_36_cast)[name = tensor("reshape_145_cast")]; + tensor add_73_mean_0_to_fp16 = const()[name = tensor("add_73_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1056642240)))]; + tensor add_73_variance_0_to_fp16 = const()[name = tensor("add_73_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1056644224)))]; + tensor add_73_gamma_0_to_fp16 = const()[name = tensor("add_73_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1056646208)))]; + tensor add_73_beta_0_to_fp16 = const()[name = tensor("add_73_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1056648192)))]; + tensor add_73_epsilon_0_to_fp16 = const()[name = tensor("add_73_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_73_cast = batch_norm(beta = add_73_beta_0_to_fp16, epsilon = add_73_epsilon_0_to_fp16, gamma = add_73_gamma_0_to_fp16, mean = add_73_mean_0_to_fp16, variance = add_73_variance_0_to_fp16, x = reshape_145_cast)[name = tensor("add_73_cast")]; + tensor input_793_cast = silu(x = add_73_cast)[name = tensor("input_793_cast")]; + tensor var_13423 = const()[name = tensor("op_13423"), val = tensor([1, 1])]; + tensor var_13425 = const()[name = tensor("op_13425"), val = tensor([1, 1])]; + tensor hidden_states_553_pad_type_0 = const()[name = tensor("hidden_states_553_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_553_pad_0 = 
const()[name = tensor("hidden_states_553_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_2_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1056650176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062179840))), name = tensor("up_blocks_1_resnets_2_conv1_weight_to_fp16_palettized"), shape = tensor([640, 960, 3, 3])]; + tensor up_blocks_1_resnets_2_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062180416)))]; + tensor hidden_states_553_cast = conv(bias = up_blocks_1_resnets_2_conv1_bias_to_fp16, dilations = var_13425, groups = var_12518, pad = hidden_states_553_pad_0, pad_type = hidden_states_553_pad_type_0, strides = var_13423, weight = up_blocks_1_resnets_2_conv1_weight_to_fp16_palettized, x = input_793_cast)[name = tensor("hidden_states_553_cast")]; + tensor var_13431 = const()[name = tensor("op_13431"), val = tensor([1, 1])]; + tensor var_13433 = const()[name = tensor("op_13433"), val = tensor([1, 1])]; + tensor temb_27_pad_type_0 = const()[name = tensor("temb_27_pad_type_0"), val = tensor("custom")]; + tensor temb_27_pad_0 = const()[name = tensor("temb_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_2_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062181760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062796224))), name = tensor("up_blocks_1_resnets_2_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor up_blocks_1_resnets_2_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062796416)))]; + tensor temb_27_cast = conv(bias = up_blocks_1_resnets_2_time_emb_proj_bias_to_fp16, dilations = var_13433, groups = var_12518, pad = temb_27_pad_0, pad_type = temb_27_pad_type_0, strides = var_13431, weight = up_blocks_1_resnets_2_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_27_cast")]; + tensor input_797_cast = add(x = hidden_states_553_cast, y = temb_27_cast)[name = tensor("input_797_cast")]; + tensor reshape_148_shape_0 = const()[name = tensor("reshape_148_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_148_cast = reshape(shape = reshape_148_shape_0, x = input_797_cast)[name = tensor("reshape_148_cast")]; + tensor reduce_mean_111_axes_0 = const()[name = tensor("reduce_mean_111_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_111_keep_dims_0 = const()[name = tensor("reduce_mean_111_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_111_cast = reduce_mean(axes = reduce_mean_111_axes_0, keep_dims = reduce_mean_111_keep_dims_0, x = reshape_148_cast)[name = tensor("reduce_mean_111_cast")]; + tensor sub_74_cast = sub(x = reshape_148_cast, y = reduce_mean_111_cast)[name = tensor("sub_74_cast")]; + tensor square_37_cast = square(x = sub_74_cast)[name = tensor("square_37_cast")]; + tensor reduce_mean_113_axes_0 = const()[name = tensor("reduce_mean_113_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_113_keep_dims_0 = const()[name = tensor("reduce_mean_113_keep_dims_0"), val = tensor(true)]; + 
tensor reduce_mean_113_cast = reduce_mean(axes = reduce_mean_113_axes_0, keep_dims = reduce_mean_113_keep_dims_0, x = square_37_cast)[name = tensor("reduce_mean_113_cast")]; + tensor add_74_y_0_to_fp16 = const()[name = tensor("add_74_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_74_cast = add(x = reduce_mean_113_cast, y = add_74_y_0_to_fp16)[name = tensor("add_74_cast")]; + tensor sqrt_37_cast = sqrt(x = add_74_cast)[name = tensor("sqrt_37_cast")]; + tensor real_div_37_cast = real_div(x = sub_74_cast, y = sqrt_37_cast)[name = tensor("real_div_37_cast")]; + tensor reshape_149_shape_0 = const()[name = tensor("reshape_149_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_149_cast = reshape(shape = reshape_149_shape_0, x = real_div_37_cast)[name = tensor("reshape_149_cast")]; + tensor add_75_gamma_0_to_fp16 = const()[name = tensor("add_75_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062797760)))]; + tensor add_75_beta_0_to_fp16 = const()[name = tensor("add_75_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062799104)))]; + tensor add_75_epsilon_0_to_fp16 = const()[name = tensor("add_75_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_75_cast = batch_norm(beta = add_75_beta_0_to_fp16, epsilon = add_75_epsilon_0_to_fp16, gamma = add_75_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_149_cast)[name = tensor("add_75_cast")]; + tensor input_801_cast = silu(x = add_75_cast)[name = tensor("input_801_cast")]; + tensor var_13443 = const()[name = tensor("op_13443"), val = tensor([1, 1])]; + tensor var_13445 = const()[name = tensor("op_13445"), val = tensor([1, 1])]; + tensor hidden_states_555_pad_type_0 = const()[name = tensor("hidden_states_555_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_555_pad_0 = const()[name = tensor("hidden_states_555_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_2_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062800448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1066486912))), name = tensor("up_blocks_1_resnets_2_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor up_blocks_1_resnets_2_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1066487488)))]; + tensor hidden_states_555_cast = conv(bias = up_blocks_1_resnets_2_conv2_bias_to_fp16, dilations = var_13445, groups = var_12518, pad = hidden_states_555_pad_0, pad_type = hidden_states_555_pad_type_0, strides = var_13443, weight = up_blocks_1_resnets_2_conv2_weight_to_fp16_palettized, x = input_801_cast)[name = tensor("hidden_states_555_cast")]; + tensor var_13450 = const()[name = tensor("op_13450"), val = tensor([1, 1])]; + tensor var_13452 = const()[name = tensor("op_13452"), val = tensor([1, 1])]; + tensor x_15_pad_type_0 = const()[name = tensor("x_15_pad_type_0"), val = tensor("custom")]; + tensor x_15_pad_0 = const()[name = tensor("x_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_2_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1066488832))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1067103296))), name = tensor("up_blocks_1_resnets_2_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([640, 960, 1, 1])]; + tensor up_blocks_1_resnets_2_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1067103872)))]; + tensor x_15_cast = conv(bias = up_blocks_1_resnets_2_conv_shortcut_bias_to_fp16, dilations = var_13452, groups = var_12518, pad = x_15_pad_0, pad_type = x_15_pad_type_0, strides = var_13450, weight = up_blocks_1_resnets_2_conv_shortcut_weight_to_fp16_palettized, x = input_789_cast)[name = tensor("x_15_cast")]; + tensor hidden_states_557_cast = add(x = x_15_cast, y = hidden_states_555_cast)[name = tensor("hidden_states_557_cast")]; + tensor reshape_152_shape_0 = const()[name = tensor("reshape_152_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_152_cast = reshape(shape = reshape_152_shape_0, x = hidden_states_557_cast)[name = tensor("reshape_152_cast")]; + tensor reduce_mean_114_axes_0 = const()[name = tensor("reduce_mean_114_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_114_keep_dims_0 = const()[name = tensor("reduce_mean_114_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_114_cast = reduce_mean(axes = reduce_mean_114_axes_0, keep_dims = reduce_mean_114_keep_dims_0, x = reshape_152_cast)[name = tensor("reduce_mean_114_cast")]; + tensor sub_76_cast = sub(x = reshape_152_cast, y = reduce_mean_114_cast)[name = tensor("sub_76_cast")]; + tensor square_38_cast = square(x = sub_76_cast)[name = tensor("square_38_cast")]; + tensor reduce_mean_116_axes_0 = const()[name = tensor("reduce_mean_116_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_116_keep_dims_0 = const()[name = tensor("reduce_mean_116_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_116_cast = reduce_mean(axes = reduce_mean_116_axes_0, keep_dims = reduce_mean_116_keep_dims_0, x = square_38_cast)[name = tensor("reduce_mean_116_cast")]; + tensor add_76_y_0_to_fp16 = const()[name = tensor("add_76_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_76_cast = add(x = reduce_mean_116_cast, y = add_76_y_0_to_fp16)[name = tensor("add_76_cast")]; + tensor sqrt_38_cast = sqrt(x = add_76_cast)[name = tensor("sqrt_38_cast")]; + tensor real_div_38_cast = real_div(x = sub_76_cast, y = sqrt_38_cast)[name = tensor("real_div_38_cast")]; + tensor reshape_153_shape_0 = const()[name = tensor("reshape_153_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_153_cast = reshape(shape = reshape_153_shape_0, x = real_div_38_cast)[name = tensor("reshape_153_cast")]; + tensor add_77_gamma_0_to_fp16 = const()[name = tensor("add_77_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1067105216)))]; + tensor add_77_beta_0_to_fp16 = const()[name = tensor("add_77_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1067106560)))]; + tensor add_77_epsilon_0_to_fp16 = const()[name = tensor("add_77_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_77_cast = batch_norm(beta = add_77_beta_0_to_fp16, epsilon = add_77_epsilon_0_to_fp16, gamma = add_77_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_153_cast)[name = tensor("add_77_cast")]; + tensor var_13474 = const()[name = tensor("op_13474"), val = tensor([1, 
1])]; + tensor var_13476 = const()[name = tensor("op_13476"), val = tensor([1, 1])]; + tensor hidden_states_559_pad_type_0 = const()[name = tensor("hidden_states_559_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_559_pad_0 = const()[name = tensor("hidden_states_559_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1067107904))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1067517568))), name = tensor("up_blocks_1_attentions_2_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1067518144)))]; + tensor hidden_states_559_cast = conv(bias = up_blocks_1_attentions_2_proj_in_bias_to_fp16, dilations = var_13476, groups = var_12518, pad = hidden_states_559_pad_0, pad_type = hidden_states_559_pad_type_0, strides = var_13474, weight = up_blocks_1_attentions_2_proj_in_weight_to_fp16_palettized, x = add_77_cast)[name = tensor("hidden_states_559_cast")]; + tensor var_13481 = const()[name = tensor("op_13481"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_409_cast = reshape(shape = var_13481, x = hidden_states_559_cast)[name = tensor("inputs_409_cast")]; + tensor var_13491 = const()[name = tensor("op_13491"), val = tensor([1])]; + tensor channels_mean_409_cast = reduce_mean(axes = var_13491, keep_dims = var_12513, x = inputs_409_cast)[name = tensor("channels_mean_409_cast")]; + tensor zero_mean_409_cast = sub(x = inputs_409_cast, y = channels_mean_409_cast)[name = tensor("zero_mean_409_cast")]; + tensor zero_mean_sq_409_cast = mul(x = zero_mean_409_cast, y = zero_mean_409_cast)[name = tensor("zero_mean_sq_409_cast")]; + tensor var_13495 = const()[name = tensor("op_13495"), val = tensor([1])]; + tensor var_13496_cast = reduce_mean(axes = var_13495, keep_dims = var_12513, x = zero_mean_sq_409_cast)[name = tensor("op_13496_cast")]; + tensor var_13497_to_fp16 = const()[name = tensor("op_13497_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13498_cast = add(x = var_13496_cast, y = var_13497_to_fp16)[name = tensor("op_13498_cast")]; + tensor denom_409_epsilon_0_to_fp16 = const()[name = tensor("denom_409_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_409_cast = rsqrt(epsilon = denom_409_epsilon_0_to_fp16, x = var_13498_cast)[name = tensor("denom_409_cast")]; + tensor out_409_cast = mul(x = zero_mean_409_cast, y = denom_409_cast)[name = tensor("out_409_cast")]; + tensor var_13502_to_fp16 = const()[name = tensor("op_13502_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1067519488)))]; + tensor var_13503_cast = add(x = out_409_cast, y = var_13502_to_fp16)[name = tensor("op_13503_cast")]; + tensor var_13505_to_fp16 = const()[name = tensor("op_13505_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1067520832)))]; + tensor hidden_states_561_cast = mul(x = var_13503_cast, y = var_13505_to_fp16)[name = tensor("hidden_states_561_cast")]; + tensor var_13512 = const()[name = tensor("op_13512"), val = tensor([1, 1])]; + tensor var_13514 = const()[name = tensor("op_13514"), val = tensor([1, 1])]; + tensor q_273_pad_type_0 = const()[name = 
tensor("q_273_pad_type_0"), val = tensor("custom")]; + tensor q_273_pad_0 = const()[name = tensor("q_273_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1067522176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1067829440))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_273_cast = conv(dilations = var_13514, groups = var_12518, pad = q_273_pad_0, pad_type = q_273_pad_type_0, strides = var_13512, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_561_cast)[name = tensor("q_273_cast")]; + tensor var_13518 = const()[name = tensor("op_13518"), val = tensor([1, 1])]; + tensor var_13520 = const()[name = tensor("op_13520"), val = tensor([1, 1])]; + tensor k_273_pad_type_0 = const()[name = tensor("k_273_pad_type_0"), val = tensor("custom")]; + tensor k_273_pad_0 = const()[name = tensor("k_273_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1067829632))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1068136896))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_273_cast = conv(dilations = var_13520, groups = var_12518, pad = k_273_pad_0, pad_type = k_273_pad_type_0, strides = var_13518, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_561_cast)[name = tensor("k_273_cast")]; + tensor var_13524 = const()[name = tensor("op_13524"), val = tensor([1, 1])]; + tensor var_13526 = const()[name = tensor("op_13526"), val = tensor([1, 1])]; + tensor v_273_pad_type_0 = const()[name = tensor("v_273_pad_type_0"), val = tensor("custom")]; + tensor v_273_pad_0 = const()[name = tensor("v_273_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1068137088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1068546752))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_273_cast = conv(dilations = var_13526, groups = var_12518, pad = v_273_pad_0, pad_type = v_273_pad_type_0, strides = var_13524, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_561_cast)[name = tensor("v_273_cast")]; + tensor var_13530 = const()[name = tensor("op_13530"), val = tensor([2, 10, 64, -1])]; + tensor var_13531_cast = reshape(shape = var_13530, x = q_273_cast)[name = tensor("op_13531_cast")]; + tensor var_13532 = const()[name = tensor("op_13532"), val = tensor([2, 10, 64, -1])]; + tensor var_13533_cast = reshape(shape = var_13532, x = k_273_cast)[name = tensor("op_13533_cast")]; + tensor var_13534 = const()[name = tensor("op_13534"), val = tensor([2, 10, 64, 
-1])]; + tensor var_13535_cast = reshape(shape = var_13534, x = v_273_cast)[name = tensor("op_13535_cast")]; + tensor attn_weights_545_transpose_x_0 = const()[name = tensor("attn_weights_545_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_545_transpose_y_0 = const()[name = tensor("attn_weights_545_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_545_cast = matmul(transpose_x = attn_weights_545_transpose_x_0, transpose_y = attn_weights_545_transpose_y_0, x = var_13531_cast, y = var_13533_cast)[name = tensor("attn_weights_545_cast")]; + tensor attn_weights_547_cast = mul(x = attn_weights_545_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_547_cast")]; + tensor var_13539_cast = softmax(axis = var_12502, x = attn_weights_547_cast)[name = tensor("op_13539_cast")]; + tensor attn_273_transpose_x_0 = const()[name = tensor("attn_273_transpose_x_0"), val = tensor(false)]; + tensor attn_273_transpose_y_0 = const()[name = tensor("attn_273_transpose_y_0"), val = tensor(true)]; + tensor attn_273_cast = matmul(transpose_x = attn_273_transpose_x_0, transpose_y = attn_273_transpose_y_0, x = var_13535_cast, y = var_13539_cast)[name = tensor("attn_273_cast")]; + tensor var_13543 = const()[name = tensor("op_13543"), val = tensor([2, 640, 1, -1])]; + tensor input_805_cast = reshape(shape = var_13543, x = attn_273_cast)[name = tensor("input_805_cast")]; + tensor var_13548 = const()[name = tensor("op_13548"), val = tensor([1, 1])]; + tensor var_13550 = const()[name = tensor("op_13550"), val = tensor([1, 1])]; + tensor var_13552_pad_type_0 = const()[name = tensor("op_13552_pad_type_0"), val = tensor("custom")]; + tensor var_13552_pad_0 = const()[name = tensor("op_13552_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1068547328))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1068956992))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1068957568)))]; + tensor var_13552_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_13550, groups = var_12518, pad = var_13552_pad_0, pad_type = var_13552_pad_type_0, strides = var_13548, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_805_cast)[name = tensor("op_13552_cast")]; + tensor inputs_411_cast = add(x = var_13552_cast, y = inputs_409_cast)[name = tensor("inputs_411_cast")]; + tensor var_13556 = const()[name = tensor("op_13556"), val = tensor([1])]; + tensor channels_mean_411_cast = reduce_mean(axes = var_13556, keep_dims = var_12513, x = inputs_411_cast)[name = tensor("channels_mean_411_cast")]; + tensor zero_mean_411_cast = sub(x = inputs_411_cast, y = channels_mean_411_cast)[name = tensor("zero_mean_411_cast")]; + tensor zero_mean_sq_411_cast = mul(x = zero_mean_411_cast, y = zero_mean_411_cast)[name = tensor("zero_mean_sq_411_cast")]; + tensor var_13560 = const()[name = tensor("op_13560"), val = tensor([1])]; + tensor 
var_13561_cast = reduce_mean(axes = var_13560, keep_dims = var_12513, x = zero_mean_sq_411_cast)[name = tensor("op_13561_cast")]; + tensor var_13562_to_fp16 = const()[name = tensor("op_13562_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13563_cast = add(x = var_13561_cast, y = var_13562_to_fp16)[name = tensor("op_13563_cast")]; + tensor denom_411_epsilon_0_to_fp16 = const()[name = tensor("denom_411_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_411_cast = rsqrt(epsilon = denom_411_epsilon_0_to_fp16, x = var_13563_cast)[name = tensor("denom_411_cast")]; + tensor out_411_cast = mul(x = zero_mean_411_cast, y = denom_411_cast)[name = tensor("out_411_cast")]; + tensor var_13567_to_fp16 = const()[name = tensor("op_13567_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1068958912)))]; + tensor var_13568_cast = add(x = out_411_cast, y = var_13567_to_fp16)[name = tensor("op_13568_cast")]; + tensor var_13570_to_fp16 = const()[name = tensor("op_13570_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1068960256)))]; + tensor hidden_states_563_cast = mul(x = var_13568_cast, y = var_13570_to_fp16)[name = tensor("hidden_states_563_cast")]; + tensor var_13577 = const()[name = tensor("op_13577"), val = tensor([1, 1])]; + tensor var_13579 = const()[name = tensor("op_13579"), val = tensor([1, 1])]; + tensor q_275_pad_type_0 = const()[name = tensor("q_275_pad_type_0"), val = tensor("custom")]; + tensor q_275_pad_0 = const()[name = tensor("q_275_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1068961600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1069268864))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_275_cast = conv(dilations = var_13579, groups = var_12518, pad = q_275_pad_0, pad_type = q_275_pad_type_0, strides = var_13577, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_563_cast)[name = tensor("q_275_cast")]; + tensor var_13583 = const()[name = tensor("op_13583"), val = tensor([1, 1])]; + tensor var_13585 = const()[name = tensor("op_13585"), val = tensor([1, 1])]; + tensor k_275_pad_type_0 = const()[name = tensor("k_275_pad_type_0"), val = tensor("custom")]; + tensor k_275_pad_0 = const()[name = tensor("k_275_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1069269056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1070252160))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_275_cast = conv(dilations = var_13585, groups = var_12518, pad = k_275_pad_0, pad_type = k_275_pad_type_0, strides = var_13583, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_275_cast")]; + tensor var_13589 = const()[name = tensor("op_13589"), val = tensor([1, 1])]; + tensor var_13591 = 
const()[name = tensor("op_13591"), val = tensor([1, 1])]; + tensor v_275_pad_type_0 = const()[name = tensor("v_275_pad_type_0"), val = tensor("custom")]; + tensor v_275_pad_0 = const()[name = tensor("v_275_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1070252352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1071235456))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_275_cast = conv(dilations = var_13591, groups = var_12518, pad = v_275_pad_0, pad_type = v_275_pad_type_0, strides = var_13589, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_275_cast")]; + tensor var_13595 = const()[name = tensor("op_13595"), val = tensor([2, 10, 64, -1])]; + tensor var_13596_cast = reshape(shape = var_13595, x = q_275_cast)[name = tensor("op_13596_cast")]; + tensor var_13597 = const()[name = tensor("op_13597"), val = tensor([2, 10, 64, -1])]; + tensor var_13598_cast = reshape(shape = var_13597, x = k_275_cast)[name = tensor("op_13598_cast")]; + tensor var_13599 = const()[name = tensor("op_13599"), val = tensor([2, 10, 64, -1])]; + tensor var_13600_cast = reshape(shape = var_13599, x = v_275_cast)[name = tensor("op_13600_cast")]; + tensor attn_weights_549_transpose_x_0 = const()[name = tensor("attn_weights_549_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_549_transpose_y_0 = const()[name = tensor("attn_weights_549_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_549_cast = matmul(transpose_x = attn_weights_549_transpose_x_0, transpose_y = attn_weights_549_transpose_y_0, x = var_13596_cast, y = var_13598_cast)[name = tensor("attn_weights_549_cast")]; + tensor attn_weights_551_cast = mul(x = attn_weights_549_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_551_cast")]; + tensor var_13604_cast = softmax(axis = var_12502, x = attn_weights_551_cast)[name = tensor("op_13604_cast")]; + tensor attn_275_transpose_x_0 = const()[name = tensor("attn_275_transpose_x_0"), val = tensor(false)]; + tensor attn_275_transpose_y_0 = const()[name = tensor("attn_275_transpose_y_0"), val = tensor(true)]; + tensor attn_275_cast = matmul(transpose_x = attn_275_transpose_x_0, transpose_y = attn_275_transpose_y_0, x = var_13600_cast, y = var_13604_cast)[name = tensor("attn_275_cast")]; + tensor var_13608 = const()[name = tensor("op_13608"), val = tensor([2, 640, 1, -1])]; + tensor input_807_cast = reshape(shape = var_13608, x = attn_275_cast)[name = tensor("input_807_cast")]; + tensor var_13613 = const()[name = tensor("op_13613"), val = tensor([1, 1])]; + tensor var_13615 = const()[name = tensor("op_13615"), val = tensor([1, 1])]; + tensor var_13617_pad_type_0 = const()[name = tensor("op_13617_pad_type_0"), val = tensor("custom")]; + tensor var_13617_pad_0 = const()[name = tensor("op_13617_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1071235648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1071542912))), name = 
tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1071543104)))]; + tensor var_13617_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_13615, groups = var_12518, pad = var_13617_pad_0, pad_type = var_13617_pad_type_0, strides = var_13613, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_807_cast)[name = tensor("op_13617_cast")]; + tensor inputs_413_cast = add(x = var_13617_cast, y = inputs_411_cast)[name = tensor("inputs_413_cast")]; + tensor var_13621 = const()[name = tensor("op_13621"), val = tensor([1])]; + tensor channels_mean_413_cast = reduce_mean(axes = var_13621, keep_dims = var_12513, x = inputs_413_cast)[name = tensor("channels_mean_413_cast")]; + tensor zero_mean_413_cast = sub(x = inputs_413_cast, y = channels_mean_413_cast)[name = tensor("zero_mean_413_cast")]; + tensor zero_mean_sq_413_cast = mul(x = zero_mean_413_cast, y = zero_mean_413_cast)[name = tensor("zero_mean_sq_413_cast")]; + tensor var_13625 = const()[name = tensor("op_13625"), val = tensor([1])]; + tensor var_13626_cast = reduce_mean(axes = var_13625, keep_dims = var_12513, x = zero_mean_sq_413_cast)[name = tensor("op_13626_cast")]; + tensor var_13627_to_fp16 = const()[name = tensor("op_13627_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13628_cast = add(x = var_13626_cast, y = var_13627_to_fp16)[name = tensor("op_13628_cast")]; + tensor denom_413_epsilon_0_to_fp16 = const()[name = tensor("denom_413_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_413_cast = rsqrt(epsilon = denom_413_epsilon_0_to_fp16, x = var_13628_cast)[name = tensor("denom_413_cast")]; + tensor out_413_cast = mul(x = zero_mean_413_cast, y = denom_413_cast)[name = tensor("out_413_cast")]; + tensor var_13632_to_fp16 = const()[name = tensor("op_13632_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1071544448)))]; + tensor var_13633_cast = add(x = out_413_cast, y = var_13632_to_fp16)[name = tensor("op_13633_cast")]; + tensor var_13635_to_fp16 = const()[name = tensor("op_13635_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1071545792)))]; + tensor input_809_cast = mul(x = var_13633_cast, y = var_13635_to_fp16)[name = tensor("input_809_cast")]; + tensor var_13643 = const()[name = tensor("op_13643"), val = tensor([1, 1])]; + tensor var_13645 = const()[name = tensor("op_13645"), val = tensor([1, 1])]; + tensor var_13647_pad_type_0 = const()[name = tensor("op_13647_pad_type_0"), val = tensor("custom")]; + tensor var_13647_pad_0 = const()[name = tensor("op_13647_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1071547136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1074824000))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor 
up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1074824576)))]; + tensor var_13647_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_13645, groups = var_12518, pad = var_13647_pad_0, pad_type = var_13647_pad_type_0, strides = var_13643, weight = up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_809_cast)[name = tensor("op_13647_cast")]; + tensor var_13648_split_sizes_0 = const()[name = tensor("op_13648_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_13648_axis_0 = const()[name = tensor("op_13648_axis_0"), val = tensor(1)]; + tensor var_13648_cast_0, tensor var_13648_cast_1 = split(axis = var_13648_axis_0, split_sizes = var_13648_split_sizes_0, x = var_13647_cast)[name = tensor("op_13648_cast")]; + tensor var_13650_mode_0 = const()[name = tensor("op_13650_mode_0"), val = tensor("EXACT")]; + tensor var_13650_cast = gelu(mode = var_13650_mode_0, x = var_13648_cast_1)[name = tensor("op_13650_cast")]; + tensor input_811_cast = mul(x = var_13648_cast_0, y = var_13650_cast)[name = tensor("input_811_cast")]; + tensor var_13654 = const()[name = tensor("op_13654"), val = tensor([1, 1])]; + tensor var_13656 = const()[name = tensor("op_13656"), val = tensor([1, 1])]; + tensor var_13658_pad_type_0 = const()[name = tensor("op_13658_pad_type_0"), val = tensor("custom")]; + tensor var_13658_pad_0 = const()[name = tensor("op_13658_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1074834880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1076473344))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1076473920)))]; + tensor var_13658_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_13656, groups = var_12518, pad = var_13658_pad_0, pad_type = var_13658_pad_type_0, strides = var_13654, weight = up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_811_cast)[name = tensor("op_13658_cast")]; + tensor inputs_415_cast = add(x = var_13658_cast, y = inputs_413_cast)[name = tensor("inputs_415_cast")]; + tensor var_13668 = const()[name = tensor("op_13668"), val = tensor([1])]; + tensor channels_mean_415_cast = reduce_mean(axes = var_13668, keep_dims = var_12513, x = inputs_415_cast)[name = tensor("channels_mean_415_cast")]; + tensor zero_mean_415_cast = sub(x = inputs_415_cast, y = channels_mean_415_cast)[name = tensor("zero_mean_415_cast")]; + tensor zero_mean_sq_415_cast = mul(x = zero_mean_415_cast, y = zero_mean_415_cast)[name = tensor("zero_mean_sq_415_cast")]; + tensor var_13672 = const()[name = tensor("op_13672"), val = tensor([1])]; + tensor var_13673_cast = reduce_mean(axes = var_13672, keep_dims = var_12513, x = 
zero_mean_sq_415_cast)[name = tensor("op_13673_cast")]; + tensor var_13674_to_fp16 = const()[name = tensor("op_13674_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13675_cast = add(x = var_13673_cast, y = var_13674_to_fp16)[name = tensor("op_13675_cast")]; + tensor denom_415_epsilon_0_to_fp16 = const()[name = tensor("denom_415_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_415_cast = rsqrt(epsilon = denom_415_epsilon_0_to_fp16, x = var_13675_cast)[name = tensor("denom_415_cast")]; + tensor out_415_cast = mul(x = zero_mean_415_cast, y = denom_415_cast)[name = tensor("out_415_cast")]; + tensor var_13679_to_fp16 = const()[name = tensor("op_13679_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1076475264)))]; + tensor var_13680_cast = add(x = out_415_cast, y = var_13679_to_fp16)[name = tensor("op_13680_cast")]; + tensor var_13682_to_fp16 = const()[name = tensor("op_13682_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1076476608)))]; + tensor hidden_states_567_cast = mul(x = var_13680_cast, y = var_13682_to_fp16)[name = tensor("hidden_states_567_cast")]; + tensor var_13689 = const()[name = tensor("op_13689"), val = tensor([1, 1])]; + tensor var_13691 = const()[name = tensor("op_13691"), val = tensor([1, 1])]; + tensor q_277_pad_type_0 = const()[name = tensor("q_277_pad_type_0"), val = tensor("custom")]; + tensor q_277_pad_0 = const()[name = tensor("q_277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1076477952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1076785216))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_277_cast = conv(dilations = var_13691, groups = var_12518, pad = q_277_pad_0, pad_type = q_277_pad_type_0, strides = var_13689, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_567_cast)[name = tensor("q_277_cast")]; + tensor var_13695 = const()[name = tensor("op_13695"), val = tensor([1, 1])]; + tensor var_13697 = const()[name = tensor("op_13697"), val = tensor([1, 1])]; + tensor k_277_pad_type_0 = const()[name = tensor("k_277_pad_type_0"), val = tensor("custom")]; + tensor k_277_pad_0 = const()[name = tensor("k_277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1076785408))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1077092672))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_277_cast = conv(dilations = var_13697, groups = var_12518, pad = k_277_pad_0, pad_type = k_277_pad_type_0, strides = var_13695, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_567_cast)[name = tensor("k_277_cast")]; + tensor var_13701 = const()[name = tensor("op_13701"), val = tensor([1, 1])]; + tensor var_13703 = const()[name = tensor("op_13703"), val = tensor([1, 1])]; + tensor 
v_277_pad_type_0 = const()[name = tensor("v_277_pad_type_0"), val = tensor("custom")]; + tensor v_277_pad_0 = const()[name = tensor("v_277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1077092864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1077502528))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_277_cast = conv(dilations = var_13703, groups = var_12518, pad = v_277_pad_0, pad_type = v_277_pad_type_0, strides = var_13701, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_567_cast)[name = tensor("v_277_cast")]; + tensor var_13707 = const()[name = tensor("op_13707"), val = tensor([2, 10, 64, -1])]; + tensor var_13708_cast = reshape(shape = var_13707, x = q_277_cast)[name = tensor("op_13708_cast")]; + tensor var_13709 = const()[name = tensor("op_13709"), val = tensor([2, 10, 64, -1])]; + tensor var_13710_cast = reshape(shape = var_13709, x = k_277_cast)[name = tensor("op_13710_cast")]; + tensor var_13711 = const()[name = tensor("op_13711"), val = tensor([2, 10, 64, -1])]; + tensor var_13712_cast = reshape(shape = var_13711, x = v_277_cast)[name = tensor("op_13712_cast")]; + tensor attn_weights_553_transpose_x_0 = const()[name = tensor("attn_weights_553_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_553_transpose_y_0 = const()[name = tensor("attn_weights_553_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_553_cast = matmul(transpose_x = attn_weights_553_transpose_x_0, transpose_y = attn_weights_553_transpose_y_0, x = var_13708_cast, y = var_13710_cast)[name = tensor("attn_weights_553_cast")]; + tensor attn_weights_555_cast = mul(x = attn_weights_553_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_555_cast")]; + tensor var_13716_cast = softmax(axis = var_12502, x = attn_weights_555_cast)[name = tensor("op_13716_cast")]; + tensor attn_277_transpose_x_0 = const()[name = tensor("attn_277_transpose_x_0"), val = tensor(false)]; + tensor attn_277_transpose_y_0 = const()[name = tensor("attn_277_transpose_y_0"), val = tensor(true)]; + tensor attn_277_cast = matmul(transpose_x = attn_277_transpose_x_0, transpose_y = attn_277_transpose_y_0, x = var_13712_cast, y = var_13716_cast)[name = tensor("attn_277_cast")]; + tensor var_13720 = const()[name = tensor("op_13720"), val = tensor([2, 640, 1, -1])]; + tensor input_813_cast = reshape(shape = var_13720, x = attn_277_cast)[name = tensor("input_813_cast")]; + tensor var_13725 = const()[name = tensor("op_13725"), val = tensor([1, 1])]; + tensor var_13727 = const()[name = tensor("op_13727"), val = tensor([1, 1])]; + tensor var_13729_pad_type_0 = const()[name = tensor("op_13729_pad_type_0"), val = tensor("custom")]; + tensor var_13729_pad_0 = const()[name = tensor("op_13729_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1077503104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1077912768))), name = 
tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1077913344)))]; + tensor var_13729_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_13727, groups = var_12518, pad = var_13729_pad_0, pad_type = var_13729_pad_type_0, strides = var_13725, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_813_cast)[name = tensor("op_13729_cast")]; + tensor inputs_417_cast = add(x = var_13729_cast, y = inputs_415_cast)[name = tensor("inputs_417_cast")]; + tensor var_13733 = const()[name = tensor("op_13733"), val = tensor([1])]; + tensor channels_mean_417_cast = reduce_mean(axes = var_13733, keep_dims = var_12513, x = inputs_417_cast)[name = tensor("channels_mean_417_cast")]; + tensor zero_mean_417_cast = sub(x = inputs_417_cast, y = channels_mean_417_cast)[name = tensor("zero_mean_417_cast")]; + tensor zero_mean_sq_417_cast = mul(x = zero_mean_417_cast, y = zero_mean_417_cast)[name = tensor("zero_mean_sq_417_cast")]; + tensor var_13737 = const()[name = tensor("op_13737"), val = tensor([1])]; + tensor var_13738_cast = reduce_mean(axes = var_13737, keep_dims = var_12513, x = zero_mean_sq_417_cast)[name = tensor("op_13738_cast")]; + tensor var_13739_to_fp16 = const()[name = tensor("op_13739_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13740_cast = add(x = var_13738_cast, y = var_13739_to_fp16)[name = tensor("op_13740_cast")]; + tensor denom_417_epsilon_0_to_fp16 = const()[name = tensor("denom_417_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_417_cast = rsqrt(epsilon = denom_417_epsilon_0_to_fp16, x = var_13740_cast)[name = tensor("denom_417_cast")]; + tensor out_417_cast = mul(x = zero_mean_417_cast, y = denom_417_cast)[name = tensor("out_417_cast")]; + tensor var_13744_to_fp16 = const()[name = tensor("op_13744_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1077914688)))]; + tensor var_13745_cast = add(x = out_417_cast, y = var_13744_to_fp16)[name = tensor("op_13745_cast")]; + tensor var_13747_to_fp16 = const()[name = tensor("op_13747_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1077916032)))]; + tensor hidden_states_569_cast = mul(x = var_13745_cast, y = var_13747_to_fp16)[name = tensor("hidden_states_569_cast")]; + tensor var_13754 = const()[name = tensor("op_13754"), val = tensor([1, 1])]; + tensor var_13756 = const()[name = tensor("op_13756"), val = tensor([1, 1])]; + tensor q_pad_type_0 = const()[name = tensor("q_pad_type_0"), val = tensor("custom")]; + tensor q_pad_0 = const()[name = tensor("q_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1077917376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1078122240))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_cast = conv(dilations = 
var_13756, groups = var_12518, pad = q_pad_0, pad_type = q_pad_type_0, strides = var_13754, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_569_cast)[name = tensor("q_cast")]; + tensor var_13760 = const()[name = tensor("op_13760"), val = tensor([1, 1])]; + tensor var_13762 = const()[name = tensor("op_13762"), val = tensor([1, 1])]; + tensor k_pad_type_0 = const()[name = tensor("k_pad_type_0"), val = tensor("custom")]; + tensor k_pad_0 = const()[name = tensor("k_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1078122368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1079105472))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_cast = conv(dilations = var_13762, groups = var_12518, pad = k_pad_0, pad_type = k_pad_type_0, strides = var_13760, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_cast")]; + tensor var_13766 = const()[name = tensor("op_13766"), val = tensor([1, 1])]; + tensor var_13768 = const()[name = tensor("op_13768"), val = tensor([1, 1])]; + tensor v_pad_type_0 = const()[name = tensor("v_pad_type_0"), val = tensor("custom")]; + tensor v_pad_0 = const()[name = tensor("v_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1079105664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1079761088))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_cast = conv(dilations = var_13768, groups = var_12518, pad = v_pad_0, pad_type = v_pad_type_0, strides = var_13766, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_cast")]; + tensor var_13772 = const()[name = tensor("op_13772"), val = tensor([2, 10, 64, -1])]; + tensor var_13773_cast = reshape(shape = var_13772, x = q_cast)[name = tensor("op_13773_cast")]; + tensor var_13774 = const()[name = tensor("op_13774"), val = tensor([2, 10, 64, -1])]; + tensor var_13775_cast = reshape(shape = var_13774, x = k_cast)[name = tensor("op_13775_cast")]; + tensor var_13776 = const()[name = tensor("op_13776"), val = tensor([2, 10, 64, -1])]; + tensor var_13777_cast = reshape(shape = var_13776, x = v_cast)[name = tensor("op_13777_cast")]; + tensor attn_weights_557_transpose_x_0 = const()[name = tensor("attn_weights_557_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_557_transpose_y_0 = const()[name = tensor("attn_weights_557_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_557_cast = matmul(transpose_x = attn_weights_557_transpose_x_0, transpose_y = attn_weights_557_transpose_y_0, x = var_13773_cast, y = var_13775_cast)[name = tensor("attn_weights_557_cast")]; + tensor attn_weights_cast = mul(x = attn_weights_557_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_cast")]; + tensor var_13781_cast = softmax(axis = var_12502, x 
= attn_weights_cast)[name = tensor("op_13781_cast")]; + tensor attn_transpose_x_0 = const()[name = tensor("attn_transpose_x_0"), val = tensor(false)]; + tensor attn_transpose_y_0 = const()[name = tensor("attn_transpose_y_0"), val = tensor(true)]; + tensor attn_cast = matmul(transpose_x = attn_transpose_x_0, transpose_y = attn_transpose_y_0, x = var_13777_cast, y = var_13781_cast)[name = tensor("attn_cast")]; + tensor var_13785 = const()[name = tensor("op_13785"), val = tensor([2, 640, 1, -1])]; + tensor input_815_cast = reshape(shape = var_13785, x = attn_cast)[name = tensor("input_815_cast")]; + tensor var_13790 = const()[name = tensor("op_13790"), val = tensor([1, 1])]; + tensor var_13792 = const()[name = tensor("op_13792"), val = tensor([1, 1])]; + tensor var_13794_pad_type_0 = const()[name = tensor("op_13794_pad_type_0"), val = tensor("custom")]; + tensor var_13794_pad_0 = const()[name = tensor("op_13794_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1079761216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080068480))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080068672)))]; + tensor var_13794_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_13792, groups = var_12518, pad = var_13794_pad_0, pad_type = var_13794_pad_type_0, strides = var_13790, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_815_cast)[name = tensor("op_13794_cast")]; + tensor inputs_cast = add(x = var_13794_cast, y = inputs_417_cast)[name = tensor("inputs_cast")]; + tensor var_13798 = const()[name = tensor("op_13798"), val = tensor([1])]; + tensor channels_mean_cast = reduce_mean(axes = var_13798, keep_dims = var_12513, x = inputs_cast)[name = tensor("channels_mean_cast")]; + tensor zero_mean_cast = sub(x = inputs_cast, y = channels_mean_cast)[name = tensor("zero_mean_cast")]; + tensor zero_mean_sq_cast = mul(x = zero_mean_cast, y = zero_mean_cast)[name = tensor("zero_mean_sq_cast")]; + tensor var_13802 = const()[name = tensor("op_13802"), val = tensor([1])]; + tensor var_13803_cast = reduce_mean(axes = var_13802, keep_dims = var_12513, x = zero_mean_sq_cast)[name = tensor("op_13803_cast")]; + tensor var_13804_to_fp16 = const()[name = tensor("op_13804_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13805_cast = add(x = var_13803_cast, y = var_13804_to_fp16)[name = tensor("op_13805_cast")]; + tensor denom_epsilon_0_to_fp16 = const()[name = tensor("denom_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_cast = rsqrt(epsilon = denom_epsilon_0_to_fp16, x = var_13805_cast)[name = tensor("denom_cast")]; + tensor out_cast = mul(x = zero_mean_cast, y = denom_cast)[name = tensor("out_cast")]; + tensor var_13809_to_fp16 = const()[name = tensor("op_13809_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080070016)))]; + tensor var_13810_cast 
= add(x = out_cast, y = var_13809_to_fp16)[name = tensor("op_13810_cast")]; + tensor var_13812_to_fp16 = const()[name = tensor("op_13812_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080071360)))]; + tensor input_817_cast = mul(x = var_13810_cast, y = var_13812_to_fp16)[name = tensor("input_817_cast")]; + tensor var_13820 = const()[name = tensor("op_13820"), val = tensor([1, 1])]; + tensor var_13822 = const()[name = tensor("op_13822"), val = tensor([1, 1])]; + tensor var_13824_pad_type_0 = const()[name = tensor("op_13824_pad_type_0"), val = tensor("custom")]; + tensor var_13824_pad_0 = const()[name = tensor("op_13824_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080072704))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1083349568))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1083350144)))]; + tensor var_13824_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_13822, groups = var_12518, pad = var_13824_pad_0, pad_type = var_13824_pad_type_0, strides = var_13820, weight = up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_817_cast)[name = tensor("op_13824_cast")]; + tensor var_13825_split_sizes_0 = const()[name = tensor("op_13825_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_13825_axis_0 = const()[name = tensor("op_13825_axis_0"), val = tensor(1)]; + tensor var_13825_cast_0, tensor var_13825_cast_1 = split(axis = var_13825_axis_0, split_sizes = var_13825_split_sizes_0, x = var_13824_cast)[name = tensor("op_13825_cast")]; + tensor var_13827_mode_0 = const()[name = tensor("op_13827_mode_0"), val = tensor("EXACT")]; + tensor var_13827_cast = gelu(mode = var_13827_mode_0, x = var_13825_cast_1)[name = tensor("op_13827_cast")]; + tensor input_819_cast = mul(x = var_13825_cast_0, y = var_13827_cast)[name = tensor("input_819_cast")]; + tensor var_13831 = const()[name = tensor("op_13831"), val = tensor([1, 1])]; + tensor var_13833 = const()[name = tensor("op_13833"), val = tensor([1, 1])]; + tensor var_13835_pad_type_0 = const()[name = tensor("op_13835_pad_type_0"), val = tensor("custom")]; + tensor var_13835_pad_0 = const()[name = tensor("op_13835_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1083360448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1084998912))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1084999488)))]; + tensor var_13835_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_13833, groups = var_12518, pad = var_13835_pad_0, pad_type = var_13835_pad_type_0, strides = var_13831, weight = up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_819_cast)[name = tensor("op_13835_cast")]; + tensor hidden_states_573_cast = add(x = var_13835_cast, y = inputs_cast)[name = tensor("hidden_states_573_cast")]; + tensor var_13837 = const()[name = tensor("op_13837"), val = tensor([2, 640, 64, 64])]; + tensor input_821_cast = reshape(shape = var_13837, x = hidden_states_573_cast)[name = tensor("input_821_cast")]; + tensor var_13841 = const()[name = tensor("op_13841"), val = tensor([1, 1])]; + tensor var_13843 = const()[name = tensor("op_13843"), val = tensor([1, 1])]; + tensor hidden_states_575_pad_type_0 = const()[name = tensor("hidden_states_575_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_575_pad_0 = const()[name = tensor("hidden_states_575_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1085000832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1085410496))), name = tensor("up_blocks_1_attentions_2_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1085411072)))]; + tensor hidden_states_575_cast = conv(bias = up_blocks_1_attentions_2_proj_out_bias_to_fp16, dilations = var_13843, groups = var_12518, pad = hidden_states_575_pad_0, pad_type = hidden_states_575_pad_type_0, strides = var_13841, weight = up_blocks_1_attentions_2_proj_out_weight_to_fp16_palettized, x = input_821_cast)[name = tensor("hidden_states_575_cast")]; + tensor input_823_cast = add(x = hidden_states_575_cast, y = hidden_states_557_cast)[name = tensor("input_823_cast")]; + tensor input_825_scale_factor_height_0 = const()[name = tensor("input_825_scale_factor_height_0"), val = tensor(0x1p+1)]; + tensor input_825_scale_factor_width_0 = const()[name = tensor("input_825_scale_factor_width_0"), val = tensor(0x1p+1)]; + tensor input_825_cast = upsample_nearest_neighbor(scale_factor_height = input_825_scale_factor_height_0, scale_factor_width = input_825_scale_factor_width_0, x = input_823_cast)[name = tensor("input_825_cast")]; + tensor var_13852 = const()[name = tensor("op_13852"), val = tensor([1, 1])]; + tensor var_13854 = const()[name = tensor("op_13854"), val = tensor([1, 1])]; + tensor hidden_states_577_pad_type_0 = const()[name = tensor("hidden_states_577_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_577_pad_0 = const()[name = tensor("hidden_states_577_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_upsamplers_0_conv_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1085412416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1089098880))), name = tensor("up_blocks_1_upsamplers_0_conv_weight_to_fp16_palettized"), shape = 
tensor([640, 640, 3, 3])]; + tensor up_blocks_1_upsamplers_0_conv_bias_to_fp16 = const()[name = tensor("up_blocks_1_upsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1089099456)))]; + tensor hidden_states_577_cast = conv(bias = up_blocks_1_upsamplers_0_conv_bias_to_fp16, dilations = var_13854, groups = var_12518, pad = hidden_states_577_pad_0, pad_type = hidden_states_577_pad_type_0, strides = var_13852, weight = up_blocks_1_upsamplers_0_conv_weight_to_fp16_palettized, x = input_825_cast)[name = tensor("hidden_states_577_cast")]; + tensor var_13862 = const()[name = tensor("op_13862"), val = tensor(1)]; + tensor input_827_interleave_0 = const()[name = tensor("input_827_interleave_0"), val = tensor(false)]; + tensor input_827_cast = concat(axis = var_13862, interleave = input_827_interleave_0, values = (hidden_states_577_cast, input_43_cast))[name = tensor("input_827_cast")]; + tensor reshape_156_shape_0 = const()[name = tensor("reshape_156_shape_0"), val = tensor([2, 32, 30, 128, 128])]; + tensor reshape_156_cast = reshape(shape = reshape_156_shape_0, x = input_827_cast)[name = tensor("reshape_156_cast")]; + tensor reduce_mean_117_axes_0 = const()[name = tensor("reduce_mean_117_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_117_keep_dims_0 = const()[name = tensor("reduce_mean_117_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_117_cast = reduce_mean(axes = reduce_mean_117_axes_0, keep_dims = reduce_mean_117_keep_dims_0, x = reshape_156_cast)[name = tensor("reduce_mean_117_cast")]; + tensor sub_78_cast = sub(x = reshape_156_cast, y = reduce_mean_117_cast)[name = tensor("sub_78_cast")]; + tensor square_39_cast = square(x = sub_78_cast)[name = tensor("square_39_cast")]; + tensor reduce_mean_119_axes_0 = const()[name = tensor("reduce_mean_119_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_119_keep_dims_0 = const()[name = tensor("reduce_mean_119_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_119_cast = reduce_mean(axes = reduce_mean_119_axes_0, keep_dims = reduce_mean_119_keep_dims_0, x = square_39_cast)[name = tensor("reduce_mean_119_cast")]; + tensor add_78_y_0_to_fp16 = const()[name = tensor("add_78_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_78_cast = add(x = reduce_mean_119_cast, y = add_78_y_0_to_fp16)[name = tensor("add_78_cast")]; + tensor sqrt_39_cast = sqrt(x = add_78_cast)[name = tensor("sqrt_39_cast")]; + tensor real_div_39_cast = real_div(x = sub_78_cast, y = sqrt_39_cast)[name = tensor("real_div_39_cast")]; + tensor reshape_157_shape_0 = const()[name = tensor("reshape_157_shape_0"), val = tensor([2, 960, 128, 128])]; + tensor reshape_157_cast = reshape(shape = reshape_157_shape_0, x = real_div_39_cast)[name = tensor("reshape_157_cast")]; + tensor add_79_gamma_0_to_fp16 = const()[name = tensor("add_79_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1089100800)))]; + tensor add_79_beta_0_to_fp16 = const()[name = tensor("add_79_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1089102784)))]; + tensor add_79_epsilon_0_to_fp16 = const()[name = tensor("add_79_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_79_cast = batch_norm(beta = add_79_beta_0_to_fp16, epsilon = add_79_epsilon_0_to_fp16, gamma = add_79_gamma_0_to_fp16, mean = add_73_mean_0_to_fp16, variance = add_73_variance_0_to_fp16, x = reshape_157_cast)[name = tensor("add_79_cast")]; + 
tensor input_831_cast = silu(x = add_79_cast)[name = tensor("input_831_cast")]; + tensor var_13883 = const()[name = tensor("op_13883"), val = tensor([1, 1])]; + tensor var_13885 = const()[name = tensor("op_13885"), val = tensor([1, 1])]; + tensor hidden_states_579_pad_type_0 = const()[name = tensor("hidden_states_579_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_579_pad_0 = const()[name = tensor("hidden_states_579_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1089104768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1091869632))), name = tensor("up_blocks_2_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([320, 960, 3, 3])]; + tensor up_blocks_2_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1091870208)))]; + tensor hidden_states_579_cast = conv(bias = up_blocks_2_resnets_0_conv1_bias_to_fp16, dilations = var_13885, groups = var_13862, pad = hidden_states_579_pad_0, pad_type = hidden_states_579_pad_type_0, strides = var_13883, weight = up_blocks_2_resnets_0_conv1_weight_to_fp16_palettized, x = input_831_cast)[name = tensor("hidden_states_579_cast")]; + tensor var_13891 = const()[name = tensor("op_13891"), val = tensor([1, 1])]; + tensor var_13893 = const()[name = tensor("op_13893"), val = tensor([1, 1])]; + tensor temb_29_pad_type_0 = const()[name = tensor("temb_29_pad_type_0"), val = tensor("custom")]; + tensor temb_29_pad_0 = const()[name = tensor("temb_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1091870912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1092178176))), name = tensor("up_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([320, 1280, 1, 1])]; + tensor up_blocks_2_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1092178368)))]; + tensor temb_29_cast = conv(bias = up_blocks_2_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_13893, groups = var_13862, pad = temb_29_pad_0, pad_type = temb_29_pad_type_0, strides = var_13891, weight = up_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_29_cast")]; + tensor input_835_cast = add(x = hidden_states_579_cast, y = temb_29_cast)[name = tensor("input_835_cast")]; + tensor reshape_160_shape_0 = const()[name = tensor("reshape_160_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_160_cast = reshape(shape = reshape_160_shape_0, x = input_835_cast)[name = tensor("reshape_160_cast")]; + tensor reduce_mean_120_axes_0 = const()[name = tensor("reduce_mean_120_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_120_keep_dims_0 = const()[name = tensor("reduce_mean_120_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_120_cast = reduce_mean(axes = reduce_mean_120_axes_0, keep_dims = reduce_mean_120_keep_dims_0, x = reshape_160_cast)[name = tensor("reduce_mean_120_cast")]; + tensor sub_80_cast 
= sub(x = reshape_160_cast, y = reduce_mean_120_cast)[name = tensor("sub_80_cast")]; + tensor square_40_cast = square(x = sub_80_cast)[name = tensor("square_40_cast")]; + tensor reduce_mean_122_axes_0 = const()[name = tensor("reduce_mean_122_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_122_keep_dims_0 = const()[name = tensor("reduce_mean_122_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_122_cast = reduce_mean(axes = reduce_mean_122_axes_0, keep_dims = reduce_mean_122_keep_dims_0, x = square_40_cast)[name = tensor("reduce_mean_122_cast")]; + tensor add_80_y_0_to_fp16 = const()[name = tensor("add_80_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_80_cast = add(x = reduce_mean_122_cast, y = add_80_y_0_to_fp16)[name = tensor("add_80_cast")]; + tensor sqrt_40_cast = sqrt(x = add_80_cast)[name = tensor("sqrt_40_cast")]; + tensor real_div_40_cast = real_div(x = sub_80_cast, y = sqrt_40_cast)[name = tensor("real_div_40_cast")]; + tensor reshape_161_shape_0 = const()[name = tensor("reshape_161_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_161_cast = reshape(shape = reshape_161_shape_0, x = real_div_40_cast)[name = tensor("reshape_161_cast")]; + tensor add_81_gamma_0_to_fp16 = const()[name = tensor("add_81_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1092179072)))]; + tensor add_81_beta_0_to_fp16 = const()[name = tensor("add_81_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1092179776)))]; + tensor add_81_epsilon_0_to_fp16 = const()[name = tensor("add_81_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_81_cast = batch_norm(beta = add_81_beta_0_to_fp16, epsilon = add_81_epsilon_0_to_fp16, gamma = add_81_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_161_cast)[name = tensor("add_81_cast")]; + tensor input_839_cast = silu(x = add_81_cast)[name = tensor("input_839_cast")]; + tensor var_13903 = const()[name = tensor("op_13903"), val = tensor([1, 1])]; + tensor var_13905 = const()[name = tensor("op_13905"), val = tensor([1, 1])]; + tensor hidden_states_581_pad_type_0 = const()[name = tensor("hidden_states_581_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_581_pad_0 = const()[name = tensor("hidden_states_581_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1092180480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1093102144))), name = tensor("up_blocks_2_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([320, 320, 3, 3])]; + tensor up_blocks_2_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1093102720)))]; + tensor hidden_states_581_cast = conv(bias = up_blocks_2_resnets_0_conv2_bias_to_fp16, dilations = var_13905, groups = var_13862, pad = hidden_states_581_pad_0, pad_type = hidden_states_581_pad_type_0, strides = var_13903, weight = up_blocks_2_resnets_0_conv2_weight_to_fp16_palettized, x = input_839_cast)[name = tensor("hidden_states_581_cast")]; + tensor var_13910 = const()[name = tensor("op_13910"), val = tensor([1, 1])]; + tensor var_13912 = const()[name = tensor("op_13912"), val = tensor([1, 1])]; + tensor 
x_17_pad_type_0 = const()[name = tensor("x_17_pad_type_0"), val = tensor("custom")]; + tensor x_17_pad_0 = const()[name = tensor("x_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_0_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1093103424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1093410688))), name = tensor("up_blocks_2_resnets_0_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([320, 960, 1, 1])]; + tensor up_blocks_2_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1093411264)))]; + tensor x_17_cast = conv(bias = up_blocks_2_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_13912, groups = var_13862, pad = x_17_pad_0, pad_type = x_17_pad_type_0, strides = var_13910, weight = up_blocks_2_resnets_0_conv_shortcut_weight_to_fp16_palettized, x = input_827_cast)[name = tensor("x_17_cast")]; + tensor hidden_states_583_cast = add(x = x_17_cast, y = hidden_states_581_cast)[name = tensor("hidden_states_583_cast")]; + tensor input_841_interleave_0 = const()[name = tensor("input_841_interleave_0"), val = tensor(false)]; + tensor input_841_cast = concat(axis = var_13862, interleave = input_841_interleave_0, values = (hidden_states_583_cast, input_29_cast))[name = tensor("input_841_cast")]; + tensor reshape_164_shape_0 = const()[name = tensor("reshape_164_shape_0"), val = tensor([2, 32, 20, 128, 128])]; + tensor reshape_164_cast = reshape(shape = reshape_164_shape_0, x = input_841_cast)[name = tensor("reshape_164_cast")]; + tensor reduce_mean_123_axes_0 = const()[name = tensor("reduce_mean_123_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_123_keep_dims_0 = const()[name = tensor("reduce_mean_123_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_123_cast = reduce_mean(axes = reduce_mean_123_axes_0, keep_dims = reduce_mean_123_keep_dims_0, x = reshape_164_cast)[name = tensor("reduce_mean_123_cast")]; + tensor sub_82_cast = sub(x = reshape_164_cast, y = reduce_mean_123_cast)[name = tensor("sub_82_cast")]; + tensor square_41_cast = square(x = sub_82_cast)[name = tensor("square_41_cast")]; + tensor reduce_mean_125_axes_0 = const()[name = tensor("reduce_mean_125_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_125_keep_dims_0 = const()[name = tensor("reduce_mean_125_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_125_cast = reduce_mean(axes = reduce_mean_125_axes_0, keep_dims = reduce_mean_125_keep_dims_0, x = square_41_cast)[name = tensor("reduce_mean_125_cast")]; + tensor add_82_y_0_to_fp16 = const()[name = tensor("add_82_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_82_cast = add(x = reduce_mean_125_cast, y = add_82_y_0_to_fp16)[name = tensor("add_82_cast")]; + tensor sqrt_41_cast = sqrt(x = add_82_cast)[name = tensor("sqrt_41_cast")]; + tensor real_div_41_cast = real_div(x = sub_82_cast, y = sqrt_41_cast)[name = tensor("real_div_41_cast")]; + tensor reshape_165_shape_0 = const()[name = tensor("reshape_165_shape_0"), val = tensor([2, 640, 128, 128])]; + tensor reshape_165_cast = reshape(shape = reshape_165_shape_0, x = real_div_41_cast)[name = tensor("reshape_165_cast")]; + tensor add_83_gamma_0_to_fp16 = const()[name = tensor("add_83_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1093411968)))]; + tensor add_83_beta_0_to_fp16 = const()[name = tensor("add_83_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1093413312)))]; + tensor add_83_epsilon_0_to_fp16 = const()[name = tensor("add_83_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_83_cast = batch_norm(beta = add_83_beta_0_to_fp16, epsilon = add_83_epsilon_0_to_fp16, gamma = add_83_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_165_cast)[name = tensor("add_83_cast")]; + tensor input_845_cast = silu(x = add_83_cast)[name = tensor("input_845_cast")]; + tensor var_13930 = const()[name = tensor("op_13930"), val = tensor([1, 1])]; + tensor var_13932 = const()[name = tensor("op_13932"), val = tensor([1, 1])]; + tensor hidden_states_585_pad_type_0 = const()[name = tensor("hidden_states_585_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_585_pad_0 = const()[name = tensor("hidden_states_585_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1093414656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1095257920))), name = tensor("up_blocks_2_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([320, 640, 3, 3])]; + tensor up_blocks_2_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1095258496)))]; + tensor hidden_states_585_cast = conv(bias = up_blocks_2_resnets_1_conv1_bias_to_fp16, dilations = var_13932, groups = var_13862, pad = hidden_states_585_pad_0, pad_type = hidden_states_585_pad_type_0, strides = var_13930, weight = up_blocks_2_resnets_1_conv1_weight_to_fp16_palettized, x = input_845_cast)[name = tensor("hidden_states_585_cast")]; + tensor var_13938 = const()[name = tensor("op_13938"), val = tensor([1, 1])]; + tensor var_13940 = const()[name = tensor("op_13940"), val = tensor([1, 1])]; + tensor temb_31_pad_type_0 = const()[name = tensor("temb_31_pad_type_0"), val = tensor("custom")]; + tensor temb_31_pad_0 = const()[name = tensor("temb_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1095259200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1095668864))), name = tensor("up_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([320, 1280, 1, 1])]; + tensor up_blocks_2_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1095669440)))]; + tensor temb_31_cast = conv(bias = up_blocks_2_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_13940, groups = var_13862, pad = temb_31_pad_0, pad_type = temb_31_pad_type_0, strides = var_13938, weight = up_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_31_cast")]; + tensor input_849_cast = add(x = hidden_states_585_cast, y = temb_31_cast)[name = tensor("input_849_cast")]; + tensor reshape_168_shape_0 = 
const()[name = tensor("reshape_168_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_168_cast = reshape(shape = reshape_168_shape_0, x = input_849_cast)[name = tensor("reshape_168_cast")]; + tensor reduce_mean_126_axes_0 = const()[name = tensor("reduce_mean_126_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_126_keep_dims_0 = const()[name = tensor("reduce_mean_126_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_126_cast = reduce_mean(axes = reduce_mean_126_axes_0, keep_dims = reduce_mean_126_keep_dims_0, x = reshape_168_cast)[name = tensor("reduce_mean_126_cast")]; + tensor sub_84_cast = sub(x = reshape_168_cast, y = reduce_mean_126_cast)[name = tensor("sub_84_cast")]; + tensor square_42_cast = square(x = sub_84_cast)[name = tensor("square_42_cast")]; + tensor reduce_mean_128_axes_0 = const()[name = tensor("reduce_mean_128_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_128_keep_dims_0 = const()[name = tensor("reduce_mean_128_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_128_cast = reduce_mean(axes = reduce_mean_128_axes_0, keep_dims = reduce_mean_128_keep_dims_0, x = square_42_cast)[name = tensor("reduce_mean_128_cast")]; + tensor add_84_y_0_to_fp16 = const()[name = tensor("add_84_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_84_cast = add(x = reduce_mean_128_cast, y = add_84_y_0_to_fp16)[name = tensor("add_84_cast")]; + tensor sqrt_42_cast = sqrt(x = add_84_cast)[name = tensor("sqrt_42_cast")]; + tensor real_div_42_cast = real_div(x = sub_84_cast, y = sqrt_42_cast)[name = tensor("real_div_42_cast")]; + tensor reshape_169_shape_0 = const()[name = tensor("reshape_169_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_169_cast = reshape(shape = reshape_169_shape_0, x = real_div_42_cast)[name = tensor("reshape_169_cast")]; + tensor add_85_gamma_0_to_fp16 = const()[name = tensor("add_85_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1095670144)))]; + tensor add_85_beta_0_to_fp16 = const()[name = tensor("add_85_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1095670848)))]; + tensor add_85_epsilon_0_to_fp16 = const()[name = tensor("add_85_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_85_cast = batch_norm(beta = add_85_beta_0_to_fp16, epsilon = add_85_epsilon_0_to_fp16, gamma = add_85_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_169_cast)[name = tensor("add_85_cast")]; + tensor input_853_cast = silu(x = add_85_cast)[name = tensor("input_853_cast")]; + tensor var_13950 = const()[name = tensor("op_13950"), val = tensor([1, 1])]; + tensor var_13952 = const()[name = tensor("op_13952"), val = tensor([1, 1])]; + tensor hidden_states_587_pad_type_0 = const()[name = tensor("hidden_states_587_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_587_pad_0 = const()[name = tensor("hidden_states_587_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1095671552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096593216))), name = tensor("up_blocks_2_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([320, 320, 3, 3])]; + tensor up_blocks_2_resnets_1_conv2_bias_to_fp16 = const()[name = 
tensor("up_blocks_2_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096593792)))]; + tensor hidden_states_587_cast = conv(bias = up_blocks_2_resnets_1_conv2_bias_to_fp16, dilations = var_13952, groups = var_13862, pad = hidden_states_587_pad_0, pad_type = hidden_states_587_pad_type_0, strides = var_13950, weight = up_blocks_2_resnets_1_conv2_weight_to_fp16_palettized, x = input_853_cast)[name = tensor("hidden_states_587_cast")]; + tensor var_13957 = const()[name = tensor("op_13957"), val = tensor([1, 1])]; + tensor var_13959 = const()[name = tensor("op_13959"), val = tensor([1, 1])]; + tensor x_19_pad_type_0 = const()[name = tensor("x_19_pad_type_0"), val = tensor("custom")]; + tensor x_19_pad_0 = const()[name = tensor("x_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_1_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096594496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096799360))), name = tensor("up_blocks_2_resnets_1_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([320, 640, 1, 1])]; + tensor up_blocks_2_resnets_1_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096799936)))]; + tensor x_19_cast = conv(bias = up_blocks_2_resnets_1_conv_shortcut_bias_to_fp16, dilations = var_13959, groups = var_13862, pad = x_19_pad_0, pad_type = x_19_pad_type_0, strides = var_13957, weight = up_blocks_2_resnets_1_conv_shortcut_weight_to_fp16_palettized, x = input_841_cast)[name = tensor("x_19_cast")]; + tensor hidden_states_589_cast = add(x = x_19_cast, y = hidden_states_587_cast)[name = tensor("hidden_states_589_cast")]; + tensor input_855_interleave_0 = const()[name = tensor("input_855_interleave_0"), val = tensor(false)]; + tensor input_855_cast = concat(axis = var_13862, interleave = input_855_interleave_0, values = (hidden_states_589_cast, input_13_cast))[name = tensor("input_855_cast")]; + tensor reshape_172_shape_0 = const()[name = tensor("reshape_172_shape_0"), val = tensor([2, 32, 20, 128, 128])]; + tensor reshape_172_cast = reshape(shape = reshape_172_shape_0, x = input_855_cast)[name = tensor("reshape_172_cast")]; + tensor reduce_mean_129_axes_0 = const()[name = tensor("reduce_mean_129_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_129_keep_dims_0 = const()[name = tensor("reduce_mean_129_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_129_cast = reduce_mean(axes = reduce_mean_129_axes_0, keep_dims = reduce_mean_129_keep_dims_0, x = reshape_172_cast)[name = tensor("reduce_mean_129_cast")]; + tensor sub_86_cast = sub(x = reshape_172_cast, y = reduce_mean_129_cast)[name = tensor("sub_86_cast")]; + tensor square_43_cast = square(x = sub_86_cast)[name = tensor("square_43_cast")]; + tensor reduce_mean_131_axes_0 = const()[name = tensor("reduce_mean_131_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_131_keep_dims_0 = const()[name = tensor("reduce_mean_131_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_131_cast = reduce_mean(axes = reduce_mean_131_axes_0, keep_dims = reduce_mean_131_keep_dims_0, x = square_43_cast)[name = tensor("reduce_mean_131_cast")]; + tensor add_86_y_0_to_fp16 = const()[name = tensor("add_86_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor 
add_86_cast = add(x = reduce_mean_131_cast, y = add_86_y_0_to_fp16)[name = tensor("add_86_cast")]; + tensor sqrt_43_cast = sqrt(x = add_86_cast)[name = tensor("sqrt_43_cast")]; + tensor real_div_43_cast = real_div(x = sub_86_cast, y = sqrt_43_cast)[name = tensor("real_div_43_cast")]; + tensor reshape_173_shape_0 = const()[name = tensor("reshape_173_shape_0"), val = tensor([2, 640, 128, 128])]; + tensor reshape_173_cast = reshape(shape = reshape_173_shape_0, x = real_div_43_cast)[name = tensor("reshape_173_cast")]; + tensor add_87_gamma_0_to_fp16 = const()[name = tensor("add_87_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096800640)))]; + tensor add_87_beta_0_to_fp16 = const()[name = tensor("add_87_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096801984)))]; + tensor add_87_epsilon_0_to_fp16 = const()[name = tensor("add_87_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_87_cast = batch_norm(beta = add_87_beta_0_to_fp16, epsilon = add_87_epsilon_0_to_fp16, gamma = add_87_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_173_cast)[name = tensor("add_87_cast")]; + tensor input_859_cast = silu(x = add_87_cast)[name = tensor("input_859_cast")]; + tensor var_13977 = const()[name = tensor("op_13977"), val = tensor([1, 1])]; + tensor var_13979 = const()[name = tensor("op_13979"), val = tensor([1, 1])]; + tensor hidden_states_591_pad_type_0 = const()[name = tensor("hidden_states_591_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_591_pad_0 = const()[name = tensor("hidden_states_591_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_2_conv1_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096803328)))]; + tensor up_blocks_2_resnets_2_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1100489792)))]; + tensor hidden_states_591_cast = conv(bias = up_blocks_2_resnets_2_conv1_bias_to_fp16, dilations = var_13979, groups = var_13862, pad = hidden_states_591_pad_0, pad_type = hidden_states_591_pad_type_0, strides = var_13977, weight = up_blocks_2_resnets_2_conv1_weight_to_fp16, x = input_859_cast)[name = tensor("hidden_states_591_cast")]; + tensor var_13985 = const()[name = tensor("op_13985"), val = tensor([1, 1])]; + tensor var_13987 = const()[name = tensor("op_13987"), val = tensor([1, 1])]; + tensor temb_pad_type_0 = const()[name = tensor("temb_pad_type_0"), val = tensor("custom")]; + tensor temb_pad_0 = const()[name = tensor("temb_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_2_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1100490496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1100797760))), name = tensor("up_blocks_2_resnets_2_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([320, 1280, 1, 1])]; + tensor up_blocks_2_resnets_2_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1100797952)))]; + tensor temb_cast = conv(bias = 
up_blocks_2_resnets_2_time_emb_proj_bias_to_fp16, dilations = var_13987, groups = var_13862, pad = temb_pad_0, pad_type = temb_pad_type_0, strides = var_13985, weight = up_blocks_2_resnets_2_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_cast")]; + tensor input_863_cast = add(x = hidden_states_591_cast, y = temb_cast)[name = tensor("input_863_cast")]; + tensor reshape_176_shape_0 = const()[name = tensor("reshape_176_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_176_cast = reshape(shape = reshape_176_shape_0, x = input_863_cast)[name = tensor("reshape_176_cast")]; + tensor reduce_mean_132_axes_0 = const()[name = tensor("reduce_mean_132_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_132_keep_dims_0 = const()[name = tensor("reduce_mean_132_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_132_cast = reduce_mean(axes = reduce_mean_132_axes_0, keep_dims = reduce_mean_132_keep_dims_0, x = reshape_176_cast)[name = tensor("reduce_mean_132_cast")]; + tensor sub_88_cast = sub(x = reshape_176_cast, y = reduce_mean_132_cast)[name = tensor("sub_88_cast")]; + tensor square_44_cast = square(x = sub_88_cast)[name = tensor("square_44_cast")]; + tensor reduce_mean_134_axes_0 = const()[name = tensor("reduce_mean_134_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_134_keep_dims_0 = const()[name = tensor("reduce_mean_134_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_134_cast = reduce_mean(axes = reduce_mean_134_axes_0, keep_dims = reduce_mean_134_keep_dims_0, x = square_44_cast)[name = tensor("reduce_mean_134_cast")]; + tensor add_88_y_0_to_fp16 = const()[name = tensor("add_88_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_88_cast = add(x = reduce_mean_134_cast, y = add_88_y_0_to_fp16)[name = tensor("add_88_cast")]; + tensor sqrt_44_cast = sqrt(x = add_88_cast)[name = tensor("sqrt_44_cast")]; + tensor real_div_44_cast = real_div(x = sub_88_cast, y = sqrt_44_cast)[name = tensor("real_div_44_cast")]; + tensor reshape_177_shape_0 = const()[name = tensor("reshape_177_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_177_cast = reshape(shape = reshape_177_shape_0, x = real_div_44_cast)[name = tensor("reshape_177_cast")]; + tensor add_89_gamma_0_to_fp16 = const()[name = tensor("add_89_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1100798656)))]; + tensor add_89_beta_0_to_fp16 = const()[name = tensor("add_89_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1100799360)))]; + tensor add_89_epsilon_0_to_fp16 = const()[name = tensor("add_89_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_89_cast = batch_norm(beta = add_89_beta_0_to_fp16, epsilon = add_89_epsilon_0_to_fp16, gamma = add_89_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_177_cast)[name = tensor("add_89_cast")]; + tensor input_867_cast = silu(x = add_89_cast)[name = tensor("input_867_cast")]; + tensor var_13997 = const()[name = tensor("op_13997"), val = tensor([1, 1])]; + tensor var_13999 = const()[name = tensor("op_13999"), val = tensor([1, 1])]; + tensor hidden_states_pad_type_0 = const()[name = tensor("hidden_states_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_pad_0 = const()[name = tensor("hidden_states_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_2_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1100800064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1101721728))), name = tensor("up_blocks_2_resnets_2_conv2_weight_to_fp16_palettized"), shape = tensor([320, 320, 3, 3])]; + tensor up_blocks_2_resnets_2_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1101722304)))]; + tensor hidden_states_cast = conv(bias = up_blocks_2_resnets_2_conv2_bias_to_fp16, dilations = var_13999, groups = var_13862, pad = hidden_states_pad_0, pad_type = hidden_states_pad_type_0, strides = var_13997, weight = up_blocks_2_resnets_2_conv2_weight_to_fp16_palettized, x = input_867_cast)[name = tensor("hidden_states_cast")]; + tensor var_14004 = const()[name = tensor("op_14004"), val = tensor([1, 1])]; + tensor var_14006 = const()[name = tensor("op_14006"), val = tensor([1, 1])]; + tensor x_pad_type_0 = const()[name = tensor("x_pad_type_0"), val = tensor("custom")]; + tensor x_pad_0 = const()[name = tensor("x_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_2_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1101723008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1101927872))), name = tensor("up_blocks_2_resnets_2_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([320, 640, 1, 1])]; + tensor up_blocks_2_resnets_2_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1101928448)))]; + tensor x_cast = conv(bias = up_blocks_2_resnets_2_conv_shortcut_bias_to_fp16, dilations = var_14006, groups = var_13862, pad = x_pad_0, pad_type = x_pad_type_0, strides = var_14004, weight = up_blocks_2_resnets_2_conv_shortcut_weight_to_fp16_palettized, x = input_855_cast)[name = tensor("x_cast")]; + tensor input_869_cast = add(x = x_cast, y = hidden_states_cast)[name = tensor("input_869_cast")]; + tensor reshape_180_shape_0 = const()[name = tensor("reshape_180_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_180_cast = reshape(shape = reshape_180_shape_0, x = input_869_cast)[name = tensor("reshape_180_cast")]; + tensor reduce_mean_135_axes_0 = const()[name = tensor("reduce_mean_135_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_135_keep_dims_0 = const()[name = tensor("reduce_mean_135_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_135_cast = reduce_mean(axes = reduce_mean_135_axes_0, keep_dims = reduce_mean_135_keep_dims_0, x = reshape_180_cast)[name = tensor("reduce_mean_135_cast")]; + tensor sub_90_cast = sub(x = reshape_180_cast, y = reduce_mean_135_cast)[name = tensor("sub_90_cast")]; + tensor square_45_cast = square(x = sub_90_cast)[name = tensor("square_45_cast")]; + tensor reduce_mean_137_axes_0 = const()[name = tensor("reduce_mean_137_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_137_keep_dims_0 = const()[name = tensor("reduce_mean_137_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_137_cast = reduce_mean(axes = reduce_mean_137_axes_0, keep_dims = reduce_mean_137_keep_dims_0, x = square_45_cast)[name = tensor("reduce_mean_137_cast")]; + tensor add_90_y_0_to_fp16 = const()[name = tensor("add_90_y_0_to_fp16"), val 
= tensor(0x1.5p-17)]; + tensor add_90_cast = add(x = reduce_mean_137_cast, y = add_90_y_0_to_fp16)[name = tensor("add_90_cast")]; + tensor sqrt_45_cast = sqrt(x = add_90_cast)[name = tensor("sqrt_45_cast")]; + tensor real_div_45_cast = real_div(x = sub_90_cast, y = sqrt_45_cast)[name = tensor("real_div_45_cast")]; + tensor reshape_181_shape_0 = const()[name = tensor("reshape_181_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_181_cast = reshape(shape = reshape_181_shape_0, x = real_div_45_cast)[name = tensor("reshape_181_cast")]; + tensor add_91_gamma_0_to_fp16 = const()[name = tensor("add_91_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1101929152)))]; + tensor add_91_beta_0_to_fp16 = const()[name = tensor("add_91_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1101929856)))]; + tensor add_91_epsilon_0_to_fp16 = const()[name = tensor("add_91_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_91_cast = batch_norm(beta = add_91_beta_0_to_fp16, epsilon = add_91_epsilon_0_to_fp16, gamma = add_91_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_181_cast)[name = tensor("add_91_cast")]; + tensor input_cast = silu(x = add_91_cast)[name = tensor("input_cast")]; + tensor var_14020 = const()[name = tensor("op_14020"), val = tensor(1)]; + tensor var_14023 = const()[name = tensor("op_14023"), val = tensor([1, 1])]; + tensor var_14025 = const()[name = tensor("op_14025"), val = tensor([1, 1])]; + tensor var_14027_pad_type_0 = const()[name = tensor("op_14027_pad_type_0"), val = tensor("custom")]; + tensor var_14027_pad_0 = const()[name = tensor("op_14027_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor conv_out_weight_to_fp16 = const()[name = tensor("conv_out_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1101930560)))]; + tensor conv_out_bias_to_fp16 = const()[name = tensor("conv_out_bias_to_fp16"), val = tensor([0x1.664p-9, -0x1.72p-10, 0x1.06p-9, -0x1.9b8p-9])]; + tensor var_14027_cast = conv(bias = conv_out_bias_to_fp16, dilations = var_14025, groups = var_14020, pad = var_14027_pad_0, pad_type = var_14027_pad_type_0, strides = var_14023, weight = conv_out_weight_to_fp16, x = input_cast)[name = tensor("op_14027_cast")]; + tensor var_14027_cast_to_fp32_dtype_0 = const()[name = tensor("op_14027_cast_to_fp32_dtype_0"), val = tensor("fp32")]; + tensor noise_pred = cast(dtype = var_14027_cast_to_fp32_dtype_0, x = var_14027_cast)[name = tensor("cast_0")]; + } -> (noise_pred); +} \ No newline at end of file diff --git a/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlmodelc/weights/weight.bin b/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlmodelc/weights/weight.bin new file mode 100644 index 0000000000000000000000000000000000000000..5a1e02a5236f43c595914225160faecb50affdba --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlmodelc/weights/weight.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5299a934ac3fa5684f59df3750a7136023d7056cf3cbc8bf70b042d4574b3c1 +size 1101953664 diff --git a/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlpackage/Data/com.apple.CoreML/model.mlmodel b/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlpackage/Data/com.apple.CoreML/model.mlmodel new file mode 100644 index 0000000000000000000000000000000000000000..11880ea263f8b80f413d0ecdcecfe8a266482246 --- 
/dev/null +++ b/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlpackage/Data/com.apple.CoreML/model.mlmodel @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc2464e9bb0bea7af1a6f57a1f9263a289712a2d4ad52ce90490b50f8898ad5f +size 2134925 diff --git a/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlpackage/Data/com.apple.CoreML/weights/weight.bin b/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlpackage/Data/com.apple.CoreML/weights/weight.bin new file mode 100644 index 0000000000000000000000000000000000000000..5a1e02a5236f43c595914225160faecb50affdba --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlpackage/Data/com.apple.CoreML/weights/weight.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5299a934ac3fa5684f59df3750a7136023d7056cf3cbc8bf70b042d4574b3c1 +size 1101953664 diff --git a/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlpackage/Manifest.json b/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlpackage/Manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..15251ae3d6a420a49539f03bcc4b2c7b7336af3d --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_3_41_bit_mixedpalette/Unet.mlpackage/Manifest.json @@ -0,0 +1,18 @@ +{ + "fileFormatVersion": "1.0.0", + "itemInfoEntries": { + "5327df99-ec60-4f1d-afc9-2440e87c68cf": { + "author": "com.apple.CoreML", + "description": "CoreML Model Weights", + "name": "weights", + "path": "com.apple.CoreML/weights" + }, + "5cf5495f-cb27-4f52-8fc6-7f85ec0067dc": { + "author": "com.apple.CoreML", + "description": "CoreML Model Specification", + "name": "model.mlmodel", + "path": "com.apple.CoreML/model.mlmodel" + } + }, + "rootModelIdentifier": "5cf5495f-cb27-4f52-8fc6-7f85ec0067dc" +} diff --git a/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlmodelc/analytics/coremldata.bin b/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlmodelc/analytics/coremldata.bin new file mode 100644 index 0000000000000000000000000000000000000000..eb60d599194a9b6cff324207a0c3eb731dd9c13d --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlmodelc/analytics/coremldata.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48f726f7f29091a23eb434bd4febf1f83733c95db26b912dfa671cdccad5d874 +size 243 diff --git a/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlmodelc/coremldata.bin b/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlmodelc/coremldata.bin new file mode 100644 index 0000000000000000000000000000000000000000..6ec4882d93c41f4d859f419572182f8503ec75fc --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlmodelc/coremldata.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8aea3887ffdc8e059925f3981259e1cd3227b827e5f91edff613c73ac0ea16f6 +size 1338 diff --git a/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlmodelc/metadata.json b/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlmodelc/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..cfc4e39d604df0fb34c37b3ffeafe70d32210d51 --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlmodelc/metadata.json @@ -0,0 +1,124 @@ +[ + { + "shortDescription" : "Stable Diffusion generates images conditioned on text or other images as input through the diffusion process. 
Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.", + "metadataOutputVersion" : "3.0", + "outputSchema" : [ + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float32", + "formattedType" : "MultiArray (Float32)", + "shortDescription" : "Same shape and dtype as the `sample` input. The predicted noise to facilitate the reverse diffusion (denoising) process", + "shape" : "[]", + "name" : "noise_pred", + "type" : "MultiArray" + } + ], + "version" : "diffusers\/stable-diffusion-xl-base-1.0", + "modelParameters" : [ + + ], + "author" : "Please refer to the Model Card available at huggingface.co\/diffusers\/stable-diffusion-xl-base-1.0", + "specificationVersion" : 7, + "storagePrecision" : "Mixed (Float16, Palettized (1 bits), Palettized (2 bits), Palettized (4 bits), Palettized (6 bits), Palettized (8 bits))", + "license" : "OpenRAIL (https:\/\/huggingface.co\/spaces\/CompVis\/stable-diffusion-license)", + "mlProgramOperationTypeHistogram" : { + "UpsampleNearestNeighbor" : 2, + "Ios16.reduceMean" : 512, + "Ios16.sin" : 2, + "Ios16.softmax" : 140, + "Split" : 70, + "Ios16.add" : 722, + "Concat" : 14, + "Ios16.realDiv" : 46, + "Ios16.square" : 46, + "ExpandDims" : 6, + "Ios16.sub" : 256, + "Ios16.cast" : 1, + "Ios16.conv" : 794, + "Ios16.constexprLutToDense" : 775, + "Ios16.gelu" : 70, + "Ios16.matmul" : 280, + "Ios16.batchNorm" : 46, + "Ios16.reshape" : 675, + "Ios16.rsqrt" : 210, + "Ios16.silu" : 38, + "Ios16.sqrt" : 46, + "Ios16.mul" : 842, + "Ios16.cos" : 2, + "SliceByIndex" : 4 + }, + "computePrecision" : "Mixed (Float16, Float32, Int32)", + "isUpdatable" : "0", + "availability" : { + "macOS" : "13.0", + "tvOS" : "16.0", + "visionOS" : "1.0", + "watchOS" : "9.0", + "iOS" : "16.0", + "macCatalyst" : "16.0" + }, + "modelType" : { + "name" : "MLModelType_mlProgram" + }, + "inputSchema" : [ + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 2 × 4 × 128 × 128)", + "shortDescription" : "The low resolution latent feature maps being denoised through reverse diffusion", + "shape" : "[2, 4, 128, 128]", + "name" : "sample", + "type" : "MultiArray" + }, + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 2)", + "shortDescription" : "A value emitted by the associated scheduler object to condition the model on a given noise schedule", + "shape" : "[2]", + "name" : "timestep", + "type" : "MultiArray" + }, + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 2 × 2048 × 1 × 77)", + "shortDescription" : "Output embeddings from the associated text_encoder model to condition to generated image on text. A maximum of 77 tokens (~40 words) are allowed. Longer text is truncated. 
Shorter text does not reduce computation.", + "shape" : "[2, 2048, 1, 77]", + "name" : "encoder_hidden_states", + "type" : "MultiArray" + }, + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 12)", + "shortDescription" : "", + "shape" : "[12]", + "name" : "time_ids", + "type" : "MultiArray" + }, + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 2 × 1280)", + "shortDescription" : "", + "shape" : "[2, 1280]", + "name" : "text_embeds", + "type" : "MultiArray" + } + ], + "userDefinedMetadata" : { + "com.github.apple.coremltools.version" : "7.0b1", + "com.github.apple.coremltools.source" : "torch==2.0.1+cu117", + "com.github.apple.ml-stable-diffusion.version" : "1.0.0" + }, + "generatedClassName" : "recipe_4_50_bit_mixedpalette", + "method" : "predict" + } +] \ No newline at end of file diff --git a/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlmodelc/model.mil b/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlmodelc/model.mil new file mode 100644 index 0000000000000000000000000000000000000000..f64fd3ffc051e97cd7f9701d941beb7ee45e6e14 --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlmodelc/model.mil @@ -0,0 +1,12327 @@ +program(1.0) +[buildInfo = dict, tensor>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.0.48"}})] +{ + func main(tensor encoder_hidden_states, tensor sample, tensor text_embeds, tensor time_ids, tensor timestep) { + tensor var_24 = const()[name = tensor("op_24"), val = tensor(-1)]; + tensor var_41_axes_0 = const()[name = tensor("op_41_axes_0"), val = tensor([1])]; + tensor var_41_cast = expand_dims(axes = var_41_axes_0, x = timestep)[name = tensor("op_41_cast")]; + tensor var_43_to_fp16 = const()[name = tensor("op_43_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(64)))]; + tensor emb_3_cast = mul(x = var_41_cast, y = var_43_to_fp16)[name = tensor("emb_3_cast")]; + tensor var_48_cast = sin(x = emb_3_cast)[name = tensor("op_48_cast")]; + tensor var_49_cast = cos(x = emb_3_cast)[name = tensor("op_49_cast")]; + tensor emb_7_interleave_0 = const()[name = tensor("emb_7_interleave_0"), val = tensor(false)]; + tensor emb_7_cast = concat(axis = var_24, interleave = emb_7_interleave_0, values = (var_48_cast, var_49_cast))[name = tensor("emb_7_cast")]; + tensor var_53_begin_0 = const()[name = tensor("op_53_begin_0"), val = tensor([0, 160])]; + tensor var_53_end_0 = const()[name = tensor("op_53_end_0"), val = tensor([2, 320])]; + tensor var_53_end_mask_0 = const()[name = tensor("op_53_end_mask_0"), val = tensor([true, true])]; + tensor var_53_cast = slice_by_index(begin = var_53_begin_0, end = var_53_end_0, end_mask = var_53_end_mask_0, x = emb_7_cast)[name = tensor("op_53_cast")]; + tensor var_55_begin_0 = const()[name = tensor("op_55_begin_0"), val = tensor([0, 0])]; + tensor var_55_end_0 = const()[name = tensor("op_55_end_0"), val = tensor([2, 160])]; + tensor var_55_end_mask_0 = const()[name = tensor("op_55_end_mask_0"), val = tensor([true, false])]; + tensor var_55_cast = slice_by_index(begin = var_55_begin_0, end = var_55_end_0, end_mask = var_55_end_mask_0, x = emb_7_cast)[name = tensor("op_55_cast")]; + tensor sample_3_interleave_0 = const()[name = tensor("sample_3_interleave_0"), val = tensor(false)]; + tensor sample_3_cast = concat(axis = var_24, interleave = sample_3_interleave_0, values = (var_53_cast, var_55_cast))[name 
= tensor("sample_3_cast")]; + tensor var_58 = const()[name = tensor("op_58"), val = tensor(1)]; + tensor var_65_axes_0 = const()[name = tensor("op_65_axes_0"), val = tensor([-1])]; + tensor var_65_cast = expand_dims(axes = var_65_axes_0, x = sample_3_cast)[name = tensor("op_65_cast")]; + tensor input_1_axes_0 = const()[name = tensor("input_1_axes_0"), val = tensor([-1])]; + tensor input_1_cast = expand_dims(axes = input_1_axes_0, x = var_65_cast)[name = tensor("input_1_cast")]; + tensor var_69 = const()[name = tensor("op_69"), val = tensor([1, 1])]; + tensor var_71 = const()[name = tensor("op_71"), val = tensor([1, 1])]; + tensor input_3_pad_type_0 = const()[name = tensor("input_3_pad_type_0"), val = tensor("custom")]; + tensor input_3_pad_0 = const()[name = tensor("input_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor time_embedding_linear_1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410112))), name = tensor("time_embedding_linear_1_weight_to_fp16_palettized"), shape = tensor([1280, 320, 1, 1])]; + tensor time_embedding_linear_1_bias_to_fp16 = const()[name = tensor("time_embedding_linear_1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410688)))]; + tensor input_3_cast = conv(bias = time_embedding_linear_1_bias_to_fp16, dilations = var_71, groups = var_58, pad = input_3_pad_0, pad_type = input_3_pad_type_0, strides = var_69, weight = time_embedding_linear_1_weight_to_fp16_palettized, x = input_1_cast)[name = tensor("input_3_cast")]; + tensor input_5_cast = silu(x = input_3_cast)[name = tensor("input_5_cast")]; + tensor var_77 = const()[name = tensor("op_77"), val = tensor([1, 1])]; + tensor var_79 = const()[name = tensor("op_79"), val = tensor([1, 1])]; + tensor emb_pad_type_0 = const()[name = tensor("emb_pad_type_0"), val = tensor("custom")]; + tensor emb_pad_0 = const()[name = tensor("emb_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor time_embedding_linear_2_weight_to_fp16 = const()[name = tensor("time_embedding_linear_2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(413312)))]; + tensor time_embedding_linear_2_bias_to_fp16 = const()[name = tensor("time_embedding_linear_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(3690176)))]; + tensor emb_cast = conv(bias = time_embedding_linear_2_bias_to_fp16, dilations = var_79, groups = var_58, pad = emb_pad_0, pad_type = emb_pad_type_0, strides = var_77, weight = time_embedding_linear_2_weight_to_fp16, x = input_5_cast)[name = tensor("emb_cast")]; + tensor var_85 = const()[name = tensor("op_85"), val = tensor(-1)]; + tensor var_102_axes_0 = const()[name = tensor("op_102_axes_0"), val = tensor([1])]; + tensor var_102_cast = expand_dims(axes = var_102_axes_0, x = time_ids)[name = tensor("op_102_cast")]; + tensor var_104_to_fp16 = const()[name = tensor("op_104_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(3692800)))]; + tensor emb_11_cast = mul(x = var_102_cast, y = var_104_to_fp16)[name = tensor("emb_11_cast")]; + tensor var_109_cast = sin(x = emb_11_cast)[name = tensor("op_109_cast")]; + tensor var_110_cast = cos(x = emb_11_cast)[name = tensor("op_110_cast")]; + tensor emb_15_interleave_0 = const()[name = 
tensor("emb_15_interleave_0"), val = tensor(false)]; + tensor emb_15_cast = concat(axis = var_85, interleave = emb_15_interleave_0, values = (var_109_cast, var_110_cast))[name = tensor("emb_15_cast")]; + tensor var_114_begin_0 = const()[name = tensor("op_114_begin_0"), val = tensor([0, 128])]; + tensor var_114_end_0 = const()[name = tensor("op_114_end_0"), val = tensor([12, 256])]; + tensor var_114_end_mask_0 = const()[name = tensor("op_114_end_mask_0"), val = tensor([true, true])]; + tensor var_114_cast = slice_by_index(begin = var_114_begin_0, end = var_114_end_0, end_mask = var_114_end_mask_0, x = emb_15_cast)[name = tensor("op_114_cast")]; + tensor var_116_begin_0 = const()[name = tensor("op_116_begin_0"), val = tensor([0, 0])]; + tensor var_116_end_0 = const()[name = tensor("op_116_end_0"), val = tensor([12, 128])]; + tensor var_116_end_mask_0 = const()[name = tensor("op_116_end_mask_0"), val = tensor([true, false])]; + tensor var_116_cast = slice_by_index(begin = var_116_begin_0, end = var_116_end_0, end_mask = var_116_end_mask_0, x = emb_15_cast)[name = tensor("op_116_cast")]; + tensor time_embeds_1_interleave_0 = const()[name = tensor("time_embeds_1_interleave_0"), val = tensor(false)]; + tensor time_embeds_1_cast = concat(axis = var_85, interleave = time_embeds_1_interleave_0, values = (var_114_cast, var_116_cast))[name = tensor("time_embeds_1_cast")]; + tensor var_124 = const()[name = tensor("op_124"), val = tensor([2, -1])]; + tensor time_embeds_cast = reshape(shape = var_124, x = time_embeds_1_cast)[name = tensor("time_embeds_cast")]; + tensor var_127 = const()[name = tensor("op_127"), val = tensor(-1)]; + tensor sample_interleave_0 = const()[name = tensor("sample_interleave_0"), val = tensor(false)]; + tensor sample_cast = concat(axis = var_127, interleave = sample_interleave_0, values = (text_embeds, time_embeds_cast))[name = tensor("sample_cast")]; + tensor var_129 = const()[name = tensor("op_129"), val = tensor(1)]; + tensor var_136_axes_0 = const()[name = tensor("op_136_axes_0"), val = tensor([-1])]; + tensor var_136_cast = expand_dims(axes = var_136_axes_0, x = sample_cast)[name = tensor("op_136_cast")]; + tensor input_7_axes_0 = const()[name = tensor("input_7_axes_0"), val = tensor([-1])]; + tensor input_7_cast = expand_dims(axes = input_7_axes_0, x = var_136_cast)[name = tensor("input_7_cast")]; + tensor var_140 = const()[name = tensor("op_140"), val = tensor([1, 1])]; + tensor var_142 = const()[name = tensor("op_142"), val = tensor([1, 1])]; + tensor input_9_pad_type_0 = const()[name = tensor("input_9_pad_type_0"), val = tensor("custom")]; + tensor input_9_pad_0 = const()[name = tensor("input_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor add_embedding_linear_1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(3693120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7297664))), name = tensor("add_embedding_linear_1_weight_to_fp16_palettized"), shape = tensor([1280, 2816, 1, 1])]; + tensor add_embedding_linear_1_bias_to_fp16 = const()[name = tensor("add_embedding_linear_1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7298240)))]; + tensor input_9_cast = conv(bias = add_embedding_linear_1_bias_to_fp16, dilations = var_142, groups = var_129, pad = input_9_pad_0, pad_type = input_9_pad_type_0, strides = var_140, weight = add_embedding_linear_1_weight_to_fp16_palettized, x = 
input_7_cast)[name = tensor("input_9_cast")]; + tensor input_11_cast = silu(x = input_9_cast)[name = tensor("input_11_cast")]; + tensor var_148 = const()[name = tensor("op_148"), val = tensor([1, 1])]; + tensor var_150 = const()[name = tensor("op_150"), val = tensor([1, 1])]; + tensor aug_emb_pad_type_0 = const()[name = tensor("aug_emb_pad_type_0"), val = tensor("custom")]; + tensor aug_emb_pad_0 = const()[name = tensor("aug_emb_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor add_embedding_linear_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(7300864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8939328))), name = tensor("add_embedding_linear_2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor add_embedding_linear_2_bias_to_fp16 = const()[name = tensor("add_embedding_linear_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8939904)))]; + tensor aug_emb_cast = conv(bias = add_embedding_linear_2_bias_to_fp16, dilations = var_150, groups = var_129, pad = aug_emb_pad_0, pad_type = aug_emb_pad_type_0, strides = var_148, weight = add_embedding_linear_2_weight_to_fp16_palettized, x = input_11_cast)[name = tensor("aug_emb_cast")]; + tensor input_19_cast = add(x = emb_cast, y = aug_emb_cast)[name = tensor("input_19_cast")]; + tensor var_158 = const()[name = tensor("op_158"), val = tensor(1)]; + tensor var_161 = const()[name = tensor("op_161"), val = tensor([1, 1])]; + tensor var_163 = const()[name = tensor("op_163"), val = tensor([1, 1])]; + tensor input_13_pad_type_0 = const()[name = tensor("input_13_pad_type_0"), val = tensor("custom")]; + tensor input_13_pad_0 = const()[name = tensor("input_13_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor conv_in_weight_to_fp16 = const()[name = tensor("conv_in_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8942528)))]; + tensor conv_in_bias_to_fp16 = const()[name = tensor("conv_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8965632)))]; + tensor input_13_cast = conv(bias = conv_in_bias_to_fp16, dilations = var_163, groups = var_158, pad = input_13_pad_0, pad_type = input_13_pad_type_0, strides = var_161, weight = conv_in_weight_to_fp16, x = sample)[name = tensor("input_13_cast")]; + tensor var_172 = const()[name = tensor("op_172"), val = tensor(1)]; + tensor reshape_0_shape_0 = const()[name = tensor("reshape_0_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_0_cast = reshape(shape = reshape_0_shape_0, x = input_13_cast)[name = tensor("reshape_0_cast")]; + tensor reduce_mean_0_axes_0 = const()[name = tensor("reduce_mean_0_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_0_keep_dims_0 = const()[name = tensor("reduce_mean_0_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_0_cast = reduce_mean(axes = reduce_mean_0_axes_0, keep_dims = reduce_mean_0_keep_dims_0, x = reshape_0_cast)[name = tensor("reduce_mean_0_cast")]; + tensor sub_0_cast = sub(x = reshape_0_cast, y = reduce_mean_0_cast)[name = tensor("sub_0_cast")]; + tensor square_0_cast = square(x = sub_0_cast)[name = tensor("square_0_cast")]; + tensor reduce_mean_2_axes_0 = const()[name = tensor("reduce_mean_2_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_2_keep_dims_0 = const()[name = tensor("reduce_mean_2_keep_dims_0"), val = 
tensor(true)]; + tensor reduce_mean_2_cast = reduce_mean(axes = reduce_mean_2_axes_0, keep_dims = reduce_mean_2_keep_dims_0, x = square_0_cast)[name = tensor("reduce_mean_2_cast")]; + tensor add_0_y_0_to_fp16 = const()[name = tensor("add_0_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_0_cast = add(x = reduce_mean_2_cast, y = add_0_y_0_to_fp16)[name = tensor("add_0_cast")]; + tensor sqrt_0_cast = sqrt(x = add_0_cast)[name = tensor("sqrt_0_cast")]; + tensor real_div_0_cast = real_div(x = sub_0_cast, y = sqrt_0_cast)[name = tensor("real_div_0_cast")]; + tensor reshape_1_shape_0 = const()[name = tensor("reshape_1_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_1_cast = reshape(shape = reshape_1_shape_0, x = real_div_0_cast)[name = tensor("reshape_1_cast")]; + tensor add_1_mean_0_to_fp16 = const()[name = tensor("add_1_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8966336)))]; + tensor add_1_variance_0_to_fp16 = const()[name = tensor("add_1_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8967040)))]; + tensor add_1_gamma_0_to_fp16 = const()[name = tensor("add_1_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8967744)))]; + tensor add_1_beta_0_to_fp16 = const()[name = tensor("add_1_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8968448)))]; + tensor add_1_epsilon_0_to_fp16 = const()[name = tensor("add_1_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_1_cast = batch_norm(beta = add_1_beta_0_to_fp16, epsilon = add_1_epsilon_0_to_fp16, gamma = add_1_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_1_cast)[name = tensor("add_1_cast")]; + tensor input_17_cast = silu(x = add_1_cast)[name = tensor("input_17_cast")]; + tensor var_190 = const()[name = tensor("op_190"), val = tensor([1, 1])]; + tensor var_192 = const()[name = tensor("op_192"), val = tensor([1, 1])]; + tensor hidden_states_1_pad_type_0 = const()[name = tensor("hidden_states_1_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_1_pad_0 = const()[name = tensor("hidden_states_1_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_0_resnets_0_conv1_weight_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(8969152)))]; + tensor down_blocks_0_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(10812416)))]; + tensor hidden_states_1_cast = conv(bias = down_blocks_0_resnets_0_conv1_bias_to_fp16, dilations = var_192, groups = var_172, pad = hidden_states_1_pad_0, pad_type = hidden_states_1_pad_type_0, strides = var_190, weight = down_blocks_0_resnets_0_conv1_weight_to_fp16, x = input_17_cast)[name = tensor("hidden_states_1_cast")]; + tensor input_21_cast = silu(x = input_19_cast)[name = tensor("input_21_cast")]; + tensor var_198 = const()[name = tensor("op_198"), val = tensor([1, 1])]; + tensor var_200 = const()[name = tensor("op_200"), val = tensor([1, 1])]; + tensor temb_1_pad_type_0 = const()[name = tensor("temb_1_pad_type_0"), val = tensor("custom")]; + tensor temb_1_pad_0 = const()[name = tensor("temb_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_0_resnets_0_time_emb_proj_weight_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_time_emb_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(10813120)))]; + tensor down_blocks_0_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11632384)))]; + tensor temb_1_cast = conv(bias = down_blocks_0_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_200, groups = var_172, pad = temb_1_pad_0, pad_type = temb_1_pad_type_0, strides = var_198, weight = down_blocks_0_resnets_0_time_emb_proj_weight_to_fp16, x = input_21_cast)[name = tensor("temb_1_cast")]; + tensor input_23_cast = add(x = hidden_states_1_cast, y = temb_1_cast)[name = tensor("input_23_cast")]; + tensor reshape_4_shape_0 = const()[name = tensor("reshape_4_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_4_cast = reshape(shape = reshape_4_shape_0, x = input_23_cast)[name = tensor("reshape_4_cast")]; + tensor reduce_mean_3_axes_0 = const()[name = tensor("reduce_mean_3_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_3_keep_dims_0 = const()[name = tensor("reduce_mean_3_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_3_cast = reduce_mean(axes = reduce_mean_3_axes_0, keep_dims = reduce_mean_3_keep_dims_0, x = reshape_4_cast)[name = tensor("reduce_mean_3_cast")]; + tensor sub_2_cast = sub(x = reshape_4_cast, y = reduce_mean_3_cast)[name = tensor("sub_2_cast")]; + tensor square_1_cast = square(x = sub_2_cast)[name = tensor("square_1_cast")]; + tensor reduce_mean_5_axes_0 = const()[name = tensor("reduce_mean_5_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_5_keep_dims_0 = const()[name = tensor("reduce_mean_5_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_5_cast = reduce_mean(axes = reduce_mean_5_axes_0, keep_dims = reduce_mean_5_keep_dims_0, x = square_1_cast)[name = tensor("reduce_mean_5_cast")]; + tensor add_2_y_0_to_fp16 = const()[name = tensor("add_2_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_2_cast = add(x = reduce_mean_5_cast, y = add_2_y_0_to_fp16)[name = tensor("add_2_cast")]; + tensor sqrt_1_cast = sqrt(x = add_2_cast)[name = tensor("sqrt_1_cast")]; + tensor real_div_1_cast = real_div(x = sub_2_cast, y = sqrt_1_cast)[name = tensor("real_div_1_cast")]; + tensor reshape_5_shape_0 = const()[name = tensor("reshape_5_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_5_cast = reshape(shape = reshape_5_shape_0, x = real_div_1_cast)[name = tensor("reshape_5_cast")]; + tensor add_3_gamma_0_to_fp16 = const()[name = tensor("add_3_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11633088)))]; + tensor add_3_beta_0_to_fp16 = const()[name = tensor("add_3_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11633792)))]; + tensor add_3_epsilon_0_to_fp16 = const()[name = tensor("add_3_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_3_cast = batch_norm(beta = add_3_beta_0_to_fp16, epsilon = add_3_epsilon_0_to_fp16, gamma = add_3_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_5_cast)[name = tensor("add_3_cast")]; + tensor input_27_cast = silu(x = add_3_cast)[name = tensor("input_27_cast")]; + tensor var_210 = const()[name = tensor("op_210"), val = tensor([1, 1])]; + tensor var_212 = 
const()[name = tensor("op_212"), val = tensor([1, 1])]; + tensor hidden_states_3_pad_type_0 = const()[name = tensor("hidden_states_3_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_3_pad_0 = const()[name = tensor("hidden_states_3_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_0_resnets_0_conv2_weight_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11634496)))]; + tensor down_blocks_0_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(13477760)))]; + tensor hidden_states_3_cast = conv(bias = down_blocks_0_resnets_0_conv2_bias_to_fp16, dilations = var_212, groups = var_172, pad = hidden_states_3_pad_0, pad_type = hidden_states_3_pad_type_0, strides = var_210, weight = down_blocks_0_resnets_0_conv2_weight_to_fp16, x = input_27_cast)[name = tensor("hidden_states_3_cast")]; + tensor input_29_cast = add(x = input_13_cast, y = hidden_states_3_cast)[name = tensor("input_29_cast")]; + tensor reshape_8_shape_0 = const()[name = tensor("reshape_8_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_8_cast = reshape(shape = reshape_8_shape_0, x = input_29_cast)[name = tensor("reshape_8_cast")]; + tensor reduce_mean_6_axes_0 = const()[name = tensor("reduce_mean_6_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_6_keep_dims_0 = const()[name = tensor("reduce_mean_6_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_6_cast = reduce_mean(axes = reduce_mean_6_axes_0, keep_dims = reduce_mean_6_keep_dims_0, x = reshape_8_cast)[name = tensor("reduce_mean_6_cast")]; + tensor sub_4_cast = sub(x = reshape_8_cast, y = reduce_mean_6_cast)[name = tensor("sub_4_cast")]; + tensor square_2_cast = square(x = sub_4_cast)[name = tensor("square_2_cast")]; + tensor reduce_mean_8_axes_0 = const()[name = tensor("reduce_mean_8_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_8_keep_dims_0 = const()[name = tensor("reduce_mean_8_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_8_cast = reduce_mean(axes = reduce_mean_8_axes_0, keep_dims = reduce_mean_8_keep_dims_0, x = square_2_cast)[name = tensor("reduce_mean_8_cast")]; + tensor add_4_y_0_to_fp16 = const()[name = tensor("add_4_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_4_cast = add(x = reduce_mean_8_cast, y = add_4_y_0_to_fp16)[name = tensor("add_4_cast")]; + tensor sqrt_2_cast = sqrt(x = add_4_cast)[name = tensor("sqrt_2_cast")]; + tensor real_div_2_cast = real_div(x = sub_4_cast, y = sqrt_2_cast)[name = tensor("real_div_2_cast")]; + tensor reshape_9_shape_0 = const()[name = tensor("reshape_9_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_9_cast = reshape(shape = reshape_9_shape_0, x = real_div_2_cast)[name = tensor("reshape_9_cast")]; + tensor add_5_gamma_0_to_fp16 = const()[name = tensor("add_5_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(13478464)))]; + tensor add_5_beta_0_to_fp16 = const()[name = tensor("add_5_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(13479168)))]; + tensor add_5_epsilon_0_to_fp16 = const()[name = tensor("add_5_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_5_cast = batch_norm(beta = add_5_beta_0_to_fp16, epsilon = add_5_epsilon_0_to_fp16, gamma = add_5_gamma_0_to_fp16, mean = 
add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_9_cast)[name = tensor("add_5_cast")]; + tensor input_33_cast = silu(x = add_5_cast)[name = tensor("input_33_cast")]; + tensor var_227 = const()[name = tensor("op_227"), val = tensor([1, 1])]; + tensor var_229 = const()[name = tensor("op_229"), val = tensor([1, 1])]; + tensor hidden_states_5_pad_type_0 = const()[name = tensor("hidden_states_5_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_5_pad_0 = const()[name = tensor("hidden_states_5_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_0_resnets_1_conv1_weight_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(13479872)))]; + tensor down_blocks_0_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15323136)))]; + tensor hidden_states_5_cast = conv(bias = down_blocks_0_resnets_1_conv1_bias_to_fp16, dilations = var_229, groups = var_172, pad = hidden_states_5_pad_0, pad_type = hidden_states_5_pad_type_0, strides = var_227, weight = down_blocks_0_resnets_1_conv1_weight_to_fp16, x = input_33_cast)[name = tensor("hidden_states_5_cast")]; + tensor var_235 = const()[name = tensor("op_235"), val = tensor([1, 1])]; + tensor var_237 = const()[name = tensor("op_237"), val = tensor([1, 1])]; + tensor temb_3_pad_type_0 = const()[name = tensor("temb_3_pad_type_0"), val = tensor("custom")]; + tensor temb_3_pad_0 = const()[name = tensor("temb_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15323840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15733504))), name = tensor("down_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([320, 1280, 1, 1])]; + tensor down_blocks_0_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15734080)))]; + tensor temb_3_cast = conv(bias = down_blocks_0_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_237, groups = var_172, pad = temb_3_pad_0, pad_type = temb_3_pad_type_0, strides = var_235, weight = down_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_3_cast")]; + tensor input_37_cast = add(x = hidden_states_5_cast, y = temb_3_cast)[name = tensor("input_37_cast")]; + tensor reshape_12_shape_0 = const()[name = tensor("reshape_12_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_12_cast = reshape(shape = reshape_12_shape_0, x = input_37_cast)[name = tensor("reshape_12_cast")]; + tensor reduce_mean_9_axes_0 = const()[name = tensor("reduce_mean_9_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_9_keep_dims_0 = const()[name = tensor("reduce_mean_9_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_9_cast = reduce_mean(axes = reduce_mean_9_axes_0, keep_dims = reduce_mean_9_keep_dims_0, x = reshape_12_cast)[name = tensor("reduce_mean_9_cast")]; + tensor sub_6_cast = sub(x = reshape_12_cast, y = reduce_mean_9_cast)[name = tensor("sub_6_cast")]; + tensor square_3_cast = square(x = sub_6_cast)[name = 
tensor("square_3_cast")]; + tensor reduce_mean_11_axes_0 = const()[name = tensor("reduce_mean_11_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_11_keep_dims_0 = const()[name = tensor("reduce_mean_11_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_11_cast = reduce_mean(axes = reduce_mean_11_axes_0, keep_dims = reduce_mean_11_keep_dims_0, x = square_3_cast)[name = tensor("reduce_mean_11_cast")]; + tensor add_6_y_0_to_fp16 = const()[name = tensor("add_6_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_6_cast = add(x = reduce_mean_11_cast, y = add_6_y_0_to_fp16)[name = tensor("add_6_cast")]; + tensor sqrt_3_cast = sqrt(x = add_6_cast)[name = tensor("sqrt_3_cast")]; + tensor real_div_3_cast = real_div(x = sub_6_cast, y = sqrt_3_cast)[name = tensor("real_div_3_cast")]; + tensor reshape_13_shape_0 = const()[name = tensor("reshape_13_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_13_cast = reshape(shape = reshape_13_shape_0, x = real_div_3_cast)[name = tensor("reshape_13_cast")]; + tensor add_7_gamma_0_to_fp16 = const()[name = tensor("add_7_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15734784)))]; + tensor add_7_beta_0_to_fp16 = const()[name = tensor("add_7_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15735488)))]; + tensor add_7_epsilon_0_to_fp16 = const()[name = tensor("add_7_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_7_cast = batch_norm(beta = add_7_beta_0_to_fp16, epsilon = add_7_epsilon_0_to_fp16, gamma = add_7_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_13_cast)[name = tensor("add_7_cast")]; + tensor input_41_cast = silu(x = add_7_cast)[name = tensor("input_41_cast")]; + tensor var_247 = const()[name = tensor("op_247"), val = tensor([1, 1])]; + tensor var_249 = const()[name = tensor("op_249"), val = tensor([1, 1])]; + tensor hidden_states_7_pad_type_0 = const()[name = tensor("hidden_states_7_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_7_pad_0 = const()[name = tensor("hidden_states_7_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_0_resnets_1_conv2_weight_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(15736192)))]; + tensor down_blocks_0_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(17579456)))]; + tensor hidden_states_7_cast = conv(bias = down_blocks_0_resnets_1_conv2_bias_to_fp16, dilations = var_249, groups = var_172, pad = hidden_states_7_pad_0, pad_type = hidden_states_7_pad_type_0, strides = var_247, weight = down_blocks_0_resnets_1_conv2_weight_to_fp16, x = input_41_cast)[name = tensor("hidden_states_7_cast")]; + tensor input_43_cast = add(x = input_29_cast, y = hidden_states_7_cast)[name = tensor("input_43_cast")]; + tensor var_256 = const()[name = tensor("op_256"), val = tensor([2, 2])]; + tensor var_258 = const()[name = tensor("op_258"), val = tensor([1, 1])]; + tensor input_45_pad_type_0 = const()[name = tensor("input_45_pad_type_0"), val = tensor("custom")]; + tensor input_45_pad_0 = const()[name = tensor("input_45_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_0_downsamplers_0_conv_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(17580160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18501824))), name = tensor("down_blocks_0_downsamplers_0_conv_weight_to_fp16_palettized"), shape = tensor([320, 320, 3, 3])]; + tensor down_blocks_0_downsamplers_0_conv_bias_to_fp16 = const()[name = tensor("down_blocks_0_downsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18502400)))]; + tensor input_45_cast = conv(bias = down_blocks_0_downsamplers_0_conv_bias_to_fp16, dilations = var_258, groups = var_172, pad = input_45_pad_0, pad_type = input_45_pad_type_0, strides = var_256, weight = down_blocks_0_downsamplers_0_conv_weight_to_fp16_palettized, x = input_43_cast)[name = tensor("input_45_cast")]; + tensor var_266 = const()[name = tensor("op_266"), val = tensor(3)]; + tensor var_277 = const()[name = tensor("op_277"), val = tensor(true)]; + tensor var_282 = const()[name = tensor("op_282"), val = tensor(1)]; + tensor reshape_16_shape_0 = const()[name = tensor("reshape_16_shape_0"), val = tensor([2, 32, 10, 64, 64])]; + tensor reshape_16_cast = reshape(shape = reshape_16_shape_0, x = input_45_cast)[name = tensor("reshape_16_cast")]; + tensor reduce_mean_12_axes_0 = const()[name = tensor("reduce_mean_12_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_12_keep_dims_0 = const()[name = tensor("reduce_mean_12_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_12_cast = reduce_mean(axes = reduce_mean_12_axes_0, keep_dims = reduce_mean_12_keep_dims_0, x = reshape_16_cast)[name = tensor("reduce_mean_12_cast")]; + tensor sub_8_cast = sub(x = reshape_16_cast, y = reduce_mean_12_cast)[name = tensor("sub_8_cast")]; + tensor square_4_cast = square(x = sub_8_cast)[name = tensor("square_4_cast")]; + tensor reduce_mean_14_axes_0 = const()[name = tensor("reduce_mean_14_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_14_keep_dims_0 = const()[name = tensor("reduce_mean_14_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_14_cast = reduce_mean(axes = reduce_mean_14_axes_0, keep_dims = reduce_mean_14_keep_dims_0, x = square_4_cast)[name = tensor("reduce_mean_14_cast")]; + tensor add_8_y_0_to_fp16 = const()[name = tensor("add_8_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_8_cast = add(x = reduce_mean_14_cast, y = add_8_y_0_to_fp16)[name = tensor("add_8_cast")]; + tensor sqrt_4_cast = sqrt(x = add_8_cast)[name = tensor("sqrt_4_cast")]; + tensor real_div_4_cast = real_div(x = sub_8_cast, y = sqrt_4_cast)[name = tensor("real_div_4_cast")]; + tensor reshape_17_shape_0 = const()[name = tensor("reshape_17_shape_0"), val = tensor([2, 320, 64, 64])]; + tensor reshape_17_cast = reshape(shape = reshape_17_shape_0, x = real_div_4_cast)[name = tensor("reshape_17_cast")]; + tensor add_9_gamma_0_to_fp16 = const()[name = tensor("add_9_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18503104)))]; + tensor add_9_beta_0_to_fp16 = const()[name = tensor("add_9_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18503808)))]; + tensor add_9_epsilon_0_to_fp16 = const()[name = tensor("add_9_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_9_cast = batch_norm(beta = add_9_beta_0_to_fp16, epsilon = add_9_epsilon_0_to_fp16, gamma = add_9_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = 
reshape_17_cast)[name = tensor("add_9_cast")]; + tensor input_49_cast = silu(x = add_9_cast)[name = tensor("input_49_cast")]; + tensor var_305 = const()[name = tensor("op_305"), val = tensor([1, 1])]; + tensor var_307 = const()[name = tensor("op_307"), val = tensor([1, 1])]; + tensor hidden_states_9_pad_type_0 = const()[name = tensor("hidden_states_9_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_9_pad_0 = const()[name = tensor("hidden_states_9_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(18504512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20347776))), name = tensor("down_blocks_1_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([640, 320, 3, 3])]; + tensor down_blocks_1_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20348352)))]; + tensor hidden_states_9_cast = conv(bias = down_blocks_1_resnets_0_conv1_bias_to_fp16, dilations = var_307, groups = var_282, pad = hidden_states_9_pad_0, pad_type = hidden_states_9_pad_type_0, strides = var_305, weight = down_blocks_1_resnets_0_conv1_weight_to_fp16_palettized, x = input_49_cast)[name = tensor("hidden_states_9_cast")]; + tensor var_313 = const()[name = tensor("op_313"), val = tensor([1, 1])]; + tensor var_315 = const()[name = tensor("op_315"), val = tensor([1, 1])]; + tensor temb_5_pad_type_0 = const()[name = tensor("temb_5_pad_type_0"), val = tensor("custom")]; + tensor temb_5_pad_0 = const()[name = tensor("temb_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20349696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20964160))), name = tensor("down_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor down_blocks_1_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20964352)))]; + tensor temb_5_cast = conv(bias = down_blocks_1_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_315, groups = var_282, pad = temb_5_pad_0, pad_type = temb_5_pad_type_0, strides = var_313, weight = down_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_5_cast")]; + tensor input_53_cast = add(x = hidden_states_9_cast, y = temb_5_cast)[name = tensor("input_53_cast")]; + tensor reshape_20_shape_0 = const()[name = tensor("reshape_20_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_20_cast = reshape(shape = reshape_20_shape_0, x = input_53_cast)[name = tensor("reshape_20_cast")]; + tensor reduce_mean_15_axes_0 = const()[name = tensor("reduce_mean_15_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_15_keep_dims_0 = const()[name = tensor("reduce_mean_15_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_15_cast = reduce_mean(axes = reduce_mean_15_axes_0, keep_dims = reduce_mean_15_keep_dims_0, x = reshape_20_cast)[name = tensor("reduce_mean_15_cast")]; + tensor sub_10_cast = sub(x = 
reshape_20_cast, y = reduce_mean_15_cast)[name = tensor("sub_10_cast")]; + tensor square_5_cast = square(x = sub_10_cast)[name = tensor("square_5_cast")]; + tensor reduce_mean_17_axes_0 = const()[name = tensor("reduce_mean_17_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_17_keep_dims_0 = const()[name = tensor("reduce_mean_17_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_17_cast = reduce_mean(axes = reduce_mean_17_axes_0, keep_dims = reduce_mean_17_keep_dims_0, x = square_5_cast)[name = tensor("reduce_mean_17_cast")]; + tensor add_10_y_0_to_fp16 = const()[name = tensor("add_10_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_10_cast = add(x = reduce_mean_17_cast, y = add_10_y_0_to_fp16)[name = tensor("add_10_cast")]; + tensor sqrt_5_cast = sqrt(x = add_10_cast)[name = tensor("sqrt_5_cast")]; + tensor real_div_5_cast = real_div(x = sub_10_cast, y = sqrt_5_cast)[name = tensor("real_div_5_cast")]; + tensor reshape_21_shape_0 = const()[name = tensor("reshape_21_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_21_cast = reshape(shape = reshape_21_shape_0, x = real_div_5_cast)[name = tensor("reshape_21_cast")]; + tensor add_11_mean_0_to_fp16 = const()[name = tensor("add_11_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20965696)))]; + tensor add_11_variance_0_to_fp16 = const()[name = tensor("add_11_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20967040)))]; + tensor add_11_gamma_0_to_fp16 = const()[name = tensor("add_11_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20968384)))]; + tensor add_11_beta_0_to_fp16 = const()[name = tensor("add_11_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20969728)))]; + tensor add_11_epsilon_0_to_fp16 = const()[name = tensor("add_11_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_11_cast = batch_norm(beta = add_11_beta_0_to_fp16, epsilon = add_11_epsilon_0_to_fp16, gamma = add_11_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_21_cast)[name = tensor("add_11_cast")]; + tensor input_57_cast = silu(x = add_11_cast)[name = tensor("input_57_cast")]; + tensor var_325 = const()[name = tensor("op_325"), val = tensor([1, 1])]; + tensor var_327 = const()[name = tensor("op_327"), val = tensor([1, 1])]; + tensor hidden_states_11_pad_type_0 = const()[name = tensor("hidden_states_11_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_11_pad_0 = const()[name = tensor("hidden_states_11_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20971072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(24657536))), name = tensor("down_blocks_1_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor down_blocks_1_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(24658112)))]; + tensor hidden_states_11_cast = conv(bias = down_blocks_1_resnets_0_conv2_bias_to_fp16, dilations = var_327, groups = var_282, pad = hidden_states_11_pad_0, pad_type = hidden_states_11_pad_type_0, 
strides = var_325, weight = down_blocks_1_resnets_0_conv2_weight_to_fp16_palettized, x = input_57_cast)[name = tensor("hidden_states_11_cast")]; + tensor var_332 = const()[name = tensor("op_332"), val = tensor([1, 1])]; + tensor var_334 = const()[name = tensor("op_334"), val = tensor([1, 1])]; + tensor x_1_pad_type_0 = const()[name = tensor("x_1_pad_type_0"), val = tensor("custom")]; + tensor x_1_pad_0 = const()[name = tensor("x_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(24659456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(24864320))), name = tensor("down_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([640, 320, 1, 1])]; + tensor down_blocks_1_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(24864896)))]; + tensor x_1_cast = conv(bias = down_blocks_1_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_334, groups = var_282, pad = x_1_pad_0, pad_type = x_1_pad_type_0, strides = var_332, weight = down_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized, x = input_45_cast)[name = tensor("x_1_cast")]; + tensor hidden_states_13_cast = add(x = x_1_cast, y = hidden_states_11_cast)[name = tensor("hidden_states_13_cast")]; + tensor reshape_24_shape_0 = const()[name = tensor("reshape_24_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_24_cast = reshape(shape = reshape_24_shape_0, x = hidden_states_13_cast)[name = tensor("reshape_24_cast")]; + tensor reduce_mean_18_axes_0 = const()[name = tensor("reduce_mean_18_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_18_keep_dims_0 = const()[name = tensor("reduce_mean_18_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_18_cast = reduce_mean(axes = reduce_mean_18_axes_0, keep_dims = reduce_mean_18_keep_dims_0, x = reshape_24_cast)[name = tensor("reduce_mean_18_cast")]; + tensor sub_12_cast = sub(x = reshape_24_cast, y = reduce_mean_18_cast)[name = tensor("sub_12_cast")]; + tensor square_6_cast = square(x = sub_12_cast)[name = tensor("square_6_cast")]; + tensor reduce_mean_20_axes_0 = const()[name = tensor("reduce_mean_20_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_20_keep_dims_0 = const()[name = tensor("reduce_mean_20_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_20_cast = reduce_mean(axes = reduce_mean_20_axes_0, keep_dims = reduce_mean_20_keep_dims_0, x = square_6_cast)[name = tensor("reduce_mean_20_cast")]; + tensor add_12_y_0_to_fp16 = const()[name = tensor("add_12_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_12_cast = add(x = reduce_mean_20_cast, y = add_12_y_0_to_fp16)[name = tensor("add_12_cast")]; + tensor sqrt_6_cast = sqrt(x = add_12_cast)[name = tensor("sqrt_6_cast")]; + tensor real_div_6_cast = real_div(x = sub_12_cast, y = sqrt_6_cast)[name = tensor("real_div_6_cast")]; + tensor reshape_25_shape_0 = const()[name = tensor("reshape_25_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_25_cast = reshape(shape = reshape_25_shape_0, x = real_div_6_cast)[name = tensor("reshape_25_cast")]; + tensor add_13_gamma_0_to_fp16 = const()[name = tensor("add_13_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(24866240)))]; + tensor add_13_beta_0_to_fp16 = const()[name = tensor("add_13_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(24867584)))]; + tensor add_13_epsilon_0_to_fp16 = const()[name = tensor("add_13_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_13_cast = batch_norm(beta = add_13_beta_0_to_fp16, epsilon = add_13_epsilon_0_to_fp16, gamma = add_13_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_25_cast)[name = tensor("add_13_cast")]; + tensor var_356 = const()[name = tensor("op_356"), val = tensor([1, 1])]; + tensor var_358 = const()[name = tensor("op_358"), val = tensor([1, 1])]; + tensor hidden_states_15_pad_type_0 = const()[name = tensor("hidden_states_15_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_15_pad_0 = const()[name = tensor("hidden_states_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(24868928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25278592))), name = tensor("down_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25279168)))]; + tensor hidden_states_15_cast = conv(bias = down_blocks_1_attentions_0_proj_in_bias_to_fp16, dilations = var_358, groups = var_282, pad = hidden_states_15_pad_0, pad_type = hidden_states_15_pad_type_0, strides = var_356, weight = down_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized, x = add_13_cast)[name = tensor("hidden_states_15_cast")]; + tensor var_363 = const()[name = tensor("op_363"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_1_cast = reshape(shape = var_363, x = hidden_states_15_cast)[name = tensor("inputs_1_cast")]; + tensor var_373 = const()[name = tensor("op_373"), val = tensor([1])]; + tensor channels_mean_1_cast = reduce_mean(axes = var_373, keep_dims = var_277, x = inputs_1_cast)[name = tensor("channels_mean_1_cast")]; + tensor zero_mean_1_cast = sub(x = inputs_1_cast, y = channels_mean_1_cast)[name = tensor("zero_mean_1_cast")]; + tensor zero_mean_sq_1_cast = mul(x = zero_mean_1_cast, y = zero_mean_1_cast)[name = tensor("zero_mean_sq_1_cast")]; + tensor var_377 = const()[name = tensor("op_377"), val = tensor([1])]; + tensor var_378_cast = reduce_mean(axes = var_377, keep_dims = var_277, x = zero_mean_sq_1_cast)[name = tensor("op_378_cast")]; + tensor var_379_to_fp16 = const()[name = tensor("op_379_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_380_cast = add(x = var_378_cast, y = var_379_to_fp16)[name = tensor("op_380_cast")]; + tensor denom_1_epsilon_0_to_fp16 = const()[name = tensor("denom_1_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_1_cast = rsqrt(epsilon = denom_1_epsilon_0_to_fp16, x = var_380_cast)[name = tensor("denom_1_cast")]; + tensor out_1_cast = mul(x = zero_mean_1_cast, y = denom_1_cast)[name = tensor("out_1_cast")]; + tensor var_384_to_fp16 = const()[name = tensor("op_384_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25280512)))]; + tensor var_385_cast = add(x = out_1_cast, y = var_384_to_fp16)[name = 
tensor("op_385_cast")]; + tensor var_387_to_fp16 = const()[name = tensor("op_387_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25281856)))]; + tensor hidden_states_17_cast = mul(x = var_385_cast, y = var_387_to_fp16)[name = tensor("hidden_states_17_cast")]; + tensor var_394 = const()[name = tensor("op_394"), val = tensor([1, 1])]; + tensor var_396 = const()[name = tensor("op_396"), val = tensor([1, 1])]; + tensor q_1_pad_type_0 = const()[name = tensor("q_1_pad_type_0"), val = tensor("custom")]; + tensor q_1_pad_0 = const()[name = tensor("q_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25283200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25590464))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_1_cast = conv(dilations = var_396, groups = var_282, pad = q_1_pad_0, pad_type = q_1_pad_type_0, strides = var_394, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_17_cast)[name = tensor("q_1_cast")]; + tensor var_400 = const()[name = tensor("op_400"), val = tensor([1, 1])]; + tensor var_402 = const()[name = tensor("op_402"), val = tensor([1, 1])]; + tensor k_1_pad_type_0 = const()[name = tensor("k_1_pad_type_0"), val = tensor("custom")]; + tensor k_1_pad_0 = const()[name = tensor("k_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25590656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25897920))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_1_cast = conv(dilations = var_402, groups = var_282, pad = k_1_pad_0, pad_type = k_1_pad_type_0, strides = var_400, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_17_cast)[name = tensor("k_1_cast")]; + tensor var_406 = const()[name = tensor("op_406"), val = tensor([1, 1])]; + tensor var_408 = const()[name = tensor("op_408"), val = tensor([1, 1])]; + tensor v_1_pad_type_0 = const()[name = tensor("v_1_pad_type_0"), val = tensor("custom")]; + tensor v_1_pad_0 = const()[name = tensor("v_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25898112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26307776))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_1_cast = conv(dilations = var_408, groups = var_282, pad = v_1_pad_0, pad_type = v_1_pad_type_0, strides = var_406, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_17_cast)[name = tensor("v_1_cast")]; + tensor var_412 = const()[name = 
tensor("op_412"), val = tensor([2, 10, 64, -1])]; + tensor var_413_cast = reshape(shape = var_412, x = q_1_cast)[name = tensor("op_413_cast")]; + tensor var_414 = const()[name = tensor("op_414"), val = tensor([2, 10, 64, -1])]; + tensor var_415_cast = reshape(shape = var_414, x = k_1_cast)[name = tensor("op_415_cast")]; + tensor var_416 = const()[name = tensor("op_416"), val = tensor([2, 10, 64, -1])]; + tensor var_417_cast = reshape(shape = var_416, x = v_1_cast)[name = tensor("op_417_cast")]; + tensor attn_weights_1_transpose_x_0 = const()[name = tensor("attn_weights_1_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_1_transpose_y_0 = const()[name = tensor("attn_weights_1_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_1_cast = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = var_413_cast, y = var_415_cast)[name = tensor("attn_weights_1_cast")]; + tensor var_273_to_fp16 = const()[name = tensor("op_273_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_3_cast = mul(x = attn_weights_1_cast, y = var_273_to_fp16)[name = tensor("attn_weights_3_cast")]; + tensor var_421_cast = softmax(axis = var_266, x = attn_weights_3_cast)[name = tensor("op_421_cast")]; + tensor attn_1_transpose_x_0 = const()[name = tensor("attn_1_transpose_x_0"), val = tensor(false)]; + tensor attn_1_transpose_y_0 = const()[name = tensor("attn_1_transpose_y_0"), val = tensor(true)]; + tensor attn_1_cast = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = var_417_cast, y = var_421_cast)[name = tensor("attn_1_cast")]; + tensor var_425 = const()[name = tensor("op_425"), val = tensor([2, 640, 1, -1])]; + tensor input_61_cast = reshape(shape = var_425, x = attn_1_cast)[name = tensor("input_61_cast")]; + tensor var_430 = const()[name = tensor("op_430"), val = tensor([1, 1])]; + tensor var_432 = const()[name = tensor("op_432"), val = tensor([1, 1])]; + tensor var_434_pad_type_0 = const()[name = tensor("op_434_pad_type_0"), val = tensor("custom")]; + tensor var_434_pad_0 = const()[name = tensor("op_434_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26308352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26718016))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26718592)))]; + tensor var_434_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_432, groups = var_282, pad = var_434_pad_0, pad_type = var_434_pad_type_0, strides = var_430, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_61_cast)[name = tensor("op_434_cast")]; + tensor inputs_3_cast = add(x = var_434_cast, y = inputs_1_cast)[name = tensor("inputs_3_cast")]; + tensor var_438 = const()[name = tensor("op_438"), val = tensor([1])]; + tensor channels_mean_3_cast = reduce_mean(axes = var_438, keep_dims = var_277, x = inputs_3_cast)[name 
= tensor("channels_mean_3_cast")]; + tensor zero_mean_3_cast = sub(x = inputs_3_cast, y = channels_mean_3_cast)[name = tensor("zero_mean_3_cast")]; + tensor zero_mean_sq_3_cast = mul(x = zero_mean_3_cast, y = zero_mean_3_cast)[name = tensor("zero_mean_sq_3_cast")]; + tensor var_442 = const()[name = tensor("op_442"), val = tensor([1])]; + tensor var_443_cast = reduce_mean(axes = var_442, keep_dims = var_277, x = zero_mean_sq_3_cast)[name = tensor("op_443_cast")]; + tensor var_444_to_fp16 = const()[name = tensor("op_444_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_445_cast = add(x = var_443_cast, y = var_444_to_fp16)[name = tensor("op_445_cast")]; + tensor denom_3_epsilon_0_to_fp16 = const()[name = tensor("denom_3_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_3_cast = rsqrt(epsilon = denom_3_epsilon_0_to_fp16, x = var_445_cast)[name = tensor("denom_3_cast")]; + tensor out_3_cast = mul(x = zero_mean_3_cast, y = denom_3_cast)[name = tensor("out_3_cast")]; + tensor var_449_to_fp16 = const()[name = tensor("op_449_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26719936)))]; + tensor var_450_cast = add(x = out_3_cast, y = var_449_to_fp16)[name = tensor("op_450_cast")]; + tensor var_452_to_fp16 = const()[name = tensor("op_452_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26721280)))]; + tensor hidden_states_19_cast = mul(x = var_450_cast, y = var_452_to_fp16)[name = tensor("hidden_states_19_cast")]; + tensor var_459 = const()[name = tensor("op_459"), val = tensor([1, 1])]; + tensor var_461 = const()[name = tensor("op_461"), val = tensor([1, 1])]; + tensor q_3_pad_type_0 = const()[name = tensor("q_3_pad_type_0"), val = tensor("custom")]; + tensor q_3_pad_0 = const()[name = tensor("q_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26722624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26927488))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_3_cast = conv(dilations = var_461, groups = var_282, pad = q_3_pad_0, pad_type = q_3_pad_type_0, strides = var_459, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_19_cast)[name = tensor("q_3_cast")]; + tensor var_465 = const()[name = tensor("op_465"), val = tensor([1, 1])]; + tensor var_467 = const()[name = tensor("op_467"), val = tensor([1, 1])]; + tensor k_3_pad_type_0 = const()[name = tensor("k_3_pad_type_0"), val = tensor("custom")]; + tensor k_3_pad_0 = const()[name = tensor("k_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(26927616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(27910720))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_3_cast = conv(dilations = var_467, groups = var_282, pad = k_3_pad_0, pad_type = k_3_pad_type_0, strides = var_465, weight = 
down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_3_cast")]; + tensor var_471 = const()[name = tensor("op_471"), val = tensor([1, 1])]; + tensor var_473 = const()[name = tensor("op_473"), val = tensor([1, 1])]; + tensor v_3_pad_type_0 = const()[name = tensor("v_3_pad_type_0"), val = tensor("custom")]; + tensor v_3_pad_0 = const()[name = tensor("v_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(27910912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28894016))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_3_cast = conv(dilations = var_473, groups = var_282, pad = v_3_pad_0, pad_type = v_3_pad_type_0, strides = var_471, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_3_cast")]; + tensor var_477 = const()[name = tensor("op_477"), val = tensor([2, 10, 64, -1])]; + tensor var_478_cast = reshape(shape = var_477, x = q_3_cast)[name = tensor("op_478_cast")]; + tensor var_479 = const()[name = tensor("op_479"), val = tensor([2, 10, 64, -1])]; + tensor var_480_cast = reshape(shape = var_479, x = k_3_cast)[name = tensor("op_480_cast")]; + tensor var_481 = const()[name = tensor("op_481"), val = tensor([2, 10, 64, -1])]; + tensor var_482_cast = reshape(shape = var_481, x = v_3_cast)[name = tensor("op_482_cast")]; + tensor attn_weights_5_transpose_x_0 = const()[name = tensor("attn_weights_5_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_5_transpose_y_0 = const()[name = tensor("attn_weights_5_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_5_cast = matmul(transpose_x = attn_weights_5_transpose_x_0, transpose_y = attn_weights_5_transpose_y_0, x = var_478_cast, y = var_480_cast)[name = tensor("attn_weights_5_cast")]; + tensor attn_weights_7_cast = mul(x = attn_weights_5_cast, y = var_273_to_fp16)[name = tensor("attn_weights_7_cast")]; + tensor var_486_cast = softmax(axis = var_266, x = attn_weights_7_cast)[name = tensor("op_486_cast")]; + tensor attn_3_transpose_x_0 = const()[name = tensor("attn_3_transpose_x_0"), val = tensor(false)]; + tensor attn_3_transpose_y_0 = const()[name = tensor("attn_3_transpose_y_0"), val = tensor(true)]; + tensor attn_3_cast = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = var_482_cast, y = var_486_cast)[name = tensor("attn_3_cast")]; + tensor var_490 = const()[name = tensor("op_490"), val = tensor([2, 640, 1, -1])]; + tensor input_63_cast = reshape(shape = var_490, x = attn_3_cast)[name = tensor("input_63_cast")]; + tensor var_495 = const()[name = tensor("op_495"), val = tensor([1, 1])]; + tensor var_497 = const()[name = tensor("op_497"), val = tensor([1, 1])]; + tensor var_499_pad_type_0 = const()[name = tensor("op_499_pad_type_0"), val = tensor("custom")]; + tensor var_499_pad_0 = const()[name = tensor("op_499_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(28894208))), lut = tensor(BLOBFILE(path 
= tensor("@model_path/weights/weight.bin"), offset = tensor(29201472))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(29201664)))]; + tensor var_499_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_497, groups = var_282, pad = var_499_pad_0, pad_type = var_499_pad_type_0, strides = var_495, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_63_cast)[name = tensor("op_499_cast")]; + tensor inputs_5_cast = add(x = var_499_cast, y = inputs_3_cast)[name = tensor("inputs_5_cast")]; + tensor var_503 = const()[name = tensor("op_503"), val = tensor([1])]; + tensor channels_mean_5_cast = reduce_mean(axes = var_503, keep_dims = var_277, x = inputs_5_cast)[name = tensor("channels_mean_5_cast")]; + tensor zero_mean_5_cast = sub(x = inputs_5_cast, y = channels_mean_5_cast)[name = tensor("zero_mean_5_cast")]; + tensor zero_mean_sq_5_cast = mul(x = zero_mean_5_cast, y = zero_mean_5_cast)[name = tensor("zero_mean_sq_5_cast")]; + tensor var_507 = const()[name = tensor("op_507"), val = tensor([1])]; + tensor var_508_cast = reduce_mean(axes = var_507, keep_dims = var_277, x = zero_mean_sq_5_cast)[name = tensor("op_508_cast")]; + tensor var_509_to_fp16 = const()[name = tensor("op_509_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_510_cast = add(x = var_508_cast, y = var_509_to_fp16)[name = tensor("op_510_cast")]; + tensor denom_5_epsilon_0_to_fp16 = const()[name = tensor("denom_5_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_5_cast = rsqrt(epsilon = denom_5_epsilon_0_to_fp16, x = var_510_cast)[name = tensor("denom_5_cast")]; + tensor out_5_cast = mul(x = zero_mean_5_cast, y = denom_5_cast)[name = tensor("out_5_cast")]; + tensor var_514_to_fp16 = const()[name = tensor("op_514_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(29203008)))]; + tensor var_515_cast = add(x = out_5_cast, y = var_514_to_fp16)[name = tensor("op_515_cast")]; + tensor var_517_to_fp16 = const()[name = tensor("op_517_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(29204352)))]; + tensor input_65_cast = mul(x = var_515_cast, y = var_517_to_fp16)[name = tensor("input_65_cast")]; + tensor var_525 = const()[name = tensor("op_525"), val = tensor([1, 1])]; + tensor var_527 = const()[name = tensor("op_527"), val = tensor([1, 1])]; + tensor var_529_pad_type_0 = const()[name = tensor("op_529_pad_type_0"), val = tensor("custom")]; + tensor var_529_pad_0 = const()[name = tensor("op_529_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(29205696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(32482560))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor 
down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(32483136)))]; + tensor var_529_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_527, groups = var_282, pad = var_529_pad_0, pad_type = var_529_pad_type_0, strides = var_525, weight = down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_65_cast)[name = tensor("op_529_cast")]; + tensor var_530_split_sizes_0 = const()[name = tensor("op_530_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_530_axis_0 = const()[name = tensor("op_530_axis_0"), val = tensor(1)]; + tensor var_530_cast_0, tensor var_530_cast_1 = split(axis = var_530_axis_0, split_sizes = var_530_split_sizes_0, x = var_529_cast)[name = tensor("op_530_cast")]; + tensor var_532_mode_0 = const()[name = tensor("op_532_mode_0"), val = tensor("EXACT")]; + tensor var_532_cast = gelu(mode = var_532_mode_0, x = var_530_cast_1)[name = tensor("op_532_cast")]; + tensor input_67_cast = mul(x = var_530_cast_0, y = var_532_cast)[name = tensor("input_67_cast")]; + tensor var_536 = const()[name = tensor("op_536"), val = tensor([1, 1])]; + tensor var_538 = const()[name = tensor("op_538"), val = tensor([1, 1])]; + tensor var_540_pad_type_0 = const()[name = tensor("op_540_pad_type_0"), val = tensor("custom")]; + tensor var_540_pad_0 = const()[name = tensor("op_540_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(32493440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34131904))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34132480)))]; + tensor var_540_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_538, groups = var_282, pad = var_540_pad_0, pad_type = var_540_pad_type_0, strides = var_536, weight = down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_67_cast)[name = tensor("op_540_cast")]; + tensor inputs_7_cast = add(x = var_540_cast, y = inputs_5_cast)[name = tensor("inputs_7_cast")]; + tensor var_550 = const()[name = tensor("op_550"), val = tensor([1])]; + tensor channels_mean_7_cast = reduce_mean(axes = var_550, keep_dims = var_277, x = inputs_7_cast)[name = tensor("channels_mean_7_cast")]; + tensor zero_mean_7_cast = sub(x = inputs_7_cast, y = channels_mean_7_cast)[name = tensor("zero_mean_7_cast")]; + tensor zero_mean_sq_7_cast = mul(x = zero_mean_7_cast, y = zero_mean_7_cast)[name = tensor("zero_mean_sq_7_cast")]; + tensor var_554 = const()[name = tensor("op_554"), val = tensor([1])]; + tensor var_555_cast = reduce_mean(axes = var_554, keep_dims = var_277, x = zero_mean_sq_7_cast)[name = tensor("op_555_cast")]; + tensor var_556_to_fp16 = const()[name = tensor("op_556_to_fp16"), val 
= tensor(0x1.5p-17)]; + tensor var_557_cast = add(x = var_555_cast, y = var_556_to_fp16)[name = tensor("op_557_cast")]; + tensor denom_7_epsilon_0_to_fp16 = const()[name = tensor("denom_7_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_7_cast = rsqrt(epsilon = denom_7_epsilon_0_to_fp16, x = var_557_cast)[name = tensor("denom_7_cast")]; + tensor out_7_cast = mul(x = zero_mean_7_cast, y = denom_7_cast)[name = tensor("out_7_cast")]; + tensor var_561_to_fp16 = const()[name = tensor("op_561_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34133824)))]; + tensor var_562_cast = add(x = out_7_cast, y = var_561_to_fp16)[name = tensor("op_562_cast")]; + tensor var_564_to_fp16 = const()[name = tensor("op_564_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34135168)))]; + tensor hidden_states_23_cast = mul(x = var_562_cast, y = var_564_to_fp16)[name = tensor("hidden_states_23_cast")]; + tensor var_571 = const()[name = tensor("op_571"), val = tensor([1, 1])]; + tensor var_573 = const()[name = tensor("op_573"), val = tensor([1, 1])]; + tensor q_5_pad_type_0 = const()[name = tensor("q_5_pad_type_0"), val = tensor("custom")]; + tensor q_5_pad_0 = const()[name = tensor("q_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34136512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34443776))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_5_cast = conv(dilations = var_573, groups = var_282, pad = q_5_pad_0, pad_type = q_5_pad_type_0, strides = var_571, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_23_cast)[name = tensor("q_5_cast")]; + tensor var_577 = const()[name = tensor("op_577"), val = tensor([1, 1])]; + tensor var_579 = const()[name = tensor("op_579"), val = tensor([1, 1])]; + tensor k_5_pad_type_0 = const()[name = tensor("k_5_pad_type_0"), val = tensor("custom")]; + tensor k_5_pad_0 = const()[name = tensor("k_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34443968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34751232))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_5_cast = conv(dilations = var_579, groups = var_282, pad = k_5_pad_0, pad_type = k_5_pad_type_0, strides = var_577, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_23_cast)[name = tensor("k_5_cast")]; + tensor var_583 = const()[name = tensor("op_583"), val = tensor([1, 1])]; + tensor var_585 = const()[name = tensor("op_585"), val = tensor([1, 1])]; + tensor v_5_pad_type_0 = const()[name = tensor("v_5_pad_type_0"), val = tensor("custom")]; + tensor v_5_pad_0 = const()[name = tensor("v_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized 
= constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(34751424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35161088))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_5_cast = conv(dilations = var_585, groups = var_282, pad = v_5_pad_0, pad_type = v_5_pad_type_0, strides = var_583, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_23_cast)[name = tensor("v_5_cast")]; + tensor var_589 = const()[name = tensor("op_589"), val = tensor([2, 10, 64, -1])]; + tensor var_590_cast = reshape(shape = var_589, x = q_5_cast)[name = tensor("op_590_cast")]; + tensor var_591 = const()[name = tensor("op_591"), val = tensor([2, 10, 64, -1])]; + tensor var_592_cast = reshape(shape = var_591, x = k_5_cast)[name = tensor("op_592_cast")]; + tensor var_593 = const()[name = tensor("op_593"), val = tensor([2, 10, 64, -1])]; + tensor var_594_cast = reshape(shape = var_593, x = v_5_cast)[name = tensor("op_594_cast")]; + tensor attn_weights_9_transpose_x_0 = const()[name = tensor("attn_weights_9_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_9_transpose_y_0 = const()[name = tensor("attn_weights_9_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_9_cast = matmul(transpose_x = attn_weights_9_transpose_x_0, transpose_y = attn_weights_9_transpose_y_0, x = var_590_cast, y = var_592_cast)[name = tensor("attn_weights_9_cast")]; + tensor attn_weights_11_cast = mul(x = attn_weights_9_cast, y = var_273_to_fp16)[name = tensor("attn_weights_11_cast")]; + tensor var_598_cast = softmax(axis = var_266, x = attn_weights_11_cast)[name = tensor("op_598_cast")]; + tensor attn_5_transpose_x_0 = const()[name = tensor("attn_5_transpose_x_0"), val = tensor(false)]; + tensor attn_5_transpose_y_0 = const()[name = tensor("attn_5_transpose_y_0"), val = tensor(true)]; + tensor attn_5_cast = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = var_594_cast, y = var_598_cast)[name = tensor("attn_5_cast")]; + tensor var_602 = const()[name = tensor("op_602"), val = tensor([2, 640, 1, -1])]; + tensor input_69_cast = reshape(shape = var_602, x = attn_5_cast)[name = tensor("input_69_cast")]; + tensor var_607 = const()[name = tensor("op_607"), val = tensor([1, 1])]; + tensor var_609 = const()[name = tensor("op_609"), val = tensor([1, 1])]; + tensor var_611_pad_type_0 = const()[name = tensor("op_611_pad_type_0"), val = tensor("custom")]; + tensor var_611_pad_0 = const()[name = tensor("op_611_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35161664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35571328))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35571904)))]; + tensor var_611_cast = conv(bias = 
down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_609, groups = var_282, pad = var_611_pad_0, pad_type = var_611_pad_type_0, strides = var_607, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_69_cast)[name = tensor("op_611_cast")]; + tensor inputs_9_cast = add(x = var_611_cast, y = inputs_7_cast)[name = tensor("inputs_9_cast")]; + tensor var_615 = const()[name = tensor("op_615"), val = tensor([1])]; + tensor channels_mean_9_cast = reduce_mean(axes = var_615, keep_dims = var_277, x = inputs_9_cast)[name = tensor("channels_mean_9_cast")]; + tensor zero_mean_9_cast = sub(x = inputs_9_cast, y = channels_mean_9_cast)[name = tensor("zero_mean_9_cast")]; + tensor zero_mean_sq_9_cast = mul(x = zero_mean_9_cast, y = zero_mean_9_cast)[name = tensor("zero_mean_sq_9_cast")]; + tensor var_619 = const()[name = tensor("op_619"), val = tensor([1])]; + tensor var_620_cast = reduce_mean(axes = var_619, keep_dims = var_277, x = zero_mean_sq_9_cast)[name = tensor("op_620_cast")]; + tensor var_621_to_fp16 = const()[name = tensor("op_621_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_622_cast = add(x = var_620_cast, y = var_621_to_fp16)[name = tensor("op_622_cast")]; + tensor denom_9_epsilon_0_to_fp16 = const()[name = tensor("denom_9_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_9_cast = rsqrt(epsilon = denom_9_epsilon_0_to_fp16, x = var_622_cast)[name = tensor("denom_9_cast")]; + tensor out_9_cast = mul(x = zero_mean_9_cast, y = denom_9_cast)[name = tensor("out_9_cast")]; + tensor var_626_to_fp16 = const()[name = tensor("op_626_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35573248)))]; + tensor var_627_cast = add(x = out_9_cast, y = var_626_to_fp16)[name = tensor("op_627_cast")]; + tensor var_629_to_fp16 = const()[name = tensor("op_629_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35574592)))]; + tensor hidden_states_25_cast = mul(x = var_627_cast, y = var_629_to_fp16)[name = tensor("hidden_states_25_cast")]; + tensor var_636 = const()[name = tensor("op_636"), val = tensor([1, 1])]; + tensor var_638 = const()[name = tensor("op_638"), val = tensor([1, 1])]; + tensor q_7_pad_type_0 = const()[name = tensor("q_7_pad_type_0"), val = tensor("custom")]; + tensor q_7_pad_0 = const()[name = tensor("q_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35575936))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35883200))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_7_cast = conv(dilations = var_638, groups = var_282, pad = q_7_pad_0, pad_type = q_7_pad_type_0, strides = var_636, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_25_cast)[name = tensor("q_7_cast")]; + tensor var_642 = const()[name = tensor("op_642"), val = tensor([1, 1])]; + tensor var_644 = const()[name = tensor("op_644"), val = tensor([1, 1])]; + tensor k_7_pad_type_0 = const()[name = tensor("k_7_pad_type_0"), val = tensor("custom")]; + tensor k_7_pad_0 = const()[name = tensor("k_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(35883392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36866496))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_7_cast = conv(dilations = var_644, groups = var_282, pad = k_7_pad_0, pad_type = k_7_pad_type_0, strides = var_642, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_7_cast")]; + tensor var_648 = const()[name = tensor("op_648"), val = tensor([1, 1])]; + tensor var_650 = const()[name = tensor("op_650"), val = tensor([1, 1])]; + tensor v_7_pad_type_0 = const()[name = tensor("v_7_pad_type_0"), val = tensor("custom")]; + tensor v_7_pad_0 = const()[name = tensor("v_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(36866688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(37849792))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_7_cast = conv(dilations = var_650, groups = var_282, pad = v_7_pad_0, pad_type = v_7_pad_type_0, strides = var_648, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_7_cast")]; + tensor var_654 = const()[name = tensor("op_654"), val = tensor([2, 10, 64, -1])]; + tensor var_655_cast = reshape(shape = var_654, x = q_7_cast)[name = tensor("op_655_cast")]; + tensor var_656 = const()[name = tensor("op_656"), val = tensor([2, 10, 64, -1])]; + tensor var_657_cast = reshape(shape = var_656, x = k_7_cast)[name = tensor("op_657_cast")]; + tensor var_658 = const()[name = tensor("op_658"), val = tensor([2, 10, 64, -1])]; + tensor var_659_cast = reshape(shape = var_658, x = v_7_cast)[name = tensor("op_659_cast")]; + tensor attn_weights_13_transpose_x_0 = const()[name = tensor("attn_weights_13_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_13_transpose_y_0 = const()[name = tensor("attn_weights_13_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_13_cast = matmul(transpose_x = attn_weights_13_transpose_x_0, transpose_y = attn_weights_13_transpose_y_0, x = var_655_cast, y = var_657_cast)[name = tensor("attn_weights_13_cast")]; + tensor attn_weights_15_cast = mul(x = attn_weights_13_cast, y = var_273_to_fp16)[name = tensor("attn_weights_15_cast")]; + tensor var_663_cast = softmax(axis = var_266, x = attn_weights_15_cast)[name = tensor("op_663_cast")]; + tensor attn_7_transpose_x_0 = const()[name = tensor("attn_7_transpose_x_0"), val = tensor(false)]; + tensor attn_7_transpose_y_0 = const()[name = tensor("attn_7_transpose_y_0"), val = tensor(true)]; + tensor attn_7_cast = matmul(transpose_x = attn_7_transpose_x_0, transpose_y = attn_7_transpose_y_0, x = var_659_cast, y = var_663_cast)[name = tensor("attn_7_cast")]; + tensor var_667 = const()[name = tensor("op_667"), val = tensor([2, 640, 1, -1])]; + tensor input_71_cast = reshape(shape = var_667, x = attn_7_cast)[name = 
tensor("input_71_cast")]; + tensor var_672 = const()[name = tensor("op_672"), val = tensor([1, 1])]; + tensor var_674 = const()[name = tensor("op_674"), val = tensor([1, 1])]; + tensor var_676_pad_type_0 = const()[name = tensor("op_676_pad_type_0"), val = tensor("custom")]; + tensor var_676_pad_0 = const()[name = tensor("op_676_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(37849984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38157248))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38157440)))]; + tensor var_676_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_674, groups = var_282, pad = var_676_pad_0, pad_type = var_676_pad_type_0, strides = var_672, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_71_cast)[name = tensor("op_676_cast")]; + tensor inputs_11_cast = add(x = var_676_cast, y = inputs_9_cast)[name = tensor("inputs_11_cast")]; + tensor var_680 = const()[name = tensor("op_680"), val = tensor([1])]; + tensor channels_mean_11_cast = reduce_mean(axes = var_680, keep_dims = var_277, x = inputs_11_cast)[name = tensor("channels_mean_11_cast")]; + tensor zero_mean_11_cast = sub(x = inputs_11_cast, y = channels_mean_11_cast)[name = tensor("zero_mean_11_cast")]; + tensor zero_mean_sq_11_cast = mul(x = zero_mean_11_cast, y = zero_mean_11_cast)[name = tensor("zero_mean_sq_11_cast")]; + tensor var_684 = const()[name = tensor("op_684"), val = tensor([1])]; + tensor var_685_cast = reduce_mean(axes = var_684, keep_dims = var_277, x = zero_mean_sq_11_cast)[name = tensor("op_685_cast")]; + tensor var_686_to_fp16 = const()[name = tensor("op_686_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_687_cast = add(x = var_685_cast, y = var_686_to_fp16)[name = tensor("op_687_cast")]; + tensor denom_11_epsilon_0_to_fp16 = const()[name = tensor("denom_11_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_11_cast = rsqrt(epsilon = denom_11_epsilon_0_to_fp16, x = var_687_cast)[name = tensor("denom_11_cast")]; + tensor out_11_cast = mul(x = zero_mean_11_cast, y = denom_11_cast)[name = tensor("out_11_cast")]; + tensor var_691_to_fp16 = const()[name = tensor("op_691_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38158784)))]; + tensor var_692_cast = add(x = out_11_cast, y = var_691_to_fp16)[name = tensor("op_692_cast")]; + tensor var_694_to_fp16 = const()[name = tensor("op_694_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38160128)))]; + tensor input_73_cast = mul(x = var_692_cast, y = var_694_to_fp16)[name = tensor("input_73_cast")]; + tensor var_702 = const()[name = tensor("op_702"), val = tensor([1, 1])]; + tensor var_704 = const()[name = tensor("op_704"), val = tensor([1, 1])]; + tensor var_706_pad_type_0 = const()[name = tensor("op_706_pad_type_0"), val = 
tensor("custom")]; + tensor var_706_pad_0 = const()[name = tensor("op_706_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38161472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(41438336))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(41438912)))]; + tensor var_706_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_704, groups = var_282, pad = var_706_pad_0, pad_type = var_706_pad_type_0, strides = var_702, weight = down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_73_cast)[name = tensor("op_706_cast")]; + tensor var_707_split_sizes_0 = const()[name = tensor("op_707_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_707_axis_0 = const()[name = tensor("op_707_axis_0"), val = tensor(1)]; + tensor var_707_cast_0, tensor var_707_cast_1 = split(axis = var_707_axis_0, split_sizes = var_707_split_sizes_0, x = var_706_cast)[name = tensor("op_707_cast")]; + tensor var_709_mode_0 = const()[name = tensor("op_709_mode_0"), val = tensor("EXACT")]; + tensor var_709_cast = gelu(mode = var_709_mode_0, x = var_707_cast_1)[name = tensor("op_709_cast")]; + tensor input_75_cast = mul(x = var_707_cast_0, y = var_709_cast)[name = tensor("input_75_cast")]; + tensor var_713 = const()[name = tensor("op_713"), val = tensor([1, 1])]; + tensor var_715 = const()[name = tensor("op_715"), val = tensor([1, 1])]; + tensor var_717_pad_type_0 = const()[name = tensor("op_717_pad_type_0"), val = tensor("custom")]; + tensor var_717_pad_0 = const()[name = tensor("op_717_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(41449216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43087680))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43088256)))]; + tensor var_717_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_715, groups = var_282, pad = var_717_pad_0, pad_type = var_717_pad_type_0, strides = var_713, weight = down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_75_cast)[name = tensor("op_717_cast")]; + tensor hidden_states_29_cast = add(x = var_717_cast, y = inputs_11_cast)[name = tensor("hidden_states_29_cast")]; + tensor var_719 = const()[name = tensor("op_719"), val = tensor([2, 640, 64, 64])]; + tensor input_77_cast 
= reshape(shape = var_719, x = hidden_states_29_cast)[name = tensor("input_77_cast")]; + tensor var_723 = const()[name = tensor("op_723"), val = tensor([1, 1])]; + tensor var_725 = const()[name = tensor("op_725"), val = tensor([1, 1])]; + tensor hidden_states_31_pad_type_0 = const()[name = tensor("hidden_states_31_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_31_pad_0 = const()[name = tensor("hidden_states_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43089600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43499264))), name = tensor("down_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43499840)))]; + tensor hidden_states_31_cast = conv(bias = down_blocks_1_attentions_0_proj_out_bias_to_fp16, dilations = var_725, groups = var_282, pad = hidden_states_31_pad_0, pad_type = hidden_states_31_pad_type_0, strides = var_723, weight = down_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized, x = input_77_cast)[name = tensor("hidden_states_31_cast")]; + tensor input_79_cast = add(x = hidden_states_31_cast, y = hidden_states_13_cast)[name = tensor("input_79_cast")]; + tensor reshape_28_shape_0 = const()[name = tensor("reshape_28_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_28_cast = reshape(shape = reshape_28_shape_0, x = input_79_cast)[name = tensor("reshape_28_cast")]; + tensor reduce_mean_21_axes_0 = const()[name = tensor("reduce_mean_21_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_21_keep_dims_0 = const()[name = tensor("reduce_mean_21_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_21_cast = reduce_mean(axes = reduce_mean_21_axes_0, keep_dims = reduce_mean_21_keep_dims_0, x = reshape_28_cast)[name = tensor("reduce_mean_21_cast")]; + tensor sub_14_cast = sub(x = reshape_28_cast, y = reduce_mean_21_cast)[name = tensor("sub_14_cast")]; + tensor square_7_cast = square(x = sub_14_cast)[name = tensor("square_7_cast")]; + tensor reduce_mean_23_axes_0 = const()[name = tensor("reduce_mean_23_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_23_keep_dims_0 = const()[name = tensor("reduce_mean_23_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_23_cast = reduce_mean(axes = reduce_mean_23_axes_0, keep_dims = reduce_mean_23_keep_dims_0, x = square_7_cast)[name = tensor("reduce_mean_23_cast")]; + tensor add_14_y_0_to_fp16 = const()[name = tensor("add_14_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_14_cast = add(x = reduce_mean_23_cast, y = add_14_y_0_to_fp16)[name = tensor("add_14_cast")]; + tensor sqrt_7_cast = sqrt(x = add_14_cast)[name = tensor("sqrt_7_cast")]; + tensor real_div_7_cast = real_div(x = sub_14_cast, y = sqrt_7_cast)[name = tensor("real_div_7_cast")]; + tensor reshape_29_shape_0 = const()[name = tensor("reshape_29_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_29_cast = reshape(shape = reshape_29_shape_0, x = real_div_7_cast)[name = tensor("reshape_29_cast")]; + tensor add_15_gamma_0_to_fp16 = const()[name = tensor("add_15_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(43501184)))]; + tensor add_15_beta_0_to_fp16 = const()[name = tensor("add_15_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43502528)))]; + tensor add_15_epsilon_0_to_fp16 = const()[name = tensor("add_15_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_15_cast = batch_norm(beta = add_15_beta_0_to_fp16, epsilon = add_15_epsilon_0_to_fp16, gamma = add_15_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_29_cast)[name = tensor("add_15_cast")]; + tensor input_83_cast = silu(x = add_15_cast)[name = tensor("input_83_cast")]; + tensor var_740 = const()[name = tensor("op_740"), val = tensor([1, 1])]; + tensor var_742 = const()[name = tensor("op_742"), val = tensor([1, 1])]; + tensor hidden_states_33_pad_type_0 = const()[name = tensor("hidden_states_33_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_33_pad_0 = const()[name = tensor("hidden_states_33_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43503872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47190336))), name = tensor("down_blocks_1_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor down_blocks_1_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47190912)))]; + tensor hidden_states_33_cast = conv(bias = down_blocks_1_resnets_1_conv1_bias_to_fp16, dilations = var_742, groups = var_282, pad = hidden_states_33_pad_0, pad_type = hidden_states_33_pad_type_0, strides = var_740, weight = down_blocks_1_resnets_1_conv1_weight_to_fp16_palettized, x = input_83_cast)[name = tensor("hidden_states_33_cast")]; + tensor var_748 = const()[name = tensor("op_748"), val = tensor([1, 1])]; + tensor var_750 = const()[name = tensor("op_750"), val = tensor([1, 1])]; + tensor temb_7_pad_type_0 = const()[name = tensor("temb_7_pad_type_0"), val = tensor("custom")]; + tensor temb_7_pad_0 = const()[name = tensor("temb_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47192256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47806720))), name = tensor("down_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor down_blocks_1_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47806912)))]; + tensor temb_7_cast = conv(bias = down_blocks_1_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_750, groups = var_282, pad = temb_7_pad_0, pad_type = temb_7_pad_type_0, strides = var_748, weight = down_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_7_cast")]; + tensor input_87_cast = add(x = hidden_states_33_cast, y = temb_7_cast)[name = tensor("input_87_cast")]; + tensor reshape_32_shape_0 = const()[name = tensor("reshape_32_shape_0"), 
val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_32_cast = reshape(shape = reshape_32_shape_0, x = input_87_cast)[name = tensor("reshape_32_cast")]; + tensor reduce_mean_24_axes_0 = const()[name = tensor("reduce_mean_24_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_24_keep_dims_0 = const()[name = tensor("reduce_mean_24_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_24_cast = reduce_mean(axes = reduce_mean_24_axes_0, keep_dims = reduce_mean_24_keep_dims_0, x = reshape_32_cast)[name = tensor("reduce_mean_24_cast")]; + tensor sub_16_cast = sub(x = reshape_32_cast, y = reduce_mean_24_cast)[name = tensor("sub_16_cast")]; + tensor square_8_cast = square(x = sub_16_cast)[name = tensor("square_8_cast")]; + tensor reduce_mean_26_axes_0 = const()[name = tensor("reduce_mean_26_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_26_keep_dims_0 = const()[name = tensor("reduce_mean_26_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_26_cast = reduce_mean(axes = reduce_mean_26_axes_0, keep_dims = reduce_mean_26_keep_dims_0, x = square_8_cast)[name = tensor("reduce_mean_26_cast")]; + tensor add_16_y_0_to_fp16 = const()[name = tensor("add_16_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_16_cast = add(x = reduce_mean_26_cast, y = add_16_y_0_to_fp16)[name = tensor("add_16_cast")]; + tensor sqrt_8_cast = sqrt(x = add_16_cast)[name = tensor("sqrt_8_cast")]; + tensor real_div_8_cast = real_div(x = sub_16_cast, y = sqrt_8_cast)[name = tensor("real_div_8_cast")]; + tensor reshape_33_shape_0 = const()[name = tensor("reshape_33_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_33_cast = reshape(shape = reshape_33_shape_0, x = real_div_8_cast)[name = tensor("reshape_33_cast")]; + tensor add_17_gamma_0_to_fp16 = const()[name = tensor("add_17_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47808256)))]; + tensor add_17_beta_0_to_fp16 = const()[name = tensor("add_17_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47809600)))]; + tensor add_17_epsilon_0_to_fp16 = const()[name = tensor("add_17_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_17_cast = batch_norm(beta = add_17_beta_0_to_fp16, epsilon = add_17_epsilon_0_to_fp16, gamma = add_17_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_33_cast)[name = tensor("add_17_cast")]; + tensor input_91_cast = silu(x = add_17_cast)[name = tensor("input_91_cast")]; + tensor var_760 = const()[name = tensor("op_760"), val = tensor([1, 1])]; + tensor var_762 = const()[name = tensor("op_762"), val = tensor([1, 1])]; + tensor hidden_states_35_pad_type_0 = const()[name = tensor("hidden_states_35_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_35_pad_0 = const()[name = tensor("hidden_states_35_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(47810944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51497408))), name = tensor("down_blocks_1_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor down_blocks_1_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(51497984)))]; + tensor hidden_states_35_cast = conv(bias = down_blocks_1_resnets_1_conv2_bias_to_fp16, dilations = var_762, groups = var_282, pad = hidden_states_35_pad_0, pad_type = hidden_states_35_pad_type_0, strides = var_760, weight = down_blocks_1_resnets_1_conv2_weight_to_fp16_palettized, x = input_91_cast)[name = tensor("hidden_states_35_cast")]; + tensor hidden_states_37_cast = add(x = input_79_cast, y = hidden_states_35_cast)[name = tensor("hidden_states_37_cast")]; + tensor reshape_36_shape_0 = const()[name = tensor("reshape_36_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_36_cast = reshape(shape = reshape_36_shape_0, x = hidden_states_37_cast)[name = tensor("reshape_36_cast")]; + tensor reduce_mean_27_axes_0 = const()[name = tensor("reduce_mean_27_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_27_keep_dims_0 = const()[name = tensor("reduce_mean_27_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_27_cast = reduce_mean(axes = reduce_mean_27_axes_0, keep_dims = reduce_mean_27_keep_dims_0, x = reshape_36_cast)[name = tensor("reduce_mean_27_cast")]; + tensor sub_18_cast = sub(x = reshape_36_cast, y = reduce_mean_27_cast)[name = tensor("sub_18_cast")]; + tensor square_9_cast = square(x = sub_18_cast)[name = tensor("square_9_cast")]; + tensor reduce_mean_29_axes_0 = const()[name = tensor("reduce_mean_29_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_29_keep_dims_0 = const()[name = tensor("reduce_mean_29_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_29_cast = reduce_mean(axes = reduce_mean_29_axes_0, keep_dims = reduce_mean_29_keep_dims_0, x = square_9_cast)[name = tensor("reduce_mean_29_cast")]; + tensor add_18_y_0_to_fp16 = const()[name = tensor("add_18_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_18_cast = add(x = reduce_mean_29_cast, y = add_18_y_0_to_fp16)[name = tensor("add_18_cast")]; + tensor sqrt_9_cast = sqrt(x = add_18_cast)[name = tensor("sqrt_9_cast")]; + tensor real_div_9_cast = real_div(x = sub_18_cast, y = sqrt_9_cast)[name = tensor("real_div_9_cast")]; + tensor reshape_37_shape_0 = const()[name = tensor("reshape_37_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_37_cast = reshape(shape = reshape_37_shape_0, x = real_div_9_cast)[name = tensor("reshape_37_cast")]; + tensor add_19_gamma_0_to_fp16 = const()[name = tensor("add_19_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51499328)))]; + tensor add_19_beta_0_to_fp16 = const()[name = tensor("add_19_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51500672)))]; + tensor add_19_epsilon_0_to_fp16 = const()[name = tensor("add_19_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_19_cast = batch_norm(beta = add_19_beta_0_to_fp16, epsilon = add_19_epsilon_0_to_fp16, gamma = add_19_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_37_cast)[name = tensor("add_19_cast")]; + tensor var_784 = const()[name = tensor("op_784"), val = tensor([1, 1])]; + tensor var_786 = const()[name = tensor("op_786"), val = tensor([1, 1])]; + tensor hidden_states_39_pad_type_0 = const()[name = tensor("hidden_states_39_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_39_pad_0 = const()[name = tensor("hidden_states_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path =
tensor("@model_path/weights/weight.bin"), offset = tensor(51502016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51911680))), name = tensor("down_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_proj_in_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51912256)))]; + tensor hidden_states_39_cast = conv(bias = down_blocks_1_attentions_1_proj_in_bias_to_fp16, dilations = var_786, groups = var_282, pad = hidden_states_39_pad_0, pad_type = hidden_states_39_pad_type_0, strides = var_784, weight = down_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized, x = add_19_cast)[name = tensor("hidden_states_39_cast")]; + tensor var_791 = const()[name = tensor("op_791"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_13_cast = reshape(shape = var_791, x = hidden_states_39_cast)[name = tensor("inputs_13_cast")]; + tensor var_801 = const()[name = tensor("op_801"), val = tensor([1])]; + tensor channels_mean_13_cast = reduce_mean(axes = var_801, keep_dims = var_277, x = inputs_13_cast)[name = tensor("channels_mean_13_cast")]; + tensor zero_mean_13_cast = sub(x = inputs_13_cast, y = channels_mean_13_cast)[name = tensor("zero_mean_13_cast")]; + tensor zero_mean_sq_13_cast = mul(x = zero_mean_13_cast, y = zero_mean_13_cast)[name = tensor("zero_mean_sq_13_cast")]; + tensor var_805 = const()[name = tensor("op_805"), val = tensor([1])]; + tensor var_806_cast = reduce_mean(axes = var_805, keep_dims = var_277, x = zero_mean_sq_13_cast)[name = tensor("op_806_cast")]; + tensor var_807_to_fp16 = const()[name = tensor("op_807_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_808_cast = add(x = var_806_cast, y = var_807_to_fp16)[name = tensor("op_808_cast")]; + tensor denom_13_epsilon_0_to_fp16 = const()[name = tensor("denom_13_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_13_cast = rsqrt(epsilon = denom_13_epsilon_0_to_fp16, x = var_808_cast)[name = tensor("denom_13_cast")]; + tensor out_13_cast = mul(x = zero_mean_13_cast, y = denom_13_cast)[name = tensor("out_13_cast")]; + tensor var_812_to_fp16 = const()[name = tensor("op_812_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51913600)))]; + tensor var_813_cast = add(x = out_13_cast, y = var_812_to_fp16)[name = tensor("op_813_cast")]; + tensor var_815_to_fp16 = const()[name = tensor("op_815_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51914944)))]; + tensor hidden_states_41_cast = mul(x = var_813_cast, y = var_815_to_fp16)[name = tensor("hidden_states_41_cast")]; + tensor var_822 = const()[name = tensor("op_822"), val = tensor([1, 1])]; + tensor var_824 = const()[name = tensor("op_824"), val = tensor([1, 1])]; + tensor q_9_pad_type_0 = const()[name = tensor("q_9_pad_type_0"), val = tensor("custom")]; + tensor q_9_pad_0 = const()[name = tensor("q_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51916288))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52223552))), name = 
tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_9_cast = conv(dilations = var_824, groups = var_282, pad = q_9_pad_0, pad_type = q_9_pad_type_0, strides = var_822, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_41_cast)[name = tensor("q_9_cast")]; + tensor var_828 = const()[name = tensor("op_828"), val = tensor([1, 1])]; + tensor var_830 = const()[name = tensor("op_830"), val = tensor([1, 1])]; + tensor k_9_pad_type_0 = const()[name = tensor("k_9_pad_type_0"), val = tensor("custom")]; + tensor k_9_pad_0 = const()[name = tensor("k_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52223744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52531008))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_9_cast = conv(dilations = var_830, groups = var_282, pad = k_9_pad_0, pad_type = k_9_pad_type_0, strides = var_828, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_41_cast)[name = tensor("k_9_cast")]; + tensor var_834 = const()[name = tensor("op_834"), val = tensor([1, 1])]; + tensor var_836 = const()[name = tensor("op_836"), val = tensor([1, 1])]; + tensor v_9_pad_type_0 = const()[name = tensor("v_9_pad_type_0"), val = tensor("custom")]; + tensor v_9_pad_0 = const()[name = tensor("v_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52531200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52838464))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_9_cast = conv(dilations = var_836, groups = var_282, pad = v_9_pad_0, pad_type = v_9_pad_type_0, strides = var_834, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_41_cast)[name = tensor("v_9_cast")]; + tensor var_840 = const()[name = tensor("op_840"), val = tensor([2, 10, 64, -1])]; + tensor var_841_cast = reshape(shape = var_840, x = q_9_cast)[name = tensor("op_841_cast")]; + tensor var_842 = const()[name = tensor("op_842"), val = tensor([2, 10, 64, -1])]; + tensor var_843_cast = reshape(shape = var_842, x = k_9_cast)[name = tensor("op_843_cast")]; + tensor var_844 = const()[name = tensor("op_844"), val = tensor([2, 10, 64, -1])]; + tensor var_845_cast = reshape(shape = var_844, x = v_9_cast)[name = tensor("op_845_cast")]; + tensor attn_weights_17_transpose_x_0 = const()[name = tensor("attn_weights_17_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_17_transpose_y_0 = const()[name = tensor("attn_weights_17_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_17_cast = matmul(transpose_x = attn_weights_17_transpose_x_0, transpose_y = attn_weights_17_transpose_y_0, x = var_841_cast, y = var_843_cast)[name = tensor("attn_weights_17_cast")]; + tensor attn_weights_19_cast = 
mul(x = attn_weights_17_cast, y = var_273_to_fp16)[name = tensor("attn_weights_19_cast")]; + tensor var_849_cast = softmax(axis = var_266, x = attn_weights_19_cast)[name = tensor("op_849_cast")]; + tensor attn_9_transpose_x_0 = const()[name = tensor("attn_9_transpose_x_0"), val = tensor(false)]; + tensor attn_9_transpose_y_0 = const()[name = tensor("attn_9_transpose_y_0"), val = tensor(true)]; + tensor attn_9_cast = matmul(transpose_x = attn_9_transpose_x_0, transpose_y = attn_9_transpose_y_0, x = var_845_cast, y = var_849_cast)[name = tensor("attn_9_cast")]; + tensor var_853 = const()[name = tensor("op_853"), val = tensor([2, 640, 1, -1])]; + tensor input_95_cast = reshape(shape = var_853, x = attn_9_cast)[name = tensor("input_95_cast")]; + tensor var_858 = const()[name = tensor("op_858"), val = tensor([1, 1])]; + tensor var_860 = const()[name = tensor("op_860"), val = tensor([1, 1])]; + tensor var_862_pad_type_0 = const()[name = tensor("op_862_pad_type_0"), val = tensor("custom")]; + tensor var_862_pad_0 = const()[name = tensor("op_862_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(52838656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53248320))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53248896)))]; + tensor var_862_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_860, groups = var_282, pad = var_862_pad_0, pad_type = var_862_pad_type_0, strides = var_858, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_95_cast)[name = tensor("op_862_cast")]; + tensor inputs_15_cast = add(x = var_862_cast, y = inputs_13_cast)[name = tensor("inputs_15_cast")]; + tensor var_866 = const()[name = tensor("op_866"), val = tensor([1])]; + tensor channels_mean_15_cast = reduce_mean(axes = var_866, keep_dims = var_277, x = inputs_15_cast)[name = tensor("channels_mean_15_cast")]; + tensor zero_mean_15_cast = sub(x = inputs_15_cast, y = channels_mean_15_cast)[name = tensor("zero_mean_15_cast")]; + tensor zero_mean_sq_15_cast = mul(x = zero_mean_15_cast, y = zero_mean_15_cast)[name = tensor("zero_mean_sq_15_cast")]; + tensor var_870 = const()[name = tensor("op_870"), val = tensor([1])]; + tensor var_871_cast = reduce_mean(axes = var_870, keep_dims = var_277, x = zero_mean_sq_15_cast)[name = tensor("op_871_cast")]; + tensor var_872_to_fp16 = const()[name = tensor("op_872_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_873_cast = add(x = var_871_cast, y = var_872_to_fp16)[name = tensor("op_873_cast")]; + tensor denom_15_epsilon_0_to_fp16 = const()[name = tensor("denom_15_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_15_cast = rsqrt(epsilon = denom_15_epsilon_0_to_fp16, x = var_873_cast)[name = tensor("denom_15_cast")]; + tensor out_15_cast = mul(x = zero_mean_15_cast, y = denom_15_cast)[name = tensor("out_15_cast")]; + tensor var_877_to_fp16 = const()[name = 
tensor("op_877_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53250240)))]; + tensor var_878_cast = add(x = out_15_cast, y = var_877_to_fp16)[name = tensor("op_878_cast")]; + tensor var_880_to_fp16 = const()[name = tensor("op_880_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53251584)))]; + tensor hidden_states_43_cast = mul(x = var_878_cast, y = var_880_to_fp16)[name = tensor("hidden_states_43_cast")]; + tensor var_887 = const()[name = tensor("op_887"), val = tensor([1, 1])]; + tensor var_889 = const()[name = tensor("op_889"), val = tensor([1, 1])]; + tensor q_11_pad_type_0 = const()[name = tensor("q_11_pad_type_0"), val = tensor("custom")]; + tensor q_11_pad_0 = const()[name = tensor("q_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53252928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53457792))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_11_cast = conv(dilations = var_889, groups = var_282, pad = q_11_pad_0, pad_type = q_11_pad_type_0, strides = var_887, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_43_cast)[name = tensor("q_11_cast")]; + tensor var_893 = const()[name = tensor("op_893"), val = tensor([1, 1])]; + tensor var_895 = const()[name = tensor("op_895"), val = tensor([1, 1])]; + tensor k_11_pad_type_0 = const()[name = tensor("k_11_pad_type_0"), val = tensor("custom")]; + tensor k_11_pad_0 = const()[name = tensor("k_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(53457920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(54113344))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_11_cast = conv(dilations = var_895, groups = var_282, pad = k_11_pad_0, pad_type = k_11_pad_type_0, strides = var_893, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_11_cast")]; + tensor var_899 = const()[name = tensor("op_899"), val = tensor([1, 1])]; + tensor var_901 = const()[name = tensor("op_901"), val = tensor([1, 1])]; + tensor v_11_pad_type_0 = const()[name = tensor("v_11_pad_type_0"), val = tensor("custom")]; + tensor v_11_pad_0 = const()[name = tensor("v_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(54113472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55096576))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_11_cast = conv(dilations = var_901, groups = var_282, pad = v_11_pad_0, pad_type = 
v_11_pad_type_0, strides = var_899, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_11_cast")]; + tensor var_905 = const()[name = tensor("op_905"), val = tensor([2, 10, 64, -1])]; + tensor var_906_cast = reshape(shape = var_905, x = q_11_cast)[name = tensor("op_906_cast")]; + tensor var_907 = const()[name = tensor("op_907"), val = tensor([2, 10, 64, -1])]; + tensor var_908_cast = reshape(shape = var_907, x = k_11_cast)[name = tensor("op_908_cast")]; + tensor var_909 = const()[name = tensor("op_909"), val = tensor([2, 10, 64, -1])]; + tensor var_910_cast = reshape(shape = var_909, x = v_11_cast)[name = tensor("op_910_cast")]; + tensor attn_weights_21_transpose_x_0 = const()[name = tensor("attn_weights_21_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_21_transpose_y_0 = const()[name = tensor("attn_weights_21_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_21_cast = matmul(transpose_x = attn_weights_21_transpose_x_0, transpose_y = attn_weights_21_transpose_y_0, x = var_906_cast, y = var_908_cast)[name = tensor("attn_weights_21_cast")]; + tensor attn_weights_23_cast = mul(x = attn_weights_21_cast, y = var_273_to_fp16)[name = tensor("attn_weights_23_cast")]; + tensor var_914_cast = softmax(axis = var_266, x = attn_weights_23_cast)[name = tensor("op_914_cast")]; + tensor attn_11_transpose_x_0 = const()[name = tensor("attn_11_transpose_x_0"), val = tensor(false)]; + tensor attn_11_transpose_y_0 = const()[name = tensor("attn_11_transpose_y_0"), val = tensor(true)]; + tensor attn_11_cast = matmul(transpose_x = attn_11_transpose_x_0, transpose_y = attn_11_transpose_y_0, x = var_910_cast, y = var_914_cast)[name = tensor("attn_11_cast")]; + tensor var_918 = const()[name = tensor("op_918"), val = tensor([2, 640, 1, -1])]; + tensor input_97_cast = reshape(shape = var_918, x = attn_11_cast)[name = tensor("input_97_cast")]; + tensor var_923 = const()[name = tensor("op_923"), val = tensor([1, 1])]; + tensor var_925 = const()[name = tensor("op_925"), val = tensor([1, 1])]; + tensor var_927_pad_type_0 = const()[name = tensor("op_927_pad_type_0"), val = tensor("custom")]; + tensor var_927_pad_0 = const()[name = tensor("op_927_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55096768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55404032))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55404224)))]; + tensor var_927_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_925, groups = var_282, pad = var_927_pad_0, pad_type = var_927_pad_type_0, strides = var_923, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_97_cast)[name = tensor("op_927_cast")]; + tensor inputs_17_cast = add(x = var_927_cast, y = inputs_15_cast)[name = tensor("inputs_17_cast")]; + tensor var_931 = 
const()[name = tensor("op_931"), val = tensor([1])]; + tensor channels_mean_17_cast = reduce_mean(axes = var_931, keep_dims = var_277, x = inputs_17_cast)[name = tensor("channels_mean_17_cast")]; + tensor zero_mean_17_cast = sub(x = inputs_17_cast, y = channels_mean_17_cast)[name = tensor("zero_mean_17_cast")]; + tensor zero_mean_sq_17_cast = mul(x = zero_mean_17_cast, y = zero_mean_17_cast)[name = tensor("zero_mean_sq_17_cast")]; + tensor var_935 = const()[name = tensor("op_935"), val = tensor([1])]; + tensor var_936_cast = reduce_mean(axes = var_935, keep_dims = var_277, x = zero_mean_sq_17_cast)[name = tensor("op_936_cast")]; + tensor var_937_to_fp16 = const()[name = tensor("op_937_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_938_cast = add(x = var_936_cast, y = var_937_to_fp16)[name = tensor("op_938_cast")]; + tensor denom_17_epsilon_0_to_fp16 = const()[name = tensor("denom_17_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_17_cast = rsqrt(epsilon = denom_17_epsilon_0_to_fp16, x = var_938_cast)[name = tensor("denom_17_cast")]; + tensor out_17_cast = mul(x = zero_mean_17_cast, y = denom_17_cast)[name = tensor("out_17_cast")]; + tensor var_942_to_fp16 = const()[name = tensor("op_942_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55405568)))]; + tensor var_943_cast = add(x = out_17_cast, y = var_942_to_fp16)[name = tensor("op_943_cast")]; + tensor var_945_to_fp16 = const()[name = tensor("op_945_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55406912)))]; + tensor input_99_cast = mul(x = var_943_cast, y = var_945_to_fp16)[name = tensor("input_99_cast")]; + tensor var_953 = const()[name = tensor("op_953"), val = tensor([1, 1])]; + tensor var_955 = const()[name = tensor("op_955"), val = tensor([1, 1])]; + tensor var_957_pad_type_0 = const()[name = tensor("op_957_pad_type_0"), val = tensor("custom")]; + tensor var_957_pad_0 = const()[name = tensor("op_957_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55408256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(58685120))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(58685696)))]; + tensor var_957_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_955, groups = var_282, pad = var_957_pad_0, pad_type = var_957_pad_type_0, strides = var_953, weight = down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_99_cast)[name = tensor("op_957_cast")]; + tensor var_958_split_sizes_0 = const()[name = tensor("op_958_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_958_axis_0 = const()[name = tensor("op_958_axis_0"), val = tensor(1)]; + tensor var_958_cast_0, tensor var_958_cast_1 = split(axis = var_958_axis_0, split_sizes = var_958_split_sizes_0, x = var_957_cast)[name = tensor("op_958_cast")]; + tensor var_960_mode_0 = 
const()[name = tensor("op_960_mode_0"), val = tensor("EXACT")]; + tensor var_960_cast = gelu(mode = var_960_mode_0, x = var_958_cast_1)[name = tensor("op_960_cast")]; + tensor input_101_cast = mul(x = var_958_cast_0, y = var_960_cast)[name = tensor("input_101_cast")]; + tensor var_964 = const()[name = tensor("op_964"), val = tensor([1, 1])]; + tensor var_966 = const()[name = tensor("op_966"), val = tensor([1, 1])]; + tensor var_968_pad_type_0 = const()[name = tensor("op_968_pad_type_0"), val = tensor("custom")]; + tensor var_968_pad_0 = const()[name = tensor("op_968_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(58696000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60334464))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60335040)))]; + tensor var_968_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_966, groups = var_282, pad = var_968_pad_0, pad_type = var_968_pad_type_0, strides = var_964, weight = down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_101_cast)[name = tensor("op_968_cast")]; + tensor inputs_19_cast = add(x = var_968_cast, y = inputs_17_cast)[name = tensor("inputs_19_cast")]; + tensor var_978 = const()[name = tensor("op_978"), val = tensor([1])]; + tensor channels_mean_19_cast = reduce_mean(axes = var_978, keep_dims = var_277, x = inputs_19_cast)[name = tensor("channels_mean_19_cast")]; + tensor zero_mean_19_cast = sub(x = inputs_19_cast, y = channels_mean_19_cast)[name = tensor("zero_mean_19_cast")]; + tensor zero_mean_sq_19_cast = mul(x = zero_mean_19_cast, y = zero_mean_19_cast)[name = tensor("zero_mean_sq_19_cast")]; + tensor var_982 = const()[name = tensor("op_982"), val = tensor([1])]; + tensor var_983_cast = reduce_mean(axes = var_982, keep_dims = var_277, x = zero_mean_sq_19_cast)[name = tensor("op_983_cast")]; + tensor var_984_to_fp16 = const()[name = tensor("op_984_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_985_cast = add(x = var_983_cast, y = var_984_to_fp16)[name = tensor("op_985_cast")]; + tensor denom_19_epsilon_0_to_fp16 = const()[name = tensor("denom_19_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_19_cast = rsqrt(epsilon = denom_19_epsilon_0_to_fp16, x = var_985_cast)[name = tensor("denom_19_cast")]; + tensor out_19_cast = mul(x = zero_mean_19_cast, y = denom_19_cast)[name = tensor("out_19_cast")]; + tensor var_989_to_fp16 = const()[name = tensor("op_989_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60336384)))]; + tensor var_990_cast = add(x = out_19_cast, y = var_989_to_fp16)[name = tensor("op_990_cast")]; + tensor var_992_to_fp16 = const()[name = tensor("op_992_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60337728)))]; + tensor hidden_states_47_cast = mul(x = var_990_cast, y = var_992_to_fp16)[name = tensor("hidden_states_47_cast")]; + 
tensor var_999 = const()[name = tensor("op_999"), val = tensor([1, 1])]; + tensor var_1001 = const()[name = tensor("op_1001"), val = tensor([1, 1])]; + tensor q_13_pad_type_0 = const()[name = tensor("q_13_pad_type_0"), val = tensor("custom")]; + tensor q_13_pad_0 = const()[name = tensor("q_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60339072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60646336))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_13_cast = conv(dilations = var_1001, groups = var_282, pad = q_13_pad_0, pad_type = q_13_pad_type_0, strides = var_999, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_47_cast)[name = tensor("q_13_cast")]; + tensor var_1005 = const()[name = tensor("op_1005"), val = tensor([1, 1])]; + tensor var_1007 = const()[name = tensor("op_1007"), val = tensor([1, 1])]; + tensor k_13_pad_type_0 = const()[name = tensor("k_13_pad_type_0"), val = tensor("custom")]; + tensor k_13_pad_0 = const()[name = tensor("k_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60646528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60953792))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_13_cast = conv(dilations = var_1007, groups = var_282, pad = k_13_pad_0, pad_type = k_13_pad_type_0, strides = var_1005, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_47_cast)[name = tensor("k_13_cast")]; + tensor var_1011 = const()[name = tensor("op_1011"), val = tensor([1, 1])]; + tensor var_1013 = const()[name = tensor("op_1013"), val = tensor([1, 1])]; + tensor v_13_pad_type_0 = const()[name = tensor("v_13_pad_type_0"), val = tensor("custom")]; + tensor v_13_pad_0 = const()[name = tensor("v_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60953984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61261248))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_13_cast = conv(dilations = var_1013, groups = var_282, pad = v_13_pad_0, pad_type = v_13_pad_type_0, strides = var_1011, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_47_cast)[name = tensor("v_13_cast")]; + tensor var_1017 = const()[name = tensor("op_1017"), val = tensor([2, 10, 64, -1])]; + tensor var_1018_cast = reshape(shape = var_1017, x = q_13_cast)[name = tensor("op_1018_cast")]; + tensor var_1019 = const()[name = tensor("op_1019"), val = tensor([2, 10, 64, -1])]; + tensor var_1020_cast = reshape(shape = 
var_1019, x = k_13_cast)[name = tensor("op_1020_cast")]; + tensor var_1021 = const()[name = tensor("op_1021"), val = tensor([2, 10, 64, -1])]; + tensor var_1022_cast = reshape(shape = var_1021, x = v_13_cast)[name = tensor("op_1022_cast")]; + tensor attn_weights_25_transpose_x_0 = const()[name = tensor("attn_weights_25_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_25_transpose_y_0 = const()[name = tensor("attn_weights_25_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_25_cast = matmul(transpose_x = attn_weights_25_transpose_x_0, transpose_y = attn_weights_25_transpose_y_0, x = var_1018_cast, y = var_1020_cast)[name = tensor("attn_weights_25_cast")]; + tensor attn_weights_27_cast = mul(x = attn_weights_25_cast, y = var_273_to_fp16)[name = tensor("attn_weights_27_cast")]; + tensor var_1026_cast = softmax(axis = var_266, x = attn_weights_27_cast)[name = tensor("op_1026_cast")]; + tensor attn_13_transpose_x_0 = const()[name = tensor("attn_13_transpose_x_0"), val = tensor(false)]; + tensor attn_13_transpose_y_0 = const()[name = tensor("attn_13_transpose_y_0"), val = tensor(true)]; + tensor attn_13_cast = matmul(transpose_x = attn_13_transpose_x_0, transpose_y = attn_13_transpose_y_0, x = var_1022_cast, y = var_1026_cast)[name = tensor("attn_13_cast")]; + tensor var_1030 = const()[name = tensor("op_1030"), val = tensor([2, 640, 1, -1])]; + tensor input_103_cast = reshape(shape = var_1030, x = attn_13_cast)[name = tensor("input_103_cast")]; + tensor var_1035 = const()[name = tensor("op_1035"), val = tensor([1, 1])]; + tensor var_1037 = const()[name = tensor("op_1037"), val = tensor([1, 1])]; + tensor var_1039_pad_type_0 = const()[name = tensor("op_1039_pad_type_0"), val = tensor("custom")]; + tensor var_1039_pad_0 = const()[name = tensor("op_1039_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61261440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61568704))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61568896)))]; + tensor var_1039_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_1037, groups = var_282, pad = var_1039_pad_0, pad_type = var_1039_pad_type_0, strides = var_1035, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_103_cast)[name = tensor("op_1039_cast")]; + tensor inputs_21_cast = add(x = var_1039_cast, y = inputs_19_cast)[name = tensor("inputs_21_cast")]; + tensor var_1043 = const()[name = tensor("op_1043"), val = tensor([1])]; + tensor channels_mean_21_cast = reduce_mean(axes = var_1043, keep_dims = var_277, x = inputs_21_cast)[name = tensor("channels_mean_21_cast")]; + tensor zero_mean_21_cast = sub(x = inputs_21_cast, y = channels_mean_21_cast)[name = tensor("zero_mean_21_cast")]; + tensor zero_mean_sq_21_cast = mul(x = zero_mean_21_cast, y = zero_mean_21_cast)[name = tensor("zero_mean_sq_21_cast")]; + tensor var_1047 = 
const()[name = tensor("op_1047"), val = tensor([1])]; + tensor var_1048_cast = reduce_mean(axes = var_1047, keep_dims = var_277, x = zero_mean_sq_21_cast)[name = tensor("op_1048_cast")]; + tensor var_1049_to_fp16 = const()[name = tensor("op_1049_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1050_cast = add(x = var_1048_cast, y = var_1049_to_fp16)[name = tensor("op_1050_cast")]; + tensor denom_21_epsilon_0_to_fp16 = const()[name = tensor("denom_21_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_21_cast = rsqrt(epsilon = denom_21_epsilon_0_to_fp16, x = var_1050_cast)[name = tensor("denom_21_cast")]; + tensor out_21_cast = mul(x = zero_mean_21_cast, y = denom_21_cast)[name = tensor("out_21_cast")]; + tensor var_1054_to_fp16 = const()[name = tensor("op_1054_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61570240)))]; + tensor var_1055_cast = add(x = out_21_cast, y = var_1054_to_fp16)[name = tensor("op_1055_cast")]; + tensor var_1057_to_fp16 = const()[name = tensor("op_1057_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61571584)))]; + tensor hidden_states_49_cast = mul(x = var_1055_cast, y = var_1057_to_fp16)[name = tensor("hidden_states_49_cast")]; + tensor var_1064 = const()[name = tensor("op_1064"), val = tensor([1, 1])]; + tensor var_1066 = const()[name = tensor("op_1066"), val = tensor([1, 1])]; + tensor q_15_pad_type_0 = const()[name = tensor("q_15_pad_type_0"), val = tensor("custom")]; + tensor q_15_pad_0 = const()[name = tensor("q_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61572928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61777792))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_15_cast = conv(dilations = var_1066, groups = var_282, pad = q_15_pad_0, pad_type = q_15_pad_type_0, strides = var_1064, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_49_cast)[name = tensor("q_15_cast")]; + tensor var_1070 = const()[name = tensor("op_1070"), val = tensor([1, 1])]; + tensor var_1072 = const()[name = tensor("op_1072"), val = tensor([1, 1])]; + tensor k_15_pad_type_0 = const()[name = tensor("k_15_pad_type_0"), val = tensor("custom")]; + tensor k_15_pad_0 = const()[name = tensor("k_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(61777920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(62433344))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_15_cast = conv(dilations = var_1072, groups = var_282, pad = k_15_pad_0, pad_type = k_15_pad_type_0, strides = var_1070, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_15_cast")]; + tensor var_1076 = const()[name = tensor("op_1076"), val = tensor([1, 1])]; + tensor var_1078 = 
const()[name = tensor("op_1078"), val = tensor([1, 1])]; + tensor v_15_pad_type_0 = const()[name = tensor("v_15_pad_type_0"), val = tensor("custom")]; + tensor v_15_pad_0 = const()[name = tensor("v_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(62433472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63416576))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_15_cast = conv(dilations = var_1078, groups = var_282, pad = v_15_pad_0, pad_type = v_15_pad_type_0, strides = var_1076, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_15_cast")]; + tensor var_1082 = const()[name = tensor("op_1082"), val = tensor([2, 10, 64, -1])]; + tensor var_1083_cast = reshape(shape = var_1082, x = q_15_cast)[name = tensor("op_1083_cast")]; + tensor var_1084 = const()[name = tensor("op_1084"), val = tensor([2, 10, 64, -1])]; + tensor var_1085_cast = reshape(shape = var_1084, x = k_15_cast)[name = tensor("op_1085_cast")]; + tensor var_1086 = const()[name = tensor("op_1086"), val = tensor([2, 10, 64, -1])]; + tensor var_1087_cast = reshape(shape = var_1086, x = v_15_cast)[name = tensor("op_1087_cast")]; + tensor attn_weights_29_transpose_x_0 = const()[name = tensor("attn_weights_29_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_29_transpose_y_0 = const()[name = tensor("attn_weights_29_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_29_cast = matmul(transpose_x = attn_weights_29_transpose_x_0, transpose_y = attn_weights_29_transpose_y_0, x = var_1083_cast, y = var_1085_cast)[name = tensor("attn_weights_29_cast")]; + tensor attn_weights_31_cast = mul(x = attn_weights_29_cast, y = var_273_to_fp16)[name = tensor("attn_weights_31_cast")]; + tensor var_1091_cast = softmax(axis = var_266, x = attn_weights_31_cast)[name = tensor("op_1091_cast")]; + tensor attn_15_transpose_x_0 = const()[name = tensor("attn_15_transpose_x_0"), val = tensor(false)]; + tensor attn_15_transpose_y_0 = const()[name = tensor("attn_15_transpose_y_0"), val = tensor(true)]; + tensor attn_15_cast = matmul(transpose_x = attn_15_transpose_x_0, transpose_y = attn_15_transpose_y_0, x = var_1087_cast, y = var_1091_cast)[name = tensor("attn_15_cast")]; + tensor var_1095 = const()[name = tensor("op_1095"), val = tensor([2, 640, 1, -1])]; + tensor input_105_cast = reshape(shape = var_1095, x = attn_15_cast)[name = tensor("input_105_cast")]; + tensor var_1100 = const()[name = tensor("op_1100"), val = tensor([1, 1])]; + tensor var_1102 = const()[name = tensor("op_1102"), val = tensor([1, 1])]; + tensor var_1104_pad_type_0 = const()[name = tensor("op_1104_pad_type_0"), val = tensor("custom")]; + tensor var_1104_pad_0 = const()[name = tensor("op_1104_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63416768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63724032))), name = 
tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63724224)))]; + tensor var_1104_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_1102, groups = var_282, pad = var_1104_pad_0, pad_type = var_1104_pad_type_0, strides = var_1100, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_105_cast)[name = tensor("op_1104_cast")]; + tensor inputs_23_cast = add(x = var_1104_cast, y = inputs_21_cast)[name = tensor("inputs_23_cast")]; + tensor var_1108 = const()[name = tensor("op_1108"), val = tensor([1])]; + tensor channels_mean_23_cast = reduce_mean(axes = var_1108, keep_dims = var_277, x = inputs_23_cast)[name = tensor("channels_mean_23_cast")]; + tensor zero_mean_23_cast = sub(x = inputs_23_cast, y = channels_mean_23_cast)[name = tensor("zero_mean_23_cast")]; + tensor zero_mean_sq_23_cast = mul(x = zero_mean_23_cast, y = zero_mean_23_cast)[name = tensor("zero_mean_sq_23_cast")]; + tensor var_1112 = const()[name = tensor("op_1112"), val = tensor([1])]; + tensor var_1113_cast = reduce_mean(axes = var_1112, keep_dims = var_277, x = zero_mean_sq_23_cast)[name = tensor("op_1113_cast")]; + tensor var_1114_to_fp16 = const()[name = tensor("op_1114_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1115_cast = add(x = var_1113_cast, y = var_1114_to_fp16)[name = tensor("op_1115_cast")]; + tensor denom_23_epsilon_0_to_fp16 = const()[name = tensor("denom_23_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_23_cast = rsqrt(epsilon = denom_23_epsilon_0_to_fp16, x = var_1115_cast)[name = tensor("denom_23_cast")]; + tensor out_23_cast = mul(x = zero_mean_23_cast, y = denom_23_cast)[name = tensor("out_23_cast")]; + tensor var_1119_to_fp16 = const()[name = tensor("op_1119_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63725568)))]; + tensor var_1120_cast = add(x = out_23_cast, y = var_1119_to_fp16)[name = tensor("op_1120_cast")]; + tensor var_1122_to_fp16 = const()[name = tensor("op_1122_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63726912)))]; + tensor input_107_cast = mul(x = var_1120_cast, y = var_1122_to_fp16)[name = tensor("input_107_cast")]; + tensor var_1130 = const()[name = tensor("op_1130"), val = tensor([1, 1])]; + tensor var_1132 = const()[name = tensor("op_1132"), val = tensor([1, 1])]; + tensor var_1134_pad_type_0 = const()[name = tensor("op_1134_pad_type_0"), val = tensor("custom")]; + tensor var_1134_pad_0 = const()[name = tensor("op_1134_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(63728256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(67005120))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor 
down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(67005696)))]; + tensor var_1134_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_1132, groups = var_282, pad = var_1134_pad_0, pad_type = var_1134_pad_type_0, strides = var_1130, weight = down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_107_cast)[name = tensor("op_1134_cast")]; + tensor var_1135_split_sizes_0 = const()[name = tensor("op_1135_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_1135_axis_0 = const()[name = tensor("op_1135_axis_0"), val = tensor(1)]; + tensor var_1135_cast_0, tensor var_1135_cast_1 = split(axis = var_1135_axis_0, split_sizes = var_1135_split_sizes_0, x = var_1134_cast)[name = tensor("op_1135_cast")]; + tensor var_1137_mode_0 = const()[name = tensor("op_1137_mode_0"), val = tensor("EXACT")]; + tensor var_1137_cast = gelu(mode = var_1137_mode_0, x = var_1135_cast_1)[name = tensor("op_1137_cast")]; + tensor input_109_cast = mul(x = var_1135_cast_0, y = var_1137_cast)[name = tensor("input_109_cast")]; + tensor var_1141 = const()[name = tensor("op_1141"), val = tensor([1, 1])]; + tensor var_1143 = const()[name = tensor("op_1143"), val = tensor([1, 1])]; + tensor var_1145_pad_type_0 = const()[name = tensor("op_1145_pad_type_0"), val = tensor("custom")]; + tensor var_1145_pad_0 = const()[name = tensor("op_1145_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(67016000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(68654464))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(68655040)))]; + tensor var_1145_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_1143, groups = var_282, pad = var_1145_pad_0, pad_type = var_1145_pad_type_0, strides = var_1141, weight = down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_109_cast)[name = tensor("op_1145_cast")]; + tensor hidden_states_53_cast = add(x = var_1145_cast, y = inputs_23_cast)[name = tensor("hidden_states_53_cast")]; + tensor var_1147 = const()[name = tensor("op_1147"), val = tensor([2, 640, 64, 64])]; + tensor input_111_cast = reshape(shape = var_1147, x = hidden_states_53_cast)[name = tensor("input_111_cast")]; + tensor var_1151 = const()[name = tensor("op_1151"), val = tensor([1, 1])]; + tensor var_1153 = const()[name = tensor("op_1153"), val = tensor([1, 1])]; + tensor hidden_states_55_pad_type_0 = const()[name = tensor("hidden_states_55_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_55_pad_0 = const()[name = tensor("hidden_states_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(68656384))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(69066048))), name = tensor("down_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(69066624)))]; + tensor hidden_states_55_cast = conv(bias = down_blocks_1_attentions_1_proj_out_bias_to_fp16, dilations = var_1153, groups = var_282, pad = hidden_states_55_pad_0, pad_type = hidden_states_55_pad_type_0, strides = var_1151, weight = down_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized, x = input_111_cast)[name = tensor("hidden_states_55_cast")]; + tensor input_113_cast = add(x = hidden_states_55_cast, y = hidden_states_37_cast)[name = tensor("input_113_cast")]; + tensor var_1160 = const()[name = tensor("op_1160"), val = tensor([2, 2])]; + tensor var_1162 = const()[name = tensor("op_1162"), val = tensor([1, 1])]; + tensor input_115_pad_type_0 = const()[name = tensor("input_115_pad_type_0"), val = tensor("custom")]; + tensor input_115_pad_0 = const()[name = tensor("input_115_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_downsamplers_0_conv_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(69067968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(72754432))), name = tensor("down_blocks_1_downsamplers_0_conv_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor down_blocks_1_downsamplers_0_conv_bias_to_fp16 = const()[name = tensor("down_blocks_1_downsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(72755008)))]; + tensor input_115_cast = conv(bias = down_blocks_1_downsamplers_0_conv_bias_to_fp16, dilations = var_1162, groups = var_282, pad = input_115_pad_0, pad_type = input_115_pad_type_0, strides = var_1160, weight = down_blocks_1_downsamplers_0_conv_weight_to_fp16_palettized, x = input_113_cast)[name = tensor("input_115_cast")]; + tensor var_1170 = const()[name = tensor("op_1170"), val = tensor(3)]; + tensor var_1181 = const()[name = tensor("op_1181"), val = tensor(true)]; + tensor var_1186 = const()[name = tensor("op_1186"), val = tensor(1)]; + tensor reshape_40_shape_0 = const()[name = tensor("reshape_40_shape_0"), val = tensor([2, 32, 20, 32, 32])]; + tensor reshape_40_cast = reshape(shape = reshape_40_shape_0, x = input_115_cast)[name = tensor("reshape_40_cast")]; + tensor reduce_mean_30_axes_0 = const()[name = tensor("reduce_mean_30_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_30_keep_dims_0 = const()[name = tensor("reduce_mean_30_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_30_cast = reduce_mean(axes = reduce_mean_30_axes_0, keep_dims = reduce_mean_30_keep_dims_0, x = reshape_40_cast)[name = tensor("reduce_mean_30_cast")]; + tensor sub_20_cast = sub(x = reshape_40_cast, y = reduce_mean_30_cast)[name = tensor("sub_20_cast")]; + tensor square_10_cast = square(x = sub_20_cast)[name = tensor("square_10_cast")]; + tensor reduce_mean_32_axes_0 = const()[name = tensor("reduce_mean_32_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_32_keep_dims_0 
= const()[name = tensor("reduce_mean_32_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_32_cast = reduce_mean(axes = reduce_mean_32_axes_0, keep_dims = reduce_mean_32_keep_dims_0, x = square_10_cast)[name = tensor("reduce_mean_32_cast")]; + tensor add_20_y_0_to_fp16 = const()[name = tensor("add_20_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_20_cast = add(x = reduce_mean_32_cast, y = add_20_y_0_to_fp16)[name = tensor("add_20_cast")]; + tensor sqrt_10_cast = sqrt(x = add_20_cast)[name = tensor("sqrt_10_cast")]; + tensor real_div_10_cast = real_div(x = sub_20_cast, y = sqrt_10_cast)[name = tensor("real_div_10_cast")]; + tensor reshape_41_shape_0 = const()[name = tensor("reshape_41_shape_0"), val = tensor([2, 640, 32, 32])]; + tensor reshape_41_cast = reshape(shape = reshape_41_shape_0, x = real_div_10_cast)[name = tensor("reshape_41_cast")]; + tensor add_21_gamma_0_to_fp16 = const()[name = tensor("add_21_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(72756352)))]; + tensor add_21_beta_0_to_fp16 = const()[name = tensor("add_21_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(72757696)))]; + tensor add_21_epsilon_0_to_fp16 = const()[name = tensor("add_21_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_21_cast = batch_norm(beta = add_21_beta_0_to_fp16, epsilon = add_21_epsilon_0_to_fp16, gamma = add_21_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_41_cast)[name = tensor("add_21_cast")]; + tensor input_119_cast = silu(x = add_21_cast)[name = tensor("input_119_cast")]; + tensor var_1207 = const()[name = tensor("op_1207"), val = tensor([1, 1])]; + tensor var_1209 = const()[name = tensor("op_1209"), val = tensor([1, 1])]; + tensor hidden_states_57_pad_type_0 = const()[name = tensor("hidden_states_57_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_57_pad_0 = const()[name = tensor("hidden_states_57_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_2_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(72759040))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(78288704))), name = tensor("down_blocks_2_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 640, 3, 3])]; + tensor down_blocks_2_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(78288896)))]; + tensor hidden_states_57_cast = conv(bias = down_blocks_2_resnets_0_conv1_bias_to_fp16, dilations = var_1209, groups = var_1186, pad = hidden_states_57_pad_0, pad_type = hidden_states_57_pad_type_0, strides = var_1207, weight = down_blocks_2_resnets_0_conv1_weight_to_fp16_palettized, x = input_119_cast)[name = tensor("hidden_states_57_cast")]; + tensor var_1215 = const()[name = tensor("op_1215"), val = tensor([1, 1])]; + tensor var_1217 = const()[name = tensor("op_1217"), val = tensor([1, 1])]; + tensor temb_9_pad_type_0 = const()[name = tensor("temb_9_pad_type_0"), val = tensor("custom")]; + tensor temb_9_pad_0 = const()[name = tensor("temb_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(78291520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79520384))), name = tensor("down_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79520576)))]; + tensor temb_9_cast = conv(bias = down_blocks_2_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_1217, groups = var_1186, pad = temb_9_pad_0, pad_type = temb_9_pad_type_0, strides = var_1215, weight = down_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_9_cast")]; + tensor input_123_cast = add(x = hidden_states_57_cast, y = temb_9_cast)[name = tensor("input_123_cast")]; + tensor reshape_44_shape_0 = const()[name = tensor("reshape_44_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_44_cast = reshape(shape = reshape_44_shape_0, x = input_123_cast)[name = tensor("reshape_44_cast")]; + tensor reduce_mean_33_axes_0 = const()[name = tensor("reduce_mean_33_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_33_keep_dims_0 = const()[name = tensor("reduce_mean_33_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_33_cast = reduce_mean(axes = reduce_mean_33_axes_0, keep_dims = reduce_mean_33_keep_dims_0, x = reshape_44_cast)[name = tensor("reduce_mean_33_cast")]; + tensor sub_22_cast = sub(x = reshape_44_cast, y = reduce_mean_33_cast)[name = tensor("sub_22_cast")]; + tensor square_11_cast = square(x = sub_22_cast)[name = tensor("square_11_cast")]; + tensor reduce_mean_35_axes_0 = const()[name = tensor("reduce_mean_35_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_35_keep_dims_0 = const()[name = tensor("reduce_mean_35_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_35_cast = reduce_mean(axes = reduce_mean_35_axes_0, keep_dims = reduce_mean_35_keep_dims_0, x = square_11_cast)[name = tensor("reduce_mean_35_cast")]; + tensor add_22_y_0_to_fp16 = const()[name = tensor("add_22_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_22_cast = add(x = reduce_mean_35_cast, y = add_22_y_0_to_fp16)[name = tensor("add_22_cast")]; + tensor sqrt_11_cast = sqrt(x = add_22_cast)[name = tensor("sqrt_11_cast")]; + tensor real_div_11_cast = real_div(x = sub_22_cast, y = sqrt_11_cast)[name = tensor("real_div_11_cast")]; + tensor reshape_45_shape_0 = const()[name = tensor("reshape_45_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_45_cast = reshape(shape = reshape_45_shape_0, x = real_div_11_cast)[name = tensor("reshape_45_cast")]; + tensor add_23_mean_0_to_fp16 = const()[name = tensor("add_23_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79523200)))]; + tensor add_23_variance_0_to_fp16 = const()[name = tensor("add_23_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79525824)))]; + tensor add_23_gamma_0_to_fp16 = const()[name = tensor("add_23_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79528448)))]; + tensor add_23_beta_0_to_fp16 = const()[name = tensor("add_23_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79531072)))]; + tensor add_23_epsilon_0_to_fp16 
= const()[name = tensor("add_23_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_23_cast = batch_norm(beta = add_23_beta_0_to_fp16, epsilon = add_23_epsilon_0_to_fp16, gamma = add_23_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_45_cast)[name = tensor("add_23_cast")]; + tensor input_127_cast = silu(x = add_23_cast)[name = tensor("input_127_cast")]; + tensor var_1227 = const()[name = tensor("op_1227"), val = tensor([1, 1])]; + tensor var_1229 = const()[name = tensor("op_1229"), val = tensor([1, 1])]; + tensor hidden_states_59_pad_type_0 = const()[name = tensor("hidden_states_59_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_59_pad_0 = const()[name = tensor("hidden_states_59_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_2_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(79533696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(90592960))), name = tensor("down_blocks_2_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor down_blocks_2_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(90593152)))]; + tensor hidden_states_59_cast = conv(bias = down_blocks_2_resnets_0_conv2_bias_to_fp16, dilations = var_1229, groups = var_1186, pad = hidden_states_59_pad_0, pad_type = hidden_states_59_pad_type_0, strides = var_1227, weight = down_blocks_2_resnets_0_conv2_weight_to_fp16_palettized, x = input_127_cast)[name = tensor("hidden_states_59_cast")]; + tensor var_1234 = const()[name = tensor("op_1234"), val = tensor([1, 1])]; + tensor var_1236 = const()[name = tensor("op_1236"), val = tensor([1, 1])]; + tensor x_3_pad_type_0 = const()[name = tensor("x_3_pad_type_0"), val = tensor("custom")]; + tensor x_3_pad_0 = const()[name = tensor("x_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_resnets_0_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(90595776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(91415040))), name = tensor("down_blocks_2_resnets_0_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([1280, 640, 1, 1])]; + tensor down_blocks_2_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(91415616)))]; + tensor x_3_cast = conv(bias = down_blocks_2_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_1236, groups = var_1186, pad = x_3_pad_0, pad_type = x_3_pad_type_0, strides = var_1234, weight = down_blocks_2_resnets_0_conv_shortcut_weight_to_fp16_palettized, x = input_115_cast)[name = tensor("x_3_cast")]; + tensor hidden_states_61_cast = add(x = x_3_cast, y = hidden_states_59_cast)[name = tensor("hidden_states_61_cast")]; + tensor reshape_48_shape_0 = const()[name = tensor("reshape_48_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_48_cast = reshape(shape = reshape_48_shape_0, x = hidden_states_61_cast)[name = tensor("reshape_48_cast")]; + tensor reduce_mean_36_axes_0 = const()[name = tensor("reduce_mean_36_axes_0"), val = tensor([2, 3, 4])]; + tensor 
reduce_mean_36_keep_dims_0 = const()[name = tensor("reduce_mean_36_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_36_cast = reduce_mean(axes = reduce_mean_36_axes_0, keep_dims = reduce_mean_36_keep_dims_0, x = reshape_48_cast)[name = tensor("reduce_mean_36_cast")]; + tensor sub_24_cast = sub(x = reshape_48_cast, y = reduce_mean_36_cast)[name = tensor("sub_24_cast")]; + tensor square_12_cast = square(x = sub_24_cast)[name = tensor("square_12_cast")]; + tensor reduce_mean_38_axes_0 = const()[name = tensor("reduce_mean_38_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_38_keep_dims_0 = const()[name = tensor("reduce_mean_38_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_38_cast = reduce_mean(axes = reduce_mean_38_axes_0, keep_dims = reduce_mean_38_keep_dims_0, x = square_12_cast)[name = tensor("reduce_mean_38_cast")]; + tensor add_24_y_0_to_fp16 = const()[name = tensor("add_24_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_24_cast = add(x = reduce_mean_38_cast, y = add_24_y_0_to_fp16)[name = tensor("add_24_cast")]; + tensor sqrt_12_cast = sqrt(x = add_24_cast)[name = tensor("sqrt_12_cast")]; + tensor real_div_12_cast = real_div(x = sub_24_cast, y = sqrt_12_cast)[name = tensor("real_div_12_cast")]; + tensor reshape_49_shape_0 = const()[name = tensor("reshape_49_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_49_cast = reshape(shape = reshape_49_shape_0, x = real_div_12_cast)[name = tensor("reshape_49_cast")]; + tensor add_25_gamma_0_to_fp16 = const()[name = tensor("add_25_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(91418240)))]; + tensor add_25_beta_0_to_fp16 = const()[name = tensor("add_25_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(91420864)))]; + tensor add_25_epsilon_0_to_fp16 = const()[name = tensor("add_25_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_25_cast = batch_norm(beta = add_25_beta_0_to_fp16, epsilon = add_25_epsilon_0_to_fp16, gamma = add_25_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_49_cast)[name = tensor("add_25_cast")]; + tensor var_1274 = const()[name = tensor("op_1274"), val = tensor([1, 1])]; + tensor var_1276 = const()[name = tensor("op_1276"), val = tensor([1, 1])]; + tensor hidden_states_63_pad_type_0 = const()[name = tensor("hidden_states_63_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_63_pad_0 = const()[name = tensor("hidden_states_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(91423488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(92652352))), name = tensor("down_blocks_2_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(92652544)))]; + tensor hidden_states_63_cast = conv(bias = down_blocks_2_attentions_0_proj_in_bias_to_fp16, dilations = var_1276, groups = var_1186, pad = hidden_states_63_pad_0, pad_type = hidden_states_63_pad_type_0, strides = var_1274, weight = down_blocks_2_attentions_0_proj_in_weight_to_fp16_palettized, x = 
add_25_cast)[name = tensor("hidden_states_63_cast")]; + tensor var_1281 = const()[name = tensor("op_1281"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_25_cast = reshape(shape = var_1281, x = hidden_states_63_cast)[name = tensor("inputs_25_cast")]; + tensor var_1291 = const()[name = tensor("op_1291"), val = tensor([1])]; + tensor channels_mean_25_cast = reduce_mean(axes = var_1291, keep_dims = var_1181, x = inputs_25_cast)[name = tensor("channels_mean_25_cast")]; + tensor zero_mean_25_cast = sub(x = inputs_25_cast, y = channels_mean_25_cast)[name = tensor("zero_mean_25_cast")]; + tensor zero_mean_sq_25_cast = mul(x = zero_mean_25_cast, y = zero_mean_25_cast)[name = tensor("zero_mean_sq_25_cast")]; + tensor var_1295 = const()[name = tensor("op_1295"), val = tensor([1])]; + tensor var_1296_cast = reduce_mean(axes = var_1295, keep_dims = var_1181, x = zero_mean_sq_25_cast)[name = tensor("op_1296_cast")]; + tensor var_1297_to_fp16 = const()[name = tensor("op_1297_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1298_cast = add(x = var_1296_cast, y = var_1297_to_fp16)[name = tensor("op_1298_cast")]; + tensor denom_25_epsilon_0_to_fp16 = const()[name = tensor("denom_25_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_25_cast = rsqrt(epsilon = denom_25_epsilon_0_to_fp16, x = var_1298_cast)[name = tensor("denom_25_cast")]; + tensor out_25_cast = mul(x = zero_mean_25_cast, y = denom_25_cast)[name = tensor("out_25_cast")]; + tensor var_1302_to_fp16 = const()[name = tensor("op_1302_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(92655168)))]; + tensor var_1303_cast = add(x = out_25_cast, y = var_1302_to_fp16)[name = tensor("op_1303_cast")]; + tensor var_1305_to_fp16 = const()[name = tensor("op_1305_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(92657792)))]; + tensor hidden_states_65_cast = mul(x = var_1303_cast, y = var_1305_to_fp16)[name = tensor("hidden_states_65_cast")]; + tensor var_1312 = const()[name = tensor("op_1312"), val = tensor([1, 1])]; + tensor var_1314 = const()[name = tensor("op_1314"), val = tensor([1, 1])]; + tensor q_17_pad_type_0 = const()[name = tensor("q_17_pad_type_0"), val = tensor("custom")]; + tensor q_17_pad_0 = const()[name = tensor("q_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(92660416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(93479680))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_17_cast = conv(dilations = var_1314, groups = var_1186, pad = q_17_pad_0, pad_type = q_17_pad_type_0, strides = var_1312, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_65_cast)[name = tensor("q_17_cast")]; + tensor var_1318 = const()[name = tensor("op_1318"), val = tensor([1, 1])]; + tensor var_1320 = const()[name = tensor("op_1320"), val = tensor([1, 1])]; + tensor k_17_pad_type_0 = const()[name = tensor("k_17_pad_type_0"), val = tensor("custom")]; + tensor k_17_pad_0 = const()[name = tensor("k_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(93479808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(94299072))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_17_cast = conv(dilations = var_1320, groups = var_1186, pad = k_17_pad_0, pad_type = k_17_pad_type_0, strides = var_1318, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_65_cast)[name = tensor("k_17_cast")]; + tensor var_1324 = const()[name = tensor("op_1324"), val = tensor([1, 1])]; + tensor var_1326 = const()[name = tensor("op_1326"), val = tensor([1, 1])]; + tensor v_17_pad_type_0 = const()[name = tensor("v_17_pad_type_0"), val = tensor("custom")]; + tensor v_17_pad_0 = const()[name = tensor("v_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(94299200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(95118464))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_17_cast = conv(dilations = var_1326, groups = var_1186, pad = v_17_pad_0, pad_type = v_17_pad_type_0, strides = var_1324, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_65_cast)[name = tensor("v_17_cast")]; + tensor var_1330 = const()[name = tensor("op_1330"), val = tensor([2, 20, 64, -1])]; + tensor var_1331_cast = reshape(shape = var_1330, x = q_17_cast)[name = tensor("op_1331_cast")]; + tensor var_1332 = const()[name = tensor("op_1332"), val = tensor([2, 20, 64, -1])]; + tensor var_1333_cast = reshape(shape = var_1332, x = k_17_cast)[name = tensor("op_1333_cast")]; + tensor var_1334 = const()[name = tensor("op_1334"), val = tensor([2, 20, 64, -1])]; + tensor var_1335_cast = reshape(shape = var_1334, x = v_17_cast)[name = tensor("op_1335_cast")]; + tensor attn_weights_33_transpose_x_0 = const()[name = tensor("attn_weights_33_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_33_transpose_y_0 = const()[name = tensor("attn_weights_33_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_33_cast = matmul(transpose_x = attn_weights_33_transpose_x_0, transpose_y = attn_weights_33_transpose_y_0, x = var_1331_cast, y = var_1333_cast)[name = tensor("attn_weights_33_cast")]; + tensor var_1177_to_fp16 = const()[name = tensor("op_1177_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_35_cast = mul(x = attn_weights_33_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_35_cast")]; + tensor var_1339_cast = softmax(axis = var_1170, x = attn_weights_35_cast)[name = tensor("op_1339_cast")]; + tensor attn_17_transpose_x_0 = const()[name = tensor("attn_17_transpose_x_0"), val = tensor(false)]; + tensor attn_17_transpose_y_0 = const()[name = tensor("attn_17_transpose_y_0"), val = tensor(true)]; + tensor attn_17_cast = matmul(transpose_x = attn_17_transpose_x_0, transpose_y = attn_17_transpose_y_0, x = var_1335_cast, y = var_1339_cast)[name = tensor("attn_17_cast")]; + tensor var_1343 = const()[name = tensor("op_1343"), val = tensor([2, 1280, 1, -1])]; + tensor input_131_cast = 
reshape(shape = var_1343, x = attn_17_cast)[name = tensor("input_131_cast")]; + tensor var_1348 = const()[name = tensor("op_1348"), val = tensor([1, 1])]; + tensor var_1350 = const()[name = tensor("op_1350"), val = tensor([1, 1])]; + tensor var_1352_pad_type_0 = const()[name = tensor("op_1352_pad_type_0"), val = tensor("custom")]; + tensor var_1352_pad_0 = const()[name = tensor("op_1352_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(95118592))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(96347456))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(96347648)))]; + tensor var_1352_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_1350, groups = var_1186, pad = var_1352_pad_0, pad_type = var_1352_pad_type_0, strides = var_1348, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_131_cast)[name = tensor("op_1352_cast")]; + tensor inputs_27_cast = add(x = var_1352_cast, y = inputs_25_cast)[name = tensor("inputs_27_cast")]; + tensor var_1356 = const()[name = tensor("op_1356"), val = tensor([1])]; + tensor channels_mean_27_cast = reduce_mean(axes = var_1356, keep_dims = var_1181, x = inputs_27_cast)[name = tensor("channels_mean_27_cast")]; + tensor zero_mean_27_cast = sub(x = inputs_27_cast, y = channels_mean_27_cast)[name = tensor("zero_mean_27_cast")]; + tensor zero_mean_sq_27_cast = mul(x = zero_mean_27_cast, y = zero_mean_27_cast)[name = tensor("zero_mean_sq_27_cast")]; + tensor var_1360 = const()[name = tensor("op_1360"), val = tensor([1])]; + tensor var_1361_cast = reduce_mean(axes = var_1360, keep_dims = var_1181, x = zero_mean_sq_27_cast)[name = tensor("op_1361_cast")]; + tensor var_1362_to_fp16 = const()[name = tensor("op_1362_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1363_cast = add(x = var_1361_cast, y = var_1362_to_fp16)[name = tensor("op_1363_cast")]; + tensor denom_27_epsilon_0_to_fp16 = const()[name = tensor("denom_27_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_27_cast = rsqrt(epsilon = denom_27_epsilon_0_to_fp16, x = var_1363_cast)[name = tensor("denom_27_cast")]; + tensor out_27_cast = mul(x = zero_mean_27_cast, y = denom_27_cast)[name = tensor("out_27_cast")]; + tensor var_1367_to_fp16 = const()[name = tensor("op_1367_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(96350272)))]; + tensor var_1368_cast = add(x = out_27_cast, y = var_1367_to_fp16)[name = tensor("op_1368_cast")]; + tensor var_1370_to_fp16 = const()[name = tensor("op_1370_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(96352896)))]; + tensor hidden_states_67_cast = mul(x = var_1368_cast, y = var_1370_to_fp16)[name = tensor("hidden_states_67_cast")]; + tensor var_1377 = const()[name = tensor("op_1377"), val = tensor([1, 1])]; + tensor var_1379 = const()[name = 
tensor("op_1379"), val = tensor([1, 1])]; + tensor q_19_pad_type_0 = const()[name = tensor("q_19_pad_type_0"), val = tensor("custom")]; + tensor q_19_pad_0 = const()[name = tensor("q_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(96355520))), lut = tensor([-0x1.adp-7, 0x1.ad8p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_19_cast = conv(dilations = var_1379, groups = var_1186, pad = q_19_pad_0, pad_type = q_19_pad_type_0, strides = var_1377, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_67_cast)[name = tensor("q_19_cast")]; + tensor var_1383 = const()[name = tensor("op_1383"), val = tensor([1, 1])]; + tensor var_1385 = const()[name = tensor("op_1385"), val = tensor([1, 1])]; + tensor k_19_pad_type_0 = const()[name = tensor("k_19_pad_type_0"), val = tensor("custom")]; + tensor k_19_pad_0 = const()[name = tensor("k_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(96560384))), lut = tensor([-0x1.7fcp-6, -0x1.bdcp-8, 0x1.c44p-8, 0x1.818p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_19_cast = conv(dilations = var_1385, groups = var_1186, pad = k_19_pad_0, pad_type = k_19_pad_type_0, strides = var_1383, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_19_cast")]; + tensor var_1389 = const()[name = tensor("op_1389"), val = tensor([1, 1])]; + tensor var_1391 = const()[name = tensor("op_1391"), val = tensor([1, 1])]; + tensor v_19_pad_type_0 = const()[name = tensor("v_19_pad_type_0"), val = tensor("custom")]; + tensor v_19_pad_0 = const()[name = tensor("v_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(97215808))), lut = tensor([-0x1.8e8p-6, -0x1.cbp-8, 0x1.cccp-8, 0x1.9p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_19_cast = conv(dilations = var_1391, groups = var_1186, pad = v_19_pad_0, pad_type = v_19_pad_type_0, strides = var_1389, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_19_cast")]; + tensor var_1395 = const()[name = tensor("op_1395"), val = tensor([2, 20, 64, -1])]; + tensor var_1396_cast = reshape(shape = var_1395, x = q_19_cast)[name = tensor("op_1396_cast")]; + tensor var_1397 = const()[name = tensor("op_1397"), val = tensor([2, 20, 64, -1])]; + tensor var_1398_cast = reshape(shape = var_1397, x = k_19_cast)[name = tensor("op_1398_cast")]; + tensor var_1399 = const()[name = tensor("op_1399"), val = tensor([2, 20, 64, -1])]; + tensor var_1400_cast = reshape(shape = var_1399, x = v_19_cast)[name = 
tensor("op_1400_cast")]; + tensor attn_weights_37_transpose_x_0 = const()[name = tensor("attn_weights_37_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_37_transpose_y_0 = const()[name = tensor("attn_weights_37_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_37_cast = matmul(transpose_x = attn_weights_37_transpose_x_0, transpose_y = attn_weights_37_transpose_y_0, x = var_1396_cast, y = var_1398_cast)[name = tensor("attn_weights_37_cast")]; + tensor attn_weights_39_cast = mul(x = attn_weights_37_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_39_cast")]; + tensor var_1404_cast = softmax(axis = var_1170, x = attn_weights_39_cast)[name = tensor("op_1404_cast")]; + tensor attn_19_transpose_x_0 = const()[name = tensor("attn_19_transpose_x_0"), val = tensor(false)]; + tensor attn_19_transpose_y_0 = const()[name = tensor("attn_19_transpose_y_0"), val = tensor(true)]; + tensor attn_19_cast = matmul(transpose_x = attn_19_transpose_x_0, transpose_y = attn_19_transpose_y_0, x = var_1400_cast, y = var_1404_cast)[name = tensor("attn_19_cast")]; + tensor var_1408 = const()[name = tensor("op_1408"), val = tensor([2, 1280, 1, -1])]; + tensor input_133_cast = reshape(shape = var_1408, x = attn_19_cast)[name = tensor("input_133_cast")]; + tensor var_1413 = const()[name = tensor("op_1413"), val = tensor([1, 1])]; + tensor var_1415 = const()[name = tensor("op_1415"), val = tensor([1, 1])]; + tensor var_1417_pad_type_0 = const()[name = tensor("op_1417_pad_type_0"), val = tensor("custom")]; + tensor var_1417_pad_0 = const()[name = tensor("op_1417_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(97871232))), lut = tensor([-0x1.aep-7, -0x1.f5p-9, 0x1.fa8p-9, 0x1.afcp-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(98280896)))]; + tensor var_1417_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_1415, groups = var_1186, pad = var_1417_pad_0, pad_type = var_1417_pad_type_0, strides = var_1413, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_133_cast)[name = tensor("op_1417_cast")]; + tensor inputs_29_cast = add(x = var_1417_cast, y = inputs_27_cast)[name = tensor("inputs_29_cast")]; + tensor var_1421 = const()[name = tensor("op_1421"), val = tensor([1])]; + tensor channels_mean_29_cast = reduce_mean(axes = var_1421, keep_dims = var_1181, x = inputs_29_cast)[name = tensor("channels_mean_29_cast")]; + tensor zero_mean_29_cast = sub(x = inputs_29_cast, y = channels_mean_29_cast)[name = tensor("zero_mean_29_cast")]; + tensor zero_mean_sq_29_cast = mul(x = zero_mean_29_cast, y = zero_mean_29_cast)[name = tensor("zero_mean_sq_29_cast")]; + tensor var_1425 = const()[name = tensor("op_1425"), val = tensor([1])]; + tensor var_1426_cast = reduce_mean(axes = var_1425, keep_dims = var_1181, x = zero_mean_sq_29_cast)[name = tensor("op_1426_cast")]; + tensor var_1427_to_fp16 = const()[name = 
tensor("op_1427_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1428_cast = add(x = var_1426_cast, y = var_1427_to_fp16)[name = tensor("op_1428_cast")]; + tensor denom_29_epsilon_0_to_fp16 = const()[name = tensor("denom_29_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_29_cast = rsqrt(epsilon = denom_29_epsilon_0_to_fp16, x = var_1428_cast)[name = tensor("denom_29_cast")]; + tensor out_29_cast = mul(x = zero_mean_29_cast, y = denom_29_cast)[name = tensor("out_29_cast")]; + tensor var_1432_to_fp16 = const()[name = tensor("op_1432_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(98283520)))]; + tensor var_1433_cast = add(x = out_29_cast, y = var_1432_to_fp16)[name = tensor("op_1433_cast")]; + tensor var_1435_to_fp16 = const()[name = tensor("op_1435_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(98286144)))]; + tensor input_135_cast = mul(x = var_1433_cast, y = var_1435_to_fp16)[name = tensor("input_135_cast")]; + tensor var_1443 = const()[name = tensor("op_1443"), val = tensor([1, 1])]; + tensor var_1445 = const()[name = tensor("op_1445"), val = tensor([1, 1])]; + tensor var_1447_pad_type_0 = const()[name = tensor("op_1447_pad_type_0"), val = tensor("custom")]; + tensor var_1447_pad_0 = const()[name = tensor("op_1447_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(98288768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(108119232))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(108119424)))]; + tensor var_1447_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_1445, groups = var_1186, pad = var_1447_pad_0, pad_type = var_1447_pad_type_0, strides = var_1443, weight = down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_135_cast)[name = tensor("op_1447_cast")]; + tensor var_1448_split_sizes_0 = const()[name = tensor("op_1448_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1448_axis_0 = const()[name = tensor("op_1448_axis_0"), val = tensor(1)]; + tensor var_1448_cast_0, tensor var_1448_cast_1 = split(axis = var_1448_axis_0, split_sizes = var_1448_split_sizes_0, x = var_1447_cast)[name = tensor("op_1448_cast")]; + tensor var_1450_mode_0 = const()[name = tensor("op_1450_mode_0"), val = tensor("EXACT")]; + tensor var_1450_cast = gelu(mode = var_1450_mode_0, x = var_1448_cast_1)[name = tensor("op_1450_cast")]; + tensor input_137_cast = mul(x = var_1448_cast_0, y = var_1450_cast)[name = tensor("input_137_cast")]; + tensor var_1454 = const()[name = tensor("op_1454"), val = tensor([1, 1])]; + tensor var_1456 = const()[name = tensor("op_1456"), val = tensor([1, 1])]; + tensor var_1458_pad_type_0 = const()[name = tensor("op_1458_pad_type_0"), val = tensor("custom")]; + tensor var_1458_pad_0 = const()[name = tensor("op_1458_pad_0"), val = tensor([0, 0, 0, 0])]; + 
tensor down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(108139968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111416832))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111416960)))]; + tensor var_1458_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_1456, groups = var_1186, pad = var_1458_pad_0, pad_type = var_1458_pad_type_0, strides = var_1454, weight = down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_137_cast)[name = tensor("op_1458_cast")]; + tensor inputs_31_cast = add(x = var_1458_cast, y = inputs_29_cast)[name = tensor("inputs_31_cast")]; + tensor var_1468 = const()[name = tensor("op_1468"), val = tensor([1])]; + tensor channels_mean_31_cast = reduce_mean(axes = var_1468, keep_dims = var_1181, x = inputs_31_cast)[name = tensor("channels_mean_31_cast")]; + tensor zero_mean_31_cast = sub(x = inputs_31_cast, y = channels_mean_31_cast)[name = tensor("zero_mean_31_cast")]; + tensor zero_mean_sq_31_cast = mul(x = zero_mean_31_cast, y = zero_mean_31_cast)[name = tensor("zero_mean_sq_31_cast")]; + tensor var_1472 = const()[name = tensor("op_1472"), val = tensor([1])]; + tensor var_1473_cast = reduce_mean(axes = var_1472, keep_dims = var_1181, x = zero_mean_sq_31_cast)[name = tensor("op_1473_cast")]; + tensor var_1474_to_fp16 = const()[name = tensor("op_1474_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1475_cast = add(x = var_1473_cast, y = var_1474_to_fp16)[name = tensor("op_1475_cast")]; + tensor denom_31_epsilon_0_to_fp16 = const()[name = tensor("denom_31_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_31_cast = rsqrt(epsilon = denom_31_epsilon_0_to_fp16, x = var_1475_cast)[name = tensor("denom_31_cast")]; + tensor out_31_cast = mul(x = zero_mean_31_cast, y = denom_31_cast)[name = tensor("out_31_cast")]; + tensor var_1479_to_fp16 = const()[name = tensor("op_1479_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111419584)))]; + tensor var_1480_cast = add(x = out_31_cast, y = var_1479_to_fp16)[name = tensor("op_1480_cast")]; + tensor var_1482_to_fp16 = const()[name = tensor("op_1482_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111422208)))]; + tensor hidden_states_71_cast = mul(x = var_1480_cast, y = var_1482_to_fp16)[name = tensor("hidden_states_71_cast")]; + tensor var_1489 = const()[name = tensor("op_1489"), val = tensor([1, 1])]; + tensor var_1491 = const()[name = tensor("op_1491"), val = tensor([1, 1])]; + tensor q_21_pad_type_0 = const()[name = tensor("q_21_pad_type_0"), val = tensor("custom")]; + tensor q_21_pad_0 = const()[name = tensor("q_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111424832))), lut = 
tensor([-0x1.17p-5, -0x1.518p-7, 0x1.4d4p-7, 0x1.16p-5]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_21_cast = conv(dilations = var_1491, groups = var_1186, pad = q_21_pad_0, pad_type = q_21_pad_type_0, strides = var_1489, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_71_cast)[name = tensor("q_21_cast")]; + tensor var_1495 = const()[name = tensor("op_1495"), val = tensor([1, 1])]; + tensor var_1497 = const()[name = tensor("op_1497"), val = tensor([1, 1])]; + tensor k_21_pad_type_0 = const()[name = tensor("k_21_pad_type_0"), val = tensor("custom")]; + tensor k_21_pad_0 = const()[name = tensor("k_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(111834496))), lut = tensor([-0x1.16p-5, -0x1.4fp-7, 0x1.4e8p-7, 0x1.16p-5]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_21_cast = conv(dilations = var_1497, groups = var_1186, pad = k_21_pad_0, pad_type = k_21_pad_type_0, strides = var_1495, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_71_cast)[name = tensor("k_21_cast")]; + tensor var_1501 = const()[name = tensor("op_1501"), val = tensor([1, 1])]; + tensor var_1503 = const()[name = tensor("op_1503"), val = tensor([1, 1])]; + tensor v_21_pad_type_0 = const()[name = tensor("v_21_pad_type_0"), val = tensor("custom")]; + tensor v_21_pad_0 = const()[name = tensor("v_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(112244160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113063424))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_21_cast = conv(dilations = var_1503, groups = var_1186, pad = v_21_pad_0, pad_type = v_21_pad_type_0, strides = var_1501, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_71_cast)[name = tensor("v_21_cast")]; + tensor var_1507 = const()[name = tensor("op_1507"), val = tensor([2, 20, 64, -1])]; + tensor var_1508_cast = reshape(shape = var_1507, x = q_21_cast)[name = tensor("op_1508_cast")]; + tensor var_1509 = const()[name = tensor("op_1509"), val = tensor([2, 20, 64, -1])]; + tensor var_1510_cast = reshape(shape = var_1509, x = k_21_cast)[name = tensor("op_1510_cast")]; + tensor var_1511 = const()[name = tensor("op_1511"), val = tensor([2, 20, 64, -1])]; + tensor var_1512_cast = reshape(shape = var_1511, x = v_21_cast)[name = tensor("op_1512_cast")]; + tensor attn_weights_41_transpose_x_0 = const()[name = tensor("attn_weights_41_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_41_transpose_y_0 = const()[name = tensor("attn_weights_41_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_41_cast = matmul(transpose_x = attn_weights_41_transpose_x_0, transpose_y = attn_weights_41_transpose_y_0, x = var_1508_cast, 
y = var_1510_cast)[name = tensor("attn_weights_41_cast")]; + tensor attn_weights_43_cast = mul(x = attn_weights_41_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_43_cast")]; + tensor var_1516_cast = softmax(axis = var_1170, x = attn_weights_43_cast)[name = tensor("op_1516_cast")]; + tensor attn_21_transpose_x_0 = const()[name = tensor("attn_21_transpose_x_0"), val = tensor(false)]; + tensor attn_21_transpose_y_0 = const()[name = tensor("attn_21_transpose_y_0"), val = tensor(true)]; + tensor attn_21_cast = matmul(transpose_x = attn_21_transpose_x_0, transpose_y = attn_21_transpose_y_0, x = var_1512_cast, y = var_1516_cast)[name = tensor("attn_21_cast")]; + tensor var_1520 = const()[name = tensor("op_1520"), val = tensor([2, 1280, 1, -1])]; + tensor input_139_cast = reshape(shape = var_1520, x = attn_21_cast)[name = tensor("input_139_cast")]; + tensor var_1525 = const()[name = tensor("op_1525"), val = tensor([1, 1])]; + tensor var_1527 = const()[name = tensor("op_1527"), val = tensor([1, 1])]; + tensor var_1529_pad_type_0 = const()[name = tensor("op_1529_pad_type_0"), val = tensor("custom")]; + tensor var_1529_pad_0 = const()[name = tensor("op_1529_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113063552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113882816))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113882944)))]; + tensor var_1529_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_1527, groups = var_1186, pad = var_1529_pad_0, pad_type = var_1529_pad_type_0, strides = var_1525, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_139_cast)[name = tensor("op_1529_cast")]; + tensor inputs_33_cast = add(x = var_1529_cast, y = inputs_31_cast)[name = tensor("inputs_33_cast")]; + tensor var_1533 = const()[name = tensor("op_1533"), val = tensor([1])]; + tensor channels_mean_33_cast = reduce_mean(axes = var_1533, keep_dims = var_1181, x = inputs_33_cast)[name = tensor("channels_mean_33_cast")]; + tensor zero_mean_33_cast = sub(x = inputs_33_cast, y = channels_mean_33_cast)[name = tensor("zero_mean_33_cast")]; + tensor zero_mean_sq_33_cast = mul(x = zero_mean_33_cast, y = zero_mean_33_cast)[name = tensor("zero_mean_sq_33_cast")]; + tensor var_1537 = const()[name = tensor("op_1537"), val = tensor([1])]; + tensor var_1538_cast = reduce_mean(axes = var_1537, keep_dims = var_1181, x = zero_mean_sq_33_cast)[name = tensor("op_1538_cast")]; + tensor var_1539_to_fp16 = const()[name = tensor("op_1539_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1540_cast = add(x = var_1538_cast, y = var_1539_to_fp16)[name = tensor("op_1540_cast")]; + tensor denom_33_epsilon_0_to_fp16 = const()[name = tensor("denom_33_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_33_cast = rsqrt(epsilon = denom_33_epsilon_0_to_fp16, x = var_1540_cast)[name = 
tensor("denom_33_cast")]; + tensor out_33_cast = mul(x = zero_mean_33_cast, y = denom_33_cast)[name = tensor("out_33_cast")]; + tensor var_1544_to_fp16 = const()[name = tensor("op_1544_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113885568)))]; + tensor var_1545_cast = add(x = out_33_cast, y = var_1544_to_fp16)[name = tensor("op_1545_cast")]; + tensor var_1547_to_fp16 = const()[name = tensor("op_1547_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113888192)))]; + tensor hidden_states_73_cast = mul(x = var_1545_cast, y = var_1547_to_fp16)[name = tensor("hidden_states_73_cast")]; + tensor var_1554 = const()[name = tensor("op_1554"), val = tensor([1, 1])]; + tensor var_1556 = const()[name = tensor("op_1556"), val = tensor([1, 1])]; + tensor q_23_pad_type_0 = const()[name = tensor("q_23_pad_type_0"), val = tensor("custom")]; + tensor q_23_pad_0 = const()[name = tensor("q_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113890816))), lut = tensor([-0x1.aap-6, -0x1.058p-7, 0x1.068p-7, 0x1.aa8p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_23_cast = conv(dilations = var_1556, groups = var_1186, pad = q_23_pad_0, pad_type = q_23_pad_type_0, strides = var_1554, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_73_cast)[name = tensor("q_23_cast")]; + tensor var_1560 = const()[name = tensor("op_1560"), val = tensor([1, 1])]; + tensor var_1562 = const()[name = tensor("op_1562"), val = tensor([1, 1])]; + tensor k_23_pad_type_0 = const()[name = tensor("k_23_pad_type_0"), val = tensor("custom")]; + tensor k_23_pad_0 = const()[name = tensor("k_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(114300480))), lut = tensor([-0x1.85cp-6, -0x1.cbp-8, 0x1.c18p-8, 0x1.82cp-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_23_cast = conv(dilations = var_1562, groups = var_1186, pad = k_23_pad_0, pad_type = k_23_pad_type_0, strides = var_1560, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_23_cast")]; + tensor var_1566 = const()[name = tensor("op_1566"), val = tensor([1, 1])]; + tensor var_1568 = const()[name = tensor("op_1568"), val = tensor([1, 1])]; + tensor v_23_pad_type_0 = const()[name = tensor("v_23_pad_type_0"), val = tensor("custom")]; + tensor v_23_pad_0 = const()[name = tensor("v_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(114955904))), lut = tensor([-0x1.a5p-6, -0x1.e3p-8, 0x1.e54p-8, 0x1.a5p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 
1])]; + tensor v_23_cast = conv(dilations = var_1568, groups = var_1186, pad = v_23_pad_0, pad_type = v_23_pad_type_0, strides = var_1566, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_23_cast")]; + tensor var_1572 = const()[name = tensor("op_1572"), val = tensor([2, 20, 64, -1])]; + tensor var_1573_cast = reshape(shape = var_1572, x = q_23_cast)[name = tensor("op_1573_cast")]; + tensor var_1574 = const()[name = tensor("op_1574"), val = tensor([2, 20, 64, -1])]; + tensor var_1575_cast = reshape(shape = var_1574, x = k_23_cast)[name = tensor("op_1575_cast")]; + tensor var_1576 = const()[name = tensor("op_1576"), val = tensor([2, 20, 64, -1])]; + tensor var_1577_cast = reshape(shape = var_1576, x = v_23_cast)[name = tensor("op_1577_cast")]; + tensor attn_weights_45_transpose_x_0 = const()[name = tensor("attn_weights_45_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_45_transpose_y_0 = const()[name = tensor("attn_weights_45_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_45_cast = matmul(transpose_x = attn_weights_45_transpose_x_0, transpose_y = attn_weights_45_transpose_y_0, x = var_1573_cast, y = var_1575_cast)[name = tensor("attn_weights_45_cast")]; + tensor attn_weights_47_cast = mul(x = attn_weights_45_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_47_cast")]; + tensor var_1581_cast = softmax(axis = var_1170, x = attn_weights_47_cast)[name = tensor("op_1581_cast")]; + tensor attn_23_transpose_x_0 = const()[name = tensor("attn_23_transpose_x_0"), val = tensor(false)]; + tensor attn_23_transpose_y_0 = const()[name = tensor("attn_23_transpose_y_0"), val = tensor(true)]; + tensor attn_23_cast = matmul(transpose_x = attn_23_transpose_x_0, transpose_y = attn_23_transpose_y_0, x = var_1577_cast, y = var_1581_cast)[name = tensor("attn_23_cast")]; + tensor var_1585 = const()[name = tensor("op_1585"), val = tensor([2, 1280, 1, -1])]; + tensor input_141_cast = reshape(shape = var_1585, x = attn_23_cast)[name = tensor("input_141_cast")]; + tensor var_1590 = const()[name = tensor("op_1590"), val = tensor([1, 1])]; + tensor var_1592 = const()[name = tensor("op_1592"), val = tensor([1, 1])]; + tensor var_1594_pad_type_0 = const()[name = tensor("op_1594_pad_type_0"), val = tensor("custom")]; + tensor var_1594_pad_0 = const()[name = tensor("op_1594_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115611328))), lut = tensor([-0x1.ca8p-8, 0x1.ccp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115816192)))]; + tensor var_1594_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_1592, groups = var_1186, pad = var_1594_pad_0, pad_type = var_1594_pad_type_0, strides = var_1590, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_141_cast)[name = tensor("op_1594_cast")]; + tensor inputs_35_cast = 
add(x = var_1594_cast, y = inputs_33_cast)[name = tensor("inputs_35_cast")]; + tensor var_1598 = const()[name = tensor("op_1598"), val = tensor([1])]; + tensor channels_mean_35_cast = reduce_mean(axes = var_1598, keep_dims = var_1181, x = inputs_35_cast)[name = tensor("channels_mean_35_cast")]; + tensor zero_mean_35_cast = sub(x = inputs_35_cast, y = channels_mean_35_cast)[name = tensor("zero_mean_35_cast")]; + tensor zero_mean_sq_35_cast = mul(x = zero_mean_35_cast, y = zero_mean_35_cast)[name = tensor("zero_mean_sq_35_cast")]; + tensor var_1602 = const()[name = tensor("op_1602"), val = tensor([1])]; + tensor var_1603_cast = reduce_mean(axes = var_1602, keep_dims = var_1181, x = zero_mean_sq_35_cast)[name = tensor("op_1603_cast")]; + tensor var_1604_to_fp16 = const()[name = tensor("op_1604_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1605_cast = add(x = var_1603_cast, y = var_1604_to_fp16)[name = tensor("op_1605_cast")]; + tensor denom_35_epsilon_0_to_fp16 = const()[name = tensor("denom_35_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_35_cast = rsqrt(epsilon = denom_35_epsilon_0_to_fp16, x = var_1605_cast)[name = tensor("denom_35_cast")]; + tensor out_35_cast = mul(x = zero_mean_35_cast, y = denom_35_cast)[name = tensor("out_35_cast")]; + tensor var_1609_to_fp16 = const()[name = tensor("op_1609_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115818816)))]; + tensor var_1610_cast = add(x = out_35_cast, y = var_1609_to_fp16)[name = tensor("op_1610_cast")]; + tensor var_1612_to_fp16 = const()[name = tensor("op_1612_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115821440)))]; + tensor input_143_cast = mul(x = var_1610_cast, y = var_1612_to_fp16)[name = tensor("input_143_cast")]; + tensor var_1620 = const()[name = tensor("op_1620"), val = tensor([1, 1])]; + tensor var_1622 = const()[name = tensor("op_1622"), val = tensor([1, 1])]; + tensor var_1624_pad_type_0 = const()[name = tensor("op_1624_pad_type_0"), val = tensor("custom")]; + tensor var_1624_pad_0 = const()[name = tensor("op_1624_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(115824064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(122377728))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(122377856)))]; + tensor var_1624_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_1622, groups = var_1186, pad = var_1624_pad_0, pad_type = var_1624_pad_type_0, strides = var_1620, weight = down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_143_cast)[name = tensor("op_1624_cast")]; + tensor var_1625_split_sizes_0 = const()[name = tensor("op_1625_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1625_axis_0 = const()[name = tensor("op_1625_axis_0"), val = tensor(1)]; + tensor var_1625_cast_0, tensor 
var_1625_cast_1 = split(axis = var_1625_axis_0, split_sizes = var_1625_split_sizes_0, x = var_1624_cast)[name = tensor("op_1625_cast")]; + tensor var_1627_mode_0 = const()[name = tensor("op_1627_mode_0"), val = tensor("EXACT")]; + tensor var_1627_cast = gelu(mode = var_1627_mode_0, x = var_1625_cast_1)[name = tensor("op_1627_cast")]; + tensor input_145_cast = mul(x = var_1625_cast_0, y = var_1627_cast)[name = tensor("input_145_cast")]; + tensor var_1631 = const()[name = tensor("op_1631"), val = tensor([1, 1])]; + tensor var_1633 = const()[name = tensor("op_1633"), val = tensor([1, 1])]; + tensor var_1635_pad_type_0 = const()[name = tensor("op_1635_pad_type_0"), val = tensor("custom")]; + tensor var_1635_pad_0 = const()[name = tensor("op_1635_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(122398400))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(125675264))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(125675392)))]; + tensor var_1635_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_1633, groups = var_1186, pad = var_1635_pad_0, pad_type = var_1635_pad_type_0, strides = var_1631, weight = down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_145_cast)[name = tensor("op_1635_cast")]; + tensor inputs_37_cast = add(x = var_1635_cast, y = inputs_35_cast)[name = tensor("inputs_37_cast")]; + tensor var_1645 = const()[name = tensor("op_1645"), val = tensor([1])]; + tensor channels_mean_37_cast = reduce_mean(axes = var_1645, keep_dims = var_1181, x = inputs_37_cast)[name = tensor("channels_mean_37_cast")]; + tensor zero_mean_37_cast = sub(x = inputs_37_cast, y = channels_mean_37_cast)[name = tensor("zero_mean_37_cast")]; + tensor zero_mean_sq_37_cast = mul(x = zero_mean_37_cast, y = zero_mean_37_cast)[name = tensor("zero_mean_sq_37_cast")]; + tensor var_1649 = const()[name = tensor("op_1649"), val = tensor([1])]; + tensor var_1650_cast = reduce_mean(axes = var_1649, keep_dims = var_1181, x = zero_mean_sq_37_cast)[name = tensor("op_1650_cast")]; + tensor var_1651_to_fp16 = const()[name = tensor("op_1651_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1652_cast = add(x = var_1650_cast, y = var_1651_to_fp16)[name = tensor("op_1652_cast")]; + tensor denom_37_epsilon_0_to_fp16 = const()[name = tensor("denom_37_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_37_cast = rsqrt(epsilon = denom_37_epsilon_0_to_fp16, x = var_1652_cast)[name = tensor("denom_37_cast")]; + tensor out_37_cast = mul(x = zero_mean_37_cast, y = denom_37_cast)[name = tensor("out_37_cast")]; + tensor var_1656_to_fp16 = const()[name = tensor("op_1656_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(125678016)))]; + tensor var_1657_cast = add(x = out_37_cast, y = var_1656_to_fp16)[name = tensor("op_1657_cast")]; + tensor var_1659_to_fp16 = const()[name = tensor("op_1659_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(125680640)))]; + tensor hidden_states_77_cast = mul(x = var_1657_cast, y = var_1659_to_fp16)[name = tensor("hidden_states_77_cast")]; + tensor var_1666 = const()[name = tensor("op_1666"), val = tensor([1, 1])]; + tensor var_1668 = const()[name = tensor("op_1668"), val = tensor([1, 1])]; + tensor q_25_pad_type_0 = const()[name = tensor("q_25_pad_type_0"), val = tensor("custom")]; + tensor q_25_pad_0 = const()[name = tensor("q_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(125683264))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126502528))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_25_cast = conv(dilations = var_1668, groups = var_1186, pad = q_25_pad_0, pad_type = q_25_pad_type_0, strides = var_1666, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_77_cast)[name = tensor("q_25_cast")]; + tensor var_1672 = const()[name = tensor("op_1672"), val = tensor([1, 1])]; + tensor var_1674 = const()[name = tensor("op_1674"), val = tensor([1, 1])]; + tensor k_25_pad_type_0 = const()[name = tensor("k_25_pad_type_0"), val = tensor("custom")]; + tensor k_25_pad_0 = const()[name = tensor("k_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(126502656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(127321920))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_25_cast = conv(dilations = var_1674, groups = var_1186, pad = k_25_pad_0, pad_type = k_25_pad_type_0, strides = var_1672, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_77_cast)[name = tensor("k_25_cast")]; + tensor var_1678 = const()[name = tensor("op_1678"), val = tensor([1, 1])]; + tensor var_1680 = const()[name = tensor("op_1680"), val = tensor([1, 1])]; + tensor v_25_pad_type_0 = const()[name = tensor("v_25_pad_type_0"), val = tensor("custom")]; + tensor v_25_pad_0 = const()[name = tensor("v_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(127322048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128141312))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_25_cast = conv(dilations = var_1680, groups = var_1186, pad = v_25_pad_0, pad_type = v_25_pad_type_0, strides = var_1678, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_77_cast)[name = tensor("v_25_cast")]; + tensor var_1684 = const()[name = tensor("op_1684"), val = tensor([2, 20, 64, 
-1])]; + tensor var_1685_cast = reshape(shape = var_1684, x = q_25_cast)[name = tensor("op_1685_cast")]; + tensor var_1686 = const()[name = tensor("op_1686"), val = tensor([2, 20, 64, -1])]; + tensor var_1687_cast = reshape(shape = var_1686, x = k_25_cast)[name = tensor("op_1687_cast")]; + tensor var_1688 = const()[name = tensor("op_1688"), val = tensor([2, 20, 64, -1])]; + tensor var_1689_cast = reshape(shape = var_1688, x = v_25_cast)[name = tensor("op_1689_cast")]; + tensor attn_weights_49_transpose_x_0 = const()[name = tensor("attn_weights_49_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_49_transpose_y_0 = const()[name = tensor("attn_weights_49_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_49_cast = matmul(transpose_x = attn_weights_49_transpose_x_0, transpose_y = attn_weights_49_transpose_y_0, x = var_1685_cast, y = var_1687_cast)[name = tensor("attn_weights_49_cast")]; + tensor attn_weights_51_cast = mul(x = attn_weights_49_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_51_cast")]; + tensor var_1693_cast = softmax(axis = var_1170, x = attn_weights_51_cast)[name = tensor("op_1693_cast")]; + tensor attn_25_transpose_x_0 = const()[name = tensor("attn_25_transpose_x_0"), val = tensor(false)]; + tensor attn_25_transpose_y_0 = const()[name = tensor("attn_25_transpose_y_0"), val = tensor(true)]; + tensor attn_25_cast = matmul(transpose_x = attn_25_transpose_x_0, transpose_y = attn_25_transpose_y_0, x = var_1689_cast, y = var_1693_cast)[name = tensor("attn_25_cast")]; + tensor var_1697 = const()[name = tensor("op_1697"), val = tensor([2, 1280, 1, -1])]; + tensor input_147_cast = reshape(shape = var_1697, x = attn_25_cast)[name = tensor("input_147_cast")]; + tensor var_1702 = const()[name = tensor("op_1702"), val = tensor([1, 1])]; + tensor var_1704 = const()[name = tensor("op_1704"), val = tensor([1, 1])]; + tensor var_1706_pad_type_0 = const()[name = tensor("op_1706_pad_type_0"), val = tensor("custom")]; + tensor var_1706_pad_0 = const()[name = tensor("op_1706_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128141440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128960704))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128960832)))]; + tensor var_1706_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_1704, groups = var_1186, pad = var_1706_pad_0, pad_type = var_1706_pad_type_0, strides = var_1702, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_147_cast)[name = tensor("op_1706_cast")]; + tensor inputs_39_cast = add(x = var_1706_cast, y = inputs_37_cast)[name = tensor("inputs_39_cast")]; + tensor var_1710 = const()[name = tensor("op_1710"), val = tensor([1])]; + tensor channels_mean_39_cast = reduce_mean(axes = var_1710, keep_dims = var_1181, x = inputs_39_cast)[name = tensor("channels_mean_39_cast")]; + tensor 
zero_mean_39_cast = sub(x = inputs_39_cast, y = channels_mean_39_cast)[name = tensor("zero_mean_39_cast")]; + tensor zero_mean_sq_39_cast = mul(x = zero_mean_39_cast, y = zero_mean_39_cast)[name = tensor("zero_mean_sq_39_cast")]; + tensor var_1714 = const()[name = tensor("op_1714"), val = tensor([1])]; + tensor var_1715_cast = reduce_mean(axes = var_1714, keep_dims = var_1181, x = zero_mean_sq_39_cast)[name = tensor("op_1715_cast")]; + tensor var_1716_to_fp16 = const()[name = tensor("op_1716_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1717_cast = add(x = var_1715_cast, y = var_1716_to_fp16)[name = tensor("op_1717_cast")]; + tensor denom_39_epsilon_0_to_fp16 = const()[name = tensor("denom_39_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_39_cast = rsqrt(epsilon = denom_39_epsilon_0_to_fp16, x = var_1717_cast)[name = tensor("denom_39_cast")]; + tensor out_39_cast = mul(x = zero_mean_39_cast, y = denom_39_cast)[name = tensor("out_39_cast")]; + tensor var_1721_to_fp16 = const()[name = tensor("op_1721_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128963456)))]; + tensor var_1722_cast = add(x = out_39_cast, y = var_1721_to_fp16)[name = tensor("op_1722_cast")]; + tensor var_1724_to_fp16 = const()[name = tensor("op_1724_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128966080)))]; + tensor hidden_states_79_cast = mul(x = var_1722_cast, y = var_1724_to_fp16)[name = tensor("hidden_states_79_cast")]; + tensor var_1731 = const()[name = tensor("op_1731"), val = tensor([1, 1])]; + tensor var_1733 = const()[name = tensor("op_1733"), val = tensor([1, 1])]; + tensor q_27_pad_type_0 = const()[name = tensor("q_27_pad_type_0"), val = tensor("custom")]; + tensor q_27_pad_0 = const()[name = tensor("q_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(128968704))), lut = tensor([-0x1.72cp-6, -0x1.d2cp-8, 0x1.d5p-8, 0x1.73cp-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_27_cast = conv(dilations = var_1733, groups = var_1186, pad = q_27_pad_0, pad_type = q_27_pad_type_0, strides = var_1731, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_79_cast)[name = tensor("q_27_cast")]; + tensor var_1737 = const()[name = tensor("op_1737"), val = tensor([1, 1])]; + tensor var_1739 = const()[name = tensor("op_1739"), val = tensor([1, 1])]; + tensor k_27_pad_type_0 = const()[name = tensor("k_27_pad_type_0"), val = tensor("custom")]; + tensor k_27_pad_0 = const()[name = tensor("k_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(129378368))), lut = tensor([-0x1.394p-6, -0x1.768p-8, 0x1.704p-8, 0x1.378p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_27_cast = conv(dilations = var_1739, groups = var_1186, pad = k_27_pad_0, pad_type = k_27_pad_type_0, strides = var_1737, weight = 
down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_27_cast")]; + tensor var_1743 = const()[name = tensor("op_1743"), val = tensor([1, 1])]; + tensor var_1745 = const()[name = tensor("op_1745"), val = tensor([1, 1])]; + tensor v_27_pad_type_0 = const()[name = tensor("v_27_pad_type_0"), val = tensor("custom")]; + tensor v_27_pad_0 = const()[name = tensor("v_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130033792))), lut = tensor([-0x1.74cp-6, -0x1.ab8p-8, 0x1.a44p-8, 0x1.72p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_27_cast = conv(dilations = var_1745, groups = var_1186, pad = v_27_pad_0, pad_type = v_27_pad_type_0, strides = var_1743, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_27_cast")]; + tensor var_1749 = const()[name = tensor("op_1749"), val = tensor([2, 20, 64, -1])]; + tensor var_1750_cast = reshape(shape = var_1749, x = q_27_cast)[name = tensor("op_1750_cast")]; + tensor var_1751 = const()[name = tensor("op_1751"), val = tensor([2, 20, 64, -1])]; + tensor var_1752_cast = reshape(shape = var_1751, x = k_27_cast)[name = tensor("op_1752_cast")]; + tensor var_1753 = const()[name = tensor("op_1753"), val = tensor([2, 20, 64, -1])]; + tensor var_1754_cast = reshape(shape = var_1753, x = v_27_cast)[name = tensor("op_1754_cast")]; + tensor attn_weights_53_transpose_x_0 = const()[name = tensor("attn_weights_53_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_53_transpose_y_0 = const()[name = tensor("attn_weights_53_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_53_cast = matmul(transpose_x = attn_weights_53_transpose_x_0, transpose_y = attn_weights_53_transpose_y_0, x = var_1750_cast, y = var_1752_cast)[name = tensor("attn_weights_53_cast")]; + tensor attn_weights_55_cast = mul(x = attn_weights_53_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_55_cast")]; + tensor var_1758_cast = softmax(axis = var_1170, x = attn_weights_55_cast)[name = tensor("op_1758_cast")]; + tensor attn_27_transpose_x_0 = const()[name = tensor("attn_27_transpose_x_0"), val = tensor(false)]; + tensor attn_27_transpose_y_0 = const()[name = tensor("attn_27_transpose_y_0"), val = tensor(true)]; + tensor attn_27_cast = matmul(transpose_x = attn_27_transpose_x_0, transpose_y = attn_27_transpose_y_0, x = var_1754_cast, y = var_1758_cast)[name = tensor("attn_27_cast")]; + tensor var_1762 = const()[name = tensor("op_1762"), val = tensor([2, 1280, 1, -1])]; + tensor input_149_cast = reshape(shape = var_1762, x = attn_27_cast)[name = tensor("input_149_cast")]; + tensor var_1767 = const()[name = tensor("op_1767"), val = tensor([1, 1])]; + tensor var_1769 = const()[name = tensor("op_1769"), val = tensor([1, 1])]; + tensor var_1771_pad_type_0 = const()[name = tensor("op_1771_pad_type_0"), val = tensor("custom")]; + tensor var_1771_pad_0 = const()[name = tensor("op_1771_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(130689216))), lut = tensor([-0x1.8bp-8, 0x1.8bp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130894080)))]; + tensor var_1771_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_1769, groups = var_1186, pad = var_1771_pad_0, pad_type = var_1771_pad_type_0, strides = var_1767, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_149_cast)[name = tensor("op_1771_cast")]; + tensor inputs_41_cast = add(x = var_1771_cast, y = inputs_39_cast)[name = tensor("inputs_41_cast")]; + tensor var_1775 = const()[name = tensor("op_1775"), val = tensor([1])]; + tensor channels_mean_41_cast = reduce_mean(axes = var_1775, keep_dims = var_1181, x = inputs_41_cast)[name = tensor("channels_mean_41_cast")]; + tensor zero_mean_41_cast = sub(x = inputs_41_cast, y = channels_mean_41_cast)[name = tensor("zero_mean_41_cast")]; + tensor zero_mean_sq_41_cast = mul(x = zero_mean_41_cast, y = zero_mean_41_cast)[name = tensor("zero_mean_sq_41_cast")]; + tensor var_1779 = const()[name = tensor("op_1779"), val = tensor([1])]; + tensor var_1780_cast = reduce_mean(axes = var_1779, keep_dims = var_1181, x = zero_mean_sq_41_cast)[name = tensor("op_1780_cast")]; + tensor var_1781_to_fp16 = const()[name = tensor("op_1781_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1782_cast = add(x = var_1780_cast, y = var_1781_to_fp16)[name = tensor("op_1782_cast")]; + tensor denom_41_epsilon_0_to_fp16 = const()[name = tensor("denom_41_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_41_cast = rsqrt(epsilon = denom_41_epsilon_0_to_fp16, x = var_1782_cast)[name = tensor("denom_41_cast")]; + tensor out_41_cast = mul(x = zero_mean_41_cast, y = denom_41_cast)[name = tensor("out_41_cast")]; + tensor var_1786_to_fp16 = const()[name = tensor("op_1786_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130896704)))]; + tensor var_1787_cast = add(x = out_41_cast, y = var_1786_to_fp16)[name = tensor("op_1787_cast")]; + tensor var_1789_to_fp16 = const()[name = tensor("op_1789_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130899328)))]; + tensor input_151_cast = mul(x = var_1787_cast, y = var_1789_to_fp16)[name = tensor("input_151_cast")]; + tensor var_1797 = const()[name = tensor("op_1797"), val = tensor([1, 1])]; + tensor var_1799 = const()[name = tensor("op_1799"), val = tensor([1, 1])]; + tensor var_1801_pad_type_0 = const()[name = tensor("op_1801_pad_type_0"), val = tensor("custom")]; + tensor var_1801_pad_0 = const()[name = tensor("op_1801_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(130901952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(140732416))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + 
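The `constexpr_lut_to_dense` constants in this section are where the mixed-bit palettization recipe is visible in the compiled UNet: each palettized weight in `weights/weight.bin` is stored as bit-packed palette indices plus a small fp16 look-up table (two entries for 1-bit layers, four entries for 2-bit layers; larger tables are referenced as separate blobs rather than inlined). The following is a minimal decoding sketch, not the actual Core ML loader: the helper name `lut_to_dense`, the uint8 index buffer, and the MSB-first bit order are all assumptions made for illustration.

```python
import numpy as np

def lut_to_dense(packed: np.ndarray, lut: np.ndarray, shape: tuple) -> np.ndarray:
    """Expand bit-packed palette indices into a dense fp16 weight tensor.

    `packed` is assumed to be a uint8 buffer of n-bit indices, where
    n = log2(len(lut)); `lut` is the per-tensor palette, e.g. the four-entry
    fp16 LUTs attached to the attn2 projections above.
    """
    nbits = int(np.log2(len(lut)))             # 2-entry LUT -> 1 bit, 4-entry -> 2 bits
    bits = np.unpackbits(packed)               # one 0/1 per bit, MSB-first (assumed)
    n = int(np.prod(shape))
    groups = bits[: n * nbits].reshape(n, nbits)
    place = 1 << np.arange(nbits - 1, -1, -1)  # binary place values within each group
    indices = (groups * place).sum(axis=1)     # n-bit groups -> palette indices
    return np.asarray(lut, dtype=np.float16)[indices].reshape(shape)
```

For a four-entry palette such as the `attn2_to_q` LUT above, this implies 2 bits per weight, i.e. 1280 × 1280 × 2 bits = 409,600 bytes of packed indices, which agrees with the spacing of consecutive blob offsets in this section up to a few dozen bytes of per-blob framing.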
tensor down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(140732608)))]; + tensor var_1801_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_1799, groups = var_1186, pad = var_1801_pad_0, pad_type = var_1801_pad_type_0, strides = var_1797, weight = down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_151_cast)[name = tensor("op_1801_cast")]; + tensor var_1802_split_sizes_0 = const()[name = tensor("op_1802_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1802_axis_0 = const()[name = tensor("op_1802_axis_0"), val = tensor(1)]; + tensor var_1802_cast_0, tensor var_1802_cast_1 = split(axis = var_1802_axis_0, split_sizes = var_1802_split_sizes_0, x = var_1801_cast)[name = tensor("op_1802_cast")]; + tensor var_1804_mode_0 = const()[name = tensor("op_1804_mode_0"), val = tensor("EXACT")]; + tensor var_1804_cast = gelu(mode = var_1804_mode_0, x = var_1802_cast_1)[name = tensor("op_1804_cast")]; + tensor input_153_cast = mul(x = var_1802_cast_0, y = var_1804_cast)[name = tensor("input_153_cast")]; + tensor var_1808 = const()[name = tensor("op_1808"), val = tensor([1, 1])]; + tensor var_1810 = const()[name = tensor("op_1810"), val = tensor([1, 1])]; + tensor var_1812_pad_type_0 = const()[name = tensor("op_1812_pad_type_0"), val = tensor("custom")]; + tensor var_1812_pad_0 = const()[name = tensor("op_1812_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(140753152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(144030016))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(144030144)))]; + tensor var_1812_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_1810, groups = var_1186, pad = var_1812_pad_0, pad_type = var_1812_pad_type_0, strides = var_1808, weight = down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_153_cast)[name = tensor("op_1812_cast")]; + tensor inputs_43_cast = add(x = var_1812_cast, y = inputs_41_cast)[name = tensor("inputs_43_cast")]; + tensor var_1822 = const()[name = tensor("op_1822"), val = tensor([1])]; + tensor channels_mean_43_cast = reduce_mean(axes = var_1822, keep_dims = var_1181, x = inputs_43_cast)[name = tensor("channels_mean_43_cast")]; + tensor zero_mean_43_cast = sub(x = inputs_43_cast, y = channels_mean_43_cast)[name = tensor("zero_mean_43_cast")]; + tensor zero_mean_sq_43_cast = mul(x = zero_mean_43_cast, y = zero_mean_43_cast)[name = tensor("zero_mean_sq_43_cast")]; + tensor var_1826 = const()[name = tensor("op_1826"), val = tensor([1])]; + tensor var_1827_cast = reduce_mean(axes = var_1826, keep_dims = var_1181, x = zero_mean_sq_43_cast)[name = 
tensor("op_1827_cast")]; + tensor var_1828_to_fp16 = const()[name = tensor("op_1828_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1829_cast = add(x = var_1827_cast, y = var_1828_to_fp16)[name = tensor("op_1829_cast")]; + tensor denom_43_epsilon_0_to_fp16 = const()[name = tensor("denom_43_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_43_cast = rsqrt(epsilon = denom_43_epsilon_0_to_fp16, x = var_1829_cast)[name = tensor("denom_43_cast")]; + tensor out_43_cast = mul(x = zero_mean_43_cast, y = denom_43_cast)[name = tensor("out_43_cast")]; + tensor var_1833_to_fp16 = const()[name = tensor("op_1833_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(144032768)))]; + tensor var_1834_cast = add(x = out_43_cast, y = var_1833_to_fp16)[name = tensor("op_1834_cast")]; + tensor var_1836_to_fp16 = const()[name = tensor("op_1836_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(144035392)))]; + tensor hidden_states_83_cast = mul(x = var_1834_cast, y = var_1836_to_fp16)[name = tensor("hidden_states_83_cast")]; + tensor var_1843 = const()[name = tensor("op_1843"), val = tensor([1, 1])]; + tensor var_1845 = const()[name = tensor("op_1845"), val = tensor([1, 1])]; + tensor q_29_pad_type_0 = const()[name = tensor("q_29_pad_type_0"), val = tensor("custom")]; + tensor q_29_pad_0 = const()[name = tensor("q_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(144038016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(144857280))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_29_cast = conv(dilations = var_1845, groups = var_1186, pad = q_29_pad_0, pad_type = q_29_pad_type_0, strides = var_1843, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_83_cast)[name = tensor("q_29_cast")]; + tensor var_1849 = const()[name = tensor("op_1849"), val = tensor([1, 1])]; + tensor var_1851 = const()[name = tensor("op_1851"), val = tensor([1, 1])]; + tensor k_29_pad_type_0 = const()[name = tensor("k_29_pad_type_0"), val = tensor("custom")]; + tensor k_29_pad_0 = const()[name = tensor("k_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(144857408))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(145676672))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_29_cast = conv(dilations = var_1851, groups = var_1186, pad = k_29_pad_0, pad_type = k_29_pad_type_0, strides = var_1849, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_83_cast)[name = tensor("k_29_cast")]; + tensor var_1855 = const()[name = tensor("op_1855"), val = tensor([1, 1])]; + tensor var_1857 = const()[name = tensor("op_1857"), val = tensor([1, 1])]; + tensor v_29_pad_type_0 = const()[name = tensor("v_29_pad_type_0"), val = tensor("custom")]; + tensor 
v_29_pad_0 = const()[name = tensor("v_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(145676800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146496064))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_29_cast = conv(dilations = var_1857, groups = var_1186, pad = v_29_pad_0, pad_type = v_29_pad_type_0, strides = var_1855, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_83_cast)[name = tensor("v_29_cast")]; + tensor var_1861 = const()[name = tensor("op_1861"), val = tensor([2, 20, 64, -1])]; + tensor var_1862_cast = reshape(shape = var_1861, x = q_29_cast)[name = tensor("op_1862_cast")]; + tensor var_1863 = const()[name = tensor("op_1863"), val = tensor([2, 20, 64, -1])]; + tensor var_1864_cast = reshape(shape = var_1863, x = k_29_cast)[name = tensor("op_1864_cast")]; + tensor var_1865 = const()[name = tensor("op_1865"), val = tensor([2, 20, 64, -1])]; + tensor var_1866_cast = reshape(shape = var_1865, x = v_29_cast)[name = tensor("op_1866_cast")]; + tensor attn_weights_57_transpose_x_0 = const()[name = tensor("attn_weights_57_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_57_transpose_y_0 = const()[name = tensor("attn_weights_57_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_57_cast = matmul(transpose_x = attn_weights_57_transpose_x_0, transpose_y = attn_weights_57_transpose_y_0, x = var_1862_cast, y = var_1864_cast)[name = tensor("attn_weights_57_cast")]; + tensor attn_weights_59_cast = mul(x = attn_weights_57_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_59_cast")]; + tensor var_1870_cast = softmax(axis = var_1170, x = attn_weights_59_cast)[name = tensor("op_1870_cast")]; + tensor attn_29_transpose_x_0 = const()[name = tensor("attn_29_transpose_x_0"), val = tensor(false)]; + tensor attn_29_transpose_y_0 = const()[name = tensor("attn_29_transpose_y_0"), val = tensor(true)]; + tensor attn_29_cast = matmul(transpose_x = attn_29_transpose_x_0, transpose_y = attn_29_transpose_y_0, x = var_1866_cast, y = var_1870_cast)[name = tensor("attn_29_cast")]; + tensor var_1874 = const()[name = tensor("op_1874"), val = tensor([2, 1280, 1, -1])]; + tensor input_155_cast = reshape(shape = var_1874, x = attn_29_cast)[name = tensor("input_155_cast")]; + tensor var_1879 = const()[name = tensor("op_1879"), val = tensor([1, 1])]; + tensor var_1881 = const()[name = tensor("op_1881"), val = tensor([1, 1])]; + tensor var_1883_pad_type_0 = const()[name = tensor("op_1883_pad_type_0"), val = tensor("custom")]; + tensor var_1883_pad_0 = const()[name = tensor("op_1883_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146496192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147315456))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name 
= tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147315584)))]; + tensor var_1883_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_1881, groups = var_1186, pad = var_1883_pad_0, pad_type = var_1883_pad_type_0, strides = var_1879, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_155_cast)[name = tensor("op_1883_cast")]; + tensor inputs_45_cast = add(x = var_1883_cast, y = inputs_43_cast)[name = tensor("inputs_45_cast")]; + tensor var_1887 = const()[name = tensor("op_1887"), val = tensor([1])]; + tensor channels_mean_45_cast = reduce_mean(axes = var_1887, keep_dims = var_1181, x = inputs_45_cast)[name = tensor("channels_mean_45_cast")]; + tensor zero_mean_45_cast = sub(x = inputs_45_cast, y = channels_mean_45_cast)[name = tensor("zero_mean_45_cast")]; + tensor zero_mean_sq_45_cast = mul(x = zero_mean_45_cast, y = zero_mean_45_cast)[name = tensor("zero_mean_sq_45_cast")]; + tensor var_1891 = const()[name = tensor("op_1891"), val = tensor([1])]; + tensor var_1892_cast = reduce_mean(axes = var_1891, keep_dims = var_1181, x = zero_mean_sq_45_cast)[name = tensor("op_1892_cast")]; + tensor var_1893_to_fp16 = const()[name = tensor("op_1893_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1894_cast = add(x = var_1892_cast, y = var_1893_to_fp16)[name = tensor("op_1894_cast")]; + tensor denom_45_epsilon_0_to_fp16 = const()[name = tensor("denom_45_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_45_cast = rsqrt(epsilon = denom_45_epsilon_0_to_fp16, x = var_1894_cast)[name = tensor("denom_45_cast")]; + tensor out_45_cast = mul(x = zero_mean_45_cast, y = denom_45_cast)[name = tensor("out_45_cast")]; + tensor var_1898_to_fp16 = const()[name = tensor("op_1898_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147318208)))]; + tensor var_1899_cast = add(x = out_45_cast, y = var_1898_to_fp16)[name = tensor("op_1899_cast")]; + tensor var_1901_to_fp16 = const()[name = tensor("op_1901_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147320832)))]; + tensor hidden_states_85_cast = mul(x = var_1899_cast, y = var_1901_to_fp16)[name = tensor("hidden_states_85_cast")]; + tensor var_1908 = const()[name = tensor("op_1908"), val = tensor([1, 1])]; + tensor var_1910 = const()[name = tensor("op_1910"), val = tensor([1, 1])]; + tensor q_31_pad_type_0 = const()[name = tensor("q_31_pad_type_0"), val = tensor("custom")]; + tensor q_31_pad_0 = const()[name = tensor("q_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147323456))), lut = tensor([-0x1.918p-7, 0x1.924p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_31_cast = conv(dilations = var_1910, groups = var_1186, pad = q_31_pad_0, pad_type = q_31_pad_type_0, strides = var_1908, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_85_cast)[name = tensor("q_31_cast")]; + tensor var_1914 = const()[name = tensor("op_1914"), val = tensor([1, 1])]; + tensor 
var_1916 = const()[name = tensor("op_1916"), val = tensor([1, 1])]; + tensor k_31_pad_type_0 = const()[name = tensor("k_31_pad_type_0"), val = tensor("custom")]; + tensor k_31_pad_0 = const()[name = tensor("k_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147528320))), lut = tensor([-0x1.214p-6, -0x1.5c8p-8, 0x1.5bcp-8, 0x1.218p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_31_cast = conv(dilations = var_1916, groups = var_1186, pad = k_31_pad_0, pad_type = k_31_pad_type_0, strides = var_1914, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_31_cast")]; + tensor var_1920 = const()[name = tensor("op_1920"), val = tensor([1, 1])]; + tensor var_1922 = const()[name = tensor("op_1922"), val = tensor([1, 1])]; + tensor v_31_pad_type_0 = const()[name = tensor("v_31_pad_type_0"), val = tensor("custom")]; + tensor v_31_pad_0 = const()[name = tensor("v_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(148183744))), lut = tensor([-0x1.6d4p-6, -0x1.9d8p-8, 0x1.a04p-8, 0x1.6e4p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_31_cast = conv(dilations = var_1922, groups = var_1186, pad = v_31_pad_0, pad_type = v_31_pad_type_0, strides = var_1920, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_31_cast")]; + tensor var_1926 = const()[name = tensor("op_1926"), val = tensor([2, 20, 64, -1])]; + tensor var_1927_cast = reshape(shape = var_1926, x = q_31_cast)[name = tensor("op_1927_cast")]; + tensor var_1928 = const()[name = tensor("op_1928"), val = tensor([2, 20, 64, -1])]; + tensor var_1929_cast = reshape(shape = var_1928, x = k_31_cast)[name = tensor("op_1929_cast")]; + tensor var_1930 = const()[name = tensor("op_1930"), val = tensor([2, 20, 64, -1])]; + tensor var_1931_cast = reshape(shape = var_1930, x = v_31_cast)[name = tensor("op_1931_cast")]; + tensor attn_weights_61_transpose_x_0 = const()[name = tensor("attn_weights_61_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_61_transpose_y_0 = const()[name = tensor("attn_weights_61_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_61_cast = matmul(transpose_x = attn_weights_61_transpose_x_0, transpose_y = attn_weights_61_transpose_y_0, x = var_1927_cast, y = var_1929_cast)[name = tensor("attn_weights_61_cast")]; + tensor attn_weights_63_cast = mul(x = attn_weights_61_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_63_cast")]; + tensor var_1935_cast = softmax(axis = var_1170, x = attn_weights_63_cast)[name = tensor("op_1935_cast")]; + tensor attn_31_transpose_x_0 = const()[name = tensor("attn_31_transpose_x_0"), val = tensor(false)]; + tensor attn_31_transpose_y_0 = const()[name = tensor("attn_31_transpose_y_0"), val = tensor(true)]; + tensor attn_31_cast = matmul(transpose_x = attn_31_transpose_x_0, transpose_y = 
attn_31_transpose_y_0, x = var_1931_cast, y = var_1935_cast)[name = tensor("attn_31_cast")]; + tensor var_1939 = const()[name = tensor("op_1939"), val = tensor([2, 1280, 1, -1])]; + tensor input_157_cast = reshape(shape = var_1939, x = attn_31_cast)[name = tensor("input_157_cast")]; + tensor var_1944 = const()[name = tensor("op_1944"), val = tensor([1, 1])]; + tensor var_1946 = const()[name = tensor("op_1946"), val = tensor([1, 1])]; + tensor var_1948_pad_type_0 = const()[name = tensor("op_1948_pad_type_0"), val = tensor("custom")]; + tensor var_1948_pad_0 = const()[name = tensor("op_1948_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(148839168))), lut = tensor([-0x1.8a8p-8, 0x1.89cp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(149044032)))]; + tensor var_1948_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_1946, groups = var_1186, pad = var_1948_pad_0, pad_type = var_1948_pad_type_0, strides = var_1944, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_157_cast)[name = tensor("op_1948_cast")]; + tensor inputs_47_cast = add(x = var_1948_cast, y = inputs_45_cast)[name = tensor("inputs_47_cast")]; + tensor var_1952 = const()[name = tensor("op_1952"), val = tensor([1])]; + tensor channels_mean_47_cast = reduce_mean(axes = var_1952, keep_dims = var_1181, x = inputs_47_cast)[name = tensor("channels_mean_47_cast")]; + tensor zero_mean_47_cast = sub(x = inputs_47_cast, y = channels_mean_47_cast)[name = tensor("zero_mean_47_cast")]; + tensor zero_mean_sq_47_cast = mul(x = zero_mean_47_cast, y = zero_mean_47_cast)[name = tensor("zero_mean_sq_47_cast")]; + tensor var_1956 = const()[name = tensor("op_1956"), val = tensor([1])]; + tensor var_1957_cast = reduce_mean(axes = var_1956, keep_dims = var_1181, x = zero_mean_sq_47_cast)[name = tensor("op_1957_cast")]; + tensor var_1958_to_fp16 = const()[name = tensor("op_1958_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1959_cast = add(x = var_1957_cast, y = var_1958_to_fp16)[name = tensor("op_1959_cast")]; + tensor denom_47_epsilon_0_to_fp16 = const()[name = tensor("denom_47_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_47_cast = rsqrt(epsilon = denom_47_epsilon_0_to_fp16, x = var_1959_cast)[name = tensor("denom_47_cast")]; + tensor out_47_cast = mul(x = zero_mean_47_cast, y = denom_47_cast)[name = tensor("out_47_cast")]; + tensor var_1963_to_fp16 = const()[name = tensor("op_1963_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(149046656)))]; + tensor var_1964_cast = add(x = out_47_cast, y = var_1963_to_fp16)[name = tensor("op_1964_cast")]; + tensor var_1966_to_fp16 = const()[name = tensor("op_1966_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(149049280)))]; + tensor input_159_cast = mul(x = var_1964_cast, y = var_1966_to_fp16)[name = 
tensor("input_159_cast")]; + tensor var_1974 = const()[name = tensor("op_1974"), val = tensor([1, 1])]; + tensor var_1976 = const()[name = tensor("op_1976"), val = tensor([1, 1])]; + tensor var_1978_pad_type_0 = const()[name = tensor("op_1978_pad_type_0"), val = tensor("custom")]; + tensor var_1978_pad_0 = const()[name = tensor("op_1978_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(149051904))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(155605568))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(155605696)))]; + tensor var_1978_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_1976, groups = var_1186, pad = var_1978_pad_0, pad_type = var_1978_pad_type_0, strides = var_1974, weight = down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_159_cast)[name = tensor("op_1978_cast")]; + tensor var_1979_split_sizes_0 = const()[name = tensor("op_1979_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1979_axis_0 = const()[name = tensor("op_1979_axis_0"), val = tensor(1)]; + tensor var_1979_cast_0, tensor var_1979_cast_1 = split(axis = var_1979_axis_0, split_sizes = var_1979_split_sizes_0, x = var_1978_cast)[name = tensor("op_1979_cast")]; + tensor var_1981_mode_0 = const()[name = tensor("op_1981_mode_0"), val = tensor("EXACT")]; + tensor var_1981_cast = gelu(mode = var_1981_mode_0, x = var_1979_cast_1)[name = tensor("op_1981_cast")]; + tensor input_161_cast = mul(x = var_1979_cast_0, y = var_1981_cast)[name = tensor("input_161_cast")]; + tensor var_1985 = const()[name = tensor("op_1985"), val = tensor([1, 1])]; + tensor var_1987 = const()[name = tensor("op_1987"), val = tensor([1, 1])]; + tensor var_1989_pad_type_0 = const()[name = tensor("op_1989_pad_type_0"), val = tensor("custom")]; + tensor var_1989_pad_0 = const()[name = tensor("op_1989_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(155626240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160541504))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160541696)))]; + tensor var_1989_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_1987, groups = var_1186, pad = var_1989_pad_0, pad_type = var_1989_pad_type_0, strides = var_1985, weight = 
down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_161_cast)[name = tensor("op_1989_cast")]; + tensor inputs_49_cast = add(x = var_1989_cast, y = inputs_47_cast)[name = tensor("inputs_49_cast")]; + tensor var_1999 = const()[name = tensor("op_1999"), val = tensor([1])]; + tensor channels_mean_49_cast = reduce_mean(axes = var_1999, keep_dims = var_1181, x = inputs_49_cast)[name = tensor("channels_mean_49_cast")]; + tensor zero_mean_49_cast = sub(x = inputs_49_cast, y = channels_mean_49_cast)[name = tensor("zero_mean_49_cast")]; + tensor zero_mean_sq_49_cast = mul(x = zero_mean_49_cast, y = zero_mean_49_cast)[name = tensor("zero_mean_sq_49_cast")]; + tensor var_2003 = const()[name = tensor("op_2003"), val = tensor([1])]; + tensor var_2004_cast = reduce_mean(axes = var_2003, keep_dims = var_1181, x = zero_mean_sq_49_cast)[name = tensor("op_2004_cast")]; + tensor var_2005_to_fp16 = const()[name = tensor("op_2005_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2006_cast = add(x = var_2004_cast, y = var_2005_to_fp16)[name = tensor("op_2006_cast")]; + tensor denom_49_epsilon_0_to_fp16 = const()[name = tensor("denom_49_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_49_cast = rsqrt(epsilon = denom_49_epsilon_0_to_fp16, x = var_2006_cast)[name = tensor("denom_49_cast")]; + tensor out_49_cast = mul(x = zero_mean_49_cast, y = denom_49_cast)[name = tensor("out_49_cast")]; + tensor var_2010_to_fp16 = const()[name = tensor("op_2010_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160544320)))]; + tensor var_2011_cast = add(x = out_49_cast, y = var_2010_to_fp16)[name = tensor("op_2011_cast")]; + tensor var_2013_to_fp16 = const()[name = tensor("op_2013_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160546944)))]; + tensor hidden_states_89_cast = mul(x = var_2011_cast, y = var_2013_to_fp16)[name = tensor("hidden_states_89_cast")]; + tensor var_2020 = const()[name = tensor("op_2020"), val = tensor([1, 1])]; + tensor var_2022 = const()[name = tensor("op_2022"), val = tensor([1, 1])]; + tensor q_33_pad_type_0 = const()[name = tensor("q_33_pad_type_0"), val = tensor("custom")]; + tensor q_33_pad_0 = const()[name = tensor("q_33_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(160549568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(161368832))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_33_cast = conv(dilations = var_2022, groups = var_1186, pad = q_33_pad_0, pad_type = q_33_pad_type_0, strides = var_2020, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_89_cast)[name = tensor("q_33_cast")]; + tensor var_2026 = const()[name = tensor("op_2026"), val = tensor([1, 1])]; + tensor var_2028 = const()[name = tensor("op_2028"), val = tensor([1, 1])]; + tensor k_33_pad_type_0 = const()[name = tensor("k_33_pad_type_0"), val = tensor("custom")]; + tensor k_33_pad_0 = const()[name = tensor("k_33_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices 
= tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(161368960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(162188224))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_33_cast = conv(dilations = var_2028, groups = var_1186, pad = k_33_pad_0, pad_type = k_33_pad_type_0, strides = var_2026, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_89_cast)[name = tensor("k_33_cast")]; + tensor var_2032 = const()[name = tensor("op_2032"), val = tensor([1, 1])]; + tensor var_2034 = const()[name = tensor("op_2034"), val = tensor([1, 1])]; + tensor v_33_pad_type_0 = const()[name = tensor("v_33_pad_type_0"), val = tensor("custom")]; + tensor v_33_pad_0 = const()[name = tensor("v_33_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(162188352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163007616))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_33_cast = conv(dilations = var_2034, groups = var_1186, pad = v_33_pad_0, pad_type = v_33_pad_type_0, strides = var_2032, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_89_cast)[name = tensor("v_33_cast")]; + tensor var_2038 = const()[name = tensor("op_2038"), val = tensor([2, 20, 64, -1])]; + tensor var_2039_cast = reshape(shape = var_2038, x = q_33_cast)[name = tensor("op_2039_cast")]; + tensor var_2040 = const()[name = tensor("op_2040"), val = tensor([2, 20, 64, -1])]; + tensor var_2041_cast = reshape(shape = var_2040, x = k_33_cast)[name = tensor("op_2041_cast")]; + tensor var_2042 = const()[name = tensor("op_2042"), val = tensor([2, 20, 64, -1])]; + tensor var_2043_cast = reshape(shape = var_2042, x = v_33_cast)[name = tensor("op_2043_cast")]; + tensor attn_weights_65_transpose_x_0 = const()[name = tensor("attn_weights_65_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_65_transpose_y_0 = const()[name = tensor("attn_weights_65_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_65_cast = matmul(transpose_x = attn_weights_65_transpose_x_0, transpose_y = attn_weights_65_transpose_y_0, x = var_2039_cast, y = var_2041_cast)[name = tensor("attn_weights_65_cast")]; + tensor attn_weights_67_cast = mul(x = attn_weights_65_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_67_cast")]; + tensor var_2047_cast = softmax(axis = var_1170, x = attn_weights_67_cast)[name = tensor("op_2047_cast")]; + tensor attn_33_transpose_x_0 = const()[name = tensor("attn_33_transpose_x_0"), val = tensor(false)]; + tensor attn_33_transpose_y_0 = const()[name = tensor("attn_33_transpose_y_0"), val = tensor(true)]; + tensor attn_33_cast = matmul(transpose_x = attn_33_transpose_x_0, transpose_y = attn_33_transpose_y_0, x = var_2043_cast, y = var_2047_cast)[name = tensor("attn_33_cast")]; + tensor var_2051 = const()[name = tensor("op_2051"), val = tensor([2, 1280, 1, -1])]; + tensor input_163_cast = reshape(shape = var_2051, x = attn_33_cast)[name = tensor("input_163_cast")]; + tensor var_2056 = const()[name = 
tensor("op_2056"), val = tensor([1, 1])]; + tensor var_2058 = const()[name = tensor("op_2058"), val = tensor([1, 1])]; + tensor var_2060_pad_type_0 = const()[name = tensor("op_2060_pad_type_0"), val = tensor("custom")]; + tensor var_2060_pad_0 = const()[name = tensor("op_2060_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163007744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163827008))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163827136)))]; + tensor var_2060_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_2058, groups = var_1186, pad = var_2060_pad_0, pad_type = var_2060_pad_type_0, strides = var_2056, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_163_cast)[name = tensor("op_2060_cast")]; + tensor inputs_51_cast = add(x = var_2060_cast, y = inputs_49_cast)[name = tensor("inputs_51_cast")]; + tensor var_2064 = const()[name = tensor("op_2064"), val = tensor([1])]; + tensor channels_mean_51_cast = reduce_mean(axes = var_2064, keep_dims = var_1181, x = inputs_51_cast)[name = tensor("channels_mean_51_cast")]; + tensor zero_mean_51_cast = sub(x = inputs_51_cast, y = channels_mean_51_cast)[name = tensor("zero_mean_51_cast")]; + tensor zero_mean_sq_51_cast = mul(x = zero_mean_51_cast, y = zero_mean_51_cast)[name = tensor("zero_mean_sq_51_cast")]; + tensor var_2068 = const()[name = tensor("op_2068"), val = tensor([1])]; + tensor var_2069_cast = reduce_mean(axes = var_2068, keep_dims = var_1181, x = zero_mean_sq_51_cast)[name = tensor("op_2069_cast")]; + tensor var_2070_to_fp16 = const()[name = tensor("op_2070_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2071_cast = add(x = var_2069_cast, y = var_2070_to_fp16)[name = tensor("op_2071_cast")]; + tensor denom_51_epsilon_0_to_fp16 = const()[name = tensor("denom_51_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_51_cast = rsqrt(epsilon = denom_51_epsilon_0_to_fp16, x = var_2071_cast)[name = tensor("denom_51_cast")]; + tensor out_51_cast = mul(x = zero_mean_51_cast, y = denom_51_cast)[name = tensor("out_51_cast")]; + tensor var_2075_to_fp16 = const()[name = tensor("op_2075_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163829760)))]; + tensor var_2076_cast = add(x = out_51_cast, y = var_2075_to_fp16)[name = tensor("op_2076_cast")]; + tensor var_2078_to_fp16 = const()[name = tensor("op_2078_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163832384)))]; + tensor hidden_states_91_cast = mul(x = var_2076_cast, y = var_2078_to_fp16)[name = tensor("hidden_states_91_cast")]; + tensor var_2085 = const()[name = tensor("op_2085"), val = tensor([1, 1])]; + tensor var_2087 = const()[name = tensor("op_2087"), val = tensor([1, 1])]; + tensor q_35_pad_type_0 = const()[name = tensor("q_35_pad_type_0"), val 
= tensor("custom")]; + tensor q_35_pad_0 = const()[name = tensor("q_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(163835008))), lut = tensor([-0x1.83p-7, 0x1.82cp-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_35_cast = conv(dilations = var_2087, groups = var_1186, pad = q_35_pad_0, pad_type = q_35_pad_type_0, strides = var_2085, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_91_cast)[name = tensor("q_35_cast")]; + tensor var_2091 = const()[name = tensor("op_2091"), val = tensor([1, 1])]; + tensor var_2093 = const()[name = tensor("op_2093"), val = tensor([1, 1])]; + tensor k_35_pad_type_0 = const()[name = tensor("k_35_pad_type_0"), val = tensor("custom")]; + tensor k_35_pad_0 = const()[name = tensor("k_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(164039872))), lut = tensor([-0x1.064p-6, -0x1.42p-8, 0x1.42cp-8, 0x1.064p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_35_cast = conv(dilations = var_2093, groups = var_1186, pad = k_35_pad_0, pad_type = k_35_pad_type_0, strides = var_2091, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_35_cast")]; + tensor var_2097 = const()[name = tensor("op_2097"), val = tensor([1, 1])]; + tensor var_2099 = const()[name = tensor("op_2099"), val = tensor([1, 1])]; + tensor v_35_pad_type_0 = const()[name = tensor("v_35_pad_type_0"), val = tensor("custom")]; + tensor v_35_pad_0 = const()[name = tensor("v_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(164695296))), lut = tensor([-0x1.3e4p-7, 0x1.3e8p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_35_cast = conv(dilations = var_2099, groups = var_1186, pad = v_35_pad_0, pad_type = v_35_pad_type_0, strides = var_2097, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_35_cast")]; + tensor var_2103 = const()[name = tensor("op_2103"), val = tensor([2, 20, 64, -1])]; + tensor var_2104_cast = reshape(shape = var_2103, x = q_35_cast)[name = tensor("op_2104_cast")]; + tensor var_2105 = const()[name = tensor("op_2105"), val = tensor([2, 20, 64, -1])]; + tensor var_2106_cast = reshape(shape = var_2105, x = k_35_cast)[name = tensor("op_2106_cast")]; + tensor var_2107 = const()[name = tensor("op_2107"), val = tensor([2, 20, 64, -1])]; + tensor var_2108_cast = reshape(shape = var_2107, x = v_35_cast)[name = tensor("op_2108_cast")]; + tensor attn_weights_69_transpose_x_0 = const()[name = tensor("attn_weights_69_transpose_x_0"), val = tensor(true)]; + 
tensor attn_weights_69_transpose_y_0 = const()[name = tensor("attn_weights_69_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_69_cast = matmul(transpose_x = attn_weights_69_transpose_x_0, transpose_y = attn_weights_69_transpose_y_0, x = var_2104_cast, y = var_2106_cast)[name = tensor("attn_weights_69_cast")]; + tensor attn_weights_71_cast = mul(x = attn_weights_69_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_71_cast")]; + tensor var_2112_cast = softmax(axis = var_1170, x = attn_weights_71_cast)[name = tensor("op_2112_cast")]; + tensor attn_35_transpose_x_0 = const()[name = tensor("attn_35_transpose_x_0"), val = tensor(false)]; + tensor attn_35_transpose_y_0 = const()[name = tensor("attn_35_transpose_y_0"), val = tensor(true)]; + tensor attn_35_cast = matmul(transpose_x = attn_35_transpose_x_0, transpose_y = attn_35_transpose_y_0, x = var_2108_cast, y = var_2112_cast)[name = tensor("attn_35_cast")]; + tensor var_2116 = const()[name = tensor("op_2116"), val = tensor([2, 1280, 1, -1])]; + tensor input_165_cast = reshape(shape = var_2116, x = attn_35_cast)[name = tensor("input_165_cast")]; + tensor var_2121 = const()[name = tensor("op_2121"), val = tensor([1, 1])]; + tensor var_2123 = const()[name = tensor("op_2123"), val = tensor([1, 1])]; + tensor var_2125_pad_type_0 = const()[name = tensor("op_2125_pad_type_0"), val = tensor("custom")]; + tensor var_2125_pad_0 = const()[name = tensor("op_2125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165023040))), lut = tensor([-0x1.684p-8, 0x1.68cp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165227904)))]; + tensor var_2125_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_2123, groups = var_1186, pad = var_2125_pad_0, pad_type = var_2125_pad_type_0, strides = var_2121, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_165_cast)[name = tensor("op_2125_cast")]; + tensor inputs_53_cast = add(x = var_2125_cast, y = inputs_51_cast)[name = tensor("inputs_53_cast")]; + tensor var_2129 = const()[name = tensor("op_2129"), val = tensor([1])]; + tensor channels_mean_53_cast = reduce_mean(axes = var_2129, keep_dims = var_1181, x = inputs_53_cast)[name = tensor("channels_mean_53_cast")]; + tensor zero_mean_53_cast = sub(x = inputs_53_cast, y = channels_mean_53_cast)[name = tensor("zero_mean_53_cast")]; + tensor zero_mean_sq_53_cast = mul(x = zero_mean_53_cast, y = zero_mean_53_cast)[name = tensor("zero_mean_sq_53_cast")]; + tensor var_2133 = const()[name = tensor("op_2133"), val = tensor([1])]; + tensor var_2134_cast = reduce_mean(axes = var_2133, keep_dims = var_1181, x = zero_mean_sq_53_cast)[name = tensor("op_2134_cast")]; + tensor var_2135_to_fp16 = const()[name = tensor("op_2135_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2136_cast = add(x = var_2134_cast, y = var_2135_to_fp16)[name = tensor("op_2136_cast")]; + tensor 
denom_53_epsilon_0_to_fp16 = const()[name = tensor("denom_53_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_53_cast = rsqrt(epsilon = denom_53_epsilon_0_to_fp16, x = var_2136_cast)[name = tensor("denom_53_cast")]; + tensor out_53_cast = mul(x = zero_mean_53_cast, y = denom_53_cast)[name = tensor("out_53_cast")]; + tensor var_2140_to_fp16 = const()[name = tensor("op_2140_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165230528)))]; + tensor var_2141_cast = add(x = out_53_cast, y = var_2140_to_fp16)[name = tensor("op_2141_cast")]; + tensor var_2143_to_fp16 = const()[name = tensor("op_2143_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165233152)))]; + tensor input_167_cast = mul(x = var_2141_cast, y = var_2143_to_fp16)[name = tensor("input_167_cast")]; + tensor var_2151 = const()[name = tensor("op_2151"), val = tensor([1, 1])]; + tensor var_2153 = const()[name = tensor("op_2153"), val = tensor([1, 1])]; + tensor var_2155_pad_type_0 = const()[name = tensor("op_2155_pad_type_0"), val = tensor("custom")]; + tensor var_2155_pad_0 = const()[name = tensor("op_2155_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165235776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(175066240))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(175066432)))]; + tensor var_2155_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_2153, groups = var_1186, pad = var_2155_pad_0, pad_type = var_2155_pad_type_0, strides = var_2151, weight = down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_167_cast)[name = tensor("op_2155_cast")]; + tensor var_2156_split_sizes_0 = const()[name = tensor("op_2156_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2156_axis_0 = const()[name = tensor("op_2156_axis_0"), val = tensor(1)]; + tensor var_2156_cast_0, tensor var_2156_cast_1 = split(axis = var_2156_axis_0, split_sizes = var_2156_split_sizes_0, x = var_2155_cast)[name = tensor("op_2156_cast")]; + tensor var_2158_mode_0 = const()[name = tensor("op_2158_mode_0"), val = tensor("EXACT")]; + tensor var_2158_cast = gelu(mode = var_2158_mode_0, x = var_2156_cast_1)[name = tensor("op_2158_cast")]; + tensor input_169_cast = mul(x = var_2156_cast_0, y = var_2158_cast)[name = tensor("input_169_cast")]; + tensor var_2162 = const()[name = tensor("op_2162"), val = tensor([1, 1])]; + tensor var_2164 = const()[name = tensor("op_2164"), val = tensor([1, 1])]; + tensor var_2166_pad_type_0 = const()[name = tensor("op_2166_pad_type_0"), val = tensor("custom")]; + tensor var_2166_pad_0 = const()[name = tensor("op_2166_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(175086976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(180002240))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(180002432)))]; + tensor var_2166_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_2164, groups = var_1186, pad = var_2166_pad_0, pad_type = var_2166_pad_type_0, strides = var_2162, weight = down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_169_cast)[name = tensor("op_2166_cast")]; + tensor inputs_55_cast = add(x = var_2166_cast, y = inputs_53_cast)[name = tensor("inputs_55_cast")]; + tensor var_2176 = const()[name = tensor("op_2176"), val = tensor([1])]; + tensor channels_mean_55_cast = reduce_mean(axes = var_2176, keep_dims = var_1181, x = inputs_55_cast)[name = tensor("channels_mean_55_cast")]; + tensor zero_mean_55_cast = sub(x = inputs_55_cast, y = channels_mean_55_cast)[name = tensor("zero_mean_55_cast")]; + tensor zero_mean_sq_55_cast = mul(x = zero_mean_55_cast, y = zero_mean_55_cast)[name = tensor("zero_mean_sq_55_cast")]; + tensor var_2180 = const()[name = tensor("op_2180"), val = tensor([1])]; + tensor var_2181_cast = reduce_mean(axes = var_2180, keep_dims = var_1181, x = zero_mean_sq_55_cast)[name = tensor("op_2181_cast")]; + tensor var_2182_to_fp16 = const()[name = tensor("op_2182_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2183_cast = add(x = var_2181_cast, y = var_2182_to_fp16)[name = tensor("op_2183_cast")]; + tensor denom_55_epsilon_0_to_fp16 = const()[name = tensor("denom_55_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_55_cast = rsqrt(epsilon = denom_55_epsilon_0_to_fp16, x = var_2183_cast)[name = tensor("denom_55_cast")]; + tensor out_55_cast = mul(x = zero_mean_55_cast, y = denom_55_cast)[name = tensor("out_55_cast")]; + tensor var_2187_to_fp16 = const()[name = tensor("op_2187_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(180005056)))]; + tensor var_2188_cast = add(x = out_55_cast, y = var_2187_to_fp16)[name = tensor("op_2188_cast")]; + tensor var_2190_to_fp16 = const()[name = tensor("op_2190_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(180007680)))]; + tensor hidden_states_95_cast = mul(x = var_2188_cast, y = var_2190_to_fp16)[name = tensor("hidden_states_95_cast")]; + tensor var_2197 = const()[name = tensor("op_2197"), val = tensor([1, 1])]; + tensor var_2199 = const()[name = tensor("op_2199"), val = tensor([1, 1])]; + tensor q_37_pad_type_0 = const()[name = tensor("q_37_pad_type_0"), val = tensor("custom")]; + tensor q_37_pad_0 = const()[name = tensor("q_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(180010304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(180829568))), name = 
tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_37_cast = conv(dilations = var_2199, groups = var_1186, pad = q_37_pad_0, pad_type = q_37_pad_type_0, strides = var_2197, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_95_cast)[name = tensor("q_37_cast")]; + tensor var_2203 = const()[name = tensor("op_2203"), val = tensor([1, 1])]; + tensor var_2205 = const()[name = tensor("op_2205"), val = tensor([1, 1])]; + tensor k_37_pad_type_0 = const()[name = tensor("k_37_pad_type_0"), val = tensor("custom")]; + tensor k_37_pad_0 = const()[name = tensor("k_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(180829696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181648960))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_37_cast = conv(dilations = var_2205, groups = var_1186, pad = k_37_pad_0, pad_type = k_37_pad_type_0, strides = var_2203, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_95_cast)[name = tensor("k_37_cast")]; + tensor var_2209 = const()[name = tensor("op_2209"), val = tensor([1, 1])]; + tensor var_2211 = const()[name = tensor("op_2211"), val = tensor([1, 1])]; + tensor v_37_pad_type_0 = const()[name = tensor("v_37_pad_type_0"), val = tensor("custom")]; + tensor v_37_pad_0 = const()[name = tensor("v_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181649088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(182468352))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_37_cast = conv(dilations = var_2211, groups = var_1186, pad = v_37_pad_0, pad_type = v_37_pad_type_0, strides = var_2209, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_95_cast)[name = tensor("v_37_cast")]; + tensor var_2215 = const()[name = tensor("op_2215"), val = tensor([2, 20, 64, -1])]; + tensor var_2216_cast = reshape(shape = var_2215, x = q_37_cast)[name = tensor("op_2216_cast")]; + tensor var_2217 = const()[name = tensor("op_2217"), val = tensor([2, 20, 64, -1])]; + tensor var_2218_cast = reshape(shape = var_2217, x = k_37_cast)[name = tensor("op_2218_cast")]; + tensor var_2219 = const()[name = tensor("op_2219"), val = tensor([2, 20, 64, -1])]; + tensor var_2220_cast = reshape(shape = var_2219, x = v_37_cast)[name = tensor("op_2220_cast")]; + tensor attn_weights_73_transpose_x_0 = const()[name = tensor("attn_weights_73_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_73_transpose_y_0 = const()[name = tensor("attn_weights_73_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_73_cast = matmul(transpose_x = attn_weights_73_transpose_x_0, transpose_y = attn_weights_73_transpose_y_0, x = var_2216_cast, y = var_2218_cast)[name = 
tensor("attn_weights_73_cast")]; + tensor attn_weights_75_cast = mul(x = attn_weights_73_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_75_cast")]; + tensor var_2224_cast = softmax(axis = var_1170, x = attn_weights_75_cast)[name = tensor("op_2224_cast")]; + tensor attn_37_transpose_x_0 = const()[name = tensor("attn_37_transpose_x_0"), val = tensor(false)]; + tensor attn_37_transpose_y_0 = const()[name = tensor("attn_37_transpose_y_0"), val = tensor(true)]; + tensor attn_37_cast = matmul(transpose_x = attn_37_transpose_x_0, transpose_y = attn_37_transpose_y_0, x = var_2220_cast, y = var_2224_cast)[name = tensor("attn_37_cast")]; + tensor var_2228 = const()[name = tensor("op_2228"), val = tensor([2, 1280, 1, -1])]; + tensor input_171_cast = reshape(shape = var_2228, x = attn_37_cast)[name = tensor("input_171_cast")]; + tensor var_2233 = const()[name = tensor("op_2233"), val = tensor([1, 1])]; + tensor var_2235 = const()[name = tensor("op_2235"), val = tensor([1, 1])]; + tensor var_2237_pad_type_0 = const()[name = tensor("op_2237_pad_type_0"), val = tensor("custom")]; + tensor var_2237_pad_0 = const()[name = tensor("op_2237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(182468480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183287744))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183287872)))]; + tensor var_2237_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_2235, groups = var_1186, pad = var_2237_pad_0, pad_type = var_2237_pad_type_0, strides = var_2233, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_171_cast)[name = tensor("op_2237_cast")]; + tensor inputs_57_cast = add(x = var_2237_cast, y = inputs_55_cast)[name = tensor("inputs_57_cast")]; + tensor var_2241 = const()[name = tensor("op_2241"), val = tensor([1])]; + tensor channels_mean_57_cast = reduce_mean(axes = var_2241, keep_dims = var_1181, x = inputs_57_cast)[name = tensor("channels_mean_57_cast")]; + tensor zero_mean_57_cast = sub(x = inputs_57_cast, y = channels_mean_57_cast)[name = tensor("zero_mean_57_cast")]; + tensor zero_mean_sq_57_cast = mul(x = zero_mean_57_cast, y = zero_mean_57_cast)[name = tensor("zero_mean_sq_57_cast")]; + tensor var_2245 = const()[name = tensor("op_2245"), val = tensor([1])]; + tensor var_2246_cast = reduce_mean(axes = var_2245, keep_dims = var_1181, x = zero_mean_sq_57_cast)[name = tensor("op_2246_cast")]; + tensor var_2247_to_fp16 = const()[name = tensor("op_2247_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2248_cast = add(x = var_2246_cast, y = var_2247_to_fp16)[name = tensor("op_2248_cast")]; + tensor denom_57_epsilon_0_to_fp16 = const()[name = tensor("denom_57_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_57_cast = rsqrt(epsilon = denom_57_epsilon_0_to_fp16, x = var_2248_cast)[name = tensor("denom_57_cast")]; + tensor 
out_57_cast = mul(x = zero_mean_57_cast, y = denom_57_cast)[name = tensor("out_57_cast")]; + tensor var_2252_to_fp16 = const()[name = tensor("op_2252_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183290496)))]; + tensor var_2253_cast = add(x = out_57_cast, y = var_2252_to_fp16)[name = tensor("op_2253_cast")]; + tensor var_2255_to_fp16 = const()[name = tensor("op_2255_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183293120)))]; + tensor hidden_states_97_cast = mul(x = var_2253_cast, y = var_2255_to_fp16)[name = tensor("hidden_states_97_cast")]; + tensor var_2262 = const()[name = tensor("op_2262"), val = tensor([1, 1])]; + tensor var_2264 = const()[name = tensor("op_2264"), val = tensor([1, 1])]; + tensor q_39_pad_type_0 = const()[name = tensor("q_39_pad_type_0"), val = tensor("custom")]; + tensor q_39_pad_0 = const()[name = tensor("q_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183295744))), lut = tensor([-0x1.7p-7, 0x1.6fcp-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_39_cast = conv(dilations = var_2264, groups = var_1186, pad = q_39_pad_0, pad_type = q_39_pad_type_0, strides = var_2262, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_97_cast)[name = tensor("q_39_cast")]; + tensor var_2268 = const()[name = tensor("op_2268"), val = tensor([1, 1])]; + tensor var_2270 = const()[name = tensor("op_2270"), val = tensor([1, 1])]; + tensor k_39_pad_type_0 = const()[name = tensor("k_39_pad_type_0"), val = tensor("custom")]; + tensor k_39_pad_0 = const()[name = tensor("k_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(183500608))), lut = tensor([-0x1.cbcp-7, -0x1.22p-8, 0x1.214p-8, 0x1.cbcp-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_39_cast = conv(dilations = var_2270, groups = var_1186, pad = k_39_pad_0, pad_type = k_39_pad_type_0, strides = var_2268, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_39_cast")]; + tensor var_2274 = const()[name = tensor("op_2274"), val = tensor([1, 1])]; + tensor var_2276 = const()[name = tensor("op_2276"), val = tensor([1, 1])]; + tensor v_39_pad_type_0 = const()[name = tensor("v_39_pad_type_0"), val = tensor("custom")]; + tensor v_39_pad_0 = const()[name = tensor("v_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184156032))), lut = tensor([-0x1.1e8p-7, 0x1.1e8p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_39_cast = conv(dilations = var_2276, groups = var_1186, pad = 
v_39_pad_0, pad_type = v_39_pad_type_0, strides = var_2274, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_39_cast")]; + tensor var_2280 = const()[name = tensor("op_2280"), val = tensor([2, 20, 64, -1])]; + tensor var_2281_cast = reshape(shape = var_2280, x = q_39_cast)[name = tensor("op_2281_cast")]; + tensor var_2282 = const()[name = tensor("op_2282"), val = tensor([2, 20, 64, -1])]; + tensor var_2283_cast = reshape(shape = var_2282, x = k_39_cast)[name = tensor("op_2283_cast")]; + tensor var_2284 = const()[name = tensor("op_2284"), val = tensor([2, 20, 64, -1])]; + tensor var_2285_cast = reshape(shape = var_2284, x = v_39_cast)[name = tensor("op_2285_cast")]; + tensor attn_weights_77_transpose_x_0 = const()[name = tensor("attn_weights_77_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_77_transpose_y_0 = const()[name = tensor("attn_weights_77_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_77_cast = matmul(transpose_x = attn_weights_77_transpose_x_0, transpose_y = attn_weights_77_transpose_y_0, x = var_2281_cast, y = var_2283_cast)[name = tensor("attn_weights_77_cast")]; + tensor attn_weights_79_cast = mul(x = attn_weights_77_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_79_cast")]; + tensor var_2289_cast = softmax(axis = var_1170, x = attn_weights_79_cast)[name = tensor("op_2289_cast")]; + tensor attn_39_transpose_x_0 = const()[name = tensor("attn_39_transpose_x_0"), val = tensor(false)]; + tensor attn_39_transpose_y_0 = const()[name = tensor("attn_39_transpose_y_0"), val = tensor(true)]; + tensor attn_39_cast = matmul(transpose_x = attn_39_transpose_x_0, transpose_y = attn_39_transpose_y_0, x = var_2285_cast, y = var_2289_cast)[name = tensor("attn_39_cast")]; + tensor var_2293 = const()[name = tensor("op_2293"), val = tensor([2, 1280, 1, -1])]; + tensor input_173_cast = reshape(shape = var_2293, x = attn_39_cast)[name = tensor("input_173_cast")]; + tensor var_2298 = const()[name = tensor("op_2298"), val = tensor([1, 1])]; + tensor var_2300 = const()[name = tensor("op_2300"), val = tensor([1, 1])]; + tensor var_2302_pad_type_0 = const()[name = tensor("op_2302_pad_type_0"), val = tensor("custom")]; + tensor var_2302_pad_0 = const()[name = tensor("op_2302_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184483776))), lut = tensor([-0x1.46cp-8, 0x1.48p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184688640)))]; + tensor var_2302_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_2300, groups = var_1186, pad = var_2302_pad_0, pad_type = var_2302_pad_type_0, strides = var_2298, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_173_cast)[name = tensor("op_2302_cast")]; + tensor inputs_59_cast = add(x = var_2302_cast, y = inputs_57_cast)[name = tensor("inputs_59_cast")]; + 
tensor var_2306 = const()[name = tensor("op_2306"), val = tensor([1])]; + tensor channels_mean_59_cast = reduce_mean(axes = var_2306, keep_dims = var_1181, x = inputs_59_cast)[name = tensor("channels_mean_59_cast")]; + tensor zero_mean_59_cast = sub(x = inputs_59_cast, y = channels_mean_59_cast)[name = tensor("zero_mean_59_cast")]; + tensor zero_mean_sq_59_cast = mul(x = zero_mean_59_cast, y = zero_mean_59_cast)[name = tensor("zero_mean_sq_59_cast")]; + tensor var_2310 = const()[name = tensor("op_2310"), val = tensor([1])]; + tensor var_2311_cast = reduce_mean(axes = var_2310, keep_dims = var_1181, x = zero_mean_sq_59_cast)[name = tensor("op_2311_cast")]; + tensor var_2312_to_fp16 = const()[name = tensor("op_2312_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2313_cast = add(x = var_2311_cast, y = var_2312_to_fp16)[name = tensor("op_2313_cast")]; + tensor denom_59_epsilon_0_to_fp16 = const()[name = tensor("denom_59_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_59_cast = rsqrt(epsilon = denom_59_epsilon_0_to_fp16, x = var_2313_cast)[name = tensor("denom_59_cast")]; + tensor out_59_cast = mul(x = zero_mean_59_cast, y = denom_59_cast)[name = tensor("out_59_cast")]; + tensor var_2317_to_fp16 = const()[name = tensor("op_2317_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184691264)))]; + tensor var_2318_cast = add(x = out_59_cast, y = var_2317_to_fp16)[name = tensor("op_2318_cast")]; + tensor var_2320_to_fp16 = const()[name = tensor("op_2320_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184693888)))]; + tensor input_175_cast = mul(x = var_2318_cast, y = var_2320_to_fp16)[name = tensor("input_175_cast")]; + tensor var_2328 = const()[name = tensor("op_2328"), val = tensor([1, 1])]; + tensor var_2330 = const()[name = tensor("op_2330"), val = tensor([1, 1])]; + tensor var_2332_pad_type_0 = const()[name = tensor("op_2332_pad_type_0"), val = tensor("custom")]; + tensor var_2332_pad_0 = const()[name = tensor("op_2332_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(184696512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(194526976))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(194527168)))]; + tensor var_2332_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_2330, groups = var_1186, pad = var_2332_pad_0, pad_type = var_2332_pad_type_0, strides = var_2328, weight = down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_175_cast)[name = tensor("op_2332_cast")]; + tensor var_2333_split_sizes_0 = const()[name = tensor("op_2333_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2333_axis_0 = const()[name = tensor("op_2333_axis_0"), val = tensor(1)]; + tensor var_2333_cast_0, tensor var_2333_cast_1 = split(axis = var_2333_axis_0, split_sizes = var_2333_split_sizes_0, 
x = var_2332_cast)[name = tensor("op_2333_cast")]; + tensor var_2335_mode_0 = const()[name = tensor("op_2335_mode_0"), val = tensor("EXACT")]; + tensor var_2335_cast = gelu(mode = var_2335_mode_0, x = var_2333_cast_1)[name = tensor("op_2335_cast")]; + tensor input_177_cast = mul(x = var_2333_cast_0, y = var_2335_cast)[name = tensor("input_177_cast")]; + tensor var_2339 = const()[name = tensor("op_2339"), val = tensor([1, 1])]; + tensor var_2341 = const()[name = tensor("op_2341"), val = tensor([1, 1])]; + tensor var_2343_pad_type_0 = const()[name = tensor("op_2343_pad_type_0"), val = tensor("custom")]; + tensor var_2343_pad_0 = const()[name = tensor("op_2343_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(194547712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197824576))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197824704)))]; + tensor var_2343_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_2341, groups = var_1186, pad = var_2343_pad_0, pad_type = var_2343_pad_type_0, strides = var_2339, weight = down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_177_cast)[name = tensor("op_2343_cast")]; + tensor inputs_61_cast = add(x = var_2343_cast, y = inputs_59_cast)[name = tensor("inputs_61_cast")]; + tensor var_2353 = const()[name = tensor("op_2353"), val = tensor([1])]; + tensor channels_mean_61_cast = reduce_mean(axes = var_2353, keep_dims = var_1181, x = inputs_61_cast)[name = tensor("channels_mean_61_cast")]; + tensor zero_mean_61_cast = sub(x = inputs_61_cast, y = channels_mean_61_cast)[name = tensor("zero_mean_61_cast")]; + tensor zero_mean_sq_61_cast = mul(x = zero_mean_61_cast, y = zero_mean_61_cast)[name = tensor("zero_mean_sq_61_cast")]; + tensor var_2357 = const()[name = tensor("op_2357"), val = tensor([1])]; + tensor var_2358_cast = reduce_mean(axes = var_2357, keep_dims = var_1181, x = zero_mean_sq_61_cast)[name = tensor("op_2358_cast")]; + tensor var_2359_to_fp16 = const()[name = tensor("op_2359_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2360_cast = add(x = var_2358_cast, y = var_2359_to_fp16)[name = tensor("op_2360_cast")]; + tensor denom_61_epsilon_0_to_fp16 = const()[name = tensor("denom_61_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_61_cast = rsqrt(epsilon = denom_61_epsilon_0_to_fp16, x = var_2360_cast)[name = tensor("denom_61_cast")]; + tensor out_61_cast = mul(x = zero_mean_61_cast, y = denom_61_cast)[name = tensor("out_61_cast")]; + tensor var_2364_to_fp16 = const()[name = tensor("op_2364_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197827328)))]; + tensor var_2365_cast = add(x = out_61_cast, y = var_2364_to_fp16)[name = tensor("op_2365_cast")]; + tensor var_2367_to_fp16 = const()[name = tensor("op_2367_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(197829952)))]; + tensor hidden_states_101_cast = mul(x = var_2365_cast, y = var_2367_to_fp16)[name = tensor("hidden_states_101_cast")]; + tensor var_2374 = const()[name = tensor("op_2374"), val = tensor([1, 1])]; + tensor var_2376 = const()[name = tensor("op_2376"), val = tensor([1, 1])]; + tensor q_41_pad_type_0 = const()[name = tensor("q_41_pad_type_0"), val = tensor("custom")]; + tensor q_41_pad_0 = const()[name = tensor("q_41_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197832576))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(198651840))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_41_cast = conv(dilations = var_2376, groups = var_1186, pad = q_41_pad_0, pad_type = q_41_pad_type_0, strides = var_2374, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_101_cast)[name = tensor("q_41_cast")]; + tensor var_2380 = const()[name = tensor("op_2380"), val = tensor([1, 1])]; + tensor var_2382 = const()[name = tensor("op_2382"), val = tensor([1, 1])]; + tensor k_41_pad_type_0 = const()[name = tensor("k_41_pad_type_0"), val = tensor("custom")]; + tensor k_41_pad_0 = const()[name = tensor("k_41_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(198651968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(199471232))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_41_cast = conv(dilations = var_2382, groups = var_1186, pad = k_41_pad_0, pad_type = k_41_pad_type_0, strides = var_2380, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_101_cast)[name = tensor("k_41_cast")]; + tensor var_2386 = const()[name = tensor("op_2386"), val = tensor([1, 1])]; + tensor var_2388 = const()[name = tensor("op_2388"), val = tensor([1, 1])]; + tensor v_41_pad_type_0 = const()[name = tensor("v_41_pad_type_0"), val = tensor("custom")]; + tensor v_41_pad_0 = const()[name = tensor("v_41_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(199471360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(200290624))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_41_cast = conv(dilations = var_2388, groups = var_1186, pad = v_41_pad_0, pad_type = v_41_pad_type_0, strides = var_2386, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_101_cast)[name = tensor("v_41_cast")]; + tensor var_2392 = const()[name = tensor("op_2392"), val = tensor([2, 20, 64, -1])]; + tensor var_2393_cast = reshape(shape = var_2392, x = 
q_41_cast)[name = tensor("op_2393_cast")]; + tensor var_2394 = const()[name = tensor("op_2394"), val = tensor([2, 20, 64, -1])]; + tensor var_2395_cast = reshape(shape = var_2394, x = k_41_cast)[name = tensor("op_2395_cast")]; + tensor var_2396 = const()[name = tensor("op_2396"), val = tensor([2, 20, 64, -1])]; + tensor var_2397_cast = reshape(shape = var_2396, x = v_41_cast)[name = tensor("op_2397_cast")]; + tensor attn_weights_81_transpose_x_0 = const()[name = tensor("attn_weights_81_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_81_transpose_y_0 = const()[name = tensor("attn_weights_81_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_81_cast = matmul(transpose_x = attn_weights_81_transpose_x_0, transpose_y = attn_weights_81_transpose_y_0, x = var_2393_cast, y = var_2395_cast)[name = tensor("attn_weights_81_cast")]; + tensor attn_weights_83_cast = mul(x = attn_weights_81_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_83_cast")]; + tensor var_2401_cast = softmax(axis = var_1170, x = attn_weights_83_cast)[name = tensor("op_2401_cast")]; + tensor attn_41_transpose_x_0 = const()[name = tensor("attn_41_transpose_x_0"), val = tensor(false)]; + tensor attn_41_transpose_y_0 = const()[name = tensor("attn_41_transpose_y_0"), val = tensor(true)]; + tensor attn_41_cast = matmul(transpose_x = attn_41_transpose_x_0, transpose_y = attn_41_transpose_y_0, x = var_2397_cast, y = var_2401_cast)[name = tensor("attn_41_cast")]; + tensor var_2405 = const()[name = tensor("op_2405"), val = tensor([2, 1280, 1, -1])]; + tensor input_179_cast = reshape(shape = var_2405, x = attn_41_cast)[name = tensor("input_179_cast")]; + tensor var_2410 = const()[name = tensor("op_2410"), val = tensor([1, 1])]; + tensor var_2412 = const()[name = tensor("op_2412"), val = tensor([1, 1])]; + tensor var_2414_pad_type_0 = const()[name = tensor("op_2414_pad_type_0"), val = tensor("custom")]; + tensor var_2414_pad_0 = const()[name = tensor("op_2414_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(200290752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201110016))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201110144)))]; + tensor var_2414_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_2412, groups = var_1186, pad = var_2414_pad_0, pad_type = var_2414_pad_type_0, strides = var_2410, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_179_cast)[name = tensor("op_2414_cast")]; + tensor inputs_63_cast = add(x = var_2414_cast, y = inputs_61_cast)[name = tensor("inputs_63_cast")]; + tensor var_2418 = const()[name = tensor("op_2418"), val = tensor([1])]; + tensor channels_mean_63_cast = reduce_mean(axes = var_2418, keep_dims = var_1181, x = inputs_63_cast)[name = tensor("channels_mean_63_cast")]; + tensor zero_mean_63_cast = sub(x = inputs_63_cast, y = 
channels_mean_63_cast)[name = tensor("zero_mean_63_cast")]; + tensor zero_mean_sq_63_cast = mul(x = zero_mean_63_cast, y = zero_mean_63_cast)[name = tensor("zero_mean_sq_63_cast")]; + tensor var_2422 = const()[name = tensor("op_2422"), val = tensor([1])]; + tensor var_2423_cast = reduce_mean(axes = var_2422, keep_dims = var_1181, x = zero_mean_sq_63_cast)[name = tensor("op_2423_cast")]; + tensor var_2424_to_fp16 = const()[name = tensor("op_2424_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2425_cast = add(x = var_2423_cast, y = var_2424_to_fp16)[name = tensor("op_2425_cast")]; + tensor denom_63_epsilon_0_to_fp16 = const()[name = tensor("denom_63_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_63_cast = rsqrt(epsilon = denom_63_epsilon_0_to_fp16, x = var_2425_cast)[name = tensor("denom_63_cast")]; + tensor out_63_cast = mul(x = zero_mean_63_cast, y = denom_63_cast)[name = tensor("out_63_cast")]; + tensor var_2429_to_fp16 = const()[name = tensor("op_2429_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201112768)))]; + tensor var_2430_cast = add(x = out_63_cast, y = var_2429_to_fp16)[name = tensor("op_2430_cast")]; + tensor var_2432_to_fp16 = const()[name = tensor("op_2432_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201115392)))]; + tensor hidden_states_103_cast = mul(x = var_2430_cast, y = var_2432_to_fp16)[name = tensor("hidden_states_103_cast")]; + tensor var_2439 = const()[name = tensor("op_2439"), val = tensor([1, 1])]; + tensor var_2441 = const()[name = tensor("op_2441"), val = tensor([1, 1])]; + tensor q_43_pad_type_0 = const()[name = tensor("q_43_pad_type_0"), val = tensor("custom")]; + tensor q_43_pad_0 = const()[name = tensor("q_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201118016))), lut = tensor([-0x1.61p-7, 0x1.608p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_43_cast = conv(dilations = var_2441, groups = var_1186, pad = q_43_pad_0, pad_type = q_43_pad_type_0, strides = var_2439, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_103_cast)[name = tensor("q_43_cast")]; + tensor var_2445 = const()[name = tensor("op_2445"), val = tensor([1, 1])]; + tensor var_2447 = const()[name = tensor("op_2447"), val = tensor([1, 1])]; + tensor k_43_pad_type_0 = const()[name = tensor("k_43_pad_type_0"), val = tensor("custom")]; + tensor k_43_pad_0 = const()[name = tensor("k_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201322880))), lut = tensor([-0x1.c8p-8, 0x1.c8cp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_43_cast = conv(dilations = var_2447, groups = var_1186, pad = k_43_pad_0, pad_type = k_43_pad_type_0, strides = var_2445, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_43_cast")]; + tensor 
var_2451 = const()[name = tensor("op_2451"), val = tensor([1, 1])]; + tensor var_2453 = const()[name = tensor("op_2453"), val = tensor([1, 1])]; + tensor v_43_pad_type_0 = const()[name = tensor("v_43_pad_type_0"), val = tensor("custom")]; + tensor v_43_pad_0 = const()[name = tensor("v_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201650624))), lut = tensor([-0x1.e5cp-8, 0x1.e78p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_43_cast = conv(dilations = var_2453, groups = var_1186, pad = v_43_pad_0, pad_type = v_43_pad_type_0, strides = var_2451, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_43_cast")]; + tensor var_2457 = const()[name = tensor("op_2457"), val = tensor([2, 20, 64, -1])]; + tensor var_2458_cast = reshape(shape = var_2457, x = q_43_cast)[name = tensor("op_2458_cast")]; + tensor var_2459 = const()[name = tensor("op_2459"), val = tensor([2, 20, 64, -1])]; + tensor var_2460_cast = reshape(shape = var_2459, x = k_43_cast)[name = tensor("op_2460_cast")]; + tensor var_2461 = const()[name = tensor("op_2461"), val = tensor([2, 20, 64, -1])]; + tensor var_2462_cast = reshape(shape = var_2461, x = v_43_cast)[name = tensor("op_2462_cast")]; + tensor attn_weights_85_transpose_x_0 = const()[name = tensor("attn_weights_85_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_85_transpose_y_0 = const()[name = tensor("attn_weights_85_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_85_cast = matmul(transpose_x = attn_weights_85_transpose_x_0, transpose_y = attn_weights_85_transpose_y_0, x = var_2458_cast, y = var_2460_cast)[name = tensor("attn_weights_85_cast")]; + tensor attn_weights_87_cast = mul(x = attn_weights_85_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_87_cast")]; + tensor var_2466_cast = softmax(axis = var_1170, x = attn_weights_87_cast)[name = tensor("op_2466_cast")]; + tensor attn_43_transpose_x_0 = const()[name = tensor("attn_43_transpose_x_0"), val = tensor(false)]; + tensor attn_43_transpose_y_0 = const()[name = tensor("attn_43_transpose_y_0"), val = tensor(true)]; + tensor attn_43_cast = matmul(transpose_x = attn_43_transpose_x_0, transpose_y = attn_43_transpose_y_0, x = var_2462_cast, y = var_2466_cast)[name = tensor("attn_43_cast")]; + tensor var_2470 = const()[name = tensor("op_2470"), val = tensor([2, 1280, 1, -1])]; + tensor input_181_cast = reshape(shape = var_2470, x = attn_43_cast)[name = tensor("input_181_cast")]; + tensor var_2475 = const()[name = tensor("op_2475"), val = tensor([1, 1])]; + tensor var_2477 = const()[name = tensor("op_2477"), val = tensor([1, 1])]; + tensor var_2479_pad_type_0 = const()[name = tensor("op_2479_pad_type_0"), val = tensor("custom")]; + tensor var_2479_pad_0 = const()[name = tensor("op_2479_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201978368))), lut = tensor([-0x1.1a8p-8, 0x1.1a8p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = 
tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(202183232)))]; + tensor var_2479_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_2477, groups = var_1186, pad = var_2479_pad_0, pad_type = var_2479_pad_type_0, strides = var_2475, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_181_cast)[name = tensor("op_2479_cast")]; + tensor inputs_65_cast = add(x = var_2479_cast, y = inputs_63_cast)[name = tensor("inputs_65_cast")]; + tensor var_2483 = const()[name = tensor("op_2483"), val = tensor([1])]; + tensor channels_mean_65_cast = reduce_mean(axes = var_2483, keep_dims = var_1181, x = inputs_65_cast)[name = tensor("channels_mean_65_cast")]; + tensor zero_mean_65_cast = sub(x = inputs_65_cast, y = channels_mean_65_cast)[name = tensor("zero_mean_65_cast")]; + tensor zero_mean_sq_65_cast = mul(x = zero_mean_65_cast, y = zero_mean_65_cast)[name = tensor("zero_mean_sq_65_cast")]; + tensor var_2487 = const()[name = tensor("op_2487"), val = tensor([1])]; + tensor var_2488_cast = reduce_mean(axes = var_2487, keep_dims = var_1181, x = zero_mean_sq_65_cast)[name = tensor("op_2488_cast")]; + tensor var_2489_to_fp16 = const()[name = tensor("op_2489_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2490_cast = add(x = var_2488_cast, y = var_2489_to_fp16)[name = tensor("op_2490_cast")]; + tensor denom_65_epsilon_0_to_fp16 = const()[name = tensor("denom_65_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_65_cast = rsqrt(epsilon = denom_65_epsilon_0_to_fp16, x = var_2490_cast)[name = tensor("denom_65_cast")]; + tensor out_65_cast = mul(x = zero_mean_65_cast, y = denom_65_cast)[name = tensor("out_65_cast")]; + tensor var_2494_to_fp16 = const()[name = tensor("op_2494_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(202185856)))]; + tensor var_2495_cast = add(x = out_65_cast, y = var_2494_to_fp16)[name = tensor("op_2495_cast")]; + tensor var_2497_to_fp16 = const()[name = tensor("op_2497_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(202188480)))]; + tensor input_183_cast = mul(x = var_2495_cast, y = var_2497_to_fp16)[name = tensor("input_183_cast")]; + tensor var_2505 = const()[name = tensor("op_2505"), val = tensor([1, 1])]; + tensor var_2507 = const()[name = tensor("op_2507"), val = tensor([1, 1])]; + tensor var_2509_pad_type_0 = const()[name = tensor("op_2509_pad_type_0"), val = tensor("custom")]; + tensor var_2509_pad_0 = const()[name = tensor("op_2509_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(202191104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(212021568))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = 
tensor("down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(212021760)))]; + tensor var_2509_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_2507, groups = var_1186, pad = var_2509_pad_0, pad_type = var_2509_pad_type_0, strides = var_2505, weight = down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_183_cast)[name = tensor("op_2509_cast")]; + tensor var_2510_split_sizes_0 = const()[name = tensor("op_2510_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2510_axis_0 = const()[name = tensor("op_2510_axis_0"), val = tensor(1)]; + tensor var_2510_cast_0, tensor var_2510_cast_1 = split(axis = var_2510_axis_0, split_sizes = var_2510_split_sizes_0, x = var_2509_cast)[name = tensor("op_2510_cast")]; + tensor var_2512_mode_0 = const()[name = tensor("op_2512_mode_0"), val = tensor("EXACT")]; + tensor var_2512_cast = gelu(mode = var_2512_mode_0, x = var_2510_cast_1)[name = tensor("op_2512_cast")]; + tensor input_185_cast = mul(x = var_2510_cast_0, y = var_2512_cast)[name = tensor("input_185_cast")]; + tensor var_2516 = const()[name = tensor("op_2516"), val = tensor([1, 1])]; + tensor var_2518 = const()[name = tensor("op_2518"), val = tensor([1, 1])]; + tensor var_2520_pad_type_0 = const()[name = tensor("op_2520_pad_type_0"), val = tensor("custom")]; + tensor var_2520_pad_0 = const()[name = tensor("op_2520_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(212042304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216957568))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216957760)))]; + tensor var_2520_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_2518, groups = var_1186, pad = var_2520_pad_0, pad_type = var_2520_pad_type_0, strides = var_2516, weight = down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_185_cast)[name = tensor("op_2520_cast")]; + tensor inputs_67_cast = add(x = var_2520_cast, y = inputs_65_cast)[name = tensor("inputs_67_cast")]; + tensor var_2530 = const()[name = tensor("op_2530"), val = tensor([1])]; + tensor channels_mean_67_cast = reduce_mean(axes = var_2530, keep_dims = var_1181, x = inputs_67_cast)[name = tensor("channels_mean_67_cast")]; + tensor zero_mean_67_cast = sub(x = inputs_67_cast, y = channels_mean_67_cast)[name = tensor("zero_mean_67_cast")]; + tensor zero_mean_sq_67_cast = mul(x = zero_mean_67_cast, y = zero_mean_67_cast)[name = tensor("zero_mean_sq_67_cast")]; + tensor var_2534 = const()[name = tensor("op_2534"), val = tensor([1])]; + tensor var_2535_cast = reduce_mean(axes = var_2534, keep_dims = var_1181, x = zero_mean_sq_67_cast)[name = tensor("op_2535_cast")]; + tensor var_2536_to_fp16 = const()[name = tensor("op_2536_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor var_2537_cast = add(x = var_2535_cast, y = var_2536_to_fp16)[name = tensor("op_2537_cast")]; + tensor denom_67_epsilon_0_to_fp16 = const()[name = tensor("denom_67_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_67_cast = rsqrt(epsilon = denom_67_epsilon_0_to_fp16, x = var_2537_cast)[name = tensor("denom_67_cast")]; + tensor out_67_cast = mul(x = zero_mean_67_cast, y = denom_67_cast)[name = tensor("out_67_cast")]; + tensor var_2541_to_fp16 = const()[name = tensor("op_2541_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216960384)))]; + tensor var_2542_cast = add(x = out_67_cast, y = var_2541_to_fp16)[name = tensor("op_2542_cast")]; + tensor var_2544_to_fp16 = const()[name = tensor("op_2544_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216963008)))]; + tensor hidden_states_107_cast = mul(x = var_2542_cast, y = var_2544_to_fp16)[name = tensor("hidden_states_107_cast")]; + tensor var_2551 = const()[name = tensor("op_2551"), val = tensor([1, 1])]; + tensor var_2553 = const()[name = tensor("op_2553"), val = tensor([1, 1])]; + tensor q_45_pad_type_0 = const()[name = tensor("q_45_pad_type_0"), val = tensor("custom")]; + tensor q_45_pad_0 = const()[name = tensor("q_45_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(216965632))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(217784896))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_45_cast = conv(dilations = var_2553, groups = var_1186, pad = q_45_pad_0, pad_type = q_45_pad_type_0, strides = var_2551, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_107_cast)[name = tensor("q_45_cast")]; + tensor var_2557 = const()[name = tensor("op_2557"), val = tensor([1, 1])]; + tensor var_2559 = const()[name = tensor("op_2559"), val = tensor([1, 1])]; + tensor k_45_pad_type_0 = const()[name = tensor("k_45_pad_type_0"), val = tensor("custom")]; + tensor k_45_pad_0 = const()[name = tensor("k_45_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(217785024))), lut = tensor([-0x1.09p-5, -0x1.404p-7, 0x1.404p-7, 0x1.08cp-5]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_45_cast = conv(dilations = var_2559, groups = var_1186, pad = k_45_pad_0, pad_type = k_45_pad_type_0, strides = var_2557, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_107_cast)[name = tensor("k_45_cast")]; + tensor var_2563 = const()[name = tensor("op_2563"), val = tensor([1, 1])]; + tensor var_2565 = const()[name = tensor("op_2565"), val = tensor([1, 1])]; + tensor v_45_pad_type_0 = const()[name = tensor("v_45_pad_type_0"), val = tensor("custom")]; + tensor v_45_pad_0 = const()[name = tensor("v_45_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(218194688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219013952))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_45_cast = conv(dilations = var_2565, groups = var_1186, pad = v_45_pad_0, pad_type = v_45_pad_type_0, strides = var_2563, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_107_cast)[name = tensor("v_45_cast")]; + tensor var_2569 = const()[name = tensor("op_2569"), val = tensor([2, 20, 64, -1])]; + tensor var_2570_cast = reshape(shape = var_2569, x = q_45_cast)[name = tensor("op_2570_cast")]; + tensor var_2571 = const()[name = tensor("op_2571"), val = tensor([2, 20, 64, -1])]; + tensor var_2572_cast = reshape(shape = var_2571, x = k_45_cast)[name = tensor("op_2572_cast")]; + tensor var_2573 = const()[name = tensor("op_2573"), val = tensor([2, 20, 64, -1])]; + tensor var_2574_cast = reshape(shape = var_2573, x = v_45_cast)[name = tensor("op_2574_cast")]; + tensor attn_weights_89_transpose_x_0 = const()[name = tensor("attn_weights_89_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_89_transpose_y_0 = const()[name = tensor("attn_weights_89_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_89_cast = matmul(transpose_x = attn_weights_89_transpose_x_0, transpose_y = attn_weights_89_transpose_y_0, x = var_2570_cast, y = var_2572_cast)[name = tensor("attn_weights_89_cast")]; + tensor attn_weights_91_cast = mul(x = attn_weights_89_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_91_cast")]; + tensor var_2578_cast = softmax(axis = var_1170, x = attn_weights_91_cast)[name = tensor("op_2578_cast")]; + tensor attn_45_transpose_x_0 = const()[name = tensor("attn_45_transpose_x_0"), val = tensor(false)]; + tensor attn_45_transpose_y_0 = const()[name = tensor("attn_45_transpose_y_0"), val = tensor(true)]; + tensor attn_45_cast = matmul(transpose_x = attn_45_transpose_x_0, transpose_y = attn_45_transpose_y_0, x = var_2574_cast, y = var_2578_cast)[name = tensor("attn_45_cast")]; + tensor var_2582 = const()[name = tensor("op_2582"), val = tensor([2, 1280, 1, -1])]; + tensor input_187_cast = reshape(shape = var_2582, x = attn_45_cast)[name = tensor("input_187_cast")]; + tensor var_2587 = const()[name = tensor("op_2587"), val = tensor([1, 1])]; + tensor var_2589 = const()[name = tensor("op_2589"), val = tensor([1, 1])]; + tensor var_2591_pad_type_0 = const()[name = tensor("op_2591_pad_type_0"), val = tensor("custom")]; + tensor var_2591_pad_0 = const()[name = tensor("op_2591_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219014080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219833344))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = 
tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219833472)))]; + tensor var_2591_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_2589, groups = var_1186, pad = var_2591_pad_0, pad_type = var_2591_pad_type_0, strides = var_2587, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_187_cast)[name = tensor("op_2591_cast")]; + tensor inputs_69_cast = add(x = var_2591_cast, y = inputs_67_cast)[name = tensor("inputs_69_cast")]; + tensor var_2595 = const()[name = tensor("op_2595"), val = tensor([1])]; + tensor channels_mean_69_cast = reduce_mean(axes = var_2595, keep_dims = var_1181, x = inputs_69_cast)[name = tensor("channels_mean_69_cast")]; + tensor zero_mean_69_cast = sub(x = inputs_69_cast, y = channels_mean_69_cast)[name = tensor("zero_mean_69_cast")]; + tensor zero_mean_sq_69_cast = mul(x = zero_mean_69_cast, y = zero_mean_69_cast)[name = tensor("zero_mean_sq_69_cast")]; + tensor var_2599 = const()[name = tensor("op_2599"), val = tensor([1])]; + tensor var_2600_cast = reduce_mean(axes = var_2599, keep_dims = var_1181, x = zero_mean_sq_69_cast)[name = tensor("op_2600_cast")]; + tensor var_2601_to_fp16 = const()[name = tensor("op_2601_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2602_cast = add(x = var_2600_cast, y = var_2601_to_fp16)[name = tensor("op_2602_cast")]; + tensor denom_69_epsilon_0_to_fp16 = const()[name = tensor("denom_69_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_69_cast = rsqrt(epsilon = denom_69_epsilon_0_to_fp16, x = var_2602_cast)[name = tensor("denom_69_cast")]; + tensor out_69_cast = mul(x = zero_mean_69_cast, y = denom_69_cast)[name = tensor("out_69_cast")]; + tensor var_2606_to_fp16 = const()[name = tensor("op_2606_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219836096)))]; + tensor var_2607_cast = add(x = out_69_cast, y = var_2606_to_fp16)[name = tensor("op_2607_cast")]; + tensor var_2609_to_fp16 = const()[name = tensor("op_2609_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219838720)))]; + tensor hidden_states_109_cast = mul(x = var_2607_cast, y = var_2609_to_fp16)[name = tensor("hidden_states_109_cast")]; + tensor var_2616 = const()[name = tensor("op_2616"), val = tensor([1, 1])]; + tensor var_2618 = const()[name = tensor("op_2618"), val = tensor([1, 1])]; + tensor q_47_pad_type_0 = const()[name = tensor("q_47_pad_type_0"), val = tensor("custom")]; + tensor q_47_pad_0 = const()[name = tensor("q_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(219841344))), lut = tensor([-0x1.61p-7, 0x1.614p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_47_cast = conv(dilations = var_2618, groups = var_1186, pad = q_47_pad_0, pad_type = q_47_pad_type_0, strides = var_2616, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_109_cast)[name = tensor("q_47_cast")]; + tensor var_2622 = const()[name = tensor("op_2622"), val = tensor([1, 1])]; + tensor 
var_2624 = const()[name = tensor("op_2624"), val = tensor([1, 1])]; + tensor k_47_pad_type_0 = const()[name = tensor("k_47_pad_type_0"), val = tensor("custom")]; + tensor k_47_pad_0 = const()[name = tensor("k_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(220046208))), lut = tensor([-0x1.c84p-8, 0x1.c98p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_47_cast = conv(dilations = var_2624, groups = var_1186, pad = k_47_pad_0, pad_type = k_47_pad_type_0, strides = var_2622, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_47_cast")]; + tensor var_2628 = const()[name = tensor("op_2628"), val = tensor([1, 1])]; + tensor var_2630 = const()[name = tensor("op_2630"), val = tensor([1, 1])]; + tensor v_47_pad_type_0 = const()[name = tensor("v_47_pad_type_0"), val = tensor("custom")]; + tensor v_47_pad_0 = const()[name = tensor("v_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(220373952))), lut = tensor([-0x1.e6cp-8, 0x1.e64p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_47_cast = conv(dilations = var_2630, groups = var_1186, pad = v_47_pad_0, pad_type = v_47_pad_type_0, strides = var_2628, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_47_cast")]; + tensor var_2634 = const()[name = tensor("op_2634"), val = tensor([2, 20, 64, -1])]; + tensor var_2635_cast = reshape(shape = var_2634, x = q_47_cast)[name = tensor("op_2635_cast")]; + tensor var_2636 = const()[name = tensor("op_2636"), val = tensor([2, 20, 64, -1])]; + tensor var_2637_cast = reshape(shape = var_2636, x = k_47_cast)[name = tensor("op_2637_cast")]; + tensor var_2638 = const()[name = tensor("op_2638"), val = tensor([2, 20, 64, -1])]; + tensor var_2639_cast = reshape(shape = var_2638, x = v_47_cast)[name = tensor("op_2639_cast")]; + tensor attn_weights_93_transpose_x_0 = const()[name = tensor("attn_weights_93_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_93_transpose_y_0 = const()[name = tensor("attn_weights_93_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_93_cast = matmul(transpose_x = attn_weights_93_transpose_x_0, transpose_y = attn_weights_93_transpose_y_0, x = var_2635_cast, y = var_2637_cast)[name = tensor("attn_weights_93_cast")]; + tensor attn_weights_95_cast = mul(x = attn_weights_93_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_95_cast")]; + tensor var_2643_cast = softmax(axis = var_1170, x = attn_weights_95_cast)[name = tensor("op_2643_cast")]; + tensor attn_47_transpose_x_0 = const()[name = tensor("attn_47_transpose_x_0"), val = tensor(false)]; + tensor attn_47_transpose_y_0 = const()[name = tensor("attn_47_transpose_y_0"), val = tensor(true)]; + tensor attn_47_cast = matmul(transpose_x = attn_47_transpose_x_0, transpose_y = attn_47_transpose_y_0, x = var_2639_cast, y = var_2643_cast)[name = 
tensor("attn_47_cast")]; + tensor var_2647 = const()[name = tensor("op_2647"), val = tensor([2, 1280, 1, -1])]; + tensor input_189_cast = reshape(shape = var_2647, x = attn_47_cast)[name = tensor("input_189_cast")]; + tensor var_2652 = const()[name = tensor("op_2652"), val = tensor([1, 1])]; + tensor var_2654 = const()[name = tensor("op_2654"), val = tensor([1, 1])]; + tensor var_2656_pad_type_0 = const()[name = tensor("op_2656_pad_type_0"), val = tensor("custom")]; + tensor var_2656_pad_0 = const()[name = tensor("op_2656_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(220701696))), lut = tensor([-0x1.214p-8, 0x1.21p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(220906560)))]; + tensor var_2656_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_2654, groups = var_1186, pad = var_2656_pad_0, pad_type = var_2656_pad_type_0, strides = var_2652, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_189_cast)[name = tensor("op_2656_cast")]; + tensor inputs_71_cast = add(x = var_2656_cast, y = inputs_69_cast)[name = tensor("inputs_71_cast")]; + tensor var_2660 = const()[name = tensor("op_2660"), val = tensor([1])]; + tensor channels_mean_71_cast = reduce_mean(axes = var_2660, keep_dims = var_1181, x = inputs_71_cast)[name = tensor("channels_mean_71_cast")]; + tensor zero_mean_71_cast = sub(x = inputs_71_cast, y = channels_mean_71_cast)[name = tensor("zero_mean_71_cast")]; + tensor zero_mean_sq_71_cast = mul(x = zero_mean_71_cast, y = zero_mean_71_cast)[name = tensor("zero_mean_sq_71_cast")]; + tensor var_2664 = const()[name = tensor("op_2664"), val = tensor([1])]; + tensor var_2665_cast = reduce_mean(axes = var_2664, keep_dims = var_1181, x = zero_mean_sq_71_cast)[name = tensor("op_2665_cast")]; + tensor var_2666_to_fp16 = const()[name = tensor("op_2666_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2667_cast = add(x = var_2665_cast, y = var_2666_to_fp16)[name = tensor("op_2667_cast")]; + tensor denom_71_epsilon_0_to_fp16 = const()[name = tensor("denom_71_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_71_cast = rsqrt(epsilon = denom_71_epsilon_0_to_fp16, x = var_2667_cast)[name = tensor("denom_71_cast")]; + tensor out_71_cast = mul(x = zero_mean_71_cast, y = denom_71_cast)[name = tensor("out_71_cast")]; + tensor var_2671_to_fp16 = const()[name = tensor("op_2671_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(220909184)))]; + tensor var_2672_cast = add(x = out_71_cast, y = var_2671_to_fp16)[name = tensor("op_2672_cast")]; + tensor var_2674_to_fp16 = const()[name = tensor("op_2674_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(220911808)))]; + tensor input_191_cast = mul(x = var_2672_cast, y = var_2674_to_fp16)[name = tensor("input_191_cast")]; + tensor var_2682 = const()[name = 
tensor("op_2682"), val = tensor([1, 1])]; + tensor var_2684 = const()[name = tensor("op_2684"), val = tensor([1, 1])]; + tensor var_2686_pad_type_0 = const()[name = tensor("op_2686_pad_type_0"), val = tensor("custom")]; + tensor var_2686_pad_0 = const()[name = tensor("op_2686_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(220914432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(227468096))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(227468224)))]; + tensor var_2686_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_2684, groups = var_1186, pad = var_2686_pad_0, pad_type = var_2686_pad_type_0, strides = var_2682, weight = down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_191_cast)[name = tensor("op_2686_cast")]; + tensor var_2687_split_sizes_0 = const()[name = tensor("op_2687_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2687_axis_0 = const()[name = tensor("op_2687_axis_0"), val = tensor(1)]; + tensor var_2687_cast_0, tensor var_2687_cast_1 = split(axis = var_2687_axis_0, split_sizes = var_2687_split_sizes_0, x = var_2686_cast)[name = tensor("op_2687_cast")]; + tensor var_2689_mode_0 = const()[name = tensor("op_2689_mode_0"), val = tensor("EXACT")]; + tensor var_2689_cast = gelu(mode = var_2689_mode_0, x = var_2687_cast_1)[name = tensor("op_2689_cast")]; + tensor input_193_cast = mul(x = var_2687_cast_0, y = var_2689_cast)[name = tensor("input_193_cast")]; + tensor var_2693 = const()[name = tensor("op_2693"), val = tensor([1, 1])]; + tensor var_2695 = const()[name = tensor("op_2695"), val = tensor([1, 1])]; + tensor var_2697_pad_type_0 = const()[name = tensor("op_2697_pad_type_0"), val = tensor("custom")]; + tensor var_2697_pad_0 = const()[name = tensor("op_2697_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(227488768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(232404032))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(232404224)))]; + tensor var_2697_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_2695, groups = var_1186, pad = var_2697_pad_0, pad_type = var_2697_pad_type_0, strides = var_2693, weight = down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = 
input_193_cast)[name = tensor("op_2697_cast")]; + tensor inputs_73_cast = add(x = var_2697_cast, y = inputs_71_cast)[name = tensor("inputs_73_cast")]; + tensor var_2707 = const()[name = tensor("op_2707"), val = tensor([1])]; + tensor channels_mean_73_cast = reduce_mean(axes = var_2707, keep_dims = var_1181, x = inputs_73_cast)[name = tensor("channels_mean_73_cast")]; + tensor zero_mean_73_cast = sub(x = inputs_73_cast, y = channels_mean_73_cast)[name = tensor("zero_mean_73_cast")]; + tensor zero_mean_sq_73_cast = mul(x = zero_mean_73_cast, y = zero_mean_73_cast)[name = tensor("zero_mean_sq_73_cast")]; + tensor var_2711 = const()[name = tensor("op_2711"), val = tensor([1])]; + tensor var_2712_cast = reduce_mean(axes = var_2711, keep_dims = var_1181, x = zero_mean_sq_73_cast)[name = tensor("op_2712_cast")]; + tensor var_2713_to_fp16 = const()[name = tensor("op_2713_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2714_cast = add(x = var_2712_cast, y = var_2713_to_fp16)[name = tensor("op_2714_cast")]; + tensor denom_73_epsilon_0_to_fp16 = const()[name = tensor("denom_73_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_73_cast = rsqrt(epsilon = denom_73_epsilon_0_to_fp16, x = var_2714_cast)[name = tensor("denom_73_cast")]; + tensor out_73_cast = mul(x = zero_mean_73_cast, y = denom_73_cast)[name = tensor("out_73_cast")]; + tensor var_2718_to_fp16 = const()[name = tensor("op_2718_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(232406848)))]; + tensor var_2719_cast = add(x = out_73_cast, y = var_2718_to_fp16)[name = tensor("op_2719_cast")]; + tensor var_2721_to_fp16 = const()[name = tensor("op_2721_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(232409472)))]; + tensor hidden_states_113_cast = mul(x = var_2719_cast, y = var_2721_to_fp16)[name = tensor("hidden_states_113_cast")]; + tensor var_2728 = const()[name = tensor("op_2728"), val = tensor([1, 1])]; + tensor var_2730 = const()[name = tensor("op_2730"), val = tensor([1, 1])]; + tensor q_49_pad_type_0 = const()[name = tensor("q_49_pad_type_0"), val = tensor("custom")]; + tensor q_49_pad_0 = const()[name = tensor("q_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(232412096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(233231360))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_49_cast = conv(dilations = var_2730, groups = var_1186, pad = q_49_pad_0, pad_type = q_49_pad_type_0, strides = var_2728, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_113_cast)[name = tensor("q_49_cast")]; + tensor var_2734 = const()[name = tensor("op_2734"), val = tensor([1, 1])]; + tensor var_2736 = const()[name = tensor("op_2736"), val = tensor([1, 1])]; + tensor k_49_pad_type_0 = const()[name = tensor("k_49_pad_type_0"), val = tensor("custom")]; + tensor k_49_pad_0 = const()[name = tensor("k_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(233231488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(234050752))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_49_cast = conv(dilations = var_2736, groups = var_1186, pad = k_49_pad_0, pad_type = k_49_pad_type_0, strides = var_2734, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_113_cast)[name = tensor("k_49_cast")]; + tensor var_2740 = const()[name = tensor("op_2740"), val = tensor([1, 1])]; + tensor var_2742 = const()[name = tensor("op_2742"), val = tensor([1, 1])]; + tensor v_49_pad_type_0 = const()[name = tensor("v_49_pad_type_0"), val = tensor("custom")]; + tensor v_49_pad_0 = const()[name = tensor("v_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(234050880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(234870144))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_49_cast = conv(dilations = var_2742, groups = var_1186, pad = v_49_pad_0, pad_type = v_49_pad_type_0, strides = var_2740, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_113_cast)[name = tensor("v_49_cast")]; + tensor var_2746 = const()[name = tensor("op_2746"), val = tensor([2, 20, 64, -1])]; + tensor var_2747_cast = reshape(shape = var_2746, x = q_49_cast)[name = tensor("op_2747_cast")]; + tensor var_2748 = const()[name = tensor("op_2748"), val = tensor([2, 20, 64, -1])]; + tensor var_2749_cast = reshape(shape = var_2748, x = k_49_cast)[name = tensor("op_2749_cast")]; + tensor var_2750 = const()[name = tensor("op_2750"), val = tensor([2, 20, 64, -1])]; + tensor var_2751_cast = reshape(shape = var_2750, x = v_49_cast)[name = tensor("op_2751_cast")]; + tensor attn_weights_97_transpose_x_0 = const()[name = tensor("attn_weights_97_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_97_transpose_y_0 = const()[name = tensor("attn_weights_97_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_97_cast = matmul(transpose_x = attn_weights_97_transpose_x_0, transpose_y = attn_weights_97_transpose_y_0, x = var_2747_cast, y = var_2749_cast)[name = tensor("attn_weights_97_cast")]; + tensor attn_weights_99_cast = mul(x = attn_weights_97_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_99_cast")]; + tensor var_2755_cast = softmax(axis = var_1170, x = attn_weights_99_cast)[name = tensor("op_2755_cast")]; + tensor attn_49_transpose_x_0 = const()[name = tensor("attn_49_transpose_x_0"), val = tensor(false)]; + tensor attn_49_transpose_y_0 = const()[name = tensor("attn_49_transpose_y_0"), val = tensor(true)]; + tensor attn_49_cast = matmul(transpose_x = attn_49_transpose_x_0, transpose_y = attn_49_transpose_y_0, x = var_2751_cast, y = var_2755_cast)[name = tensor("attn_49_cast")]; + tensor var_2759 = const()[name = tensor("op_2759"), val = tensor([2, 1280, 1, -1])]; + tensor input_195_cast = reshape(shape = var_2759, x = attn_49_cast)[name = tensor("input_195_cast")]; + tensor var_2764 = const()[name = tensor("op_2764"), val = tensor([1, 1])]; + tensor var_2766 = const()[name = 
tensor("op_2766"), val = tensor([1, 1])]; + tensor var_2768_pad_type_0 = const()[name = tensor("op_2768_pad_type_0"), val = tensor("custom")]; + tensor var_2768_pad_0 = const()[name = tensor("op_2768_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(234870272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235689536))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235689664)))]; + tensor var_2768_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_2766, groups = var_1186, pad = var_2768_pad_0, pad_type = var_2768_pad_type_0, strides = var_2764, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_195_cast)[name = tensor("op_2768_cast")]; + tensor inputs_75_cast = add(x = var_2768_cast, y = inputs_73_cast)[name = tensor("inputs_75_cast")]; + tensor var_2772 = const()[name = tensor("op_2772"), val = tensor([1])]; + tensor channels_mean_75_cast = reduce_mean(axes = var_2772, keep_dims = var_1181, x = inputs_75_cast)[name = tensor("channels_mean_75_cast")]; + tensor zero_mean_75_cast = sub(x = inputs_75_cast, y = channels_mean_75_cast)[name = tensor("zero_mean_75_cast")]; + tensor zero_mean_sq_75_cast = mul(x = zero_mean_75_cast, y = zero_mean_75_cast)[name = tensor("zero_mean_sq_75_cast")]; + tensor var_2776 = const()[name = tensor("op_2776"), val = tensor([1])]; + tensor var_2777_cast = reduce_mean(axes = var_2776, keep_dims = var_1181, x = zero_mean_sq_75_cast)[name = tensor("op_2777_cast")]; + tensor var_2778_to_fp16 = const()[name = tensor("op_2778_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2779_cast = add(x = var_2777_cast, y = var_2778_to_fp16)[name = tensor("op_2779_cast")]; + tensor denom_75_epsilon_0_to_fp16 = const()[name = tensor("denom_75_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_75_cast = rsqrt(epsilon = denom_75_epsilon_0_to_fp16, x = var_2779_cast)[name = tensor("denom_75_cast")]; + tensor out_75_cast = mul(x = zero_mean_75_cast, y = denom_75_cast)[name = tensor("out_75_cast")]; + tensor var_2783_to_fp16 = const()[name = tensor("op_2783_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235692288)))]; + tensor var_2784_cast = add(x = out_75_cast, y = var_2783_to_fp16)[name = tensor("op_2784_cast")]; + tensor var_2786_to_fp16 = const()[name = tensor("op_2786_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235694912)))]; + tensor hidden_states_115_cast = mul(x = var_2784_cast, y = var_2786_to_fp16)[name = tensor("hidden_states_115_cast")]; + tensor var_2793 = const()[name = tensor("op_2793"), val = tensor([1, 1])]; + tensor var_2795 = const()[name = tensor("op_2795"), val = tensor([1, 1])]; + tensor q_51_pad_type_0 = const()[name = tensor("q_51_pad_type_0"), val = tensor("custom")]; + tensor q_51_pad_0 = const()[name = 
tensor("q_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235697536))), lut = tensor([-0x1.60cp-7, 0x1.608p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_51_cast = conv(dilations = var_2795, groups = var_1186, pad = q_51_pad_0, pad_type = q_51_pad_type_0, strides = var_2793, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_115_cast)[name = tensor("q_51_cast")]; + tensor var_2799 = const()[name = tensor("op_2799"), val = tensor([1, 1])]; + tensor var_2801 = const()[name = tensor("op_2801"), val = tensor([1, 1])]; + tensor k_51_pad_type_0 = const()[name = tensor("k_51_pad_type_0"), val = tensor("custom")]; + tensor k_51_pad_0 = const()[name = tensor("k_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(235902400))), lut = tensor([-0x1.838p-7, -0x1.f7p-9, 0x1.f84p-9, 0x1.83cp-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_51_cast = conv(dilations = var_2801, groups = var_1186, pad = k_51_pad_0, pad_type = k_51_pad_type_0, strides = var_2799, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_51_cast")]; + tensor var_2805 = const()[name = tensor("op_2805"), val = tensor([1, 1])]; + tensor var_2807 = const()[name = tensor("op_2807"), val = tensor([1, 1])]; + tensor v_51_pad_type_0 = const()[name = tensor("v_51_pad_type_0"), val = tensor("custom")]; + tensor v_51_pad_0 = const()[name = tensor("v_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(236557824))), lut = tensor([-0x1.eap-8, 0x1.ecp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_51_cast = conv(dilations = var_2807, groups = var_1186, pad = v_51_pad_0, pad_type = v_51_pad_type_0, strides = var_2805, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_51_cast")]; + tensor var_2811 = const()[name = tensor("op_2811"), val = tensor([2, 20, 64, -1])]; + tensor var_2812_cast = reshape(shape = var_2811, x = q_51_cast)[name = tensor("op_2812_cast")]; + tensor var_2813 = const()[name = tensor("op_2813"), val = tensor([2, 20, 64, -1])]; + tensor var_2814_cast = reshape(shape = var_2813, x = k_51_cast)[name = tensor("op_2814_cast")]; + tensor var_2815 = const()[name = tensor("op_2815"), val = tensor([2, 20, 64, -1])]; + tensor var_2816_cast = reshape(shape = var_2815, x = v_51_cast)[name = tensor("op_2816_cast")]; + tensor attn_weights_101_transpose_x_0 = const()[name = tensor("attn_weights_101_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_101_transpose_y_0 = const()[name = 
tensor("attn_weights_101_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_101_cast = matmul(transpose_x = attn_weights_101_transpose_x_0, transpose_y = attn_weights_101_transpose_y_0, x = var_2812_cast, y = var_2814_cast)[name = tensor("attn_weights_101_cast")]; + tensor attn_weights_103_cast = mul(x = attn_weights_101_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_103_cast")]; + tensor var_2820_cast = softmax(axis = var_1170, x = attn_weights_103_cast)[name = tensor("op_2820_cast")]; + tensor attn_51_transpose_x_0 = const()[name = tensor("attn_51_transpose_x_0"), val = tensor(false)]; + tensor attn_51_transpose_y_0 = const()[name = tensor("attn_51_transpose_y_0"), val = tensor(true)]; + tensor attn_51_cast = matmul(transpose_x = attn_51_transpose_x_0, transpose_y = attn_51_transpose_y_0, x = var_2816_cast, y = var_2820_cast)[name = tensor("attn_51_cast")]; + tensor var_2824 = const()[name = tensor("op_2824"), val = tensor([2, 1280, 1, -1])]; + tensor input_197_cast = reshape(shape = var_2824, x = attn_51_cast)[name = tensor("input_197_cast")]; + tensor var_2829 = const()[name = tensor("op_2829"), val = tensor([1, 1])]; + tensor var_2831 = const()[name = tensor("op_2831"), val = tensor([1, 1])]; + tensor var_2833_pad_type_0 = const()[name = tensor("op_2833_pad_type_0"), val = tensor("custom")]; + tensor var_2833_pad_0 = const()[name = tensor("op_2833_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(236885568))), lut = tensor([-0x1.2b8p-8, 0x1.2cp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(237090432)))]; + tensor var_2833_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_2831, groups = var_1186, pad = var_2833_pad_0, pad_type = var_2833_pad_type_0, strides = var_2829, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_197_cast)[name = tensor("op_2833_cast")]; + tensor inputs_77_cast = add(x = var_2833_cast, y = inputs_75_cast)[name = tensor("inputs_77_cast")]; + tensor var_2837 = const()[name = tensor("op_2837"), val = tensor([1])]; + tensor channels_mean_77_cast = reduce_mean(axes = var_2837, keep_dims = var_1181, x = inputs_77_cast)[name = tensor("channels_mean_77_cast")]; + tensor zero_mean_77_cast = sub(x = inputs_77_cast, y = channels_mean_77_cast)[name = tensor("zero_mean_77_cast")]; + tensor zero_mean_sq_77_cast = mul(x = zero_mean_77_cast, y = zero_mean_77_cast)[name = tensor("zero_mean_sq_77_cast")]; + tensor var_2841 = const()[name = tensor("op_2841"), val = tensor([1])]; + tensor var_2842_cast = reduce_mean(axes = var_2841, keep_dims = var_1181, x = zero_mean_sq_77_cast)[name = tensor("op_2842_cast")]; + tensor var_2843_to_fp16 = const()[name = tensor("op_2843_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2844_cast = add(x = var_2842_cast, y = var_2843_to_fp16)[name = tensor("op_2844_cast")]; + tensor denom_77_epsilon_0_to_fp16 = const()[name = 
tensor("denom_77_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_77_cast = rsqrt(epsilon = denom_77_epsilon_0_to_fp16, x = var_2844_cast)[name = tensor("denom_77_cast")]; + tensor out_77_cast = mul(x = zero_mean_77_cast, y = denom_77_cast)[name = tensor("out_77_cast")]; + tensor var_2848_to_fp16 = const()[name = tensor("op_2848_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(237093056)))]; + tensor var_2849_cast = add(x = out_77_cast, y = var_2848_to_fp16)[name = tensor("op_2849_cast")]; + tensor var_2851_to_fp16 = const()[name = tensor("op_2851_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(237095680)))]; + tensor input_199_cast = mul(x = var_2849_cast, y = var_2851_to_fp16)[name = tensor("input_199_cast")]; + tensor var_2859 = const()[name = tensor("op_2859"), val = tensor([1, 1])]; + tensor var_2861 = const()[name = tensor("op_2861"), val = tensor([1, 1])]; + tensor var_2863_pad_type_0 = const()[name = tensor("op_2863_pad_type_0"), val = tensor("custom")]; + tensor var_2863_pad_0 = const()[name = tensor("op_2863_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(237098304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(246928768))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(246928960)))]; + tensor var_2863_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_2861, groups = var_1186, pad = var_2863_pad_0, pad_type = var_2863_pad_type_0, strides = var_2859, weight = down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_199_cast)[name = tensor("op_2863_cast")]; + tensor var_2864_split_sizes_0 = const()[name = tensor("op_2864_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2864_axis_0 = const()[name = tensor("op_2864_axis_0"), val = tensor(1)]; + tensor var_2864_cast_0, tensor var_2864_cast_1 = split(axis = var_2864_axis_0, split_sizes = var_2864_split_sizes_0, x = var_2863_cast)[name = tensor("op_2864_cast")]; + tensor var_2866_mode_0 = const()[name = tensor("op_2866_mode_0"), val = tensor("EXACT")]; + tensor var_2866_cast = gelu(mode = var_2866_mode_0, x = var_2864_cast_1)[name = tensor("op_2866_cast")]; + tensor input_201_cast = mul(x = var_2864_cast_0, y = var_2866_cast)[name = tensor("input_201_cast")]; + tensor var_2870 = const()[name = tensor("op_2870"), val = tensor([1, 1])]; + tensor var_2872 = const()[name = tensor("op_2872"), val = tensor([1, 1])]; + tensor var_2874_pad_type_0 = const()[name = tensor("op_2874_pad_type_0"), val = tensor("custom")]; + tensor var_2874_pad_0 = const()[name = tensor("op_2874_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(246949504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250226368))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250226496)))]; + tensor var_2874_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_2872, groups = var_1186, pad = var_2874_pad_0, pad_type = var_2874_pad_type_0, strides = var_2870, weight = down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_201_cast)[name = tensor("op_2874_cast")]; + tensor inputs_79_cast = add(x = var_2874_cast, y = inputs_77_cast)[name = tensor("inputs_79_cast")]; + tensor var_2884 = const()[name = tensor("op_2884"), val = tensor([1])]; + tensor channels_mean_79_cast = reduce_mean(axes = var_2884, keep_dims = var_1181, x = inputs_79_cast)[name = tensor("channels_mean_79_cast")]; + tensor zero_mean_79_cast = sub(x = inputs_79_cast, y = channels_mean_79_cast)[name = tensor("zero_mean_79_cast")]; + tensor zero_mean_sq_79_cast = mul(x = zero_mean_79_cast, y = zero_mean_79_cast)[name = tensor("zero_mean_sq_79_cast")]; + tensor var_2888 = const()[name = tensor("op_2888"), val = tensor([1])]; + tensor var_2889_cast = reduce_mean(axes = var_2888, keep_dims = var_1181, x = zero_mean_sq_79_cast)[name = tensor("op_2889_cast")]; + tensor var_2890_to_fp16 = const()[name = tensor("op_2890_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2891_cast = add(x = var_2889_cast, y = var_2890_to_fp16)[name = tensor("op_2891_cast")]; + tensor denom_79_epsilon_0_to_fp16 = const()[name = tensor("denom_79_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_79_cast = rsqrt(epsilon = denom_79_epsilon_0_to_fp16, x = var_2891_cast)[name = tensor("denom_79_cast")]; + tensor out_79_cast = mul(x = zero_mean_79_cast, y = denom_79_cast)[name = tensor("out_79_cast")]; + tensor var_2895_to_fp16 = const()[name = tensor("op_2895_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250229120)))]; + tensor var_2896_cast = add(x = out_79_cast, y = var_2895_to_fp16)[name = tensor("op_2896_cast")]; + tensor var_2898_to_fp16 = const()[name = tensor("op_2898_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250231744)))]; + tensor hidden_states_119_cast = mul(x = var_2896_cast, y = var_2898_to_fp16)[name = tensor("hidden_states_119_cast")]; + tensor var_2905 = const()[name = tensor("op_2905"), val = tensor([1, 1])]; + tensor var_2907 = const()[name = tensor("op_2907"), val = tensor([1, 1])]; + tensor q_53_pad_type_0 = const()[name = tensor("q_53_pad_type_0"), val = tensor("custom")]; + tensor q_53_pad_0 = const()[name = tensor("q_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250234368))), lut = tensor([-0x1.1d4p-6, 0x1.1dcp-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_53_cast = conv(dilations 
= var_2907, groups = var_1186, pad = q_53_pad_0, pad_type = q_53_pad_type_0, strides = var_2905, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_119_cast)[name = tensor("q_53_cast")]; + tensor var_2911 = const()[name = tensor("op_2911"), val = tensor([1, 1])]; + tensor var_2913 = const()[name = tensor("op_2913"), val = tensor([1, 1])]; + tensor k_53_pad_type_0 = const()[name = tensor("k_53_pad_type_0"), val = tensor("custom")]; + tensor k_53_pad_0 = const()[name = tensor("k_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250439232))), lut = tensor([-0x1.194p-6, 0x1.1ap-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_53_cast = conv(dilations = var_2913, groups = var_1186, pad = k_53_pad_0, pad_type = k_53_pad_type_0, strides = var_2911, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_119_cast)[name = tensor("k_53_cast")]; + tensor var_2917 = const()[name = tensor("op_2917"), val = tensor([1, 1])]; + tensor var_2919 = const()[name = tensor("op_2919"), val = tensor([1, 1])]; + tensor v_53_pad_type_0 = const()[name = tensor("v_53_pad_type_0"), val = tensor("custom")]; + tensor v_53_pad_0 = const()[name = tensor("v_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250644096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(251463360))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_53_cast = conv(dilations = var_2919, groups = var_1186, pad = v_53_pad_0, pad_type = v_53_pad_type_0, strides = var_2917, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_119_cast)[name = tensor("v_53_cast")]; + tensor var_2923 = const()[name = tensor("op_2923"), val = tensor([2, 20, 64, -1])]; + tensor var_2924_cast = reshape(shape = var_2923, x = q_53_cast)[name = tensor("op_2924_cast")]; + tensor var_2925 = const()[name = tensor("op_2925"), val = tensor([2, 20, 64, -1])]; + tensor var_2926_cast = reshape(shape = var_2925, x = k_53_cast)[name = tensor("op_2926_cast")]; + tensor var_2927 = const()[name = tensor("op_2927"), val = tensor([2, 20, 64, -1])]; + tensor var_2928_cast = reshape(shape = var_2927, x = v_53_cast)[name = tensor("op_2928_cast")]; + tensor attn_weights_105_transpose_x_0 = const()[name = tensor("attn_weights_105_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_105_transpose_y_0 = const()[name = tensor("attn_weights_105_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_105_cast = matmul(transpose_x = attn_weights_105_transpose_x_0, transpose_y = attn_weights_105_transpose_y_0, x = var_2924_cast, y = var_2926_cast)[name = tensor("attn_weights_105_cast")]; + tensor attn_weights_107_cast = mul(x = attn_weights_105_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_107_cast")]; + tensor var_2932_cast = softmax(axis = var_1170, x = 
attn_weights_107_cast)[name = tensor("op_2932_cast")]; + tensor attn_53_transpose_x_0 = const()[name = tensor("attn_53_transpose_x_0"), val = tensor(false)]; + tensor attn_53_transpose_y_0 = const()[name = tensor("attn_53_transpose_y_0"), val = tensor(true)]; + tensor attn_53_cast = matmul(transpose_x = attn_53_transpose_x_0, transpose_y = attn_53_transpose_y_0, x = var_2928_cast, y = var_2932_cast)[name = tensor("attn_53_cast")]; + tensor var_2936 = const()[name = tensor("op_2936"), val = tensor([2, 1280, 1, -1])]; + tensor input_203_cast = reshape(shape = var_2936, x = attn_53_cast)[name = tensor("input_203_cast")]; + tensor var_2941 = const()[name = tensor("op_2941"), val = tensor([1, 1])]; + tensor var_2943 = const()[name = tensor("op_2943"), val = tensor([1, 1])]; + tensor var_2945_pad_type_0 = const()[name = tensor("op_2945_pad_type_0"), val = tensor("custom")]; + tensor var_2945_pad_0 = const()[name = tensor("op_2945_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(251463488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252282752))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252282880)))]; + tensor var_2945_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_2943, groups = var_1186, pad = var_2945_pad_0, pad_type = var_2945_pad_type_0, strides = var_2941, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_203_cast)[name = tensor("op_2945_cast")]; + tensor inputs_81_cast = add(x = var_2945_cast, y = inputs_79_cast)[name = tensor("inputs_81_cast")]; + tensor var_2949 = const()[name = tensor("op_2949"), val = tensor([1])]; + tensor channels_mean_81_cast = reduce_mean(axes = var_2949, keep_dims = var_1181, x = inputs_81_cast)[name = tensor("channels_mean_81_cast")]; + tensor zero_mean_81_cast = sub(x = inputs_81_cast, y = channels_mean_81_cast)[name = tensor("zero_mean_81_cast")]; + tensor zero_mean_sq_81_cast = mul(x = zero_mean_81_cast, y = zero_mean_81_cast)[name = tensor("zero_mean_sq_81_cast")]; + tensor var_2953 = const()[name = tensor("op_2953"), val = tensor([1])]; + tensor var_2954_cast = reduce_mean(axes = var_2953, keep_dims = var_1181, x = zero_mean_sq_81_cast)[name = tensor("op_2954_cast")]; + tensor var_2955_to_fp16 = const()[name = tensor("op_2955_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2956_cast = add(x = var_2954_cast, y = var_2955_to_fp16)[name = tensor("op_2956_cast")]; + tensor denom_81_epsilon_0_to_fp16 = const()[name = tensor("denom_81_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_81_cast = rsqrt(epsilon = denom_81_epsilon_0_to_fp16, x = var_2956_cast)[name = tensor("denom_81_cast")]; + tensor out_81_cast = mul(x = zero_mean_81_cast, y = denom_81_cast)[name = tensor("out_81_cast")]; + tensor var_2960_to_fp16 = const()[name = tensor("op_2960_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(252285504)))]; + tensor var_2961_cast = add(x = out_81_cast, y = var_2960_to_fp16)[name = tensor("op_2961_cast")]; + tensor var_2963_to_fp16 = const()[name = tensor("op_2963_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252288128)))]; + tensor hidden_states_121_cast = mul(x = var_2961_cast, y = var_2963_to_fp16)[name = tensor("hidden_states_121_cast")]; + tensor var_2970 = const()[name = tensor("op_2970"), val = tensor([1, 1])]; + tensor var_2972 = const()[name = tensor("op_2972"), val = tensor([1, 1])]; + tensor q_55_pad_type_0 = const()[name = tensor("q_55_pad_type_0"), val = tensor("custom")]; + tensor q_55_pad_0 = const()[name = tensor("q_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252290752))), lut = tensor([-0x1.688p-7, 0x1.688p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_55_cast = conv(dilations = var_2972, groups = var_1186, pad = q_55_pad_0, pad_type = q_55_pad_type_0, strides = var_2970, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_121_cast)[name = tensor("q_55_cast")]; + tensor var_2976 = const()[name = tensor("op_2976"), val = tensor([1, 1])]; + tensor var_2978 = const()[name = tensor("op_2978"), val = tensor([1, 1])]; + tensor k_55_pad_type_0 = const()[name = tensor("k_55_pad_type_0"), val = tensor("custom")]; + tensor k_55_pad_0 = const()[name = tensor("k_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(252495616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(253806400))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_55_cast = conv(dilations = var_2978, groups = var_1186, pad = k_55_pad_0, pad_type = k_55_pad_type_0, strides = var_2976, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_55_cast")]; + tensor var_2982 = const()[name = tensor("op_2982"), val = tensor([1, 1])]; + tensor var_2984 = const()[name = tensor("op_2984"), val = tensor([1, 1])]; + tensor v_55_pad_type_0 = const()[name = tensor("v_55_pad_type_0"), val = tensor("custom")]; + tensor v_55_pad_0 = const()[name = tensor("v_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(253806528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255117312))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_55_cast = conv(dilations = var_2984, groups = var_1186, pad = v_55_pad_0, pad_type = v_55_pad_type_0, strides = var_2982, weight = 
down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_55_cast")]; + tensor var_2988 = const()[name = tensor("op_2988"), val = tensor([2, 20, 64, -1])]; + tensor var_2989_cast = reshape(shape = var_2988, x = q_55_cast)[name = tensor("op_2989_cast")]; + tensor var_2990 = const()[name = tensor("op_2990"), val = tensor([2, 20, 64, -1])]; + tensor var_2991_cast = reshape(shape = var_2990, x = k_55_cast)[name = tensor("op_2991_cast")]; + tensor var_2992 = const()[name = tensor("op_2992"), val = tensor([2, 20, 64, -1])]; + tensor var_2993_cast = reshape(shape = var_2992, x = v_55_cast)[name = tensor("op_2993_cast")]; + tensor attn_weights_109_transpose_x_0 = const()[name = tensor("attn_weights_109_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_109_transpose_y_0 = const()[name = tensor("attn_weights_109_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_109_cast = matmul(transpose_x = attn_weights_109_transpose_x_0, transpose_y = attn_weights_109_transpose_y_0, x = var_2989_cast, y = var_2991_cast)[name = tensor("attn_weights_109_cast")]; + tensor attn_weights_111_cast = mul(x = attn_weights_109_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_111_cast")]; + tensor var_2997_cast = softmax(axis = var_1170, x = attn_weights_111_cast)[name = tensor("op_2997_cast")]; + tensor attn_55_transpose_x_0 = const()[name = tensor("attn_55_transpose_x_0"), val = tensor(false)]; + tensor attn_55_transpose_y_0 = const()[name = tensor("attn_55_transpose_y_0"), val = tensor(true)]; + tensor attn_55_cast = matmul(transpose_x = attn_55_transpose_x_0, transpose_y = attn_55_transpose_y_0, x = var_2993_cast, y = var_2997_cast)[name = tensor("attn_55_cast")]; + tensor var_3001 = const()[name = tensor("op_3001"), val = tensor([2, 1280, 1, -1])]; + tensor input_205_cast = reshape(shape = var_3001, x = attn_55_cast)[name = tensor("input_205_cast")]; + tensor var_3006 = const()[name = tensor("op_3006"), val = tensor([1, 1])]; + tensor var_3008 = const()[name = tensor("op_3008"), val = tensor([1, 1])]; + tensor var_3010_pad_type_0 = const()[name = tensor("op_3010_pad_type_0"), val = tensor("custom")]; + tensor var_3010_pad_0 = const()[name = tensor("op_3010_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255117440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255936704))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255936832)))]; + tensor var_3010_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_3008, groups = var_1186, pad = var_3010_pad_0, pad_type = var_3010_pad_type_0, strides = var_3006, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_205_cast)[name = tensor("op_3010_cast")]; + tensor inputs_83_cast = add(x = var_3010_cast, y = inputs_81_cast)[name = tensor("inputs_83_cast")]; + 
tensor var_3014 = const()[name = tensor("op_3014"), val = tensor([1])]; + tensor channels_mean_83_cast = reduce_mean(axes = var_3014, keep_dims = var_1181, x = inputs_83_cast)[name = tensor("channels_mean_83_cast")]; + tensor zero_mean_83_cast = sub(x = inputs_83_cast, y = channels_mean_83_cast)[name = tensor("zero_mean_83_cast")]; + tensor zero_mean_sq_83_cast = mul(x = zero_mean_83_cast, y = zero_mean_83_cast)[name = tensor("zero_mean_sq_83_cast")]; + tensor var_3018 = const()[name = tensor("op_3018"), val = tensor([1])]; + tensor var_3019_cast = reduce_mean(axes = var_3018, keep_dims = var_1181, x = zero_mean_sq_83_cast)[name = tensor("op_3019_cast")]; + tensor var_3020_to_fp16 = const()[name = tensor("op_3020_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3021_cast = add(x = var_3019_cast, y = var_3020_to_fp16)[name = tensor("op_3021_cast")]; + tensor denom_83_epsilon_0_to_fp16 = const()[name = tensor("denom_83_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_83_cast = rsqrt(epsilon = denom_83_epsilon_0_to_fp16, x = var_3021_cast)[name = tensor("denom_83_cast")]; + tensor out_83_cast = mul(x = zero_mean_83_cast, y = denom_83_cast)[name = tensor("out_83_cast")]; + tensor var_3025_to_fp16 = const()[name = tensor("op_3025_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255939456)))]; + tensor var_3026_cast = add(x = out_83_cast, y = var_3025_to_fp16)[name = tensor("op_3026_cast")]; + tensor var_3028_to_fp16 = const()[name = tensor("op_3028_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255942080)))]; + tensor input_207_cast = mul(x = var_3026_cast, y = var_3028_to_fp16)[name = tensor("input_207_cast")]; + tensor var_3036 = const()[name = tensor("op_3036"), val = tensor([1, 1])]; + tensor var_3038 = const()[name = tensor("op_3038"), val = tensor([1, 1])]; + tensor var_3040_pad_type_0 = const()[name = tensor("op_3040_pad_type_0"), val = tensor("custom")]; + tensor var_3040_pad_0 = const()[name = tensor("op_3040_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255944704))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(265775168))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(265775360)))]; + tensor var_3040_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_3038, groups = var_1186, pad = var_3040_pad_0, pad_type = var_3040_pad_type_0, strides = var_3036, weight = down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_207_cast)[name = tensor("op_3040_cast")]; + tensor var_3041_split_sizes_0 = const()[name = tensor("op_3041_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3041_axis_0 = const()[name = tensor("op_3041_axis_0"), val = tensor(1)]; + tensor var_3041_cast_0, tensor var_3041_cast_1 = split(axis = var_3041_axis_0, split_sizes = var_3041_split_sizes_0, 
x = var_3040_cast)[name = tensor("op_3041_cast")]; + tensor var_3043_mode_0 = const()[name = tensor("op_3043_mode_0"), val = tensor("EXACT")]; + tensor var_3043_cast = gelu(mode = var_3043_mode_0, x = var_3041_cast_1)[name = tensor("op_3043_cast")]; + tensor input_209_cast = mul(x = var_3041_cast_0, y = var_3043_cast)[name = tensor("input_209_cast")]; + tensor var_3047 = const()[name = tensor("op_3047"), val = tensor([1, 1])]; + tensor var_3049 = const()[name = tensor("op_3049"), val = tensor([1, 1])]; + tensor var_3051_pad_type_0 = const()[name = tensor("op_3051_pad_type_0"), val = tensor("custom")]; + tensor var_3051_pad_0 = const()[name = tensor("op_3051_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(265795904))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(269072768))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(269072896)))]; + tensor var_3051_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_3049, groups = var_1186, pad = var_3051_pad_0, pad_type = var_3051_pad_type_0, strides = var_3047, weight = down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_209_cast)[name = tensor("op_3051_cast")]; + tensor hidden_states_125_cast = add(x = var_3051_cast, y = inputs_83_cast)[name = tensor("hidden_states_125_cast")]; + tensor var_3053 = const()[name = tensor("op_3053"), val = tensor([2, 1280, 32, 32])]; + tensor input_211_cast = reshape(shape = var_3053, x = hidden_states_125_cast)[name = tensor("input_211_cast")]; + tensor var_3057 = const()[name = tensor("op_3057"), val = tensor([1, 1])]; + tensor var_3059 = const()[name = tensor("op_3059"), val = tensor([1, 1])]; + tensor hidden_states_127_pad_type_0 = const()[name = tensor("hidden_states_127_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_127_pad_0 = const()[name = tensor("hidden_states_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(269075520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(270304384))), name = tensor("down_blocks_2_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(270304576)))]; + tensor hidden_states_127_cast = conv(bias = down_blocks_2_attentions_0_proj_out_bias_to_fp16, dilations = var_3059, groups = var_1186, pad = hidden_states_127_pad_0, pad_type = hidden_states_127_pad_type_0, strides = var_3057, weight = down_blocks_2_attentions_0_proj_out_weight_to_fp16_palettized, x = input_211_cast)[name = tensor("hidden_states_127_cast")]; + tensor 
input_213_cast = add(x = hidden_states_127_cast, y = hidden_states_61_cast)[name = tensor("input_213_cast")]; + tensor reshape_52_shape_0 = const()[name = tensor("reshape_52_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_52_cast = reshape(shape = reshape_52_shape_0, x = input_213_cast)[name = tensor("reshape_52_cast")]; + tensor reduce_mean_39_axes_0 = const()[name = tensor("reduce_mean_39_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_39_keep_dims_0 = const()[name = tensor("reduce_mean_39_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_39_cast = reduce_mean(axes = reduce_mean_39_axes_0, keep_dims = reduce_mean_39_keep_dims_0, x = reshape_52_cast)[name = tensor("reduce_mean_39_cast")]; + tensor sub_26_cast = sub(x = reshape_52_cast, y = reduce_mean_39_cast)[name = tensor("sub_26_cast")]; + tensor square_13_cast = square(x = sub_26_cast)[name = tensor("square_13_cast")]; + tensor reduce_mean_41_axes_0 = const()[name = tensor("reduce_mean_41_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_41_keep_dims_0 = const()[name = tensor("reduce_mean_41_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_41_cast = reduce_mean(axes = reduce_mean_41_axes_0, keep_dims = reduce_mean_41_keep_dims_0, x = square_13_cast)[name = tensor("reduce_mean_41_cast")]; + tensor add_26_y_0_to_fp16 = const()[name = tensor("add_26_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_26_cast = add(x = reduce_mean_41_cast, y = add_26_y_0_to_fp16)[name = tensor("add_26_cast")]; + tensor sqrt_13_cast = sqrt(x = add_26_cast)[name = tensor("sqrt_13_cast")]; + tensor real_div_13_cast = real_div(x = sub_26_cast, y = sqrt_13_cast)[name = tensor("real_div_13_cast")]; + tensor reshape_53_shape_0 = const()[name = tensor("reshape_53_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_53_cast = reshape(shape = reshape_53_shape_0, x = real_div_13_cast)[name = tensor("reshape_53_cast")]; + tensor add_27_gamma_0_to_fp16 = const()[name = tensor("add_27_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(270307200)))]; + tensor add_27_beta_0_to_fp16 = const()[name = tensor("add_27_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(270309824)))]; + tensor add_27_epsilon_0_to_fp16 = const()[name = tensor("add_27_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_27_cast = batch_norm(beta = add_27_beta_0_to_fp16, epsilon = add_27_epsilon_0_to_fp16, gamma = add_27_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_53_cast)[name = tensor("add_27_cast")]; + tensor input_217_cast = silu(x = add_27_cast)[name = tensor("input_217_cast")]; + tensor var_3074 = const()[name = tensor("op_3074"), val = tensor([1, 1])]; + tensor var_3076 = const()[name = tensor("op_3076"), val = tensor([1, 1])]; + tensor hidden_states_129_pad_type_0 = const()[name = tensor("hidden_states_129_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_129_pad_0 = const()[name = tensor("hidden_states_129_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_2_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(270312448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(281371712))), name = tensor("down_blocks_2_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor 
down_blocks_2_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(281371904)))]; + tensor hidden_states_129_cast = conv(bias = down_blocks_2_resnets_1_conv1_bias_to_fp16, dilations = var_3076, groups = var_1186, pad = hidden_states_129_pad_0, pad_type = hidden_states_129_pad_type_0, strides = var_3074, weight = down_blocks_2_resnets_1_conv1_weight_to_fp16_palettized, x = input_217_cast)[name = tensor("hidden_states_129_cast")]; + tensor var_3082 = const()[name = tensor("op_3082"), val = tensor([1, 1])]; + tensor var_3084 = const()[name = tensor("op_3084"), val = tensor([1, 1])]; + tensor temb_11_pad_type_0 = const()[name = tensor("temb_11_pad_type_0"), val = tensor("custom")]; + tensor temb_11_pad_0 = const()[name = tensor("temb_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(281374528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(282193792))), name = tensor("down_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(282193920)))]; + tensor temb_11_cast = conv(bias = down_blocks_2_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_3084, groups = var_1186, pad = temb_11_pad_0, pad_type = temb_11_pad_type_0, strides = var_3082, weight = down_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_11_cast")]; + tensor input_221_cast = add(x = hidden_states_129_cast, y = temb_11_cast)[name = tensor("input_221_cast")]; + tensor reshape_56_shape_0 = const()[name = tensor("reshape_56_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_56_cast = reshape(shape = reshape_56_shape_0, x = input_221_cast)[name = tensor("reshape_56_cast")]; + tensor reduce_mean_42_axes_0 = const()[name = tensor("reduce_mean_42_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_42_keep_dims_0 = const()[name = tensor("reduce_mean_42_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_42_cast = reduce_mean(axes = reduce_mean_42_axes_0, keep_dims = reduce_mean_42_keep_dims_0, x = reshape_56_cast)[name = tensor("reduce_mean_42_cast")]; + tensor sub_28_cast = sub(x = reshape_56_cast, y = reduce_mean_42_cast)[name = tensor("sub_28_cast")]; + tensor square_14_cast = square(x = sub_28_cast)[name = tensor("square_14_cast")]; + tensor reduce_mean_44_axes_0 = const()[name = tensor("reduce_mean_44_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_44_keep_dims_0 = const()[name = tensor("reduce_mean_44_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_44_cast = reduce_mean(axes = reduce_mean_44_axes_0, keep_dims = reduce_mean_44_keep_dims_0, x = square_14_cast)[name = tensor("reduce_mean_44_cast")]; + tensor add_28_y_0_to_fp16 = const()[name = tensor("add_28_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_28_cast = add(x = reduce_mean_44_cast, y = add_28_y_0_to_fp16)[name = tensor("add_28_cast")]; + tensor sqrt_14_cast = sqrt(x = add_28_cast)[name = tensor("sqrt_14_cast")]; + tensor real_div_14_cast = real_div(x = 
sub_28_cast, y = sqrt_14_cast)[name = tensor("real_div_14_cast")]; + tensor reshape_57_shape_0 = const()[name = tensor("reshape_57_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_57_cast = reshape(shape = reshape_57_shape_0, x = real_div_14_cast)[name = tensor("reshape_57_cast")]; + tensor add_29_gamma_0_to_fp16 = const()[name = tensor("add_29_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(282196544)))]; + tensor add_29_beta_0_to_fp16 = const()[name = tensor("add_29_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(282199168)))]; + tensor add_29_epsilon_0_to_fp16 = const()[name = tensor("add_29_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_29_cast = batch_norm(beta = add_29_beta_0_to_fp16, epsilon = add_29_epsilon_0_to_fp16, gamma = add_29_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_57_cast)[name = tensor("add_29_cast")]; + tensor input_225_cast = silu(x = add_29_cast)[name = tensor("input_225_cast")]; + tensor var_3094 = const()[name = tensor("op_3094"), val = tensor([1, 1])]; + tensor var_3096 = const()[name = tensor("op_3096"), val = tensor([1, 1])]; + tensor hidden_states_131_pad_type_0 = const()[name = tensor("hidden_states_131_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_131_pad_0 = const()[name = tensor("hidden_states_131_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_2_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(282201792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(289574656))), name = tensor("down_blocks_2_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor down_blocks_2_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(289574784)))]; + tensor hidden_states_131_cast = conv(bias = down_blocks_2_resnets_1_conv2_bias_to_fp16, dilations = var_3096, groups = var_1186, pad = hidden_states_131_pad_0, pad_type = hidden_states_131_pad_type_0, strides = var_3094, weight = down_blocks_2_resnets_1_conv2_weight_to_fp16_palettized, x = input_225_cast)[name = tensor("hidden_states_131_cast")]; + tensor hidden_states_133_cast = add(x = input_213_cast, y = hidden_states_131_cast)[name = tensor("hidden_states_133_cast")]; + tensor reshape_60_shape_0 = const()[name = tensor("reshape_60_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_60_cast = reshape(shape = reshape_60_shape_0, x = hidden_states_133_cast)[name = tensor("reshape_60_cast")]; + tensor reduce_mean_45_axes_0 = const()[name = tensor("reduce_mean_45_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_45_keep_dims_0 = const()[name = tensor("reduce_mean_45_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_45_cast = reduce_mean(axes = reduce_mean_45_axes_0, keep_dims = reduce_mean_45_keep_dims_0, x = reshape_60_cast)[name = tensor("reduce_mean_45_cast")]; + tensor sub_30_cast = sub(x = reshape_60_cast, y = reduce_mean_45_cast)[name = tensor("sub_30_cast")]; + tensor square_15_cast = square(x = sub_30_cast)[name = tensor("square_15_cast")]; + tensor reduce_mean_47_axes_0 = const()[name = tensor("reduce_mean_47_axes_0"), val = tensor([2, 3, 4])]; + tensor 
reduce_mean_47_keep_dims_0 = const()[name = tensor("reduce_mean_47_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_47_cast = reduce_mean(axes = reduce_mean_47_axes_0, keep_dims = reduce_mean_47_keep_dims_0, x = square_15_cast)[name = tensor("reduce_mean_47_cast")]; + tensor add_30_y_0_to_fp16 = const()[name = tensor("add_30_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_30_cast = add(x = reduce_mean_47_cast, y = add_30_y_0_to_fp16)[name = tensor("add_30_cast")]; + tensor sqrt_15_cast = sqrt(x = add_30_cast)[name = tensor("sqrt_15_cast")]; + tensor real_div_15_cast = real_div(x = sub_30_cast, y = sqrt_15_cast)[name = tensor("real_div_15_cast")]; + tensor reshape_61_shape_0 = const()[name = tensor("reshape_61_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_61_cast = reshape(shape = reshape_61_shape_0, x = real_div_15_cast)[name = tensor("reshape_61_cast")]; + tensor add_31_gamma_0_to_fp16 = const()[name = tensor("add_31_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(289577408)))]; + tensor add_31_beta_0_to_fp16 = const()[name = tensor("add_31_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(289580032)))]; + tensor add_31_epsilon_0_to_fp16 = const()[name = tensor("add_31_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_31_cast = batch_norm(beta = add_31_beta_0_to_fp16, epsilon = add_31_epsilon_0_to_fp16, gamma = add_31_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_61_cast)[name = tensor("add_31_cast")]; + tensor var_3134 = const()[name = tensor("op_3134"), val = tensor([1, 1])]; + tensor var_3136 = const()[name = tensor("op_3136"), val = tensor([1, 1])]; + tensor hidden_states_135_pad_type_0 = const()[name = tensor("hidden_states_135_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_135_pad_0 = const()[name = tensor("hidden_states_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(289582656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(290401920))), name = tensor("down_blocks_2_attentions_1_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_proj_in_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(290402048)))]; + tensor hidden_states_135_cast = conv(bias = down_blocks_2_attentions_1_proj_in_bias_to_fp16, dilations = var_3136, groups = var_1186, pad = hidden_states_135_pad_0, pad_type = hidden_states_135_pad_type_0, strides = var_3134, weight = down_blocks_2_attentions_1_proj_in_weight_to_fp16_palettized, x = add_31_cast)[name = tensor("hidden_states_135_cast")]; + tensor var_3141 = const()[name = tensor("op_3141"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_85_cast = reshape(shape = var_3141, x = hidden_states_135_cast)[name = tensor("inputs_85_cast")]; + tensor var_3151 = const()[name = tensor("op_3151"), val = tensor([1])]; + tensor channels_mean_85_cast = reduce_mean(axes = var_3151, keep_dims = var_1181, x = inputs_85_cast)[name = tensor("channels_mean_85_cast")]; + tensor zero_mean_85_cast = sub(x = inputs_85_cast, y = channels_mean_85_cast)[name = 
tensor("zero_mean_85_cast")]; + tensor zero_mean_sq_85_cast = mul(x = zero_mean_85_cast, y = zero_mean_85_cast)[name = tensor("zero_mean_sq_85_cast")]; + tensor var_3155 = const()[name = tensor("op_3155"), val = tensor([1])]; + tensor var_3156_cast = reduce_mean(axes = var_3155, keep_dims = var_1181, x = zero_mean_sq_85_cast)[name = tensor("op_3156_cast")]; + tensor var_3157_to_fp16 = const()[name = tensor("op_3157_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3158_cast = add(x = var_3156_cast, y = var_3157_to_fp16)[name = tensor("op_3158_cast")]; + tensor denom_85_epsilon_0_to_fp16 = const()[name = tensor("denom_85_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_85_cast = rsqrt(epsilon = denom_85_epsilon_0_to_fp16, x = var_3158_cast)[name = tensor("denom_85_cast")]; + tensor out_85_cast = mul(x = zero_mean_85_cast, y = denom_85_cast)[name = tensor("out_85_cast")]; + tensor var_3162_to_fp16 = const()[name = tensor("op_3162_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(290404672)))]; + tensor var_3163_cast = add(x = out_85_cast, y = var_3162_to_fp16)[name = tensor("op_3163_cast")]; + tensor var_3165_to_fp16 = const()[name = tensor("op_3165_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(290407296)))]; + tensor hidden_states_137_cast = mul(x = var_3163_cast, y = var_3165_to_fp16)[name = tensor("hidden_states_137_cast")]; + tensor var_3172 = const()[name = tensor("op_3172"), val = tensor([1, 1])]; + tensor var_3174 = const()[name = tensor("op_3174"), val = tensor([1, 1])]; + tensor q_57_pad_type_0 = const()[name = tensor("q_57_pad_type_0"), val = tensor("custom")]; + tensor q_57_pad_0 = const()[name = tensor("q_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(290409920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(291229184))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_57_cast = conv(dilations = var_3174, groups = var_1186, pad = q_57_pad_0, pad_type = q_57_pad_type_0, strides = var_3172, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_137_cast)[name = tensor("q_57_cast")]; + tensor var_3178 = const()[name = tensor("op_3178"), val = tensor([1, 1])]; + tensor var_3180 = const()[name = tensor("op_3180"), val = tensor([1, 1])]; + tensor k_57_pad_type_0 = const()[name = tensor("k_57_pad_type_0"), val = tensor("custom")]; + tensor k_57_pad_0 = const()[name = tensor("k_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(291229312))), lut = tensor([-0x1.394p-5, -0x1.784p-7, 0x1.7c4p-7, 0x1.3a4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_57_cast = conv(dilations = var_3180, groups = var_1186, pad = k_57_pad_0, pad_type = k_57_pad_type_0, strides = var_3178, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = 
hidden_states_137_cast)[name = tensor("k_57_cast")]; + tensor var_3184 = const()[name = tensor("op_3184"), val = tensor([1, 1])]; + tensor var_3186 = const()[name = tensor("op_3186"), val = tensor([1, 1])]; + tensor v_57_pad_type_0 = const()[name = tensor("v_57_pad_type_0"), val = tensor("custom")]; + tensor v_57_pad_0 = const()[name = tensor("v_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(291638976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(292458240))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_57_cast = conv(dilations = var_3186, groups = var_1186, pad = v_57_pad_0, pad_type = v_57_pad_type_0, strides = var_3184, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_137_cast)[name = tensor("v_57_cast")]; + tensor var_3190 = const()[name = tensor("op_3190"), val = tensor([2, 20, 64, -1])]; + tensor var_3191_cast = reshape(shape = var_3190, x = q_57_cast)[name = tensor("op_3191_cast")]; + tensor var_3192 = const()[name = tensor("op_3192"), val = tensor([2, 20, 64, -1])]; + tensor var_3193_cast = reshape(shape = var_3192, x = k_57_cast)[name = tensor("op_3193_cast")]; + tensor var_3194 = const()[name = tensor("op_3194"), val = tensor([2, 20, 64, -1])]; + tensor var_3195_cast = reshape(shape = var_3194, x = v_57_cast)[name = tensor("op_3195_cast")]; + tensor attn_weights_113_transpose_x_0 = const()[name = tensor("attn_weights_113_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_113_transpose_y_0 = const()[name = tensor("attn_weights_113_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_113_cast = matmul(transpose_x = attn_weights_113_transpose_x_0, transpose_y = attn_weights_113_transpose_y_0, x = var_3191_cast, y = var_3193_cast)[name = tensor("attn_weights_113_cast")]; + tensor attn_weights_115_cast = mul(x = attn_weights_113_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_115_cast")]; + tensor var_3199_cast = softmax(axis = var_1170, x = attn_weights_115_cast)[name = tensor("op_3199_cast")]; + tensor attn_57_transpose_x_0 = const()[name = tensor("attn_57_transpose_x_0"), val = tensor(false)]; + tensor attn_57_transpose_y_0 = const()[name = tensor("attn_57_transpose_y_0"), val = tensor(true)]; + tensor attn_57_cast = matmul(transpose_x = attn_57_transpose_x_0, transpose_y = attn_57_transpose_y_0, x = var_3195_cast, y = var_3199_cast)[name = tensor("attn_57_cast")]; + tensor var_3203 = const()[name = tensor("op_3203"), val = tensor([2, 1280, 1, -1])]; + tensor input_229_cast = reshape(shape = var_3203, x = attn_57_cast)[name = tensor("input_229_cast")]; + tensor var_3208 = const()[name = tensor("op_3208"), val = tensor([1, 1])]; + tensor var_3210 = const()[name = tensor("op_3210"), val = tensor([1, 1])]; + tensor var_3212_pad_type_0 = const()[name = tensor("op_3212_pad_type_0"), val = tensor("custom")]; + tensor var_3212_pad_0 = const()[name = tensor("op_3212_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(292458368))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293277632))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293277760)))]; + tensor var_3212_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_3210, groups = var_1186, pad = var_3212_pad_0, pad_type = var_3212_pad_type_0, strides = var_3208, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_229_cast)[name = tensor("op_3212_cast")]; + tensor inputs_87_cast = add(x = var_3212_cast, y = inputs_85_cast)[name = tensor("inputs_87_cast")]; + tensor var_3216 = const()[name = tensor("op_3216"), val = tensor([1])]; + tensor channels_mean_87_cast = reduce_mean(axes = var_3216, keep_dims = var_1181, x = inputs_87_cast)[name = tensor("channels_mean_87_cast")]; + tensor zero_mean_87_cast = sub(x = inputs_87_cast, y = channels_mean_87_cast)[name = tensor("zero_mean_87_cast")]; + tensor zero_mean_sq_87_cast = mul(x = zero_mean_87_cast, y = zero_mean_87_cast)[name = tensor("zero_mean_sq_87_cast")]; + tensor var_3220 = const()[name = tensor("op_3220"), val = tensor([1])]; + tensor var_3221_cast = reduce_mean(axes = var_3220, keep_dims = var_1181, x = zero_mean_sq_87_cast)[name = tensor("op_3221_cast")]; + tensor var_3222_to_fp16 = const()[name = tensor("op_3222_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3223_cast = add(x = var_3221_cast, y = var_3222_to_fp16)[name = tensor("op_3223_cast")]; + tensor denom_87_epsilon_0_to_fp16 = const()[name = tensor("denom_87_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_87_cast = rsqrt(epsilon = denom_87_epsilon_0_to_fp16, x = var_3223_cast)[name = tensor("denom_87_cast")]; + tensor out_87_cast = mul(x = zero_mean_87_cast, y = denom_87_cast)[name = tensor("out_87_cast")]; + tensor var_3227_to_fp16 = const()[name = tensor("op_3227_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293280384)))]; + tensor var_3228_cast = add(x = out_87_cast, y = var_3227_to_fp16)[name = tensor("op_3228_cast")]; + tensor var_3230_to_fp16 = const()[name = tensor("op_3230_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293283008)))]; + tensor hidden_states_139_cast = mul(x = var_3228_cast, y = var_3230_to_fp16)[name = tensor("hidden_states_139_cast")]; + tensor var_3237 = const()[name = tensor("op_3237"), val = tensor([1, 1])]; + tensor var_3239 = const()[name = tensor("op_3239"), val = tensor([1, 1])]; + tensor q_59_pad_type_0 = const()[name = tensor("q_59_pad_type_0"), val = tensor("custom")]; + tensor q_59_pad_0 = const()[name = tensor("q_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293285632))), lut = tensor([-0x1.f38p-7, 0x1.f48p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_59_cast = 
conv(dilations = var_3239, groups = var_1186, pad = q_59_pad_0, pad_type = q_59_pad_type_0, strides = var_3237, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_139_cast)[name = tensor("q_59_cast")]; + tensor var_3243 = const()[name = tensor("op_3243"), val = tensor([1, 1])]; + tensor var_3245 = const()[name = tensor("op_3245"), val = tensor([1, 1])]; + tensor k_59_pad_type_0 = const()[name = tensor("k_59_pad_type_0"), val = tensor("custom")]; + tensor k_59_pad_0 = const()[name = tensor("k_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293490496))), lut = tensor([-0x1.dc8p-6, -0x1.1b8p-7, 0x1.198p-7, 0x1.dbp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_59_cast = conv(dilations = var_3245, groups = var_1186, pad = k_59_pad_0, pad_type = k_59_pad_type_0, strides = var_3243, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_59_cast")]; + tensor var_3249 = const()[name = tensor("op_3249"), val = tensor([1, 1])]; + tensor var_3251 = const()[name = tensor("op_3251"), val = tensor([1, 1])]; + tensor v_59_pad_type_0 = const()[name = tensor("v_59_pad_type_0"), val = tensor("custom")]; + tensor v_59_pad_0 = const()[name = tensor("v_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(294145920))), lut = tensor([-0x1.0d4p-5, -0x1.34cp-7, 0x1.358p-7, 0x1.0d4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_59_cast = conv(dilations = var_3251, groups = var_1186, pad = v_59_pad_0, pad_type = v_59_pad_type_0, strides = var_3249, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_59_cast")]; + tensor var_3255 = const()[name = tensor("op_3255"), val = tensor([2, 20, 64, -1])]; + tensor var_3256_cast = reshape(shape = var_3255, x = q_59_cast)[name = tensor("op_3256_cast")]; + tensor var_3257 = const()[name = tensor("op_3257"), val = tensor([2, 20, 64, -1])]; + tensor var_3258_cast = reshape(shape = var_3257, x = k_59_cast)[name = tensor("op_3258_cast")]; + tensor var_3259 = const()[name = tensor("op_3259"), val = tensor([2, 20, 64, -1])]; + tensor var_3260_cast = reshape(shape = var_3259, x = v_59_cast)[name = tensor("op_3260_cast")]; + tensor attn_weights_117_transpose_x_0 = const()[name = tensor("attn_weights_117_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_117_transpose_y_0 = const()[name = tensor("attn_weights_117_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_117_cast = matmul(transpose_x = attn_weights_117_transpose_x_0, transpose_y = attn_weights_117_transpose_y_0, x = var_3256_cast, y = var_3258_cast)[name = tensor("attn_weights_117_cast")]; + tensor attn_weights_119_cast = mul(x = attn_weights_117_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_119_cast")]; + tensor var_3264_cast = softmax(axis = var_1170, x = 
attn_weights_119_cast)[name = tensor("op_3264_cast")]; + tensor attn_59_transpose_x_0 = const()[name = tensor("attn_59_transpose_x_0"), val = tensor(false)]; + tensor attn_59_transpose_y_0 = const()[name = tensor("attn_59_transpose_y_0"), val = tensor(true)]; + tensor attn_59_cast = matmul(transpose_x = attn_59_transpose_x_0, transpose_y = attn_59_transpose_y_0, x = var_3260_cast, y = var_3264_cast)[name = tensor("attn_59_cast")]; + tensor var_3268 = const()[name = tensor("op_3268"), val = tensor([2, 1280, 1, -1])]; + tensor input_231_cast = reshape(shape = var_3268, x = attn_59_cast)[name = tensor("input_231_cast")]; + tensor var_3273 = const()[name = tensor("op_3273"), val = tensor([1, 1])]; + tensor var_3275 = const()[name = tensor("op_3275"), val = tensor([1, 1])]; + tensor var_3277_pad_type_0 = const()[name = tensor("op_3277_pad_type_0"), val = tensor("custom")]; + tensor var_3277_pad_0 = const()[name = tensor("op_3277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(294801344))), lut = tensor([-0x1.2d8p-6, -0x1.698p-8, 0x1.68cp-8, 0x1.2dp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(295211008)))]; + tensor var_3277_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_3275, groups = var_1186, pad = var_3277_pad_0, pad_type = var_3277_pad_type_0, strides = var_3273, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_231_cast)[name = tensor("op_3277_cast")]; + tensor inputs_89_cast = add(x = var_3277_cast, y = inputs_87_cast)[name = tensor("inputs_89_cast")]; + tensor var_3281 = const()[name = tensor("op_3281"), val = tensor([1])]; + tensor channels_mean_89_cast = reduce_mean(axes = var_3281, keep_dims = var_1181, x = inputs_89_cast)[name = tensor("channels_mean_89_cast")]; + tensor zero_mean_89_cast = sub(x = inputs_89_cast, y = channels_mean_89_cast)[name = tensor("zero_mean_89_cast")]; + tensor zero_mean_sq_89_cast = mul(x = zero_mean_89_cast, y = zero_mean_89_cast)[name = tensor("zero_mean_sq_89_cast")]; + tensor var_3285 = const()[name = tensor("op_3285"), val = tensor([1])]; + tensor var_3286_cast = reduce_mean(axes = var_3285, keep_dims = var_1181, x = zero_mean_sq_89_cast)[name = tensor("op_3286_cast")]; + tensor var_3287_to_fp16 = const()[name = tensor("op_3287_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3288_cast = add(x = var_3286_cast, y = var_3287_to_fp16)[name = tensor("op_3288_cast")]; + tensor denom_89_epsilon_0_to_fp16 = const()[name = tensor("denom_89_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_89_cast = rsqrt(epsilon = denom_89_epsilon_0_to_fp16, x = var_3288_cast)[name = tensor("denom_89_cast")]; + tensor out_89_cast = mul(x = zero_mean_89_cast, y = denom_89_cast)[name = tensor("out_89_cast")]; + tensor var_3292_to_fp16 = const()[name = tensor("op_3292_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(295213632)))]; + tensor var_3293_cast = add(x = out_89_cast, y = var_3292_to_fp16)[name = tensor("op_3293_cast")]; + tensor var_3295_to_fp16 = const()[name = tensor("op_3295_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(295216256)))]; + tensor input_233_cast = mul(x = var_3293_cast, y = var_3295_to_fp16)[name = tensor("input_233_cast")]; + tensor var_3303 = const()[name = tensor("op_3303"), val = tensor([1, 1])]; + tensor var_3305 = const()[name = tensor("op_3305"), val = tensor([1, 1])]; + tensor var_3307_pad_type_0 = const()[name = tensor("op_3307_pad_type_0"), val = tensor("custom")]; + tensor var_3307_pad_0 = const()[name = tensor("op_3307_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(295218880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(305049344))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(305049536)))]; + tensor var_3307_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_3305, groups = var_1186, pad = var_3307_pad_0, pad_type = var_3307_pad_type_0, strides = var_3303, weight = down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_233_cast)[name = tensor("op_3307_cast")]; + tensor var_3308_split_sizes_0 = const()[name = tensor("op_3308_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3308_axis_0 = const()[name = tensor("op_3308_axis_0"), val = tensor(1)]; + tensor var_3308_cast_0, tensor var_3308_cast_1 = split(axis = var_3308_axis_0, split_sizes = var_3308_split_sizes_0, x = var_3307_cast)[name = tensor("op_3308_cast")]; + tensor var_3310_mode_0 = const()[name = tensor("op_3310_mode_0"), val = tensor("EXACT")]; + tensor var_3310_cast = gelu(mode = var_3310_mode_0, x = var_3308_cast_1)[name = tensor("op_3310_cast")]; + tensor input_235_cast = mul(x = var_3308_cast_0, y = var_3310_cast)[name = tensor("input_235_cast")]; + tensor var_3314 = const()[name = tensor("op_3314"), val = tensor([1, 1])]; + tensor var_3316 = const()[name = tensor("op_3316"), val = tensor([1, 1])]; + tensor var_3318_pad_type_0 = const()[name = tensor("op_3318_pad_type_0"), val = tensor("custom")]; + tensor var_3318_pad_0 = const()[name = tensor("op_3318_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(305070080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(308346944))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val 
= tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(308347072)))]; + tensor var_3318_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_3316, groups = var_1186, pad = var_3318_pad_0, pad_type = var_3318_pad_type_0, strides = var_3314, weight = down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_235_cast)[name = tensor("op_3318_cast")]; + tensor inputs_91_cast = add(x = var_3318_cast, y = inputs_89_cast)[name = tensor("inputs_91_cast")]; + tensor var_3328 = const()[name = tensor("op_3328"), val = tensor([1])]; + tensor channels_mean_91_cast = reduce_mean(axes = var_3328, keep_dims = var_1181, x = inputs_91_cast)[name = tensor("channels_mean_91_cast")]; + tensor zero_mean_91_cast = sub(x = inputs_91_cast, y = channels_mean_91_cast)[name = tensor("zero_mean_91_cast")]; + tensor zero_mean_sq_91_cast = mul(x = zero_mean_91_cast, y = zero_mean_91_cast)[name = tensor("zero_mean_sq_91_cast")]; + tensor var_3332 = const()[name = tensor("op_3332"), val = tensor([1])]; + tensor var_3333_cast = reduce_mean(axes = var_3332, keep_dims = var_1181, x = zero_mean_sq_91_cast)[name = tensor("op_3333_cast")]; + tensor var_3334_to_fp16 = const()[name = tensor("op_3334_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3335_cast = add(x = var_3333_cast, y = var_3334_to_fp16)[name = tensor("op_3335_cast")]; + tensor denom_91_epsilon_0_to_fp16 = const()[name = tensor("denom_91_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_91_cast = rsqrt(epsilon = denom_91_epsilon_0_to_fp16, x = var_3335_cast)[name = tensor("denom_91_cast")]; + tensor out_91_cast = mul(x = zero_mean_91_cast, y = denom_91_cast)[name = tensor("out_91_cast")]; + tensor var_3339_to_fp16 = const()[name = tensor("op_3339_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(308349696)))]; + tensor var_3340_cast = add(x = out_91_cast, y = var_3339_to_fp16)[name = tensor("op_3340_cast")]; + tensor var_3342_to_fp16 = const()[name = tensor("op_3342_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(308352320)))]; + tensor hidden_states_143_cast = mul(x = var_3340_cast, y = var_3342_to_fp16)[name = tensor("hidden_states_143_cast")]; + tensor var_3349 = const()[name = tensor("op_3349"), val = tensor([1, 1])]; + tensor var_3351 = const()[name = tensor("op_3351"), val = tensor([1, 1])]; + tensor q_61_pad_type_0 = const()[name = tensor("q_61_pad_type_0"), val = tensor("custom")]; + tensor q_61_pad_0 = const()[name = tensor("q_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(308354944))), lut = tensor([-0x1.414p-5, -0x1.83cp-7, 0x1.814p-7, 0x1.414p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_61_cast = conv(dilations = var_3351, groups = var_1186, pad = q_61_pad_0, pad_type = q_61_pad_type_0, strides = var_3349, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_143_cast)[name = tensor("q_61_cast")]; + tensor var_3355 = const()[name = tensor("op_3355"), val = tensor([1, 1])]; + tensor var_3357 = const()[name = tensor("op_3357"), val = tensor([1, 1])]; + tensor 
k_61_pad_type_0 = const()[name = tensor("k_61_pad_type_0"), val = tensor("custom")]; + tensor k_61_pad_0 = const()[name = tensor("k_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(308764608))), lut = tensor([-0x1.444p-5, -0x1.84cp-7, 0x1.884p-7, 0x1.444p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_61_cast = conv(dilations = var_3357, groups = var_1186, pad = k_61_pad_0, pad_type = k_61_pad_type_0, strides = var_3355, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_143_cast)[name = tensor("k_61_cast")]; + tensor var_3361 = const()[name = tensor("op_3361"), val = tensor([1, 1])]; + tensor var_3363 = const()[name = tensor("op_3363"), val = tensor([1, 1])]; + tensor v_61_pad_type_0 = const()[name = tensor("v_61_pad_type_0"), val = tensor("custom")]; + tensor v_61_pad_0 = const()[name = tensor("v_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(309174272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(309993536))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_61_cast = conv(dilations = var_3363, groups = var_1186, pad = v_61_pad_0, pad_type = v_61_pad_type_0, strides = var_3361, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_143_cast)[name = tensor("v_61_cast")]; + tensor var_3367 = const()[name = tensor("op_3367"), val = tensor([2, 20, 64, -1])]; + tensor var_3368_cast = reshape(shape = var_3367, x = q_61_cast)[name = tensor("op_3368_cast")]; + tensor var_3369 = const()[name = tensor("op_3369"), val = tensor([2, 20, 64, -1])]; + tensor var_3370_cast = reshape(shape = var_3369, x = k_61_cast)[name = tensor("op_3370_cast")]; + tensor var_3371 = const()[name = tensor("op_3371"), val = tensor([2, 20, 64, -1])]; + tensor var_3372_cast = reshape(shape = var_3371, x = v_61_cast)[name = tensor("op_3372_cast")]; + tensor attn_weights_121_transpose_x_0 = const()[name = tensor("attn_weights_121_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_121_transpose_y_0 = const()[name = tensor("attn_weights_121_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_121_cast = matmul(transpose_x = attn_weights_121_transpose_x_0, transpose_y = attn_weights_121_transpose_y_0, x = var_3368_cast, y = var_3370_cast)[name = tensor("attn_weights_121_cast")]; + tensor attn_weights_123_cast = mul(x = attn_weights_121_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_123_cast")]; + tensor var_3376_cast = softmax(axis = var_1170, x = attn_weights_123_cast)[name = tensor("op_3376_cast")]; + tensor attn_61_transpose_x_0 = const()[name = tensor("attn_61_transpose_x_0"), val = tensor(false)]; + tensor attn_61_transpose_y_0 = const()[name = tensor("attn_61_transpose_y_0"), val = tensor(true)]; + tensor attn_61_cast = matmul(transpose_x = attn_61_transpose_x_0, transpose_y = attn_61_transpose_y_0, x = var_3372_cast, y = 
var_3376_cast)[name = tensor("attn_61_cast")]; + tensor var_3380 = const()[name = tensor("op_3380"), val = tensor([2, 1280, 1, -1])]; + tensor input_237_cast = reshape(shape = var_3380, x = attn_61_cast)[name = tensor("input_237_cast")]; + tensor var_3385 = const()[name = tensor("op_3385"), val = tensor([1, 1])]; + tensor var_3387 = const()[name = tensor("op_3387"), val = tensor([1, 1])]; + tensor var_3389_pad_type_0 = const()[name = tensor("op_3389_pad_type_0"), val = tensor("custom")]; + tensor var_3389_pad_0 = const()[name = tensor("op_3389_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(309993664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(310812928))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(310813056)))]; + tensor var_3389_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_3387, groups = var_1186, pad = var_3389_pad_0, pad_type = var_3389_pad_type_0, strides = var_3385, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_237_cast)[name = tensor("op_3389_cast")]; + tensor inputs_93_cast = add(x = var_3389_cast, y = inputs_91_cast)[name = tensor("inputs_93_cast")]; + tensor var_3393 = const()[name = tensor("op_3393"), val = tensor([1])]; + tensor channels_mean_93_cast = reduce_mean(axes = var_3393, keep_dims = var_1181, x = inputs_93_cast)[name = tensor("channels_mean_93_cast")]; + tensor zero_mean_93_cast = sub(x = inputs_93_cast, y = channels_mean_93_cast)[name = tensor("zero_mean_93_cast")]; + tensor zero_mean_sq_93_cast = mul(x = zero_mean_93_cast, y = zero_mean_93_cast)[name = tensor("zero_mean_sq_93_cast")]; + tensor var_3397 = const()[name = tensor("op_3397"), val = tensor([1])]; + tensor var_3398_cast = reduce_mean(axes = var_3397, keep_dims = var_1181, x = zero_mean_sq_93_cast)[name = tensor("op_3398_cast")]; + tensor var_3399_to_fp16 = const()[name = tensor("op_3399_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3400_cast = add(x = var_3398_cast, y = var_3399_to_fp16)[name = tensor("op_3400_cast")]; + tensor denom_93_epsilon_0_to_fp16 = const()[name = tensor("denom_93_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_93_cast = rsqrt(epsilon = denom_93_epsilon_0_to_fp16, x = var_3400_cast)[name = tensor("denom_93_cast")]; + tensor out_93_cast = mul(x = zero_mean_93_cast, y = denom_93_cast)[name = tensor("out_93_cast")]; + tensor var_3404_to_fp16 = const()[name = tensor("op_3404_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(310815680)))]; + tensor var_3405_cast = add(x = out_93_cast, y = var_3404_to_fp16)[name = tensor("op_3405_cast")]; + tensor var_3407_to_fp16 = const()[name = tensor("op_3407_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(310818304)))]; + tensor hidden_states_145_cast = mul(x = var_3405_cast, y = 
var_3407_to_fp16)[name = tensor("hidden_states_145_cast")]; + tensor var_3414 = const()[name = tensor("op_3414"), val = tensor([1, 1])]; + tensor var_3416 = const()[name = tensor("op_3416"), val = tensor([1, 1])]; + tensor q_63_pad_type_0 = const()[name = tensor("q_63_pad_type_0"), val = tensor("custom")]; + tensor q_63_pad_0 = const()[name = tensor("q_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(310820928))), lut = tensor([-0x1.284p-6, 0x1.274p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_63_cast = conv(dilations = var_3416, groups = var_1186, pad = q_63_pad_0, pad_type = q_63_pad_type_0, strides = var_3414, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_145_cast)[name = tensor("q_63_cast")]; + tensor var_3420 = const()[name = tensor("op_3420"), val = tensor([1, 1])]; + tensor var_3422 = const()[name = tensor("op_3422"), val = tensor([1, 1])]; + tensor k_63_pad_type_0 = const()[name = tensor("k_63_pad_type_0"), val = tensor("custom")]; + tensor k_63_pad_0 = const()[name = tensor("k_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(311025792))), lut = tensor([-0x1.f2p-6, -0x1.278p-7, 0x1.258p-7, 0x1.f1p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_63_cast = conv(dilations = var_3422, groups = var_1186, pad = k_63_pad_0, pad_type = k_63_pad_type_0, strides = var_3420, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_63_cast")]; + tensor var_3426 = const()[name = tensor("op_3426"), val = tensor([1, 1])]; + tensor var_3428 = const()[name = tensor("op_3428"), val = tensor([1, 1])]; + tensor v_63_pad_type_0 = const()[name = tensor("v_63_pad_type_0"), val = tensor("custom")]; + tensor v_63_pad_0 = const()[name = tensor("v_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(311681216))), lut = tensor([-0x1.19p-6, 0x1.19p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_63_cast = conv(dilations = var_3428, groups = var_1186, pad = v_63_pad_0, pad_type = v_63_pad_type_0, strides = var_3426, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_63_cast")]; + tensor var_3432 = const()[name = tensor("op_3432"), val = tensor([2, 20, 64, -1])]; + tensor var_3433_cast = reshape(shape = var_3432, x = q_63_cast)[name = tensor("op_3433_cast")]; + tensor var_3434 = const()[name = tensor("op_3434"), val = tensor([2, 20, 64, -1])]; + tensor var_3435_cast = reshape(shape = var_3434, x = k_63_cast)[name = tensor("op_3435_cast")]; + tensor var_3436 = 
const()[name = tensor("op_3436"), val = tensor([2, 20, 64, -1])]; + tensor var_3437_cast = reshape(shape = var_3436, x = v_63_cast)[name = tensor("op_3437_cast")]; + tensor attn_weights_125_transpose_x_0 = const()[name = tensor("attn_weights_125_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_125_transpose_y_0 = const()[name = tensor("attn_weights_125_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_125_cast = matmul(transpose_x = attn_weights_125_transpose_x_0, transpose_y = attn_weights_125_transpose_y_0, x = var_3433_cast, y = var_3435_cast)[name = tensor("attn_weights_125_cast")]; + tensor attn_weights_127_cast = mul(x = attn_weights_125_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_127_cast")]; + tensor var_3441_cast = softmax(axis = var_1170, x = attn_weights_127_cast)[name = tensor("op_3441_cast")]; + tensor attn_63_transpose_x_0 = const()[name = tensor("attn_63_transpose_x_0"), val = tensor(false)]; + tensor attn_63_transpose_y_0 = const()[name = tensor("attn_63_transpose_y_0"), val = tensor(true)]; + tensor attn_63_cast = matmul(transpose_x = attn_63_transpose_x_0, transpose_y = attn_63_transpose_y_0, x = var_3437_cast, y = var_3441_cast)[name = tensor("attn_63_cast")]; + tensor var_3445 = const()[name = tensor("op_3445"), val = tensor([2, 1280, 1, -1])]; + tensor input_239_cast = reshape(shape = var_3445, x = attn_63_cast)[name = tensor("input_239_cast")]; + tensor var_3450 = const()[name = tensor("op_3450"), val = tensor([1, 1])]; + tensor var_3452 = const()[name = tensor("op_3452"), val = tensor([1, 1])]; + tensor var_3454_pad_type_0 = const()[name = tensor("op_3454_pad_type_0"), val = tensor("custom")]; + tensor var_3454_pad_0 = const()[name = tensor("op_3454_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(312008960))), lut = tensor([-0x1.6ecp-7, 0x1.6ep-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(312213824)))]; + tensor var_3454_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_3452, groups = var_1186, pad = var_3454_pad_0, pad_type = var_3454_pad_type_0, strides = var_3450, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_239_cast)[name = tensor("op_3454_cast")]; + tensor inputs_95_cast = add(x = var_3454_cast, y = inputs_93_cast)[name = tensor("inputs_95_cast")]; + tensor var_3458 = const()[name = tensor("op_3458"), val = tensor([1])]; + tensor channels_mean_95_cast = reduce_mean(axes = var_3458, keep_dims = var_1181, x = inputs_95_cast)[name = tensor("channels_mean_95_cast")]; + tensor zero_mean_95_cast = sub(x = inputs_95_cast, y = channels_mean_95_cast)[name = tensor("zero_mean_95_cast")]; + tensor zero_mean_sq_95_cast = mul(x = zero_mean_95_cast, y = zero_mean_95_cast)[name = tensor("zero_mean_sq_95_cast")]; + tensor var_3462 = const()[name = tensor("op_3462"), val = tensor([1])]; + tensor var_3463_cast = reduce_mean(axes = var_3462, 
keep_dims = var_1181, x = zero_mean_sq_95_cast)[name = tensor("op_3463_cast")]; + tensor var_3464_to_fp16 = const()[name = tensor("op_3464_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3465_cast = add(x = var_3463_cast, y = var_3464_to_fp16)[name = tensor("op_3465_cast")]; + tensor denom_95_epsilon_0_to_fp16 = const()[name = tensor("denom_95_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_95_cast = rsqrt(epsilon = denom_95_epsilon_0_to_fp16, x = var_3465_cast)[name = tensor("denom_95_cast")]; + tensor out_95_cast = mul(x = zero_mean_95_cast, y = denom_95_cast)[name = tensor("out_95_cast")]; + tensor var_3469_to_fp16 = const()[name = tensor("op_3469_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(312216448)))]; + tensor var_3470_cast = add(x = out_95_cast, y = var_3469_to_fp16)[name = tensor("op_3470_cast")]; + tensor var_3472_to_fp16 = const()[name = tensor("op_3472_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(312219072)))]; + tensor input_241_cast = mul(x = var_3470_cast, y = var_3472_to_fp16)[name = tensor("input_241_cast")]; + tensor var_3480 = const()[name = tensor("op_3480"), val = tensor([1, 1])]; + tensor var_3482 = const()[name = tensor("op_3482"), val = tensor([1, 1])]; + tensor var_3484_pad_type_0 = const()[name = tensor("op_3484_pad_type_0"), val = tensor("custom")]; + tensor var_3484_pad_0 = const()[name = tensor("op_3484_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(312221696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(318775360))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(318775488)))]; + tensor var_3484_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_3482, groups = var_1186, pad = var_3484_pad_0, pad_type = var_3484_pad_type_0, strides = var_3480, weight = down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_241_cast)[name = tensor("op_3484_cast")]; + tensor var_3485_split_sizes_0 = const()[name = tensor("op_3485_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3485_axis_0 = const()[name = tensor("op_3485_axis_0"), val = tensor(1)]; + tensor var_3485_cast_0, tensor var_3485_cast_1 = split(axis = var_3485_axis_0, split_sizes = var_3485_split_sizes_0, x = var_3484_cast)[name = tensor("op_3485_cast")]; + tensor var_3487_mode_0 = const()[name = tensor("op_3487_mode_0"), val = tensor("EXACT")]; + tensor var_3487_cast = gelu(mode = var_3487_mode_0, x = var_3485_cast_1)[name = tensor("op_3487_cast")]; + tensor input_243_cast = mul(x = var_3485_cast_0, y = var_3487_cast)[name = tensor("input_243_cast")]; + tensor var_3491 = const()[name = tensor("op_3491"), val = tensor([1, 1])]; + tensor var_3493 = const()[name = tensor("op_3493"), val = tensor([1, 1])]; + tensor var_3495_pad_type_0 = const()[name = tensor("op_3495_pad_type_0"), 
val = tensor("custom")]; + tensor var_3495_pad_0 = const()[name = tensor("op_3495_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(318796032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(322072896))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(322073024)))]; + tensor var_3495_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_3493, groups = var_1186, pad = var_3495_pad_0, pad_type = var_3495_pad_type_0, strides = var_3491, weight = down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_243_cast)[name = tensor("op_3495_cast")]; + tensor inputs_97_cast = add(x = var_3495_cast, y = inputs_95_cast)[name = tensor("inputs_97_cast")]; + tensor var_3505 = const()[name = tensor("op_3505"), val = tensor([1])]; + tensor channels_mean_97_cast = reduce_mean(axes = var_3505, keep_dims = var_1181, x = inputs_97_cast)[name = tensor("channels_mean_97_cast")]; + tensor zero_mean_97_cast = sub(x = inputs_97_cast, y = channels_mean_97_cast)[name = tensor("zero_mean_97_cast")]; + tensor zero_mean_sq_97_cast = mul(x = zero_mean_97_cast, y = zero_mean_97_cast)[name = tensor("zero_mean_sq_97_cast")]; + tensor var_3509 = const()[name = tensor("op_3509"), val = tensor([1])]; + tensor var_3510_cast = reduce_mean(axes = var_3509, keep_dims = var_1181, x = zero_mean_sq_97_cast)[name = tensor("op_3510_cast")]; + tensor var_3511_to_fp16 = const()[name = tensor("op_3511_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3512_cast = add(x = var_3510_cast, y = var_3511_to_fp16)[name = tensor("op_3512_cast")]; + tensor denom_97_epsilon_0_to_fp16 = const()[name = tensor("denom_97_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_97_cast = rsqrt(epsilon = denom_97_epsilon_0_to_fp16, x = var_3512_cast)[name = tensor("denom_97_cast")]; + tensor out_97_cast = mul(x = zero_mean_97_cast, y = denom_97_cast)[name = tensor("out_97_cast")]; + tensor var_3516_to_fp16 = const()[name = tensor("op_3516_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(322075648)))]; + tensor var_3517_cast = add(x = out_97_cast, y = var_3516_to_fp16)[name = tensor("op_3517_cast")]; + tensor var_3519_to_fp16 = const()[name = tensor("op_3519_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(322078272)))]; + tensor hidden_states_149_cast = mul(x = var_3517_cast, y = var_3519_to_fp16)[name = tensor("hidden_states_149_cast")]; + tensor var_3526 = const()[name = tensor("op_3526"), val = tensor([1, 1])]; + tensor var_3528 = const()[name = tensor("op_3528"), val = tensor([1, 1])]; + tensor q_65_pad_type_0 = const()[name = tensor("q_65_pad_type_0"), val = tensor("custom")]; + tensor q_65_pad_0 = const()[name = tensor("q_65_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(322080896))), lut = tensor([-0x1.438p-5, -0x1.854p-7, 0x1.848p-7, 0x1.438p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_65_cast = conv(dilations = var_3528, groups = var_1186, pad = q_65_pad_0, pad_type = q_65_pad_type_0, strides = var_3526, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_149_cast)[name = tensor("q_65_cast")]; + tensor var_3532 = const()[name = tensor("op_3532"), val = tensor([1, 1])]; + tensor var_3534 = const()[name = tensor("op_3534"), val = tensor([1, 1])]; + tensor k_65_pad_type_0 = const()[name = tensor("k_65_pad_type_0"), val = tensor("custom")]; + tensor k_65_pad_0 = const()[name = tensor("k_65_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(322490560))), lut = tensor([-0x1.44cp-5, -0x1.86cp-7, 0x1.854p-7, 0x1.444p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_65_cast = conv(dilations = var_3534, groups = var_1186, pad = k_65_pad_0, pad_type = k_65_pad_type_0, strides = var_3532, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_149_cast)[name = tensor("k_65_cast")]; + tensor var_3538 = const()[name = tensor("op_3538"), val = tensor([1, 1])]; + tensor var_3540 = const()[name = tensor("op_3540"), val = tensor([1, 1])]; + tensor v_65_pad_type_0 = const()[name = tensor("v_65_pad_type_0"), val = tensor("custom")]; + tensor v_65_pad_0 = const()[name = tensor("v_65_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(322900224))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323719488))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_65_cast = conv(dilations = var_3540, groups = var_1186, pad = v_65_pad_0, pad_type = v_65_pad_type_0, strides = var_3538, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_149_cast)[name = tensor("v_65_cast")]; + tensor var_3544 = const()[name = tensor("op_3544"), val = tensor([2, 20, 64, -1])]; + tensor var_3545_cast = reshape(shape = var_3544, x = q_65_cast)[name = tensor("op_3545_cast")]; + tensor var_3546 = const()[name = tensor("op_3546"), val = tensor([2, 20, 64, -1])]; + tensor var_3547_cast = reshape(shape = var_3546, x = k_65_cast)[name = tensor("op_3547_cast")]; + tensor var_3548 = const()[name = tensor("op_3548"), val = tensor([2, 20, 64, -1])]; + tensor var_3549_cast = reshape(shape = var_3548, x = v_65_cast)[name = tensor("op_3549_cast")]; + tensor attn_weights_129_transpose_x_0 = const()[name = tensor("attn_weights_129_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_129_transpose_y_0 = const()[name = tensor("attn_weights_129_transpose_y_0"), val = tensor(false)]; 
+ tensor attn_weights_129_cast = matmul(transpose_x = attn_weights_129_transpose_x_0, transpose_y = attn_weights_129_transpose_y_0, x = var_3545_cast, y = var_3547_cast)[name = tensor("attn_weights_129_cast")]; + tensor attn_weights_131_cast = mul(x = attn_weights_129_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_131_cast")]; + tensor var_3553_cast = softmax(axis = var_1170, x = attn_weights_131_cast)[name = tensor("op_3553_cast")]; + tensor attn_65_transpose_x_0 = const()[name = tensor("attn_65_transpose_x_0"), val = tensor(false)]; + tensor attn_65_transpose_y_0 = const()[name = tensor("attn_65_transpose_y_0"), val = tensor(true)]; + tensor attn_65_cast = matmul(transpose_x = attn_65_transpose_x_0, transpose_y = attn_65_transpose_y_0, x = var_3549_cast, y = var_3553_cast)[name = tensor("attn_65_cast")]; + tensor var_3557 = const()[name = tensor("op_3557"), val = tensor([2, 1280, 1, -1])]; + tensor input_245_cast = reshape(shape = var_3557, x = attn_65_cast)[name = tensor("input_245_cast")]; + tensor var_3562 = const()[name = tensor("op_3562"), val = tensor([1, 1])]; + tensor var_3564 = const()[name = tensor("op_3564"), val = tensor([1, 1])]; + tensor var_3566_pad_type_0 = const()[name = tensor("op_3566_pad_type_0"), val = tensor("custom")]; + tensor var_3566_pad_0 = const()[name = tensor("op_3566_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323719616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(324538880))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(324539008)))]; + tensor var_3566_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_3564, groups = var_1186, pad = var_3566_pad_0, pad_type = var_3566_pad_type_0, strides = var_3562, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_245_cast)[name = tensor("op_3566_cast")]; + tensor inputs_99_cast = add(x = var_3566_cast, y = inputs_97_cast)[name = tensor("inputs_99_cast")]; + tensor var_3570 = const()[name = tensor("op_3570"), val = tensor([1])]; + tensor channels_mean_99_cast = reduce_mean(axes = var_3570, keep_dims = var_1181, x = inputs_99_cast)[name = tensor("channels_mean_99_cast")]; + tensor zero_mean_99_cast = sub(x = inputs_99_cast, y = channels_mean_99_cast)[name = tensor("zero_mean_99_cast")]; + tensor zero_mean_sq_99_cast = mul(x = zero_mean_99_cast, y = zero_mean_99_cast)[name = tensor("zero_mean_sq_99_cast")]; + tensor var_3574 = const()[name = tensor("op_3574"), val = tensor([1])]; + tensor var_3575_cast = reduce_mean(axes = var_3574, keep_dims = var_1181, x = zero_mean_sq_99_cast)[name = tensor("op_3575_cast")]; + tensor var_3576_to_fp16 = const()[name = tensor("op_3576_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3577_cast = add(x = var_3575_cast, y = var_3576_to_fp16)[name = tensor("op_3577_cast")]; + tensor denom_99_epsilon_0_to_fp16 = const()[name = 
tensor("denom_99_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_99_cast = rsqrt(epsilon = denom_99_epsilon_0_to_fp16, x = var_3577_cast)[name = tensor("denom_99_cast")]; + tensor out_99_cast = mul(x = zero_mean_99_cast, y = denom_99_cast)[name = tensor("out_99_cast")]; + tensor var_3581_to_fp16 = const()[name = tensor("op_3581_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(324541632)))]; + tensor var_3582_cast = add(x = out_99_cast, y = var_3581_to_fp16)[name = tensor("op_3582_cast")]; + tensor var_3584_to_fp16 = const()[name = tensor("op_3584_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(324544256)))]; + tensor hidden_states_151_cast = mul(x = var_3582_cast, y = var_3584_to_fp16)[name = tensor("hidden_states_151_cast")]; + tensor var_3591 = const()[name = tensor("op_3591"), val = tensor([1, 1])]; + tensor var_3593 = const()[name = tensor("op_3593"), val = tensor([1, 1])]; + tensor q_67_pad_type_0 = const()[name = tensor("q_67_pad_type_0"), val = tensor("custom")]; + tensor q_67_pad_0 = const()[name = tensor("q_67_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(324546880))), lut = tensor([-0x1.354p-6, 0x1.364p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_67_cast = conv(dilations = var_3593, groups = var_1186, pad = q_67_pad_0, pad_type = q_67_pad_type_0, strides = var_3591, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_151_cast)[name = tensor("q_67_cast")]; + tensor var_3597 = const()[name = tensor("op_3597"), val = tensor([1, 1])]; + tensor var_3599 = const()[name = tensor("op_3599"), val = tensor([1, 1])]; + tensor k_67_pad_type_0 = const()[name = tensor("k_67_pad_type_0"), val = tensor("custom")]; + tensor k_67_pad_0 = const()[name = tensor("k_67_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(324751744))), lut = tensor([-0x1.08p-6, 0x1.084p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_67_cast = conv(dilations = var_3599, groups = var_1186, pad = k_67_pad_0, pad_type = k_67_pad_type_0, strides = var_3597, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_67_cast")]; + tensor var_3603 = const()[name = tensor("op_3603"), val = tensor([1, 1])]; + tensor var_3605 = const()[name = tensor("op_3605"), val = tensor([1, 1])]; + tensor v_67_pad_type_0 = const()[name = tensor("v_67_pad_type_0"), val = tensor("custom")]; + tensor v_67_pad_0 = const()[name = tensor("v_67_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(325079488))), lut = tensor([-0x1.1f4p-6, 0x1.2p-6]), name = 
tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_67_cast = conv(dilations = var_3605, groups = var_1186, pad = v_67_pad_0, pad_type = v_67_pad_type_0, strides = var_3603, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_67_cast")]; + tensor var_3609 = const()[name = tensor("op_3609"), val = tensor([2, 20, 64, -1])]; + tensor var_3610_cast = reshape(shape = var_3609, x = q_67_cast)[name = tensor("op_3610_cast")]; + tensor var_3611 = const()[name = tensor("op_3611"), val = tensor([2, 20, 64, -1])]; + tensor var_3612_cast = reshape(shape = var_3611, x = k_67_cast)[name = tensor("op_3612_cast")]; + tensor var_3613 = const()[name = tensor("op_3613"), val = tensor([2, 20, 64, -1])]; + tensor var_3614_cast = reshape(shape = var_3613, x = v_67_cast)[name = tensor("op_3614_cast")]; + tensor attn_weights_133_transpose_x_0 = const()[name = tensor("attn_weights_133_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_133_transpose_y_0 = const()[name = tensor("attn_weights_133_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_133_cast = matmul(transpose_x = attn_weights_133_transpose_x_0, transpose_y = attn_weights_133_transpose_y_0, x = var_3610_cast, y = var_3612_cast)[name = tensor("attn_weights_133_cast")]; + tensor attn_weights_135_cast = mul(x = attn_weights_133_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_135_cast")]; + tensor var_3618_cast = softmax(axis = var_1170, x = attn_weights_135_cast)[name = tensor("op_3618_cast")]; + tensor attn_67_transpose_x_0 = const()[name = tensor("attn_67_transpose_x_0"), val = tensor(false)]; + tensor attn_67_transpose_y_0 = const()[name = tensor("attn_67_transpose_y_0"), val = tensor(true)]; + tensor attn_67_cast = matmul(transpose_x = attn_67_transpose_x_0, transpose_y = attn_67_transpose_y_0, x = var_3614_cast, y = var_3618_cast)[name = tensor("attn_67_cast")]; + tensor var_3622 = const()[name = tensor("op_3622"), val = tensor([2, 1280, 1, -1])]; + tensor input_247_cast = reshape(shape = var_3622, x = attn_67_cast)[name = tensor("input_247_cast")]; + tensor var_3627 = const()[name = tensor("op_3627"), val = tensor([1, 1])]; + tensor var_3629 = const()[name = tensor("op_3629"), val = tensor([1, 1])]; + tensor var_3631_pad_type_0 = const()[name = tensor("op_3631_pad_type_0"), val = tensor("custom")]; + tensor var_3631_pad_0 = const()[name = tensor("op_3631_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(325407232))), lut = tensor([-0x1.6ecp-7, 0x1.6fp-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(325612096)))]; + tensor var_3631_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_3629, groups = var_1186, pad = var_3631_pad_0, pad_type = var_3631_pad_type_0, strides = var_3627, weight = 
down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_247_cast)[name = tensor("op_3631_cast")]; + tensor inputs_101_cast = add(x = var_3631_cast, y = inputs_99_cast)[name = tensor("inputs_101_cast")]; + tensor var_3635 = const()[name = tensor("op_3635"), val = tensor([1])]; + tensor channels_mean_101_cast = reduce_mean(axes = var_3635, keep_dims = var_1181, x = inputs_101_cast)[name = tensor("channels_mean_101_cast")]; + tensor zero_mean_101_cast = sub(x = inputs_101_cast, y = channels_mean_101_cast)[name = tensor("zero_mean_101_cast")]; + tensor zero_mean_sq_101_cast = mul(x = zero_mean_101_cast, y = zero_mean_101_cast)[name = tensor("zero_mean_sq_101_cast")]; + tensor var_3639 = const()[name = tensor("op_3639"), val = tensor([1])]; + tensor var_3640_cast = reduce_mean(axes = var_3639, keep_dims = var_1181, x = zero_mean_sq_101_cast)[name = tensor("op_3640_cast")]; + tensor var_3641_to_fp16 = const()[name = tensor("op_3641_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3642_cast = add(x = var_3640_cast, y = var_3641_to_fp16)[name = tensor("op_3642_cast")]; + tensor denom_101_epsilon_0_to_fp16 = const()[name = tensor("denom_101_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_101_cast = rsqrt(epsilon = denom_101_epsilon_0_to_fp16, x = var_3642_cast)[name = tensor("denom_101_cast")]; + tensor out_101_cast = mul(x = zero_mean_101_cast, y = denom_101_cast)[name = tensor("out_101_cast")]; + tensor var_3646_to_fp16 = const()[name = tensor("op_3646_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(325614720)))]; + tensor var_3647_cast = add(x = out_101_cast, y = var_3646_to_fp16)[name = tensor("op_3647_cast")]; + tensor var_3649_to_fp16 = const()[name = tensor("op_3649_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(325617344)))]; + tensor input_249_cast = mul(x = var_3647_cast, y = var_3649_to_fp16)[name = tensor("input_249_cast")]; + tensor var_3657 = const()[name = tensor("op_3657"), val = tensor([1, 1])]; + tensor var_3659 = const()[name = tensor("op_3659"), val = tensor([1, 1])]; + tensor var_3661_pad_type_0 = const()[name = tensor("op_3661_pad_type_0"), val = tensor("custom")]; + tensor var_3661_pad_0 = const()[name = tensor("op_3661_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(325619968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(332173632))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(332173760)))]; + tensor var_3661_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_3659, groups = var_1186, pad = var_3661_pad_0, pad_type = var_3661_pad_type_0, strides = var_3657, weight = down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_249_cast)[name = tensor("op_3661_cast")]; + tensor var_3662_split_sizes_0 = 
const()[name = tensor("op_3662_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3662_axis_0 = const()[name = tensor("op_3662_axis_0"), val = tensor(1)]; + tensor var_3662_cast_0, tensor var_3662_cast_1 = split(axis = var_3662_axis_0, split_sizes = var_3662_split_sizes_0, x = var_3661_cast)[name = tensor("op_3662_cast")]; + tensor var_3664_mode_0 = const()[name = tensor("op_3664_mode_0"), val = tensor("EXACT")]; + tensor var_3664_cast = gelu(mode = var_3664_mode_0, x = var_3662_cast_1)[name = tensor("op_3664_cast")]; + tensor input_251_cast = mul(x = var_3662_cast_0, y = var_3664_cast)[name = tensor("input_251_cast")]; + tensor var_3668 = const()[name = tensor("op_3668"), val = tensor([1, 1])]; + tensor var_3670 = const()[name = tensor("op_3670"), val = tensor([1, 1])]; + tensor var_3672_pad_type_0 = const()[name = tensor("op_3672_pad_type_0"), val = tensor("custom")]; + tensor var_3672_pad_0 = const()[name = tensor("op_3672_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(332194304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335471168))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335471296)))]; + tensor var_3672_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_3670, groups = var_1186, pad = var_3672_pad_0, pad_type = var_3672_pad_type_0, strides = var_3668, weight = down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_251_cast)[name = tensor("op_3672_cast")]; + tensor inputs_103_cast = add(x = var_3672_cast, y = inputs_101_cast)[name = tensor("inputs_103_cast")]; + tensor var_3682 = const()[name = tensor("op_3682"), val = tensor([1])]; + tensor channels_mean_103_cast = reduce_mean(axes = var_3682, keep_dims = var_1181, x = inputs_103_cast)[name = tensor("channels_mean_103_cast")]; + tensor zero_mean_103_cast = sub(x = inputs_103_cast, y = channels_mean_103_cast)[name = tensor("zero_mean_103_cast")]; + tensor zero_mean_sq_103_cast = mul(x = zero_mean_103_cast, y = zero_mean_103_cast)[name = tensor("zero_mean_sq_103_cast")]; + tensor var_3686 = const()[name = tensor("op_3686"), val = tensor([1])]; + tensor var_3687_cast = reduce_mean(axes = var_3686, keep_dims = var_1181, x = zero_mean_sq_103_cast)[name = tensor("op_3687_cast")]; + tensor var_3688_to_fp16 = const()[name = tensor("op_3688_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3689_cast = add(x = var_3687_cast, y = var_3688_to_fp16)[name = tensor("op_3689_cast")]; + tensor denom_103_epsilon_0_to_fp16 = const()[name = tensor("denom_103_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_103_cast = rsqrt(epsilon = denom_103_epsilon_0_to_fp16, x = var_3689_cast)[name = tensor("denom_103_cast")]; + tensor out_103_cast = mul(x = zero_mean_103_cast, y = denom_103_cast)[name = tensor("out_103_cast")]; + tensor var_3693_to_fp16 = const()[name = tensor("op_3693_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(335473920)))]; + tensor var_3694_cast = add(x = out_103_cast, y = var_3693_to_fp16)[name = tensor("op_3694_cast")]; + tensor var_3696_to_fp16 = const()[name = tensor("op_3696_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335476544)))]; + tensor hidden_states_155_cast = mul(x = var_3694_cast, y = var_3696_to_fp16)[name = tensor("hidden_states_155_cast")]; + tensor var_3703 = const()[name = tensor("op_3703"), val = tensor([1, 1])]; + tensor var_3705 = const()[name = tensor("op_3705"), val = tensor([1, 1])]; + tensor q_69_pad_type_0 = const()[name = tensor("q_69_pad_type_0"), val = tensor("custom")]; + tensor q_69_pad_0 = const()[name = tensor("q_69_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335479168))), lut = tensor([-0x1.568p-6, 0x1.58p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_69_cast = conv(dilations = var_3705, groups = var_1186, pad = q_69_pad_0, pad_type = q_69_pad_type_0, strides = var_3703, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_155_cast)[name = tensor("q_69_cast")]; + tensor var_3709 = const()[name = tensor("op_3709"), val = tensor([1, 1])]; + tensor var_3711 = const()[name = tensor("op_3711"), val = tensor([1, 1])]; + tensor k_69_pad_type_0 = const()[name = tensor("k_69_pad_type_0"), val = tensor("custom")]; + tensor k_69_pad_0 = const()[name = tensor("k_69_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335684032))), lut = tensor([-0x1.574p-6, 0x1.56cp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_69_cast = conv(dilations = var_3711, groups = var_1186, pad = k_69_pad_0, pad_type = k_69_pad_type_0, strides = var_3709, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_155_cast)[name = tensor("k_69_cast")]; + tensor var_3715 = const()[name = tensor("op_3715"), val = tensor([1, 1])]; + tensor var_3717 = const()[name = tensor("op_3717"), val = tensor([1, 1])]; + tensor v_69_pad_type_0 = const()[name = tensor("v_69_pad_type_0"), val = tensor("custom")]; + tensor v_69_pad_0 = const()[name = tensor("v_69_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(335888896))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336708160))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_69_cast = conv(dilations = var_3717, groups = var_1186, pad = v_69_pad_0, pad_type = v_69_pad_type_0, strides = var_3715, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x 
= hidden_states_155_cast)[name = tensor("v_69_cast")]; + tensor var_3721 = const()[name = tensor("op_3721"), val = tensor([2, 20, 64, -1])]; + tensor var_3722_cast = reshape(shape = var_3721, x = q_69_cast)[name = tensor("op_3722_cast")]; + tensor var_3723 = const()[name = tensor("op_3723"), val = tensor([2, 20, 64, -1])]; + tensor var_3724_cast = reshape(shape = var_3723, x = k_69_cast)[name = tensor("op_3724_cast")]; + tensor var_3725 = const()[name = tensor("op_3725"), val = tensor([2, 20, 64, -1])]; + tensor var_3726_cast = reshape(shape = var_3725, x = v_69_cast)[name = tensor("op_3726_cast")]; + tensor attn_weights_137_transpose_x_0 = const()[name = tensor("attn_weights_137_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_137_transpose_y_0 = const()[name = tensor("attn_weights_137_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_137_cast = matmul(transpose_x = attn_weights_137_transpose_x_0, transpose_y = attn_weights_137_transpose_y_0, x = var_3722_cast, y = var_3724_cast)[name = tensor("attn_weights_137_cast")]; + tensor attn_weights_139_cast = mul(x = attn_weights_137_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_139_cast")]; + tensor var_3730_cast = softmax(axis = var_1170, x = attn_weights_139_cast)[name = tensor("op_3730_cast")]; + tensor attn_69_transpose_x_0 = const()[name = tensor("attn_69_transpose_x_0"), val = tensor(false)]; + tensor attn_69_transpose_y_0 = const()[name = tensor("attn_69_transpose_y_0"), val = tensor(true)]; + tensor attn_69_cast = matmul(transpose_x = attn_69_transpose_x_0, transpose_y = attn_69_transpose_y_0, x = var_3726_cast, y = var_3730_cast)[name = tensor("attn_69_cast")]; + tensor var_3734 = const()[name = tensor("op_3734"), val = tensor([2, 1280, 1, -1])]; + tensor input_253_cast = reshape(shape = var_3734, x = attn_69_cast)[name = tensor("input_253_cast")]; + tensor var_3739 = const()[name = tensor("op_3739"), val = tensor([1, 1])]; + tensor var_3741 = const()[name = tensor("op_3741"), val = tensor([1, 1])]; + tensor var_3743_pad_type_0 = const()[name = tensor("op_3743_pad_type_0"), val = tensor("custom")]; + tensor var_3743_pad_0 = const()[name = tensor("op_3743_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(336708288))), lut = tensor([-0x1.35cp-5, -0x1.74p-7, 0x1.74p-7, 0x1.358p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(337117952)))]; + tensor var_3743_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_3741, groups = var_1186, pad = var_3743_pad_0, pad_type = var_3743_pad_type_0, strides = var_3739, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_253_cast)[name = tensor("op_3743_cast")]; + tensor inputs_105_cast = add(x = var_3743_cast, y = inputs_103_cast)[name = tensor("inputs_105_cast")]; + tensor var_3747 = const()[name = tensor("op_3747"), val = tensor([1])]; + tensor channels_mean_105_cast = 
reduce_mean(axes = var_3747, keep_dims = var_1181, x = inputs_105_cast)[name = tensor("channels_mean_105_cast")]; + tensor zero_mean_105_cast = sub(x = inputs_105_cast, y = channels_mean_105_cast)[name = tensor("zero_mean_105_cast")]; + tensor zero_mean_sq_105_cast = mul(x = zero_mean_105_cast, y = zero_mean_105_cast)[name = tensor("zero_mean_sq_105_cast")]; + tensor var_3751 = const()[name = tensor("op_3751"), val = tensor([1])]; + tensor var_3752_cast = reduce_mean(axes = var_3751, keep_dims = var_1181, x = zero_mean_sq_105_cast)[name = tensor("op_3752_cast")]; + tensor var_3753_to_fp16 = const()[name = tensor("op_3753_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3754_cast = add(x = var_3752_cast, y = var_3753_to_fp16)[name = tensor("op_3754_cast")]; + tensor denom_105_epsilon_0_to_fp16 = const()[name = tensor("denom_105_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_105_cast = rsqrt(epsilon = denom_105_epsilon_0_to_fp16, x = var_3754_cast)[name = tensor("denom_105_cast")]; + tensor out_105_cast = mul(x = zero_mean_105_cast, y = denom_105_cast)[name = tensor("out_105_cast")]; + tensor var_3758_to_fp16 = const()[name = tensor("op_3758_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(337120576)))]; + tensor var_3759_cast = add(x = out_105_cast, y = var_3758_to_fp16)[name = tensor("op_3759_cast")]; + tensor var_3761_to_fp16 = const()[name = tensor("op_3761_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(337123200)))]; + tensor hidden_states_157_cast = mul(x = var_3759_cast, y = var_3761_to_fp16)[name = tensor("hidden_states_157_cast")]; + tensor var_3768 = const()[name = tensor("op_3768"), val = tensor([1, 1])]; + tensor var_3770 = const()[name = tensor("op_3770"), val = tensor([1, 1])]; + tensor q_71_pad_type_0 = const()[name = tensor("q_71_pad_type_0"), val = tensor("custom")]; + tensor q_71_pad_0 = const()[name = tensor("q_71_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(337125824))), lut = tensor([-0x1.26cp-6, 0x1.268p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_71_cast = conv(dilations = var_3770, groups = var_1186, pad = q_71_pad_0, pad_type = q_71_pad_type_0, strides = var_3768, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_157_cast)[name = tensor("q_71_cast")]; + tensor var_3774 = const()[name = tensor("op_3774"), val = tensor([1, 1])]; + tensor var_3776 = const()[name = tensor("op_3776"), val = tensor([1, 1])]; + tensor k_71_pad_type_0 = const()[name = tensor("k_71_pad_type_0"), val = tensor("custom")]; + tensor k_71_pad_0 = const()[name = tensor("k_71_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(337330688))), lut = tensor([-0x1.f2cp-7, 0x1.f4cp-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_71_cast = conv(dilations = var_3776, groups = var_1186, pad = k_71_pad_0, pad_type = 
k_71_pad_type_0, strides = var_3774, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_71_cast")]; + tensor var_3780 = const()[name = tensor("op_3780"), val = tensor([1, 1])]; + tensor var_3782 = const()[name = tensor("op_3782"), val = tensor([1, 1])]; + tensor v_71_pad_type_0 = const()[name = tensor("v_71_pad_type_0"), val = tensor("custom")]; + tensor v_71_pad_0 = const()[name = tensor("v_71_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(337658432))), lut = tensor([-0x1.1ecp-6, 0x1.1f4p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_71_cast = conv(dilations = var_3782, groups = var_1186, pad = v_71_pad_0, pad_type = v_71_pad_type_0, strides = var_3780, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_71_cast")]; + tensor var_3786 = const()[name = tensor("op_3786"), val = tensor([2, 20, 64, -1])]; + tensor var_3787_cast = reshape(shape = var_3786, x = q_71_cast)[name = tensor("op_3787_cast")]; + tensor var_3788 = const()[name = tensor("op_3788"), val = tensor([2, 20, 64, -1])]; + tensor var_3789_cast = reshape(shape = var_3788, x = k_71_cast)[name = tensor("op_3789_cast")]; + tensor var_3790 = const()[name = tensor("op_3790"), val = tensor([2, 20, 64, -1])]; + tensor var_3791_cast = reshape(shape = var_3790, x = v_71_cast)[name = tensor("op_3791_cast")]; + tensor attn_weights_141_transpose_x_0 = const()[name = tensor("attn_weights_141_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_141_transpose_y_0 = const()[name = tensor("attn_weights_141_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_141_cast = matmul(transpose_x = attn_weights_141_transpose_x_0, transpose_y = attn_weights_141_transpose_y_0, x = var_3787_cast, y = var_3789_cast)[name = tensor("attn_weights_141_cast")]; + tensor attn_weights_143_cast = mul(x = attn_weights_141_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_143_cast")]; + tensor var_3795_cast = softmax(axis = var_1170, x = attn_weights_143_cast)[name = tensor("op_3795_cast")]; + tensor attn_71_transpose_x_0 = const()[name = tensor("attn_71_transpose_x_0"), val = tensor(false)]; + tensor attn_71_transpose_y_0 = const()[name = tensor("attn_71_transpose_y_0"), val = tensor(true)]; + tensor attn_71_cast = matmul(transpose_x = attn_71_transpose_x_0, transpose_y = attn_71_transpose_y_0, x = var_3791_cast, y = var_3795_cast)[name = tensor("attn_71_cast")]; + tensor var_3799 = const()[name = tensor("op_3799"), val = tensor([2, 1280, 1, -1])]; + tensor input_255_cast = reshape(shape = var_3799, x = attn_71_cast)[name = tensor("input_255_cast")]; + tensor var_3804 = const()[name = tensor("op_3804"), val = tensor([1, 1])]; + tensor var_3806 = const()[name = tensor("op_3806"), val = tensor([1, 1])]; + tensor var_3808_pad_type_0 = const()[name = tensor("op_3808_pad_type_0"), val = tensor("custom")]; + tensor var_3808_pad_0 = const()[name = tensor("op_3808_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(337986176))), lut = tensor([-0x1.618p-7, 0x1.628p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338191040)))]; + tensor var_3808_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_3806, groups = var_1186, pad = var_3808_pad_0, pad_type = var_3808_pad_type_0, strides = var_3804, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_255_cast)[name = tensor("op_3808_cast")]; + tensor inputs_107_cast = add(x = var_3808_cast, y = inputs_105_cast)[name = tensor("inputs_107_cast")]; + tensor var_3812 = const()[name = tensor("op_3812"), val = tensor([1])]; + tensor channels_mean_107_cast = reduce_mean(axes = var_3812, keep_dims = var_1181, x = inputs_107_cast)[name = tensor("channels_mean_107_cast")]; + tensor zero_mean_107_cast = sub(x = inputs_107_cast, y = channels_mean_107_cast)[name = tensor("zero_mean_107_cast")]; + tensor zero_mean_sq_107_cast = mul(x = zero_mean_107_cast, y = zero_mean_107_cast)[name = tensor("zero_mean_sq_107_cast")]; + tensor var_3816 = const()[name = tensor("op_3816"), val = tensor([1])]; + tensor var_3817_cast = reduce_mean(axes = var_3816, keep_dims = var_1181, x = zero_mean_sq_107_cast)[name = tensor("op_3817_cast")]; + tensor var_3818_to_fp16 = const()[name = tensor("op_3818_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3819_cast = add(x = var_3817_cast, y = var_3818_to_fp16)[name = tensor("op_3819_cast")]; + tensor denom_107_epsilon_0_to_fp16 = const()[name = tensor("denom_107_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_107_cast = rsqrt(epsilon = denom_107_epsilon_0_to_fp16, x = var_3819_cast)[name = tensor("denom_107_cast")]; + tensor out_107_cast = mul(x = zero_mean_107_cast, y = denom_107_cast)[name = tensor("out_107_cast")]; + tensor var_3823_to_fp16 = const()[name = tensor("op_3823_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338193664)))]; + tensor var_3824_cast = add(x = out_107_cast, y = var_3823_to_fp16)[name = tensor("op_3824_cast")]; + tensor var_3826_to_fp16 = const()[name = tensor("op_3826_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338196288)))]; + tensor input_257_cast = mul(x = var_3824_cast, y = var_3826_to_fp16)[name = tensor("input_257_cast")]; + tensor var_3834 = const()[name = tensor("op_3834"), val = tensor([1, 1])]; + tensor var_3836 = const()[name = tensor("op_3836"), val = tensor([1, 1])]; + tensor var_3838_pad_type_0 = const()[name = tensor("op_3838_pad_type_0"), val = tensor("custom")]; + tensor var_3838_pad_0 = const()[name = tensor("op_3838_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338198912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(344752576))), name = 
tensor("down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(344752704)))]; + tensor var_3838_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_3836, groups = var_1186, pad = var_3838_pad_0, pad_type = var_3838_pad_type_0, strides = var_3834, weight = down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_257_cast)[name = tensor("op_3838_cast")]; + tensor var_3839_split_sizes_0 = const()[name = tensor("op_3839_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3839_axis_0 = const()[name = tensor("op_3839_axis_0"), val = tensor(1)]; + tensor var_3839_cast_0, tensor var_3839_cast_1 = split(axis = var_3839_axis_0, split_sizes = var_3839_split_sizes_0, x = var_3838_cast)[name = tensor("op_3839_cast")]; + tensor var_3841_mode_0 = const()[name = tensor("op_3841_mode_0"), val = tensor("EXACT")]; + tensor var_3841_cast = gelu(mode = var_3841_mode_0, x = var_3839_cast_1)[name = tensor("op_3841_cast")]; + tensor input_259_cast = mul(x = var_3839_cast_0, y = var_3841_cast)[name = tensor("input_259_cast")]; + tensor var_3845 = const()[name = tensor("op_3845"), val = tensor([1, 1])]; + tensor var_3847 = const()[name = tensor("op_3847"), val = tensor([1, 1])]; + tensor var_3849_pad_type_0 = const()[name = tensor("op_3849_pad_type_0"), val = tensor("custom")]; + tensor var_3849_pad_0 = const()[name = tensor("op_3849_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(344773248))), lut = tensor([-0x1.4c4p-5, -0x1.8dp-7, 0x1.8dcp-7, 0x1.4c4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(346411712)))]; + tensor var_3849_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_3847, groups = var_1186, pad = var_3849_pad_0, pad_type = var_3849_pad_type_0, strides = var_3845, weight = down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_259_cast)[name = tensor("op_3849_cast")]; + tensor inputs_109_cast = add(x = var_3849_cast, y = inputs_107_cast)[name = tensor("inputs_109_cast")]; + tensor var_3859 = const()[name = tensor("op_3859"), val = tensor([1])]; + tensor channels_mean_109_cast = reduce_mean(axes = var_3859, keep_dims = var_1181, x = inputs_109_cast)[name = tensor("channels_mean_109_cast")]; + tensor zero_mean_109_cast = sub(x = inputs_109_cast, y = channels_mean_109_cast)[name = tensor("zero_mean_109_cast")]; + tensor zero_mean_sq_109_cast = mul(x = zero_mean_109_cast, y = zero_mean_109_cast)[name = tensor("zero_mean_sq_109_cast")]; + tensor var_3863 = const()[name = tensor("op_3863"), val = tensor([1])]; + 
tensor var_3864_cast = reduce_mean(axes = var_3863, keep_dims = var_1181, x = zero_mean_sq_109_cast)[name = tensor("op_3864_cast")]; + tensor var_3865_to_fp16 = const()[name = tensor("op_3865_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3866_cast = add(x = var_3864_cast, y = var_3865_to_fp16)[name = tensor("op_3866_cast")]; + tensor denom_109_epsilon_0_to_fp16 = const()[name = tensor("denom_109_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_109_cast = rsqrt(epsilon = denom_109_epsilon_0_to_fp16, x = var_3866_cast)[name = tensor("denom_109_cast")]; + tensor out_109_cast = mul(x = zero_mean_109_cast, y = denom_109_cast)[name = tensor("out_109_cast")]; + tensor var_3870_to_fp16 = const()[name = tensor("op_3870_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(346414336)))]; + tensor var_3871_cast = add(x = out_109_cast, y = var_3870_to_fp16)[name = tensor("op_3871_cast")]; + tensor var_3873_to_fp16 = const()[name = tensor("op_3873_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(346416960)))]; + tensor hidden_states_161_cast = mul(x = var_3871_cast, y = var_3873_to_fp16)[name = tensor("hidden_states_161_cast")]; + tensor var_3880 = const()[name = tensor("op_3880"), val = tensor([1, 1])]; + tensor var_3882 = const()[name = tensor("op_3882"), val = tensor([1, 1])]; + tensor q_73_pad_type_0 = const()[name = tensor("q_73_pad_type_0"), val = tensor("custom")]; + tensor q_73_pad_0 = const()[name = tensor("q_73_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(346419584))), lut = tensor([-0x1.3dcp-5, -0x1.7dp-7, 0x1.7f8p-7, 0x1.3e8p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_73_cast = conv(dilations = var_3882, groups = var_1186, pad = q_73_pad_0, pad_type = q_73_pad_type_0, strides = var_3880, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_161_cast)[name = tensor("q_73_cast")]; + tensor var_3886 = const()[name = tensor("op_3886"), val = tensor([1, 1])]; + tensor var_3888 = const()[name = tensor("op_3888"), val = tensor([1, 1])]; + tensor k_73_pad_type_0 = const()[name = tensor("k_73_pad_type_0"), val = tensor("custom")]; + tensor k_73_pad_0 = const()[name = tensor("k_73_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(346829248))), lut = tensor([-0x1.514p-6, 0x1.52p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_73_cast = conv(dilations = var_3888, groups = var_1186, pad = k_73_pad_0, pad_type = k_73_pad_type_0, strides = var_3886, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_161_cast)[name = tensor("k_73_cast")]; + tensor var_3892 = const()[name = tensor("op_3892"), val = tensor([1, 1])]; + tensor var_3894 = const()[name = tensor("op_3894"), val = tensor([1, 1])]; + tensor v_73_pad_type_0 = const()[name = tensor("v_73_pad_type_0"), val = 
tensor("custom")]; + tensor v_73_pad_0 = const()[name = tensor("v_73_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347034112))), lut = tensor([-0x1.2ep-5, -0x1.6d4p-7, 0x1.6acp-7, 0x1.2d8p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_73_cast = conv(dilations = var_3894, groups = var_1186, pad = v_73_pad_0, pad_type = v_73_pad_type_0, strides = var_3892, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_161_cast)[name = tensor("v_73_cast")]; + tensor var_3898 = const()[name = tensor("op_3898"), val = tensor([2, 20, 64, -1])]; + tensor var_3899_cast = reshape(shape = var_3898, x = q_73_cast)[name = tensor("op_3899_cast")]; + tensor var_3900 = const()[name = tensor("op_3900"), val = tensor([2, 20, 64, -1])]; + tensor var_3901_cast = reshape(shape = var_3900, x = k_73_cast)[name = tensor("op_3901_cast")]; + tensor var_3902 = const()[name = tensor("op_3902"), val = tensor([2, 20, 64, -1])]; + tensor var_3903_cast = reshape(shape = var_3902, x = v_73_cast)[name = tensor("op_3903_cast")]; + tensor attn_weights_145_transpose_x_0 = const()[name = tensor("attn_weights_145_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_145_transpose_y_0 = const()[name = tensor("attn_weights_145_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_145_cast = matmul(transpose_x = attn_weights_145_transpose_x_0, transpose_y = attn_weights_145_transpose_y_0, x = var_3899_cast, y = var_3901_cast)[name = tensor("attn_weights_145_cast")]; + tensor attn_weights_147_cast = mul(x = attn_weights_145_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_147_cast")]; + tensor var_3907_cast = softmax(axis = var_1170, x = attn_weights_147_cast)[name = tensor("op_3907_cast")]; + tensor attn_73_transpose_x_0 = const()[name = tensor("attn_73_transpose_x_0"), val = tensor(false)]; + tensor attn_73_transpose_y_0 = const()[name = tensor("attn_73_transpose_y_0"), val = tensor(true)]; + tensor attn_73_cast = matmul(transpose_x = attn_73_transpose_x_0, transpose_y = attn_73_transpose_y_0, x = var_3903_cast, y = var_3907_cast)[name = tensor("attn_73_cast")]; + tensor var_3911 = const()[name = tensor("op_3911"), val = tensor([2, 1280, 1, -1])]; + tensor input_261_cast = reshape(shape = var_3911, x = attn_73_cast)[name = tensor("input_261_cast")]; + tensor var_3916 = const()[name = tensor("op_3916"), val = tensor([1, 1])]; + tensor var_3918 = const()[name = tensor("op_3918"), val = tensor([1, 1])]; + tensor var_3920_pad_type_0 = const()[name = tensor("op_3920_pad_type_0"), val = tensor("custom")]; + tensor var_3920_pad_0 = const()[name = tensor("op_3920_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347443776))), lut = tensor([-0x1.2c4p-5, -0x1.6c8p-7, 0x1.64p-7, 0x1.2a8p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = 
tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347853440)))]; + tensor var_3920_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_3918, groups = var_1186, pad = var_3920_pad_0, pad_type = var_3920_pad_type_0, strides = var_3916, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_261_cast)[name = tensor("op_3920_cast")]; + tensor inputs_111_cast = add(x = var_3920_cast, y = inputs_109_cast)[name = tensor("inputs_111_cast")]; + tensor var_3924 = const()[name = tensor("op_3924"), val = tensor([1])]; + tensor channels_mean_111_cast = reduce_mean(axes = var_3924, keep_dims = var_1181, x = inputs_111_cast)[name = tensor("channels_mean_111_cast")]; + tensor zero_mean_111_cast = sub(x = inputs_111_cast, y = channels_mean_111_cast)[name = tensor("zero_mean_111_cast")]; + tensor zero_mean_sq_111_cast = mul(x = zero_mean_111_cast, y = zero_mean_111_cast)[name = tensor("zero_mean_sq_111_cast")]; + tensor var_3928 = const()[name = tensor("op_3928"), val = tensor([1])]; + tensor var_3929_cast = reduce_mean(axes = var_3928, keep_dims = var_1181, x = zero_mean_sq_111_cast)[name = tensor("op_3929_cast")]; + tensor var_3930_to_fp16 = const()[name = tensor("op_3930_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3931_cast = add(x = var_3929_cast, y = var_3930_to_fp16)[name = tensor("op_3931_cast")]; + tensor denom_111_epsilon_0_to_fp16 = const()[name = tensor("denom_111_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_111_cast = rsqrt(epsilon = denom_111_epsilon_0_to_fp16, x = var_3931_cast)[name = tensor("denom_111_cast")]; + tensor out_111_cast = mul(x = zero_mean_111_cast, y = denom_111_cast)[name = tensor("out_111_cast")]; + tensor var_3935_to_fp16 = const()[name = tensor("op_3935_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347856064)))]; + tensor var_3936_cast = add(x = out_111_cast, y = var_3935_to_fp16)[name = tensor("op_3936_cast")]; + tensor var_3938_to_fp16 = const()[name = tensor("op_3938_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347858688)))]; + tensor hidden_states_163_cast = mul(x = var_3936_cast, y = var_3938_to_fp16)[name = tensor("hidden_states_163_cast")]; + tensor var_3945 = const()[name = tensor("op_3945"), val = tensor([1, 1])]; + tensor var_3947 = const()[name = tensor("op_3947"), val = tensor([1, 1])]; + tensor q_75_pad_type_0 = const()[name = tensor("q_75_pad_type_0"), val = tensor("custom")]; + tensor q_75_pad_0 = const()[name = tensor("q_75_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347861312))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(348680576))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_75_cast = conv(dilations = var_3947, groups = var_1186, pad = q_75_pad_0, pad_type = q_75_pad_type_0, strides = var_3945, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_163_cast)[name = tensor("q_75_cast")]; + 
tensor var_3951 = const()[name = tensor("op_3951"), val = tensor([1, 1])]; + tensor var_3953 = const()[name = tensor("op_3953"), val = tensor([1, 1])]; + tensor k_75_pad_type_0 = const()[name = tensor("k_75_pad_type_0"), val = tensor("custom")]; + tensor k_75_pad_0 = const()[name = tensor("k_75_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(348680704))), lut = tensor([-0x1.d3p-7, 0x1.d54p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_75_cast = conv(dilations = var_3953, groups = var_1186, pad = k_75_pad_0, pad_type = k_75_pad_type_0, strides = var_3951, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_75_cast")]; + tensor var_3957 = const()[name = tensor("op_3957"), val = tensor([1, 1])]; + tensor var_3959 = const()[name = tensor("op_3959"), val = tensor([1, 1])]; + tensor v_75_pad_type_0 = const()[name = tensor("v_75_pad_type_0"), val = tensor("custom")]; + tensor v_75_pad_0 = const()[name = tensor("v_75_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349008448))), lut = tensor([-0x1.17cp-6, 0x1.188p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_75_cast = conv(dilations = var_3959, groups = var_1186, pad = v_75_pad_0, pad_type = v_75_pad_type_0, strides = var_3957, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_75_cast")]; + tensor var_3963 = const()[name = tensor("op_3963"), val = tensor([2, 20, 64, -1])]; + tensor var_3964_cast = reshape(shape = var_3963, x = q_75_cast)[name = tensor("op_3964_cast")]; + tensor var_3965 = const()[name = tensor("op_3965"), val = tensor([2, 20, 64, -1])]; + tensor var_3966_cast = reshape(shape = var_3965, x = k_75_cast)[name = tensor("op_3966_cast")]; + tensor var_3967 = const()[name = tensor("op_3967"), val = tensor([2, 20, 64, -1])]; + tensor var_3968_cast = reshape(shape = var_3967, x = v_75_cast)[name = tensor("op_3968_cast")]; + tensor attn_weights_149_transpose_x_0 = const()[name = tensor("attn_weights_149_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_149_transpose_y_0 = const()[name = tensor("attn_weights_149_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_149_cast = matmul(transpose_x = attn_weights_149_transpose_x_0, transpose_y = attn_weights_149_transpose_y_0, x = var_3964_cast, y = var_3966_cast)[name = tensor("attn_weights_149_cast")]; + tensor attn_weights_151_cast = mul(x = attn_weights_149_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_151_cast")]; + tensor var_3972_cast = softmax(axis = var_1170, x = attn_weights_151_cast)[name = tensor("op_3972_cast")]; + tensor attn_75_transpose_x_0 = const()[name = tensor("attn_75_transpose_x_0"), val = tensor(false)]; + tensor attn_75_transpose_y_0 = const()[name = tensor("attn_75_transpose_y_0"), val = tensor(true)]; + tensor attn_75_cast = matmul(transpose_x = 
attn_75_transpose_x_0, transpose_y = attn_75_transpose_y_0, x = var_3968_cast, y = var_3972_cast)[name = tensor("attn_75_cast")]; + tensor var_3976 = const()[name = tensor("op_3976"), val = tensor([2, 1280, 1, -1])]; + tensor input_263_cast = reshape(shape = var_3976, x = attn_75_cast)[name = tensor("input_263_cast")]; + tensor var_3981 = const()[name = tensor("op_3981"), val = tensor([1, 1])]; + tensor var_3983 = const()[name = tensor("op_3983"), val = tensor([1, 1])]; + tensor var_3985_pad_type_0 = const()[name = tensor("op_3985_pad_type_0"), val = tensor("custom")]; + tensor var_3985_pad_0 = const()[name = tensor("op_3985_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349336192))), lut = tensor([-0x1.5bp-7, 0x1.5a8p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349541056)))]; + tensor var_3985_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_3983, groups = var_1186, pad = var_3985_pad_0, pad_type = var_3985_pad_type_0, strides = var_3981, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_263_cast)[name = tensor("op_3985_cast")]; + tensor inputs_113_cast = add(x = var_3985_cast, y = inputs_111_cast)[name = tensor("inputs_113_cast")]; + tensor var_3989 = const()[name = tensor("op_3989"), val = tensor([1])]; + tensor channels_mean_113_cast = reduce_mean(axes = var_3989, keep_dims = var_1181, x = inputs_113_cast)[name = tensor("channels_mean_113_cast")]; + tensor zero_mean_113_cast = sub(x = inputs_113_cast, y = channels_mean_113_cast)[name = tensor("zero_mean_113_cast")]; + tensor zero_mean_sq_113_cast = mul(x = zero_mean_113_cast, y = zero_mean_113_cast)[name = tensor("zero_mean_sq_113_cast")]; + tensor var_3993 = const()[name = tensor("op_3993"), val = tensor([1])]; + tensor var_3994_cast = reduce_mean(axes = var_3993, keep_dims = var_1181, x = zero_mean_sq_113_cast)[name = tensor("op_3994_cast")]; + tensor var_3995_to_fp16 = const()[name = tensor("op_3995_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3996_cast = add(x = var_3994_cast, y = var_3995_to_fp16)[name = tensor("op_3996_cast")]; + tensor denom_113_epsilon_0_to_fp16 = const()[name = tensor("denom_113_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_113_cast = rsqrt(epsilon = denom_113_epsilon_0_to_fp16, x = var_3996_cast)[name = tensor("denom_113_cast")]; + tensor out_113_cast = mul(x = zero_mean_113_cast, y = denom_113_cast)[name = tensor("out_113_cast")]; + tensor var_4000_to_fp16 = const()[name = tensor("op_4000_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349543680)))]; + tensor var_4001_cast = add(x = out_113_cast, y = var_4000_to_fp16)[name = tensor("op_4001_cast")]; + tensor var_4003_to_fp16 = const()[name = tensor("op_4003_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349546304)))]; + tensor input_265_cast = 
mul(x = var_4001_cast, y = var_4003_to_fp16)[name = tensor("input_265_cast")]; + tensor var_4011 = const()[name = tensor("op_4011"), val = tensor([1, 1])]; + tensor var_4013 = const()[name = tensor("op_4013"), val = tensor([1, 1])]; + tensor var_4015_pad_type_0 = const()[name = tensor("op_4015_pad_type_0"), val = tensor("custom")]; + tensor var_4015_pad_0 = const()[name = tensor("op_4015_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349548928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(356102592))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(356102720)))]; + tensor var_4015_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_4013, groups = var_1186, pad = var_4015_pad_0, pad_type = var_4015_pad_type_0, strides = var_4011, weight = down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_265_cast)[name = tensor("op_4015_cast")]; + tensor var_4016_split_sizes_0 = const()[name = tensor("op_4016_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4016_axis_0 = const()[name = tensor("op_4016_axis_0"), val = tensor(1)]; + tensor var_4016_cast_0, tensor var_4016_cast_1 = split(axis = var_4016_axis_0, split_sizes = var_4016_split_sizes_0, x = var_4015_cast)[name = tensor("op_4016_cast")]; + tensor var_4018_mode_0 = const()[name = tensor("op_4018_mode_0"), val = tensor("EXACT")]; + tensor var_4018_cast = gelu(mode = var_4018_mode_0, x = var_4016_cast_1)[name = tensor("op_4018_cast")]; + tensor input_267_cast = mul(x = var_4016_cast_0, y = var_4018_cast)[name = tensor("input_267_cast")]; + tensor var_4022 = const()[name = tensor("op_4022"), val = tensor([1, 1])]; + tensor var_4024 = const()[name = tensor("op_4024"), val = tensor([1, 1])]; + tensor var_4026_pad_type_0 = const()[name = tensor("op_4026_pad_type_0"), val = tensor("custom")]; + tensor var_4026_pad_0 = const()[name = tensor("op_4026_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(356123264))), lut = tensor([-0x1.508p-5, -0x1.92cp-7, 0x1.92p-7, 0x1.504p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(357761728)))]; + tensor var_4026_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_4024, groups = var_1186, pad = var_4026_pad_0, pad_type = var_4026_pad_type_0, strides = var_4022, weight = 
down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_267_cast)[name = tensor("op_4026_cast")]; + tensor inputs_115_cast = add(x = var_4026_cast, y = inputs_113_cast)[name = tensor("inputs_115_cast")]; + tensor var_4036 = const()[name = tensor("op_4036"), val = tensor([1])]; + tensor channels_mean_115_cast = reduce_mean(axes = var_4036, keep_dims = var_1181, x = inputs_115_cast)[name = tensor("channels_mean_115_cast")]; + tensor zero_mean_115_cast = sub(x = inputs_115_cast, y = channels_mean_115_cast)[name = tensor("zero_mean_115_cast")]; + tensor zero_mean_sq_115_cast = mul(x = zero_mean_115_cast, y = zero_mean_115_cast)[name = tensor("zero_mean_sq_115_cast")]; + tensor var_4040 = const()[name = tensor("op_4040"), val = tensor([1])]; + tensor var_4041_cast = reduce_mean(axes = var_4040, keep_dims = var_1181, x = zero_mean_sq_115_cast)[name = tensor("op_4041_cast")]; + tensor var_4042_to_fp16 = const()[name = tensor("op_4042_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4043_cast = add(x = var_4041_cast, y = var_4042_to_fp16)[name = tensor("op_4043_cast")]; + tensor denom_115_epsilon_0_to_fp16 = const()[name = tensor("denom_115_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_115_cast = rsqrt(epsilon = denom_115_epsilon_0_to_fp16, x = var_4043_cast)[name = tensor("denom_115_cast")]; + tensor out_115_cast = mul(x = zero_mean_115_cast, y = denom_115_cast)[name = tensor("out_115_cast")]; + tensor var_4047_to_fp16 = const()[name = tensor("op_4047_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(357764352)))]; + tensor var_4048_cast = add(x = out_115_cast, y = var_4047_to_fp16)[name = tensor("op_4048_cast")]; + tensor var_4050_to_fp16 = const()[name = tensor("op_4050_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(357766976)))]; + tensor hidden_states_167_cast = mul(x = var_4048_cast, y = var_4050_to_fp16)[name = tensor("hidden_states_167_cast")]; + tensor var_4057 = const()[name = tensor("op_4057"), val = tensor([1, 1])]; + tensor var_4059 = const()[name = tensor("op_4059"), val = tensor([1, 1])]; + tensor q_77_pad_type_0 = const()[name = tensor("q_77_pad_type_0"), val = tensor("custom")]; + tensor q_77_pad_0 = const()[name = tensor("q_77_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(357769600))), lut = tensor([-0x1.538p-6, 0x1.548p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_77_cast = conv(dilations = var_4059, groups = var_1186, pad = q_77_pad_0, pad_type = q_77_pad_type_0, strides = var_4057, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_167_cast)[name = tensor("q_77_cast")]; + tensor var_4063 = const()[name = tensor("op_4063"), val = tensor([1, 1])]; + tensor var_4065 = const()[name = tensor("op_4065"), val = tensor([1, 1])]; + tensor k_77_pad_type_0 = const()[name = tensor("k_77_pad_type_0"), val = tensor("custom")]; + tensor k_77_pad_0 = const()[name = tensor("k_77_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(357974464))), lut = tensor([-0x1.528p-6, 0x1.528p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_77_cast = conv(dilations = var_4065, groups = var_1186, pad = k_77_pad_0, pad_type = k_77_pad_type_0, strides = var_4063, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_167_cast)[name = tensor("k_77_cast")]; + tensor var_4069 = const()[name = tensor("op_4069"), val = tensor([1, 1])]; + tensor var_4071 = const()[name = tensor("op_4071"), val = tensor([1, 1])]; + tensor v_77_pad_type_0 = const()[name = tensor("v_77_pad_type_0"), val = tensor("custom")]; + tensor v_77_pad_0 = const()[name = tensor("v_77_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(358179328))), lut = tensor([-0x1.2d8p-5, -0x1.6b4p-7, 0x1.6bp-7, 0x1.2d4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_77_cast = conv(dilations = var_4071, groups = var_1186, pad = v_77_pad_0, pad_type = v_77_pad_type_0, strides = var_4069, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_167_cast)[name = tensor("v_77_cast")]; + tensor var_4075 = const()[name = tensor("op_4075"), val = tensor([2, 20, 64, -1])]; + tensor var_4076_cast = reshape(shape = var_4075, x = q_77_cast)[name = tensor("op_4076_cast")]; + tensor var_4077 = const()[name = tensor("op_4077"), val = tensor([2, 20, 64, -1])]; + tensor var_4078_cast = reshape(shape = var_4077, x = k_77_cast)[name = tensor("op_4078_cast")]; + tensor var_4079 = const()[name = tensor("op_4079"), val = tensor([2, 20, 64, -1])]; + tensor var_4080_cast = reshape(shape = var_4079, x = v_77_cast)[name = tensor("op_4080_cast")]; + tensor attn_weights_153_transpose_x_0 = const()[name = tensor("attn_weights_153_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_153_transpose_y_0 = const()[name = tensor("attn_weights_153_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_153_cast = matmul(transpose_x = attn_weights_153_transpose_x_0, transpose_y = attn_weights_153_transpose_y_0, x = var_4076_cast, y = var_4078_cast)[name = tensor("attn_weights_153_cast")]; + tensor attn_weights_155_cast = mul(x = attn_weights_153_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_155_cast")]; + tensor var_4084_cast = softmax(axis = var_1170, x = attn_weights_155_cast)[name = tensor("op_4084_cast")]; + tensor attn_77_transpose_x_0 = const()[name = tensor("attn_77_transpose_x_0"), val = tensor(false)]; + tensor attn_77_transpose_y_0 = const()[name = tensor("attn_77_transpose_y_0"), val = tensor(true)]; + tensor attn_77_cast = matmul(transpose_x = attn_77_transpose_x_0, transpose_y = attn_77_transpose_y_0, x = var_4080_cast, y = var_4084_cast)[name = tensor("attn_77_cast")]; + tensor var_4088 = const()[name = tensor("op_4088"), val = tensor([2, 1280, 1, -1])]; + tensor input_269_cast = reshape(shape = var_4088, x = attn_77_cast)[name = tensor("input_269_cast")]; + tensor var_4093 = const()[name = tensor("op_4093"), val = tensor([1, 1])]; + tensor var_4095 = const()[name = tensor("op_4095"), val = tensor([1, 1])]; 
+ tensor var_4097_pad_type_0 = const()[name = tensor("op_4097_pad_type_0"), val = tensor("custom")]; + tensor var_4097_pad_0 = const()[name = tensor("op_4097_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(358588992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(359408256))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(359408384)))]; + tensor var_4097_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_4095, groups = var_1186, pad = var_4097_pad_0, pad_type = var_4097_pad_type_0, strides = var_4093, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_269_cast)[name = tensor("op_4097_cast")]; + tensor inputs_117_cast = add(x = var_4097_cast, y = inputs_115_cast)[name = tensor("inputs_117_cast")]; + tensor var_4101 = const()[name = tensor("op_4101"), val = tensor([1])]; + tensor channels_mean_117_cast = reduce_mean(axes = var_4101, keep_dims = var_1181, x = inputs_117_cast)[name = tensor("channels_mean_117_cast")]; + tensor zero_mean_117_cast = sub(x = inputs_117_cast, y = channels_mean_117_cast)[name = tensor("zero_mean_117_cast")]; + tensor zero_mean_sq_117_cast = mul(x = zero_mean_117_cast, y = zero_mean_117_cast)[name = tensor("zero_mean_sq_117_cast")]; + tensor var_4105 = const()[name = tensor("op_4105"), val = tensor([1])]; + tensor var_4106_cast = reduce_mean(axes = var_4105, keep_dims = var_1181, x = zero_mean_sq_117_cast)[name = tensor("op_4106_cast")]; + tensor var_4107_to_fp16 = const()[name = tensor("op_4107_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4108_cast = add(x = var_4106_cast, y = var_4107_to_fp16)[name = tensor("op_4108_cast")]; + tensor denom_117_epsilon_0_to_fp16 = const()[name = tensor("denom_117_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_117_cast = rsqrt(epsilon = denom_117_epsilon_0_to_fp16, x = var_4108_cast)[name = tensor("denom_117_cast")]; + tensor out_117_cast = mul(x = zero_mean_117_cast, y = denom_117_cast)[name = tensor("out_117_cast")]; + tensor var_4112_to_fp16 = const()[name = tensor("op_4112_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(359411008)))]; + tensor var_4113_cast = add(x = out_117_cast, y = var_4112_to_fp16)[name = tensor("op_4113_cast")]; + tensor var_4115_to_fp16 = const()[name = tensor("op_4115_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(359413632)))]; + tensor hidden_states_169_cast = mul(x = var_4113_cast, y = var_4115_to_fp16)[name = tensor("hidden_states_169_cast")]; + tensor var_4122 = const()[name = tensor("op_4122"), val = tensor([1, 1])]; + tensor var_4124 = const()[name = tensor("op_4124"), val = tensor([1, 1])]; + tensor q_79_pad_type_0 = const()[name = tensor("q_79_pad_type_0"), val = tensor("custom")]; + tensor q_79_pad_0 = const()[name = tensor("q_79_pad_0"), val = 
tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(359416256))), lut = tensor([-0x1.0fcp-6, 0x1.0fp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_79_cast = conv(dilations = var_4124, groups = var_1186, pad = q_79_pad_0, pad_type = q_79_pad_type_0, strides = var_4122, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_169_cast)[name = tensor("q_79_cast")]; + tensor var_4128 = const()[name = tensor("op_4128"), val = tensor([1, 1])]; + tensor var_4130 = const()[name = tensor("op_4130"), val = tensor([1, 1])]; + tensor k_79_pad_type_0 = const()[name = tensor("k_79_pad_type_0"), val = tensor("custom")]; + tensor k_79_pad_0 = const()[name = tensor("k_79_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(359621120))), lut = tensor([-0x1.b3p-7, 0x1.b38p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_79_cast = conv(dilations = var_4130, groups = var_1186, pad = k_79_pad_0, pad_type = k_79_pad_type_0, strides = var_4128, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_79_cast")]; + tensor var_4134 = const()[name = tensor("op_4134"), val = tensor([1, 1])]; + tensor var_4136 = const()[name = tensor("op_4136"), val = tensor([1, 1])]; + tensor v_79_pad_type_0 = const()[name = tensor("v_79_pad_type_0"), val = tensor("custom")]; + tensor v_79_pad_0 = const()[name = tensor("v_79_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(359948864))), lut = tensor([-0x1.12cp-6, 0x1.12p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_79_cast = conv(dilations = var_4136, groups = var_1186, pad = v_79_pad_0, pad_type = v_79_pad_type_0, strides = var_4134, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_79_cast")]; + tensor var_4140 = const()[name = tensor("op_4140"), val = tensor([2, 20, 64, -1])]; + tensor var_4141_cast = reshape(shape = var_4140, x = q_79_cast)[name = tensor("op_4141_cast")]; + tensor var_4142 = const()[name = tensor("op_4142"), val = tensor([2, 20, 64, -1])]; + tensor var_4143_cast = reshape(shape = var_4142, x = k_79_cast)[name = tensor("op_4143_cast")]; + tensor var_4144 = const()[name = tensor("op_4144"), val = tensor([2, 20, 64, -1])]; + tensor var_4145_cast = reshape(shape = var_4144, x = v_79_cast)[name = tensor("op_4145_cast")]; + tensor attn_weights_157_transpose_x_0 = const()[name = tensor("attn_weights_157_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_157_transpose_y_0 = const()[name = tensor("attn_weights_157_transpose_y_0"), val = 
tensor(false)]; + tensor attn_weights_157_cast = matmul(transpose_x = attn_weights_157_transpose_x_0, transpose_y = attn_weights_157_transpose_y_0, x = var_4141_cast, y = var_4143_cast)[name = tensor("attn_weights_157_cast")]; + tensor attn_weights_159_cast = mul(x = attn_weights_157_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_159_cast")]; + tensor var_4149_cast = softmax(axis = var_1170, x = attn_weights_159_cast)[name = tensor("op_4149_cast")]; + tensor attn_79_transpose_x_0 = const()[name = tensor("attn_79_transpose_x_0"), val = tensor(false)]; + tensor attn_79_transpose_y_0 = const()[name = tensor("attn_79_transpose_y_0"), val = tensor(true)]; + tensor attn_79_cast = matmul(transpose_x = attn_79_transpose_x_0, transpose_y = attn_79_transpose_y_0, x = var_4145_cast, y = var_4149_cast)[name = tensor("attn_79_cast")]; + tensor var_4153 = const()[name = tensor("op_4153"), val = tensor([2, 1280, 1, -1])]; + tensor input_271_cast = reshape(shape = var_4153, x = attn_79_cast)[name = tensor("input_271_cast")]; + tensor var_4158 = const()[name = tensor("op_4158"), val = tensor([1, 1])]; + tensor var_4160 = const()[name = tensor("op_4160"), val = tensor([1, 1])]; + tensor var_4162_pad_type_0 = const()[name = tensor("op_4162_pad_type_0"), val = tensor("custom")]; + tensor var_4162_pad_0 = const()[name = tensor("op_4162_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(360276608))), lut = tensor([-0x1.578p-7, 0x1.57cp-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(360481472)))]; + tensor var_4162_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_4160, groups = var_1186, pad = var_4162_pad_0, pad_type = var_4162_pad_type_0, strides = var_4158, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_271_cast)[name = tensor("op_4162_cast")]; + tensor inputs_119_cast = add(x = var_4162_cast, y = inputs_117_cast)[name = tensor("inputs_119_cast")]; + tensor var_4166 = const()[name = tensor("op_4166"), val = tensor([1])]; + tensor channels_mean_119_cast = reduce_mean(axes = var_4166, keep_dims = var_1181, x = inputs_119_cast)[name = tensor("channels_mean_119_cast")]; + tensor zero_mean_119_cast = sub(x = inputs_119_cast, y = channels_mean_119_cast)[name = tensor("zero_mean_119_cast")]; + tensor zero_mean_sq_119_cast = mul(x = zero_mean_119_cast, y = zero_mean_119_cast)[name = tensor("zero_mean_sq_119_cast")]; + tensor var_4170 = const()[name = tensor("op_4170"), val = tensor([1])]; + tensor var_4171_cast = reduce_mean(axes = var_4170, keep_dims = var_1181, x = zero_mean_sq_119_cast)[name = tensor("op_4171_cast")]; + tensor var_4172_to_fp16 = const()[name = tensor("op_4172_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4173_cast = add(x = var_4171_cast, y = var_4172_to_fp16)[name = tensor("op_4173_cast")]; + tensor denom_119_epsilon_0_to_fp16 = const()[name = tensor("denom_119_epsilon_0_to_fp16"), val = 
tensor(0x1p-24)]; + tensor denom_119_cast = rsqrt(epsilon = denom_119_epsilon_0_to_fp16, x = var_4173_cast)[name = tensor("denom_119_cast")]; + tensor out_119_cast = mul(x = zero_mean_119_cast, y = denom_119_cast)[name = tensor("out_119_cast")]; + tensor var_4177_to_fp16 = const()[name = tensor("op_4177_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(360484096)))]; + tensor var_4178_cast = add(x = out_119_cast, y = var_4177_to_fp16)[name = tensor("op_4178_cast")]; + tensor var_4180_to_fp16 = const()[name = tensor("op_4180_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(360486720)))]; + tensor input_273_cast = mul(x = var_4178_cast, y = var_4180_to_fp16)[name = tensor("input_273_cast")]; + tensor var_4188 = const()[name = tensor("op_4188"), val = tensor([1, 1])]; + tensor var_4190 = const()[name = tensor("op_4190"), val = tensor([1, 1])]; + tensor var_4192_pad_type_0 = const()[name = tensor("op_4192_pad_type_0"), val = tensor("custom")]; + tensor var_4192_pad_0 = const()[name = tensor("op_4192_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(360489344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(367043008))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(367043136)))]; + tensor var_4192_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_4190, groups = var_1186, pad = var_4192_pad_0, pad_type = var_4192_pad_type_0, strides = var_4188, weight = down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_273_cast)[name = tensor("op_4192_cast")]; + tensor var_4193_split_sizes_0 = const()[name = tensor("op_4193_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4193_axis_0 = const()[name = tensor("op_4193_axis_0"), val = tensor(1)]; + tensor var_4193_cast_0, tensor var_4193_cast_1 = split(axis = var_4193_axis_0, split_sizes = var_4193_split_sizes_0, x = var_4192_cast)[name = tensor("op_4193_cast")]; + tensor var_4195_mode_0 = const()[name = tensor("op_4195_mode_0"), val = tensor("EXACT")]; + tensor var_4195_cast = gelu(mode = var_4195_mode_0, x = var_4193_cast_1)[name = tensor("op_4195_cast")]; + tensor input_275_cast = mul(x = var_4193_cast_0, y = var_4195_cast)[name = tensor("input_275_cast")]; + tensor var_4199 = const()[name = tensor("op_4199"), val = tensor([1, 1])]; + tensor var_4201 = const()[name = tensor("op_4201"), val = tensor([1, 1])]; + tensor var_4203_pad_type_0 = const()[name = tensor("op_4203_pad_type_0"), val = tensor("custom")]; + tensor var_4203_pad_0 = const()[name = tensor("op_4203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(367063680))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(370340544))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(370340672)))]; + tensor var_4203_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_4201, groups = var_1186, pad = var_4203_pad_0, pad_type = var_4203_pad_type_0, strides = var_4199, weight = down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_275_cast)[name = tensor("op_4203_cast")]; + tensor inputs_121_cast = add(x = var_4203_cast, y = inputs_119_cast)[name = tensor("inputs_121_cast")]; + tensor var_4213 = const()[name = tensor("op_4213"), val = tensor([1])]; + tensor channels_mean_121_cast = reduce_mean(axes = var_4213, keep_dims = var_1181, x = inputs_121_cast)[name = tensor("channels_mean_121_cast")]; + tensor zero_mean_121_cast = sub(x = inputs_121_cast, y = channels_mean_121_cast)[name = tensor("zero_mean_121_cast")]; + tensor zero_mean_sq_121_cast = mul(x = zero_mean_121_cast, y = zero_mean_121_cast)[name = tensor("zero_mean_sq_121_cast")]; + tensor var_4217 = const()[name = tensor("op_4217"), val = tensor([1])]; + tensor var_4218_cast = reduce_mean(axes = var_4217, keep_dims = var_1181, x = zero_mean_sq_121_cast)[name = tensor("op_4218_cast")]; + tensor var_4219_to_fp16 = const()[name = tensor("op_4219_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4220_cast = add(x = var_4218_cast, y = var_4219_to_fp16)[name = tensor("op_4220_cast")]; + tensor denom_121_epsilon_0_to_fp16 = const()[name = tensor("denom_121_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_121_cast = rsqrt(epsilon = denom_121_epsilon_0_to_fp16, x = var_4220_cast)[name = tensor("denom_121_cast")]; + tensor out_121_cast = mul(x = zero_mean_121_cast, y = denom_121_cast)[name = tensor("out_121_cast")]; + tensor var_4224_to_fp16 = const()[name = tensor("op_4224_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(370343296)))]; + tensor var_4225_cast = add(x = out_121_cast, y = var_4224_to_fp16)[name = tensor("op_4225_cast")]; + tensor var_4227_to_fp16 = const()[name = tensor("op_4227_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(370345920)))]; + tensor hidden_states_173_cast = mul(x = var_4225_cast, y = var_4227_to_fp16)[name = tensor("hidden_states_173_cast")]; + tensor var_4234 = const()[name = tensor("op_4234"), val = tensor([1, 1])]; + tensor var_4236 = const()[name = tensor("op_4236"), val = tensor([1, 1])]; + tensor q_81_pad_type_0 = const()[name = tensor("q_81_pad_type_0"), val = tensor("custom")]; + tensor q_81_pad_0 = const()[name = tensor("q_81_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(370348544))), lut = tensor([-0x1.50cp-6, 0x1.51p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_81_cast = conv(dilations = 
var_4236, groups = var_1186, pad = q_81_pad_0, pad_type = q_81_pad_type_0, strides = var_4234, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_173_cast)[name = tensor("q_81_cast")]; + tensor var_4240 = const()[name = tensor("op_4240"), val = tensor([1, 1])]; + tensor var_4242 = const()[name = tensor("op_4242"), val = tensor([1, 1])]; + tensor k_81_pad_type_0 = const()[name = tensor("k_81_pad_type_0"), val = tensor("custom")]; + tensor k_81_pad_0 = const()[name = tensor("k_81_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(370553408))), lut = tensor([-0x1.4fp-6, 0x1.4f8p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_81_cast = conv(dilations = var_4242, groups = var_1186, pad = k_81_pad_0, pad_type = k_81_pad_type_0, strides = var_4240, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_173_cast)[name = tensor("k_81_cast")]; + tensor var_4246 = const()[name = tensor("op_4246"), val = tensor([1, 1])]; + tensor var_4248 = const()[name = tensor("op_4248"), val = tensor([1, 1])]; + tensor v_81_pad_type_0 = const()[name = tensor("v_81_pad_type_0"), val = tensor("custom")]; + tensor v_81_pad_0 = const()[name = tensor("v_81_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(370758272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(371577536))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_81_cast = conv(dilations = var_4248, groups = var_1186, pad = v_81_pad_0, pad_type = v_81_pad_type_0, strides = var_4246, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_173_cast)[name = tensor("v_81_cast")]; + tensor var_4252 = const()[name = tensor("op_4252"), val = tensor([2, 20, 64, -1])]; + tensor var_4253_cast = reshape(shape = var_4252, x = q_81_cast)[name = tensor("op_4253_cast")]; + tensor var_4254 = const()[name = tensor("op_4254"), val = tensor([2, 20, 64, -1])]; + tensor var_4255_cast = reshape(shape = var_4254, x = k_81_cast)[name = tensor("op_4255_cast")]; + tensor var_4256 = const()[name = tensor("op_4256"), val = tensor([2, 20, 64, -1])]; + tensor var_4257_cast = reshape(shape = var_4256, x = v_81_cast)[name = tensor("op_4257_cast")]; + tensor attn_weights_161_transpose_x_0 = const()[name = tensor("attn_weights_161_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_161_transpose_y_0 = const()[name = tensor("attn_weights_161_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_161_cast = matmul(transpose_x = attn_weights_161_transpose_x_0, transpose_y = attn_weights_161_transpose_y_0, x = var_4253_cast, y = var_4255_cast)[name = tensor("attn_weights_161_cast")]; + tensor attn_weights_163_cast = mul(x = attn_weights_161_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_163_cast")]; + tensor var_4261_cast = softmax(axis = var_1170, x = 
attn_weights_163_cast)[name = tensor("op_4261_cast")]; + tensor attn_81_transpose_x_0 = const()[name = tensor("attn_81_transpose_x_0"), val = tensor(false)]; + tensor attn_81_transpose_y_0 = const()[name = tensor("attn_81_transpose_y_0"), val = tensor(true)]; + tensor attn_81_cast = matmul(transpose_x = attn_81_transpose_x_0, transpose_y = attn_81_transpose_y_0, x = var_4257_cast, y = var_4261_cast)[name = tensor("attn_81_cast")]; + tensor var_4265 = const()[name = tensor("op_4265"), val = tensor([2, 1280, 1, -1])]; + tensor input_277_cast = reshape(shape = var_4265, x = attn_81_cast)[name = tensor("input_277_cast")]; + tensor var_4270 = const()[name = tensor("op_4270"), val = tensor([1, 1])]; + tensor var_4272 = const()[name = tensor("op_4272"), val = tensor([1, 1])]; + tensor var_4274_pad_type_0 = const()[name = tensor("op_4274_pad_type_0"), val = tensor("custom")]; + tensor var_4274_pad_0 = const()[name = tensor("op_4274_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(371577664))), lut = tensor([-0x1.2e4p-5, -0x1.6bp-7, 0x1.6acp-7, 0x1.2ep-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(371987328)))]; + tensor var_4274_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_4272, groups = var_1186, pad = var_4274_pad_0, pad_type = var_4274_pad_type_0, strides = var_4270, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_277_cast)[name = tensor("op_4274_cast")]; + tensor inputs_123_cast = add(x = var_4274_cast, y = inputs_121_cast)[name = tensor("inputs_123_cast")]; + tensor var_4278 = const()[name = tensor("op_4278"), val = tensor([1])]; + tensor channels_mean_123_cast = reduce_mean(axes = var_4278, keep_dims = var_1181, x = inputs_123_cast)[name = tensor("channels_mean_123_cast")]; + tensor zero_mean_123_cast = sub(x = inputs_123_cast, y = channels_mean_123_cast)[name = tensor("zero_mean_123_cast")]; + tensor zero_mean_sq_123_cast = mul(x = zero_mean_123_cast, y = zero_mean_123_cast)[name = tensor("zero_mean_sq_123_cast")]; + tensor var_4282 = const()[name = tensor("op_4282"), val = tensor([1])]; + tensor var_4283_cast = reduce_mean(axes = var_4282, keep_dims = var_1181, x = zero_mean_sq_123_cast)[name = tensor("op_4283_cast")]; + tensor var_4284_to_fp16 = const()[name = tensor("op_4284_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4285_cast = add(x = var_4283_cast, y = var_4284_to_fp16)[name = tensor("op_4285_cast")]; + tensor denom_123_epsilon_0_to_fp16 = const()[name = tensor("denom_123_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_123_cast = rsqrt(epsilon = denom_123_epsilon_0_to_fp16, x = var_4285_cast)[name = tensor("denom_123_cast")]; + tensor out_123_cast = mul(x = zero_mean_123_cast, y = denom_123_cast)[name = tensor("out_123_cast")]; + tensor var_4289_to_fp16 = const()[name = tensor("op_4289_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(371989952)))]; + tensor var_4290_cast = add(x = out_123_cast, y = var_4289_to_fp16)[name = tensor("op_4290_cast")]; + tensor var_4292_to_fp16 = const()[name = tensor("op_4292_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(371992576)))]; + tensor hidden_states_175_cast = mul(x = var_4290_cast, y = var_4292_to_fp16)[name = tensor("hidden_states_175_cast")]; + tensor var_4299 = const()[name = tensor("op_4299"), val = tensor([1, 1])]; + tensor var_4301 = const()[name = tensor("op_4301"), val = tensor([1, 1])]; + tensor q_83_pad_type_0 = const()[name = tensor("q_83_pad_type_0"), val = tensor("custom")]; + tensor q_83_pad_0 = const()[name = tensor("q_83_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(371995200))), lut = tensor([-0x1.e5p-7, 0x1.e64p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_83_cast = conv(dilations = var_4301, groups = var_1186, pad = q_83_pad_0, pad_type = q_83_pad_type_0, strides = var_4299, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_175_cast)[name = tensor("q_83_cast")]; + tensor var_4305 = const()[name = tensor("op_4305"), val = tensor([1, 1])]; + tensor var_4307 = const()[name = tensor("op_4307"), val = tensor([1, 1])]; + tensor k_83_pad_type_0 = const()[name = tensor("k_83_pad_type_0"), val = tensor("custom")]; + tensor k_83_pad_0 = const()[name = tensor("k_83_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372200064))), lut = tensor([-0x1.798p-7, 0x1.78p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_83_cast = conv(dilations = var_4307, groups = var_1186, pad = k_83_pad_0, pad_type = k_83_pad_type_0, strides = var_4305, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_83_cast")]; + tensor var_4311 = const()[name = tensor("op_4311"), val = tensor([1, 1])]; + tensor var_4313 = const()[name = tensor("op_4313"), val = tensor([1, 1])]; + tensor v_83_pad_type_0 = const()[name = tensor("v_83_pad_type_0"), val = tensor("custom")]; + tensor v_83_pad_0 = const()[name = tensor("v_83_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372527808))), lut = tensor([-0x1.fb4p-7, 0x1.fap-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_83_cast = conv(dilations = var_4313, groups = var_1186, pad = v_83_pad_0, pad_type = v_83_pad_type_0, strides = var_4311, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_83_cast")]; + tensor var_4317 = const()[name = 
tensor("op_4317"), val = tensor([2, 20, 64, -1])]; + tensor var_4318_cast = reshape(shape = var_4317, x = q_83_cast)[name = tensor("op_4318_cast")]; + tensor var_4319 = const()[name = tensor("op_4319"), val = tensor([2, 20, 64, -1])]; + tensor var_4320_cast = reshape(shape = var_4319, x = k_83_cast)[name = tensor("op_4320_cast")]; + tensor var_4321 = const()[name = tensor("op_4321"), val = tensor([2, 20, 64, -1])]; + tensor var_4322_cast = reshape(shape = var_4321, x = v_83_cast)[name = tensor("op_4322_cast")]; + tensor attn_weights_165_transpose_x_0 = const()[name = tensor("attn_weights_165_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_165_transpose_y_0 = const()[name = tensor("attn_weights_165_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_165_cast = matmul(transpose_x = attn_weights_165_transpose_x_0, transpose_y = attn_weights_165_transpose_y_0, x = var_4318_cast, y = var_4320_cast)[name = tensor("attn_weights_165_cast")]; + tensor attn_weights_167_cast = mul(x = attn_weights_165_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_167_cast")]; + tensor var_4326_cast = softmax(axis = var_1170, x = attn_weights_167_cast)[name = tensor("op_4326_cast")]; + tensor attn_83_transpose_x_0 = const()[name = tensor("attn_83_transpose_x_0"), val = tensor(false)]; + tensor attn_83_transpose_y_0 = const()[name = tensor("attn_83_transpose_y_0"), val = tensor(true)]; + tensor attn_83_cast = matmul(transpose_x = attn_83_transpose_x_0, transpose_y = attn_83_transpose_y_0, x = var_4322_cast, y = var_4326_cast)[name = tensor("attn_83_cast")]; + tensor var_4330 = const()[name = tensor("op_4330"), val = tensor([2, 1280, 1, -1])]; + tensor input_279_cast = reshape(shape = var_4330, x = attn_83_cast)[name = tensor("input_279_cast")]; + tensor var_4335 = const()[name = tensor("op_4335"), val = tensor([1, 1])]; + tensor var_4337 = const()[name = tensor("op_4337"), val = tensor([1, 1])]; + tensor var_4339_pad_type_0 = const()[name = tensor("op_4339_pad_type_0"), val = tensor("custom")]; + tensor var_4339_pad_0 = const()[name = tensor("op_4339_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372855552))), lut = tensor([-0x1.3e8p-7, 0x1.3f4p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(373060416)))]; + tensor var_4339_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_4337, groups = var_1186, pad = var_4339_pad_0, pad_type = var_4339_pad_type_0, strides = var_4335, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_279_cast)[name = tensor("op_4339_cast")]; + tensor inputs_125_cast = add(x = var_4339_cast, y = inputs_123_cast)[name = tensor("inputs_125_cast")]; + tensor var_4343 = const()[name = tensor("op_4343"), val = tensor([1])]; + tensor channels_mean_125_cast = reduce_mean(axes = var_4343, keep_dims = var_1181, x = inputs_125_cast)[name = tensor("channels_mean_125_cast")]; + tensor 
zero_mean_125_cast = sub(x = inputs_125_cast, y = channels_mean_125_cast)[name = tensor("zero_mean_125_cast")]; + tensor zero_mean_sq_125_cast = mul(x = zero_mean_125_cast, y = zero_mean_125_cast)[name = tensor("zero_mean_sq_125_cast")]; + tensor var_4347 = const()[name = tensor("op_4347"), val = tensor([1])]; + tensor var_4348_cast = reduce_mean(axes = var_4347, keep_dims = var_1181, x = zero_mean_sq_125_cast)[name = tensor("op_4348_cast")]; + tensor var_4349_to_fp16 = const()[name = tensor("op_4349_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4350_cast = add(x = var_4348_cast, y = var_4349_to_fp16)[name = tensor("op_4350_cast")]; + tensor denom_125_epsilon_0_to_fp16 = const()[name = tensor("denom_125_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_125_cast = rsqrt(epsilon = denom_125_epsilon_0_to_fp16, x = var_4350_cast)[name = tensor("denom_125_cast")]; + tensor out_125_cast = mul(x = zero_mean_125_cast, y = denom_125_cast)[name = tensor("out_125_cast")]; + tensor var_4354_to_fp16 = const()[name = tensor("op_4354_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(373063040)))]; + tensor var_4355_cast = add(x = out_125_cast, y = var_4354_to_fp16)[name = tensor("op_4355_cast")]; + tensor var_4357_to_fp16 = const()[name = tensor("op_4357_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(373065664)))]; + tensor input_281_cast = mul(x = var_4355_cast, y = var_4357_to_fp16)[name = tensor("input_281_cast")]; + tensor var_4365 = const()[name = tensor("op_4365"), val = tensor([1, 1])]; + tensor var_4367 = const()[name = tensor("op_4367"), val = tensor([1, 1])]; + tensor var_4369_pad_type_0 = const()[name = tensor("op_4369_pad_type_0"), val = tensor("custom")]; + tensor var_4369_pad_0 = const()[name = tensor("op_4369_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(373068288))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(379621952))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(379622080)))]; + tensor var_4369_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_4367, groups = var_1186, pad = var_4369_pad_0, pad_type = var_4369_pad_type_0, strides = var_4365, weight = down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_281_cast)[name = tensor("op_4369_cast")]; + tensor var_4370_split_sizes_0 = const()[name = tensor("op_4370_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4370_axis_0 = const()[name = tensor("op_4370_axis_0"), val = tensor(1)]; + tensor var_4370_cast_0, tensor var_4370_cast_1 = split(axis = var_4370_axis_0, split_sizes = var_4370_split_sizes_0, x = var_4369_cast)[name = tensor("op_4370_cast")]; + tensor var_4372_mode_0 = const()[name = tensor("op_4372_mode_0"), val = tensor("EXACT")]; + tensor var_4372_cast = gelu(mode = var_4372_mode_0, x = 
var_4370_cast_1)[name = tensor("op_4372_cast")]; + tensor input_283_cast = mul(x = var_4370_cast_0, y = var_4372_cast)[name = tensor("input_283_cast")]; + tensor var_4376 = const()[name = tensor("op_4376"), val = tensor([1, 1])]; + tensor var_4378 = const()[name = tensor("op_4378"), val = tensor([1, 1])]; + tensor var_4380_pad_type_0 = const()[name = tensor("op_4380_pad_type_0"), val = tensor("custom")]; + tensor var_4380_pad_0 = const()[name = tensor("op_4380_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(379642624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(382919488))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(382919616)))]; + tensor var_4380_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_4378, groups = var_1186, pad = var_4380_pad_0, pad_type = var_4380_pad_type_0, strides = var_4376, weight = down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_283_cast)[name = tensor("op_4380_cast")]; + tensor inputs_127_cast = add(x = var_4380_cast, y = inputs_125_cast)[name = tensor("inputs_127_cast")]; + tensor var_4390 = const()[name = tensor("op_4390"), val = tensor([1])]; + tensor channels_mean_127_cast = reduce_mean(axes = var_4390, keep_dims = var_1181, x = inputs_127_cast)[name = tensor("channels_mean_127_cast")]; + tensor zero_mean_127_cast = sub(x = inputs_127_cast, y = channels_mean_127_cast)[name = tensor("zero_mean_127_cast")]; + tensor zero_mean_sq_127_cast = mul(x = zero_mean_127_cast, y = zero_mean_127_cast)[name = tensor("zero_mean_sq_127_cast")]; + tensor var_4394 = const()[name = tensor("op_4394"), val = tensor([1])]; + tensor var_4395_cast = reduce_mean(axes = var_4394, keep_dims = var_1181, x = zero_mean_sq_127_cast)[name = tensor("op_4395_cast")]; + tensor var_4396_to_fp16 = const()[name = tensor("op_4396_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4397_cast = add(x = var_4395_cast, y = var_4396_to_fp16)[name = tensor("op_4397_cast")]; + tensor denom_127_epsilon_0_to_fp16 = const()[name = tensor("denom_127_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_127_cast = rsqrt(epsilon = denom_127_epsilon_0_to_fp16, x = var_4397_cast)[name = tensor("denom_127_cast")]; + tensor out_127_cast = mul(x = zero_mean_127_cast, y = denom_127_cast)[name = tensor("out_127_cast")]; + tensor var_4401_to_fp16 = const()[name = tensor("op_4401_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(382922240)))]; + tensor var_4402_cast = add(x = out_127_cast, y = var_4401_to_fp16)[name = tensor("op_4402_cast")]; + tensor var_4404_to_fp16 = const()[name = tensor("op_4404_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(382924864)))]; + tensor hidden_states_179_cast = mul(x = var_4402_cast, y = var_4404_to_fp16)[name = tensor("hidden_states_179_cast")]; + tensor var_4411 = const()[name = 
tensor("op_4411"), val = tensor([1, 1])]; + tensor var_4413 = const()[name = tensor("op_4413"), val = tensor([1, 1])]; + tensor q_85_pad_type_0 = const()[name = tensor("q_85_pad_type_0"), val = tensor("custom")]; + tensor q_85_pad_0 = const()[name = tensor("q_85_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(382927488))), lut = tensor([-0x1.50cp-6, 0x1.52p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_85_cast = conv(dilations = var_4413, groups = var_1186, pad = q_85_pad_0, pad_type = q_85_pad_type_0, strides = var_4411, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_179_cast)[name = tensor("q_85_cast")]; + tensor var_4417 = const()[name = tensor("op_4417"), val = tensor([1, 1])]; + tensor var_4419 = const()[name = tensor("op_4419"), val = tensor([1, 1])]; + tensor k_85_pad_type_0 = const()[name = tensor("k_85_pad_type_0"), val = tensor("custom")]; + tensor k_85_pad_0 = const()[name = tensor("k_85_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(383132352))), lut = tensor([-0x1.4f4p-6, 0x1.4ecp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_85_cast = conv(dilations = var_4419, groups = var_1186, pad = k_85_pad_0, pad_type = k_85_pad_type_0, strides = var_4417, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_179_cast)[name = tensor("k_85_cast")]; + tensor var_4423 = const()[name = tensor("op_4423"), val = tensor([1, 1])]; + tensor var_4425 = const()[name = tensor("op_4425"), val = tensor([1, 1])]; + tensor v_85_pad_type_0 = const()[name = tensor("v_85_pad_type_0"), val = tensor("custom")]; + tensor v_85_pad_0 = const()[name = tensor("v_85_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(383337216))), lut = tensor([-0x1.37p-5, -0x1.75cp-7, 0x1.794p-7, 0x1.374p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_85_cast = conv(dilations = var_4425, groups = var_1186, pad = v_85_pad_0, pad_type = v_85_pad_type_0, strides = var_4423, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_179_cast)[name = tensor("v_85_cast")]; + tensor var_4429 = const()[name = tensor("op_4429"), val = tensor([2, 20, 64, -1])]; + tensor var_4430_cast = reshape(shape = var_4429, x = q_85_cast)[name = tensor("op_4430_cast")]; + tensor var_4431 = const()[name = tensor("op_4431"), val = tensor([2, 20, 64, -1])]; + tensor var_4432_cast = reshape(shape = var_4431, x = k_85_cast)[name = tensor("op_4432_cast")]; + tensor var_4433 = const()[name = tensor("op_4433"), val = tensor([2, 20, 64, -1])]; + tensor var_4434_cast = 
reshape(shape = var_4433, x = v_85_cast)[name = tensor("op_4434_cast")]; + tensor attn_weights_169_transpose_x_0 = const()[name = tensor("attn_weights_169_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_169_transpose_y_0 = const()[name = tensor("attn_weights_169_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_169_cast = matmul(transpose_x = attn_weights_169_transpose_x_0, transpose_y = attn_weights_169_transpose_y_0, x = var_4430_cast, y = var_4432_cast)[name = tensor("attn_weights_169_cast")]; + tensor attn_weights_171_cast = mul(x = attn_weights_169_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_171_cast")]; + tensor var_4438_cast = softmax(axis = var_1170, x = attn_weights_171_cast)[name = tensor("op_4438_cast")]; + tensor attn_85_transpose_x_0 = const()[name = tensor("attn_85_transpose_x_0"), val = tensor(false)]; + tensor attn_85_transpose_y_0 = const()[name = tensor("attn_85_transpose_y_0"), val = tensor(true)]; + tensor attn_85_cast = matmul(transpose_x = attn_85_transpose_x_0, transpose_y = attn_85_transpose_y_0, x = var_4434_cast, y = var_4438_cast)[name = tensor("attn_85_cast")]; + tensor var_4442 = const()[name = tensor("op_4442"), val = tensor([2, 1280, 1, -1])]; + tensor input_285_cast = reshape(shape = var_4442, x = attn_85_cast)[name = tensor("input_285_cast")]; + tensor var_4447 = const()[name = tensor("op_4447"), val = tensor([1, 1])]; + tensor var_4449 = const()[name = tensor("op_4449"), val = tensor([1, 1])]; + tensor var_4451_pad_type_0 = const()[name = tensor("op_4451_pad_type_0"), val = tensor("custom")]; + tensor var_4451_pad_0 = const()[name = tensor("op_4451_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(383746880))), lut = tensor([-0x1.31cp-5, -0x1.6f4p-7, 0x1.704p-7, 0x1.31cp-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(384156544)))]; + tensor var_4451_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_4449, groups = var_1186, pad = var_4451_pad_0, pad_type = var_4451_pad_type_0, strides = var_4447, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_285_cast)[name = tensor("op_4451_cast")]; + tensor inputs_129_cast = add(x = var_4451_cast, y = inputs_127_cast)[name = tensor("inputs_129_cast")]; + tensor var_4455 = const()[name = tensor("op_4455"), val = tensor([1])]; + tensor channels_mean_129_cast = reduce_mean(axes = var_4455, keep_dims = var_1181, x = inputs_129_cast)[name = tensor("channels_mean_129_cast")]; + tensor zero_mean_129_cast = sub(x = inputs_129_cast, y = channels_mean_129_cast)[name = tensor("zero_mean_129_cast")]; + tensor zero_mean_sq_129_cast = mul(x = zero_mean_129_cast, y = zero_mean_129_cast)[name = tensor("zero_mean_sq_129_cast")]; + tensor var_4459 = const()[name = tensor("op_4459"), val = tensor([1])]; + tensor var_4460_cast = reduce_mean(axes = var_4459, keep_dims = var_1181, x = zero_mean_sq_129_cast)[name = 
tensor("op_4460_cast")]; + tensor var_4461_to_fp16 = const()[name = tensor("op_4461_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4462_cast = add(x = var_4460_cast, y = var_4461_to_fp16)[name = tensor("op_4462_cast")]; + tensor denom_129_epsilon_0_to_fp16 = const()[name = tensor("denom_129_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_129_cast = rsqrt(epsilon = denom_129_epsilon_0_to_fp16, x = var_4462_cast)[name = tensor("denom_129_cast")]; + tensor out_129_cast = mul(x = zero_mean_129_cast, y = denom_129_cast)[name = tensor("out_129_cast")]; + tensor var_4466_to_fp16 = const()[name = tensor("op_4466_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(384159168)))]; + tensor var_4467_cast = add(x = out_129_cast, y = var_4466_to_fp16)[name = tensor("op_4467_cast")]; + tensor var_4469_to_fp16 = const()[name = tensor("op_4469_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(384161792)))]; + tensor hidden_states_181_cast = mul(x = var_4467_cast, y = var_4469_to_fp16)[name = tensor("hidden_states_181_cast")]; + tensor var_4476 = const()[name = tensor("op_4476"), val = tensor([1, 1])]; + tensor var_4478 = const()[name = tensor("op_4478"), val = tensor([1, 1])]; + tensor q_87_pad_type_0 = const()[name = tensor("q_87_pad_type_0"), val = tensor("custom")]; + tensor q_87_pad_0 = const()[name = tensor("q_87_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(384164416))), lut = tensor([-0x1.d38p-7, 0x1.d44p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_87_cast = conv(dilations = var_4478, groups = var_1186, pad = q_87_pad_0, pad_type = q_87_pad_type_0, strides = var_4476, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_181_cast)[name = tensor("q_87_cast")]; + tensor var_4482 = const()[name = tensor("op_4482"), val = tensor([1, 1])]; + tensor var_4484 = const()[name = tensor("op_4484"), val = tensor([1, 1])]; + tensor k_87_pad_type_0 = const()[name = tensor("k_87_pad_type_0"), val = tensor("custom")]; + tensor k_87_pad_0 = const()[name = tensor("k_87_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(384369280))), lut = tensor([-0x1.648p-7, 0x1.62cp-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_87_cast = conv(dilations = var_4484, groups = var_1186, pad = k_87_pad_0, pad_type = k_87_pad_type_0, strides = var_4482, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_87_cast")]; + tensor var_4488 = const()[name = tensor("op_4488"), val = tensor([1, 1])]; + tensor var_4490 = const()[name = tensor("op_4490"), val = tensor([1, 1])]; + tensor v_87_pad_type_0 = const()[name = tensor("v_87_pad_type_0"), val = tensor("custom")]; + tensor v_87_pad_0 = const()[name = tensor("v_87_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(384697024))), lut = tensor([-0x1.e88p-7, 0x1.e78p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_87_cast = conv(dilations = var_4490, groups = var_1186, pad = v_87_pad_0, pad_type = v_87_pad_type_0, strides = var_4488, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_87_cast")]; + tensor var_4494 = const()[name = tensor("op_4494"), val = tensor([2, 20, 64, -1])]; + tensor var_4495_cast = reshape(shape = var_4494, x = q_87_cast)[name = tensor("op_4495_cast")]; + tensor var_4496 = const()[name = tensor("op_4496"), val = tensor([2, 20, 64, -1])]; + tensor var_4497_cast = reshape(shape = var_4496, x = k_87_cast)[name = tensor("op_4497_cast")]; + tensor var_4498 = const()[name = tensor("op_4498"), val = tensor([2, 20, 64, -1])]; + tensor var_4499_cast = reshape(shape = var_4498, x = v_87_cast)[name = tensor("op_4499_cast")]; + tensor attn_weights_173_transpose_x_0 = const()[name = tensor("attn_weights_173_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_173_transpose_y_0 = const()[name = tensor("attn_weights_173_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_173_cast = matmul(transpose_x = attn_weights_173_transpose_x_0, transpose_y = attn_weights_173_transpose_y_0, x = var_4495_cast, y = var_4497_cast)[name = tensor("attn_weights_173_cast")]; + tensor attn_weights_175_cast = mul(x = attn_weights_173_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_175_cast")]; + tensor var_4503_cast = softmax(axis = var_1170, x = attn_weights_175_cast)[name = tensor("op_4503_cast")]; + tensor attn_87_transpose_x_0 = const()[name = tensor("attn_87_transpose_x_0"), val = tensor(false)]; + tensor attn_87_transpose_y_0 = const()[name = tensor("attn_87_transpose_y_0"), val = tensor(true)]; + tensor attn_87_cast = matmul(transpose_x = attn_87_transpose_x_0, transpose_y = attn_87_transpose_y_0, x = var_4499_cast, y = var_4503_cast)[name = tensor("attn_87_cast")]; + tensor var_4507 = const()[name = tensor("op_4507"), val = tensor([2, 1280, 1, -1])]; + tensor input_287_cast = reshape(shape = var_4507, x = attn_87_cast)[name = tensor("input_287_cast")]; + tensor var_4512 = const()[name = tensor("op_4512"), val = tensor([1, 1])]; + tensor var_4514 = const()[name = tensor("op_4514"), val = tensor([1, 1])]; + tensor var_4516_pad_type_0 = const()[name = tensor("op_4516_pad_type_0"), val = tensor("custom")]; + tensor var_4516_pad_0 = const()[name = tensor("op_4516_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(385024768))), lut = tensor([-0x1.354p-7, 0x1.37p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(385229632)))]; + 
tensor var_4516_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_4514, groups = var_1186, pad = var_4516_pad_0, pad_type = var_4516_pad_type_0, strides = var_4512, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_287_cast)[name = tensor("op_4516_cast")]; + tensor inputs_131_cast = add(x = var_4516_cast, y = inputs_129_cast)[name = tensor("inputs_131_cast")]; + tensor var_4520 = const()[name = tensor("op_4520"), val = tensor([1])]; + tensor channels_mean_131_cast = reduce_mean(axes = var_4520, keep_dims = var_1181, x = inputs_131_cast)[name = tensor("channels_mean_131_cast")]; + tensor zero_mean_131_cast = sub(x = inputs_131_cast, y = channels_mean_131_cast)[name = tensor("zero_mean_131_cast")]; + tensor zero_mean_sq_131_cast = mul(x = zero_mean_131_cast, y = zero_mean_131_cast)[name = tensor("zero_mean_sq_131_cast")]; + tensor var_4524 = const()[name = tensor("op_4524"), val = tensor([1])]; + tensor var_4525_cast = reduce_mean(axes = var_4524, keep_dims = var_1181, x = zero_mean_sq_131_cast)[name = tensor("op_4525_cast")]; + tensor var_4526_to_fp16 = const()[name = tensor("op_4526_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4527_cast = add(x = var_4525_cast, y = var_4526_to_fp16)[name = tensor("op_4527_cast")]; + tensor denom_131_epsilon_0_to_fp16 = const()[name = tensor("denom_131_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_131_cast = rsqrt(epsilon = denom_131_epsilon_0_to_fp16, x = var_4527_cast)[name = tensor("denom_131_cast")]; + tensor out_131_cast = mul(x = zero_mean_131_cast, y = denom_131_cast)[name = tensor("out_131_cast")]; + tensor var_4531_to_fp16 = const()[name = tensor("op_4531_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(385232256)))]; + tensor var_4532_cast = add(x = out_131_cast, y = var_4531_to_fp16)[name = tensor("op_4532_cast")]; + tensor var_4534_to_fp16 = const()[name = tensor("op_4534_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(385234880)))]; + tensor input_289_cast = mul(x = var_4532_cast, y = var_4534_to_fp16)[name = tensor("input_289_cast")]; + tensor var_4542 = const()[name = tensor("op_4542"), val = tensor([1, 1])]; + tensor var_4544 = const()[name = tensor("op_4544"), val = tensor([1, 1])]; + tensor var_4546_pad_type_0 = const()[name = tensor("op_4546_pad_type_0"), val = tensor("custom")]; + tensor var_4546_pad_0 = const()[name = tensor("op_4546_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(385237504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(391791168))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(391791296)))]; + tensor var_4546_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_4544, groups = var_1186, pad = var_4546_pad_0, pad_type = 
var_4546_pad_type_0, strides = var_4542, weight = down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_289_cast)[name = tensor("op_4546_cast")]; + tensor var_4547_split_sizes_0 = const()[name = tensor("op_4547_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4547_axis_0 = const()[name = tensor("op_4547_axis_0"), val = tensor(1)]; + tensor var_4547_cast_0, tensor var_4547_cast_1 = split(axis = var_4547_axis_0, split_sizes = var_4547_split_sizes_0, x = var_4546_cast)[name = tensor("op_4547_cast")]; + tensor var_4549_mode_0 = const()[name = tensor("op_4549_mode_0"), val = tensor("EXACT")]; + tensor var_4549_cast = gelu(mode = var_4549_mode_0, x = var_4547_cast_1)[name = tensor("op_4549_cast")]; + tensor input_291_cast = mul(x = var_4547_cast_0, y = var_4549_cast)[name = tensor("input_291_cast")]; + tensor var_4553 = const()[name = tensor("op_4553"), val = tensor([1, 1])]; + tensor var_4555 = const()[name = tensor("op_4555"), val = tensor([1, 1])]; + tensor var_4557_pad_type_0 = const()[name = tensor("op_4557_pad_type_0"), val = tensor("custom")]; + tensor var_4557_pad_0 = const()[name = tensor("op_4557_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(391811840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(395088704))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(395088832)))]; + tensor var_4557_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_4555, groups = var_1186, pad = var_4557_pad_0, pad_type = var_4557_pad_type_0, strides = var_4553, weight = down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_291_cast)[name = tensor("op_4557_cast")]; + tensor inputs_133_cast = add(x = var_4557_cast, y = inputs_131_cast)[name = tensor("inputs_133_cast")]; + tensor var_4567 = const()[name = tensor("op_4567"), val = tensor([1])]; + tensor channels_mean_133_cast = reduce_mean(axes = var_4567, keep_dims = var_1181, x = inputs_133_cast)[name = tensor("channels_mean_133_cast")]; + tensor zero_mean_133_cast = sub(x = inputs_133_cast, y = channels_mean_133_cast)[name = tensor("zero_mean_133_cast")]; + tensor zero_mean_sq_133_cast = mul(x = zero_mean_133_cast, y = zero_mean_133_cast)[name = tensor("zero_mean_sq_133_cast")]; + tensor var_4571 = const()[name = tensor("op_4571"), val = tensor([1])]; + tensor var_4572_cast = reduce_mean(axes = var_4571, keep_dims = var_1181, x = zero_mean_sq_133_cast)[name = tensor("op_4572_cast")]; + tensor var_4573_to_fp16 = const()[name = tensor("op_4573_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4574_cast = add(x = var_4572_cast, y = var_4573_to_fp16)[name = tensor("op_4574_cast")]; + tensor denom_133_epsilon_0_to_fp16 = const()[name = tensor("denom_133_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_133_cast = rsqrt(epsilon = denom_133_epsilon_0_to_fp16, x = var_4574_cast)[name = tensor("denom_133_cast")]; + tensor 
out_133_cast = mul(x = zero_mean_133_cast, y = denom_133_cast)[name = tensor("out_133_cast")]; + tensor var_4578_to_fp16 = const()[name = tensor("op_4578_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(395091456)))]; + tensor var_4579_cast = add(x = out_133_cast, y = var_4578_to_fp16)[name = tensor("op_4579_cast")]; + tensor var_4581_to_fp16 = const()[name = tensor("op_4581_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(395094080)))]; + tensor hidden_states_185_cast = mul(x = var_4579_cast, y = var_4581_to_fp16)[name = tensor("hidden_states_185_cast")]; + tensor var_4588 = const()[name = tensor("op_4588"), val = tensor([1, 1])]; + tensor var_4590 = const()[name = tensor("op_4590"), val = tensor([1, 1])]; + tensor q_89_pad_type_0 = const()[name = tensor("q_89_pad_type_0"), val = tensor("custom")]; + tensor q_89_pad_0 = const()[name = tensor("q_89_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(395096704))), lut = tensor([-0x1.544p-6, 0x1.54p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_89_cast = conv(dilations = var_4590, groups = var_1186, pad = q_89_pad_0, pad_type = q_89_pad_type_0, strides = var_4588, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_185_cast)[name = tensor("q_89_cast")]; + tensor var_4594 = const()[name = tensor("op_4594"), val = tensor([1, 1])]; + tensor var_4596 = const()[name = tensor("op_4596"), val = tensor([1, 1])]; + tensor k_89_pad_type_0 = const()[name = tensor("k_89_pad_type_0"), val = tensor("custom")]; + tensor k_89_pad_0 = const()[name = tensor("k_89_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(395301568))), lut = tensor([-0x1.53p-6, 0x1.51cp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_89_cast = conv(dilations = var_4596, groups = var_1186, pad = k_89_pad_0, pad_type = k_89_pad_type_0, strides = var_4594, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_185_cast)[name = tensor("k_89_cast")]; + tensor var_4600 = const()[name = tensor("op_4600"), val = tensor([1, 1])]; + tensor var_4602 = const()[name = tensor("op_4602"), val = tensor([1, 1])]; + tensor v_89_pad_type_0 = const()[name = tensor("v_89_pad_type_0"), val = tensor("custom")]; + tensor v_89_pad_0 = const()[name = tensor("v_89_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(395506432))), lut = tensor([-0x1.3e4p-5, -0x1.7ecp-7, 0x1.82p-7, 0x1.3f4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_89_cast = conv(dilations = var_4602, groups = var_1186, pad = 
v_89_pad_0, pad_type = v_89_pad_type_0, strides = var_4600, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_185_cast)[name = tensor("v_89_cast")]; + tensor var_4606 = const()[name = tensor("op_4606"), val = tensor([2, 20, 64, -1])]; + tensor var_4607_cast = reshape(shape = var_4606, x = q_89_cast)[name = tensor("op_4607_cast")]; + tensor var_4608 = const()[name = tensor("op_4608"), val = tensor([2, 20, 64, -1])]; + tensor var_4609_cast = reshape(shape = var_4608, x = k_89_cast)[name = tensor("op_4609_cast")]; + tensor var_4610 = const()[name = tensor("op_4610"), val = tensor([2, 20, 64, -1])]; + tensor var_4611_cast = reshape(shape = var_4610, x = v_89_cast)[name = tensor("op_4611_cast")]; + tensor attn_weights_177_transpose_x_0 = const()[name = tensor("attn_weights_177_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_177_transpose_y_0 = const()[name = tensor("attn_weights_177_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_177_cast = matmul(transpose_x = attn_weights_177_transpose_x_0, transpose_y = attn_weights_177_transpose_y_0, x = var_4607_cast, y = var_4609_cast)[name = tensor("attn_weights_177_cast")]; + tensor attn_weights_179_cast = mul(x = attn_weights_177_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_179_cast")]; + tensor var_4615_cast = softmax(axis = var_1170, x = attn_weights_179_cast)[name = tensor("op_4615_cast")]; + tensor attn_89_transpose_x_0 = const()[name = tensor("attn_89_transpose_x_0"), val = tensor(false)]; + tensor attn_89_transpose_y_0 = const()[name = tensor("attn_89_transpose_y_0"), val = tensor(true)]; + tensor attn_89_cast = matmul(transpose_x = attn_89_transpose_x_0, transpose_y = attn_89_transpose_y_0, x = var_4611_cast, y = var_4615_cast)[name = tensor("attn_89_cast")]; + tensor var_4619 = const()[name = tensor("op_4619"), val = tensor([2, 1280, 1, -1])]; + tensor input_293_cast = reshape(shape = var_4619, x = attn_89_cast)[name = tensor("input_293_cast")]; + tensor var_4624 = const()[name = tensor("op_4624"), val = tensor([1, 1])]; + tensor var_4626 = const()[name = tensor("op_4626"), val = tensor([1, 1])]; + tensor var_4628_pad_type_0 = const()[name = tensor("op_4628_pad_type_0"), val = tensor("custom")]; + tensor var_4628_pad_0 = const()[name = tensor("op_4628_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(395916096))), lut = tensor([-0x1.378p-5, -0x1.74p-7, 0x1.7bcp-7, 0x1.394p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(396325760)))]; + tensor var_4628_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_4626, groups = var_1186, pad = var_4628_pad_0, pad_type = var_4628_pad_type_0, strides = var_4624, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_293_cast)[name = tensor("op_4628_cast")]; + tensor inputs_135_cast = add(x = var_4628_cast, y = 
inputs_133_cast)[name = tensor("inputs_135_cast")]; + tensor var_4632 = const()[name = tensor("op_4632"), val = tensor([1])]; + tensor channels_mean_135_cast = reduce_mean(axes = var_4632, keep_dims = var_1181, x = inputs_135_cast)[name = tensor("channels_mean_135_cast")]; + tensor zero_mean_135_cast = sub(x = inputs_135_cast, y = channels_mean_135_cast)[name = tensor("zero_mean_135_cast")]; + tensor zero_mean_sq_135_cast = mul(x = zero_mean_135_cast, y = zero_mean_135_cast)[name = tensor("zero_mean_sq_135_cast")]; + tensor var_4636 = const()[name = tensor("op_4636"), val = tensor([1])]; + tensor var_4637_cast = reduce_mean(axes = var_4636, keep_dims = var_1181, x = zero_mean_sq_135_cast)[name = tensor("op_4637_cast")]; + tensor var_4638_to_fp16 = const()[name = tensor("op_4638_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4639_cast = add(x = var_4637_cast, y = var_4638_to_fp16)[name = tensor("op_4639_cast")]; + tensor denom_135_epsilon_0_to_fp16 = const()[name = tensor("denom_135_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_135_cast = rsqrt(epsilon = denom_135_epsilon_0_to_fp16, x = var_4639_cast)[name = tensor("denom_135_cast")]; + tensor out_135_cast = mul(x = zero_mean_135_cast, y = denom_135_cast)[name = tensor("out_135_cast")]; + tensor var_4643_to_fp16 = const()[name = tensor("op_4643_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(396328384)))]; + tensor var_4644_cast = add(x = out_135_cast, y = var_4643_to_fp16)[name = tensor("op_4644_cast")]; + tensor var_4646_to_fp16 = const()[name = tensor("op_4646_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(396331008)))]; + tensor hidden_states_187_cast = mul(x = var_4644_cast, y = var_4646_to_fp16)[name = tensor("hidden_states_187_cast")]; + tensor var_4653 = const()[name = tensor("op_4653"), val = tensor([1, 1])]; + tensor var_4655 = const()[name = tensor("op_4655"), val = tensor([1, 1])]; + tensor q_91_pad_type_0 = const()[name = tensor("q_91_pad_type_0"), val = tensor("custom")]; + tensor q_91_pad_0 = const()[name = tensor("q_91_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(396333632))), lut = tensor([-0x1.cd4p-7, 0x1.cf4p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_91_cast = conv(dilations = var_4655, groups = var_1186, pad = q_91_pad_0, pad_type = q_91_pad_type_0, strides = var_4653, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_187_cast)[name = tensor("q_91_cast")]; + tensor var_4659 = const()[name = tensor("op_4659"), val = tensor([1, 1])]; + tensor var_4661 = const()[name = tensor("op_4661"), val = tensor([1, 1])]; + tensor k_91_pad_type_0 = const()[name = tensor("k_91_pad_type_0"), val = tensor("custom")]; + tensor k_91_pad_0 = const()[name = tensor("k_91_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(396538496))), lut = tensor([-0x1.584p-7, 0x1.568p-7]), name = 
tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_91_cast = conv(dilations = var_4661, groups = var_1186, pad = k_91_pad_0, pad_type = k_91_pad_type_0, strides = var_4659, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_91_cast")]; + tensor var_4665 = const()[name = tensor("op_4665"), val = tensor([1, 1])]; + tensor var_4667 = const()[name = tensor("op_4667"), val = tensor([1, 1])]; + tensor v_91_pad_type_0 = const()[name = tensor("v_91_pad_type_0"), val = tensor("custom")]; + tensor v_91_pad_0 = const()[name = tensor("v_91_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(396866240))), lut = tensor([-0x1.ce8p-7, 0x1.cd8p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_91_cast = conv(dilations = var_4667, groups = var_1186, pad = v_91_pad_0, pad_type = v_91_pad_type_0, strides = var_4665, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_91_cast")]; + tensor var_4671 = const()[name = tensor("op_4671"), val = tensor([2, 20, 64, -1])]; + tensor var_4672_cast = reshape(shape = var_4671, x = q_91_cast)[name = tensor("op_4672_cast")]; + tensor var_4673 = const()[name = tensor("op_4673"), val = tensor([2, 20, 64, -1])]; + tensor var_4674_cast = reshape(shape = var_4673, x = k_91_cast)[name = tensor("op_4674_cast")]; + tensor var_4675 = const()[name = tensor("op_4675"), val = tensor([2, 20, 64, -1])]; + tensor var_4676_cast = reshape(shape = var_4675, x = v_91_cast)[name = tensor("op_4676_cast")]; + tensor attn_weights_181_transpose_x_0 = const()[name = tensor("attn_weights_181_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_181_transpose_y_0 = const()[name = tensor("attn_weights_181_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_181_cast = matmul(transpose_x = attn_weights_181_transpose_x_0, transpose_y = attn_weights_181_transpose_y_0, x = var_4672_cast, y = var_4674_cast)[name = tensor("attn_weights_181_cast")]; + tensor attn_weights_183_cast = mul(x = attn_weights_181_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_183_cast")]; + tensor var_4680_cast = softmax(axis = var_1170, x = attn_weights_183_cast)[name = tensor("op_4680_cast")]; + tensor attn_91_transpose_x_0 = const()[name = tensor("attn_91_transpose_x_0"), val = tensor(false)]; + tensor attn_91_transpose_y_0 = const()[name = tensor("attn_91_transpose_y_0"), val = tensor(true)]; + tensor attn_91_cast = matmul(transpose_x = attn_91_transpose_x_0, transpose_y = attn_91_transpose_y_0, x = var_4676_cast, y = var_4680_cast)[name = tensor("attn_91_cast")]; + tensor var_4684 = const()[name = tensor("op_4684"), val = tensor([2, 1280, 1, -1])]; + tensor input_295_cast = reshape(shape = var_4684, x = attn_91_cast)[name = tensor("input_295_cast")]; + tensor var_4689 = const()[name = tensor("op_4689"), val = tensor([1, 1])]; + tensor var_4691 = const()[name = tensor("op_4691"), val = tensor([1, 1])]; + tensor var_4693_pad_type_0 = const()[name = tensor("op_4693_pad_type_0"), val = tensor("custom")]; + tensor var_4693_pad_0 = const()[name = 
tensor("op_4693_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(397193984))), lut = tensor([-0x1.23p-7, 0x1.25p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(397398848)))]; + tensor var_4693_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_4691, groups = var_1186, pad = var_4693_pad_0, pad_type = var_4693_pad_type_0, strides = var_4689, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_295_cast)[name = tensor("op_4693_cast")]; + tensor inputs_137_cast = add(x = var_4693_cast, y = inputs_135_cast)[name = tensor("inputs_137_cast")]; + tensor var_4697 = const()[name = tensor("op_4697"), val = tensor([1])]; + tensor channels_mean_137_cast = reduce_mean(axes = var_4697, keep_dims = var_1181, x = inputs_137_cast)[name = tensor("channels_mean_137_cast")]; + tensor zero_mean_137_cast = sub(x = inputs_137_cast, y = channels_mean_137_cast)[name = tensor("zero_mean_137_cast")]; + tensor zero_mean_sq_137_cast = mul(x = zero_mean_137_cast, y = zero_mean_137_cast)[name = tensor("zero_mean_sq_137_cast")]; + tensor var_4701 = const()[name = tensor("op_4701"), val = tensor([1])]; + tensor var_4702_cast = reduce_mean(axes = var_4701, keep_dims = var_1181, x = zero_mean_sq_137_cast)[name = tensor("op_4702_cast")]; + tensor var_4703_to_fp16 = const()[name = tensor("op_4703_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4704_cast = add(x = var_4702_cast, y = var_4703_to_fp16)[name = tensor("op_4704_cast")]; + tensor denom_137_epsilon_0_to_fp16 = const()[name = tensor("denom_137_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_137_cast = rsqrt(epsilon = denom_137_epsilon_0_to_fp16, x = var_4704_cast)[name = tensor("denom_137_cast")]; + tensor out_137_cast = mul(x = zero_mean_137_cast, y = denom_137_cast)[name = tensor("out_137_cast")]; + tensor var_4708_to_fp16 = const()[name = tensor("op_4708_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(397401472)))]; + tensor var_4709_cast = add(x = out_137_cast, y = var_4708_to_fp16)[name = tensor("op_4709_cast")]; + tensor var_4711_to_fp16 = const()[name = tensor("op_4711_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(397404096)))]; + tensor input_297_cast = mul(x = var_4709_cast, y = var_4711_to_fp16)[name = tensor("input_297_cast")]; + tensor var_4719 = const()[name = tensor("op_4719"), val = tensor([1, 1])]; + tensor var_4721 = const()[name = tensor("op_4721"), val = tensor([1, 1])]; + tensor var_4723_pad_type_0 = const()[name = tensor("op_4723_pad_type_0"), val = tensor("custom")]; + tensor var_4723_pad_0 = const()[name = tensor("op_4723_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(397406720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(403960384))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(403960512)))]; + tensor var_4723_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_4721, groups = var_1186, pad = var_4723_pad_0, pad_type = var_4723_pad_type_0, strides = var_4719, weight = down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_297_cast)[name = tensor("op_4723_cast")]; + tensor var_4724_split_sizes_0 = const()[name = tensor("op_4724_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4724_axis_0 = const()[name = tensor("op_4724_axis_0"), val = tensor(1)]; + tensor var_4724_cast_0, tensor var_4724_cast_1 = split(axis = var_4724_axis_0, split_sizes = var_4724_split_sizes_0, x = var_4723_cast)[name = tensor("op_4724_cast")]; + tensor var_4726_mode_0 = const()[name = tensor("op_4726_mode_0"), val = tensor("EXACT")]; + tensor var_4726_cast = gelu(mode = var_4726_mode_0, x = var_4724_cast_1)[name = tensor("op_4726_cast")]; + tensor input_299_cast = mul(x = var_4724_cast_0, y = var_4726_cast)[name = tensor("input_299_cast")]; + tensor var_4730 = const()[name = tensor("op_4730"), val = tensor([1, 1])]; + tensor var_4732 = const()[name = tensor("op_4732"), val = tensor([1, 1])]; + tensor var_4734_pad_type_0 = const()[name = tensor("op_4734_pad_type_0"), val = tensor("custom")]; + tensor var_4734_pad_0 = const()[name = tensor("op_4734_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(403981056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(407257920))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(407258048)))]; + tensor var_4734_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_4732, groups = var_1186, pad = var_4734_pad_0, pad_type = var_4734_pad_type_0, strides = var_4730, weight = down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_299_cast)[name = tensor("op_4734_cast")]; + tensor inputs_139_cast = add(x = var_4734_cast, y = inputs_137_cast)[name = tensor("inputs_139_cast")]; + tensor var_4744 = const()[name = tensor("op_4744"), val = tensor([1])]; + tensor channels_mean_139_cast = reduce_mean(axes = var_4744, keep_dims = var_1181, x = inputs_139_cast)[name = tensor("channels_mean_139_cast")]; + tensor zero_mean_139_cast = sub(x = inputs_139_cast, y = channels_mean_139_cast)[name = 
tensor("zero_mean_139_cast")]; + tensor zero_mean_sq_139_cast = mul(x = zero_mean_139_cast, y = zero_mean_139_cast)[name = tensor("zero_mean_sq_139_cast")]; + tensor var_4748 = const()[name = tensor("op_4748"), val = tensor([1])]; + tensor var_4749_cast = reduce_mean(axes = var_4748, keep_dims = var_1181, x = zero_mean_sq_139_cast)[name = tensor("op_4749_cast")]; + tensor var_4750_to_fp16 = const()[name = tensor("op_4750_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4751_cast = add(x = var_4749_cast, y = var_4750_to_fp16)[name = tensor("op_4751_cast")]; + tensor denom_139_epsilon_0_to_fp16 = const()[name = tensor("denom_139_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_139_cast = rsqrt(epsilon = denom_139_epsilon_0_to_fp16, x = var_4751_cast)[name = tensor("denom_139_cast")]; + tensor out_139_cast = mul(x = zero_mean_139_cast, y = denom_139_cast)[name = tensor("out_139_cast")]; + tensor var_4755_to_fp16 = const()[name = tensor("op_4755_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(407260672)))]; + tensor var_4756_cast = add(x = out_139_cast, y = var_4755_to_fp16)[name = tensor("op_4756_cast")]; + tensor var_4758_to_fp16 = const()[name = tensor("op_4758_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(407263296)))]; + tensor hidden_states_191_cast = mul(x = var_4756_cast, y = var_4758_to_fp16)[name = tensor("hidden_states_191_cast")]; + tensor var_4765 = const()[name = tensor("op_4765"), val = tensor([1, 1])]; + tensor var_4767 = const()[name = tensor("op_4767"), val = tensor([1, 1])]; + tensor q_93_pad_type_0 = const()[name = tensor("q_93_pad_type_0"), val = tensor("custom")]; + tensor q_93_pad_0 = const()[name = tensor("q_93_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(407265920))), lut = tensor([-0x1.4a4p-5, -0x1.8b8p-7, 0x1.90cp-7, 0x1.4b4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_93_cast = conv(dilations = var_4767, groups = var_1186, pad = q_93_pad_0, pad_type = q_93_pad_type_0, strides = var_4765, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_191_cast)[name = tensor("q_93_cast")]; + tensor var_4771 = const()[name = tensor("op_4771"), val = tensor([1, 1])]; + tensor var_4773 = const()[name = tensor("op_4773"), val = tensor([1, 1])]; + tensor k_93_pad_type_0 = const()[name = tensor("k_93_pad_type_0"), val = tensor("custom")]; + tensor k_93_pad_0 = const()[name = tensor("k_93_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(407675584))), lut = tensor([-0x1.474p-5, -0x1.8a4p-7, 0x1.898p-7, 0x1.474p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_93_cast = conv(dilations = var_4773, groups = var_1186, pad = k_93_pad_0, pad_type = k_93_pad_type_0, strides = var_4771, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = 
hidden_states_191_cast)[name = tensor("k_93_cast")]; + tensor var_4777 = const()[name = tensor("op_4777"), val = tensor([1, 1])]; + tensor var_4779 = const()[name = tensor("op_4779"), val = tensor([1, 1])]; + tensor v_93_pad_type_0 = const()[name = tensor("v_93_pad_type_0"), val = tensor("custom")]; + tensor v_93_pad_0 = const()[name = tensor("v_93_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(408085248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(408904512))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_93_cast = conv(dilations = var_4779, groups = var_1186, pad = v_93_pad_0, pad_type = v_93_pad_type_0, strides = var_4777, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_191_cast)[name = tensor("v_93_cast")]; + tensor var_4783 = const()[name = tensor("op_4783"), val = tensor([2, 20, 64, -1])]; + tensor var_4784_cast = reshape(shape = var_4783, x = q_93_cast)[name = tensor("op_4784_cast")]; + tensor var_4785 = const()[name = tensor("op_4785"), val = tensor([2, 20, 64, -1])]; + tensor var_4786_cast = reshape(shape = var_4785, x = k_93_cast)[name = tensor("op_4786_cast")]; + tensor var_4787 = const()[name = tensor("op_4787"), val = tensor([2, 20, 64, -1])]; + tensor var_4788_cast = reshape(shape = var_4787, x = v_93_cast)[name = tensor("op_4788_cast")]; + tensor attn_weights_185_transpose_x_0 = const()[name = tensor("attn_weights_185_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_185_transpose_y_0 = const()[name = tensor("attn_weights_185_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_185_cast = matmul(transpose_x = attn_weights_185_transpose_x_0, transpose_y = attn_weights_185_transpose_y_0, x = var_4784_cast, y = var_4786_cast)[name = tensor("attn_weights_185_cast")]; + tensor attn_weights_187_cast = mul(x = attn_weights_185_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_187_cast")]; + tensor var_4792_cast = softmax(axis = var_1170, x = attn_weights_187_cast)[name = tensor("op_4792_cast")]; + tensor attn_93_transpose_x_0 = const()[name = tensor("attn_93_transpose_x_0"), val = tensor(false)]; + tensor attn_93_transpose_y_0 = const()[name = tensor("attn_93_transpose_y_0"), val = tensor(true)]; + tensor attn_93_cast = matmul(transpose_x = attn_93_transpose_x_0, transpose_y = attn_93_transpose_y_0, x = var_4788_cast, y = var_4792_cast)[name = tensor("attn_93_cast")]; + tensor var_4796 = const()[name = tensor("op_4796"), val = tensor([2, 1280, 1, -1])]; + tensor input_301_cast = reshape(shape = var_4796, x = attn_93_cast)[name = tensor("input_301_cast")]; + tensor var_4801 = const()[name = tensor("op_4801"), val = tensor([1, 1])]; + tensor var_4803 = const()[name = tensor("op_4803"), val = tensor([1, 1])]; + tensor var_4805_pad_type_0 = const()[name = tensor("op_4805_pad_type_0"), val = tensor("custom")]; + tensor var_4805_pad_0 = const()[name = tensor("op_4805_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(408904640))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(409723904))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(409724032)))]; + tensor var_4805_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_4803, groups = var_1186, pad = var_4805_pad_0, pad_type = var_4805_pad_type_0, strides = var_4801, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_301_cast)[name = tensor("op_4805_cast")]; + tensor inputs_141_cast = add(x = var_4805_cast, y = inputs_139_cast)[name = tensor("inputs_141_cast")]; + tensor var_4809 = const()[name = tensor("op_4809"), val = tensor([1])]; + tensor channels_mean_141_cast = reduce_mean(axes = var_4809, keep_dims = var_1181, x = inputs_141_cast)[name = tensor("channels_mean_141_cast")]; + tensor zero_mean_141_cast = sub(x = inputs_141_cast, y = channels_mean_141_cast)[name = tensor("zero_mean_141_cast")]; + tensor zero_mean_sq_141_cast = mul(x = zero_mean_141_cast, y = zero_mean_141_cast)[name = tensor("zero_mean_sq_141_cast")]; + tensor var_4813 = const()[name = tensor("op_4813"), val = tensor([1])]; + tensor var_4814_cast = reduce_mean(axes = var_4813, keep_dims = var_1181, x = zero_mean_sq_141_cast)[name = tensor("op_4814_cast")]; + tensor var_4815_to_fp16 = const()[name = tensor("op_4815_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4816_cast = add(x = var_4814_cast, y = var_4815_to_fp16)[name = tensor("op_4816_cast")]; + tensor denom_141_epsilon_0_to_fp16 = const()[name = tensor("denom_141_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_141_cast = rsqrt(epsilon = denom_141_epsilon_0_to_fp16, x = var_4816_cast)[name = tensor("denom_141_cast")]; + tensor out_141_cast = mul(x = zero_mean_141_cast, y = denom_141_cast)[name = tensor("out_141_cast")]; + tensor var_4820_to_fp16 = const()[name = tensor("op_4820_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(409726656)))]; + tensor var_4821_cast = add(x = out_141_cast, y = var_4820_to_fp16)[name = tensor("op_4821_cast")]; + tensor var_4823_to_fp16 = const()[name = tensor("op_4823_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(409729280)))]; + tensor hidden_states_193_cast = mul(x = var_4821_cast, y = var_4823_to_fp16)[name = tensor("hidden_states_193_cast")]; + tensor var_4830 = const()[name = tensor("op_4830"), val = tensor([1, 1])]; + tensor var_4832 = const()[name = tensor("op_4832"), val = tensor([1, 1])]; + tensor q_95_pad_type_0 = const()[name = tensor("q_95_pad_type_0"), val = tensor("custom")]; + tensor q_95_pad_0 = const()[name = tensor("q_95_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(409731904))), lut = tensor([-0x1.a64p-7, 0x1.a5cp-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor 
q_95_cast = conv(dilations = var_4832, groups = var_1186, pad = q_95_pad_0, pad_type = q_95_pad_type_0, strides = var_4830, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_193_cast)[name = tensor("q_95_cast")]; + tensor var_4836 = const()[name = tensor("op_4836"), val = tensor([1, 1])]; + tensor var_4838 = const()[name = tensor("op_4838"), val = tensor([1, 1])]; + tensor k_95_pad_type_0 = const()[name = tensor("k_95_pad_type_0"), val = tensor("custom")]; + tensor k_95_pad_0 = const()[name = tensor("k_95_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(409936768))), lut = tensor([-0x1.27cp-7, 0x1.288p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_95_cast = conv(dilations = var_4838, groups = var_1186, pad = k_95_pad_0, pad_type = k_95_pad_type_0, strides = var_4836, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_95_cast")]; + tensor var_4842 = const()[name = tensor("op_4842"), val = tensor([1, 1])]; + tensor var_4844 = const()[name = tensor("op_4844"), val = tensor([1, 1])]; + tensor v_95_pad_type_0 = const()[name = tensor("v_95_pad_type_0"), val = tensor("custom")]; + tensor v_95_pad_0 = const()[name = tensor("v_95_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410264512))), lut = tensor([-0x1.77cp-7, 0x1.79p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_95_cast = conv(dilations = var_4844, groups = var_1186, pad = v_95_pad_0, pad_type = v_95_pad_type_0, strides = var_4842, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_95_cast")]; + tensor var_4848 = const()[name = tensor("op_4848"), val = tensor([2, 20, 64, -1])]; + tensor var_4849_cast = reshape(shape = var_4848, x = q_95_cast)[name = tensor("op_4849_cast")]; + tensor var_4850 = const()[name = tensor("op_4850"), val = tensor([2, 20, 64, -1])]; + tensor var_4851_cast = reshape(shape = var_4850, x = k_95_cast)[name = tensor("op_4851_cast")]; + tensor var_4852 = const()[name = tensor("op_4852"), val = tensor([2, 20, 64, -1])]; + tensor var_4853_cast = reshape(shape = var_4852, x = v_95_cast)[name = tensor("op_4853_cast")]; + tensor attn_weights_189_transpose_x_0 = const()[name = tensor("attn_weights_189_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_189_transpose_y_0 = const()[name = tensor("attn_weights_189_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_189_cast = matmul(transpose_x = attn_weights_189_transpose_x_0, transpose_y = attn_weights_189_transpose_y_0, x = var_4849_cast, y = var_4851_cast)[name = tensor("attn_weights_189_cast")]; + tensor attn_weights_191_cast = mul(x = attn_weights_189_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_191_cast")]; + tensor var_4857_cast = softmax(axis = var_1170, x = attn_weights_191_cast)[name = 
tensor("op_4857_cast")]; + tensor attn_95_transpose_x_0 = const()[name = tensor("attn_95_transpose_x_0"), val = tensor(false)]; + tensor attn_95_transpose_y_0 = const()[name = tensor("attn_95_transpose_y_0"), val = tensor(true)]; + tensor attn_95_cast = matmul(transpose_x = attn_95_transpose_x_0, transpose_y = attn_95_transpose_y_0, x = var_4853_cast, y = var_4857_cast)[name = tensor("attn_95_cast")]; + tensor var_4861 = const()[name = tensor("op_4861"), val = tensor([2, 1280, 1, -1])]; + tensor input_303_cast = reshape(shape = var_4861, x = attn_95_cast)[name = tensor("input_303_cast")]; + tensor var_4866 = const()[name = tensor("op_4866"), val = tensor([1, 1])]; + tensor var_4868 = const()[name = tensor("op_4868"), val = tensor([1, 1])]; + tensor var_4870_pad_type_0 = const()[name = tensor("op_4870_pad_type_0"), val = tensor("custom")]; + tensor var_4870_pad_0 = const()[name = tensor("op_4870_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410592256))), lut = tensor([-0x1.e74p-8, 0x1.e64p-8]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410797120)))]; + tensor var_4870_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_4868, groups = var_1186, pad = var_4870_pad_0, pad_type = var_4870_pad_type_0, strides = var_4866, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_303_cast)[name = tensor("op_4870_cast")]; + tensor inputs_143_cast = add(x = var_4870_cast, y = inputs_141_cast)[name = tensor("inputs_143_cast")]; + tensor var_4874 = const()[name = tensor("op_4874"), val = tensor([1])]; + tensor channels_mean_143_cast = reduce_mean(axes = var_4874, keep_dims = var_1181, x = inputs_143_cast)[name = tensor("channels_mean_143_cast")]; + tensor zero_mean_143_cast = sub(x = inputs_143_cast, y = channels_mean_143_cast)[name = tensor("zero_mean_143_cast")]; + tensor zero_mean_sq_143_cast = mul(x = zero_mean_143_cast, y = zero_mean_143_cast)[name = tensor("zero_mean_sq_143_cast")]; + tensor var_4878 = const()[name = tensor("op_4878"), val = tensor([1])]; + tensor var_4879_cast = reduce_mean(axes = var_4878, keep_dims = var_1181, x = zero_mean_sq_143_cast)[name = tensor("op_4879_cast")]; + tensor var_4880_to_fp16 = const()[name = tensor("op_4880_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4881_cast = add(x = var_4879_cast, y = var_4880_to_fp16)[name = tensor("op_4881_cast")]; + tensor denom_143_epsilon_0_to_fp16 = const()[name = tensor("denom_143_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_143_cast = rsqrt(epsilon = denom_143_epsilon_0_to_fp16, x = var_4881_cast)[name = tensor("denom_143_cast")]; + tensor out_143_cast = mul(x = zero_mean_143_cast, y = denom_143_cast)[name = tensor("out_143_cast")]; + tensor var_4885_to_fp16 = const()[name = tensor("op_4885_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410799744)))]; + tensor var_4886_cast 
= add(x = out_143_cast, y = var_4885_to_fp16)[name = tensor("op_4886_cast")]; + tensor var_4888_to_fp16 = const()[name = tensor("op_4888_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410802368)))]; + tensor input_305_cast = mul(x = var_4886_cast, y = var_4888_to_fp16)[name = tensor("input_305_cast")]; + tensor var_4896 = const()[name = tensor("op_4896"), val = tensor([1, 1])]; + tensor var_4898 = const()[name = tensor("op_4898"), val = tensor([1, 1])]; + tensor var_4900_pad_type_0 = const()[name = tensor("op_4900_pad_type_0"), val = tensor("custom")]; + tensor var_4900_pad_0 = const()[name = tensor("op_4900_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410804992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(417358656))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(417358784)))]; + tensor var_4900_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_4898, groups = var_1186, pad = var_4900_pad_0, pad_type = var_4900_pad_type_0, strides = var_4896, weight = down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_305_cast)[name = tensor("op_4900_cast")]; + tensor var_4901_split_sizes_0 = const()[name = tensor("op_4901_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4901_axis_0 = const()[name = tensor("op_4901_axis_0"), val = tensor(1)]; + tensor var_4901_cast_0, tensor var_4901_cast_1 = split(axis = var_4901_axis_0, split_sizes = var_4901_split_sizes_0, x = var_4900_cast)[name = tensor("op_4901_cast")]; + tensor var_4903_mode_0 = const()[name = tensor("op_4903_mode_0"), val = tensor("EXACT")]; + tensor var_4903_cast = gelu(mode = var_4903_mode_0, x = var_4901_cast_1)[name = tensor("op_4903_cast")]; + tensor input_307_cast = mul(x = var_4901_cast_0, y = var_4903_cast)[name = tensor("input_307_cast")]; + tensor var_4907 = const()[name = tensor("op_4907"), val = tensor([1, 1])]; + tensor var_4909 = const()[name = tensor("op_4909"), val = tensor([1, 1])]; + tensor var_4911_pad_type_0 = const()[name = tensor("op_4911_pad_type_0"), val = tensor("custom")]; + tensor var_4911_pad_0 = const()[name = tensor("op_4911_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(417379328))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(420656192))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(420656320)))]; + tensor var_4911_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_4909, groups = var_1186, pad = var_4911_pad_0, pad_type = var_4911_pad_type_0, strides = var_4907, weight = down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_307_cast)[name = tensor("op_4911_cast")]; + tensor hidden_states_197_cast = add(x = var_4911_cast, y = inputs_143_cast)[name = tensor("hidden_states_197_cast")]; + tensor var_4913 = const()[name = tensor("op_4913"), val = tensor([2, 1280, 32, 32])]; + tensor input_309_cast = reshape(shape = var_4913, x = hidden_states_197_cast)[name = tensor("input_309_cast")]; + tensor var_4917 = const()[name = tensor("op_4917"), val = tensor([1, 1])]; + tensor var_4919 = const()[name = tensor("op_4919"), val = tensor([1, 1])]; + tensor hidden_states_199_pad_type_0 = const()[name = tensor("hidden_states_199_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_199_pad_0 = const()[name = tensor("hidden_states_199_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(420658944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(421887808))), name = tensor("down_blocks_2_attentions_1_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(421888000)))]; + tensor hidden_states_199_cast = conv(bias = down_blocks_2_attentions_1_proj_out_bias_to_fp16, dilations = var_4919, groups = var_1186, pad = hidden_states_199_pad_0, pad_type = hidden_states_199_pad_type_0, strides = var_4917, weight = down_blocks_2_attentions_1_proj_out_weight_to_fp16_palettized, x = input_309_cast)[name = tensor("hidden_states_199_cast")]; + tensor input_311_cast = add(x = hidden_states_199_cast, y = hidden_states_133_cast)[name = tensor("input_311_cast")]; + tensor var_4927 = const()[name = tensor("op_4927"), val = tensor(3)]; + tensor var_4938 = const()[name = tensor("op_4938"), val = tensor(true)]; + tensor var_4943 = const()[name = tensor("op_4943"), val = tensor(1)]; + tensor reshape_64_shape_0 = const()[name = tensor("reshape_64_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_64_cast = reshape(shape = reshape_64_shape_0, x = input_311_cast)[name = tensor("reshape_64_cast")]; + tensor reduce_mean_48_axes_0 = const()[name = tensor("reduce_mean_48_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_48_keep_dims_0 = const()[name = tensor("reduce_mean_48_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_48_cast = reduce_mean(axes = reduce_mean_48_axes_0, keep_dims = reduce_mean_48_keep_dims_0, x = reshape_64_cast)[name = tensor("reduce_mean_48_cast")]; + tensor sub_32_cast = sub(x = reshape_64_cast, y = reduce_mean_48_cast)[name = tensor("sub_32_cast")]; + tensor square_16_cast = square(x = sub_32_cast)[name = tensor("square_16_cast")]; + tensor reduce_mean_50_axes_0 = const()[name = tensor("reduce_mean_50_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_50_keep_dims_0 = const()[name = tensor("reduce_mean_50_keep_dims_0"), val = tensor(true)]; + tensor 
reduce_mean_50_cast = reduce_mean(axes = reduce_mean_50_axes_0, keep_dims = reduce_mean_50_keep_dims_0, x = square_16_cast)[name = tensor("reduce_mean_50_cast")]; + tensor add_32_y_0_to_fp16 = const()[name = tensor("add_32_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_32_cast = add(x = reduce_mean_50_cast, y = add_32_y_0_to_fp16)[name = tensor("add_32_cast")]; + tensor sqrt_16_cast = sqrt(x = add_32_cast)[name = tensor("sqrt_16_cast")]; + tensor real_div_16_cast = real_div(x = sub_32_cast, y = sqrt_16_cast)[name = tensor("real_div_16_cast")]; + tensor reshape_65_shape_0 = const()[name = tensor("reshape_65_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_65_cast = reshape(shape = reshape_65_shape_0, x = real_div_16_cast)[name = tensor("reshape_65_cast")]; + tensor add_33_gamma_0_to_fp16 = const()[name = tensor("add_33_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(421890624)))]; + tensor add_33_beta_0_to_fp16 = const()[name = tensor("add_33_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(421893248)))]; + tensor add_33_epsilon_0_to_fp16 = const()[name = tensor("add_33_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_33_cast = batch_norm(beta = add_33_beta_0_to_fp16, epsilon = add_33_epsilon_0_to_fp16, gamma = add_33_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_65_cast)[name = tensor("add_33_cast")]; + tensor input_315_cast = silu(x = add_33_cast)[name = tensor("input_315_cast")]; + tensor var_4961 = const()[name = tensor("op_4961"), val = tensor([1, 1])]; + tensor var_4963 = const()[name = tensor("op_4963"), val = tensor([1, 1])]; + tensor hidden_states_201_pad_type_0 = const()[name = tensor("hidden_states_201_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_201_pad_0 = const()[name = tensor("hidden_states_201_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor mid_block_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(421895872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(432955136))), name = tensor("mid_block_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor mid_block_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("mid_block_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(432955328)))]; + tensor hidden_states_201_cast = conv(bias = mid_block_resnets_0_conv1_bias_to_fp16, dilations = var_4963, groups = var_4943, pad = hidden_states_201_pad_0, pad_type = hidden_states_201_pad_type_0, strides = var_4961, weight = mid_block_resnets_0_conv1_weight_to_fp16_palettized, x = input_315_cast)[name = tensor("hidden_states_201_cast")]; + tensor var_4969 = const()[name = tensor("op_4969"), val = tensor([1, 1])]; + tensor var_4971 = const()[name = tensor("op_4971"), val = tensor([1, 1])]; + tensor temb_13_pad_type_0 = const()[name = tensor("temb_13_pad_type_0"), val = tensor("custom")]; + tensor temb_13_pad_0 = const()[name = tensor("temb_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(432957952))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(433777216))), name = tensor("mid_block_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("mid_block_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(433777344)))]; + tensor temb_13_cast = conv(bias = mid_block_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_4971, groups = var_4943, pad = temb_13_pad_0, pad_type = temb_13_pad_type_0, strides = var_4969, weight = mid_block_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_13_cast")]; + tensor input_319_cast = add(x = hidden_states_201_cast, y = temb_13_cast)[name = tensor("input_319_cast")]; + tensor reshape_68_shape_0 = const()[name = tensor("reshape_68_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_68_cast = reshape(shape = reshape_68_shape_0, x = input_319_cast)[name = tensor("reshape_68_cast")]; + tensor reduce_mean_51_axes_0 = const()[name = tensor("reduce_mean_51_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_51_keep_dims_0 = const()[name = tensor("reduce_mean_51_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_51_cast = reduce_mean(axes = reduce_mean_51_axes_0, keep_dims = reduce_mean_51_keep_dims_0, x = reshape_68_cast)[name = tensor("reduce_mean_51_cast")]; + tensor sub_34_cast = sub(x = reshape_68_cast, y = reduce_mean_51_cast)[name = tensor("sub_34_cast")]; + tensor square_17_cast = square(x = sub_34_cast)[name = tensor("square_17_cast")]; + tensor reduce_mean_53_axes_0 = const()[name = tensor("reduce_mean_53_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_53_keep_dims_0 = const()[name = tensor("reduce_mean_53_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_53_cast = reduce_mean(axes = reduce_mean_53_axes_0, keep_dims = reduce_mean_53_keep_dims_0, x = square_17_cast)[name = tensor("reduce_mean_53_cast")]; + tensor add_34_y_0_to_fp16 = const()[name = tensor("add_34_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_34_cast = add(x = reduce_mean_53_cast, y = add_34_y_0_to_fp16)[name = tensor("add_34_cast")]; + tensor sqrt_17_cast = sqrt(x = add_34_cast)[name = tensor("sqrt_17_cast")]; + tensor real_div_17_cast = real_div(x = sub_34_cast, y = sqrt_17_cast)[name = tensor("real_div_17_cast")]; + tensor reshape_69_shape_0 = const()[name = tensor("reshape_69_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_69_cast = reshape(shape = reshape_69_shape_0, x = real_div_17_cast)[name = tensor("reshape_69_cast")]; + tensor add_35_gamma_0_to_fp16 = const()[name = tensor("add_35_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(433779968)))]; + tensor add_35_beta_0_to_fp16 = const()[name = tensor("add_35_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(433782592)))]; + tensor add_35_epsilon_0_to_fp16 = const()[name = tensor("add_35_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_35_cast = batch_norm(beta = add_35_beta_0_to_fp16, epsilon = add_35_epsilon_0_to_fp16, gamma = add_35_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_69_cast)[name = tensor("add_35_cast")]; + tensor input_323_cast = silu(x = add_35_cast)[name = tensor("input_323_cast")]; + tensor var_4981 = const()[name = tensor("op_4981"), val = 
tensor([1, 1])]; + tensor var_4983 = const()[name = tensor("op_4983"), val = tensor([1, 1])]; + tensor hidden_states_203_pad_type_0 = const()[name = tensor("hidden_states_203_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_203_pad_0 = const()[name = tensor("hidden_states_203_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor mid_block_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(433785216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(444844480))), name = tensor("mid_block_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor mid_block_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("mid_block_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(444844672)))]; + tensor hidden_states_203_cast = conv(bias = mid_block_resnets_0_conv2_bias_to_fp16, dilations = var_4983, groups = var_4943, pad = hidden_states_203_pad_0, pad_type = hidden_states_203_pad_type_0, strides = var_4981, weight = mid_block_resnets_0_conv2_weight_to_fp16_palettized, x = input_323_cast)[name = tensor("hidden_states_203_cast")]; + tensor hidden_states_205_cast = add(x = input_311_cast, y = hidden_states_203_cast)[name = tensor("hidden_states_205_cast")]; + tensor reshape_72_shape_0 = const()[name = tensor("reshape_72_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_72_cast = reshape(shape = reshape_72_shape_0, x = hidden_states_205_cast)[name = tensor("reshape_72_cast")]; + tensor reduce_mean_54_axes_0 = const()[name = tensor("reduce_mean_54_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_54_keep_dims_0 = const()[name = tensor("reduce_mean_54_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_54_cast = reduce_mean(axes = reduce_mean_54_axes_0, keep_dims = reduce_mean_54_keep_dims_0, x = reshape_72_cast)[name = tensor("reduce_mean_54_cast")]; + tensor sub_36_cast = sub(x = reshape_72_cast, y = reduce_mean_54_cast)[name = tensor("sub_36_cast")]; + tensor square_18_cast = square(x = sub_36_cast)[name = tensor("square_18_cast")]; + tensor reduce_mean_56_axes_0 = const()[name = tensor("reduce_mean_56_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_56_keep_dims_0 = const()[name = tensor("reduce_mean_56_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_56_cast = reduce_mean(axes = reduce_mean_56_axes_0, keep_dims = reduce_mean_56_keep_dims_0, x = square_18_cast)[name = tensor("reduce_mean_56_cast")]; + tensor add_36_y_0_to_fp16 = const()[name = tensor("add_36_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_36_cast = add(x = reduce_mean_56_cast, y = add_36_y_0_to_fp16)[name = tensor("add_36_cast")]; + tensor sqrt_18_cast = sqrt(x = add_36_cast)[name = tensor("sqrt_18_cast")]; + tensor real_div_18_cast = real_div(x = sub_36_cast, y = sqrt_18_cast)[name = tensor("real_div_18_cast")]; + tensor reshape_73_shape_0 = const()[name = tensor("reshape_73_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_73_cast = reshape(shape = reshape_73_shape_0, x = real_div_18_cast)[name = tensor("reshape_73_cast")]; + tensor add_37_gamma_0_to_fp16 = const()[name = tensor("add_37_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(444847296)))]; + tensor add_37_beta_0_to_fp16 = const()[name = tensor("add_37_beta_0_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(444849920)))]; + tensor add_37_epsilon_0_to_fp16 = const()[name = tensor("add_37_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_37_cast = batch_norm(beta = add_37_beta_0_to_fp16, epsilon = add_37_epsilon_0_to_fp16, gamma = add_37_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_73_cast)[name = tensor("add_37_cast")]; + tensor var_5021 = const()[name = tensor("op_5021"), val = tensor([1, 1])]; + tensor var_5023 = const()[name = tensor("op_5023"), val = tensor([1, 1])]; + tensor hidden_states_207_pad_type_0 = const()[name = tensor("hidden_states_207_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_207_pad_0 = const()[name = tensor("hidden_states_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(444852544))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(446081408))), name = tensor("mid_block_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(446081600)))]; + tensor hidden_states_207_cast = conv(bias = mid_block_attentions_0_proj_in_bias_to_fp16, dilations = var_5023, groups = var_4943, pad = hidden_states_207_pad_0, pad_type = hidden_states_207_pad_type_0, strides = var_5021, weight = mid_block_attentions_0_proj_in_weight_to_fp16_palettized, x = add_37_cast)[name = tensor("hidden_states_207_cast")]; + tensor var_5028 = const()[name = tensor("op_5028"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_145_cast = reshape(shape = var_5028, x = hidden_states_207_cast)[name = tensor("inputs_145_cast")]; + tensor var_5038 = const()[name = tensor("op_5038"), val = tensor([1])]; + tensor channels_mean_145_cast = reduce_mean(axes = var_5038, keep_dims = var_4938, x = inputs_145_cast)[name = tensor("channels_mean_145_cast")]; + tensor zero_mean_145_cast = sub(x = inputs_145_cast, y = channels_mean_145_cast)[name = tensor("zero_mean_145_cast")]; + tensor zero_mean_sq_145_cast = mul(x = zero_mean_145_cast, y = zero_mean_145_cast)[name = tensor("zero_mean_sq_145_cast")]; + tensor var_5042 = const()[name = tensor("op_5042"), val = tensor([1])]; + tensor var_5043_cast = reduce_mean(axes = var_5042, keep_dims = var_4938, x = zero_mean_sq_145_cast)[name = tensor("op_5043_cast")]; + tensor var_5044_to_fp16 = const()[name = tensor("op_5044_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5045_cast = add(x = var_5043_cast, y = var_5044_to_fp16)[name = tensor("op_5045_cast")]; + tensor denom_145_epsilon_0_to_fp16 = const()[name = tensor("denom_145_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_145_cast = rsqrt(epsilon = denom_145_epsilon_0_to_fp16, x = var_5045_cast)[name = tensor("denom_145_cast")]; + tensor out_145_cast = mul(x = zero_mean_145_cast, y = denom_145_cast)[name = tensor("out_145_cast")]; + tensor var_5049_to_fp16 = const()[name = tensor("op_5049_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(446084224)))]; + tensor var_5050_cast = add(x = out_145_cast, y = var_5049_to_fp16)[name = tensor("op_5050_cast")]; + tensor var_5052_to_fp16 = const()[name 
= tensor("op_5052_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(446086848)))]; + tensor hidden_states_209_cast = mul(x = var_5050_cast, y = var_5052_to_fp16)[name = tensor("hidden_states_209_cast")]; + tensor var_5059 = const()[name = tensor("op_5059"), val = tensor([1, 1])]; + tensor var_5061 = const()[name = tensor("op_5061"), val = tensor([1, 1])]; + tensor q_97_pad_type_0 = const()[name = tensor("q_97_pad_type_0"), val = tensor("custom")]; + tensor q_97_pad_0 = const()[name = tensor("q_97_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(446089472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(447318336))), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_97_cast = conv(dilations = var_5061, groups = var_4943, pad = q_97_pad_0, pad_type = q_97_pad_type_0, strides = var_5059, weight = mid_block_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_209_cast)[name = tensor("q_97_cast")]; + tensor var_5065 = const()[name = tensor("op_5065"), val = tensor([1, 1])]; + tensor var_5067 = const()[name = tensor("op_5067"), val = tensor([1, 1])]; + tensor k_97_pad_type_0 = const()[name = tensor("k_97_pad_type_0"), val = tensor("custom")]; + tensor k_97_pad_0 = const()[name = tensor("k_97_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(447318528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(448547392))), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_97_cast = conv(dilations = var_5067, groups = var_4943, pad = k_97_pad_0, pad_type = k_97_pad_type_0, strides = var_5065, weight = mid_block_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_209_cast)[name = tensor("k_97_cast")]; + tensor var_5071 = const()[name = tensor("op_5071"), val = tensor([1, 1])]; + tensor var_5073 = const()[name = tensor("op_5073"), val = tensor([1, 1])]; + tensor v_97_pad_type_0 = const()[name = tensor("v_97_pad_type_0"), val = tensor("custom")]; + tensor v_97_pad_0 = const()[name = tensor("v_97_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(448547584))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(449776448))), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_97_cast = conv(dilations = var_5073, groups = var_4943, pad = v_97_pad_0, pad_type = v_97_pad_type_0, strides = var_5071, weight = mid_block_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_209_cast)[name = tensor("v_97_cast")]; + tensor var_5077 = const()[name = tensor("op_5077"), val = tensor([2, 20, 
64, -1])]; + tensor var_5078_cast = reshape(shape = var_5077, x = q_97_cast)[name = tensor("op_5078_cast")]; + tensor var_5079 = const()[name = tensor("op_5079"), val = tensor([2, 20, 64, -1])]; + tensor var_5080_cast = reshape(shape = var_5079, x = k_97_cast)[name = tensor("op_5080_cast")]; + tensor var_5081 = const()[name = tensor("op_5081"), val = tensor([2, 20, 64, -1])]; + tensor var_5082_cast = reshape(shape = var_5081, x = v_97_cast)[name = tensor("op_5082_cast")]; + tensor attn_weights_193_transpose_x_0 = const()[name = tensor("attn_weights_193_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_193_transpose_y_0 = const()[name = tensor("attn_weights_193_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_193_cast = matmul(transpose_x = attn_weights_193_transpose_x_0, transpose_y = attn_weights_193_transpose_y_0, x = var_5078_cast, y = var_5080_cast)[name = tensor("attn_weights_193_cast")]; + tensor var_4934_to_fp16 = const()[name = tensor("op_4934_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_195_cast = mul(x = attn_weights_193_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_195_cast")]; + tensor var_5086_cast = softmax(axis = var_4927, x = attn_weights_195_cast)[name = tensor("op_5086_cast")]; + tensor attn_97_transpose_x_0 = const()[name = tensor("attn_97_transpose_x_0"), val = tensor(false)]; + tensor attn_97_transpose_y_0 = const()[name = tensor("attn_97_transpose_y_0"), val = tensor(true)]; + tensor attn_97_cast = matmul(transpose_x = attn_97_transpose_x_0, transpose_y = attn_97_transpose_y_0, x = var_5082_cast, y = var_5086_cast)[name = tensor("attn_97_cast")]; + tensor var_5090 = const()[name = tensor("op_5090"), val = tensor([2, 1280, 1, -1])]; + tensor input_327_cast = reshape(shape = var_5090, x = attn_97_cast)[name = tensor("input_327_cast")]; + tensor var_5095 = const()[name = tensor("op_5095"), val = tensor([1, 1])]; + tensor var_5097 = const()[name = tensor("op_5097"), val = tensor([1, 1])]; + tensor var_5099_pad_type_0 = const()[name = tensor("op_5099_pad_type_0"), val = tensor("custom")]; + tensor var_5099_pad_0 = const()[name = tensor("op_5099_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(449776640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450595904))), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450596032)))]; + tensor var_5099_cast = conv(bias = mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_5097, groups = var_4943, pad = var_5099_pad_0, pad_type = var_5099_pad_type_0, strides = var_5095, weight = mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_327_cast)[name = tensor("op_5099_cast")]; + tensor inputs_147_cast = add(x = var_5099_cast, y = inputs_145_cast)[name = tensor("inputs_147_cast")]; + tensor var_5103 = const()[name = tensor("op_5103"), val = tensor([1])]; + tensor channels_mean_147_cast = reduce_mean(axes = var_5103, keep_dims 
= var_4938, x = inputs_147_cast)[name = tensor("channels_mean_147_cast")]; + tensor zero_mean_147_cast = sub(x = inputs_147_cast, y = channels_mean_147_cast)[name = tensor("zero_mean_147_cast")]; + tensor zero_mean_sq_147_cast = mul(x = zero_mean_147_cast, y = zero_mean_147_cast)[name = tensor("zero_mean_sq_147_cast")]; + tensor var_5107 = const()[name = tensor("op_5107"), val = tensor([1])]; + tensor var_5108_cast = reduce_mean(axes = var_5107, keep_dims = var_4938, x = zero_mean_sq_147_cast)[name = tensor("op_5108_cast")]; + tensor var_5109_to_fp16 = const()[name = tensor("op_5109_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5110_cast = add(x = var_5108_cast, y = var_5109_to_fp16)[name = tensor("op_5110_cast")]; + tensor denom_147_epsilon_0_to_fp16 = const()[name = tensor("denom_147_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_147_cast = rsqrt(epsilon = denom_147_epsilon_0_to_fp16, x = var_5110_cast)[name = tensor("denom_147_cast")]; + tensor out_147_cast = mul(x = zero_mean_147_cast, y = denom_147_cast)[name = tensor("out_147_cast")]; + tensor var_5114_to_fp16 = const()[name = tensor("op_5114_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450598656)))]; + tensor var_5115_cast = add(x = out_147_cast, y = var_5114_to_fp16)[name = tensor("op_5115_cast")]; + tensor var_5117_to_fp16 = const()[name = tensor("op_5117_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450601280)))]; + tensor hidden_states_211_cast = mul(x = var_5115_cast, y = var_5117_to_fp16)[name = tensor("hidden_states_211_cast")]; + tensor var_5124 = const()[name = tensor("op_5124"), val = tensor([1, 1])]; + tensor var_5126 = const()[name = tensor("op_5126"), val = tensor([1, 1])]; + tensor q_99_pad_type_0 = const()[name = tensor("q_99_pad_type_0"), val = tensor("custom")]; + tensor q_99_pad_0 = const()[name = tensor("q_99_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450603904))), lut = tensor([-0x1.964p-7, 0x1.96p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_99_cast = conv(dilations = var_5126, groups = var_4943, pad = q_99_pad_0, pad_type = q_99_pad_type_0, strides = var_5124, weight = mid_block_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_211_cast)[name = tensor("q_99_cast")]; + tensor var_5130 = const()[name = tensor("op_5130"), val = tensor([1, 1])]; + tensor var_5132 = const()[name = tensor("op_5132"), val = tensor([1, 1])]; + tensor k_99_pad_type_0 = const()[name = tensor("k_99_pad_type_0"), val = tensor("custom")]; + tensor k_99_pad_0 = const()[name = tensor("k_99_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450808768))), lut = tensor([-0x1.444p-7, 0x1.44cp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_99_cast = conv(dilations = var_5132, groups = var_4943, pad = k_99_pad_0, pad_type = k_99_pad_type_0, strides = var_5130, weight = 
mid_block_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_99_cast")]; + tensor var_5136 = const()[name = tensor("op_5136"), val = tensor([1, 1])]; + tensor var_5138 = const()[name = tensor("op_5138"), val = tensor([1, 1])]; + tensor v_99_pad_type_0 = const()[name = tensor("v_99_pad_type_0"), val = tensor("custom")]; + tensor v_99_pad_0 = const()[name = tensor("v_99_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(451136512))), lut = tensor([-0x1.658p-7, 0x1.66p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_99_cast = conv(dilations = var_5138, groups = var_4943, pad = v_99_pad_0, pad_type = v_99_pad_type_0, strides = var_5136, weight = mid_block_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_99_cast")]; + tensor var_5142 = const()[name = tensor("op_5142"), val = tensor([2, 20, 64, -1])]; + tensor var_5143_cast = reshape(shape = var_5142, x = q_99_cast)[name = tensor("op_5143_cast")]; + tensor var_5144 = const()[name = tensor("op_5144"), val = tensor([2, 20, 64, -1])]; + tensor var_5145_cast = reshape(shape = var_5144, x = k_99_cast)[name = tensor("op_5145_cast")]; + tensor var_5146 = const()[name = tensor("op_5146"), val = tensor([2, 20, 64, -1])]; + tensor var_5147_cast = reshape(shape = var_5146, x = v_99_cast)[name = tensor("op_5147_cast")]; + tensor attn_weights_197_transpose_x_0 = const()[name = tensor("attn_weights_197_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_197_transpose_y_0 = const()[name = tensor("attn_weights_197_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_197_cast = matmul(transpose_x = attn_weights_197_transpose_x_0, transpose_y = attn_weights_197_transpose_y_0, x = var_5143_cast, y = var_5145_cast)[name = tensor("attn_weights_197_cast")]; + tensor attn_weights_199_cast = mul(x = attn_weights_197_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_199_cast")]; + tensor var_5151_cast = softmax(axis = var_4927, x = attn_weights_199_cast)[name = tensor("op_5151_cast")]; + tensor attn_99_transpose_x_0 = const()[name = tensor("attn_99_transpose_x_0"), val = tensor(false)]; + tensor attn_99_transpose_y_0 = const()[name = tensor("attn_99_transpose_y_0"), val = tensor(true)]; + tensor attn_99_cast = matmul(transpose_x = attn_99_transpose_x_0, transpose_y = attn_99_transpose_y_0, x = var_5147_cast, y = var_5151_cast)[name = tensor("attn_99_cast")]; + tensor var_5155 = const()[name = tensor("op_5155"), val = tensor([2, 1280, 1, -1])]; + tensor input_329_cast = reshape(shape = var_5155, x = attn_99_cast)[name = tensor("input_329_cast")]; + tensor var_5160 = const()[name = tensor("op_5160"), val = tensor([1, 1])]; + tensor var_5162 = const()[name = tensor("op_5162"), val = tensor([1, 1])]; + tensor var_5164_pad_type_0 = const()[name = tensor("op_5164_pad_type_0"), val = tensor("custom")]; + tensor var_5164_pad_0 = const()[name = tensor("op_5164_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(451464256))), lut = 
tensor([-0x1.78cp-8, 0x1.764p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(451669120)))]; + tensor var_5164_cast = conv(bias = mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_5162, groups = var_4943, pad = var_5164_pad_0, pad_type = var_5164_pad_type_0, strides = var_5160, weight = mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_329_cast)[name = tensor("op_5164_cast")]; + tensor inputs_149_cast = add(x = var_5164_cast, y = inputs_147_cast)[name = tensor("inputs_149_cast")]; + tensor var_5168 = const()[name = tensor("op_5168"), val = tensor([1])]; + tensor channels_mean_149_cast = reduce_mean(axes = var_5168, keep_dims = var_4938, x = inputs_149_cast)[name = tensor("channels_mean_149_cast")]; + tensor zero_mean_149_cast = sub(x = inputs_149_cast, y = channels_mean_149_cast)[name = tensor("zero_mean_149_cast")]; + tensor zero_mean_sq_149_cast = mul(x = zero_mean_149_cast, y = zero_mean_149_cast)[name = tensor("zero_mean_sq_149_cast")]; + tensor var_5172 = const()[name = tensor("op_5172"), val = tensor([1])]; + tensor var_5173_cast = reduce_mean(axes = var_5172, keep_dims = var_4938, x = zero_mean_sq_149_cast)[name = tensor("op_5173_cast")]; + tensor var_5174_to_fp16 = const()[name = tensor("op_5174_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5175_cast = add(x = var_5173_cast, y = var_5174_to_fp16)[name = tensor("op_5175_cast")]; + tensor denom_149_epsilon_0_to_fp16 = const()[name = tensor("denom_149_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_149_cast = rsqrt(epsilon = denom_149_epsilon_0_to_fp16, x = var_5175_cast)[name = tensor("denom_149_cast")]; + tensor out_149_cast = mul(x = zero_mean_149_cast, y = denom_149_cast)[name = tensor("out_149_cast")]; + tensor var_5179_to_fp16 = const()[name = tensor("op_5179_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(451671744)))]; + tensor var_5180_cast = add(x = out_149_cast, y = var_5179_to_fp16)[name = tensor("op_5180_cast")]; + tensor var_5182_to_fp16 = const()[name = tensor("op_5182_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(451674368)))]; + tensor input_331_cast = mul(x = var_5180_cast, y = var_5182_to_fp16)[name = tensor("input_331_cast")]; + tensor var_5190 = const()[name = tensor("op_5190"), val = tensor([1, 1])]; + tensor var_5192 = const()[name = tensor("op_5192"), val = tensor([1, 1])]; + tensor var_5194_pad_type_0 = const()[name = tensor("op_5194_pad_type_0"), val = tensor("custom")]; + tensor var_5194_pad_0 = const()[name = tensor("op_5194_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(451676992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(458230656))), name = tensor("mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor 
mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(458230784)))]; + tensor var_5194_cast = conv(bias = mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_5192, groups = var_4943, pad = var_5194_pad_0, pad_type = var_5194_pad_type_0, strides = var_5190, weight = mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_331_cast)[name = tensor("op_5194_cast")]; + tensor var_5195_split_sizes_0 = const()[name = tensor("op_5195_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5195_axis_0 = const()[name = tensor("op_5195_axis_0"), val = tensor(1)]; + tensor var_5195_cast_0, tensor var_5195_cast_1 = split(axis = var_5195_axis_0, split_sizes = var_5195_split_sizes_0, x = var_5194_cast)[name = tensor("op_5195_cast")]; + tensor var_5197_mode_0 = const()[name = tensor("op_5197_mode_0"), val = tensor("EXACT")]; + tensor var_5197_cast = gelu(mode = var_5197_mode_0, x = var_5195_cast_1)[name = tensor("op_5197_cast")]; + tensor input_333_cast = mul(x = var_5195_cast_0, y = var_5197_cast)[name = tensor("input_333_cast")]; + tensor var_5201 = const()[name = tensor("op_5201"), val = tensor([1, 1])]; + tensor var_5203 = const()[name = tensor("op_5203"), val = tensor([1, 1])]; + tensor var_5205_pad_type_0 = const()[name = tensor("op_5205_pad_type_0"), val = tensor("custom")]; + tensor var_5205_pad_0 = const()[name = tensor("op_5205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(458251328))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(461528192))), name = tensor("mid_block_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(461528320)))]; + tensor var_5205_cast = conv(bias = mid_block_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_5203, groups = var_4943, pad = var_5205_pad_0, pad_type = var_5205_pad_type_0, strides = var_5201, weight = mid_block_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_333_cast)[name = tensor("op_5205_cast")]; + tensor inputs_151_cast = add(x = var_5205_cast, y = inputs_149_cast)[name = tensor("inputs_151_cast")]; + tensor var_5215 = const()[name = tensor("op_5215"), val = tensor([1])]; + tensor channels_mean_151_cast = reduce_mean(axes = var_5215, keep_dims = var_4938, x = inputs_151_cast)[name = tensor("channels_mean_151_cast")]; + tensor zero_mean_151_cast = sub(x = inputs_151_cast, y = channels_mean_151_cast)[name = tensor("zero_mean_151_cast")]; + tensor zero_mean_sq_151_cast = mul(x = zero_mean_151_cast, y = zero_mean_151_cast)[name = tensor("zero_mean_sq_151_cast")]; + tensor var_5219 = const()[name = tensor("op_5219"), val = tensor([1])]; + tensor var_5220_cast = reduce_mean(axes = var_5219, keep_dims = var_4938, x = zero_mean_sq_151_cast)[name = tensor("op_5220_cast")]; + tensor 
var_5221_to_fp16 = const()[name = tensor("op_5221_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5222_cast = add(x = var_5220_cast, y = var_5221_to_fp16)[name = tensor("op_5222_cast")]; + tensor denom_151_epsilon_0_to_fp16 = const()[name = tensor("denom_151_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_151_cast = rsqrt(epsilon = denom_151_epsilon_0_to_fp16, x = var_5222_cast)[name = tensor("denom_151_cast")]; + tensor out_151_cast = mul(x = zero_mean_151_cast, y = denom_151_cast)[name = tensor("out_151_cast")]; + tensor var_5226_to_fp16 = const()[name = tensor("op_5226_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(461530944)))]; + tensor var_5227_cast = add(x = out_151_cast, y = var_5226_to_fp16)[name = tensor("op_5227_cast")]; + tensor var_5229_to_fp16 = const()[name = tensor("op_5229_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(461533568)))]; + tensor hidden_states_215_cast = mul(x = var_5227_cast, y = var_5229_to_fp16)[name = tensor("hidden_states_215_cast")]; + tensor var_5236 = const()[name = tensor("op_5236"), val = tensor([1, 1])]; + tensor var_5238 = const()[name = tensor("op_5238"), val = tensor([1, 1])]; + tensor q_101_pad_type_0 = const()[name = tensor("q_101_pad_type_0"), val = tensor("custom")]; + tensor q_101_pad_0 = const()[name = tensor("q_101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(461536192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(462355456))), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_101_cast = conv(dilations = var_5238, groups = var_4943, pad = q_101_pad_0, pad_type = q_101_pad_type_0, strides = var_5236, weight = mid_block_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_215_cast)[name = tensor("q_101_cast")]; + tensor var_5242 = const()[name = tensor("op_5242"), val = tensor([1, 1])]; + tensor var_5244 = const()[name = tensor("op_5244"), val = tensor([1, 1])]; + tensor k_101_pad_type_0 = const()[name = tensor("k_101_pad_type_0"), val = tensor("custom")]; + tensor k_101_pad_0 = const()[name = tensor("k_101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(462355584))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(463584448))), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_101_cast = conv(dilations = var_5244, groups = var_4943, pad = k_101_pad_0, pad_type = k_101_pad_type_0, strides = var_5242, weight = mid_block_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_215_cast)[name = tensor("k_101_cast")]; + tensor var_5248 = const()[name = tensor("op_5248"), val = tensor([1, 1])]; + tensor var_5250 = const()[name = tensor("op_5250"), val = tensor([1, 1])]; + tensor v_101_pad_type_0 = const()[name = tensor("v_101_pad_type_0"), val = tensor("custom")]; + tensor v_101_pad_0 = const()[name 
= tensor("v_101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(463584640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(464403904))), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_101_cast = conv(dilations = var_5250, groups = var_4943, pad = v_101_pad_0, pad_type = v_101_pad_type_0, strides = var_5248, weight = mid_block_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_215_cast)[name = tensor("v_101_cast")]; + tensor var_5254 = const()[name = tensor("op_5254"), val = tensor([2, 20, 64, -1])]; + tensor var_5255_cast = reshape(shape = var_5254, x = q_101_cast)[name = tensor("op_5255_cast")]; + tensor var_5256 = const()[name = tensor("op_5256"), val = tensor([2, 20, 64, -1])]; + tensor var_5257_cast = reshape(shape = var_5256, x = k_101_cast)[name = tensor("op_5257_cast")]; + tensor var_5258 = const()[name = tensor("op_5258"), val = tensor([2, 20, 64, -1])]; + tensor var_5259_cast = reshape(shape = var_5258, x = v_101_cast)[name = tensor("op_5259_cast")]; + tensor attn_weights_201_transpose_x_0 = const()[name = tensor("attn_weights_201_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_201_transpose_y_0 = const()[name = tensor("attn_weights_201_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_201_cast = matmul(transpose_x = attn_weights_201_transpose_x_0, transpose_y = attn_weights_201_transpose_y_0, x = var_5255_cast, y = var_5257_cast)[name = tensor("attn_weights_201_cast")]; + tensor attn_weights_203_cast = mul(x = attn_weights_201_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_203_cast")]; + tensor var_5263_cast = softmax(axis = var_4927, x = attn_weights_203_cast)[name = tensor("op_5263_cast")]; + tensor attn_101_transpose_x_0 = const()[name = tensor("attn_101_transpose_x_0"), val = tensor(false)]; + tensor attn_101_transpose_y_0 = const()[name = tensor("attn_101_transpose_y_0"), val = tensor(true)]; + tensor attn_101_cast = matmul(transpose_x = attn_101_transpose_x_0, transpose_y = attn_101_transpose_y_0, x = var_5259_cast, y = var_5263_cast)[name = tensor("attn_101_cast")]; + tensor var_5267 = const()[name = tensor("op_5267"), val = tensor([2, 1280, 1, -1])]; + tensor input_335_cast = reshape(shape = var_5267, x = attn_101_cast)[name = tensor("input_335_cast")]; + tensor var_5272 = const()[name = tensor("op_5272"), val = tensor([1, 1])]; + tensor var_5274 = const()[name = tensor("op_5274"), val = tensor([1, 1])]; + tensor var_5276_pad_type_0 = const()[name = tensor("op_5276_pad_type_0"), val = tensor("custom")]; + tensor var_5276_pad_0 = const()[name = tensor("op_5276_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(464404032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(465632896))), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = 
tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(465633088)))]; + tensor var_5276_cast = conv(bias = mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_5274, groups = var_4943, pad = var_5276_pad_0, pad_type = var_5276_pad_type_0, strides = var_5272, weight = mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_335_cast)[name = tensor("op_5276_cast")]; + tensor inputs_153_cast = add(x = var_5276_cast, y = inputs_151_cast)[name = tensor("inputs_153_cast")]; + tensor var_5280 = const()[name = tensor("op_5280"), val = tensor([1])]; + tensor channels_mean_153_cast = reduce_mean(axes = var_5280, keep_dims = var_4938, x = inputs_153_cast)[name = tensor("channels_mean_153_cast")]; + tensor zero_mean_153_cast = sub(x = inputs_153_cast, y = channels_mean_153_cast)[name = tensor("zero_mean_153_cast")]; + tensor zero_mean_sq_153_cast = mul(x = zero_mean_153_cast, y = zero_mean_153_cast)[name = tensor("zero_mean_sq_153_cast")]; + tensor var_5284 = const()[name = tensor("op_5284"), val = tensor([1])]; + tensor var_5285_cast = reduce_mean(axes = var_5284, keep_dims = var_4938, x = zero_mean_sq_153_cast)[name = tensor("op_5285_cast")]; + tensor var_5286_to_fp16 = const()[name = tensor("op_5286_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5287_cast = add(x = var_5285_cast, y = var_5286_to_fp16)[name = tensor("op_5287_cast")]; + tensor denom_153_epsilon_0_to_fp16 = const()[name = tensor("denom_153_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_153_cast = rsqrt(epsilon = denom_153_epsilon_0_to_fp16, x = var_5287_cast)[name = tensor("denom_153_cast")]; + tensor out_153_cast = mul(x = zero_mean_153_cast, y = denom_153_cast)[name = tensor("out_153_cast")]; + tensor var_5291_to_fp16 = const()[name = tensor("op_5291_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(465635712)))]; + tensor var_5292_cast = add(x = out_153_cast, y = var_5291_to_fp16)[name = tensor("op_5292_cast")]; + tensor var_5294_to_fp16 = const()[name = tensor("op_5294_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(465638336)))]; + tensor hidden_states_217_cast = mul(x = var_5292_cast, y = var_5294_to_fp16)[name = tensor("hidden_states_217_cast")]; + tensor var_5301 = const()[name = tensor("op_5301"), val = tensor([1, 1])]; + tensor var_5303 = const()[name = tensor("op_5303"), val = tensor([1, 1])]; + tensor q_103_pad_type_0 = const()[name = tensor("q_103_pad_type_0"), val = tensor("custom")]; + tensor q_103_pad_0 = const()[name = tensor("q_103_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(465640960))), lut = tensor([-0x1.acp-7, 0x1.ab4p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_103_cast = conv(dilations = var_5303, groups = var_4943, pad = q_103_pad_0, pad_type = q_103_pad_type_0, strides = var_5301, weight = mid_block_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_217_cast)[name = tensor("q_103_cast")]; + tensor var_5307 = const()[name = tensor("op_5307"), val = tensor([1, 1])]; + 
tensor var_5309 = const()[name = tensor("op_5309"), val = tensor([1, 1])]; + tensor k_103_pad_type_0 = const()[name = tensor("k_103_pad_type_0"), val = tensor("custom")]; + tensor k_103_pad_0 = const()[name = tensor("k_103_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(465845824))), lut = tensor([-0x1.514p-7, 0x1.528p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_103_cast = conv(dilations = var_5309, groups = var_4943, pad = k_103_pad_0, pad_type = k_103_pad_type_0, strides = var_5307, weight = mid_block_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_103_cast")]; + tensor var_5313 = const()[name = tensor("op_5313"), val = tensor([1, 1])]; + tensor var_5315 = const()[name = tensor("op_5315"), val = tensor([1, 1])]; + tensor v_103_pad_type_0 = const()[name = tensor("v_103_pad_type_0"), val = tensor("custom")]; + tensor v_103_pad_0 = const()[name = tensor("v_103_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(466173568))), lut = tensor([-0x1.88cp-7, 0x1.884p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_103_cast = conv(dilations = var_5315, groups = var_4943, pad = v_103_pad_0, pad_type = v_103_pad_type_0, strides = var_5313, weight = mid_block_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_103_cast")]; + tensor var_5319 = const()[name = tensor("op_5319"), val = tensor([2, 20, 64, -1])]; + tensor var_5320_cast = reshape(shape = var_5319, x = q_103_cast)[name = tensor("op_5320_cast")]; + tensor var_5321 = const()[name = tensor("op_5321"), val = tensor([2, 20, 64, -1])]; + tensor var_5322_cast = reshape(shape = var_5321, x = k_103_cast)[name = tensor("op_5322_cast")]; + tensor var_5323 = const()[name = tensor("op_5323"), val = tensor([2, 20, 64, -1])]; + tensor var_5324_cast = reshape(shape = var_5323, x = v_103_cast)[name = tensor("op_5324_cast")]; + tensor attn_weights_205_transpose_x_0 = const()[name = tensor("attn_weights_205_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_205_transpose_y_0 = const()[name = tensor("attn_weights_205_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_205_cast = matmul(transpose_x = attn_weights_205_transpose_x_0, transpose_y = attn_weights_205_transpose_y_0, x = var_5320_cast, y = var_5322_cast)[name = tensor("attn_weights_205_cast")]; + tensor attn_weights_207_cast = mul(x = attn_weights_205_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_207_cast")]; + tensor var_5328_cast = softmax(axis = var_4927, x = attn_weights_207_cast)[name = tensor("op_5328_cast")]; + tensor attn_103_transpose_x_0 = const()[name = tensor("attn_103_transpose_x_0"), val = tensor(false)]; + tensor attn_103_transpose_y_0 = const()[name = tensor("attn_103_transpose_y_0"), val = tensor(true)]; + tensor attn_103_cast = matmul(transpose_x = attn_103_transpose_x_0, transpose_y = attn_103_transpose_y_0, x = var_5324_cast, y = 
var_5328_cast)[name = tensor("attn_103_cast")]; + tensor var_5332 = const()[name = tensor("op_5332"), val = tensor([2, 1280, 1, -1])]; + tensor input_337_cast = reshape(shape = var_5332, x = attn_103_cast)[name = tensor("input_337_cast")]; + tensor var_5337 = const()[name = tensor("op_5337"), val = tensor([1, 1])]; + tensor var_5339 = const()[name = tensor("op_5339"), val = tensor([1, 1])]; + tensor var_5341_pad_type_0 = const()[name = tensor("op_5341_pad_type_0"), val = tensor("custom")]; + tensor var_5341_pad_0 = const()[name = tensor("op_5341_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(466501312))), lut = tensor([-0x1.aacp-8, 0x1.ab4p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(466706176)))]; + tensor var_5341_cast = conv(bias = mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_5339, groups = var_4943, pad = var_5341_pad_0, pad_type = var_5341_pad_type_0, strides = var_5337, weight = mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_337_cast)[name = tensor("op_5341_cast")]; + tensor inputs_155_cast = add(x = var_5341_cast, y = inputs_153_cast)[name = tensor("inputs_155_cast")]; + tensor var_5345 = const()[name = tensor("op_5345"), val = tensor([1])]; + tensor channels_mean_155_cast = reduce_mean(axes = var_5345, keep_dims = var_4938, x = inputs_155_cast)[name = tensor("channels_mean_155_cast")]; + tensor zero_mean_155_cast = sub(x = inputs_155_cast, y = channels_mean_155_cast)[name = tensor("zero_mean_155_cast")]; + tensor zero_mean_sq_155_cast = mul(x = zero_mean_155_cast, y = zero_mean_155_cast)[name = tensor("zero_mean_sq_155_cast")]; + tensor var_5349 = const()[name = tensor("op_5349"), val = tensor([1])]; + tensor var_5350_cast = reduce_mean(axes = var_5349, keep_dims = var_4938, x = zero_mean_sq_155_cast)[name = tensor("op_5350_cast")]; + tensor var_5351_to_fp16 = const()[name = tensor("op_5351_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5352_cast = add(x = var_5350_cast, y = var_5351_to_fp16)[name = tensor("op_5352_cast")]; + tensor denom_155_epsilon_0_to_fp16 = const()[name = tensor("denom_155_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_155_cast = rsqrt(epsilon = denom_155_epsilon_0_to_fp16, x = var_5352_cast)[name = tensor("denom_155_cast")]; + tensor out_155_cast = mul(x = zero_mean_155_cast, y = denom_155_cast)[name = tensor("out_155_cast")]; + tensor var_5356_to_fp16 = const()[name = tensor("op_5356_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(466708800)))]; + tensor var_5357_cast = add(x = out_155_cast, y = var_5356_to_fp16)[name = tensor("op_5357_cast")]; + tensor var_5359_to_fp16 = const()[name = tensor("op_5359_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(466711424)))]; + tensor input_339_cast = mul(x = var_5357_cast, y = var_5359_to_fp16)[name = tensor("input_339_cast")]; + tensor var_5367 = 
const()[name = tensor("op_5367"), val = tensor([1, 1])]; + tensor var_5369 = const()[name = tensor("op_5369"), val = tensor([1, 1])]; + tensor var_5371_pad_type_0 = const()[name = tensor("op_5371_pad_type_0"), val = tensor("custom")]; + tensor var_5371_pad_0 = const()[name = tensor("op_5371_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(466714048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(476544512))), name = tensor("mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(476544704)))]; + tensor var_5371_cast = conv(bias = mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_5369, groups = var_4943, pad = var_5371_pad_0, pad_type = var_5371_pad_type_0, strides = var_5367, weight = mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_339_cast)[name = tensor("op_5371_cast")]; + tensor var_5372_split_sizes_0 = const()[name = tensor("op_5372_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5372_axis_0 = const()[name = tensor("op_5372_axis_0"), val = tensor(1)]; + tensor var_5372_cast_0, tensor var_5372_cast_1 = split(axis = var_5372_axis_0, split_sizes = var_5372_split_sizes_0, x = var_5371_cast)[name = tensor("op_5372_cast")]; + tensor var_5374_mode_0 = const()[name = tensor("op_5374_mode_0"), val = tensor("EXACT")]; + tensor var_5374_cast = gelu(mode = var_5374_mode_0, x = var_5372_cast_1)[name = tensor("op_5374_cast")]; + tensor input_341_cast = mul(x = var_5372_cast_0, y = var_5374_cast)[name = tensor("input_341_cast")]; + tensor var_5378 = const()[name = tensor("op_5378"), val = tensor([1, 1])]; + tensor var_5380 = const()[name = tensor("op_5380"), val = tensor([1, 1])]; + tensor var_5382_pad_type_0 = const()[name = tensor("op_5382_pad_type_0"), val = tensor("custom")]; + tensor var_5382_pad_0 = const()[name = tensor("op_5382_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(476565248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(479842112))), name = tensor("mid_block_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(479842240)))]; + tensor var_5382_cast = conv(bias = mid_block_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_5380, groups = var_4943, pad = var_5382_pad_0, pad_type = var_5382_pad_type_0, strides = var_5378, weight = mid_block_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_341_cast)[name = tensor("op_5382_cast")]; + 
tensor inputs_157_cast = add(x = var_5382_cast, y = inputs_155_cast)[name = tensor("inputs_157_cast")]; + tensor var_5392 = const()[name = tensor("op_5392"), val = tensor([1])]; + tensor channels_mean_157_cast = reduce_mean(axes = var_5392, keep_dims = var_4938, x = inputs_157_cast)[name = tensor("channels_mean_157_cast")]; + tensor zero_mean_157_cast = sub(x = inputs_157_cast, y = channels_mean_157_cast)[name = tensor("zero_mean_157_cast")]; + tensor zero_mean_sq_157_cast = mul(x = zero_mean_157_cast, y = zero_mean_157_cast)[name = tensor("zero_mean_sq_157_cast")]; + tensor var_5396 = const()[name = tensor("op_5396"), val = tensor([1])]; + tensor var_5397_cast = reduce_mean(axes = var_5396, keep_dims = var_4938, x = zero_mean_sq_157_cast)[name = tensor("op_5397_cast")]; + tensor var_5398_to_fp16 = const()[name = tensor("op_5398_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5399_cast = add(x = var_5397_cast, y = var_5398_to_fp16)[name = tensor("op_5399_cast")]; + tensor denom_157_epsilon_0_to_fp16 = const()[name = tensor("denom_157_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_157_cast = rsqrt(epsilon = denom_157_epsilon_0_to_fp16, x = var_5399_cast)[name = tensor("denom_157_cast")]; + tensor out_157_cast = mul(x = zero_mean_157_cast, y = denom_157_cast)[name = tensor("out_157_cast")]; + tensor var_5403_to_fp16 = const()[name = tensor("op_5403_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(479844864)))]; + tensor var_5404_cast = add(x = out_157_cast, y = var_5403_to_fp16)[name = tensor("op_5404_cast")]; + tensor var_5406_to_fp16 = const()[name = tensor("op_5406_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(479847488)))]; + tensor hidden_states_221_cast = mul(x = var_5404_cast, y = var_5406_to_fp16)[name = tensor("hidden_states_221_cast")]; + tensor var_5413 = const()[name = tensor("op_5413"), val = tensor([1, 1])]; + tensor var_5415 = const()[name = tensor("op_5415"), val = tensor([1, 1])]; + tensor q_105_pad_type_0 = const()[name = tensor("q_105_pad_type_0"), val = tensor("custom")]; + tensor q_105_pad_0 = const()[name = tensor("q_105_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(479850112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(480669376))), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_105_cast = conv(dilations = var_5415, groups = var_4943, pad = q_105_pad_0, pad_type = q_105_pad_type_0, strides = var_5413, weight = mid_block_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_221_cast)[name = tensor("q_105_cast")]; + tensor var_5419 = const()[name = tensor("op_5419"), val = tensor([1, 1])]; + tensor var_5421 = const()[name = tensor("op_5421"), val = tensor([1, 1])]; + tensor k_105_pad_type_0 = const()[name = tensor("k_105_pad_type_0"), val = tensor("custom")]; + tensor k_105_pad_0 = const()[name = tensor("k_105_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(480669504))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(481488768))), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_105_cast = conv(dilations = var_5421, groups = var_4943, pad = k_105_pad_0, pad_type = k_105_pad_type_0, strides = var_5419, weight = mid_block_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_221_cast)[name = tensor("k_105_cast")]; + tensor var_5425 = const()[name = tensor("op_5425"), val = tensor([1, 1])]; + tensor var_5427 = const()[name = tensor("op_5427"), val = tensor([1, 1])]; + tensor v_105_pad_type_0 = const()[name = tensor("v_105_pad_type_0"), val = tensor("custom")]; + tensor v_105_pad_0 = const()[name = tensor("v_105_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(481488896))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(482308160))), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_105_cast = conv(dilations = var_5427, groups = var_4943, pad = v_105_pad_0, pad_type = v_105_pad_type_0, strides = var_5425, weight = mid_block_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_221_cast)[name = tensor("v_105_cast")]; + tensor var_5431 = const()[name = tensor("op_5431"), val = tensor([2, 20, 64, -1])]; + tensor var_5432_cast = reshape(shape = var_5431, x = q_105_cast)[name = tensor("op_5432_cast")]; + tensor var_5433 = const()[name = tensor("op_5433"), val = tensor([2, 20, 64, -1])]; + tensor var_5434_cast = reshape(shape = var_5433, x = k_105_cast)[name = tensor("op_5434_cast")]; + tensor var_5435 = const()[name = tensor("op_5435"), val = tensor([2, 20, 64, -1])]; + tensor var_5436_cast = reshape(shape = var_5435, x = v_105_cast)[name = tensor("op_5436_cast")]; + tensor attn_weights_209_transpose_x_0 = const()[name = tensor("attn_weights_209_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_209_transpose_y_0 = const()[name = tensor("attn_weights_209_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_209_cast = matmul(transpose_x = attn_weights_209_transpose_x_0, transpose_y = attn_weights_209_transpose_y_0, x = var_5432_cast, y = var_5434_cast)[name = tensor("attn_weights_209_cast")]; + tensor attn_weights_211_cast = mul(x = attn_weights_209_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_211_cast")]; + tensor var_5440_cast = softmax(axis = var_4927, x = attn_weights_211_cast)[name = tensor("op_5440_cast")]; + tensor attn_105_transpose_x_0 = const()[name = tensor("attn_105_transpose_x_0"), val = tensor(false)]; + tensor attn_105_transpose_y_0 = const()[name = tensor("attn_105_transpose_y_0"), val = tensor(true)]; + tensor attn_105_cast = matmul(transpose_x = attn_105_transpose_x_0, transpose_y = attn_105_transpose_y_0, x = var_5436_cast, y = var_5440_cast)[name = tensor("attn_105_cast")]; + tensor var_5444 = const()[name = tensor("op_5444"), val = tensor([2, 1280, 1, -1])]; + tensor input_343_cast = reshape(shape = var_5444, x = attn_105_cast)[name = tensor("input_343_cast")]; + tensor var_5449 = const()[name = tensor("op_5449"), val = tensor([1, 1])]; + tensor var_5451 = const()[name = tensor("op_5451"), 
val = tensor([1, 1])]; + tensor var_5453_pad_type_0 = const()[name = tensor("op_5453_pad_type_0"), val = tensor("custom")]; + tensor var_5453_pad_0 = const()[name = tensor("op_5453_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(482308288))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(483127552))), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(483127680)))]; + tensor var_5453_cast = conv(bias = mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_5451, groups = var_4943, pad = var_5453_pad_0, pad_type = var_5453_pad_type_0, strides = var_5449, weight = mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_343_cast)[name = tensor("op_5453_cast")]; + tensor inputs_159_cast = add(x = var_5453_cast, y = inputs_157_cast)[name = tensor("inputs_159_cast")]; + tensor var_5457 = const()[name = tensor("op_5457"), val = tensor([1])]; + tensor channels_mean_159_cast = reduce_mean(axes = var_5457, keep_dims = var_4938, x = inputs_159_cast)[name = tensor("channels_mean_159_cast")]; + tensor zero_mean_159_cast = sub(x = inputs_159_cast, y = channels_mean_159_cast)[name = tensor("zero_mean_159_cast")]; + tensor zero_mean_sq_159_cast = mul(x = zero_mean_159_cast, y = zero_mean_159_cast)[name = tensor("zero_mean_sq_159_cast")]; + tensor var_5461 = const()[name = tensor("op_5461"), val = tensor([1])]; + tensor var_5462_cast = reduce_mean(axes = var_5461, keep_dims = var_4938, x = zero_mean_sq_159_cast)[name = tensor("op_5462_cast")]; + tensor var_5463_to_fp16 = const()[name = tensor("op_5463_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5464_cast = add(x = var_5462_cast, y = var_5463_to_fp16)[name = tensor("op_5464_cast")]; + tensor denom_159_epsilon_0_to_fp16 = const()[name = tensor("denom_159_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_159_cast = rsqrt(epsilon = denom_159_epsilon_0_to_fp16, x = var_5464_cast)[name = tensor("denom_159_cast")]; + tensor out_159_cast = mul(x = zero_mean_159_cast, y = denom_159_cast)[name = tensor("out_159_cast")]; + tensor var_5468_to_fp16 = const()[name = tensor("op_5468_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(483130304)))]; + tensor var_5469_cast = add(x = out_159_cast, y = var_5468_to_fp16)[name = tensor("op_5469_cast")]; + tensor var_5471_to_fp16 = const()[name = tensor("op_5471_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(483132928)))]; + tensor hidden_states_223_cast = mul(x = var_5469_cast, y = var_5471_to_fp16)[name = tensor("hidden_states_223_cast")]; + tensor var_5478 = const()[name = tensor("op_5478"), val = tensor([1, 1])]; + tensor var_5480 = const()[name = tensor("op_5480"), val = tensor([1, 1])]; + tensor q_107_pad_type_0 = const()[name = tensor("q_107_pad_type_0"), val = tensor("custom")]; + tensor q_107_pad_0 = const()[name = tensor("q_107_pad_0"), val = 
tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(483135552))), lut = tensor([-0x1.93p-7, 0x1.938p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_107_cast = conv(dilations = var_5480, groups = var_4943, pad = q_107_pad_0, pad_type = q_107_pad_type_0, strides = var_5478, weight = mid_block_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_223_cast)[name = tensor("q_107_cast")]; + tensor var_5484 = const()[name = tensor("op_5484"), val = tensor([1, 1])]; + tensor var_5486 = const()[name = tensor("op_5486"), val = tensor([1, 1])]; + tensor k_107_pad_type_0 = const()[name = tensor("k_107_pad_type_0"), val = tensor("custom")]; + tensor k_107_pad_0 = const()[name = tensor("k_107_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(483340416))), lut = tensor([-0x1.294p-7, 0x1.28cp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_107_cast = conv(dilations = var_5486, groups = var_4943, pad = k_107_pad_0, pad_type = k_107_pad_type_0, strides = var_5484, weight = mid_block_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_107_cast")]; + tensor var_5490 = const()[name = tensor("op_5490"), val = tensor([1, 1])]; + tensor var_5492 = const()[name = tensor("op_5492"), val = tensor([1, 1])]; + tensor v_107_pad_type_0 = const()[name = tensor("v_107_pad_type_0"), val = tensor("custom")]; + tensor v_107_pad_0 = const()[name = tensor("v_107_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(483668160))), lut = tensor([-0x1.63cp-7, 0x1.62cp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_107_cast = conv(dilations = var_5492, groups = var_4943, pad = v_107_pad_0, pad_type = v_107_pad_type_0, strides = var_5490, weight = mid_block_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_107_cast")]; + tensor var_5496 = const()[name = tensor("op_5496"), val = tensor([2, 20, 64, -1])]; + tensor var_5497_cast = reshape(shape = var_5496, x = q_107_cast)[name = tensor("op_5497_cast")]; + tensor var_5498 = const()[name = tensor("op_5498"), val = tensor([2, 20, 64, -1])]; + tensor var_5499_cast = reshape(shape = var_5498, x = k_107_cast)[name = tensor("op_5499_cast")]; + tensor var_5500 = const()[name = tensor("op_5500"), val = tensor([2, 20, 64, -1])]; + tensor var_5501_cast = reshape(shape = var_5500, x = v_107_cast)[name = tensor("op_5501_cast")]; + tensor attn_weights_213_transpose_x_0 = const()[name = tensor("attn_weights_213_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_213_transpose_y_0 = const()[name = tensor("attn_weights_213_transpose_y_0"), val = tensor(false)]; + 
tensor attn_weights_213_cast = matmul(transpose_x = attn_weights_213_transpose_x_0, transpose_y = attn_weights_213_transpose_y_0, x = var_5497_cast, y = var_5499_cast)[name = tensor("attn_weights_213_cast")]; + tensor attn_weights_215_cast = mul(x = attn_weights_213_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_215_cast")]; + tensor var_5505_cast = softmax(axis = var_4927, x = attn_weights_215_cast)[name = tensor("op_5505_cast")]; + tensor attn_107_transpose_x_0 = const()[name = tensor("attn_107_transpose_x_0"), val = tensor(false)]; + tensor attn_107_transpose_y_0 = const()[name = tensor("attn_107_transpose_y_0"), val = tensor(true)]; + tensor attn_107_cast = matmul(transpose_x = attn_107_transpose_x_0, transpose_y = attn_107_transpose_y_0, x = var_5501_cast, y = var_5505_cast)[name = tensor("attn_107_cast")]; + tensor var_5509 = const()[name = tensor("op_5509"), val = tensor([2, 1280, 1, -1])]; + tensor input_345_cast = reshape(shape = var_5509, x = attn_107_cast)[name = tensor("input_345_cast")]; + tensor var_5514 = const()[name = tensor("op_5514"), val = tensor([1, 1])]; + tensor var_5516 = const()[name = tensor("op_5516"), val = tensor([1, 1])]; + tensor var_5518_pad_type_0 = const()[name = tensor("op_5518_pad_type_0"), val = tensor("custom")]; + tensor var_5518_pad_0 = const()[name = tensor("op_5518_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(483995904))), lut = tensor([-0x1.878p-8, 0x1.86cp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(484200768)))]; + tensor var_5518_cast = conv(bias = mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_5516, groups = var_4943, pad = var_5518_pad_0, pad_type = var_5518_pad_type_0, strides = var_5514, weight = mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_345_cast)[name = tensor("op_5518_cast")]; + tensor inputs_161_cast = add(x = var_5518_cast, y = inputs_159_cast)[name = tensor("inputs_161_cast")]; + tensor var_5522 = const()[name = tensor("op_5522"), val = tensor([1])]; + tensor channels_mean_161_cast = reduce_mean(axes = var_5522, keep_dims = var_4938, x = inputs_161_cast)[name = tensor("channels_mean_161_cast")]; + tensor zero_mean_161_cast = sub(x = inputs_161_cast, y = channels_mean_161_cast)[name = tensor("zero_mean_161_cast")]; + tensor zero_mean_sq_161_cast = mul(x = zero_mean_161_cast, y = zero_mean_161_cast)[name = tensor("zero_mean_sq_161_cast")]; + tensor var_5526 = const()[name = tensor("op_5526"), val = tensor([1])]; + tensor var_5527_cast = reduce_mean(axes = var_5526, keep_dims = var_4938, x = zero_mean_sq_161_cast)[name = tensor("op_5527_cast")]; + tensor var_5528_to_fp16 = const()[name = tensor("op_5528_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5529_cast = add(x = var_5527_cast, y = var_5528_to_fp16)[name = tensor("op_5529_cast")]; + tensor denom_161_epsilon_0_to_fp16 = const()[name = tensor("denom_161_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor 
denom_161_cast = rsqrt(epsilon = denom_161_epsilon_0_to_fp16, x = var_5529_cast)[name = tensor("denom_161_cast")]; + tensor out_161_cast = mul(x = zero_mean_161_cast, y = denom_161_cast)[name = tensor("out_161_cast")]; + tensor var_5533_to_fp16 = const()[name = tensor("op_5533_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(484203392)))]; + tensor var_5534_cast = add(x = out_161_cast, y = var_5533_to_fp16)[name = tensor("op_5534_cast")]; + tensor var_5536_to_fp16 = const()[name = tensor("op_5536_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(484206016)))]; + tensor input_347_cast = mul(x = var_5534_cast, y = var_5536_to_fp16)[name = tensor("input_347_cast")]; + tensor var_5544 = const()[name = tensor("op_5544"), val = tensor([1, 1])]; + tensor var_5546 = const()[name = tensor("op_5546"), val = tensor([1, 1])]; + tensor var_5548_pad_type_0 = const()[name = tensor("op_5548_pad_type_0"), val = tensor("custom")]; + tensor var_5548_pad_0 = const()[name = tensor("op_5548_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(484208640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(490762304))), name = tensor("mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(490762432)))]; + tensor var_5548_cast = conv(bias = mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_5546, groups = var_4943, pad = var_5548_pad_0, pad_type = var_5548_pad_type_0, strides = var_5544, weight = mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_347_cast)[name = tensor("op_5548_cast")]; + tensor var_5549_split_sizes_0 = const()[name = tensor("op_5549_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5549_axis_0 = const()[name = tensor("op_5549_axis_0"), val = tensor(1)]; + tensor var_5549_cast_0, tensor var_5549_cast_1 = split(axis = var_5549_axis_0, split_sizes = var_5549_split_sizes_0, x = var_5548_cast)[name = tensor("op_5549_cast")]; + tensor var_5551_mode_0 = const()[name = tensor("op_5551_mode_0"), val = tensor("EXACT")]; + tensor var_5551_cast = gelu(mode = var_5551_mode_0, x = var_5549_cast_1)[name = tensor("op_5551_cast")]; + tensor input_349_cast = mul(x = var_5549_cast_0, y = var_5551_cast)[name = tensor("input_349_cast")]; + tensor var_5555 = const()[name = tensor("op_5555"), val = tensor([1, 1])]; + tensor var_5557 = const()[name = tensor("op_5557"), val = tensor([1, 1])]; + tensor var_5559_pad_type_0 = const()[name = tensor("op_5559_pad_type_0"), val = tensor("custom")]; + tensor var_5559_pad_0 = const()[name = tensor("op_5559_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(490782976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset 
= tensor(494059840))), name = tensor("mid_block_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494059968)))]; + tensor var_5559_cast = conv(bias = mid_block_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_5557, groups = var_4943, pad = var_5559_pad_0, pad_type = var_5559_pad_type_0, strides = var_5555, weight = mid_block_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_349_cast)[name = tensor("op_5559_cast")]; + tensor inputs_163_cast = add(x = var_5559_cast, y = inputs_161_cast)[name = tensor("inputs_163_cast")]; + tensor var_5569 = const()[name = tensor("op_5569"), val = tensor([1])]; + tensor channels_mean_163_cast = reduce_mean(axes = var_5569, keep_dims = var_4938, x = inputs_163_cast)[name = tensor("channels_mean_163_cast")]; + tensor zero_mean_163_cast = sub(x = inputs_163_cast, y = channels_mean_163_cast)[name = tensor("zero_mean_163_cast")]; + tensor zero_mean_sq_163_cast = mul(x = zero_mean_163_cast, y = zero_mean_163_cast)[name = tensor("zero_mean_sq_163_cast")]; + tensor var_5573 = const()[name = tensor("op_5573"), val = tensor([1])]; + tensor var_5574_cast = reduce_mean(axes = var_5573, keep_dims = var_4938, x = zero_mean_sq_163_cast)[name = tensor("op_5574_cast")]; + tensor var_5575_to_fp16 = const()[name = tensor("op_5575_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5576_cast = add(x = var_5574_cast, y = var_5575_to_fp16)[name = tensor("op_5576_cast")]; + tensor denom_163_epsilon_0_to_fp16 = const()[name = tensor("denom_163_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_163_cast = rsqrt(epsilon = denom_163_epsilon_0_to_fp16, x = var_5576_cast)[name = tensor("denom_163_cast")]; + tensor out_163_cast = mul(x = zero_mean_163_cast, y = denom_163_cast)[name = tensor("out_163_cast")]; + tensor var_5580_to_fp16 = const()[name = tensor("op_5580_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494062592)))]; + tensor var_5581_cast = add(x = out_163_cast, y = var_5580_to_fp16)[name = tensor("op_5581_cast")]; + tensor var_5583_to_fp16 = const()[name = tensor("op_5583_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494065216)))]; + tensor hidden_states_227_cast = mul(x = var_5581_cast, y = var_5583_to_fp16)[name = tensor("hidden_states_227_cast")]; + tensor var_5590 = const()[name = tensor("op_5590"), val = tensor([1, 1])]; + tensor var_5592 = const()[name = tensor("op_5592"), val = tensor([1, 1])]; + tensor q_109_pad_type_0 = const()[name = tensor("q_109_pad_type_0"), val = tensor("custom")]; + tensor q_109_pad_0 = const()[name = tensor("q_109_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494067840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494887104))), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_109_cast = conv(dilations = var_5592, groups = var_4943, pad = 
q_109_pad_0, pad_type = q_109_pad_type_0, strides = var_5590, weight = mid_block_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_227_cast)[name = tensor("q_109_cast")]; + tensor var_5596 = const()[name = tensor("op_5596"), val = tensor([1, 1])]; + tensor var_5598 = const()[name = tensor("op_5598"), val = tensor([1, 1])]; + tensor k_109_pad_type_0 = const()[name = tensor("k_109_pad_type_0"), val = tensor("custom")]; + tensor k_109_pad_0 = const()[name = tensor("k_109_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494887232))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(495706496))), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_109_cast = conv(dilations = var_5598, groups = var_4943, pad = k_109_pad_0, pad_type = k_109_pad_type_0, strides = var_5596, weight = mid_block_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_227_cast)[name = tensor("k_109_cast")]; + tensor var_5602 = const()[name = tensor("op_5602"), val = tensor([1, 1])]; + tensor var_5604 = const()[name = tensor("op_5604"), val = tensor([1, 1])]; + tensor v_109_pad_type_0 = const()[name = tensor("v_109_pad_type_0"), val = tensor("custom")]; + tensor v_109_pad_0 = const()[name = tensor("v_109_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(495706624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(496525888))), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_109_cast = conv(dilations = var_5604, groups = var_4943, pad = v_109_pad_0, pad_type = v_109_pad_type_0, strides = var_5602, weight = mid_block_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_227_cast)[name = tensor("v_109_cast")]; + tensor var_5608 = const()[name = tensor("op_5608"), val = tensor([2, 20, 64, -1])]; + tensor var_5609_cast = reshape(shape = var_5608, x = q_109_cast)[name = tensor("op_5609_cast")]; + tensor var_5610 = const()[name = tensor("op_5610"), val = tensor([2, 20, 64, -1])]; + tensor var_5611_cast = reshape(shape = var_5610, x = k_109_cast)[name = tensor("op_5611_cast")]; + tensor var_5612 = const()[name = tensor("op_5612"), val = tensor([2, 20, 64, -1])]; + tensor var_5613_cast = reshape(shape = var_5612, x = v_109_cast)[name = tensor("op_5613_cast")]; + tensor attn_weights_217_transpose_x_0 = const()[name = tensor("attn_weights_217_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_217_transpose_y_0 = const()[name = tensor("attn_weights_217_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_217_cast = matmul(transpose_x = attn_weights_217_transpose_x_0, transpose_y = attn_weights_217_transpose_y_0, x = var_5609_cast, y = var_5611_cast)[name = tensor("attn_weights_217_cast")]; + tensor attn_weights_219_cast = mul(x = attn_weights_217_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_219_cast")]; + tensor var_5617_cast = softmax(axis = 
var_4927, x = attn_weights_219_cast)[name = tensor("op_5617_cast")]; + tensor attn_109_transpose_x_0 = const()[name = tensor("attn_109_transpose_x_0"), val = tensor(false)]; + tensor attn_109_transpose_y_0 = const()[name = tensor("attn_109_transpose_y_0"), val = tensor(true)]; + tensor attn_109_cast = matmul(transpose_x = attn_109_transpose_x_0, transpose_y = attn_109_transpose_y_0, x = var_5613_cast, y = var_5617_cast)[name = tensor("attn_109_cast")]; + tensor var_5621 = const()[name = tensor("op_5621"), val = tensor([2, 1280, 1, -1])]; + tensor input_351_cast = reshape(shape = var_5621, x = attn_109_cast)[name = tensor("input_351_cast")]; + tensor var_5626 = const()[name = tensor("op_5626"), val = tensor([1, 1])]; + tensor var_5628 = const()[name = tensor("op_5628"), val = tensor([1, 1])]; + tensor var_5630_pad_type_0 = const()[name = tensor("op_5630_pad_type_0"), val = tensor("custom")]; + tensor var_5630_pad_0 = const()[name = tensor("op_5630_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(496526016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(497345280))), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(497345408)))]; + tensor var_5630_cast = conv(bias = mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_5628, groups = var_4943, pad = var_5630_pad_0, pad_type = var_5630_pad_type_0, strides = var_5626, weight = mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_351_cast)[name = tensor("op_5630_cast")]; + tensor inputs_165_cast = add(x = var_5630_cast, y = inputs_163_cast)[name = tensor("inputs_165_cast")]; + tensor var_5634 = const()[name = tensor("op_5634"), val = tensor([1])]; + tensor channels_mean_165_cast = reduce_mean(axes = var_5634, keep_dims = var_4938, x = inputs_165_cast)[name = tensor("channels_mean_165_cast")]; + tensor zero_mean_165_cast = sub(x = inputs_165_cast, y = channels_mean_165_cast)[name = tensor("zero_mean_165_cast")]; + tensor zero_mean_sq_165_cast = mul(x = zero_mean_165_cast, y = zero_mean_165_cast)[name = tensor("zero_mean_sq_165_cast")]; + tensor var_5638 = const()[name = tensor("op_5638"), val = tensor([1])]; + tensor var_5639_cast = reduce_mean(axes = var_5638, keep_dims = var_4938, x = zero_mean_sq_165_cast)[name = tensor("op_5639_cast")]; + tensor var_5640_to_fp16 = const()[name = tensor("op_5640_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5641_cast = add(x = var_5639_cast, y = var_5640_to_fp16)[name = tensor("op_5641_cast")]; + tensor denom_165_epsilon_0_to_fp16 = const()[name = tensor("denom_165_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_165_cast = rsqrt(epsilon = denom_165_epsilon_0_to_fp16, x = var_5641_cast)[name = tensor("denom_165_cast")]; + tensor out_165_cast = mul(x = zero_mean_165_cast, y = denom_165_cast)[name = tensor("out_165_cast")]; + tensor var_5645_to_fp16 = const()[name = tensor("op_5645_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(497348032)))]; + tensor var_5646_cast = add(x = out_165_cast, y = var_5645_to_fp16)[name = tensor("op_5646_cast")]; + tensor var_5648_to_fp16 = const()[name = tensor("op_5648_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(497350656)))]; + tensor hidden_states_229_cast = mul(x = var_5646_cast, y = var_5648_to_fp16)[name = tensor("hidden_states_229_cast")]; + tensor var_5655 = const()[name = tensor("op_5655"), val = tensor([1, 1])]; + tensor var_5657 = const()[name = tensor("op_5657"), val = tensor([1, 1])]; + tensor q_111_pad_type_0 = const()[name = tensor("q_111_pad_type_0"), val = tensor("custom")]; + tensor q_111_pad_0 = const()[name = tensor("q_111_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(497353280))), lut = tensor([-0x1.7b4p-7, 0x1.7a4p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_111_cast = conv(dilations = var_5657, groups = var_4943, pad = q_111_pad_0, pad_type = q_111_pad_type_0, strides = var_5655, weight = mid_block_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_229_cast)[name = tensor("q_111_cast")]; + tensor var_5661 = const()[name = tensor("op_5661"), val = tensor([1, 1])]; + tensor var_5663 = const()[name = tensor("op_5663"), val = tensor([1, 1])]; + tensor k_111_pad_type_0 = const()[name = tensor("k_111_pad_type_0"), val = tensor("custom")]; + tensor k_111_pad_0 = const()[name = tensor("k_111_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(497558144))), lut = tensor([-0x1.038p-7, 0x1.044p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_111_cast = conv(dilations = var_5663, groups = var_4943, pad = k_111_pad_0, pad_type = k_111_pad_type_0, strides = var_5661, weight = mid_block_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_111_cast")]; + tensor var_5667 = const()[name = tensor("op_5667"), val = tensor([1, 1])]; + tensor var_5669 = const()[name = tensor("op_5669"), val = tensor([1, 1])]; + tensor v_111_pad_type_0 = const()[name = tensor("v_111_pad_type_0"), val = tensor("custom")]; + tensor v_111_pad_0 = const()[name = tensor("v_111_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(497885888))), lut = tensor([-0x1.354p-7, 0x1.35cp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_111_cast = conv(dilations = var_5669, groups = var_4943, pad = v_111_pad_0, pad_type = v_111_pad_type_0, strides = var_5667, weight = mid_block_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_111_cast")]; + tensor var_5673 
= const()[name = tensor("op_5673"), val = tensor([2, 20, 64, -1])]; + tensor var_5674_cast = reshape(shape = var_5673, x = q_111_cast)[name = tensor("op_5674_cast")]; + tensor var_5675 = const()[name = tensor("op_5675"), val = tensor([2, 20, 64, -1])]; + tensor var_5676_cast = reshape(shape = var_5675, x = k_111_cast)[name = tensor("op_5676_cast")]; + tensor var_5677 = const()[name = tensor("op_5677"), val = tensor([2, 20, 64, -1])]; + tensor var_5678_cast = reshape(shape = var_5677, x = v_111_cast)[name = tensor("op_5678_cast")]; + tensor attn_weights_221_transpose_x_0 = const()[name = tensor("attn_weights_221_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_221_transpose_y_0 = const()[name = tensor("attn_weights_221_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_221_cast = matmul(transpose_x = attn_weights_221_transpose_x_0, transpose_y = attn_weights_221_transpose_y_0, x = var_5674_cast, y = var_5676_cast)[name = tensor("attn_weights_221_cast")]; + tensor attn_weights_223_cast = mul(x = attn_weights_221_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_223_cast")]; + tensor var_5682_cast = softmax(axis = var_4927, x = attn_weights_223_cast)[name = tensor("op_5682_cast")]; + tensor attn_111_transpose_x_0 = const()[name = tensor("attn_111_transpose_x_0"), val = tensor(false)]; + tensor attn_111_transpose_y_0 = const()[name = tensor("attn_111_transpose_y_0"), val = tensor(true)]; + tensor attn_111_cast = matmul(transpose_x = attn_111_transpose_x_0, transpose_y = attn_111_transpose_y_0, x = var_5678_cast, y = var_5682_cast)[name = tensor("attn_111_cast")]; + tensor var_5686 = const()[name = tensor("op_5686"), val = tensor([2, 1280, 1, -1])]; + tensor input_353_cast = reshape(shape = var_5686, x = attn_111_cast)[name = tensor("input_353_cast")]; + tensor var_5691 = const()[name = tensor("op_5691"), val = tensor([1, 1])]; + tensor var_5693 = const()[name = tensor("op_5693"), val = tensor([1, 1])]; + tensor var_5695_pad_type_0 = const()[name = tensor("op_5695_pad_type_0"), val = tensor("custom")]; + tensor var_5695_pad_0 = const()[name = tensor("op_5695_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(498213632))), lut = tensor([-0x1.5a4p-8, 0x1.5bcp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(498418496)))]; + tensor var_5695_cast = conv(bias = mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_5693, groups = var_4943, pad = var_5695_pad_0, pad_type = var_5695_pad_type_0, strides = var_5691, weight = mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_353_cast)[name = tensor("op_5695_cast")]; + tensor inputs_167_cast = add(x = var_5695_cast, y = inputs_165_cast)[name = tensor("inputs_167_cast")]; + tensor var_5699 = const()[name = tensor("op_5699"), val = tensor([1])]; + tensor channels_mean_167_cast = reduce_mean(axes = var_5699, keep_dims = var_4938, x = inputs_167_cast)[name = tensor("channels_mean_167_cast")]; + tensor 
zero_mean_167_cast = sub(x = inputs_167_cast, y = channels_mean_167_cast)[name = tensor("zero_mean_167_cast")]; + tensor zero_mean_sq_167_cast = mul(x = zero_mean_167_cast, y = zero_mean_167_cast)[name = tensor("zero_mean_sq_167_cast")]; + tensor var_5703 = const()[name = tensor("op_5703"), val = tensor([1])]; + tensor var_5704_cast = reduce_mean(axes = var_5703, keep_dims = var_4938, x = zero_mean_sq_167_cast)[name = tensor("op_5704_cast")]; + tensor var_5705_to_fp16 = const()[name = tensor("op_5705_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5706_cast = add(x = var_5704_cast, y = var_5705_to_fp16)[name = tensor("op_5706_cast")]; + tensor denom_167_epsilon_0_to_fp16 = const()[name = tensor("denom_167_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_167_cast = rsqrt(epsilon = denom_167_epsilon_0_to_fp16, x = var_5706_cast)[name = tensor("denom_167_cast")]; + tensor out_167_cast = mul(x = zero_mean_167_cast, y = denom_167_cast)[name = tensor("out_167_cast")]; + tensor var_5710_to_fp16 = const()[name = tensor("op_5710_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(498421120)))]; + tensor var_5711_cast = add(x = out_167_cast, y = var_5710_to_fp16)[name = tensor("op_5711_cast")]; + tensor var_5713_to_fp16 = const()[name = tensor("op_5713_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(498423744)))]; + tensor input_355_cast = mul(x = var_5711_cast, y = var_5713_to_fp16)[name = tensor("input_355_cast")]; + tensor var_5721 = const()[name = tensor("op_5721"), val = tensor([1, 1])]; + tensor var_5723 = const()[name = tensor("op_5723"), val = tensor([1, 1])]; + tensor var_5725_pad_type_0 = const()[name = tensor("op_5725_pad_type_0"), val = tensor("custom")]; + tensor var_5725_pad_0 = const()[name = tensor("op_5725_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(498426368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(504980032))), name = tensor("mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(504980160)))]; + tensor var_5725_cast = conv(bias = mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_5723, groups = var_4943, pad = var_5725_pad_0, pad_type = var_5725_pad_type_0, strides = var_5721, weight = mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_355_cast)[name = tensor("op_5725_cast")]; + tensor var_5726_split_sizes_0 = const()[name = tensor("op_5726_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5726_axis_0 = const()[name = tensor("op_5726_axis_0"), val = tensor(1)]; + tensor var_5726_cast_0, tensor var_5726_cast_1 = split(axis = var_5726_axis_0, split_sizes = var_5726_split_sizes_0, x = var_5725_cast)[name = tensor("op_5726_cast")]; + tensor var_5728_mode_0 = const()[name = tensor("op_5728_mode_0"), val = tensor("EXACT")]; + tensor var_5728_cast = gelu(mode = var_5728_mode_0, x = var_5726_cast_1)[name = 
tensor("op_5728_cast")]; + tensor input_357_cast = mul(x = var_5726_cast_0, y = var_5728_cast)[name = tensor("input_357_cast")]; + tensor var_5732 = const()[name = tensor("op_5732"), val = tensor([1, 1])]; + tensor var_5734 = const()[name = tensor("op_5734"), val = tensor([1, 1])]; + tensor var_5736_pad_type_0 = const()[name = tensor("op_5736_pad_type_0"), val = tensor("custom")]; + tensor var_5736_pad_0 = const()[name = tensor("op_5736_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(505000704))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(508277568))), name = tensor("mid_block_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(508277696)))]; + tensor var_5736_cast = conv(bias = mid_block_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_5734, groups = var_4943, pad = var_5736_pad_0, pad_type = var_5736_pad_type_0, strides = var_5732, weight = mid_block_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_357_cast)[name = tensor("op_5736_cast")]; + tensor inputs_169_cast = add(x = var_5736_cast, y = inputs_167_cast)[name = tensor("inputs_169_cast")]; + tensor var_5746 = const()[name = tensor("op_5746"), val = tensor([1])]; + tensor channels_mean_169_cast = reduce_mean(axes = var_5746, keep_dims = var_4938, x = inputs_169_cast)[name = tensor("channels_mean_169_cast")]; + tensor zero_mean_169_cast = sub(x = inputs_169_cast, y = channels_mean_169_cast)[name = tensor("zero_mean_169_cast")]; + tensor zero_mean_sq_169_cast = mul(x = zero_mean_169_cast, y = zero_mean_169_cast)[name = tensor("zero_mean_sq_169_cast")]; + tensor var_5750 = const()[name = tensor("op_5750"), val = tensor([1])]; + tensor var_5751_cast = reduce_mean(axes = var_5750, keep_dims = var_4938, x = zero_mean_sq_169_cast)[name = tensor("op_5751_cast")]; + tensor var_5752_to_fp16 = const()[name = tensor("op_5752_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5753_cast = add(x = var_5751_cast, y = var_5752_to_fp16)[name = tensor("op_5753_cast")]; + tensor denom_169_epsilon_0_to_fp16 = const()[name = tensor("denom_169_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_169_cast = rsqrt(epsilon = denom_169_epsilon_0_to_fp16, x = var_5753_cast)[name = tensor("denom_169_cast")]; + tensor out_169_cast = mul(x = zero_mean_169_cast, y = denom_169_cast)[name = tensor("out_169_cast")]; + tensor var_5757_to_fp16 = const()[name = tensor("op_5757_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(508280320)))]; + tensor var_5758_cast = add(x = out_169_cast, y = var_5757_to_fp16)[name = tensor("op_5758_cast")]; + tensor var_5760_to_fp16 = const()[name = tensor("op_5760_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(508282944)))]; + tensor hidden_states_233_cast = mul(x = var_5758_cast, y = var_5760_to_fp16)[name = tensor("hidden_states_233_cast")]; + tensor var_5767 = const()[name = tensor("op_5767"), val = tensor([1, 1])]; + tensor var_5769 = 
const()[name = tensor("op_5769"), val = tensor([1, 1])]; + tensor q_113_pad_type_0 = const()[name = tensor("q_113_pad_type_0"), val = tensor("custom")]; + tensor q_113_pad_0 = const()[name = tensor("q_113_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(508285568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(509104832))), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_113_cast = conv(dilations = var_5769, groups = var_4943, pad = q_113_pad_0, pad_type = q_113_pad_type_0, strides = var_5767, weight = mid_block_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_233_cast)[name = tensor("q_113_cast")]; + tensor var_5773 = const()[name = tensor("op_5773"), val = tensor([1, 1])]; + tensor var_5775 = const()[name = tensor("op_5775"), val = tensor([1, 1])]; + tensor k_113_pad_type_0 = const()[name = tensor("k_113_pad_type_0"), val = tensor("custom")]; + tensor k_113_pad_0 = const()[name = tensor("k_113_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(509104960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(509924224))), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_113_cast = conv(dilations = var_5775, groups = var_4943, pad = k_113_pad_0, pad_type = k_113_pad_type_0, strides = var_5773, weight = mid_block_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_233_cast)[name = tensor("k_113_cast")]; + tensor var_5779 = const()[name = tensor("op_5779"), val = tensor([1, 1])]; + tensor var_5781 = const()[name = tensor("op_5781"), val = tensor([1, 1])]; + tensor v_113_pad_type_0 = const()[name = tensor("v_113_pad_type_0"), val = tensor("custom")]; + tensor v_113_pad_0 = const()[name = tensor("v_113_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(509924352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(510743616))), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_113_cast = conv(dilations = var_5781, groups = var_4943, pad = v_113_pad_0, pad_type = v_113_pad_type_0, strides = var_5779, weight = mid_block_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_233_cast)[name = tensor("v_113_cast")]; + tensor var_5785 = const()[name = tensor("op_5785"), val = tensor([2, 20, 64, -1])]; + tensor var_5786_cast = reshape(shape = var_5785, x = q_113_cast)[name = tensor("op_5786_cast")]; + tensor var_5787 = const()[name = tensor("op_5787"), val = tensor([2, 20, 64, -1])]; + tensor var_5788_cast = reshape(shape = var_5787, x = k_113_cast)[name = tensor("op_5788_cast")]; + tensor var_5789 = 
const()[name = tensor("op_5789"), val = tensor([2, 20, 64, -1])]; + tensor var_5790_cast = reshape(shape = var_5789, x = v_113_cast)[name = tensor("op_5790_cast")]; + tensor attn_weights_225_transpose_x_0 = const()[name = tensor("attn_weights_225_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_225_transpose_y_0 = const()[name = tensor("attn_weights_225_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_225_cast = matmul(transpose_x = attn_weights_225_transpose_x_0, transpose_y = attn_weights_225_transpose_y_0, x = var_5786_cast, y = var_5788_cast)[name = tensor("attn_weights_225_cast")]; + tensor attn_weights_227_cast = mul(x = attn_weights_225_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_227_cast")]; + tensor var_5794_cast = softmax(axis = var_4927, x = attn_weights_227_cast)[name = tensor("op_5794_cast")]; + tensor attn_113_transpose_x_0 = const()[name = tensor("attn_113_transpose_x_0"), val = tensor(false)]; + tensor attn_113_transpose_y_0 = const()[name = tensor("attn_113_transpose_y_0"), val = tensor(true)]; + tensor attn_113_cast = matmul(transpose_x = attn_113_transpose_x_0, transpose_y = attn_113_transpose_y_0, x = var_5790_cast, y = var_5794_cast)[name = tensor("attn_113_cast")]; + tensor var_5798 = const()[name = tensor("op_5798"), val = tensor([2, 1280, 1, -1])]; + tensor input_359_cast = reshape(shape = var_5798, x = attn_113_cast)[name = tensor("input_359_cast")]; + tensor var_5803 = const()[name = tensor("op_5803"), val = tensor([1, 1])]; + tensor var_5805 = const()[name = tensor("op_5805"), val = tensor([1, 1])]; + tensor var_5807_pad_type_0 = const()[name = tensor("op_5807_pad_type_0"), val = tensor("custom")]; + tensor var_5807_pad_0 = const()[name = tensor("op_5807_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(510743744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(511972608))), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(511972800)))]; + tensor var_5807_cast = conv(bias = mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_5805, groups = var_4943, pad = var_5807_pad_0, pad_type = var_5807_pad_type_0, strides = var_5803, weight = mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_359_cast)[name = tensor("op_5807_cast")]; + tensor inputs_171_cast = add(x = var_5807_cast, y = inputs_169_cast)[name = tensor("inputs_171_cast")]; + tensor var_5811 = const()[name = tensor("op_5811"), val = tensor([1])]; + tensor channels_mean_171_cast = reduce_mean(axes = var_5811, keep_dims = var_4938, x = inputs_171_cast)[name = tensor("channels_mean_171_cast")]; + tensor zero_mean_171_cast = sub(x = inputs_171_cast, y = channels_mean_171_cast)[name = tensor("zero_mean_171_cast")]; + tensor zero_mean_sq_171_cast = mul(x = zero_mean_171_cast, y = zero_mean_171_cast)[name = tensor("zero_mean_sq_171_cast")]; + tensor var_5815 = const()[name = tensor("op_5815"), val = tensor([1])]; 
+ tensor var_5816_cast = reduce_mean(axes = var_5815, keep_dims = var_4938, x = zero_mean_sq_171_cast)[name = tensor("op_5816_cast")]; + tensor var_5817_to_fp16 = const()[name = tensor("op_5817_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5818_cast = add(x = var_5816_cast, y = var_5817_to_fp16)[name = tensor("op_5818_cast")]; + tensor denom_171_epsilon_0_to_fp16 = const()[name = tensor("denom_171_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_171_cast = rsqrt(epsilon = denom_171_epsilon_0_to_fp16, x = var_5818_cast)[name = tensor("denom_171_cast")]; + tensor out_171_cast = mul(x = zero_mean_171_cast, y = denom_171_cast)[name = tensor("out_171_cast")]; + tensor var_5822_to_fp16 = const()[name = tensor("op_5822_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(511975424)))]; + tensor var_5823_cast = add(x = out_171_cast, y = var_5822_to_fp16)[name = tensor("op_5823_cast")]; + tensor var_5825_to_fp16 = const()[name = tensor("op_5825_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(511978048)))]; + tensor hidden_states_235_cast = mul(x = var_5823_cast, y = var_5825_to_fp16)[name = tensor("hidden_states_235_cast")]; + tensor var_5832 = const()[name = tensor("op_5832"), val = tensor([1, 1])]; + tensor var_5834 = const()[name = tensor("op_5834"), val = tensor([1, 1])]; + tensor q_115_pad_type_0 = const()[name = tensor("q_115_pad_type_0"), val = tensor("custom")]; + tensor q_115_pad_0 = const()[name = tensor("q_115_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(511980672))), lut = tensor([-0x1.6f8p-7, 0x1.6f8p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_115_cast = conv(dilations = var_5834, groups = var_4943, pad = q_115_pad_0, pad_type = q_115_pad_type_0, strides = var_5832, weight = mid_block_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_235_cast)[name = tensor("q_115_cast")]; + tensor var_5838 = const()[name = tensor("op_5838"), val = tensor([1, 1])]; + tensor var_5840 = const()[name = tensor("op_5840"), val = tensor([1, 1])]; + tensor k_115_pad_type_0 = const()[name = tensor("k_115_pad_type_0"), val = tensor("custom")]; + tensor k_115_pad_0 = const()[name = tensor("k_115_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(512185536))), lut = tensor([-0x1.e54p-8, 0x1.e5p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_115_cast = conv(dilations = var_5840, groups = var_4943, pad = k_115_pad_0, pad_type = k_115_pad_type_0, strides = var_5838, weight = mid_block_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_115_cast")]; + tensor var_5844 = const()[name = tensor("op_5844"), val = tensor([1, 1])]; + tensor var_5846 = const()[name = tensor("op_5846"), val = tensor([1, 1])]; + tensor v_115_pad_type_0 = const()[name = tensor("v_115_pad_type_0"), val = tensor("custom")]; + tensor 
v_115_pad_0 = const()[name = tensor("v_115_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(512513280))), lut = tensor([-0x1.1b4p-7, 0x1.1bp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_115_cast = conv(dilations = var_5846, groups = var_4943, pad = v_115_pad_0, pad_type = v_115_pad_type_0, strides = var_5844, weight = mid_block_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_115_cast")]; + tensor var_5850 = const()[name = tensor("op_5850"), val = tensor([2, 20, 64, -1])]; + tensor var_5851_cast = reshape(shape = var_5850, x = q_115_cast)[name = tensor("op_5851_cast")]; + tensor var_5852 = const()[name = tensor("op_5852"), val = tensor([2, 20, 64, -1])]; + tensor var_5853_cast = reshape(shape = var_5852, x = k_115_cast)[name = tensor("op_5853_cast")]; + tensor var_5854 = const()[name = tensor("op_5854"), val = tensor([2, 20, 64, -1])]; + tensor var_5855_cast = reshape(shape = var_5854, x = v_115_cast)[name = tensor("op_5855_cast")]; + tensor attn_weights_229_transpose_x_0 = const()[name = tensor("attn_weights_229_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_229_transpose_y_0 = const()[name = tensor("attn_weights_229_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_229_cast = matmul(transpose_x = attn_weights_229_transpose_x_0, transpose_y = attn_weights_229_transpose_y_0, x = var_5851_cast, y = var_5853_cast)[name = tensor("attn_weights_229_cast")]; + tensor attn_weights_231_cast = mul(x = attn_weights_229_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_231_cast")]; + tensor var_5859_cast = softmax(axis = var_4927, x = attn_weights_231_cast)[name = tensor("op_5859_cast")]; + tensor attn_115_transpose_x_0 = const()[name = tensor("attn_115_transpose_x_0"), val = tensor(false)]; + tensor attn_115_transpose_y_0 = const()[name = tensor("attn_115_transpose_y_0"), val = tensor(true)]; + tensor attn_115_cast = matmul(transpose_x = attn_115_transpose_x_0, transpose_y = attn_115_transpose_y_0, x = var_5855_cast, y = var_5859_cast)[name = tensor("attn_115_cast")]; + tensor var_5863 = const()[name = tensor("op_5863"), val = tensor([2, 1280, 1, -1])]; + tensor input_361_cast = reshape(shape = var_5863, x = attn_115_cast)[name = tensor("input_361_cast")]; + tensor var_5868 = const()[name = tensor("op_5868"), val = tensor([1, 1])]; + tensor var_5870 = const()[name = tensor("op_5870"), val = tensor([1, 1])]; + tensor var_5872_pad_type_0 = const()[name = tensor("op_5872_pad_type_0"), val = tensor("custom")]; + tensor var_5872_pad_0 = const()[name = tensor("op_5872_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(512841024))), lut = tensor([-0x1.478p-8, 0x1.47p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(513045888)))]; + tensor var_5872_cast = conv(bias = mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_5870, groups = var_4943, pad = var_5872_pad_0, pad_type = var_5872_pad_type_0, strides = var_5868, weight = mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_361_cast)[name = tensor("op_5872_cast")]; + tensor inputs_173_cast = add(x = var_5872_cast, y = inputs_171_cast)[name = tensor("inputs_173_cast")]; + tensor var_5876 = const()[name = tensor("op_5876"), val = tensor([1])]; + tensor channels_mean_173_cast = reduce_mean(axes = var_5876, keep_dims = var_4938, x = inputs_173_cast)[name = tensor("channels_mean_173_cast")]; + tensor zero_mean_173_cast = sub(x = inputs_173_cast, y = channels_mean_173_cast)[name = tensor("zero_mean_173_cast")]; + tensor zero_mean_sq_173_cast = mul(x = zero_mean_173_cast, y = zero_mean_173_cast)[name = tensor("zero_mean_sq_173_cast")]; + tensor var_5880 = const()[name = tensor("op_5880"), val = tensor([1])]; + tensor var_5881_cast = reduce_mean(axes = var_5880, keep_dims = var_4938, x = zero_mean_sq_173_cast)[name = tensor("op_5881_cast")]; + tensor var_5882_to_fp16 = const()[name = tensor("op_5882_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5883_cast = add(x = var_5881_cast, y = var_5882_to_fp16)[name = tensor("op_5883_cast")]; + tensor denom_173_epsilon_0_to_fp16 = const()[name = tensor("denom_173_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_173_cast = rsqrt(epsilon = denom_173_epsilon_0_to_fp16, x = var_5883_cast)[name = tensor("denom_173_cast")]; + tensor out_173_cast = mul(x = zero_mean_173_cast, y = denom_173_cast)[name = tensor("out_173_cast")]; + tensor var_5887_to_fp16 = const()[name = tensor("op_5887_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(513048512)))]; + tensor var_5888_cast = add(x = out_173_cast, y = var_5887_to_fp16)[name = tensor("op_5888_cast")]; + tensor var_5890_to_fp16 = const()[name = tensor("op_5890_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(513051136)))]; + tensor input_363_cast = mul(x = var_5888_cast, y = var_5890_to_fp16)[name = tensor("input_363_cast")]; + tensor var_5898 = const()[name = tensor("op_5898"), val = tensor([1, 1])]; + tensor var_5900 = const()[name = tensor("op_5900"), val = tensor([1, 1])]; + tensor var_5902_pad_type_0 = const()[name = tensor("op_5902_pad_type_0"), val = tensor("custom")]; + tensor var_5902_pad_0 = const()[name = tensor("op_5902_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(513053760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(519607424))), name = tensor("mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(519607552)))]; + tensor var_5902_cast = conv(bias = mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_5900, 
groups = var_4943, pad = var_5902_pad_0, pad_type = var_5902_pad_type_0, strides = var_5898, weight = mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_363_cast)[name = tensor("op_5902_cast")]; + tensor var_5903_split_sizes_0 = const()[name = tensor("op_5903_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5903_axis_0 = const()[name = tensor("op_5903_axis_0"), val = tensor(1)]; + tensor var_5903_cast_0, tensor var_5903_cast_1 = split(axis = var_5903_axis_0, split_sizes = var_5903_split_sizes_0, x = var_5902_cast)[name = tensor("op_5903_cast")]; + tensor var_5905_mode_0 = const()[name = tensor("op_5905_mode_0"), val = tensor("EXACT")]; + tensor var_5905_cast = gelu(mode = var_5905_mode_0, x = var_5903_cast_1)[name = tensor("op_5905_cast")]; + tensor input_365_cast = mul(x = var_5903_cast_0, y = var_5905_cast)[name = tensor("input_365_cast")]; + tensor var_5909 = const()[name = tensor("op_5909"), val = tensor([1, 1])]; + tensor var_5911 = const()[name = tensor("op_5911"), val = tensor([1, 1])]; + tensor var_5913_pad_type_0 = const()[name = tensor("op_5913_pad_type_0"), val = tensor("custom")]; + tensor var_5913_pad_0 = const()[name = tensor("op_5913_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(519628096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(522904960))), name = tensor("mid_block_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(522905088)))]; + tensor var_5913_cast = conv(bias = mid_block_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_5911, groups = var_4943, pad = var_5913_pad_0, pad_type = var_5913_pad_type_0, strides = var_5909, weight = mid_block_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_365_cast)[name = tensor("op_5913_cast")]; + tensor inputs_175_cast = add(x = var_5913_cast, y = inputs_173_cast)[name = tensor("inputs_175_cast")]; + tensor var_5923 = const()[name = tensor("op_5923"), val = tensor([1])]; + tensor channels_mean_175_cast = reduce_mean(axes = var_5923, keep_dims = var_4938, x = inputs_175_cast)[name = tensor("channels_mean_175_cast")]; + tensor zero_mean_175_cast = sub(x = inputs_175_cast, y = channels_mean_175_cast)[name = tensor("zero_mean_175_cast")]; + tensor zero_mean_sq_175_cast = mul(x = zero_mean_175_cast, y = zero_mean_175_cast)[name = tensor("zero_mean_sq_175_cast")]; + tensor var_5927 = const()[name = tensor("op_5927"), val = tensor([1])]; + tensor var_5928_cast = reduce_mean(axes = var_5927, keep_dims = var_4938, x = zero_mean_sq_175_cast)[name = tensor("op_5928_cast")]; + tensor var_5929_to_fp16 = const()[name = tensor("op_5929_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5930_cast = add(x = var_5928_cast, y = var_5929_to_fp16)[name = tensor("op_5930_cast")]; + tensor denom_175_epsilon_0_to_fp16 = const()[name = tensor("denom_175_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_175_cast = rsqrt(epsilon = denom_175_epsilon_0_to_fp16, x = var_5930_cast)[name = 
tensor("denom_175_cast")]; + tensor out_175_cast = mul(x = zero_mean_175_cast, y = denom_175_cast)[name = tensor("out_175_cast")]; + tensor var_5934_to_fp16 = const()[name = tensor("op_5934_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(522907712)))]; + tensor var_5935_cast = add(x = out_175_cast, y = var_5934_to_fp16)[name = tensor("op_5935_cast")]; + tensor var_5937_to_fp16 = const()[name = tensor("op_5937_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(522910336)))]; + tensor hidden_states_239_cast = mul(x = var_5935_cast, y = var_5937_to_fp16)[name = tensor("hidden_states_239_cast")]; + tensor var_5944 = const()[name = tensor("op_5944"), val = tensor([1, 1])]; + tensor var_5946 = const()[name = tensor("op_5946"), val = tensor([1, 1])]; + tensor q_117_pad_type_0 = const()[name = tensor("q_117_pad_type_0"), val = tensor("custom")]; + tensor q_117_pad_0 = const()[name = tensor("q_117_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(522912960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(523732224))), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_117_cast = conv(dilations = var_5946, groups = var_4943, pad = q_117_pad_0, pad_type = q_117_pad_type_0, strides = var_5944, weight = mid_block_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_239_cast)[name = tensor("q_117_cast")]; + tensor var_5950 = const()[name = tensor("op_5950"), val = tensor([1, 1])]; + tensor var_5952 = const()[name = tensor("op_5952"), val = tensor([1, 1])]; + tensor k_117_pad_type_0 = const()[name = tensor("k_117_pad_type_0"), val = tensor("custom")]; + tensor k_117_pad_0 = const()[name = tensor("k_117_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(523732352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(524551616))), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_117_cast = conv(dilations = var_5952, groups = var_4943, pad = k_117_pad_0, pad_type = k_117_pad_type_0, strides = var_5950, weight = mid_block_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_239_cast)[name = tensor("k_117_cast")]; + tensor var_5956 = const()[name = tensor("op_5956"), val = tensor([1, 1])]; + tensor var_5958 = const()[name = tensor("op_5958"), val = tensor([1, 1])]; + tensor v_117_pad_type_0 = const()[name = tensor("v_117_pad_type_0"), val = tensor("custom")]; + tensor v_117_pad_0 = const()[name = tensor("v_117_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(524551744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(525371008))), name = 
tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_117_cast = conv(dilations = var_5958, groups = var_4943, pad = v_117_pad_0, pad_type = v_117_pad_type_0, strides = var_5956, weight = mid_block_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_239_cast)[name = tensor("v_117_cast")]; + tensor var_5962 = const()[name = tensor("op_5962"), val = tensor([2, 20, 64, -1])]; + tensor var_5963_cast = reshape(shape = var_5962, x = q_117_cast)[name = tensor("op_5963_cast")]; + tensor var_5964 = const()[name = tensor("op_5964"), val = tensor([2, 20, 64, -1])]; + tensor var_5965_cast = reshape(shape = var_5964, x = k_117_cast)[name = tensor("op_5965_cast")]; + tensor var_5966 = const()[name = tensor("op_5966"), val = tensor([2, 20, 64, -1])]; + tensor var_5967_cast = reshape(shape = var_5966, x = v_117_cast)[name = tensor("op_5967_cast")]; + tensor attn_weights_233_transpose_x_0 = const()[name = tensor("attn_weights_233_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_233_transpose_y_0 = const()[name = tensor("attn_weights_233_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_233_cast = matmul(transpose_x = attn_weights_233_transpose_x_0, transpose_y = attn_weights_233_transpose_y_0, x = var_5963_cast, y = var_5965_cast)[name = tensor("attn_weights_233_cast")]; + tensor attn_weights_235_cast = mul(x = attn_weights_233_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_235_cast")]; + tensor var_5971_cast = softmax(axis = var_4927, x = attn_weights_235_cast)[name = tensor("op_5971_cast")]; + tensor attn_117_transpose_x_0 = const()[name = tensor("attn_117_transpose_x_0"), val = tensor(false)]; + tensor attn_117_transpose_y_0 = const()[name = tensor("attn_117_transpose_y_0"), val = tensor(true)]; + tensor attn_117_cast = matmul(transpose_x = attn_117_transpose_x_0, transpose_y = attn_117_transpose_y_0, x = var_5967_cast, y = var_5971_cast)[name = tensor("attn_117_cast")]; + tensor var_5975 = const()[name = tensor("op_5975"), val = tensor([2, 1280, 1, -1])]; + tensor input_367_cast = reshape(shape = var_5975, x = attn_117_cast)[name = tensor("input_367_cast")]; + tensor var_5980 = const()[name = tensor("op_5980"), val = tensor([1, 1])]; + tensor var_5982 = const()[name = tensor("op_5982"), val = tensor([1, 1])]; + tensor var_5984_pad_type_0 = const()[name = tensor("op_5984_pad_type_0"), val = tensor("custom")]; + tensor var_5984_pad_0 = const()[name = tensor("op_5984_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(525371136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526190400))), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526190528)))]; + tensor var_5984_cast = conv(bias = mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_5982, groups = var_4943, pad = var_5984_pad_0, pad_type = var_5984_pad_type_0, strides = 
var_5980, weight = mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_367_cast)[name = tensor("op_5984_cast")]; + tensor inputs_177_cast = add(x = var_5984_cast, y = inputs_175_cast)[name = tensor("inputs_177_cast")]; + tensor var_5988 = const()[name = tensor("op_5988"), val = tensor([1])]; + tensor channels_mean_177_cast = reduce_mean(axes = var_5988, keep_dims = var_4938, x = inputs_177_cast)[name = tensor("channels_mean_177_cast")]; + tensor zero_mean_177_cast = sub(x = inputs_177_cast, y = channels_mean_177_cast)[name = tensor("zero_mean_177_cast")]; + tensor zero_mean_sq_177_cast = mul(x = zero_mean_177_cast, y = zero_mean_177_cast)[name = tensor("zero_mean_sq_177_cast")]; + tensor var_5992 = const()[name = tensor("op_5992"), val = tensor([1])]; + tensor var_5993_cast = reduce_mean(axes = var_5992, keep_dims = var_4938, x = zero_mean_sq_177_cast)[name = tensor("op_5993_cast")]; + tensor var_5994_to_fp16 = const()[name = tensor("op_5994_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5995_cast = add(x = var_5993_cast, y = var_5994_to_fp16)[name = tensor("op_5995_cast")]; + tensor denom_177_epsilon_0_to_fp16 = const()[name = tensor("denom_177_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_177_cast = rsqrt(epsilon = denom_177_epsilon_0_to_fp16, x = var_5995_cast)[name = tensor("denom_177_cast")]; + tensor out_177_cast = mul(x = zero_mean_177_cast, y = denom_177_cast)[name = tensor("out_177_cast")]; + tensor var_5999_to_fp16 = const()[name = tensor("op_5999_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526193152)))]; + tensor var_6000_cast = add(x = out_177_cast, y = var_5999_to_fp16)[name = tensor("op_6000_cast")]; + tensor var_6002_to_fp16 = const()[name = tensor("op_6002_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526195776)))]; + tensor hidden_states_241_cast = mul(x = var_6000_cast, y = var_6002_to_fp16)[name = tensor("hidden_states_241_cast")]; + tensor var_6009 = const()[name = tensor("op_6009"), val = tensor([1, 1])]; + tensor var_6011 = const()[name = tensor("op_6011"), val = tensor([1, 1])]; + tensor q_119_pad_type_0 = const()[name = tensor("q_119_pad_type_0"), val = tensor("custom")]; + tensor q_119_pad_0 = const()[name = tensor("q_119_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526198400))), lut = tensor([-0x1.6a4p-7, 0x1.6ap-7]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_119_cast = conv(dilations = var_6011, groups = var_4943, pad = q_119_pad_0, pad_type = q_119_pad_type_0, strides = var_6009, weight = mid_block_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_241_cast)[name = tensor("q_119_cast")]; + tensor var_6015 = const()[name = tensor("op_6015"), val = tensor([1, 1])]; + tensor var_6017 = const()[name = tensor("op_6017"), val = tensor([1, 1])]; + tensor k_119_pad_type_0 = const()[name = tensor("k_119_pad_type_0"), val = tensor("custom")]; + tensor k_119_pad_0 = const()[name = tensor("k_119_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526403264))), lut = tensor([-0x1.d9cp-8, 0x1.d8p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_119_cast = conv(dilations = var_6017, groups = var_4943, pad = k_119_pad_0, pad_type = k_119_pad_type_0, strides = var_6015, weight = mid_block_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_119_cast")]; + tensor var_6021 = const()[name = tensor("op_6021"), val = tensor([1, 1])]; + tensor var_6023 = const()[name = tensor("op_6023"), val = tensor([1, 1])]; + tensor v_119_pad_type_0 = const()[name = tensor("v_119_pad_type_0"), val = tensor("custom")]; + tensor v_119_pad_0 = const()[name = tensor("v_119_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(526731008))), lut = tensor([-0x1.128p-7, 0x1.13p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_119_cast = conv(dilations = var_6023, groups = var_4943, pad = v_119_pad_0, pad_type = v_119_pad_type_0, strides = var_6021, weight = mid_block_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_119_cast")]; + tensor var_6027 = const()[name = tensor("op_6027"), val = tensor([2, 20, 64, -1])]; + tensor var_6028_cast = reshape(shape = var_6027, x = q_119_cast)[name = tensor("op_6028_cast")]; + tensor var_6029 = const()[name = tensor("op_6029"), val = tensor([2, 20, 64, -1])]; + tensor var_6030_cast = reshape(shape = var_6029, x = k_119_cast)[name = tensor("op_6030_cast")]; + tensor var_6031 = const()[name = tensor("op_6031"), val = tensor([2, 20, 64, -1])]; + tensor var_6032_cast = reshape(shape = var_6031, x = v_119_cast)[name = tensor("op_6032_cast")]; + tensor attn_weights_237_transpose_x_0 = const()[name = tensor("attn_weights_237_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_237_transpose_y_0 = const()[name = tensor("attn_weights_237_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_237_cast = matmul(transpose_x = attn_weights_237_transpose_x_0, transpose_y = attn_weights_237_transpose_y_0, x = var_6028_cast, y = var_6030_cast)[name = tensor("attn_weights_237_cast")]; + tensor attn_weights_239_cast = mul(x = attn_weights_237_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_239_cast")]; + tensor var_6036_cast = softmax(axis = var_4927, x = attn_weights_239_cast)[name = tensor("op_6036_cast")]; + tensor attn_119_transpose_x_0 = const()[name = tensor("attn_119_transpose_x_0"), val = tensor(false)]; + tensor attn_119_transpose_y_0 = const()[name = tensor("attn_119_transpose_y_0"), val = tensor(true)]; + tensor attn_119_cast = matmul(transpose_x = attn_119_transpose_x_0, transpose_y = attn_119_transpose_y_0, x = var_6032_cast, y = var_6036_cast)[name = tensor("attn_119_cast")]; + tensor var_6040 = const()[name = tensor("op_6040"), val = tensor([2, 1280, 1, -1])]; + tensor input_369_cast = reshape(shape = var_6040, x = attn_119_cast)[name = tensor("input_369_cast")]; + tensor var_6045 = const()[name = tensor("op_6045"), val = tensor([1, 1])]; + tensor var_6047 = const()[name = tensor("op_6047"), val = tensor([1, 1])]; 
+ tensor var_6049_pad_type_0 = const()[name = tensor("op_6049_pad_type_0"), val = tensor("custom")]; + tensor var_6049_pad_0 = const()[name = tensor("op_6049_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(527058752))), lut = tensor([-0x1.45p-8, 0x1.46p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(527263616)))]; + tensor var_6049_cast = conv(bias = mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_6047, groups = var_4943, pad = var_6049_pad_0, pad_type = var_6049_pad_type_0, strides = var_6045, weight = mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_369_cast)[name = tensor("op_6049_cast")]; + tensor inputs_179_cast = add(x = var_6049_cast, y = inputs_177_cast)[name = tensor("inputs_179_cast")]; + tensor var_6053 = const()[name = tensor("op_6053"), val = tensor([1])]; + tensor channels_mean_179_cast = reduce_mean(axes = var_6053, keep_dims = var_4938, x = inputs_179_cast)[name = tensor("channels_mean_179_cast")]; + tensor zero_mean_179_cast = sub(x = inputs_179_cast, y = channels_mean_179_cast)[name = tensor("zero_mean_179_cast")]; + tensor zero_mean_sq_179_cast = mul(x = zero_mean_179_cast, y = zero_mean_179_cast)[name = tensor("zero_mean_sq_179_cast")]; + tensor var_6057 = const()[name = tensor("op_6057"), val = tensor([1])]; + tensor var_6058_cast = reduce_mean(axes = var_6057, keep_dims = var_4938, x = zero_mean_sq_179_cast)[name = tensor("op_6058_cast")]; + tensor var_6059_to_fp16 = const()[name = tensor("op_6059_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6060_cast = add(x = var_6058_cast, y = var_6059_to_fp16)[name = tensor("op_6060_cast")]; + tensor denom_179_epsilon_0_to_fp16 = const()[name = tensor("denom_179_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_179_cast = rsqrt(epsilon = denom_179_epsilon_0_to_fp16, x = var_6060_cast)[name = tensor("denom_179_cast")]; + tensor out_179_cast = mul(x = zero_mean_179_cast, y = denom_179_cast)[name = tensor("out_179_cast")]; + tensor var_6064_to_fp16 = const()[name = tensor("op_6064_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(527266240)))]; + tensor var_6065_cast = add(x = out_179_cast, y = var_6064_to_fp16)[name = tensor("op_6065_cast")]; + tensor var_6067_to_fp16 = const()[name = tensor("op_6067_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(527268864)))]; + tensor input_371_cast = mul(x = var_6065_cast, y = var_6067_to_fp16)[name = tensor("input_371_cast")]; + tensor var_6075 = const()[name = tensor("op_6075"), val = tensor([1, 1])]; + tensor var_6077 = const()[name = tensor("op_6077"), val = tensor([1, 1])]; + tensor var_6079_pad_type_0 = const()[name = tensor("op_6079_pad_type_0"), val = tensor("custom")]; + tensor var_6079_pad_0 = const()[name = tensor("op_6079_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(527271488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533825152))), name = tensor("mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533825280)))]; + tensor var_6079_cast = conv(bias = mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_6077, groups = var_4943, pad = var_6079_pad_0, pad_type = var_6079_pad_type_0, strides = var_6075, weight = mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_371_cast)[name = tensor("op_6079_cast")]; + tensor var_6080_split_sizes_0 = const()[name = tensor("op_6080_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6080_axis_0 = const()[name = tensor("op_6080_axis_0"), val = tensor(1)]; + tensor var_6080_cast_0, tensor var_6080_cast_1 = split(axis = var_6080_axis_0, split_sizes = var_6080_split_sizes_0, x = var_6079_cast)[name = tensor("op_6080_cast")]; + tensor var_6082_mode_0 = const()[name = tensor("op_6082_mode_0"), val = tensor("EXACT")]; + tensor var_6082_cast = gelu(mode = var_6082_mode_0, x = var_6080_cast_1)[name = tensor("op_6082_cast")]; + tensor input_373_cast = mul(x = var_6080_cast_0, y = var_6082_cast)[name = tensor("input_373_cast")]; + tensor var_6086 = const()[name = tensor("op_6086"), val = tensor([1, 1])]; + tensor var_6088 = const()[name = tensor("op_6088"), val = tensor([1, 1])]; + tensor var_6090_pad_type_0 = const()[name = tensor("op_6090_pad_type_0"), val = tensor("custom")]; + tensor var_6090_pad_0 = const()[name = tensor("op_6090_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533845824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537122688))), name = tensor("mid_block_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537122816)))]; + tensor var_6090_cast = conv(bias = mid_block_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_6088, groups = var_4943, pad = var_6090_pad_0, pad_type = var_6090_pad_type_0, strides = var_6086, weight = mid_block_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_373_cast)[name = tensor("op_6090_cast")]; + tensor inputs_181_cast = add(x = var_6090_cast, y = inputs_179_cast)[name = tensor("inputs_181_cast")]; + tensor var_6100 = const()[name = tensor("op_6100"), val = tensor([1])]; + tensor channels_mean_181_cast = reduce_mean(axes = var_6100, keep_dims = var_4938, x = inputs_181_cast)[name = tensor("channels_mean_181_cast")]; + tensor 
zero_mean_181_cast = sub(x = inputs_181_cast, y = channels_mean_181_cast)[name = tensor("zero_mean_181_cast")]; + tensor zero_mean_sq_181_cast = mul(x = zero_mean_181_cast, y = zero_mean_181_cast)[name = tensor("zero_mean_sq_181_cast")]; + tensor var_6104 = const()[name = tensor("op_6104"), val = tensor([1])]; + tensor var_6105_cast = reduce_mean(axes = var_6104, keep_dims = var_4938, x = zero_mean_sq_181_cast)[name = tensor("op_6105_cast")]; + tensor var_6106_to_fp16 = const()[name = tensor("op_6106_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6107_cast = add(x = var_6105_cast, y = var_6106_to_fp16)[name = tensor("op_6107_cast")]; + tensor denom_181_epsilon_0_to_fp16 = const()[name = tensor("denom_181_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_181_cast = rsqrt(epsilon = denom_181_epsilon_0_to_fp16, x = var_6107_cast)[name = tensor("denom_181_cast")]; + tensor out_181_cast = mul(x = zero_mean_181_cast, y = denom_181_cast)[name = tensor("out_181_cast")]; + tensor var_6111_to_fp16 = const()[name = tensor("op_6111_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537125440)))]; + tensor var_6112_cast = add(x = out_181_cast, y = var_6111_to_fp16)[name = tensor("op_6112_cast")]; + tensor var_6114_to_fp16 = const()[name = tensor("op_6114_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537128064)))]; + tensor hidden_states_245_cast = mul(x = var_6112_cast, y = var_6114_to_fp16)[name = tensor("hidden_states_245_cast")]; + tensor var_6121 = const()[name = tensor("op_6121"), val = tensor([1, 1])]; + tensor var_6123 = const()[name = tensor("op_6123"), val = tensor([1, 1])]; + tensor q_121_pad_type_0 = const()[name = tensor("q_121_pad_type_0"), val = tensor("custom")]; + tensor q_121_pad_0 = const()[name = tensor("q_121_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537130688))), lut = tensor([-0x1.29cp-5, -0x1.694p-7, 0x1.64p-7, 0x1.28cp-5]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_121_cast = conv(dilations = var_6123, groups = var_4943, pad = q_121_pad_0, pad_type = q_121_pad_type_0, strides = var_6121, weight = mid_block_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_245_cast)[name = tensor("q_121_cast")]; + tensor var_6127 = const()[name = tensor("op_6127"), val = tensor([1, 1])]; + tensor var_6129 = const()[name = tensor("op_6129"), val = tensor([1, 1])]; + tensor k_121_pad_type_0 = const()[name = tensor("k_121_pad_type_0"), val = tensor("custom")]; + tensor k_121_pad_0 = const()[name = tensor("k_121_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537540352))), lut = tensor([-0x1.28p-5, -0x1.644p-7, 0x1.688p-7, 0x1.294p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_121_cast = conv(dilations = var_6129, groups = var_4943, pad = k_121_pad_0, pad_type = k_121_pad_type_0, strides = var_6127, weight = 
mid_block_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_245_cast)[name = tensor("k_121_cast")]; + tensor var_6133 = const()[name = tensor("op_6133"), val = tensor([1, 1])]; + tensor var_6135 = const()[name = tensor("op_6135"), val = tensor([1, 1])]; + tensor v_121_pad_type_0 = const()[name = tensor("v_121_pad_type_0"), val = tensor("custom")]; + tensor v_121_pad_0 = const()[name = tensor("v_121_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(537950016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(538769280))), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_121_cast = conv(dilations = var_6135, groups = var_4943, pad = v_121_pad_0, pad_type = v_121_pad_type_0, strides = var_6133, weight = mid_block_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_245_cast)[name = tensor("v_121_cast")]; + tensor var_6139 = const()[name = tensor("op_6139"), val = tensor([2, 20, 64, -1])]; + tensor var_6140_cast = reshape(shape = var_6139, x = q_121_cast)[name = tensor("op_6140_cast")]; + tensor var_6141 = const()[name = tensor("op_6141"), val = tensor([2, 20, 64, -1])]; + tensor var_6142_cast = reshape(shape = var_6141, x = k_121_cast)[name = tensor("op_6142_cast")]; + tensor var_6143 = const()[name = tensor("op_6143"), val = tensor([2, 20, 64, -1])]; + tensor var_6144_cast = reshape(shape = var_6143, x = v_121_cast)[name = tensor("op_6144_cast")]; + tensor attn_weights_241_transpose_x_0 = const()[name = tensor("attn_weights_241_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_241_transpose_y_0 = const()[name = tensor("attn_weights_241_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_241_cast = matmul(transpose_x = attn_weights_241_transpose_x_0, transpose_y = attn_weights_241_transpose_y_0, x = var_6140_cast, y = var_6142_cast)[name = tensor("attn_weights_241_cast")]; + tensor attn_weights_243_cast = mul(x = attn_weights_241_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_243_cast")]; + tensor var_6148_cast = softmax(axis = var_4927, x = attn_weights_243_cast)[name = tensor("op_6148_cast")]; + tensor attn_121_transpose_x_0 = const()[name = tensor("attn_121_transpose_x_0"), val = tensor(false)]; + tensor attn_121_transpose_y_0 = const()[name = tensor("attn_121_transpose_y_0"), val = tensor(true)]; + tensor attn_121_cast = matmul(transpose_x = attn_121_transpose_x_0, transpose_y = attn_121_transpose_y_0, x = var_6144_cast, y = var_6148_cast)[name = tensor("attn_121_cast")]; + tensor var_6152 = const()[name = tensor("op_6152"), val = tensor([2, 1280, 1, -1])]; + tensor input_375_cast = reshape(shape = var_6152, x = attn_121_cast)[name = tensor("input_375_cast")]; + tensor var_6157 = const()[name = tensor("op_6157"), val = tensor([1, 1])]; + tensor var_6159 = const()[name = tensor("op_6159"), val = tensor([1, 1])]; + tensor var_6161_pad_type_0 = const()[name = tensor("op_6161_pad_type_0"), val = tensor("custom")]; + tensor var_6161_pad_0 = const()[name = tensor("op_6161_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(538769408))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(539588672))), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(539588800)))]; + tensor var_6161_cast = conv(bias = mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_6159, groups = var_4943, pad = var_6161_pad_0, pad_type = var_6161_pad_type_0, strides = var_6157, weight = mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_375_cast)[name = tensor("op_6161_cast")]; + tensor inputs_183_cast = add(x = var_6161_cast, y = inputs_181_cast)[name = tensor("inputs_183_cast")]; + tensor var_6165 = const()[name = tensor("op_6165"), val = tensor([1])]; + tensor channels_mean_183_cast = reduce_mean(axes = var_6165, keep_dims = var_4938, x = inputs_183_cast)[name = tensor("channels_mean_183_cast")]; + tensor zero_mean_183_cast = sub(x = inputs_183_cast, y = channels_mean_183_cast)[name = tensor("zero_mean_183_cast")]; + tensor zero_mean_sq_183_cast = mul(x = zero_mean_183_cast, y = zero_mean_183_cast)[name = tensor("zero_mean_sq_183_cast")]; + tensor var_6169 = const()[name = tensor("op_6169"), val = tensor([1])]; + tensor var_6170_cast = reduce_mean(axes = var_6169, keep_dims = var_4938, x = zero_mean_sq_183_cast)[name = tensor("op_6170_cast")]; + tensor var_6171_to_fp16 = const()[name = tensor("op_6171_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6172_cast = add(x = var_6170_cast, y = var_6171_to_fp16)[name = tensor("op_6172_cast")]; + tensor denom_183_epsilon_0_to_fp16 = const()[name = tensor("denom_183_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_183_cast = rsqrt(epsilon = denom_183_epsilon_0_to_fp16, x = var_6172_cast)[name = tensor("denom_183_cast")]; + tensor out_183_cast = mul(x = zero_mean_183_cast, y = denom_183_cast)[name = tensor("out_183_cast")]; + tensor var_6176_to_fp16 = const()[name = tensor("op_6176_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(539591424)))]; + tensor var_6177_cast = add(x = out_183_cast, y = var_6176_to_fp16)[name = tensor("op_6177_cast")]; + tensor var_6179_to_fp16 = const()[name = tensor("op_6179_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(539594048)))]; + tensor hidden_states_247_cast = mul(x = var_6177_cast, y = var_6179_to_fp16)[name = tensor("hidden_states_247_cast")]; + tensor var_6186 = const()[name = tensor("op_6186"), val = tensor([1, 1])]; + tensor var_6188 = const()[name = tensor("op_6188"), val = tensor([1, 1])]; + tensor q_123_pad_type_0 = const()[name = tensor("q_123_pad_type_0"), val = tensor("custom")]; + tensor q_123_pad_0 = const()[name = tensor("q_123_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(539596672))), lut = tensor([-0x1.664p-7, 0x1.668p-7]), name = 
tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_123_cast = conv(dilations = var_6188, groups = var_4943, pad = q_123_pad_0, pad_type = q_123_pad_type_0, strides = var_6186, weight = mid_block_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_247_cast)[name = tensor("q_123_cast")]; + tensor var_6192 = const()[name = tensor("op_6192"), val = tensor([1, 1])]; + tensor var_6194 = const()[name = tensor("op_6194"), val = tensor([1, 1])]; + tensor k_123_pad_type_0 = const()[name = tensor("k_123_pad_type_0"), val = tensor("custom")]; + tensor k_123_pad_0 = const()[name = tensor("k_123_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(539801536))), lut = tensor([-0x1.c18p-8, 0x1.c1p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_123_cast = conv(dilations = var_6194, groups = var_4943, pad = k_123_pad_0, pad_type = k_123_pad_type_0, strides = var_6192, weight = mid_block_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_123_cast")]; + tensor var_6198 = const()[name = tensor("op_6198"), val = tensor([1, 1])]; + tensor var_6200 = const()[name = tensor("op_6200"), val = tensor([1, 1])]; + tensor v_123_pad_type_0 = const()[name = tensor("v_123_pad_type_0"), val = tensor("custom")]; + tensor v_123_pad_0 = const()[name = tensor("v_123_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(540129280))), lut = tensor([-0x1.f8cp-8, 0x1.f88p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_123_cast = conv(dilations = var_6200, groups = var_4943, pad = v_123_pad_0, pad_type = v_123_pad_type_0, strides = var_6198, weight = mid_block_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_123_cast")]; + tensor var_6204 = const()[name = tensor("op_6204"), val = tensor([2, 20, 64, -1])]; + tensor var_6205_cast = reshape(shape = var_6204, x = q_123_cast)[name = tensor("op_6205_cast")]; + tensor var_6206 = const()[name = tensor("op_6206"), val = tensor([2, 20, 64, -1])]; + tensor var_6207_cast = reshape(shape = var_6206, x = k_123_cast)[name = tensor("op_6207_cast")]; + tensor var_6208 = const()[name = tensor("op_6208"), val = tensor([2, 20, 64, -1])]; + tensor var_6209_cast = reshape(shape = var_6208, x = v_123_cast)[name = tensor("op_6209_cast")]; + tensor attn_weights_245_transpose_x_0 = const()[name = tensor("attn_weights_245_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_245_transpose_y_0 = const()[name = tensor("attn_weights_245_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_245_cast = matmul(transpose_x = attn_weights_245_transpose_x_0, transpose_y = attn_weights_245_transpose_y_0, x = var_6205_cast, y = var_6207_cast)[name = tensor("attn_weights_245_cast")]; + tensor attn_weights_247_cast = mul(x = attn_weights_245_cast, y = 
var_4934_to_fp16)[name = tensor("attn_weights_247_cast")]; + tensor var_6213_cast = softmax(axis = var_4927, x = attn_weights_247_cast)[name = tensor("op_6213_cast")]; + tensor attn_123_transpose_x_0 = const()[name = tensor("attn_123_transpose_x_0"), val = tensor(false)]; + tensor attn_123_transpose_y_0 = const()[name = tensor("attn_123_transpose_y_0"), val = tensor(true)]; + tensor attn_123_cast = matmul(transpose_x = attn_123_transpose_x_0, transpose_y = attn_123_transpose_y_0, x = var_6209_cast, y = var_6213_cast)[name = tensor("attn_123_cast")]; + tensor var_6217 = const()[name = tensor("op_6217"), val = tensor([2, 1280, 1, -1])]; + tensor input_377_cast = reshape(shape = var_6217, x = attn_123_cast)[name = tensor("input_377_cast")]; + tensor var_6222 = const()[name = tensor("op_6222"), val = tensor([1, 1])]; + tensor var_6224 = const()[name = tensor("op_6224"), val = tensor([1, 1])]; + tensor var_6226_pad_type_0 = const()[name = tensor("op_6226_pad_type_0"), val = tensor("custom")]; + tensor var_6226_pad_0 = const()[name = tensor("op_6226_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(540457024))), lut = tensor([-0x1.35p-8, 0x1.37p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(540661888)))]; + tensor var_6226_cast = conv(bias = mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_6224, groups = var_4943, pad = var_6226_pad_0, pad_type = var_6226_pad_type_0, strides = var_6222, weight = mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_377_cast)[name = tensor("op_6226_cast")]; + tensor inputs_185_cast = add(x = var_6226_cast, y = inputs_183_cast)[name = tensor("inputs_185_cast")]; + tensor var_6230 = const()[name = tensor("op_6230"), val = tensor([1])]; + tensor channels_mean_185_cast = reduce_mean(axes = var_6230, keep_dims = var_4938, x = inputs_185_cast)[name = tensor("channels_mean_185_cast")]; + tensor zero_mean_185_cast = sub(x = inputs_185_cast, y = channels_mean_185_cast)[name = tensor("zero_mean_185_cast")]; + tensor zero_mean_sq_185_cast = mul(x = zero_mean_185_cast, y = zero_mean_185_cast)[name = tensor("zero_mean_sq_185_cast")]; + tensor var_6234 = const()[name = tensor("op_6234"), val = tensor([1])]; + tensor var_6235_cast = reduce_mean(axes = var_6234, keep_dims = var_4938, x = zero_mean_sq_185_cast)[name = tensor("op_6235_cast")]; + tensor var_6236_to_fp16 = const()[name = tensor("op_6236_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6237_cast = add(x = var_6235_cast, y = var_6236_to_fp16)[name = tensor("op_6237_cast")]; + tensor denom_185_epsilon_0_to_fp16 = const()[name = tensor("denom_185_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_185_cast = rsqrt(epsilon = denom_185_epsilon_0_to_fp16, x = var_6237_cast)[name = tensor("denom_185_cast")]; + tensor out_185_cast = mul(x = zero_mean_185_cast, y = denom_185_cast)[name = tensor("out_185_cast")]; + tensor var_6241_to_fp16 = const()[name = tensor("op_6241_to_fp16"), 
val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(540664512)))]; + tensor var_6242_cast = add(x = out_185_cast, y = var_6241_to_fp16)[name = tensor("op_6242_cast")]; + tensor var_6244_to_fp16 = const()[name = tensor("op_6244_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(540667136)))]; + tensor input_379_cast = mul(x = var_6242_cast, y = var_6244_to_fp16)[name = tensor("input_379_cast")]; + tensor var_6252 = const()[name = tensor("op_6252"), val = tensor([1, 1])]; + tensor var_6254 = const()[name = tensor("op_6254"), val = tensor([1, 1])]; + tensor var_6256_pad_type_0 = const()[name = tensor("op_6256_pad_type_0"), val = tensor("custom")]; + tensor var_6256_pad_0 = const()[name = tensor("op_6256_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(540669760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(547223424))), name = tensor("mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(547223552)))]; + tensor var_6256_cast = conv(bias = mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_6254, groups = var_4943, pad = var_6256_pad_0, pad_type = var_6256_pad_type_0, strides = var_6252, weight = mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_379_cast)[name = tensor("op_6256_cast")]; + tensor var_6257_split_sizes_0 = const()[name = tensor("op_6257_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6257_axis_0 = const()[name = tensor("op_6257_axis_0"), val = tensor(1)]; + tensor var_6257_cast_0, tensor var_6257_cast_1 = split(axis = var_6257_axis_0, split_sizes = var_6257_split_sizes_0, x = var_6256_cast)[name = tensor("op_6257_cast")]; + tensor var_6259_mode_0 = const()[name = tensor("op_6259_mode_0"), val = tensor("EXACT")]; + tensor var_6259_cast = gelu(mode = var_6259_mode_0, x = var_6257_cast_1)[name = tensor("op_6259_cast")]; + tensor input_381_cast = mul(x = var_6257_cast_0, y = var_6259_cast)[name = tensor("input_381_cast")]; + tensor var_6263 = const()[name = tensor("op_6263"), val = tensor([1, 1])]; + tensor var_6265 = const()[name = tensor("op_6265"), val = tensor([1, 1])]; + tensor var_6267_pad_type_0 = const()[name = tensor("op_6267_pad_type_0"), val = tensor("custom")]; + tensor var_6267_pad_0 = const()[name = tensor("op_6267_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(547244096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(550520960))), name = tensor("mid_block_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = 
tensor("mid_block_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(550521088)))]; + tensor var_6267_cast = conv(bias = mid_block_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_6265, groups = var_4943, pad = var_6267_pad_0, pad_type = var_6267_pad_type_0, strides = var_6263, weight = mid_block_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_381_cast)[name = tensor("op_6267_cast")]; + tensor inputs_187_cast = add(x = var_6267_cast, y = inputs_185_cast)[name = tensor("inputs_187_cast")]; + tensor var_6277 = const()[name = tensor("op_6277"), val = tensor([1])]; + tensor channels_mean_187_cast = reduce_mean(axes = var_6277, keep_dims = var_4938, x = inputs_187_cast)[name = tensor("channels_mean_187_cast")]; + tensor zero_mean_187_cast = sub(x = inputs_187_cast, y = channels_mean_187_cast)[name = tensor("zero_mean_187_cast")]; + tensor zero_mean_sq_187_cast = mul(x = zero_mean_187_cast, y = zero_mean_187_cast)[name = tensor("zero_mean_sq_187_cast")]; + tensor var_6281 = const()[name = tensor("op_6281"), val = tensor([1])]; + tensor var_6282_cast = reduce_mean(axes = var_6281, keep_dims = var_4938, x = zero_mean_sq_187_cast)[name = tensor("op_6282_cast")]; + tensor var_6283_to_fp16 = const()[name = tensor("op_6283_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6284_cast = add(x = var_6282_cast, y = var_6283_to_fp16)[name = tensor("op_6284_cast")]; + tensor denom_187_epsilon_0_to_fp16 = const()[name = tensor("denom_187_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_187_cast = rsqrt(epsilon = denom_187_epsilon_0_to_fp16, x = var_6284_cast)[name = tensor("denom_187_cast")]; + tensor out_187_cast = mul(x = zero_mean_187_cast, y = denom_187_cast)[name = tensor("out_187_cast")]; + tensor var_6288_to_fp16 = const()[name = tensor("op_6288_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(550523712)))]; + tensor var_6289_cast = add(x = out_187_cast, y = var_6288_to_fp16)[name = tensor("op_6289_cast")]; + tensor var_6291_to_fp16 = const()[name = tensor("op_6291_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(550526336)))]; + tensor hidden_states_251_cast = mul(x = var_6289_cast, y = var_6291_to_fp16)[name = tensor("hidden_states_251_cast")]; + tensor var_6298 = const()[name = tensor("op_6298"), val = tensor([1, 1])]; + tensor var_6300 = const()[name = tensor("op_6300"), val = tensor([1, 1])]; + tensor q_125_pad_type_0 = const()[name = tensor("q_125_pad_type_0"), val = tensor("custom")]; + tensor q_125_pad_0 = const()[name = tensor("q_125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(550528960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(551348224))), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_125_cast = conv(dilations = var_6300, groups = var_4943, pad = q_125_pad_0, pad_type = q_125_pad_type_0, strides = var_6298, weight = mid_block_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_251_cast)[name = tensor("q_125_cast")]; + tensor var_6304 = const()[name = 
tensor("op_6304"), val = tensor([1, 1])]; + tensor var_6306 = const()[name = tensor("op_6306"), val = tensor([1, 1])]; + tensor k_125_pad_type_0 = const()[name = tensor("k_125_pad_type_0"), val = tensor("custom")]; + tensor k_125_pad_0 = const()[name = tensor("k_125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(551348352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(552167616))), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_125_cast = conv(dilations = var_6306, groups = var_4943, pad = k_125_pad_0, pad_type = k_125_pad_type_0, strides = var_6304, weight = mid_block_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_251_cast)[name = tensor("k_125_cast")]; + tensor var_6310 = const()[name = tensor("op_6310"), val = tensor([1, 1])]; + tensor var_6312 = const()[name = tensor("op_6312"), val = tensor([1, 1])]; + tensor v_125_pad_type_0 = const()[name = tensor("v_125_pad_type_0"), val = tensor("custom")]; + tensor v_125_pad_0 = const()[name = tensor("v_125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(552167744))), lut = tensor([-0x1.454p-5, -0x1.88p-7, 0x1.864p-7, 0x1.44cp-5]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_125_cast = conv(dilations = var_6312, groups = var_4943, pad = v_125_pad_0, pad_type = v_125_pad_type_0, strides = var_6310, weight = mid_block_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_251_cast)[name = tensor("v_125_cast")]; + tensor var_6316 = const()[name = tensor("op_6316"), val = tensor([2, 20, 64, -1])]; + tensor var_6317_cast = reshape(shape = var_6316, x = q_125_cast)[name = tensor("op_6317_cast")]; + tensor var_6318 = const()[name = tensor("op_6318"), val = tensor([2, 20, 64, -1])]; + tensor var_6319_cast = reshape(shape = var_6318, x = k_125_cast)[name = tensor("op_6319_cast")]; + tensor var_6320 = const()[name = tensor("op_6320"), val = tensor([2, 20, 64, -1])]; + tensor var_6321_cast = reshape(shape = var_6320, x = v_125_cast)[name = tensor("op_6321_cast")]; + tensor attn_weights_249_transpose_x_0 = const()[name = tensor("attn_weights_249_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_249_transpose_y_0 = const()[name = tensor("attn_weights_249_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_249_cast = matmul(transpose_x = attn_weights_249_transpose_x_0, transpose_y = attn_weights_249_transpose_y_0, x = var_6317_cast, y = var_6319_cast)[name = tensor("attn_weights_249_cast")]; + tensor attn_weights_251_cast = mul(x = attn_weights_249_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_251_cast")]; + tensor var_6325_cast = softmax(axis = var_4927, x = attn_weights_251_cast)[name = tensor("op_6325_cast")]; + tensor attn_125_transpose_x_0 = const()[name = tensor("attn_125_transpose_x_0"), val = tensor(false)]; + tensor attn_125_transpose_y_0 = const()[name = tensor("attn_125_transpose_y_0"), val = tensor(true)]; + 
tensor attn_125_cast = matmul(transpose_x = attn_125_transpose_x_0, transpose_y = attn_125_transpose_y_0, x = var_6321_cast, y = var_6325_cast)[name = tensor("attn_125_cast")]; + tensor var_6329 = const()[name = tensor("op_6329"), val = tensor([2, 1280, 1, -1])]; + tensor input_383_cast = reshape(shape = var_6329, x = attn_125_cast)[name = tensor("input_383_cast")]; + tensor var_6334 = const()[name = tensor("op_6334"), val = tensor([1, 1])]; + tensor var_6336 = const()[name = tensor("op_6336"), val = tensor([1, 1])]; + tensor var_6338_pad_type_0 = const()[name = tensor("op_6338_pad_type_0"), val = tensor("custom")]; + tensor var_6338_pad_0 = const()[name = tensor("op_6338_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(552577408))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553396672))), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553396800)))]; + tensor var_6338_cast = conv(bias = mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_6336, groups = var_4943, pad = var_6338_pad_0, pad_type = var_6338_pad_type_0, strides = var_6334, weight = mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_383_cast)[name = tensor("op_6338_cast")]; + tensor inputs_189_cast = add(x = var_6338_cast, y = inputs_187_cast)[name = tensor("inputs_189_cast")]; + tensor var_6342 = const()[name = tensor("op_6342"), val = tensor([1])]; + tensor channels_mean_189_cast = reduce_mean(axes = var_6342, keep_dims = var_4938, x = inputs_189_cast)[name = tensor("channels_mean_189_cast")]; + tensor zero_mean_189_cast = sub(x = inputs_189_cast, y = channels_mean_189_cast)[name = tensor("zero_mean_189_cast")]; + tensor zero_mean_sq_189_cast = mul(x = zero_mean_189_cast, y = zero_mean_189_cast)[name = tensor("zero_mean_sq_189_cast")]; + tensor var_6346 = const()[name = tensor("op_6346"), val = tensor([1])]; + tensor var_6347_cast = reduce_mean(axes = var_6346, keep_dims = var_4938, x = zero_mean_sq_189_cast)[name = tensor("op_6347_cast")]; + tensor var_6348_to_fp16 = const()[name = tensor("op_6348_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6349_cast = add(x = var_6347_cast, y = var_6348_to_fp16)[name = tensor("op_6349_cast")]; + tensor denom_189_epsilon_0_to_fp16 = const()[name = tensor("denom_189_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_189_cast = rsqrt(epsilon = denom_189_epsilon_0_to_fp16, x = var_6349_cast)[name = tensor("denom_189_cast")]; + tensor out_189_cast = mul(x = zero_mean_189_cast, y = denom_189_cast)[name = tensor("out_189_cast")]; + tensor var_6353_to_fp16 = const()[name = tensor("op_6353_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553399424)))]; + tensor var_6354_cast = add(x = out_189_cast, y = var_6353_to_fp16)[name = tensor("op_6354_cast")]; + tensor var_6356_to_fp16 = const()[name = tensor("op_6356_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(553402048)))]; + tensor hidden_states_253_cast = mul(x = var_6354_cast, y = var_6356_to_fp16)[name = tensor("hidden_states_253_cast")]; + tensor var_6363 = const()[name = tensor("op_6363"), val = tensor([1, 1])]; + tensor var_6365 = const()[name = tensor("op_6365"), val = tensor([1, 1])]; + tensor q_127_pad_type_0 = const()[name = tensor("q_127_pad_type_0"), val = tensor("custom")]; + tensor q_127_pad_0 = const()[name = tensor("q_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553404672))), lut = tensor([-0x1.61p-7, 0x1.61p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_127_cast = conv(dilations = var_6365, groups = var_4943, pad = q_127_pad_0, pad_type = q_127_pad_type_0, strides = var_6363, weight = mid_block_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_253_cast)[name = tensor("q_127_cast")]; + tensor var_6369 = const()[name = tensor("op_6369"), val = tensor([1, 1])]; + tensor var_6371 = const()[name = tensor("op_6371"), val = tensor([1, 1])]; + tensor k_127_pad_type_0 = const()[name = tensor("k_127_pad_type_0"), val = tensor("custom")]; + tensor k_127_pad_0 = const()[name = tensor("k_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553609536))), lut = tensor([-0x1.b7p-8, 0x1.b6cp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_127_cast = conv(dilations = var_6371, groups = var_4943, pad = k_127_pad_0, pad_type = k_127_pad_type_0, strides = var_6369, weight = mid_block_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_127_cast")]; + tensor var_6375 = const()[name = tensor("op_6375"), val = tensor([1, 1])]; + tensor var_6377 = const()[name = tensor("op_6377"), val = tensor([1, 1])]; + tensor v_127_pad_type_0 = const()[name = tensor("v_127_pad_type_0"), val = tensor("custom")]; + tensor v_127_pad_0 = const()[name = tensor("v_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553937280))), lut = tensor([-0x1.e78p-8, 0x1.e74p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_127_cast = conv(dilations = var_6377, groups = var_4943, pad = v_127_pad_0, pad_type = v_127_pad_type_0, strides = var_6375, weight = mid_block_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_127_cast")]; + tensor var_6381 = const()[name = tensor("op_6381"), val = tensor([2, 20, 64, -1])]; + tensor var_6382_cast = reshape(shape = var_6381, x = q_127_cast)[name = tensor("op_6382_cast")]; + tensor var_6383 = const()[name = tensor("op_6383"), val = tensor([2, 20, 64, -1])]; + tensor var_6384_cast 
= reshape(shape = var_6383, x = k_127_cast)[name = tensor("op_6384_cast")]; + tensor var_6385 = const()[name = tensor("op_6385"), val = tensor([2, 20, 64, -1])]; + tensor var_6386_cast = reshape(shape = var_6385, x = v_127_cast)[name = tensor("op_6386_cast")]; + tensor attn_weights_253_transpose_x_0 = const()[name = tensor("attn_weights_253_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_253_transpose_y_0 = const()[name = tensor("attn_weights_253_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_253_cast = matmul(transpose_x = attn_weights_253_transpose_x_0, transpose_y = attn_weights_253_transpose_y_0, x = var_6382_cast, y = var_6384_cast)[name = tensor("attn_weights_253_cast")]; + tensor attn_weights_255_cast = mul(x = attn_weights_253_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_255_cast")]; + tensor var_6390_cast = softmax(axis = var_4927, x = attn_weights_255_cast)[name = tensor("op_6390_cast")]; + tensor attn_127_transpose_x_0 = const()[name = tensor("attn_127_transpose_x_0"), val = tensor(false)]; + tensor attn_127_transpose_y_0 = const()[name = tensor("attn_127_transpose_y_0"), val = tensor(true)]; + tensor attn_127_cast = matmul(transpose_x = attn_127_transpose_x_0, transpose_y = attn_127_transpose_y_0, x = var_6386_cast, y = var_6390_cast)[name = tensor("attn_127_cast")]; + tensor var_6394 = const()[name = tensor("op_6394"), val = tensor([2, 1280, 1, -1])]; + tensor input_385_cast = reshape(shape = var_6394, x = attn_127_cast)[name = tensor("input_385_cast")]; + tensor var_6399 = const()[name = tensor("op_6399"), val = tensor([1, 1])]; + tensor var_6401 = const()[name = tensor("op_6401"), val = tensor([1, 1])]; + tensor var_6403_pad_type_0 = const()[name = tensor("op_6403_pad_type_0"), val = tensor("custom")]; + tensor var_6403_pad_0 = const()[name = tensor("op_6403_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554265024))), lut = tensor([-0x1.37cp-8, 0x1.364p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554469888)))]; + tensor var_6403_cast = conv(bias = mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_6401, groups = var_4943, pad = var_6403_pad_0, pad_type = var_6403_pad_type_0, strides = var_6399, weight = mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_385_cast)[name = tensor("op_6403_cast")]; + tensor inputs_191_cast = add(x = var_6403_cast, y = inputs_189_cast)[name = tensor("inputs_191_cast")]; + tensor var_6407 = const()[name = tensor("op_6407"), val = tensor([1])]; + tensor channels_mean_191_cast = reduce_mean(axes = var_6407, keep_dims = var_4938, x = inputs_191_cast)[name = tensor("channels_mean_191_cast")]; + tensor zero_mean_191_cast = sub(x = inputs_191_cast, y = channels_mean_191_cast)[name = tensor("zero_mean_191_cast")]; + tensor zero_mean_sq_191_cast = mul(x = zero_mean_191_cast, y = zero_mean_191_cast)[name = tensor("zero_mean_sq_191_cast")]; + tensor var_6411 = const()[name = 
tensor("op_6411"), val = tensor([1])]; + tensor var_6412_cast = reduce_mean(axes = var_6411, keep_dims = var_4938, x = zero_mean_sq_191_cast)[name = tensor("op_6412_cast")]; + tensor var_6413_to_fp16 = const()[name = tensor("op_6413_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6414_cast = add(x = var_6412_cast, y = var_6413_to_fp16)[name = tensor("op_6414_cast")]; + tensor denom_191_epsilon_0_to_fp16 = const()[name = tensor("denom_191_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_191_cast = rsqrt(epsilon = denom_191_epsilon_0_to_fp16, x = var_6414_cast)[name = tensor("denom_191_cast")]; + tensor out_191_cast = mul(x = zero_mean_191_cast, y = denom_191_cast)[name = tensor("out_191_cast")]; + tensor var_6418_to_fp16 = const()[name = tensor("op_6418_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554472512)))]; + tensor var_6419_cast = add(x = out_191_cast, y = var_6418_to_fp16)[name = tensor("op_6419_cast")]; + tensor var_6421_to_fp16 = const()[name = tensor("op_6421_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554475136)))]; + tensor input_387_cast = mul(x = var_6419_cast, y = var_6421_to_fp16)[name = tensor("input_387_cast")]; + tensor var_6429 = const()[name = tensor("op_6429"), val = tensor([1, 1])]; + tensor var_6431 = const()[name = tensor("op_6431"), val = tensor([1, 1])]; + tensor var_6433_pad_type_0 = const()[name = tensor("op_6433_pad_type_0"), val = tensor("custom")]; + tensor var_6433_pad_0 = const()[name = tensor("op_6433_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554477760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(561031424))), name = tensor("mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(561031552)))]; + tensor var_6433_cast = conv(bias = mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_6431, groups = var_4943, pad = var_6433_pad_0, pad_type = var_6433_pad_type_0, strides = var_6429, weight = mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_387_cast)[name = tensor("op_6433_cast")]; + tensor var_6434_split_sizes_0 = const()[name = tensor("op_6434_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6434_axis_0 = const()[name = tensor("op_6434_axis_0"), val = tensor(1)]; + tensor var_6434_cast_0, tensor var_6434_cast_1 = split(axis = var_6434_axis_0, split_sizes = var_6434_split_sizes_0, x = var_6433_cast)[name = tensor("op_6434_cast")]; + tensor var_6436_mode_0 = const()[name = tensor("op_6436_mode_0"), val = tensor("EXACT")]; + tensor var_6436_cast = gelu(mode = var_6436_mode_0, x = var_6434_cast_1)[name = tensor("op_6436_cast")]; + tensor input_389_cast = mul(x = var_6434_cast_0, y = var_6436_cast)[name = tensor("input_389_cast")]; + tensor var_6440 = const()[name = tensor("op_6440"), val = tensor([1, 1])]; + tensor var_6442 = const()[name = tensor("op_6442"), val = tensor([1, 
1])]; + tensor var_6444_pad_type_0 = const()[name = tensor("op_6444_pad_type_0"), val = tensor("custom")]; + tensor var_6444_pad_0 = const()[name = tensor("op_6444_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(561052096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(564328960))), name = tensor("mid_block_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(564329088)))]; + tensor var_6444_cast = conv(bias = mid_block_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_6442, groups = var_4943, pad = var_6444_pad_0, pad_type = var_6444_pad_type_0, strides = var_6440, weight = mid_block_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_389_cast)[name = tensor("op_6444_cast")]; + tensor inputs_193_cast = add(x = var_6444_cast, y = inputs_191_cast)[name = tensor("inputs_193_cast")]; + tensor var_6454 = const()[name = tensor("op_6454"), val = tensor([1])]; + tensor channels_mean_193_cast = reduce_mean(axes = var_6454, keep_dims = var_4938, x = inputs_193_cast)[name = tensor("channels_mean_193_cast")]; + tensor zero_mean_193_cast = sub(x = inputs_193_cast, y = channels_mean_193_cast)[name = tensor("zero_mean_193_cast")]; + tensor zero_mean_sq_193_cast = mul(x = zero_mean_193_cast, y = zero_mean_193_cast)[name = tensor("zero_mean_sq_193_cast")]; + tensor var_6458 = const()[name = tensor("op_6458"), val = tensor([1])]; + tensor var_6459_cast = reduce_mean(axes = var_6458, keep_dims = var_4938, x = zero_mean_sq_193_cast)[name = tensor("op_6459_cast")]; + tensor var_6460_to_fp16 = const()[name = tensor("op_6460_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6461_cast = add(x = var_6459_cast, y = var_6460_to_fp16)[name = tensor("op_6461_cast")]; + tensor denom_193_epsilon_0_to_fp16 = const()[name = tensor("denom_193_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_193_cast = rsqrt(epsilon = denom_193_epsilon_0_to_fp16, x = var_6461_cast)[name = tensor("denom_193_cast")]; + tensor out_193_cast = mul(x = zero_mean_193_cast, y = denom_193_cast)[name = tensor("out_193_cast")]; + tensor var_6465_to_fp16 = const()[name = tensor("op_6465_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(564331712)))]; + tensor var_6466_cast = add(x = out_193_cast, y = var_6465_to_fp16)[name = tensor("op_6466_cast")]; + tensor var_6468_to_fp16 = const()[name = tensor("op_6468_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(564334336)))]; + tensor hidden_states_257_cast = mul(x = var_6466_cast, y = var_6468_to_fp16)[name = tensor("hidden_states_257_cast")]; + tensor var_6475 = const()[name = tensor("op_6475"), val = tensor([1, 1])]; + tensor var_6477 = const()[name = tensor("op_6477"), val = tensor([1, 1])]; + tensor q_129_pad_type_0 = const()[name = tensor("q_129_pad_type_0"), val = tensor("custom")]; + tensor q_129_pad_0 = const()[name = tensor("q_129_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
mid_block_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(564336960))), lut = tensor([-0x1.32cp-5, -0x1.6ecp-7, 0x1.738p-7, 0x1.338p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_129_cast = conv(dilations = var_6477, groups = var_4943, pad = q_129_pad_0, pad_type = q_129_pad_type_0, strides = var_6475, weight = mid_block_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_257_cast)[name = tensor("q_129_cast")]; + tensor var_6481 = const()[name = tensor("op_6481"), val = tensor([1, 1])]; + tensor var_6483 = const()[name = tensor("op_6483"), val = tensor([1, 1])]; + tensor k_129_pad_type_0 = const()[name = tensor("k_129_pad_type_0"), val = tensor("custom")]; + tensor k_129_pad_0 = const()[name = tensor("k_129_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(564746624))), lut = tensor([-0x1.328p-5, -0x1.71p-7, 0x1.724p-7, 0x1.32cp-5]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_129_cast = conv(dilations = var_6483, groups = var_4943, pad = k_129_pad_0, pad_type = k_129_pad_type_0, strides = var_6481, weight = mid_block_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_257_cast)[name = tensor("k_129_cast")]; + tensor var_6487 = const()[name = tensor("op_6487"), val = tensor([1, 1])]; + tensor var_6489 = const()[name = tensor("op_6489"), val = tensor([1, 1])]; + tensor v_129_pad_type_0 = const()[name = tensor("v_129_pad_type_0"), val = tensor("custom")]; + tensor v_129_pad_0 = const()[name = tensor("v_129_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(565156288))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(565975552))), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_129_cast = conv(dilations = var_6489, groups = var_4943, pad = v_129_pad_0, pad_type = v_129_pad_type_0, strides = var_6487, weight = mid_block_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_257_cast)[name = tensor("v_129_cast")]; + tensor var_6493 = const()[name = tensor("op_6493"), val = tensor([2, 20, 64, -1])]; + tensor var_6494_cast = reshape(shape = var_6493, x = q_129_cast)[name = tensor("op_6494_cast")]; + tensor var_6495 = const()[name = tensor("op_6495"), val = tensor([2, 20, 64, -1])]; + tensor var_6496_cast = reshape(shape = var_6495, x = k_129_cast)[name = tensor("op_6496_cast")]; + tensor var_6497 = const()[name = tensor("op_6497"), val = tensor([2, 20, 64, -1])]; + tensor var_6498_cast = reshape(shape = var_6497, x = v_129_cast)[name = tensor("op_6498_cast")]; + tensor attn_weights_257_transpose_x_0 = const()[name = tensor("attn_weights_257_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_257_transpose_y_0 = 
const()[name = tensor("attn_weights_257_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_257_cast = matmul(transpose_x = attn_weights_257_transpose_x_0, transpose_y = attn_weights_257_transpose_y_0, x = var_6494_cast, y = var_6496_cast)[name = tensor("attn_weights_257_cast")]; + tensor attn_weights_259_cast = mul(x = attn_weights_257_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_259_cast")]; + tensor var_6502_cast = softmax(axis = var_4927, x = attn_weights_259_cast)[name = tensor("op_6502_cast")]; + tensor attn_129_transpose_x_0 = const()[name = tensor("attn_129_transpose_x_0"), val = tensor(false)]; + tensor attn_129_transpose_y_0 = const()[name = tensor("attn_129_transpose_y_0"), val = tensor(true)]; + tensor attn_129_cast = matmul(transpose_x = attn_129_transpose_x_0, transpose_y = attn_129_transpose_y_0, x = var_6498_cast, y = var_6502_cast)[name = tensor("attn_129_cast")]; + tensor var_6506 = const()[name = tensor("op_6506"), val = tensor([2, 1280, 1, -1])]; + tensor input_391_cast = reshape(shape = var_6506, x = attn_129_cast)[name = tensor("input_391_cast")]; + tensor var_6511 = const()[name = tensor("op_6511"), val = tensor([1, 1])]; + tensor var_6513 = const()[name = tensor("op_6513"), val = tensor([1, 1])]; + tensor var_6515_pad_type_0 = const()[name = tensor("op_6515_pad_type_0"), val = tensor("custom")]; + tensor var_6515_pad_0 = const()[name = tensor("op_6515_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(565975680))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(566794944))), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(566795072)))]; + tensor var_6515_cast = conv(bias = mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_6513, groups = var_4943, pad = var_6515_pad_0, pad_type = var_6515_pad_type_0, strides = var_6511, weight = mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_391_cast)[name = tensor("op_6515_cast")]; + tensor inputs_195_cast = add(x = var_6515_cast, y = inputs_193_cast)[name = tensor("inputs_195_cast")]; + tensor var_6519 = const()[name = tensor("op_6519"), val = tensor([1])]; + tensor channels_mean_195_cast = reduce_mean(axes = var_6519, keep_dims = var_4938, x = inputs_195_cast)[name = tensor("channels_mean_195_cast")]; + tensor zero_mean_195_cast = sub(x = inputs_195_cast, y = channels_mean_195_cast)[name = tensor("zero_mean_195_cast")]; + tensor zero_mean_sq_195_cast = mul(x = zero_mean_195_cast, y = zero_mean_195_cast)[name = tensor("zero_mean_sq_195_cast")]; + tensor var_6523 = const()[name = tensor("op_6523"), val = tensor([1])]; + tensor var_6524_cast = reduce_mean(axes = var_6523, keep_dims = var_4938, x = zero_mean_sq_195_cast)[name = tensor("op_6524_cast")]; + tensor var_6525_to_fp16 = const()[name = tensor("op_6525_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6526_cast = add(x = var_6524_cast, y = var_6525_to_fp16)[name = 
tensor("op_6526_cast")]; + tensor denom_195_epsilon_0_to_fp16 = const()[name = tensor("denom_195_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_195_cast = rsqrt(epsilon = denom_195_epsilon_0_to_fp16, x = var_6526_cast)[name = tensor("denom_195_cast")]; + tensor out_195_cast = mul(x = zero_mean_195_cast, y = denom_195_cast)[name = tensor("out_195_cast")]; + tensor var_6530_to_fp16 = const()[name = tensor("op_6530_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(566797696)))]; + tensor var_6531_cast = add(x = out_195_cast, y = var_6530_to_fp16)[name = tensor("op_6531_cast")]; + tensor var_6533_to_fp16 = const()[name = tensor("op_6533_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(566800320)))]; + tensor hidden_states_259_cast = mul(x = var_6531_cast, y = var_6533_to_fp16)[name = tensor("hidden_states_259_cast")]; + tensor var_6540 = const()[name = tensor("op_6540"), val = tensor([1, 1])]; + tensor var_6542 = const()[name = tensor("op_6542"), val = tensor([1, 1])]; + tensor q_131_pad_type_0 = const()[name = tensor("q_131_pad_type_0"), val = tensor("custom")]; + tensor q_131_pad_0 = const()[name = tensor("q_131_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(566802944))), lut = tensor([-0x1.60cp-7, 0x1.604p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_131_cast = conv(dilations = var_6542, groups = var_4943, pad = q_131_pad_0, pad_type = q_131_pad_type_0, strides = var_6540, weight = mid_block_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_259_cast)[name = tensor("q_131_cast")]; + tensor var_6546 = const()[name = tensor("op_6546"), val = tensor([1, 1])]; + tensor var_6548 = const()[name = tensor("op_6548"), val = tensor([1, 1])]; + tensor k_131_pad_type_0 = const()[name = tensor("k_131_pad_type_0"), val = tensor("custom")]; + tensor k_131_pad_0 = const()[name = tensor("k_131_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567007808))), lut = tensor([-0x1.b4p-8, 0x1.b3cp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_131_cast = conv(dilations = var_6548, groups = var_4943, pad = k_131_pad_0, pad_type = k_131_pad_type_0, strides = var_6546, weight = mid_block_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_131_cast")]; + tensor var_6552 = const()[name = tensor("op_6552"), val = tensor([1, 1])]; + tensor var_6554 = const()[name = tensor("op_6554"), val = tensor([1, 1])]; + tensor v_131_pad_type_0 = const()[name = tensor("v_131_pad_type_0"), val = tensor("custom")]; + tensor v_131_pad_0 = const()[name = tensor("v_131_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567335552))), lut = 
tensor([-0x1.e54p-8, 0x1.e38p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_131_cast = conv(dilations = var_6554, groups = var_4943, pad = v_131_pad_0, pad_type = v_131_pad_type_0, strides = var_6552, weight = mid_block_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_131_cast")]; + tensor var_6558 = const()[name = tensor("op_6558"), val = tensor([2, 20, 64, -1])]; + tensor var_6559_cast = reshape(shape = var_6558, x = q_131_cast)[name = tensor("op_6559_cast")]; + tensor var_6560 = const()[name = tensor("op_6560"), val = tensor([2, 20, 64, -1])]; + tensor var_6561_cast = reshape(shape = var_6560, x = k_131_cast)[name = tensor("op_6561_cast")]; + tensor var_6562 = const()[name = tensor("op_6562"), val = tensor([2, 20, 64, -1])]; + tensor var_6563_cast = reshape(shape = var_6562, x = v_131_cast)[name = tensor("op_6563_cast")]; + tensor attn_weights_261_transpose_x_0 = const()[name = tensor("attn_weights_261_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_261_transpose_y_0 = const()[name = tensor("attn_weights_261_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_261_cast = matmul(transpose_x = attn_weights_261_transpose_x_0, transpose_y = attn_weights_261_transpose_y_0, x = var_6559_cast, y = var_6561_cast)[name = tensor("attn_weights_261_cast")]; + tensor attn_weights_263_cast = mul(x = attn_weights_261_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_263_cast")]; + tensor var_6567_cast = softmax(axis = var_4927, x = attn_weights_263_cast)[name = tensor("op_6567_cast")]; + tensor attn_131_transpose_x_0 = const()[name = tensor("attn_131_transpose_x_0"), val = tensor(false)]; + tensor attn_131_transpose_y_0 = const()[name = tensor("attn_131_transpose_y_0"), val = tensor(true)]; + tensor attn_131_cast = matmul(transpose_x = attn_131_transpose_x_0, transpose_y = attn_131_transpose_y_0, x = var_6563_cast, y = var_6567_cast)[name = tensor("attn_131_cast")]; + tensor var_6571 = const()[name = tensor("op_6571"), val = tensor([2, 1280, 1, -1])]; + tensor input_393_cast = reshape(shape = var_6571, x = attn_131_cast)[name = tensor("input_393_cast")]; + tensor var_6576 = const()[name = tensor("op_6576"), val = tensor([1, 1])]; + tensor var_6578 = const()[name = tensor("op_6578"), val = tensor([1, 1])]; + tensor var_6580_pad_type_0 = const()[name = tensor("op_6580_pad_type_0"), val = tensor("custom")]; + tensor var_6580_pad_0 = const()[name = tensor("op_6580_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567663296))), lut = tensor([-0x1.42p-8, 0x1.41cp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567868160)))]; + tensor var_6580_cast = conv(bias = mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_6578, groups = var_4943, pad = var_6580_pad_0, pad_type = var_6580_pad_type_0, strides = var_6576, weight = 
mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_393_cast)[name = tensor("op_6580_cast")]; + tensor inputs_197_cast = add(x = var_6580_cast, y = inputs_195_cast)[name = tensor("inputs_197_cast")]; + tensor var_6584 = const()[name = tensor("op_6584"), val = tensor([1])]; + tensor channels_mean_197_cast = reduce_mean(axes = var_6584, keep_dims = var_4938, x = inputs_197_cast)[name = tensor("channels_mean_197_cast")]; + tensor zero_mean_197_cast = sub(x = inputs_197_cast, y = channels_mean_197_cast)[name = tensor("zero_mean_197_cast")]; + tensor zero_mean_sq_197_cast = mul(x = zero_mean_197_cast, y = zero_mean_197_cast)[name = tensor("zero_mean_sq_197_cast")]; + tensor var_6588 = const()[name = tensor("op_6588"), val = tensor([1])]; + tensor var_6589_cast = reduce_mean(axes = var_6588, keep_dims = var_4938, x = zero_mean_sq_197_cast)[name = tensor("op_6589_cast")]; + tensor var_6590_to_fp16 = const()[name = tensor("op_6590_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6591_cast = add(x = var_6589_cast, y = var_6590_to_fp16)[name = tensor("op_6591_cast")]; + tensor denom_197_epsilon_0_to_fp16 = const()[name = tensor("denom_197_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_197_cast = rsqrt(epsilon = denom_197_epsilon_0_to_fp16, x = var_6591_cast)[name = tensor("denom_197_cast")]; + tensor out_197_cast = mul(x = zero_mean_197_cast, y = denom_197_cast)[name = tensor("out_197_cast")]; + tensor var_6595_to_fp16 = const()[name = tensor("op_6595_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567870784)))]; + tensor var_6596_cast = add(x = out_197_cast, y = var_6595_to_fp16)[name = tensor("op_6596_cast")]; + tensor var_6598_to_fp16 = const()[name = tensor("op_6598_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567873408)))]; + tensor input_395_cast = mul(x = var_6596_cast, y = var_6598_to_fp16)[name = tensor("input_395_cast")]; + tensor var_6606 = const()[name = tensor("op_6606"), val = tensor([1, 1])]; + tensor var_6608 = const()[name = tensor("op_6608"), val = tensor([1, 1])]; + tensor var_6610_pad_type_0 = const()[name = tensor("op_6610_pad_type_0"), val = tensor("custom")]; + tensor var_6610_pad_0 = const()[name = tensor("op_6610_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(567876032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574429696))), name = tensor("mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574429824)))]; + tensor var_6610_cast = conv(bias = mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_6608, groups = var_4943, pad = var_6610_pad_0, pad_type = var_6610_pad_type_0, strides = var_6606, weight = mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_395_cast)[name = tensor("op_6610_cast")]; + tensor var_6611_split_sizes_0 = const()[name = 
tensor("op_6611_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6611_axis_0 = const()[name = tensor("op_6611_axis_0"), val = tensor(1)]; + tensor var_6611_cast_0, tensor var_6611_cast_1 = split(axis = var_6611_axis_0, split_sizes = var_6611_split_sizes_0, x = var_6610_cast)[name = tensor("op_6611_cast")]; + tensor var_6613_mode_0 = const()[name = tensor("op_6613_mode_0"), val = tensor("EXACT")]; + tensor var_6613_cast = gelu(mode = var_6613_mode_0, x = var_6611_cast_1)[name = tensor("op_6613_cast")]; + tensor input_397_cast = mul(x = var_6611_cast_0, y = var_6613_cast)[name = tensor("input_397_cast")]; + tensor var_6617 = const()[name = tensor("op_6617"), val = tensor([1, 1])]; + tensor var_6619 = const()[name = tensor("op_6619"), val = tensor([1, 1])]; + tensor var_6621_pad_type_0 = const()[name = tensor("op_6621_pad_type_0"), val = tensor("custom")]; + tensor var_6621_pad_0 = const()[name = tensor("op_6621_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574450368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(577727232))), name = tensor("mid_block_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(577727360)))]; + tensor var_6621_cast = conv(bias = mid_block_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_6619, groups = var_4943, pad = var_6621_pad_0, pad_type = var_6621_pad_type_0, strides = var_6617, weight = mid_block_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_397_cast)[name = tensor("op_6621_cast")]; + tensor inputs_199_cast = add(x = var_6621_cast, y = inputs_197_cast)[name = tensor("inputs_199_cast")]; + tensor var_6631 = const()[name = tensor("op_6631"), val = tensor([1])]; + tensor channels_mean_199_cast = reduce_mean(axes = var_6631, keep_dims = var_4938, x = inputs_199_cast)[name = tensor("channels_mean_199_cast")]; + tensor zero_mean_199_cast = sub(x = inputs_199_cast, y = channels_mean_199_cast)[name = tensor("zero_mean_199_cast")]; + tensor zero_mean_sq_199_cast = mul(x = zero_mean_199_cast, y = zero_mean_199_cast)[name = tensor("zero_mean_sq_199_cast")]; + tensor var_6635 = const()[name = tensor("op_6635"), val = tensor([1])]; + tensor var_6636_cast = reduce_mean(axes = var_6635, keep_dims = var_4938, x = zero_mean_sq_199_cast)[name = tensor("op_6636_cast")]; + tensor var_6637_to_fp16 = const()[name = tensor("op_6637_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6638_cast = add(x = var_6636_cast, y = var_6637_to_fp16)[name = tensor("op_6638_cast")]; + tensor denom_199_epsilon_0_to_fp16 = const()[name = tensor("denom_199_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_199_cast = rsqrt(epsilon = denom_199_epsilon_0_to_fp16, x = var_6638_cast)[name = tensor("denom_199_cast")]; + tensor out_199_cast = mul(x = zero_mean_199_cast, y = denom_199_cast)[name = tensor("out_199_cast")]; + tensor var_6642_to_fp16 = const()[name = tensor("op_6642_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(577729984)))]; + tensor var_6643_cast = add(x = out_199_cast, y = var_6642_to_fp16)[name = tensor("op_6643_cast")]; + tensor var_6645_to_fp16 = const()[name = tensor("op_6645_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(577732608)))]; + tensor hidden_states_263_cast = mul(x = var_6643_cast, y = var_6645_to_fp16)[name = tensor("hidden_states_263_cast")]; + tensor var_6652 = const()[name = tensor("op_6652"), val = tensor([1, 1])]; + tensor var_6654 = const()[name = tensor("op_6654"), val = tensor([1, 1])]; + tensor q_133_pad_type_0 = const()[name = tensor("q_133_pad_type_0"), val = tensor("custom")]; + tensor q_133_pad_0 = const()[name = tensor("q_133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(577735232))), lut = tensor([-0x1.338p-5, -0x1.718p-7, 0x1.724p-7, 0x1.334p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_133_cast = conv(dilations = var_6654, groups = var_4943, pad = q_133_pad_0, pad_type = q_133_pad_type_0, strides = var_6652, weight = mid_block_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_263_cast)[name = tensor("q_133_cast")]; + tensor var_6658 = const()[name = tensor("op_6658"), val = tensor([1, 1])]; + tensor var_6660 = const()[name = tensor("op_6660"), val = tensor([1, 1])]; + tensor k_133_pad_type_0 = const()[name = tensor("k_133_pad_type_0"), val = tensor("custom")]; + tensor k_133_pad_0 = const()[name = tensor("k_133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(578144896))), lut = tensor([-0x1.318p-5, -0x1.6e8p-7, 0x1.734p-7, 0x1.328p-5]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_133_cast = conv(dilations = var_6660, groups = var_4943, pad = k_133_pad_0, pad_type = k_133_pad_type_0, strides = var_6658, weight = mid_block_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_263_cast)[name = tensor("k_133_cast")]; + tensor var_6664 = const()[name = tensor("op_6664"), val = tensor([1, 1])]; + tensor var_6666 = const()[name = tensor("op_6666"), val = tensor([1, 1])]; + tensor v_133_pad_type_0 = const()[name = tensor("v_133_pad_type_0"), val = tensor("custom")]; + tensor v_133_pad_0 = const()[name = tensor("v_133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(578554560))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(579373824))), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_133_cast = conv(dilations = var_6666, groups = var_4943, pad = v_133_pad_0, pad_type = v_133_pad_type_0, strides = var_6664, weight = mid_block_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = 
hidden_states_263_cast)[name = tensor("v_133_cast")]; + tensor var_6670 = const()[name = tensor("op_6670"), val = tensor([2, 20, 64, -1])]; + tensor var_6671_cast = reshape(shape = var_6670, x = q_133_cast)[name = tensor("op_6671_cast")]; + tensor var_6672 = const()[name = tensor("op_6672"), val = tensor([2, 20, 64, -1])]; + tensor var_6673_cast = reshape(shape = var_6672, x = k_133_cast)[name = tensor("op_6673_cast")]; + tensor var_6674 = const()[name = tensor("op_6674"), val = tensor([2, 20, 64, -1])]; + tensor var_6675_cast = reshape(shape = var_6674, x = v_133_cast)[name = tensor("op_6675_cast")]; + tensor attn_weights_265_transpose_x_0 = const()[name = tensor("attn_weights_265_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_265_transpose_y_0 = const()[name = tensor("attn_weights_265_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_265_cast = matmul(transpose_x = attn_weights_265_transpose_x_0, transpose_y = attn_weights_265_transpose_y_0, x = var_6671_cast, y = var_6673_cast)[name = tensor("attn_weights_265_cast")]; + tensor attn_weights_267_cast = mul(x = attn_weights_265_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_267_cast")]; + tensor var_6679_cast = softmax(axis = var_4927, x = attn_weights_267_cast)[name = tensor("op_6679_cast")]; + tensor attn_133_transpose_x_0 = const()[name = tensor("attn_133_transpose_x_0"), val = tensor(false)]; + tensor attn_133_transpose_y_0 = const()[name = tensor("attn_133_transpose_y_0"), val = tensor(true)]; + tensor attn_133_cast = matmul(transpose_x = attn_133_transpose_x_0, transpose_y = attn_133_transpose_y_0, x = var_6675_cast, y = var_6679_cast)[name = tensor("attn_133_cast")]; + tensor var_6683 = const()[name = tensor("op_6683"), val = tensor([2, 1280, 1, -1])]; + tensor input_399_cast = reshape(shape = var_6683, x = attn_133_cast)[name = tensor("input_399_cast")]; + tensor var_6688 = const()[name = tensor("op_6688"), val = tensor([1, 1])]; + tensor var_6690 = const()[name = tensor("op_6690"), val = tensor([1, 1])]; + tensor var_6692_pad_type_0 = const()[name = tensor("op_6692_pad_type_0"), val = tensor("custom")]; + tensor var_6692_pad_0 = const()[name = tensor("op_6692_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(579373952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(580193216))), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(580193344)))]; + tensor var_6692_cast = conv(bias = mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_6690, groups = var_4943, pad = var_6692_pad_0, pad_type = var_6692_pad_type_0, strides = var_6688, weight = mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_399_cast)[name = tensor("op_6692_cast")]; + tensor inputs_201_cast = add(x = var_6692_cast, y = inputs_199_cast)[name = tensor("inputs_201_cast")]; + tensor var_6696 = const()[name = tensor("op_6696"), val = tensor([1])]; + tensor 
channels_mean_201_cast = reduce_mean(axes = var_6696, keep_dims = var_4938, x = inputs_201_cast)[name = tensor("channels_mean_201_cast")]; + tensor zero_mean_201_cast = sub(x = inputs_201_cast, y = channels_mean_201_cast)[name = tensor("zero_mean_201_cast")]; + tensor zero_mean_sq_201_cast = mul(x = zero_mean_201_cast, y = zero_mean_201_cast)[name = tensor("zero_mean_sq_201_cast")]; + tensor var_6700 = const()[name = tensor("op_6700"), val = tensor([1])]; + tensor var_6701_cast = reduce_mean(axes = var_6700, keep_dims = var_4938, x = zero_mean_sq_201_cast)[name = tensor("op_6701_cast")]; + tensor var_6702_to_fp16 = const()[name = tensor("op_6702_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6703_cast = add(x = var_6701_cast, y = var_6702_to_fp16)[name = tensor("op_6703_cast")]; + tensor denom_201_epsilon_0_to_fp16 = const()[name = tensor("denom_201_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_201_cast = rsqrt(epsilon = denom_201_epsilon_0_to_fp16, x = var_6703_cast)[name = tensor("denom_201_cast")]; + tensor out_201_cast = mul(x = zero_mean_201_cast, y = denom_201_cast)[name = tensor("out_201_cast")]; + tensor var_6707_to_fp16 = const()[name = tensor("op_6707_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(580195968)))]; + tensor var_6708_cast = add(x = out_201_cast, y = var_6707_to_fp16)[name = tensor("op_6708_cast")]; + tensor var_6710_to_fp16 = const()[name = tensor("op_6710_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(580198592)))]; + tensor hidden_states_265_cast = mul(x = var_6708_cast, y = var_6710_to_fp16)[name = tensor("hidden_states_265_cast")]; + tensor var_6717 = const()[name = tensor("op_6717"), val = tensor([1, 1])]; + tensor var_6719 = const()[name = tensor("op_6719"), val = tensor([1, 1])]; + tensor q_135_pad_type_0 = const()[name = tensor("q_135_pad_type_0"), val = tensor("custom")]; + tensor q_135_pad_0 = const()[name = tensor("q_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(580201216))), lut = tensor([-0x1.5ccp-7, 0x1.5ccp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_135_cast = conv(dilations = var_6719, groups = var_4943, pad = q_135_pad_0, pad_type = q_135_pad_type_0, strides = var_6717, weight = mid_block_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_265_cast)[name = tensor("q_135_cast")]; + tensor var_6723 = const()[name = tensor("op_6723"), val = tensor([1, 1])]; + tensor var_6725 = const()[name = tensor("op_6725"), val = tensor([1, 1])]; + tensor k_135_pad_type_0 = const()[name = tensor("k_135_pad_type_0"), val = tensor("custom")]; + tensor k_135_pad_0 = const()[name = tensor("k_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(580406080))), lut = tensor([-0x1.aa4p-8, 0x1.abp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_135_cast = conv(dilations = var_6725, groups = var_4943, pad = k_135_pad_0, 
pad_type = k_135_pad_type_0, strides = var_6723, weight = mid_block_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_135_cast")]; + tensor var_6729 = const()[name = tensor("op_6729"), val = tensor([1, 1])]; + tensor var_6731 = const()[name = tensor("op_6731"), val = tensor([1, 1])]; + tensor v_135_pad_type_0 = const()[name = tensor("v_135_pad_type_0"), val = tensor("custom")]; + tensor v_135_pad_0 = const()[name = tensor("v_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(580733824))), lut = tensor([-0x1.cdcp-8, 0x1.cd4p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_135_cast = conv(dilations = var_6731, groups = var_4943, pad = v_135_pad_0, pad_type = v_135_pad_type_0, strides = var_6729, weight = mid_block_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_135_cast")]; + tensor var_6735 = const()[name = tensor("op_6735"), val = tensor([2, 20, 64, -1])]; + tensor var_6736_cast = reshape(shape = var_6735, x = q_135_cast)[name = tensor("op_6736_cast")]; + tensor var_6737 = const()[name = tensor("op_6737"), val = tensor([2, 20, 64, -1])]; + tensor var_6738_cast = reshape(shape = var_6737, x = k_135_cast)[name = tensor("op_6738_cast")]; + tensor var_6739 = const()[name = tensor("op_6739"), val = tensor([2, 20, 64, -1])]; + tensor var_6740_cast = reshape(shape = var_6739, x = v_135_cast)[name = tensor("op_6740_cast")]; + tensor attn_weights_269_transpose_x_0 = const()[name = tensor("attn_weights_269_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_269_transpose_y_0 = const()[name = tensor("attn_weights_269_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_269_cast = matmul(transpose_x = attn_weights_269_transpose_x_0, transpose_y = attn_weights_269_transpose_y_0, x = var_6736_cast, y = var_6738_cast)[name = tensor("attn_weights_269_cast")]; + tensor attn_weights_271_cast = mul(x = attn_weights_269_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_271_cast")]; + tensor var_6744_cast = softmax(axis = var_4927, x = attn_weights_271_cast)[name = tensor("op_6744_cast")]; + tensor attn_135_transpose_x_0 = const()[name = tensor("attn_135_transpose_x_0"), val = tensor(false)]; + tensor attn_135_transpose_y_0 = const()[name = tensor("attn_135_transpose_y_0"), val = tensor(true)]; + tensor attn_135_cast = matmul(transpose_x = attn_135_transpose_x_0, transpose_y = attn_135_transpose_y_0, x = var_6740_cast, y = var_6744_cast)[name = tensor("attn_135_cast")]; + tensor var_6748 = const()[name = tensor("op_6748"), val = tensor([2, 1280, 1, -1])]; + tensor input_401_cast = reshape(shape = var_6748, x = attn_135_cast)[name = tensor("input_401_cast")]; + tensor var_6753 = const()[name = tensor("op_6753"), val = tensor([1, 1])]; + tensor var_6755 = const()[name = tensor("op_6755"), val = tensor([1, 1])]; + tensor var_6757_pad_type_0 = const()[name = tensor("op_6757_pad_type_0"), val = tensor("custom")]; + tensor var_6757_pad_0 = const()[name = tensor("op_6757_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(581061568))), lut = tensor([-0x1.48cp-8, 0x1.49p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(581266432)))]; + tensor var_6757_cast = conv(bias = mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_6755, groups = var_4943, pad = var_6757_pad_0, pad_type = var_6757_pad_type_0, strides = var_6753, weight = mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_401_cast)[name = tensor("op_6757_cast")]; + tensor inputs_203_cast = add(x = var_6757_cast, y = inputs_201_cast)[name = tensor("inputs_203_cast")]; + tensor var_6761 = const()[name = tensor("op_6761"), val = tensor([1])]; + tensor channels_mean_203_cast = reduce_mean(axes = var_6761, keep_dims = var_4938, x = inputs_203_cast)[name = tensor("channels_mean_203_cast")]; + tensor zero_mean_203_cast = sub(x = inputs_203_cast, y = channels_mean_203_cast)[name = tensor("zero_mean_203_cast")]; + tensor zero_mean_sq_203_cast = mul(x = zero_mean_203_cast, y = zero_mean_203_cast)[name = tensor("zero_mean_sq_203_cast")]; + tensor var_6765 = const()[name = tensor("op_6765"), val = tensor([1])]; + tensor var_6766_cast = reduce_mean(axes = var_6765, keep_dims = var_4938, x = zero_mean_sq_203_cast)[name = tensor("op_6766_cast")]; + tensor var_6767_to_fp16 = const()[name = tensor("op_6767_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6768_cast = add(x = var_6766_cast, y = var_6767_to_fp16)[name = tensor("op_6768_cast")]; + tensor denom_203_epsilon_0_to_fp16 = const()[name = tensor("denom_203_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_203_cast = rsqrt(epsilon = denom_203_epsilon_0_to_fp16, x = var_6768_cast)[name = tensor("denom_203_cast")]; + tensor out_203_cast = mul(x = zero_mean_203_cast, y = denom_203_cast)[name = tensor("out_203_cast")]; + tensor var_6772_to_fp16 = const()[name = tensor("op_6772_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(581269056)))]; + tensor var_6773_cast = add(x = out_203_cast, y = var_6772_to_fp16)[name = tensor("op_6773_cast")]; + tensor var_6775_to_fp16 = const()[name = tensor("op_6775_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(581271680)))]; + tensor input_403_cast = mul(x = var_6773_cast, y = var_6775_to_fp16)[name = tensor("input_403_cast")]; + tensor var_6783 = const()[name = tensor("op_6783"), val = tensor([1, 1])]; + tensor var_6785 = const()[name = tensor("op_6785"), val = tensor([1, 1])]; + tensor var_6787_pad_type_0 = const()[name = tensor("op_6787_pad_type_0"), val = tensor("custom")]; + tensor var_6787_pad_0 = const()[name = tensor("op_6787_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(581274304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(587827968))), name = 
tensor("mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(587828096)))]; + tensor var_6787_cast = conv(bias = mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_6785, groups = var_4943, pad = var_6787_pad_0, pad_type = var_6787_pad_type_0, strides = var_6783, weight = mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_403_cast)[name = tensor("op_6787_cast")]; + tensor var_6788_split_sizes_0 = const()[name = tensor("op_6788_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6788_axis_0 = const()[name = tensor("op_6788_axis_0"), val = tensor(1)]; + tensor var_6788_cast_0, tensor var_6788_cast_1 = split(axis = var_6788_axis_0, split_sizes = var_6788_split_sizes_0, x = var_6787_cast)[name = tensor("op_6788_cast")]; + tensor var_6790_mode_0 = const()[name = tensor("op_6790_mode_0"), val = tensor("EXACT")]; + tensor var_6790_cast = gelu(mode = var_6790_mode_0, x = var_6788_cast_1)[name = tensor("op_6790_cast")]; + tensor input_405_cast = mul(x = var_6788_cast_0, y = var_6790_cast)[name = tensor("input_405_cast")]; + tensor var_6794 = const()[name = tensor("op_6794"), val = tensor([1, 1])]; + tensor var_6796 = const()[name = tensor("op_6796"), val = tensor([1, 1])]; + tensor var_6798_pad_type_0 = const()[name = tensor("op_6798_pad_type_0"), val = tensor("custom")]; + tensor var_6798_pad_0 = const()[name = tensor("op_6798_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(587848640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591125504))), name = tensor("mid_block_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591125632)))]; + tensor var_6798_cast = conv(bias = mid_block_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_6796, groups = var_4943, pad = var_6798_pad_0, pad_type = var_6798_pad_type_0, strides = var_6794, weight = mid_block_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_405_cast)[name = tensor("op_6798_cast")]; + tensor hidden_states_269_cast = add(x = var_6798_cast, y = inputs_203_cast)[name = tensor("hidden_states_269_cast")]; + tensor var_6800 = const()[name = tensor("op_6800"), val = tensor([2, 1280, 32, 32])]; + tensor input_407_cast = reshape(shape = var_6800, x = hidden_states_269_cast)[name = tensor("input_407_cast")]; + tensor var_6804 = const()[name = tensor("op_6804"), val = tensor([1, 1])]; + tensor var_6806 = const()[name = tensor("op_6806"), val = tensor([1, 1])]; + tensor hidden_states_271_pad_type_0 = const()[name = tensor("hidden_states_271_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_271_pad_0 = const()[name = 
tensor("hidden_states_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591128256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591947520))), name = tensor("mid_block_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591947648)))]; + tensor hidden_states_271_cast = conv(bias = mid_block_attentions_0_proj_out_bias_to_fp16, dilations = var_6806, groups = var_4943, pad = hidden_states_271_pad_0, pad_type = hidden_states_271_pad_type_0, strides = var_6804, weight = mid_block_attentions_0_proj_out_weight_to_fp16_palettized, x = input_407_cast)[name = tensor("hidden_states_271_cast")]; + tensor input_409_cast = add(x = hidden_states_271_cast, y = hidden_states_205_cast)[name = tensor("input_409_cast")]; + tensor reshape_76_shape_0 = const()[name = tensor("reshape_76_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_76_cast = reshape(shape = reshape_76_shape_0, x = input_409_cast)[name = tensor("reshape_76_cast")]; + tensor reduce_mean_57_axes_0 = const()[name = tensor("reduce_mean_57_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_57_keep_dims_0 = const()[name = tensor("reduce_mean_57_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_57_cast = reduce_mean(axes = reduce_mean_57_axes_0, keep_dims = reduce_mean_57_keep_dims_0, x = reshape_76_cast)[name = tensor("reduce_mean_57_cast")]; + tensor sub_38_cast = sub(x = reshape_76_cast, y = reduce_mean_57_cast)[name = tensor("sub_38_cast")]; + tensor square_19_cast = square(x = sub_38_cast)[name = tensor("square_19_cast")]; + tensor reduce_mean_59_axes_0 = const()[name = tensor("reduce_mean_59_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_59_keep_dims_0 = const()[name = tensor("reduce_mean_59_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_59_cast = reduce_mean(axes = reduce_mean_59_axes_0, keep_dims = reduce_mean_59_keep_dims_0, x = square_19_cast)[name = tensor("reduce_mean_59_cast")]; + tensor add_38_y_0_to_fp16 = const()[name = tensor("add_38_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_38_cast = add(x = reduce_mean_59_cast, y = add_38_y_0_to_fp16)[name = tensor("add_38_cast")]; + tensor sqrt_19_cast = sqrt(x = add_38_cast)[name = tensor("sqrt_19_cast")]; + tensor real_div_19_cast = real_div(x = sub_38_cast, y = sqrt_19_cast)[name = tensor("real_div_19_cast")]; + tensor reshape_77_shape_0 = const()[name = tensor("reshape_77_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_77_cast = reshape(shape = reshape_77_shape_0, x = real_div_19_cast)[name = tensor("reshape_77_cast")]; + tensor add_39_gamma_0_to_fp16 = const()[name = tensor("add_39_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591950272)))]; + tensor add_39_beta_0_to_fp16 = const()[name = tensor("add_39_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591952896)))]; + tensor add_39_epsilon_0_to_fp16 = const()[name = tensor("add_39_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_39_cast = batch_norm(beta = add_39_beta_0_to_fp16, 
epsilon = add_39_epsilon_0_to_fp16, gamma = add_39_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_77_cast)[name = tensor("add_39_cast")]; + tensor input_413_cast = silu(x = add_39_cast)[name = tensor("input_413_cast")]; + tensor var_6821 = const()[name = tensor("op_6821"), val = tensor([1, 1])]; + tensor var_6823 = const()[name = tensor("op_6823"), val = tensor([1, 1])]; + tensor hidden_states_273_pad_type_0 = const()[name = tensor("hidden_states_273_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_273_pad_0 = const()[name = tensor("hidden_states_273_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor mid_block_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591955520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(603014784))), name = tensor("mid_block_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor mid_block_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("mid_block_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(603014976)))]; + tensor hidden_states_273_cast = conv(bias = mid_block_resnets_1_conv1_bias_to_fp16, dilations = var_6823, groups = var_4943, pad = hidden_states_273_pad_0, pad_type = hidden_states_273_pad_type_0, strides = var_6821, weight = mid_block_resnets_1_conv1_weight_to_fp16_palettized, x = input_413_cast)[name = tensor("hidden_states_273_cast")]; + tensor var_6829 = const()[name = tensor("op_6829"), val = tensor([1, 1])]; + tensor var_6831 = const()[name = tensor("op_6831"), val = tensor([1, 1])]; + tensor temb_15_pad_type_0 = const()[name = tensor("temb_15_pad_type_0"), val = tensor("custom")]; + tensor temb_15_pad_0 = const()[name = tensor("temb_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(603017600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(603836864))), name = tensor("mid_block_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("mid_block_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(603836992)))]; + tensor temb_15_cast = conv(bias = mid_block_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_6831, groups = var_4943, pad = temb_15_pad_0, pad_type = temb_15_pad_type_0, strides = var_6829, weight = mid_block_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_15_cast")]; + tensor input_417_cast = add(x = hidden_states_273_cast, y = temb_15_cast)[name = tensor("input_417_cast")]; + tensor reshape_80_shape_0 = const()[name = tensor("reshape_80_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_80_cast = reshape(shape = reshape_80_shape_0, x = input_417_cast)[name = tensor("reshape_80_cast")]; + tensor reduce_mean_60_axes_0 = const()[name = tensor("reduce_mean_60_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_60_keep_dims_0 = const()[name = tensor("reduce_mean_60_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_60_cast = reduce_mean(axes = 
reduce_mean_60_axes_0, keep_dims = reduce_mean_60_keep_dims_0, x = reshape_80_cast)[name = tensor("reduce_mean_60_cast")]; + tensor sub_40_cast = sub(x = reshape_80_cast, y = reduce_mean_60_cast)[name = tensor("sub_40_cast")]; + tensor square_20_cast = square(x = sub_40_cast)[name = tensor("square_20_cast")]; + tensor reduce_mean_62_axes_0 = const()[name = tensor("reduce_mean_62_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_62_keep_dims_0 = const()[name = tensor("reduce_mean_62_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_62_cast = reduce_mean(axes = reduce_mean_62_axes_0, keep_dims = reduce_mean_62_keep_dims_0, x = square_20_cast)[name = tensor("reduce_mean_62_cast")]; + tensor add_40_y_0_to_fp16 = const()[name = tensor("add_40_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_40_cast = add(x = reduce_mean_62_cast, y = add_40_y_0_to_fp16)[name = tensor("add_40_cast")]; + tensor sqrt_20_cast = sqrt(x = add_40_cast)[name = tensor("sqrt_20_cast")]; + tensor real_div_20_cast = real_div(x = sub_40_cast, y = sqrt_20_cast)[name = tensor("real_div_20_cast")]; + tensor reshape_81_shape_0 = const()[name = tensor("reshape_81_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_81_cast = reshape(shape = reshape_81_shape_0, x = real_div_20_cast)[name = tensor("reshape_81_cast")]; + tensor add_41_gamma_0_to_fp16 = const()[name = tensor("add_41_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(603839616)))]; + tensor add_41_beta_0_to_fp16 = const()[name = tensor("add_41_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(603842240)))]; + tensor add_41_epsilon_0_to_fp16 = const()[name = tensor("add_41_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_41_cast = batch_norm(beta = add_41_beta_0_to_fp16, epsilon = add_41_epsilon_0_to_fp16, gamma = add_41_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_81_cast)[name = tensor("add_41_cast")]; + tensor input_421_cast = silu(x = add_41_cast)[name = tensor("input_421_cast")]; + tensor var_6841 = const()[name = tensor("op_6841"), val = tensor([1, 1])]; + tensor var_6843 = const()[name = tensor("op_6843"), val = tensor([1, 1])]; + tensor hidden_states_275_pad_type_0 = const()[name = tensor("hidden_states_275_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_275_pad_0 = const()[name = tensor("hidden_states_275_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor mid_block_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(603844864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(614904128))), name = tensor("mid_block_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor mid_block_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("mid_block_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(614904320)))]; + tensor hidden_states_275_cast = conv(bias = mid_block_resnets_1_conv2_bias_to_fp16, dilations = var_6843, groups = var_4943, pad = hidden_states_275_pad_0, pad_type = hidden_states_275_pad_type_0, strides = var_6841, weight = mid_block_resnets_1_conv2_weight_to_fp16_palettized, x = input_421_cast)[name = tensor("hidden_states_275_cast")]; + tensor hidden_states_277_cast = add(x = input_409_cast, y = 
hidden_states_275_cast)[name = tensor("hidden_states_277_cast")]; + tensor var_6849 = const()[name = tensor("op_6849"), val = tensor(3)]; + tensor var_6860 = const()[name = tensor("op_6860"), val = tensor(true)]; + tensor var_6865 = const()[name = tensor("op_6865"), val = tensor(1)]; + tensor input_423_interleave_0 = const()[name = tensor("input_423_interleave_0"), val = tensor(false)]; + tensor input_423_cast = concat(axis = var_6865, interleave = input_423_interleave_0, values = (hidden_states_277_cast, input_311_cast))[name = tensor("input_423_cast")]; + tensor reshape_84_shape_0 = const()[name = tensor("reshape_84_shape_0"), val = tensor([2, 32, 80, 32, 32])]; + tensor reshape_84_cast = reshape(shape = reshape_84_shape_0, x = input_423_cast)[name = tensor("reshape_84_cast")]; + tensor reduce_mean_63_axes_0 = const()[name = tensor("reduce_mean_63_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_63_keep_dims_0 = const()[name = tensor("reduce_mean_63_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_63_cast = reduce_mean(axes = reduce_mean_63_axes_0, keep_dims = reduce_mean_63_keep_dims_0, x = reshape_84_cast)[name = tensor("reduce_mean_63_cast")]; + tensor sub_42_cast = sub(x = reshape_84_cast, y = reduce_mean_63_cast)[name = tensor("sub_42_cast")]; + tensor square_21_cast = square(x = sub_42_cast)[name = tensor("square_21_cast")]; + tensor reduce_mean_65_axes_0 = const()[name = tensor("reduce_mean_65_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_65_keep_dims_0 = const()[name = tensor("reduce_mean_65_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_65_cast = reduce_mean(axes = reduce_mean_65_axes_0, keep_dims = reduce_mean_65_keep_dims_0, x = square_21_cast)[name = tensor("reduce_mean_65_cast")]; + tensor add_42_y_0_to_fp16 = const()[name = tensor("add_42_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_42_cast = add(x = reduce_mean_65_cast, y = add_42_y_0_to_fp16)[name = tensor("add_42_cast")]; + tensor sqrt_21_cast = sqrt(x = add_42_cast)[name = tensor("sqrt_21_cast")]; + tensor real_div_21_cast = real_div(x = sub_42_cast, y = sqrt_21_cast)[name = tensor("real_div_21_cast")]; + tensor reshape_85_shape_0 = const()[name = tensor("reshape_85_shape_0"), val = tensor([2, 2560, 32, 32])]; + tensor reshape_85_cast = reshape(shape = reshape_85_shape_0, x = real_div_21_cast)[name = tensor("reshape_85_cast")]; + tensor add_43_mean_0_to_fp16 = const()[name = tensor("add_43_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(614906944)))]; + tensor add_43_variance_0_to_fp16 = const()[name = tensor("add_43_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(614912128)))]; + tensor add_43_gamma_0_to_fp16 = const()[name = tensor("add_43_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(614917312)))]; + tensor add_43_beta_0_to_fp16 = const()[name = tensor("add_43_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(614922496)))]; + tensor add_43_epsilon_0_to_fp16 = const()[name = tensor("add_43_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_43_cast = batch_norm(beta = add_43_beta_0_to_fp16, epsilon = add_43_epsilon_0_to_fp16, gamma = add_43_gamma_0_to_fp16, mean = add_43_mean_0_to_fp16, variance = add_43_variance_0_to_fp16, x = reshape_85_cast)[name = tensor("add_43_cast")]; + tensor input_427_cast = silu(x = add_43_cast)[name = 
tensor("input_427_cast")]; + tensor var_6894 = const()[name = tensor("op_6894"), val = tensor([1, 1])]; + tensor var_6896 = const()[name = tensor("op_6896"), val = tensor([1, 1])]; + tensor hidden_states_279_pad_type_0 = const()[name = tensor("hidden_states_279_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_279_pad_0 = const()[name = tensor("hidden_states_279_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(614927680))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637046144))), name = tensor("up_blocks_0_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 3, 3])]; + tensor up_blocks_0_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637046336)))]; + tensor hidden_states_279_cast = conv(bias = up_blocks_0_resnets_0_conv1_bias_to_fp16, dilations = var_6896, groups = var_6865, pad = hidden_states_279_pad_0, pad_type = hidden_states_279_pad_type_0, strides = var_6894, weight = up_blocks_0_resnets_0_conv1_weight_to_fp16_palettized, x = input_427_cast)[name = tensor("hidden_states_279_cast")]; + tensor var_6902 = const()[name = tensor("op_6902"), val = tensor([1, 1])]; + tensor var_6904 = const()[name = tensor("op_6904"), val = tensor([1, 1])]; + tensor temb_17_pad_type_0 = const()[name = tensor("temb_17_pad_type_0"), val = tensor("custom")]; + tensor temb_17_pad_0 = const()[name = tensor("temb_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637048960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637868224))), name = tensor("up_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637868352)))]; + tensor temb_17_cast = conv(bias = up_blocks_0_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_6904, groups = var_6865, pad = temb_17_pad_0, pad_type = temb_17_pad_type_0, strides = var_6902, weight = up_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_17_cast")]; + tensor input_431_cast = add(x = hidden_states_279_cast, y = temb_17_cast)[name = tensor("input_431_cast")]; + tensor reshape_88_shape_0 = const()[name = tensor("reshape_88_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_88_cast = reshape(shape = reshape_88_shape_0, x = input_431_cast)[name = tensor("reshape_88_cast")]; + tensor reduce_mean_66_axes_0 = const()[name = tensor("reduce_mean_66_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_66_keep_dims_0 = const()[name = tensor("reduce_mean_66_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_66_cast = reduce_mean(axes = reduce_mean_66_axes_0, keep_dims = reduce_mean_66_keep_dims_0, x = reshape_88_cast)[name = tensor("reduce_mean_66_cast")]; + tensor sub_44_cast = sub(x = reshape_88_cast, y = reduce_mean_66_cast)[name = tensor("sub_44_cast")]; + 
tensor square_22_cast = square(x = sub_44_cast)[name = tensor("square_22_cast")]; + tensor reduce_mean_68_axes_0 = const()[name = tensor("reduce_mean_68_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_68_keep_dims_0 = const()[name = tensor("reduce_mean_68_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_68_cast = reduce_mean(axes = reduce_mean_68_axes_0, keep_dims = reduce_mean_68_keep_dims_0, x = square_22_cast)[name = tensor("reduce_mean_68_cast")]; + tensor add_44_y_0_to_fp16 = const()[name = tensor("add_44_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_44_cast = add(x = reduce_mean_68_cast, y = add_44_y_0_to_fp16)[name = tensor("add_44_cast")]; + tensor sqrt_22_cast = sqrt(x = add_44_cast)[name = tensor("sqrt_22_cast")]; + tensor real_div_22_cast = real_div(x = sub_44_cast, y = sqrt_22_cast)[name = tensor("real_div_22_cast")]; + tensor reshape_89_shape_0 = const()[name = tensor("reshape_89_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_89_cast = reshape(shape = reshape_89_shape_0, x = real_div_22_cast)[name = tensor("reshape_89_cast")]; + tensor add_45_gamma_0_to_fp16 = const()[name = tensor("add_45_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637870976)))]; + tensor add_45_beta_0_to_fp16 = const()[name = tensor("add_45_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637873600)))]; + tensor add_45_epsilon_0_to_fp16 = const()[name = tensor("add_45_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_45_cast = batch_norm(beta = add_45_beta_0_to_fp16, epsilon = add_45_epsilon_0_to_fp16, gamma = add_45_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_89_cast)[name = tensor("add_45_cast")]; + tensor input_435_cast = silu(x = add_45_cast)[name = tensor("input_435_cast")]; + tensor var_6914 = const()[name = tensor("op_6914"), val = tensor([1, 1])]; + tensor var_6916 = const()[name = tensor("op_6916"), val = tensor([1, 1])]; + tensor hidden_states_281_pad_type_0 = const()[name = tensor("hidden_states_281_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_281_pad_0 = const()[name = tensor("hidden_states_281_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(637876224))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(648935488))), name = tensor("up_blocks_0_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor up_blocks_0_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(648935680)))]; + tensor hidden_states_281_cast = conv(bias = up_blocks_0_resnets_0_conv2_bias_to_fp16, dilations = var_6916, groups = var_6865, pad = hidden_states_281_pad_0, pad_type = hidden_states_281_pad_type_0, strides = var_6914, weight = up_blocks_0_resnets_0_conv2_weight_to_fp16_palettized, x = input_435_cast)[name = tensor("hidden_states_281_cast")]; + tensor var_6921 = const()[name = tensor("op_6921"), val = tensor([1, 1])]; + tensor var_6923 = const()[name = tensor("op_6923"), val = tensor([1, 1])]; + tensor x_5_pad_type_0 = const()[name = tensor("x_5_pad_type_0"), val = tensor("custom")]; + tensor x_5_pad_0 = const()[name = 
tensor("x_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_0_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(648938304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651395968))), name = tensor("up_blocks_0_resnets_0_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 1, 1])]; + tensor up_blocks_0_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651396160)))]; + tensor x_5_cast = conv(bias = up_blocks_0_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_6923, groups = var_6865, pad = x_5_pad_0, pad_type = x_5_pad_type_0, strides = var_6921, weight = up_blocks_0_resnets_0_conv_shortcut_weight_to_fp16_palettized, x = input_423_cast)[name = tensor("x_5_cast")]; + tensor hidden_states_283_cast = add(x = x_5_cast, y = hidden_states_281_cast)[name = tensor("hidden_states_283_cast")]; + tensor reshape_92_shape_0 = const()[name = tensor("reshape_92_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_92_cast = reshape(shape = reshape_92_shape_0, x = hidden_states_283_cast)[name = tensor("reshape_92_cast")]; + tensor reduce_mean_69_axes_0 = const()[name = tensor("reduce_mean_69_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_69_keep_dims_0 = const()[name = tensor("reduce_mean_69_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_69_cast = reduce_mean(axes = reduce_mean_69_axes_0, keep_dims = reduce_mean_69_keep_dims_0, x = reshape_92_cast)[name = tensor("reduce_mean_69_cast")]; + tensor sub_46_cast = sub(x = reshape_92_cast, y = reduce_mean_69_cast)[name = tensor("sub_46_cast")]; + tensor square_23_cast = square(x = sub_46_cast)[name = tensor("square_23_cast")]; + tensor reduce_mean_71_axes_0 = const()[name = tensor("reduce_mean_71_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_71_keep_dims_0 = const()[name = tensor("reduce_mean_71_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_71_cast = reduce_mean(axes = reduce_mean_71_axes_0, keep_dims = reduce_mean_71_keep_dims_0, x = square_23_cast)[name = tensor("reduce_mean_71_cast")]; + tensor add_46_y_0_to_fp16 = const()[name = tensor("add_46_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_46_cast = add(x = reduce_mean_71_cast, y = add_46_y_0_to_fp16)[name = tensor("add_46_cast")]; + tensor sqrt_23_cast = sqrt(x = add_46_cast)[name = tensor("sqrt_23_cast")]; + tensor real_div_23_cast = real_div(x = sub_46_cast, y = sqrt_23_cast)[name = tensor("real_div_23_cast")]; + tensor reshape_93_shape_0 = const()[name = tensor("reshape_93_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_93_cast = reshape(shape = reshape_93_shape_0, x = real_div_23_cast)[name = tensor("reshape_93_cast")]; + tensor add_47_gamma_0_to_fp16 = const()[name = tensor("add_47_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651398784)))]; + tensor add_47_beta_0_to_fp16 = const()[name = tensor("add_47_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651401408)))]; + tensor add_47_epsilon_0_to_fp16 = const()[name = tensor("add_47_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_47_cast = batch_norm(beta = add_47_beta_0_to_fp16, epsilon = add_47_epsilon_0_to_fp16, gamma 
= add_47_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_93_cast)[name = tensor("add_47_cast")]; + tensor var_6961 = const()[name = tensor("op_6961"), val = tensor([1, 1])]; + tensor var_6963 = const()[name = tensor("op_6963"), val = tensor([1, 1])]; + tensor hidden_states_285_pad_type_0 = const()[name = tensor("hidden_states_285_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_285_pad_0 = const()[name = tensor("hidden_states_285_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651404032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(652632896))), name = tensor("up_blocks_0_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(652633088)))]; + tensor hidden_states_285_cast = conv(bias = up_blocks_0_attentions_0_proj_in_bias_to_fp16, dilations = var_6963, groups = var_6865, pad = hidden_states_285_pad_0, pad_type = hidden_states_285_pad_type_0, strides = var_6961, weight = up_blocks_0_attentions_0_proj_in_weight_to_fp16_palettized, x = add_47_cast)[name = tensor("hidden_states_285_cast")]; + tensor var_6968 = const()[name = tensor("op_6968"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_205_cast = reshape(shape = var_6968, x = hidden_states_285_cast)[name = tensor("inputs_205_cast")]; + tensor var_6978 = const()[name = tensor("op_6978"), val = tensor([1])]; + tensor channels_mean_205_cast = reduce_mean(axes = var_6978, keep_dims = var_6860, x = inputs_205_cast)[name = tensor("channels_mean_205_cast")]; + tensor zero_mean_205_cast = sub(x = inputs_205_cast, y = channels_mean_205_cast)[name = tensor("zero_mean_205_cast")]; + tensor zero_mean_sq_205_cast = mul(x = zero_mean_205_cast, y = zero_mean_205_cast)[name = tensor("zero_mean_sq_205_cast")]; + tensor var_6982 = const()[name = tensor("op_6982"), val = tensor([1])]; + tensor var_6983_cast = reduce_mean(axes = var_6982, keep_dims = var_6860, x = zero_mean_sq_205_cast)[name = tensor("op_6983_cast")]; + tensor var_6984_to_fp16 = const()[name = tensor("op_6984_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6985_cast = add(x = var_6983_cast, y = var_6984_to_fp16)[name = tensor("op_6985_cast")]; + tensor denom_205_epsilon_0_to_fp16 = const()[name = tensor("denom_205_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_205_cast = rsqrt(epsilon = denom_205_epsilon_0_to_fp16, x = var_6985_cast)[name = tensor("denom_205_cast")]; + tensor out_205_cast = mul(x = zero_mean_205_cast, y = denom_205_cast)[name = tensor("out_205_cast")]; + tensor var_6989_to_fp16 = const()[name = tensor("op_6989_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(652635712)))]; + tensor var_6990_cast = add(x = out_205_cast, y = var_6989_to_fp16)[name = tensor("op_6990_cast")]; + tensor var_6992_to_fp16 = const()[name = tensor("op_6992_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(652638336)))]; + tensor hidden_states_287_cast = mul(x = var_6990_cast, y = var_6992_to_fp16)[name = tensor("hidden_states_287_cast")]; + tensor var_6999 = 
const()[name = tensor("op_6999"), val = tensor([1, 1])]; + tensor var_7001 = const()[name = tensor("op_7001"), val = tensor([1, 1])]; + tensor q_137_pad_type_0 = const()[name = tensor("q_137_pad_type_0"), val = tensor("custom")]; + tensor q_137_pad_0 = const()[name = tensor("q_137_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(652640960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(653460224))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_137_cast = conv(dilations = var_7001, groups = var_6865, pad = q_137_pad_0, pad_type = q_137_pad_type_0, strides = var_6999, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_287_cast)[name = tensor("q_137_cast")]; + tensor var_7005 = const()[name = tensor("op_7005"), val = tensor([1, 1])]; + tensor var_7007 = const()[name = tensor("op_7007"), val = tensor([1, 1])]; + tensor k_137_pad_type_0 = const()[name = tensor("k_137_pad_type_0"), val = tensor("custom")]; + tensor k_137_pad_0 = const()[name = tensor("k_137_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(653460352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(654279616))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_137_cast = conv(dilations = var_7007, groups = var_6865, pad = k_137_pad_0, pad_type = k_137_pad_type_0, strides = var_7005, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_287_cast)[name = tensor("k_137_cast")]; + tensor var_7011 = const()[name = tensor("op_7011"), val = tensor([1, 1])]; + tensor var_7013 = const()[name = tensor("op_7013"), val = tensor([1, 1])]; + tensor v_137_pad_type_0 = const()[name = tensor("v_137_pad_type_0"), val = tensor("custom")]; + tensor v_137_pad_0 = const()[name = tensor("v_137_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(654279744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(655099008))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_137_cast = conv(dilations = var_7013, groups = var_6865, pad = v_137_pad_0, pad_type = v_137_pad_type_0, strides = var_7011, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_287_cast)[name = tensor("v_137_cast")]; + tensor var_7017 = const()[name = tensor("op_7017"), val = tensor([2, 20, 64, -1])]; + tensor var_7018_cast = reshape(shape = var_7017, x = q_137_cast)[name = tensor("op_7018_cast")]; + tensor var_7019 = const()[name = tensor("op_7019"), val = tensor([2, 20, 64, -1])]; + tensor var_7020_cast = 
reshape(shape = var_7019, x = k_137_cast)[name = tensor("op_7020_cast")]; + tensor var_7021 = const()[name = tensor("op_7021"), val = tensor([2, 20, 64, -1])]; + tensor var_7022_cast = reshape(shape = var_7021, x = v_137_cast)[name = tensor("op_7022_cast")]; + tensor attn_weights_273_transpose_x_0 = const()[name = tensor("attn_weights_273_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_273_transpose_y_0 = const()[name = tensor("attn_weights_273_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_273_cast = matmul(transpose_x = attn_weights_273_transpose_x_0, transpose_y = attn_weights_273_transpose_y_0, x = var_7018_cast, y = var_7020_cast)[name = tensor("attn_weights_273_cast")]; + tensor var_6856_to_fp16 = const()[name = tensor("op_6856_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_275_cast = mul(x = attn_weights_273_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_275_cast")]; + tensor var_7026_cast = softmax(axis = var_6849, x = attn_weights_275_cast)[name = tensor("op_7026_cast")]; + tensor attn_137_transpose_x_0 = const()[name = tensor("attn_137_transpose_x_0"), val = tensor(false)]; + tensor attn_137_transpose_y_0 = const()[name = tensor("attn_137_transpose_y_0"), val = tensor(true)]; + tensor attn_137_cast = matmul(transpose_x = attn_137_transpose_x_0, transpose_y = attn_137_transpose_y_0, x = var_7022_cast, y = var_7026_cast)[name = tensor("attn_137_cast")]; + tensor var_7030 = const()[name = tensor("op_7030"), val = tensor([2, 1280, 1, -1])]; + tensor input_439_cast = reshape(shape = var_7030, x = attn_137_cast)[name = tensor("input_439_cast")]; + tensor var_7035 = const()[name = tensor("op_7035"), val = tensor([1, 1])]; + tensor var_7037 = const()[name = tensor("op_7037"), val = tensor([1, 1])]; + tensor var_7039_pad_type_0 = const()[name = tensor("op_7039_pad_type_0"), val = tensor("custom")]; + tensor var_7039_pad_0 = const()[name = tensor("op_7039_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(655099136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(656328000))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(656328192)))]; + tensor var_7039_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_7037, groups = var_6865, pad = var_7039_pad_0, pad_type = var_7039_pad_type_0, strides = var_7035, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_439_cast)[name = tensor("op_7039_cast")]; + tensor inputs_207_cast = add(x = var_7039_cast, y = inputs_205_cast)[name = tensor("inputs_207_cast")]; + tensor var_7043 = const()[name = tensor("op_7043"), val = tensor([1])]; + tensor channels_mean_207_cast = reduce_mean(axes = var_7043, keep_dims = var_6860, x = inputs_207_cast)[name = tensor("channels_mean_207_cast")]; + tensor zero_mean_207_cast = sub(x = inputs_207_cast, y = channels_mean_207_cast)[name = tensor("zero_mean_207_cast")]; + 
tensor zero_mean_sq_207_cast = mul(x = zero_mean_207_cast, y = zero_mean_207_cast)[name = tensor("zero_mean_sq_207_cast")]; + tensor var_7047 = const()[name = tensor("op_7047"), val = tensor([1])]; + tensor var_7048_cast = reduce_mean(axes = var_7047, keep_dims = var_6860, x = zero_mean_sq_207_cast)[name = tensor("op_7048_cast")]; + tensor var_7049_to_fp16 = const()[name = tensor("op_7049_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7050_cast = add(x = var_7048_cast, y = var_7049_to_fp16)[name = tensor("op_7050_cast")]; + tensor denom_207_epsilon_0_to_fp16 = const()[name = tensor("denom_207_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_207_cast = rsqrt(epsilon = denom_207_epsilon_0_to_fp16, x = var_7050_cast)[name = tensor("denom_207_cast")]; + tensor out_207_cast = mul(x = zero_mean_207_cast, y = denom_207_cast)[name = tensor("out_207_cast")]; + tensor var_7054_to_fp16 = const()[name = tensor("op_7054_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(656330816)))]; + tensor var_7055_cast = add(x = out_207_cast, y = var_7054_to_fp16)[name = tensor("op_7055_cast")]; + tensor var_7057_to_fp16 = const()[name = tensor("op_7057_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(656333440)))]; + tensor hidden_states_289_cast = mul(x = var_7055_cast, y = var_7057_to_fp16)[name = tensor("hidden_states_289_cast")]; + tensor var_7064 = const()[name = tensor("op_7064"), val = tensor([1, 1])]; + tensor var_7066 = const()[name = tensor("op_7066"), val = tensor([1, 1])]; + tensor q_139_pad_type_0 = const()[name = tensor("q_139_pad_type_0"), val = tensor("custom")]; + tensor q_139_pad_0 = const()[name = tensor("q_139_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(656336064))), lut = tensor([-0x1.f74p-7, 0x1.f98p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_139_cast = conv(dilations = var_7066, groups = var_6865, pad = q_139_pad_0, pad_type = q_139_pad_type_0, strides = var_7064, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_289_cast)[name = tensor("q_139_cast")]; + tensor var_7070 = const()[name = tensor("op_7070"), val = tensor([1, 1])]; + tensor var_7072 = const()[name = tensor("op_7072"), val = tensor([1, 1])]; + tensor k_139_pad_type_0 = const()[name = tensor("k_139_pad_type_0"), val = tensor("custom")]; + tensor k_139_pad_0 = const()[name = tensor("k_139_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(656540928))), lut = tensor([-0x1.d24p-7, 0x1.d68p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_139_cast = conv(dilations = var_7072, groups = var_6865, pad = k_139_pad_0, pad_type = k_139_pad_type_0, strides = var_7070, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_139_cast")]; + tensor var_7076 = const()[name = tensor("op_7076"), 
val = tensor([1, 1])]; + tensor var_7078 = const()[name = tensor("op_7078"), val = tensor([1, 1])]; + tensor v_139_pad_type_0 = const()[name = tensor("v_139_pad_type_0"), val = tensor("custom")]; + tensor v_139_pad_0 = const()[name = tensor("v_139_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(656868672))), lut = tensor([-0x1.ce8p-6, -0x1.0bcp-7, 0x1.09cp-7, 0x1.cd8p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_139_cast = conv(dilations = var_7078, groups = var_6865, pad = v_139_pad_0, pad_type = v_139_pad_type_0, strides = var_7076, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_139_cast")]; + tensor var_7082 = const()[name = tensor("op_7082"), val = tensor([2, 20, 64, -1])]; + tensor var_7083_cast = reshape(shape = var_7082, x = q_139_cast)[name = tensor("op_7083_cast")]; + tensor var_7084 = const()[name = tensor("op_7084"), val = tensor([2, 20, 64, -1])]; + tensor var_7085_cast = reshape(shape = var_7084, x = k_139_cast)[name = tensor("op_7085_cast")]; + tensor var_7086 = const()[name = tensor("op_7086"), val = tensor([2, 20, 64, -1])]; + tensor var_7087_cast = reshape(shape = var_7086, x = v_139_cast)[name = tensor("op_7087_cast")]; + tensor attn_weights_277_transpose_x_0 = const()[name = tensor("attn_weights_277_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_277_transpose_y_0 = const()[name = tensor("attn_weights_277_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_277_cast = matmul(transpose_x = attn_weights_277_transpose_x_0, transpose_y = attn_weights_277_transpose_y_0, x = var_7083_cast, y = var_7085_cast)[name = tensor("attn_weights_277_cast")]; + tensor attn_weights_279_cast = mul(x = attn_weights_277_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_279_cast")]; + tensor var_7091_cast = softmax(axis = var_6849, x = attn_weights_279_cast)[name = tensor("op_7091_cast")]; + tensor attn_139_transpose_x_0 = const()[name = tensor("attn_139_transpose_x_0"), val = tensor(false)]; + tensor attn_139_transpose_y_0 = const()[name = tensor("attn_139_transpose_y_0"), val = tensor(true)]; + tensor attn_139_cast = matmul(transpose_x = attn_139_transpose_x_0, transpose_y = attn_139_transpose_y_0, x = var_7087_cast, y = var_7091_cast)[name = tensor("attn_139_cast")]; + tensor var_7095 = const()[name = tensor("op_7095"), val = tensor([2, 1280, 1, -1])]; + tensor input_441_cast = reshape(shape = var_7095, x = attn_139_cast)[name = tensor("input_441_cast")]; + tensor var_7100 = const()[name = tensor("op_7100"), val = tensor([1, 1])]; + tensor var_7102 = const()[name = tensor("op_7102"), val = tensor([1, 1])]; + tensor var_7104_pad_type_0 = const()[name = tensor("op_7104_pad_type_0"), val = tensor("custom")]; + tensor var_7104_pad_0 = const()[name = tensor("op_7104_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657524096))), lut = tensor([-0x1.f6cp-8, 0x1.f54p-8]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = 
tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657728960)))]; + tensor var_7104_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_7102, groups = var_6865, pad = var_7104_pad_0, pad_type = var_7104_pad_type_0, strides = var_7100, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_441_cast)[name = tensor("op_7104_cast")]; + tensor inputs_209_cast = add(x = var_7104_cast, y = inputs_207_cast)[name = tensor("inputs_209_cast")]; + tensor var_7108 = const()[name = tensor("op_7108"), val = tensor([1])]; + tensor channels_mean_209_cast = reduce_mean(axes = var_7108, keep_dims = var_6860, x = inputs_209_cast)[name = tensor("channels_mean_209_cast")]; + tensor zero_mean_209_cast = sub(x = inputs_209_cast, y = channels_mean_209_cast)[name = tensor("zero_mean_209_cast")]; + tensor zero_mean_sq_209_cast = mul(x = zero_mean_209_cast, y = zero_mean_209_cast)[name = tensor("zero_mean_sq_209_cast")]; + tensor var_7112 = const()[name = tensor("op_7112"), val = tensor([1])]; + tensor var_7113_cast = reduce_mean(axes = var_7112, keep_dims = var_6860, x = zero_mean_sq_209_cast)[name = tensor("op_7113_cast")]; + tensor var_7114_to_fp16 = const()[name = tensor("op_7114_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7115_cast = add(x = var_7113_cast, y = var_7114_to_fp16)[name = tensor("op_7115_cast")]; + tensor denom_209_epsilon_0_to_fp16 = const()[name = tensor("denom_209_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_209_cast = rsqrt(epsilon = denom_209_epsilon_0_to_fp16, x = var_7115_cast)[name = tensor("denom_209_cast")]; + tensor out_209_cast = mul(x = zero_mean_209_cast, y = denom_209_cast)[name = tensor("out_209_cast")]; + tensor var_7119_to_fp16 = const()[name = tensor("op_7119_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657731584)))]; + tensor var_7120_cast = add(x = out_209_cast, y = var_7119_to_fp16)[name = tensor("op_7120_cast")]; + tensor var_7122_to_fp16 = const()[name = tensor("op_7122_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657734208)))]; + tensor input_443_cast = mul(x = var_7120_cast, y = var_7122_to_fp16)[name = tensor("input_443_cast")]; + tensor var_7130 = const()[name = tensor("op_7130"), val = tensor([1, 1])]; + tensor var_7132 = const()[name = tensor("op_7132"), val = tensor([1, 1])]; + tensor var_7134_pad_type_0 = const()[name = tensor("op_7134_pad_type_0"), val = tensor("custom")]; + tensor var_7134_pad_0 = const()[name = tensor("op_7134_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657736832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(667567296))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(667567488)))]; + tensor var_7134_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_7132, groups = var_6865, pad = var_7134_pad_0, pad_type = var_7134_pad_type_0, strides = var_7130, weight = up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_443_cast)[name = tensor("op_7134_cast")]; + tensor var_7135_split_sizes_0 = const()[name = tensor("op_7135_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7135_axis_0 = const()[name = tensor("op_7135_axis_0"), val = tensor(1)]; + tensor var_7135_cast_0, tensor var_7135_cast_1 = split(axis = var_7135_axis_0, split_sizes = var_7135_split_sizes_0, x = var_7134_cast)[name = tensor("op_7135_cast")]; + tensor var_7137_mode_0 = const()[name = tensor("op_7137_mode_0"), val = tensor("EXACT")]; + tensor var_7137_cast = gelu(mode = var_7137_mode_0, x = var_7135_cast_1)[name = tensor("op_7137_cast")]; + tensor input_445_cast = mul(x = var_7135_cast_0, y = var_7137_cast)[name = tensor("input_445_cast")]; + tensor var_7141 = const()[name = tensor("op_7141"), val = tensor([1, 1])]; + tensor var_7143 = const()[name = tensor("op_7143"), val = tensor([1, 1])]; + tensor var_7145_pad_type_0 = const()[name = tensor("op_7145_pad_type_0"), val = tensor("custom")]; + tensor var_7145_pad_0 = const()[name = tensor("op_7145_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(667588032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(672503296))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(672503488)))]; + tensor var_7145_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_7143, groups = var_6865, pad = var_7145_pad_0, pad_type = var_7145_pad_type_0, strides = var_7141, weight = up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_445_cast)[name = tensor("op_7145_cast")]; + tensor inputs_211_cast = add(x = var_7145_cast, y = inputs_209_cast)[name = tensor("inputs_211_cast")]; + tensor var_7155 = const()[name = tensor("op_7155"), val = tensor([1])]; + tensor channels_mean_211_cast = reduce_mean(axes = var_7155, keep_dims = var_6860, x = inputs_211_cast)[name = tensor("channels_mean_211_cast")]; + tensor zero_mean_211_cast = sub(x = inputs_211_cast, y = channels_mean_211_cast)[name = tensor("zero_mean_211_cast")]; + tensor zero_mean_sq_211_cast = mul(x = zero_mean_211_cast, y = zero_mean_211_cast)[name = tensor("zero_mean_sq_211_cast")]; + tensor var_7159 = const()[name = tensor("op_7159"), val = tensor([1])]; + tensor var_7160_cast = reduce_mean(axes = var_7159, keep_dims = var_6860, x = zero_mean_sq_211_cast)[name = tensor("op_7160_cast")]; + tensor var_7161_to_fp16 = const()[name = tensor("op_7161_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor var_7162_cast = add(x = var_7160_cast, y = var_7161_to_fp16)[name = tensor("op_7162_cast")]; + tensor denom_211_epsilon_0_to_fp16 = const()[name = tensor("denom_211_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_211_cast = rsqrt(epsilon = denom_211_epsilon_0_to_fp16, x = var_7162_cast)[name = tensor("denom_211_cast")]; + tensor out_211_cast = mul(x = zero_mean_211_cast, y = denom_211_cast)[name = tensor("out_211_cast")]; + tensor var_7166_to_fp16 = const()[name = tensor("op_7166_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(672506112)))]; + tensor var_7167_cast = add(x = out_211_cast, y = var_7166_to_fp16)[name = tensor("op_7167_cast")]; + tensor var_7169_to_fp16 = const()[name = tensor("op_7169_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(672508736)))]; + tensor hidden_states_293_cast = mul(x = var_7167_cast, y = var_7169_to_fp16)[name = tensor("hidden_states_293_cast")]; + tensor var_7176 = const()[name = tensor("op_7176"), val = tensor([1, 1])]; + tensor var_7178 = const()[name = tensor("op_7178"), val = tensor([1, 1])]; + tensor q_141_pad_type_0 = const()[name = tensor("q_141_pad_type_0"), val = tensor("custom")]; + tensor q_141_pad_0 = const()[name = tensor("q_141_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(672511360))), lut = tensor([-0x1.5b4p-5, -0x1.a14p-7, 0x1.a2cp-7, 0x1.5cp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_141_cast = conv(dilations = var_7178, groups = var_6865, pad = q_141_pad_0, pad_type = q_141_pad_type_0, strides = var_7176, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_293_cast)[name = tensor("q_141_cast")]; + tensor var_7182 = const()[name = tensor("op_7182"), val = tensor([1, 1])]; + tensor var_7184 = const()[name = tensor("op_7184"), val = tensor([1, 1])]; + tensor k_141_pad_type_0 = const()[name = tensor("k_141_pad_type_0"), val = tensor("custom")]; + tensor k_141_pad_0 = const()[name = tensor("k_141_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(672921024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(673740288))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_141_cast = conv(dilations = var_7184, groups = var_6865, pad = k_141_pad_0, pad_type = k_141_pad_type_0, strides = var_7182, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_293_cast)[name = tensor("k_141_cast")]; + tensor var_7188 = const()[name = tensor("op_7188"), val = tensor([1, 1])]; + tensor var_7190 = const()[name = tensor("op_7190"), val = tensor([1, 1])]; + tensor v_141_pad_type_0 = const()[name = tensor("v_141_pad_type_0"), val = tensor("custom")]; + tensor v_141_pad_0 = const()[name = tensor("v_141_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(673740416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(674559680))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_141_cast = conv(dilations = var_7190, groups = var_6865, pad = v_141_pad_0, pad_type = v_141_pad_type_0, strides = var_7188, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_293_cast)[name = tensor("v_141_cast")]; + tensor var_7194 = const()[name = tensor("op_7194"), val = tensor([2, 20, 64, -1])]; + tensor var_7195_cast = reshape(shape = var_7194, x = q_141_cast)[name = tensor("op_7195_cast")]; + tensor var_7196 = const()[name = tensor("op_7196"), val = tensor([2, 20, 64, -1])]; + tensor var_7197_cast = reshape(shape = var_7196, x = k_141_cast)[name = tensor("op_7197_cast")]; + tensor var_7198 = const()[name = tensor("op_7198"), val = tensor([2, 20, 64, -1])]; + tensor var_7199_cast = reshape(shape = var_7198, x = v_141_cast)[name = tensor("op_7199_cast")]; + tensor attn_weights_281_transpose_x_0 = const()[name = tensor("attn_weights_281_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_281_transpose_y_0 = const()[name = tensor("attn_weights_281_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_281_cast = matmul(transpose_x = attn_weights_281_transpose_x_0, transpose_y = attn_weights_281_transpose_y_0, x = var_7195_cast, y = var_7197_cast)[name = tensor("attn_weights_281_cast")]; + tensor attn_weights_283_cast = mul(x = attn_weights_281_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_283_cast")]; + tensor var_7203_cast = softmax(axis = var_6849, x = attn_weights_283_cast)[name = tensor("op_7203_cast")]; + tensor attn_141_transpose_x_0 = const()[name = tensor("attn_141_transpose_x_0"), val = tensor(false)]; + tensor attn_141_transpose_y_0 = const()[name = tensor("attn_141_transpose_y_0"), val = tensor(true)]; + tensor attn_141_cast = matmul(transpose_x = attn_141_transpose_x_0, transpose_y = attn_141_transpose_y_0, x = var_7199_cast, y = var_7203_cast)[name = tensor("attn_141_cast")]; + tensor var_7207 = const()[name = tensor("op_7207"), val = tensor([2, 1280, 1, -1])]; + tensor input_447_cast = reshape(shape = var_7207, x = attn_141_cast)[name = tensor("input_447_cast")]; + tensor var_7212 = const()[name = tensor("op_7212"), val = tensor([1, 1])]; + tensor var_7214 = const()[name = tensor("op_7214"), val = tensor([1, 1])]; + tensor var_7216_pad_type_0 = const()[name = tensor("op_7216_pad_type_0"), val = tensor("custom")]; + tensor var_7216_pad_0 = const()[name = tensor("op_7216_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(674559808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(675379072))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(675379200)))]; + tensor var_7216_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_7214, groups = var_6865, pad = var_7216_pad_0, pad_type = var_7216_pad_type_0, strides = var_7212, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_447_cast)[name = tensor("op_7216_cast")]; + tensor inputs_213_cast = add(x = var_7216_cast, y = inputs_211_cast)[name = tensor("inputs_213_cast")]; + tensor var_7220 = const()[name = tensor("op_7220"), val = tensor([1])]; + tensor channels_mean_213_cast = reduce_mean(axes = var_7220, keep_dims = var_6860, x = inputs_213_cast)[name = tensor("channels_mean_213_cast")]; + tensor zero_mean_213_cast = sub(x = inputs_213_cast, y = channels_mean_213_cast)[name = tensor("zero_mean_213_cast")]; + tensor zero_mean_sq_213_cast = mul(x = zero_mean_213_cast, y = zero_mean_213_cast)[name = tensor("zero_mean_sq_213_cast")]; + tensor var_7224 = const()[name = tensor("op_7224"), val = tensor([1])]; + tensor var_7225_cast = reduce_mean(axes = var_7224, keep_dims = var_6860, x = zero_mean_sq_213_cast)[name = tensor("op_7225_cast")]; + tensor var_7226_to_fp16 = const()[name = tensor("op_7226_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7227_cast = add(x = var_7225_cast, y = var_7226_to_fp16)[name = tensor("op_7227_cast")]; + tensor denom_213_epsilon_0_to_fp16 = const()[name = tensor("denom_213_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_213_cast = rsqrt(epsilon = denom_213_epsilon_0_to_fp16, x = var_7227_cast)[name = tensor("denom_213_cast")]; + tensor out_213_cast = mul(x = zero_mean_213_cast, y = denom_213_cast)[name = tensor("out_213_cast")]; + tensor var_7231_to_fp16 = const()[name = tensor("op_7231_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(675381824)))]; + tensor var_7232_cast = add(x = out_213_cast, y = var_7231_to_fp16)[name = tensor("op_7232_cast")]; + tensor var_7234_to_fp16 = const()[name = tensor("op_7234_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(675384448)))]; + tensor hidden_states_295_cast = mul(x = var_7232_cast, y = var_7234_to_fp16)[name = tensor("hidden_states_295_cast")]; + tensor var_7241 = const()[name = tensor("op_7241"), val = tensor([1, 1])]; + tensor var_7243 = const()[name = tensor("op_7243"), val = tensor([1, 1])]; + tensor q_143_pad_type_0 = const()[name = tensor("q_143_pad_type_0"), val = tensor("custom")]; + tensor q_143_pad_0 = const()[name = tensor("q_143_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(675387072))), lut = tensor([-0x1.298p-6, 0x1.28cp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_143_cast = conv(dilations = var_7243, groups = var_6865, pad = q_143_pad_0, pad_type = q_143_pad_type_0, strides = var_7241, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_295_cast)[name = tensor("q_143_cast")]; + tensor var_7247 = const()[name = tensor("op_7247"), val = 
tensor([1, 1])]; + tensor var_7249 = const()[name = tensor("op_7249"), val = tensor([1, 1])]; + tensor k_143_pad_type_0 = const()[name = tensor("k_143_pad_type_0"), val = tensor("custom")]; + tensor k_143_pad_0 = const()[name = tensor("k_143_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(675591936))), lut = tensor([-0x1.0c4p-6, 0x1.0bp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_143_cast = conv(dilations = var_7249, groups = var_6865, pad = k_143_pad_0, pad_type = k_143_pad_type_0, strides = var_7247, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_143_cast")]; + tensor var_7253 = const()[name = tensor("op_7253"), val = tensor([1, 1])]; + tensor var_7255 = const()[name = tensor("op_7255"), val = tensor([1, 1])]; + tensor v_143_pad_type_0 = const()[name = tensor("v_143_pad_type_0"), val = tensor("custom")]; + tensor v_143_pad_0 = const()[name = tensor("v_143_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(675919680))), lut = tensor([-0x1.0ecp-5, -0x1.35cp-7, 0x1.35cp-7, 0x1.0e8p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_143_cast = conv(dilations = var_7255, groups = var_6865, pad = v_143_pad_0, pad_type = v_143_pad_type_0, strides = var_7253, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_143_cast")]; + tensor var_7259 = const()[name = tensor("op_7259"), val = tensor([2, 20, 64, -1])]; + tensor var_7260_cast = reshape(shape = var_7259, x = q_143_cast)[name = tensor("op_7260_cast")]; + tensor var_7261 = const()[name = tensor("op_7261"), val = tensor([2, 20, 64, -1])]; + tensor var_7262_cast = reshape(shape = var_7261, x = k_143_cast)[name = tensor("op_7262_cast")]; + tensor var_7263 = const()[name = tensor("op_7263"), val = tensor([2, 20, 64, -1])]; + tensor var_7264_cast = reshape(shape = var_7263, x = v_143_cast)[name = tensor("op_7264_cast")]; + tensor attn_weights_285_transpose_x_0 = const()[name = tensor("attn_weights_285_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_285_transpose_y_0 = const()[name = tensor("attn_weights_285_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_285_cast = matmul(transpose_x = attn_weights_285_transpose_x_0, transpose_y = attn_weights_285_transpose_y_0, x = var_7260_cast, y = var_7262_cast)[name = tensor("attn_weights_285_cast")]; + tensor attn_weights_287_cast = mul(x = attn_weights_285_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_287_cast")]; + tensor var_7268_cast = softmax(axis = var_6849, x = attn_weights_287_cast)[name = tensor("op_7268_cast")]; + tensor attn_143_transpose_x_0 = const()[name = tensor("attn_143_transpose_x_0"), val = tensor(false)]; + tensor attn_143_transpose_y_0 = const()[name = tensor("attn_143_transpose_y_0"), val = tensor(true)]; + tensor attn_143_cast = matmul(transpose_x = attn_143_transpose_x_0, 
transpose_y = attn_143_transpose_y_0, x = var_7264_cast, y = var_7268_cast)[name = tensor("attn_143_cast")]; + tensor var_7272 = const()[name = tensor("op_7272"), val = tensor([2, 1280, 1, -1])]; + tensor input_449_cast = reshape(shape = var_7272, x = attn_143_cast)[name = tensor("input_449_cast")]; + tensor var_7277 = const()[name = tensor("op_7277"), val = tensor([1, 1])]; + tensor var_7279 = const()[name = tensor("op_7279"), val = tensor([1, 1])]; + tensor var_7281_pad_type_0 = const()[name = tensor("op_7281_pad_type_0"), val = tensor("custom")]; + tensor var_7281_pad_0 = const()[name = tensor("op_7281_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676575104))), lut = tensor([-0x1.2fp-7, 0x1.2f4p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676779968)))]; + tensor var_7281_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_7279, groups = var_6865, pad = var_7281_pad_0, pad_type = var_7281_pad_type_0, strides = var_7277, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_449_cast)[name = tensor("op_7281_cast")]; + tensor inputs_215_cast = add(x = var_7281_cast, y = inputs_213_cast)[name = tensor("inputs_215_cast")]; + tensor var_7285 = const()[name = tensor("op_7285"), val = tensor([1])]; + tensor channels_mean_215_cast = reduce_mean(axes = var_7285, keep_dims = var_6860, x = inputs_215_cast)[name = tensor("channels_mean_215_cast")]; + tensor zero_mean_215_cast = sub(x = inputs_215_cast, y = channels_mean_215_cast)[name = tensor("zero_mean_215_cast")]; + tensor zero_mean_sq_215_cast = mul(x = zero_mean_215_cast, y = zero_mean_215_cast)[name = tensor("zero_mean_sq_215_cast")]; + tensor var_7289 = const()[name = tensor("op_7289"), val = tensor([1])]; + tensor var_7290_cast = reduce_mean(axes = var_7289, keep_dims = var_6860, x = zero_mean_sq_215_cast)[name = tensor("op_7290_cast")]; + tensor var_7291_to_fp16 = const()[name = tensor("op_7291_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7292_cast = add(x = var_7290_cast, y = var_7291_to_fp16)[name = tensor("op_7292_cast")]; + tensor denom_215_epsilon_0_to_fp16 = const()[name = tensor("denom_215_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_215_cast = rsqrt(epsilon = denom_215_epsilon_0_to_fp16, x = var_7292_cast)[name = tensor("denom_215_cast")]; + tensor out_215_cast = mul(x = zero_mean_215_cast, y = denom_215_cast)[name = tensor("out_215_cast")]; + tensor var_7296_to_fp16 = const()[name = tensor("op_7296_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676782592)))]; + tensor var_7297_cast = add(x = out_215_cast, y = var_7296_to_fp16)[name = tensor("op_7297_cast")]; + tensor var_7299_to_fp16 = const()[name = tensor("op_7299_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676785216)))]; + tensor input_451_cast = mul(x = var_7297_cast, y = 
var_7299_to_fp16)[name = tensor("input_451_cast")]; + tensor var_7307 = const()[name = tensor("op_7307"), val = tensor([1, 1])]; + tensor var_7309 = const()[name = tensor("op_7309"), val = tensor([1, 1])]; + tensor var_7311_pad_type_0 = const()[name = tensor("op_7311_pad_type_0"), val = tensor("custom")]; + tensor var_7311_pad_0 = const()[name = tensor("op_7311_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676787840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(683341504))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(683341632)))]; + tensor var_7311_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_7309, groups = var_6865, pad = var_7311_pad_0, pad_type = var_7311_pad_type_0, strides = var_7307, weight = up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_451_cast)[name = tensor("op_7311_cast")]; + tensor var_7312_split_sizes_0 = const()[name = tensor("op_7312_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7312_axis_0 = const()[name = tensor("op_7312_axis_0"), val = tensor(1)]; + tensor var_7312_cast_0, tensor var_7312_cast_1 = split(axis = var_7312_axis_0, split_sizes = var_7312_split_sizes_0, x = var_7311_cast)[name = tensor("op_7312_cast")]; + tensor var_7314_mode_0 = const()[name = tensor("op_7314_mode_0"), val = tensor("EXACT")]; + tensor var_7314_cast = gelu(mode = var_7314_mode_0, x = var_7312_cast_1)[name = tensor("op_7314_cast")]; + tensor input_453_cast = mul(x = var_7312_cast_0, y = var_7314_cast)[name = tensor("input_453_cast")]; + tensor var_7318 = const()[name = tensor("op_7318"), val = tensor([1, 1])]; + tensor var_7320 = const()[name = tensor("op_7320"), val = tensor([1, 1])]; + tensor var_7322_pad_type_0 = const()[name = tensor("op_7322_pad_type_0"), val = tensor("custom")]; + tensor var_7322_pad_0 = const()[name = tensor("op_7322_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(683362176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(686639040))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(686639168)))]; + tensor var_7322_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_7320, groups = var_6865, pad = var_7322_pad_0, pad_type = var_7322_pad_type_0, strides = var_7318, weight = 
up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_453_cast)[name = tensor("op_7322_cast")]; + tensor inputs_217_cast = add(x = var_7322_cast, y = inputs_215_cast)[name = tensor("inputs_217_cast")]; + tensor var_7332 = const()[name = tensor("op_7332"), val = tensor([1])]; + tensor channels_mean_217_cast = reduce_mean(axes = var_7332, keep_dims = var_6860, x = inputs_217_cast)[name = tensor("channels_mean_217_cast")]; + tensor zero_mean_217_cast = sub(x = inputs_217_cast, y = channels_mean_217_cast)[name = tensor("zero_mean_217_cast")]; + tensor zero_mean_sq_217_cast = mul(x = zero_mean_217_cast, y = zero_mean_217_cast)[name = tensor("zero_mean_sq_217_cast")]; + tensor var_7336 = const()[name = tensor("op_7336"), val = tensor([1])]; + tensor var_7337_cast = reduce_mean(axes = var_7336, keep_dims = var_6860, x = zero_mean_sq_217_cast)[name = tensor("op_7337_cast")]; + tensor var_7338_to_fp16 = const()[name = tensor("op_7338_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7339_cast = add(x = var_7337_cast, y = var_7338_to_fp16)[name = tensor("op_7339_cast")]; + tensor denom_217_epsilon_0_to_fp16 = const()[name = tensor("denom_217_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_217_cast = rsqrt(epsilon = denom_217_epsilon_0_to_fp16, x = var_7339_cast)[name = tensor("denom_217_cast")]; + tensor out_217_cast = mul(x = zero_mean_217_cast, y = denom_217_cast)[name = tensor("out_217_cast")]; + tensor var_7343_to_fp16 = const()[name = tensor("op_7343_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(686641792)))]; + tensor var_7344_cast = add(x = out_217_cast, y = var_7343_to_fp16)[name = tensor("op_7344_cast")]; + tensor var_7346_to_fp16 = const()[name = tensor("op_7346_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(686644416)))]; + tensor hidden_states_299_cast = mul(x = var_7344_cast, y = var_7346_to_fp16)[name = tensor("hidden_states_299_cast")]; + tensor var_7353 = const()[name = tensor("op_7353"), val = tensor([1, 1])]; + tensor var_7355 = const()[name = tensor("op_7355"), val = tensor([1, 1])]; + tensor q_145_pad_type_0 = const()[name = tensor("q_145_pad_type_0"), val = tensor("custom")]; + tensor q_145_pad_0 = const()[name = tensor("q_145_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(686647040))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(687466304))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_145_cast = conv(dilations = var_7355, groups = var_6865, pad = q_145_pad_0, pad_type = q_145_pad_type_0, strides = var_7353, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_299_cast)[name = tensor("q_145_cast")]; + tensor var_7359 = const()[name = tensor("op_7359"), val = tensor([1, 1])]; + tensor var_7361 = const()[name = tensor("op_7361"), val = tensor([1, 1])]; + tensor k_145_pad_type_0 = const()[name = tensor("k_145_pad_type_0"), val = tensor("custom")]; + tensor k_145_pad_0 = const()[name = tensor("k_145_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(687466432))), lut = tensor([-0x1.628p-5, -0x1.aa4p-7, 0x1.abcp-7, 0x1.638p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_145_cast = conv(dilations = var_7361, groups = var_6865, pad = k_145_pad_0, pad_type = k_145_pad_type_0, strides = var_7359, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_299_cast)[name = tensor("k_145_cast")]; + tensor var_7365 = const()[name = tensor("op_7365"), val = tensor([1, 1])]; + tensor var_7367 = const()[name = tensor("op_7367"), val = tensor([1, 1])]; + tensor v_145_pad_type_0 = const()[name = tensor("v_145_pad_type_0"), val = tensor("custom")]; + tensor v_145_pad_0 = const()[name = tensor("v_145_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(687876096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(688695360))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_145_cast = conv(dilations = var_7367, groups = var_6865, pad = v_145_pad_0, pad_type = v_145_pad_type_0, strides = var_7365, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_299_cast)[name = tensor("v_145_cast")]; + tensor var_7371 = const()[name = tensor("op_7371"), val = tensor([2, 20, 64, -1])]; + tensor var_7372_cast = reshape(shape = var_7371, x = q_145_cast)[name = tensor("op_7372_cast")]; + tensor var_7373 = const()[name = tensor("op_7373"), val = tensor([2, 20, 64, -1])]; + tensor var_7374_cast = reshape(shape = var_7373, x = k_145_cast)[name = tensor("op_7374_cast")]; + tensor var_7375 = const()[name = tensor("op_7375"), val = tensor([2, 20, 64, -1])]; + tensor var_7376_cast = reshape(shape = var_7375, x = v_145_cast)[name = tensor("op_7376_cast")]; + tensor attn_weights_289_transpose_x_0 = const()[name = tensor("attn_weights_289_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_289_transpose_y_0 = const()[name = tensor("attn_weights_289_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_289_cast = matmul(transpose_x = attn_weights_289_transpose_x_0, transpose_y = attn_weights_289_transpose_y_0, x = var_7372_cast, y = var_7374_cast)[name = tensor("attn_weights_289_cast")]; + tensor attn_weights_291_cast = mul(x = attn_weights_289_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_291_cast")]; + tensor var_7380_cast = softmax(axis = var_6849, x = attn_weights_291_cast)[name = tensor("op_7380_cast")]; + tensor attn_145_transpose_x_0 = const()[name = tensor("attn_145_transpose_x_0"), val = tensor(false)]; + tensor attn_145_transpose_y_0 = const()[name = tensor("attn_145_transpose_y_0"), val = tensor(true)]; + tensor attn_145_cast = matmul(transpose_x = attn_145_transpose_x_0, transpose_y = attn_145_transpose_y_0, x = var_7376_cast, y = var_7380_cast)[name = tensor("attn_145_cast")]; + tensor var_7384 = const()[name = tensor("op_7384"), val = tensor([2, 1280, 1, -1])]; + tensor input_455_cast = reshape(shape = var_7384, x = attn_145_cast)[name = tensor("input_455_cast")]; + tensor var_7389 = 
const()[name = tensor("op_7389"), val = tensor([1, 1])]; + tensor var_7391 = const()[name = tensor("op_7391"), val = tensor([1, 1])]; + tensor var_7393_pad_type_0 = const()[name = tensor("op_7393_pad_type_0"), val = tensor("custom")]; + tensor var_7393_pad_0 = const()[name = tensor("op_7393_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(688695488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(689924352))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(689924544)))]; + tensor var_7393_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_7391, groups = var_6865, pad = var_7393_pad_0, pad_type = var_7393_pad_type_0, strides = var_7389, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_455_cast)[name = tensor("op_7393_cast")]; + tensor inputs_219_cast = add(x = var_7393_cast, y = inputs_217_cast)[name = tensor("inputs_219_cast")]; + tensor var_7397 = const()[name = tensor("op_7397"), val = tensor([1])]; + tensor channels_mean_219_cast = reduce_mean(axes = var_7397, keep_dims = var_6860, x = inputs_219_cast)[name = tensor("channels_mean_219_cast")]; + tensor zero_mean_219_cast = sub(x = inputs_219_cast, y = channels_mean_219_cast)[name = tensor("zero_mean_219_cast")]; + tensor zero_mean_sq_219_cast = mul(x = zero_mean_219_cast, y = zero_mean_219_cast)[name = tensor("zero_mean_sq_219_cast")]; + tensor var_7401 = const()[name = tensor("op_7401"), val = tensor([1])]; + tensor var_7402_cast = reduce_mean(axes = var_7401, keep_dims = var_6860, x = zero_mean_sq_219_cast)[name = tensor("op_7402_cast")]; + tensor var_7403_to_fp16 = const()[name = tensor("op_7403_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7404_cast = add(x = var_7402_cast, y = var_7403_to_fp16)[name = tensor("op_7404_cast")]; + tensor denom_219_epsilon_0_to_fp16 = const()[name = tensor("denom_219_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_219_cast = rsqrt(epsilon = denom_219_epsilon_0_to_fp16, x = var_7404_cast)[name = tensor("denom_219_cast")]; + tensor out_219_cast = mul(x = zero_mean_219_cast, y = denom_219_cast)[name = tensor("out_219_cast")]; + tensor var_7408_to_fp16 = const()[name = tensor("op_7408_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(689927168)))]; + tensor var_7409_cast = add(x = out_219_cast, y = var_7408_to_fp16)[name = tensor("op_7409_cast")]; + tensor var_7411_to_fp16 = const()[name = tensor("op_7411_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(689929792)))]; + tensor hidden_states_301_cast = mul(x = var_7409_cast, y = var_7411_to_fp16)[name = tensor("hidden_states_301_cast")]; + tensor var_7418 = const()[name = tensor("op_7418"), val = tensor([1, 1])]; + tensor var_7420 = const()[name = tensor("op_7420"), val = tensor([1, 1])]; + tensor q_147_pad_type_0 = const()[name = 
tensor("q_147_pad_type_0"), val = tensor("custom")]; + tensor q_147_pad_0 = const()[name = tensor("q_147_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(689932416))), lut = tensor([-0x1.39cp-5, -0x1.76cp-7, 0x1.744p-7, 0x1.38cp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_147_cast = conv(dilations = var_7420, groups = var_6865, pad = q_147_pad_0, pad_type = q_147_pad_type_0, strides = var_7418, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_301_cast)[name = tensor("q_147_cast")]; + tensor var_7424 = const()[name = tensor("op_7424"), val = tensor([1, 1])]; + tensor var_7426 = const()[name = tensor("op_7426"), val = tensor([1, 1])]; + tensor k_147_pad_type_0 = const()[name = tensor("k_147_pad_type_0"), val = tensor("custom")]; + tensor k_147_pad_0 = const()[name = tensor("k_147_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(690342080))), lut = tensor([-0x1.1a8p-5, -0x1.4e4p-7, 0x1.4acp-7, 0x1.19cp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_147_cast = conv(dilations = var_7426, groups = var_6865, pad = k_147_pad_0, pad_type = k_147_pad_type_0, strides = var_7424, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_147_cast")]; + tensor var_7430 = const()[name = tensor("op_7430"), val = tensor([1, 1])]; + tensor var_7432 = const()[name = tensor("op_7432"), val = tensor([1, 1])]; + tensor v_147_pad_type_0 = const()[name = tensor("v_147_pad_type_0"), val = tensor("custom")]; + tensor v_147_pad_0 = const()[name = tensor("v_147_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(690997504))), lut = tensor([-0x1.25cp-5, -0x1.544p-7, 0x1.518p-7, 0x1.254p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_147_cast = conv(dilations = var_7432, groups = var_6865, pad = v_147_pad_0, pad_type = v_147_pad_type_0, strides = var_7430, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_147_cast")]; + tensor var_7436 = const()[name = tensor("op_7436"), val = tensor([2, 20, 64, -1])]; + tensor var_7437_cast = reshape(shape = var_7436, x = q_147_cast)[name = tensor("op_7437_cast")]; + tensor var_7438 = const()[name = tensor("op_7438"), val = tensor([2, 20, 64, -1])]; + tensor var_7439_cast = reshape(shape = var_7438, x = k_147_cast)[name = tensor("op_7439_cast")]; + tensor var_7440 = const()[name = tensor("op_7440"), val = tensor([2, 20, 64, -1])]; + tensor var_7441_cast = reshape(shape = var_7440, x = v_147_cast)[name = tensor("op_7441_cast")]; + tensor 
attn_weights_293_transpose_x_0 = const()[name = tensor("attn_weights_293_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_293_transpose_y_0 = const()[name = tensor("attn_weights_293_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_293_cast = matmul(transpose_x = attn_weights_293_transpose_x_0, transpose_y = attn_weights_293_transpose_y_0, x = var_7437_cast, y = var_7439_cast)[name = tensor("attn_weights_293_cast")]; + tensor attn_weights_295_cast = mul(x = attn_weights_293_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_295_cast")]; + tensor var_7445_cast = softmax(axis = var_6849, x = attn_weights_295_cast)[name = tensor("op_7445_cast")]; + tensor attn_147_transpose_x_0 = const()[name = tensor("attn_147_transpose_x_0"), val = tensor(false)]; + tensor attn_147_transpose_y_0 = const()[name = tensor("attn_147_transpose_y_0"), val = tensor(true)]; + tensor attn_147_cast = matmul(transpose_x = attn_147_transpose_x_0, transpose_y = attn_147_transpose_y_0, x = var_7441_cast, y = var_7445_cast)[name = tensor("attn_147_cast")]; + tensor var_7449 = const()[name = tensor("op_7449"), val = tensor([2, 1280, 1, -1])]; + tensor input_457_cast = reshape(shape = var_7449, x = attn_147_cast)[name = tensor("input_457_cast")]; + tensor var_7454 = const()[name = tensor("op_7454"), val = tensor([1, 1])]; + tensor var_7456 = const()[name = tensor("op_7456"), val = tensor([1, 1])]; + tensor var_7458_pad_type_0 = const()[name = tensor("op_7458_pad_type_0"), val = tensor("custom")]; + tensor var_7458_pad_0 = const()[name = tensor("op_7458_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(691652928))), lut = tensor([-0x1.52cp-7, 0x1.548p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(691857792)))]; + tensor var_7458_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_7456, groups = var_6865, pad = var_7458_pad_0, pad_type = var_7458_pad_type_0, strides = var_7454, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_457_cast)[name = tensor("op_7458_cast")]; + tensor inputs_221_cast = add(x = var_7458_cast, y = inputs_219_cast)[name = tensor("inputs_221_cast")]; + tensor var_7462 = const()[name = tensor("op_7462"), val = tensor([1])]; + tensor channels_mean_221_cast = reduce_mean(axes = var_7462, keep_dims = var_6860, x = inputs_221_cast)[name = tensor("channels_mean_221_cast")]; + tensor zero_mean_221_cast = sub(x = inputs_221_cast, y = channels_mean_221_cast)[name = tensor("zero_mean_221_cast")]; + tensor zero_mean_sq_221_cast = mul(x = zero_mean_221_cast, y = zero_mean_221_cast)[name = tensor("zero_mean_sq_221_cast")]; + tensor var_7466 = const()[name = tensor("op_7466"), val = tensor([1])]; + tensor var_7467_cast = reduce_mean(axes = var_7466, keep_dims = var_6860, x = zero_mean_sq_221_cast)[name = tensor("op_7467_cast")]; + tensor var_7468_to_fp16 = const()[name = tensor("op_7468_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor var_7469_cast = add(x = var_7467_cast, y = var_7468_to_fp16)[name = tensor("op_7469_cast")]; + tensor denom_221_epsilon_0_to_fp16 = const()[name = tensor("denom_221_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_221_cast = rsqrt(epsilon = denom_221_epsilon_0_to_fp16, x = var_7469_cast)[name = tensor("denom_221_cast")]; + tensor out_221_cast = mul(x = zero_mean_221_cast, y = denom_221_cast)[name = tensor("out_221_cast")]; + tensor var_7473_to_fp16 = const()[name = tensor("op_7473_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(691860416)))]; + tensor var_7474_cast = add(x = out_221_cast, y = var_7473_to_fp16)[name = tensor("op_7474_cast")]; + tensor var_7476_to_fp16 = const()[name = tensor("op_7476_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(691863040)))]; + tensor input_459_cast = mul(x = var_7474_cast, y = var_7476_to_fp16)[name = tensor("input_459_cast")]; + tensor var_7484 = const()[name = tensor("op_7484"), val = tensor([1, 1])]; + tensor var_7486 = const()[name = tensor("op_7486"), val = tensor([1, 1])]; + tensor var_7488_pad_type_0 = const()[name = tensor("op_7488_pad_type_0"), val = tensor("custom")]; + tensor var_7488_pad_0 = const()[name = tensor("op_7488_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(691865664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(698419328))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(698419456)))]; + tensor var_7488_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_7486, groups = var_6865, pad = var_7488_pad_0, pad_type = var_7488_pad_type_0, strides = var_7484, weight = up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_459_cast)[name = tensor("op_7488_cast")]; + tensor var_7489_split_sizes_0 = const()[name = tensor("op_7489_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7489_axis_0 = const()[name = tensor("op_7489_axis_0"), val = tensor(1)]; + tensor var_7489_cast_0, tensor var_7489_cast_1 = split(axis = var_7489_axis_0, split_sizes = var_7489_split_sizes_0, x = var_7488_cast)[name = tensor("op_7489_cast")]; + tensor var_7491_mode_0 = const()[name = tensor("op_7491_mode_0"), val = tensor("EXACT")]; + tensor var_7491_cast = gelu(mode = var_7491_mode_0, x = var_7489_cast_1)[name = tensor("op_7491_cast")]; + tensor input_461_cast = mul(x = var_7489_cast_0, y = var_7491_cast)[name = tensor("input_461_cast")]; + tensor var_7495 = const()[name = tensor("op_7495"), val = tensor([1, 1])]; + tensor var_7497 = const()[name = tensor("op_7497"), val = tensor([1, 1])]; + tensor var_7499_pad_type_0 = const()[name = tensor("op_7499_pad_type_0"), val = tensor("custom")]; + tensor var_7499_pad_0 = const()[name = tensor("op_7499_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(698440000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703355264))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703355456)))]; + tensor var_7499_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_7497, groups = var_6865, pad = var_7499_pad_0, pad_type = var_7499_pad_type_0, strides = var_7495, weight = up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_461_cast)[name = tensor("op_7499_cast")]; + tensor inputs_223_cast = add(x = var_7499_cast, y = inputs_221_cast)[name = tensor("inputs_223_cast")]; + tensor var_7509 = const()[name = tensor("op_7509"), val = tensor([1])]; + tensor channels_mean_223_cast = reduce_mean(axes = var_7509, keep_dims = var_6860, x = inputs_223_cast)[name = tensor("channels_mean_223_cast")]; + tensor zero_mean_223_cast = sub(x = inputs_223_cast, y = channels_mean_223_cast)[name = tensor("zero_mean_223_cast")]; + tensor zero_mean_sq_223_cast = mul(x = zero_mean_223_cast, y = zero_mean_223_cast)[name = tensor("zero_mean_sq_223_cast")]; + tensor var_7513 = const()[name = tensor("op_7513"), val = tensor([1])]; + tensor var_7514_cast = reduce_mean(axes = var_7513, keep_dims = var_6860, x = zero_mean_sq_223_cast)[name = tensor("op_7514_cast")]; + tensor var_7515_to_fp16 = const()[name = tensor("op_7515_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7516_cast = add(x = var_7514_cast, y = var_7515_to_fp16)[name = tensor("op_7516_cast")]; + tensor denom_223_epsilon_0_to_fp16 = const()[name = tensor("denom_223_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_223_cast = rsqrt(epsilon = denom_223_epsilon_0_to_fp16, x = var_7516_cast)[name = tensor("denom_223_cast")]; + tensor out_223_cast = mul(x = zero_mean_223_cast, y = denom_223_cast)[name = tensor("out_223_cast")]; + tensor var_7520_to_fp16 = const()[name = tensor("op_7520_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703358080)))]; + tensor var_7521_cast = add(x = out_223_cast, y = var_7520_to_fp16)[name = tensor("op_7521_cast")]; + tensor var_7523_to_fp16 = const()[name = tensor("op_7523_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703360704)))]; + tensor hidden_states_305_cast = mul(x = var_7521_cast, y = var_7523_to_fp16)[name = tensor("hidden_states_305_cast")]; + tensor var_7530 = const()[name = tensor("op_7530"), val = tensor([1, 1])]; + tensor var_7532 = const()[name = tensor("op_7532"), val = tensor([1, 1])]; + tensor q_149_pad_type_0 = const()[name = tensor("q_149_pad_type_0"), val = tensor("custom")]; + tensor q_149_pad_0 = const()[name = tensor("q_149_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703363328))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(704182592))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_149_cast = conv(dilations = var_7532, groups = var_6865, pad = q_149_pad_0, pad_type = q_149_pad_type_0, strides = var_7530, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_305_cast)[name = tensor("q_149_cast")]; + tensor var_7536 = const()[name = tensor("op_7536"), val = tensor([1, 1])]; + tensor var_7538 = const()[name = tensor("op_7538"), val = tensor([1, 1])]; + tensor k_149_pad_type_0 = const()[name = tensor("k_149_pad_type_0"), val = tensor("custom")]; + tensor k_149_pad_0 = const()[name = tensor("k_149_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(704182720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(705001984))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_149_cast = conv(dilations = var_7538, groups = var_6865, pad = k_149_pad_0, pad_type = k_149_pad_type_0, strides = var_7536, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_305_cast)[name = tensor("k_149_cast")]; + tensor var_7542 = const()[name = tensor("op_7542"), val = tensor([1, 1])]; + tensor var_7544 = const()[name = tensor("op_7544"), val = tensor([1, 1])]; + tensor v_149_pad_type_0 = const()[name = tensor("v_149_pad_type_0"), val = tensor("custom")]; + tensor v_149_pad_0 = const()[name = tensor("v_149_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(705002112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(705821376))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_149_cast = conv(dilations = var_7544, groups = var_6865, pad = v_149_pad_0, pad_type = v_149_pad_type_0, strides = var_7542, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_305_cast)[name = tensor("v_149_cast")]; + tensor var_7548 = const()[name = tensor("op_7548"), val = tensor([2, 20, 64, -1])]; + tensor var_7549_cast = reshape(shape = var_7548, x = q_149_cast)[name = tensor("op_7549_cast")]; + tensor var_7550 = const()[name = tensor("op_7550"), val = tensor([2, 20, 64, -1])]; + tensor var_7551_cast = reshape(shape = var_7550, x = k_149_cast)[name = tensor("op_7551_cast")]; + tensor var_7552 = const()[name = tensor("op_7552"), val = tensor([2, 20, 64, -1])]; + tensor var_7553_cast = reshape(shape = var_7552, x = v_149_cast)[name = tensor("op_7553_cast")]; + tensor attn_weights_297_transpose_x_0 = const()[name = tensor("attn_weights_297_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_297_transpose_y_0 = const()[name = tensor("attn_weights_297_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_297_cast = matmul(transpose_x = 
attn_weights_297_transpose_x_0, transpose_y = attn_weights_297_transpose_y_0, x = var_7549_cast, y = var_7551_cast)[name = tensor("attn_weights_297_cast")]; + tensor attn_weights_299_cast = mul(x = attn_weights_297_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_299_cast")]; + tensor var_7557_cast = softmax(axis = var_6849, x = attn_weights_299_cast)[name = tensor("op_7557_cast")]; + tensor attn_149_transpose_x_0 = const()[name = tensor("attn_149_transpose_x_0"), val = tensor(false)]; + tensor attn_149_transpose_y_0 = const()[name = tensor("attn_149_transpose_y_0"), val = tensor(true)]; + tensor attn_149_cast = matmul(transpose_x = attn_149_transpose_x_0, transpose_y = attn_149_transpose_y_0, x = var_7553_cast, y = var_7557_cast)[name = tensor("attn_149_cast")]; + tensor var_7561 = const()[name = tensor("op_7561"), val = tensor([2, 1280, 1, -1])]; + tensor input_463_cast = reshape(shape = var_7561, x = attn_149_cast)[name = tensor("input_463_cast")]; + tensor var_7566 = const()[name = tensor("op_7566"), val = tensor([1, 1])]; + tensor var_7568 = const()[name = tensor("op_7568"), val = tensor([1, 1])]; + tensor var_7570_pad_type_0 = const()[name = tensor("op_7570_pad_type_0"), val = tensor("custom")]; + tensor var_7570_pad_0 = const()[name = tensor("op_7570_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(705821504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(707050368))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(707050560)))]; + tensor var_7570_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_7568, groups = var_6865, pad = var_7570_pad_0, pad_type = var_7570_pad_type_0, strides = var_7566, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_463_cast)[name = tensor("op_7570_cast")]; + tensor inputs_225_cast = add(x = var_7570_cast, y = inputs_223_cast)[name = tensor("inputs_225_cast")]; + tensor var_7574 = const()[name = tensor("op_7574"), val = tensor([1])]; + tensor channels_mean_225_cast = reduce_mean(axes = var_7574, keep_dims = var_6860, x = inputs_225_cast)[name = tensor("channels_mean_225_cast")]; + tensor zero_mean_225_cast = sub(x = inputs_225_cast, y = channels_mean_225_cast)[name = tensor("zero_mean_225_cast")]; + tensor zero_mean_sq_225_cast = mul(x = zero_mean_225_cast, y = zero_mean_225_cast)[name = tensor("zero_mean_sq_225_cast")]; + tensor var_7578 = const()[name = tensor("op_7578"), val = tensor([1])]; + tensor var_7579_cast = reduce_mean(axes = var_7578, keep_dims = var_6860, x = zero_mean_sq_225_cast)[name = tensor("op_7579_cast")]; + tensor var_7580_to_fp16 = const()[name = tensor("op_7580_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7581_cast = add(x = var_7579_cast, y = var_7580_to_fp16)[name = tensor("op_7581_cast")]; + tensor denom_225_epsilon_0_to_fp16 = const()[name = tensor("denom_225_epsilon_0_to_fp16"), val = 
tensor(0x1p-24)]; + tensor denom_225_cast = rsqrt(epsilon = denom_225_epsilon_0_to_fp16, x = var_7581_cast)[name = tensor("denom_225_cast")]; + tensor out_225_cast = mul(x = zero_mean_225_cast, y = denom_225_cast)[name = tensor("out_225_cast")]; + tensor var_7585_to_fp16 = const()[name = tensor("op_7585_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(707053184)))]; + tensor var_7586_cast = add(x = out_225_cast, y = var_7585_to_fp16)[name = tensor("op_7586_cast")]; + tensor var_7588_to_fp16 = const()[name = tensor("op_7588_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(707055808)))]; + tensor hidden_states_307_cast = mul(x = var_7586_cast, y = var_7588_to_fp16)[name = tensor("hidden_states_307_cast")]; + tensor var_7595 = const()[name = tensor("op_7595"), val = tensor([1, 1])]; + tensor var_7597 = const()[name = tensor("op_7597"), val = tensor([1, 1])]; + tensor q_151_pad_type_0 = const()[name = tensor("q_151_pad_type_0"), val = tensor("custom")]; + tensor q_151_pad_0 = const()[name = tensor("q_151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(707058432))), lut = tensor([-0x1.3fcp-6, 0x1.3fcp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_151_cast = conv(dilations = var_7597, groups = var_6865, pad = q_151_pad_0, pad_type = q_151_pad_type_0, strides = var_7595, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_307_cast)[name = tensor("q_151_cast")]; + tensor var_7601 = const()[name = tensor("op_7601"), val = tensor([1, 1])]; + tensor var_7603 = const()[name = tensor("op_7603"), val = tensor([1, 1])]; + tensor k_151_pad_type_0 = const()[name = tensor("k_151_pad_type_0"), val = tensor("custom")]; + tensor k_151_pad_0 = const()[name = tensor("k_151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(707263296))), lut = tensor([-0x1.114p-6, 0x1.114p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_151_cast = conv(dilations = var_7603, groups = var_6865, pad = k_151_pad_0, pad_type = k_151_pad_type_0, strides = var_7601, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_151_cast")]; + tensor var_7607 = const()[name = tensor("op_7607"), val = tensor([1, 1])]; + tensor var_7609 = const()[name = tensor("op_7609"), val = tensor([1, 1])]; + tensor v_151_pad_type_0 = const()[name = tensor("v_151_pad_type_0"), val = tensor("custom")]; + tensor v_151_pad_0 = const()[name = tensor("v_151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(707591040))), lut = tensor([-0x1.24p-6, 0x1.228p-6]), name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_151_cast = conv(dilations = var_7609, groups = var_6865, pad = v_151_pad_0, pad_type = v_151_pad_type_0, strides = var_7607, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_151_cast")]; + tensor var_7613 = const()[name = tensor("op_7613"), val = tensor([2, 20, 64, -1])]; + tensor var_7614_cast = reshape(shape = var_7613, x = q_151_cast)[name = tensor("op_7614_cast")]; + tensor var_7615 = const()[name = tensor("op_7615"), val = tensor([2, 20, 64, -1])]; + tensor var_7616_cast = reshape(shape = var_7615, x = k_151_cast)[name = tensor("op_7616_cast")]; + tensor var_7617 = const()[name = tensor("op_7617"), val = tensor([2, 20, 64, -1])]; + tensor var_7618_cast = reshape(shape = var_7617, x = v_151_cast)[name = tensor("op_7618_cast")]; + tensor attn_weights_301_transpose_x_0 = const()[name = tensor("attn_weights_301_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_301_transpose_y_0 = const()[name = tensor("attn_weights_301_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_301_cast = matmul(transpose_x = attn_weights_301_transpose_x_0, transpose_y = attn_weights_301_transpose_y_0, x = var_7614_cast, y = var_7616_cast)[name = tensor("attn_weights_301_cast")]; + tensor attn_weights_303_cast = mul(x = attn_weights_301_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_303_cast")]; + tensor var_7622_cast = softmax(axis = var_6849, x = attn_weights_303_cast)[name = tensor("op_7622_cast")]; + tensor attn_151_transpose_x_0 = const()[name = tensor("attn_151_transpose_x_0"), val = tensor(false)]; + tensor attn_151_transpose_y_0 = const()[name = tensor("attn_151_transpose_y_0"), val = tensor(true)]; + tensor attn_151_cast = matmul(transpose_x = attn_151_transpose_x_0, transpose_y = attn_151_transpose_y_0, x = var_7618_cast, y = var_7622_cast)[name = tensor("attn_151_cast")]; + tensor var_7626 = const()[name = tensor("op_7626"), val = tensor([2, 1280, 1, -1])]; + tensor input_465_cast = reshape(shape = var_7626, x = attn_151_cast)[name = tensor("input_465_cast")]; + tensor var_7631 = const()[name = tensor("op_7631"), val = tensor([1, 1])]; + tensor var_7633 = const()[name = tensor("op_7633"), val = tensor([1, 1])]; + tensor var_7635_pad_type_0 = const()[name = tensor("op_7635_pad_type_0"), val = tensor("custom")]; + tensor var_7635_pad_0 = const()[name = tensor("op_7635_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(707918784))), lut = tensor([-0x1.574p-7, 0x1.56cp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(708123648)))]; + tensor var_7635_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_7633, groups = var_6865, pad = var_7635_pad_0, pad_type = var_7635_pad_type_0, strides = var_7631, weight = 
up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_465_cast)[name = tensor("op_7635_cast")]; + tensor inputs_227_cast = add(x = var_7635_cast, y = inputs_225_cast)[name = tensor("inputs_227_cast")]; + tensor var_7639 = const()[name = tensor("op_7639"), val = tensor([1])]; + tensor channels_mean_227_cast = reduce_mean(axes = var_7639, keep_dims = var_6860, x = inputs_227_cast)[name = tensor("channels_mean_227_cast")]; + tensor zero_mean_227_cast = sub(x = inputs_227_cast, y = channels_mean_227_cast)[name = tensor("zero_mean_227_cast")]; + tensor zero_mean_sq_227_cast = mul(x = zero_mean_227_cast, y = zero_mean_227_cast)[name = tensor("zero_mean_sq_227_cast")]; + tensor var_7643 = const()[name = tensor("op_7643"), val = tensor([1])]; + tensor var_7644_cast = reduce_mean(axes = var_7643, keep_dims = var_6860, x = zero_mean_sq_227_cast)[name = tensor("op_7644_cast")]; + tensor var_7645_to_fp16 = const()[name = tensor("op_7645_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7646_cast = add(x = var_7644_cast, y = var_7645_to_fp16)[name = tensor("op_7646_cast")]; + tensor denom_227_epsilon_0_to_fp16 = const()[name = tensor("denom_227_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_227_cast = rsqrt(epsilon = denom_227_epsilon_0_to_fp16, x = var_7646_cast)[name = tensor("denom_227_cast")]; + tensor out_227_cast = mul(x = zero_mean_227_cast, y = denom_227_cast)[name = tensor("out_227_cast")]; + tensor var_7650_to_fp16 = const()[name = tensor("op_7650_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(708126272)))]; + tensor var_7651_cast = add(x = out_227_cast, y = var_7650_to_fp16)[name = tensor("op_7651_cast")]; + tensor var_7653_to_fp16 = const()[name = tensor("op_7653_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(708128896)))]; + tensor input_467_cast = mul(x = var_7651_cast, y = var_7653_to_fp16)[name = tensor("input_467_cast")]; + tensor var_7661 = const()[name = tensor("op_7661"), val = tensor([1, 1])]; + tensor var_7663 = const()[name = tensor("op_7663"), val = tensor([1, 1])]; + tensor var_7665_pad_type_0 = const()[name = tensor("op_7665_pad_type_0"), val = tensor("custom")]; + tensor var_7665_pad_0 = const()[name = tensor("op_7665_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(708131520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(714685184))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(714685312)))]; + tensor var_7665_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_7663, groups = var_6865, pad = var_7665_pad_0, pad_type = var_7665_pad_type_0, strides = var_7661, weight = up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_467_cast)[name = tensor("op_7665_cast")]; + tensor var_7666_split_sizes_0 = const()[name = 
tensor("op_7666_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7666_axis_0 = const()[name = tensor("op_7666_axis_0"), val = tensor(1)]; + tensor var_7666_cast_0, tensor var_7666_cast_1 = split(axis = var_7666_axis_0, split_sizes = var_7666_split_sizes_0, x = var_7665_cast)[name = tensor("op_7666_cast")]; + tensor var_7668_mode_0 = const()[name = tensor("op_7668_mode_0"), val = tensor("EXACT")]; + tensor var_7668_cast = gelu(mode = var_7668_mode_0, x = var_7666_cast_1)[name = tensor("op_7668_cast")]; + tensor input_469_cast = mul(x = var_7666_cast_0, y = var_7668_cast)[name = tensor("input_469_cast")]; + tensor var_7672 = const()[name = tensor("op_7672"), val = tensor([1, 1])]; + tensor var_7674 = const()[name = tensor("op_7674"), val = tensor([1, 1])]; + tensor var_7676_pad_type_0 = const()[name = tensor("op_7676_pad_type_0"), val = tensor("custom")]; + tensor var_7676_pad_0 = const()[name = tensor("op_7676_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(714705856))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717982720))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717982848)))]; + tensor var_7676_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_7674, groups = var_6865, pad = var_7676_pad_0, pad_type = var_7676_pad_type_0, strides = var_7672, weight = up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_469_cast)[name = tensor("op_7676_cast")]; + tensor inputs_229_cast = add(x = var_7676_cast, y = inputs_227_cast)[name = tensor("inputs_229_cast")]; + tensor var_7686 = const()[name = tensor("op_7686"), val = tensor([1])]; + tensor channels_mean_229_cast = reduce_mean(axes = var_7686, keep_dims = var_6860, x = inputs_229_cast)[name = tensor("channels_mean_229_cast")]; + tensor zero_mean_229_cast = sub(x = inputs_229_cast, y = channels_mean_229_cast)[name = tensor("zero_mean_229_cast")]; + tensor zero_mean_sq_229_cast = mul(x = zero_mean_229_cast, y = zero_mean_229_cast)[name = tensor("zero_mean_sq_229_cast")]; + tensor var_7690 = const()[name = tensor("op_7690"), val = tensor([1])]; + tensor var_7691_cast = reduce_mean(axes = var_7690, keep_dims = var_6860, x = zero_mean_sq_229_cast)[name = tensor("op_7691_cast")]; + tensor var_7692_to_fp16 = const()[name = tensor("op_7692_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7693_cast = add(x = var_7691_cast, y = var_7692_to_fp16)[name = tensor("op_7693_cast")]; + tensor denom_229_epsilon_0_to_fp16 = const()[name = tensor("denom_229_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_229_cast = rsqrt(epsilon = denom_229_epsilon_0_to_fp16, x = var_7693_cast)[name = tensor("denom_229_cast")]; + tensor out_229_cast = mul(x = zero_mean_229_cast, y = denom_229_cast)[name = tensor("out_229_cast")]; + tensor var_7697_to_fp16 = const()[name = tensor("op_7697_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(717985472)))]; + tensor var_7698_cast = add(x = out_229_cast, y = var_7697_to_fp16)[name = tensor("op_7698_cast")]; + tensor var_7700_to_fp16 = const()[name = tensor("op_7700_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717988096)))]; + tensor hidden_states_311_cast = mul(x = var_7698_cast, y = var_7700_to_fp16)[name = tensor("hidden_states_311_cast")]; + tensor var_7707 = const()[name = tensor("op_7707"), val = tensor([1, 1])]; + tensor var_7709 = const()[name = tensor("op_7709"), val = tensor([1, 1])]; + tensor q_153_pad_type_0 = const()[name = tensor("q_153_pad_type_0"), val = tensor("custom")]; + tensor q_153_pad_0 = const()[name = tensor("q_153_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(717990720))), lut = tensor([-0x1.6ap-5, -0x1.b3cp-7, 0x1.b1p-7, 0x1.69p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_153_cast = conv(dilations = var_7709, groups = var_6865, pad = q_153_pad_0, pad_type = q_153_pad_type_0, strides = var_7707, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_311_cast)[name = tensor("q_153_cast")]; + tensor var_7713 = const()[name = tensor("op_7713"), val = tensor([1, 1])]; + tensor var_7715 = const()[name = tensor("op_7715"), val = tensor([1, 1])]; + tensor k_153_pad_type_0 = const()[name = tensor("k_153_pad_type_0"), val = tensor("custom")]; + tensor k_153_pad_0 = const()[name = tensor("k_153_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(718400384))), lut = tensor([-0x1.6ap-5, -0x1.b1cp-7, 0x1.b64p-7, 0x1.6bp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_153_cast = conv(dilations = var_7715, groups = var_6865, pad = k_153_pad_0, pad_type = k_153_pad_type_0, strides = var_7713, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_311_cast)[name = tensor("k_153_cast")]; + tensor var_7719 = const()[name = tensor("op_7719"), val = tensor([1, 1])]; + tensor var_7721 = const()[name = tensor("op_7721"), val = tensor([1, 1])]; + tensor v_153_pad_type_0 = const()[name = tensor("v_153_pad_type_0"), val = tensor("custom")]; + tensor v_153_pad_0 = const()[name = tensor("v_153_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(718810048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(719629312))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_153_cast = conv(dilations = var_7721, groups = var_6865, pad = v_153_pad_0, pad_type = v_153_pad_type_0, strides = var_7719, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x 
= hidden_states_311_cast)[name = tensor("v_153_cast")]; + tensor var_7725 = const()[name = tensor("op_7725"), val = tensor([2, 20, 64, -1])]; + tensor var_7726_cast = reshape(shape = var_7725, x = q_153_cast)[name = tensor("op_7726_cast")]; + tensor var_7727 = const()[name = tensor("op_7727"), val = tensor([2, 20, 64, -1])]; + tensor var_7728_cast = reshape(shape = var_7727, x = k_153_cast)[name = tensor("op_7728_cast")]; + tensor var_7729 = const()[name = tensor("op_7729"), val = tensor([2, 20, 64, -1])]; + tensor var_7730_cast = reshape(shape = var_7729, x = v_153_cast)[name = tensor("op_7730_cast")]; + tensor attn_weights_305_transpose_x_0 = const()[name = tensor("attn_weights_305_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_305_transpose_y_0 = const()[name = tensor("attn_weights_305_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_305_cast = matmul(transpose_x = attn_weights_305_transpose_x_0, transpose_y = attn_weights_305_transpose_y_0, x = var_7726_cast, y = var_7728_cast)[name = tensor("attn_weights_305_cast")]; + tensor attn_weights_307_cast = mul(x = attn_weights_305_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_307_cast")]; + tensor var_7734_cast = softmax(axis = var_6849, x = attn_weights_307_cast)[name = tensor("op_7734_cast")]; + tensor attn_153_transpose_x_0 = const()[name = tensor("attn_153_transpose_x_0"), val = tensor(false)]; + tensor attn_153_transpose_y_0 = const()[name = tensor("attn_153_transpose_y_0"), val = tensor(true)]; + tensor attn_153_cast = matmul(transpose_x = attn_153_transpose_x_0, transpose_y = attn_153_transpose_y_0, x = var_7730_cast, y = var_7734_cast)[name = tensor("attn_153_cast")]; + tensor var_7738 = const()[name = tensor("op_7738"), val = tensor([2, 1280, 1, -1])]; + tensor input_471_cast = reshape(shape = var_7738, x = attn_153_cast)[name = tensor("input_471_cast")]; + tensor var_7743 = const()[name = tensor("op_7743"), val = tensor([1, 1])]; + tensor var_7745 = const()[name = tensor("op_7745"), val = tensor([1, 1])]; + tensor var_7747_pad_type_0 = const()[name = tensor("op_7747_pad_type_0"), val = tensor("custom")]; + tensor var_7747_pad_0 = const()[name = tensor("op_7747_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(719629440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(720858304))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(720858496)))]; + tensor var_7747_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_7745, groups = var_6865, pad = var_7747_pad_0, pad_type = var_7747_pad_type_0, strides = var_7743, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_471_cast)[name = tensor("op_7747_cast")]; + tensor inputs_231_cast = add(x = var_7747_cast, y = inputs_229_cast)[name = tensor("inputs_231_cast")]; + tensor var_7751 = const()[name = tensor("op_7751"), val = tensor([1])]; + tensor 
channels_mean_231_cast = reduce_mean(axes = var_7751, keep_dims = var_6860, x = inputs_231_cast)[name = tensor("channels_mean_231_cast")]; + tensor zero_mean_231_cast = sub(x = inputs_231_cast, y = channels_mean_231_cast)[name = tensor("zero_mean_231_cast")]; + tensor zero_mean_sq_231_cast = mul(x = zero_mean_231_cast, y = zero_mean_231_cast)[name = tensor("zero_mean_sq_231_cast")]; + tensor var_7755 = const()[name = tensor("op_7755"), val = tensor([1])]; + tensor var_7756_cast = reduce_mean(axes = var_7755, keep_dims = var_6860, x = zero_mean_sq_231_cast)[name = tensor("op_7756_cast")]; + tensor var_7757_to_fp16 = const()[name = tensor("op_7757_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7758_cast = add(x = var_7756_cast, y = var_7757_to_fp16)[name = tensor("op_7758_cast")]; + tensor denom_231_epsilon_0_to_fp16 = const()[name = tensor("denom_231_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_231_cast = rsqrt(epsilon = denom_231_epsilon_0_to_fp16, x = var_7758_cast)[name = tensor("denom_231_cast")]; + tensor out_231_cast = mul(x = zero_mean_231_cast, y = denom_231_cast)[name = tensor("out_231_cast")]; + tensor var_7762_to_fp16 = const()[name = tensor("op_7762_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(720861120)))]; + tensor var_7763_cast = add(x = out_231_cast, y = var_7762_to_fp16)[name = tensor("op_7763_cast")]; + tensor var_7765_to_fp16 = const()[name = tensor("op_7765_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(720863744)))]; + tensor hidden_states_313_cast = mul(x = var_7763_cast, y = var_7765_to_fp16)[name = tensor("hidden_states_313_cast")]; + tensor var_7772 = const()[name = tensor("op_7772"), val = tensor([1, 1])]; + tensor var_7774 = const()[name = tensor("op_7774"), val = tensor([1, 1])]; + tensor q_155_pad_type_0 = const()[name = tensor("q_155_pad_type_0"), val = tensor("custom")]; + tensor q_155_pad_0 = const()[name = tensor("q_155_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(720866368))), lut = tensor([-0x1.22cp-6, 0x1.234p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_155_cast = conv(dilations = var_7774, groups = var_6865, pad = q_155_pad_0, pad_type = q_155_pad_type_0, strides = var_7772, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_313_cast)[name = tensor("q_155_cast")]; + tensor var_7778 = const()[name = tensor("op_7778"), val = tensor([1, 1])]; + tensor var_7780 = const()[name = tensor("op_7780"), val = tensor([1, 1])]; + tensor k_155_pad_type_0 = const()[name = tensor("k_155_pad_type_0"), val = tensor("custom")]; + tensor k_155_pad_0 = const()[name = tensor("k_155_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721071232))), lut = tensor([-0x1.da8p-7, 0x1.da8p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_155_cast = conv(dilations = var_7780, groups = var_6865, pad = 
k_155_pad_0, pad_type = k_155_pad_type_0, strides = var_7778, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_155_cast")]; + tensor var_7784 = const()[name = tensor("op_7784"), val = tensor([1, 1])]; + tensor var_7786 = const()[name = tensor("op_7786"), val = tensor([1, 1])]; + tensor v_155_pad_type_0 = const()[name = tensor("v_155_pad_type_0"), val = tensor("custom")]; + tensor v_155_pad_0 = const()[name = tensor("v_155_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721398976))), lut = tensor([-0x1.14cp-6, 0x1.15p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_155_cast = conv(dilations = var_7786, groups = var_6865, pad = v_155_pad_0, pad_type = v_155_pad_type_0, strides = var_7784, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_155_cast")]; + tensor var_7790 = const()[name = tensor("op_7790"), val = tensor([2, 20, 64, -1])]; + tensor var_7791_cast = reshape(shape = var_7790, x = q_155_cast)[name = tensor("op_7791_cast")]; + tensor var_7792 = const()[name = tensor("op_7792"), val = tensor([2, 20, 64, -1])]; + tensor var_7793_cast = reshape(shape = var_7792, x = k_155_cast)[name = tensor("op_7793_cast")]; + tensor var_7794 = const()[name = tensor("op_7794"), val = tensor([2, 20, 64, -1])]; + tensor var_7795_cast = reshape(shape = var_7794, x = v_155_cast)[name = tensor("op_7795_cast")]; + tensor attn_weights_309_transpose_x_0 = const()[name = tensor("attn_weights_309_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_309_transpose_y_0 = const()[name = tensor("attn_weights_309_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_309_cast = matmul(transpose_x = attn_weights_309_transpose_x_0, transpose_y = attn_weights_309_transpose_y_0, x = var_7791_cast, y = var_7793_cast)[name = tensor("attn_weights_309_cast")]; + tensor attn_weights_311_cast = mul(x = attn_weights_309_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_311_cast")]; + tensor var_7799_cast = softmax(axis = var_6849, x = attn_weights_311_cast)[name = tensor("op_7799_cast")]; + tensor attn_155_transpose_x_0 = const()[name = tensor("attn_155_transpose_x_0"), val = tensor(false)]; + tensor attn_155_transpose_y_0 = const()[name = tensor("attn_155_transpose_y_0"), val = tensor(true)]; + tensor attn_155_cast = matmul(transpose_x = attn_155_transpose_x_0, transpose_y = attn_155_transpose_y_0, x = var_7795_cast, y = var_7799_cast)[name = tensor("attn_155_cast")]; + tensor var_7803 = const()[name = tensor("op_7803"), val = tensor([2, 1280, 1, -1])]; + tensor input_473_cast = reshape(shape = var_7803, x = attn_155_cast)[name = tensor("input_473_cast")]; + tensor var_7808 = const()[name = tensor("op_7808"), val = tensor([1, 1])]; + tensor var_7810 = const()[name = tensor("op_7810"), val = tensor([1, 1])]; + tensor var_7812_pad_type_0 = const()[name = tensor("op_7812_pad_type_0"), val = tensor("custom")]; + tensor var_7812_pad_0 = const()[name = tensor("op_7812_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721726720))), lut = tensor([-0x1.4acp-7, 0x1.494p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721931584)))]; + tensor var_7812_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_7810, groups = var_6865, pad = var_7812_pad_0, pad_type = var_7812_pad_type_0, strides = var_7808, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_473_cast)[name = tensor("op_7812_cast")]; + tensor inputs_233_cast = add(x = var_7812_cast, y = inputs_231_cast)[name = tensor("inputs_233_cast")]; + tensor var_7816 = const()[name = tensor("op_7816"), val = tensor([1])]; + tensor channels_mean_233_cast = reduce_mean(axes = var_7816, keep_dims = var_6860, x = inputs_233_cast)[name = tensor("channels_mean_233_cast")]; + tensor zero_mean_233_cast = sub(x = inputs_233_cast, y = channels_mean_233_cast)[name = tensor("zero_mean_233_cast")]; + tensor zero_mean_sq_233_cast = mul(x = zero_mean_233_cast, y = zero_mean_233_cast)[name = tensor("zero_mean_sq_233_cast")]; + tensor var_7820 = const()[name = tensor("op_7820"), val = tensor([1])]; + tensor var_7821_cast = reduce_mean(axes = var_7820, keep_dims = var_6860, x = zero_mean_sq_233_cast)[name = tensor("op_7821_cast")]; + tensor var_7822_to_fp16 = const()[name = tensor("op_7822_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7823_cast = add(x = var_7821_cast, y = var_7822_to_fp16)[name = tensor("op_7823_cast")]; + tensor denom_233_epsilon_0_to_fp16 = const()[name = tensor("denom_233_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_233_cast = rsqrt(epsilon = denom_233_epsilon_0_to_fp16, x = var_7823_cast)[name = tensor("denom_233_cast")]; + tensor out_233_cast = mul(x = zero_mean_233_cast, y = denom_233_cast)[name = tensor("out_233_cast")]; + tensor var_7827_to_fp16 = const()[name = tensor("op_7827_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721934208)))]; + tensor var_7828_cast = add(x = out_233_cast, y = var_7827_to_fp16)[name = tensor("op_7828_cast")]; + tensor var_7830_to_fp16 = const()[name = tensor("op_7830_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721936832)))]; + tensor input_475_cast = mul(x = var_7828_cast, y = var_7830_to_fp16)[name = tensor("input_475_cast")]; + tensor var_7838 = const()[name = tensor("op_7838"), val = tensor([1, 1])]; + tensor var_7840 = const()[name = tensor("op_7840"), val = tensor([1, 1])]; + tensor var_7842_pad_type_0 = const()[name = tensor("op_7842_pad_type_0"), val = tensor("custom")]; + tensor var_7842_pad_0 = const()[name = tensor("op_7842_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721939456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(728493120))), name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(728493248)))]; + tensor var_7842_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_7840, groups = var_6865, pad = var_7842_pad_0, pad_type = var_7842_pad_type_0, strides = var_7838, weight = up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_475_cast)[name = tensor("op_7842_cast")]; + tensor var_7843_split_sizes_0 = const()[name = tensor("op_7843_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7843_axis_0 = const()[name = tensor("op_7843_axis_0"), val = tensor(1)]; + tensor var_7843_cast_0, tensor var_7843_cast_1 = split(axis = var_7843_axis_0, split_sizes = var_7843_split_sizes_0, x = var_7842_cast)[name = tensor("op_7843_cast")]; + tensor var_7845_mode_0 = const()[name = tensor("op_7845_mode_0"), val = tensor("EXACT")]; + tensor var_7845_cast = gelu(mode = var_7845_mode_0, x = var_7843_cast_1)[name = tensor("op_7845_cast")]; + tensor input_477_cast = mul(x = var_7843_cast_0, y = var_7845_cast)[name = tensor("input_477_cast")]; + tensor var_7849 = const()[name = tensor("op_7849"), val = tensor([1, 1])]; + tensor var_7851 = const()[name = tensor("op_7851"), val = tensor([1, 1])]; + tensor var_7853_pad_type_0 = const()[name = tensor("op_7853_pad_type_0"), val = tensor("custom")]; + tensor var_7853_pad_0 = const()[name = tensor("op_7853_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(728513792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(731790656))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(731790784)))]; + tensor var_7853_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_7851, groups = var_6865, pad = var_7853_pad_0, pad_type = var_7853_pad_type_0, strides = var_7849, weight = up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_477_cast)[name = tensor("op_7853_cast")]; + tensor inputs_235_cast = add(x = var_7853_cast, y = inputs_233_cast)[name = tensor("inputs_235_cast")]; + tensor var_7863 = const()[name = tensor("op_7863"), val = tensor([1])]; + tensor channels_mean_235_cast = reduce_mean(axes = var_7863, keep_dims = var_6860, x = inputs_235_cast)[name = tensor("channels_mean_235_cast")]; + tensor zero_mean_235_cast = sub(x = inputs_235_cast, y = channels_mean_235_cast)[name = tensor("zero_mean_235_cast")]; + tensor zero_mean_sq_235_cast = mul(x = zero_mean_235_cast, y = zero_mean_235_cast)[name = tensor("zero_mean_sq_235_cast")]; + tensor var_7867 = const()[name = tensor("op_7867"), val = 
tensor([1])]; + tensor var_7868_cast = reduce_mean(axes = var_7867, keep_dims = var_6860, x = zero_mean_sq_235_cast)[name = tensor("op_7868_cast")]; + tensor var_7869_to_fp16 = const()[name = tensor("op_7869_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7870_cast = add(x = var_7868_cast, y = var_7869_to_fp16)[name = tensor("op_7870_cast")]; + tensor denom_235_epsilon_0_to_fp16 = const()[name = tensor("denom_235_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_235_cast = rsqrt(epsilon = denom_235_epsilon_0_to_fp16, x = var_7870_cast)[name = tensor("denom_235_cast")]; + tensor out_235_cast = mul(x = zero_mean_235_cast, y = denom_235_cast)[name = tensor("out_235_cast")]; + tensor var_7874_to_fp16 = const()[name = tensor("op_7874_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(731793408)))]; + tensor var_7875_cast = add(x = out_235_cast, y = var_7874_to_fp16)[name = tensor("op_7875_cast")]; + tensor var_7877_to_fp16 = const()[name = tensor("op_7877_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(731796032)))]; + tensor hidden_states_317_cast = mul(x = var_7875_cast, y = var_7877_to_fp16)[name = tensor("hidden_states_317_cast")]; + tensor var_7884 = const()[name = tensor("op_7884"), val = tensor([1, 1])]; + tensor var_7886 = const()[name = tensor("op_7886"), val = tensor([1, 1])]; + tensor q_157_pad_type_0 = const()[name = tensor("q_157_pad_type_0"), val = tensor("custom")]; + tensor q_157_pad_0 = const()[name = tensor("q_157_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(731798656))), lut = tensor([-0x1.66cp-5, -0x1.b04p-7, 0x1.adcp-7, 0x1.66cp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_157_cast = conv(dilations = var_7886, groups = var_6865, pad = q_157_pad_0, pad_type = q_157_pad_type_0, strides = var_7884, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_317_cast)[name = tensor("q_157_cast")]; + tensor var_7890 = const()[name = tensor("op_7890"), val = tensor([1, 1])]; + tensor var_7892 = const()[name = tensor("op_7892"), val = tensor([1, 1])]; + tensor k_157_pad_type_0 = const()[name = tensor("k_157_pad_type_0"), val = tensor("custom")]; + tensor k_157_pad_0 = const()[name = tensor("k_157_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(732208320))), lut = tensor([-0x1.684p-5, -0x1.b38p-7, 0x1.accp-7, 0x1.66cp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_157_cast = conv(dilations = var_7892, groups = var_6865, pad = k_157_pad_0, pad_type = k_157_pad_type_0, strides = var_7890, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_317_cast)[name = tensor("k_157_cast")]; + tensor var_7896 = const()[name = tensor("op_7896"), val = tensor([1, 1])]; + tensor var_7898 = const()[name = tensor("op_7898"), val = tensor([1, 1])]; + tensor v_157_pad_type_0 = 
const()[name = tensor("v_157_pad_type_0"), val = tensor("custom")]; + tensor v_157_pad_0 = const()[name = tensor("v_157_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(732617984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(733437248))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_157_cast = conv(dilations = var_7898, groups = var_6865, pad = v_157_pad_0, pad_type = v_157_pad_type_0, strides = var_7896, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_317_cast)[name = tensor("v_157_cast")]; + tensor var_7902 = const()[name = tensor("op_7902"), val = tensor([2, 20, 64, -1])]; + tensor var_7903_cast = reshape(shape = var_7902, x = q_157_cast)[name = tensor("op_7903_cast")]; + tensor var_7904 = const()[name = tensor("op_7904"), val = tensor([2, 20, 64, -1])]; + tensor var_7905_cast = reshape(shape = var_7904, x = k_157_cast)[name = tensor("op_7905_cast")]; + tensor var_7906 = const()[name = tensor("op_7906"), val = tensor([2, 20, 64, -1])]; + tensor var_7907_cast = reshape(shape = var_7906, x = v_157_cast)[name = tensor("op_7907_cast")]; + tensor attn_weights_313_transpose_x_0 = const()[name = tensor("attn_weights_313_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_313_transpose_y_0 = const()[name = tensor("attn_weights_313_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_313_cast = matmul(transpose_x = attn_weights_313_transpose_x_0, transpose_y = attn_weights_313_transpose_y_0, x = var_7903_cast, y = var_7905_cast)[name = tensor("attn_weights_313_cast")]; + tensor attn_weights_315_cast = mul(x = attn_weights_313_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_315_cast")]; + tensor var_7911_cast = softmax(axis = var_6849, x = attn_weights_315_cast)[name = tensor("op_7911_cast")]; + tensor attn_157_transpose_x_0 = const()[name = tensor("attn_157_transpose_x_0"), val = tensor(false)]; + tensor attn_157_transpose_y_0 = const()[name = tensor("attn_157_transpose_y_0"), val = tensor(true)]; + tensor attn_157_cast = matmul(transpose_x = attn_157_transpose_x_0, transpose_y = attn_157_transpose_y_0, x = var_7907_cast, y = var_7911_cast)[name = tensor("attn_157_cast")]; + tensor var_7915 = const()[name = tensor("op_7915"), val = tensor([2, 1280, 1, -1])]; + tensor input_479_cast = reshape(shape = var_7915, x = attn_157_cast)[name = tensor("input_479_cast")]; + tensor var_7920 = const()[name = tensor("op_7920"), val = tensor([1, 1])]; + tensor var_7922 = const()[name = tensor("op_7922"), val = tensor([1, 1])]; + tensor var_7924_pad_type_0 = const()[name = tensor("op_7924_pad_type_0"), val = tensor("custom")]; + tensor var_7924_pad_0 = const()[name = tensor("op_7924_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(733437376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(734666240))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + 
tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(734666432)))]; + tensor var_7924_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_7922, groups = var_6865, pad = var_7924_pad_0, pad_type = var_7924_pad_type_0, strides = var_7920, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_479_cast)[name = tensor("op_7924_cast")]; + tensor inputs_237_cast = add(x = var_7924_cast, y = inputs_235_cast)[name = tensor("inputs_237_cast")]; + tensor var_7928 = const()[name = tensor("op_7928"), val = tensor([1])]; + tensor channels_mean_237_cast = reduce_mean(axes = var_7928, keep_dims = var_6860, x = inputs_237_cast)[name = tensor("channels_mean_237_cast")]; + tensor zero_mean_237_cast = sub(x = inputs_237_cast, y = channels_mean_237_cast)[name = tensor("zero_mean_237_cast")]; + tensor zero_mean_sq_237_cast = mul(x = zero_mean_237_cast, y = zero_mean_237_cast)[name = tensor("zero_mean_sq_237_cast")]; + tensor var_7932 = const()[name = tensor("op_7932"), val = tensor([1])]; + tensor var_7933_cast = reduce_mean(axes = var_7932, keep_dims = var_6860, x = zero_mean_sq_237_cast)[name = tensor("op_7933_cast")]; + tensor var_7934_to_fp16 = const()[name = tensor("op_7934_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7935_cast = add(x = var_7933_cast, y = var_7934_to_fp16)[name = tensor("op_7935_cast")]; + tensor denom_237_epsilon_0_to_fp16 = const()[name = tensor("denom_237_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_237_cast = rsqrt(epsilon = denom_237_epsilon_0_to_fp16, x = var_7935_cast)[name = tensor("denom_237_cast")]; + tensor out_237_cast = mul(x = zero_mean_237_cast, y = denom_237_cast)[name = tensor("out_237_cast")]; + tensor var_7939_to_fp16 = const()[name = tensor("op_7939_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(734669056)))]; + tensor var_7940_cast = add(x = out_237_cast, y = var_7939_to_fp16)[name = tensor("op_7940_cast")]; + tensor var_7942_to_fp16 = const()[name = tensor("op_7942_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(734671680)))]; + tensor hidden_states_319_cast = mul(x = var_7940_cast, y = var_7942_to_fp16)[name = tensor("hidden_states_319_cast")]; + tensor var_7949 = const()[name = tensor("op_7949"), val = tensor([1, 1])]; + tensor var_7951 = const()[name = tensor("op_7951"), val = tensor([1, 1])]; + tensor q_159_pad_type_0 = const()[name = tensor("q_159_pad_type_0"), val = tensor("custom")]; + tensor q_159_pad_0 = const()[name = tensor("q_159_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(734674304))), lut = tensor([-0x1.144p-6, 0x1.148p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_159_cast = conv(dilations = var_7951, groups = var_6865, pad = q_159_pad_0, pad_type = q_159_pad_type_0, strides = var_7949, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = 
hidden_states_319_cast)[name = tensor("q_159_cast")]; + tensor var_7955 = const()[name = tensor("op_7955"), val = tensor([1, 1])]; + tensor var_7957 = const()[name = tensor("op_7957"), val = tensor([1, 1])]; + tensor k_159_pad_type_0 = const()[name = tensor("k_159_pad_type_0"), val = tensor("custom")]; + tensor k_159_pad_0 = const()[name = tensor("k_159_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(734879168))), lut = tensor([-0x1.b5cp-7, 0x1.b6p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_159_cast = conv(dilations = var_7957, groups = var_6865, pad = k_159_pad_0, pad_type = k_159_pad_type_0, strides = var_7955, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_159_cast")]; + tensor var_7961 = const()[name = tensor("op_7961"), val = tensor([1, 1])]; + tensor var_7963 = const()[name = tensor("op_7963"), val = tensor([1, 1])]; + tensor v_159_pad_type_0 = const()[name = tensor("v_159_pad_type_0"), val = tensor("custom")]; + tensor v_159_pad_0 = const()[name = tensor("v_159_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(735206912))), lut = tensor([-0x1.0cp-6, 0x1.0c8p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_159_cast = conv(dilations = var_7963, groups = var_6865, pad = v_159_pad_0, pad_type = v_159_pad_type_0, strides = var_7961, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_159_cast")]; + tensor var_7967 = const()[name = tensor("op_7967"), val = tensor([2, 20, 64, -1])]; + tensor var_7968_cast = reshape(shape = var_7967, x = q_159_cast)[name = tensor("op_7968_cast")]; + tensor var_7969 = const()[name = tensor("op_7969"), val = tensor([2, 20, 64, -1])]; + tensor var_7970_cast = reshape(shape = var_7969, x = k_159_cast)[name = tensor("op_7970_cast")]; + tensor var_7971 = const()[name = tensor("op_7971"), val = tensor([2, 20, 64, -1])]; + tensor var_7972_cast = reshape(shape = var_7971, x = v_159_cast)[name = tensor("op_7972_cast")]; + tensor attn_weights_317_transpose_x_0 = const()[name = tensor("attn_weights_317_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_317_transpose_y_0 = const()[name = tensor("attn_weights_317_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_317_cast = matmul(transpose_x = attn_weights_317_transpose_x_0, transpose_y = attn_weights_317_transpose_y_0, x = var_7968_cast, y = var_7970_cast)[name = tensor("attn_weights_317_cast")]; + tensor attn_weights_319_cast = mul(x = attn_weights_317_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_319_cast")]; + tensor var_7976_cast = softmax(axis = var_6849, x = attn_weights_319_cast)[name = tensor("op_7976_cast")]; + tensor attn_159_transpose_x_0 = const()[name = tensor("attn_159_transpose_x_0"), val = tensor(false)]; + tensor attn_159_transpose_y_0 = const()[name = tensor("attn_159_transpose_y_0"), val = 
tensor(true)]; + tensor attn_159_cast = matmul(transpose_x = attn_159_transpose_x_0, transpose_y = attn_159_transpose_y_0, x = var_7972_cast, y = var_7976_cast)[name = tensor("attn_159_cast")]; + tensor var_7980 = const()[name = tensor("op_7980"), val = tensor([2, 1280, 1, -1])]; + tensor input_481_cast = reshape(shape = var_7980, x = attn_159_cast)[name = tensor("input_481_cast")]; + tensor var_7985 = const()[name = tensor("op_7985"), val = tensor([1, 1])]; + tensor var_7987 = const()[name = tensor("op_7987"), val = tensor([1, 1])]; + tensor var_7989_pad_type_0 = const()[name = tensor("op_7989_pad_type_0"), val = tensor("custom")]; + tensor var_7989_pad_0 = const()[name = tensor("op_7989_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(735534656))), lut = tensor([-0x1.46p-7, 0x1.45cp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(735739520)))]; + tensor var_7989_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_7987, groups = var_6865, pad = var_7989_pad_0, pad_type = var_7989_pad_type_0, strides = var_7985, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_481_cast)[name = tensor("op_7989_cast")]; + tensor inputs_239_cast = add(x = var_7989_cast, y = inputs_237_cast)[name = tensor("inputs_239_cast")]; + tensor var_7993 = const()[name = tensor("op_7993"), val = tensor([1])]; + tensor channels_mean_239_cast = reduce_mean(axes = var_7993, keep_dims = var_6860, x = inputs_239_cast)[name = tensor("channels_mean_239_cast")]; + tensor zero_mean_239_cast = sub(x = inputs_239_cast, y = channels_mean_239_cast)[name = tensor("zero_mean_239_cast")]; + tensor zero_mean_sq_239_cast = mul(x = zero_mean_239_cast, y = zero_mean_239_cast)[name = tensor("zero_mean_sq_239_cast")]; + tensor var_7997 = const()[name = tensor("op_7997"), val = tensor([1])]; + tensor var_7998_cast = reduce_mean(axes = var_7997, keep_dims = var_6860, x = zero_mean_sq_239_cast)[name = tensor("op_7998_cast")]; + tensor var_7999_to_fp16 = const()[name = tensor("op_7999_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8000_cast = add(x = var_7998_cast, y = var_7999_to_fp16)[name = tensor("op_8000_cast")]; + tensor denom_239_epsilon_0_to_fp16 = const()[name = tensor("denom_239_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_239_cast = rsqrt(epsilon = denom_239_epsilon_0_to_fp16, x = var_8000_cast)[name = tensor("denom_239_cast")]; + tensor out_239_cast = mul(x = zero_mean_239_cast, y = denom_239_cast)[name = tensor("out_239_cast")]; + tensor var_8004_to_fp16 = const()[name = tensor("op_8004_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(735742144)))]; + tensor var_8005_cast = add(x = out_239_cast, y = var_8004_to_fp16)[name = tensor("op_8005_cast")]; + tensor var_8007_to_fp16 = const()[name = tensor("op_8007_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(735744768)))]; + tensor input_483_cast = mul(x = var_8005_cast, y = var_8007_to_fp16)[name = tensor("input_483_cast")]; + tensor var_8015 = const()[name = tensor("op_8015"), val = tensor([1, 1])]; + tensor var_8017 = const()[name = tensor("op_8017"), val = tensor([1, 1])]; + tensor var_8019_pad_type_0 = const()[name = tensor("op_8019_pad_type_0"), val = tensor("custom")]; + tensor var_8019_pad_0 = const()[name = tensor("op_8019_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(735747392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(742301056))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(742301184)))]; + tensor var_8019_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_8017, groups = var_6865, pad = var_8019_pad_0, pad_type = var_8019_pad_type_0, strides = var_8015, weight = up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_483_cast)[name = tensor("op_8019_cast")]; + tensor var_8020_split_sizes_0 = const()[name = tensor("op_8020_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8020_axis_0 = const()[name = tensor("op_8020_axis_0"), val = tensor(1)]; + tensor var_8020_cast_0, tensor var_8020_cast_1 = split(axis = var_8020_axis_0, split_sizes = var_8020_split_sizes_0, x = var_8019_cast)[name = tensor("op_8020_cast")]; + tensor var_8022_mode_0 = const()[name = tensor("op_8022_mode_0"), val = tensor("EXACT")]; + tensor var_8022_cast = gelu(mode = var_8022_mode_0, x = var_8020_cast_1)[name = tensor("op_8022_cast")]; + tensor input_485_cast = mul(x = var_8020_cast_0, y = var_8022_cast)[name = tensor("input_485_cast")]; + tensor var_8026 = const()[name = tensor("op_8026"), val = tensor([1, 1])]; + tensor var_8028 = const()[name = tensor("op_8028"), val = tensor([1, 1])]; + tensor var_8030_pad_type_0 = const()[name = tensor("op_8030_pad_type_0"), val = tensor("custom")]; + tensor var_8030_pad_0 = const()[name = tensor("op_8030_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(742321728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(745598592))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(745598720)))]; + tensor var_8030_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_8028, groups = var_6865, pad = var_8030_pad_0, pad_type = 
var_8030_pad_type_0, strides = var_8026, weight = up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_485_cast)[name = tensor("op_8030_cast")]; + tensor inputs_241_cast = add(x = var_8030_cast, y = inputs_239_cast)[name = tensor("inputs_241_cast")]; + tensor var_8040 = const()[name = tensor("op_8040"), val = tensor([1])]; + tensor channels_mean_241_cast = reduce_mean(axes = var_8040, keep_dims = var_6860, x = inputs_241_cast)[name = tensor("channels_mean_241_cast")]; + tensor zero_mean_241_cast = sub(x = inputs_241_cast, y = channels_mean_241_cast)[name = tensor("zero_mean_241_cast")]; + tensor zero_mean_sq_241_cast = mul(x = zero_mean_241_cast, y = zero_mean_241_cast)[name = tensor("zero_mean_sq_241_cast")]; + tensor var_8044 = const()[name = tensor("op_8044"), val = tensor([1])]; + tensor var_8045_cast = reduce_mean(axes = var_8044, keep_dims = var_6860, x = zero_mean_sq_241_cast)[name = tensor("op_8045_cast")]; + tensor var_8046_to_fp16 = const()[name = tensor("op_8046_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8047_cast = add(x = var_8045_cast, y = var_8046_to_fp16)[name = tensor("op_8047_cast")]; + tensor denom_241_epsilon_0_to_fp16 = const()[name = tensor("denom_241_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_241_cast = rsqrt(epsilon = denom_241_epsilon_0_to_fp16, x = var_8047_cast)[name = tensor("denom_241_cast")]; + tensor out_241_cast = mul(x = zero_mean_241_cast, y = denom_241_cast)[name = tensor("out_241_cast")]; + tensor var_8051_to_fp16 = const()[name = tensor("op_8051_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(745601344)))]; + tensor var_8052_cast = add(x = out_241_cast, y = var_8051_to_fp16)[name = tensor("op_8052_cast")]; + tensor var_8054_to_fp16 = const()[name = tensor("op_8054_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(745603968)))]; + tensor hidden_states_323_cast = mul(x = var_8052_cast, y = var_8054_to_fp16)[name = tensor("hidden_states_323_cast")]; + tensor var_8061 = const()[name = tensor("op_8061"), val = tensor([1, 1])]; + tensor var_8063 = const()[name = tensor("op_8063"), val = tensor([1, 1])]; + tensor q_161_pad_type_0 = const()[name = tensor("q_161_pad_type_0"), val = tensor("custom")]; + tensor q_161_pad_0 = const()[name = tensor("q_161_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(745606592))), lut = tensor([-0x1.7a8p-6, 0x1.7cp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_161_cast = conv(dilations = var_8063, groups = var_6865, pad = q_161_pad_0, pad_type = q_161_pad_type_0, strides = var_8061, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_323_cast)[name = tensor("q_161_cast")]; + tensor var_8067 = const()[name = tensor("op_8067"), val = tensor([1, 1])]; + tensor var_8069 = const()[name = tensor("op_8069"), val = tensor([1, 1])]; + tensor k_161_pad_type_0 = const()[name = tensor("k_161_pad_type_0"), val = tensor("custom")]; + tensor k_161_pad_0 = const()[name = tensor("k_161_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(745811456))), lut = tensor([-0x1.7b8p-6, 0x1.7bcp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_161_cast = conv(dilations = var_8069, groups = var_6865, pad = k_161_pad_0, pad_type = k_161_pad_type_0, strides = var_8067, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_323_cast)[name = tensor("k_161_cast")]; + tensor var_8073 = const()[name = tensor("op_8073"), val = tensor([1, 1])]; + tensor var_8075 = const()[name = tensor("op_8075"), val = tensor([1, 1])]; + tensor v_161_pad_type_0 = const()[name = tensor("v_161_pad_type_0"), val = tensor("custom")]; + tensor v_161_pad_0 = const()[name = tensor("v_161_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(746016320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(746835584))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_161_cast = conv(dilations = var_8075, groups = var_6865, pad = v_161_pad_0, pad_type = v_161_pad_type_0, strides = var_8073, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_323_cast)[name = tensor("v_161_cast")]; + tensor var_8079 = const()[name = tensor("op_8079"), val = tensor([2, 20, 64, -1])]; + tensor var_8080_cast = reshape(shape = var_8079, x = q_161_cast)[name = tensor("op_8080_cast")]; + tensor var_8081 = const()[name = tensor("op_8081"), val = tensor([2, 20, 64, -1])]; + tensor var_8082_cast = reshape(shape = var_8081, x = k_161_cast)[name = tensor("op_8082_cast")]; + tensor var_8083 = const()[name = tensor("op_8083"), val = tensor([2, 20, 64, -1])]; + tensor var_8084_cast = reshape(shape = var_8083, x = v_161_cast)[name = tensor("op_8084_cast")]; + tensor attn_weights_321_transpose_x_0 = const()[name = tensor("attn_weights_321_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_321_transpose_y_0 = const()[name = tensor("attn_weights_321_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_321_cast = matmul(transpose_x = attn_weights_321_transpose_x_0, transpose_y = attn_weights_321_transpose_y_0, x = var_8080_cast, y = var_8082_cast)[name = tensor("attn_weights_321_cast")]; + tensor attn_weights_323_cast = mul(x = attn_weights_321_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_323_cast")]; + tensor var_8088_cast = softmax(axis = var_6849, x = attn_weights_323_cast)[name = tensor("op_8088_cast")]; + tensor attn_161_transpose_x_0 = const()[name = tensor("attn_161_transpose_x_0"), val = tensor(false)]; + tensor attn_161_transpose_y_0 = const()[name = tensor("attn_161_transpose_y_0"), val = tensor(true)]; + tensor attn_161_cast = matmul(transpose_x = attn_161_transpose_x_0, transpose_y = attn_161_transpose_y_0, x = var_8084_cast, y = var_8088_cast)[name = tensor("attn_161_cast")]; + tensor var_8092 = const()[name = tensor("op_8092"), val = tensor([2, 1280, 1, -1])]; + tensor input_487_cast = reshape(shape = var_8092, x = attn_161_cast)[name = tensor("input_487_cast")]; + tensor var_8097 = const()[name = 
tensor("op_8097"), val = tensor([1, 1])]; + tensor var_8099 = const()[name = tensor("op_8099"), val = tensor([1, 1])]; + tensor var_8101_pad_type_0 = const()[name = tensor("op_8101_pad_type_0"), val = tensor("custom")]; + tensor var_8101_pad_0 = const()[name = tensor("op_8101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(746835712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748064576))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748064768)))]; + tensor var_8101_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_8099, groups = var_6865, pad = var_8101_pad_0, pad_type = var_8101_pad_type_0, strides = var_8097, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_487_cast)[name = tensor("op_8101_cast")]; + tensor inputs_243_cast = add(x = var_8101_cast, y = inputs_241_cast)[name = tensor("inputs_243_cast")]; + tensor var_8105 = const()[name = tensor("op_8105"), val = tensor([1])]; + tensor channels_mean_243_cast = reduce_mean(axes = var_8105, keep_dims = var_6860, x = inputs_243_cast)[name = tensor("channels_mean_243_cast")]; + tensor zero_mean_243_cast = sub(x = inputs_243_cast, y = channels_mean_243_cast)[name = tensor("zero_mean_243_cast")]; + tensor zero_mean_sq_243_cast = mul(x = zero_mean_243_cast, y = zero_mean_243_cast)[name = tensor("zero_mean_sq_243_cast")]; + tensor var_8109 = const()[name = tensor("op_8109"), val = tensor([1])]; + tensor var_8110_cast = reduce_mean(axes = var_8109, keep_dims = var_6860, x = zero_mean_sq_243_cast)[name = tensor("op_8110_cast")]; + tensor var_8111_to_fp16 = const()[name = tensor("op_8111_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8112_cast = add(x = var_8110_cast, y = var_8111_to_fp16)[name = tensor("op_8112_cast")]; + tensor denom_243_epsilon_0_to_fp16 = const()[name = tensor("denom_243_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_243_cast = rsqrt(epsilon = denom_243_epsilon_0_to_fp16, x = var_8112_cast)[name = tensor("denom_243_cast")]; + tensor out_243_cast = mul(x = zero_mean_243_cast, y = denom_243_cast)[name = tensor("out_243_cast")]; + tensor var_8116_to_fp16 = const()[name = tensor("op_8116_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748067392)))]; + tensor var_8117_cast = add(x = out_243_cast, y = var_8116_to_fp16)[name = tensor("op_8117_cast")]; + tensor var_8119_to_fp16 = const()[name = tensor("op_8119_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748070016)))]; + tensor hidden_states_325_cast = mul(x = var_8117_cast, y = var_8119_to_fp16)[name = tensor("hidden_states_325_cast")]; + tensor var_8126 = const()[name = tensor("op_8126"), val = tensor([1, 1])]; + tensor var_8128 = const()[name = tensor("op_8128"), val = tensor([1, 1])]; + tensor q_163_pad_type_0 = const()[name = 
tensor("q_163_pad_type_0"), val = tensor("custom")]; + tensor q_163_pad_0 = const()[name = tensor("q_163_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748072640))), lut = tensor([-0x1.f4cp-7, 0x1.f54p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_163_cast = conv(dilations = var_8128, groups = var_6865, pad = q_163_pad_0, pad_type = q_163_pad_type_0, strides = var_8126, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_325_cast)[name = tensor("q_163_cast")]; + tensor var_8132 = const()[name = tensor("op_8132"), val = tensor([1, 1])]; + tensor var_8134 = const()[name = tensor("op_8134"), val = tensor([1, 1])]; + tensor k_163_pad_type_0 = const()[name = tensor("k_163_pad_type_0"), val = tensor("custom")]; + tensor k_163_pad_0 = const()[name = tensor("k_163_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748277504))), lut = tensor([-0x1.7dcp-7, 0x1.7dp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_163_cast = conv(dilations = var_8134, groups = var_6865, pad = k_163_pad_0, pad_type = k_163_pad_type_0, strides = var_8132, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_163_cast")]; + tensor var_8138 = const()[name = tensor("op_8138"), val = tensor([1, 1])]; + tensor var_8140 = const()[name = tensor("op_8140"), val = tensor([1, 1])]; + tensor v_163_pad_type_0 = const()[name = tensor("v_163_pad_type_0"), val = tensor("custom")]; + tensor v_163_pad_0 = const()[name = tensor("v_163_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748605248))), lut = tensor([-0x1.ed4p-7, 0x1.ea4p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_163_cast = conv(dilations = var_8140, groups = var_6865, pad = v_163_pad_0, pad_type = v_163_pad_type_0, strides = var_8138, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_163_cast")]; + tensor var_8144 = const()[name = tensor("op_8144"), val = tensor([2, 20, 64, -1])]; + tensor var_8145_cast = reshape(shape = var_8144, x = q_163_cast)[name = tensor("op_8145_cast")]; + tensor var_8146 = const()[name = tensor("op_8146"), val = tensor([2, 20, 64, -1])]; + tensor var_8147_cast = reshape(shape = var_8146, x = k_163_cast)[name = tensor("op_8147_cast")]; + tensor var_8148 = const()[name = tensor("op_8148"), val = tensor([2, 20, 64, -1])]; + tensor var_8149_cast = reshape(shape = var_8148, x = v_163_cast)[name = tensor("op_8149_cast")]; + tensor attn_weights_325_transpose_x_0 = const()[name = tensor("attn_weights_325_transpose_x_0"), val = 
tensor(true)]; + tensor attn_weights_325_transpose_y_0 = const()[name = tensor("attn_weights_325_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_325_cast = matmul(transpose_x = attn_weights_325_transpose_x_0, transpose_y = attn_weights_325_transpose_y_0, x = var_8145_cast, y = var_8147_cast)[name = tensor("attn_weights_325_cast")]; + tensor attn_weights_327_cast = mul(x = attn_weights_325_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_327_cast")]; + tensor var_8153_cast = softmax(axis = var_6849, x = attn_weights_327_cast)[name = tensor("op_8153_cast")]; + tensor attn_163_transpose_x_0 = const()[name = tensor("attn_163_transpose_x_0"), val = tensor(false)]; + tensor attn_163_transpose_y_0 = const()[name = tensor("attn_163_transpose_y_0"), val = tensor(true)]; + tensor attn_163_cast = matmul(transpose_x = attn_163_transpose_x_0, transpose_y = attn_163_transpose_y_0, x = var_8149_cast, y = var_8153_cast)[name = tensor("attn_163_cast")]; + tensor var_8157 = const()[name = tensor("op_8157"), val = tensor([2, 1280, 1, -1])]; + tensor input_489_cast = reshape(shape = var_8157, x = attn_163_cast)[name = tensor("input_489_cast")]; + tensor var_8162 = const()[name = tensor("op_8162"), val = tensor([1, 1])]; + tensor var_8164 = const()[name = tensor("op_8164"), val = tensor([1, 1])]; + tensor var_8166_pad_type_0 = const()[name = tensor("op_8166_pad_type_0"), val = tensor("custom")]; + tensor var_8166_pad_0 = const()[name = tensor("op_8166_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748932992))), lut = tensor([-0x1.2bp-7, 0x1.2b4p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(749137856)))]; + tensor var_8166_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_8164, groups = var_6865, pad = var_8166_pad_0, pad_type = var_8166_pad_type_0, strides = var_8162, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_489_cast)[name = tensor("op_8166_cast")]; + tensor inputs_245_cast = add(x = var_8166_cast, y = inputs_243_cast)[name = tensor("inputs_245_cast")]; + tensor var_8170 = const()[name = tensor("op_8170"), val = tensor([1])]; + tensor channels_mean_245_cast = reduce_mean(axes = var_8170, keep_dims = var_6860, x = inputs_245_cast)[name = tensor("channels_mean_245_cast")]; + tensor zero_mean_245_cast = sub(x = inputs_245_cast, y = channels_mean_245_cast)[name = tensor("zero_mean_245_cast")]; + tensor zero_mean_sq_245_cast = mul(x = zero_mean_245_cast, y = zero_mean_245_cast)[name = tensor("zero_mean_sq_245_cast")]; + tensor var_8174 = const()[name = tensor("op_8174"), val = tensor([1])]; + tensor var_8175_cast = reduce_mean(axes = var_8174, keep_dims = var_6860, x = zero_mean_sq_245_cast)[name = tensor("op_8175_cast")]; + tensor var_8176_to_fp16 = const()[name = tensor("op_8176_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8177_cast = add(x = var_8175_cast, y = var_8176_to_fp16)[name = 
tensor("op_8177_cast")]; + tensor denom_245_epsilon_0_to_fp16 = const()[name = tensor("denom_245_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_245_cast = rsqrt(epsilon = denom_245_epsilon_0_to_fp16, x = var_8177_cast)[name = tensor("denom_245_cast")]; + tensor out_245_cast = mul(x = zero_mean_245_cast, y = denom_245_cast)[name = tensor("out_245_cast")]; + tensor var_8181_to_fp16 = const()[name = tensor("op_8181_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(749140480)))]; + tensor var_8182_cast = add(x = out_245_cast, y = var_8181_to_fp16)[name = tensor("op_8182_cast")]; + tensor var_8184_to_fp16 = const()[name = tensor("op_8184_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(749143104)))]; + tensor input_491_cast = mul(x = var_8182_cast, y = var_8184_to_fp16)[name = tensor("input_491_cast")]; + tensor var_8192 = const()[name = tensor("op_8192"), val = tensor([1, 1])]; + tensor var_8194 = const()[name = tensor("op_8194"), val = tensor([1, 1])]; + tensor var_8196_pad_type_0 = const()[name = tensor("op_8196_pad_type_0"), val = tensor("custom")]; + tensor var_8196_pad_0 = const()[name = tensor("op_8196_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(749145728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(755699392))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(755699520)))]; + tensor var_8196_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_8194, groups = var_6865, pad = var_8196_pad_0, pad_type = var_8196_pad_type_0, strides = var_8192, weight = up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_491_cast)[name = tensor("op_8196_cast")]; + tensor var_8197_split_sizes_0 = const()[name = tensor("op_8197_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8197_axis_0 = const()[name = tensor("op_8197_axis_0"), val = tensor(1)]; + tensor var_8197_cast_0, tensor var_8197_cast_1 = split(axis = var_8197_axis_0, split_sizes = var_8197_split_sizes_0, x = var_8196_cast)[name = tensor("op_8197_cast")]; + tensor var_8199_mode_0 = const()[name = tensor("op_8199_mode_0"), val = tensor("EXACT")]; + tensor var_8199_cast = gelu(mode = var_8199_mode_0, x = var_8197_cast_1)[name = tensor("op_8199_cast")]; + tensor input_493_cast = mul(x = var_8197_cast_0, y = var_8199_cast)[name = tensor("input_493_cast")]; + tensor var_8203 = const()[name = tensor("op_8203"), val = tensor([1, 1])]; + tensor var_8205 = const()[name = tensor("op_8205"), val = tensor([1, 1])]; + tensor var_8207_pad_type_0 = const()[name = tensor("op_8207_pad_type_0"), val = tensor("custom")]; + tensor var_8207_pad_0 = const()[name = tensor("op_8207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(755720064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(758996928))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(758997056)))]; + tensor var_8207_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_8205, groups = var_6865, pad = var_8207_pad_0, pad_type = var_8207_pad_type_0, strides = var_8203, weight = up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_493_cast)[name = tensor("op_8207_cast")]; + tensor inputs_247_cast = add(x = var_8207_cast, y = inputs_245_cast)[name = tensor("inputs_247_cast")]; + tensor var_8217 = const()[name = tensor("op_8217"), val = tensor([1])]; + tensor channels_mean_247_cast = reduce_mean(axes = var_8217, keep_dims = var_6860, x = inputs_247_cast)[name = tensor("channels_mean_247_cast")]; + tensor zero_mean_247_cast = sub(x = inputs_247_cast, y = channels_mean_247_cast)[name = tensor("zero_mean_247_cast")]; + tensor zero_mean_sq_247_cast = mul(x = zero_mean_247_cast, y = zero_mean_247_cast)[name = tensor("zero_mean_sq_247_cast")]; + tensor var_8221 = const()[name = tensor("op_8221"), val = tensor([1])]; + tensor var_8222_cast = reduce_mean(axes = var_8221, keep_dims = var_6860, x = zero_mean_sq_247_cast)[name = tensor("op_8222_cast")]; + tensor var_8223_to_fp16 = const()[name = tensor("op_8223_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8224_cast = add(x = var_8222_cast, y = var_8223_to_fp16)[name = tensor("op_8224_cast")]; + tensor denom_247_epsilon_0_to_fp16 = const()[name = tensor("denom_247_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_247_cast = rsqrt(epsilon = denom_247_epsilon_0_to_fp16, x = var_8224_cast)[name = tensor("denom_247_cast")]; + tensor out_247_cast = mul(x = zero_mean_247_cast, y = denom_247_cast)[name = tensor("out_247_cast")]; + tensor var_8228_to_fp16 = const()[name = tensor("op_8228_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(758999680)))]; + tensor var_8229_cast = add(x = out_247_cast, y = var_8228_to_fp16)[name = tensor("op_8229_cast")]; + tensor var_8231_to_fp16 = const()[name = tensor("op_8231_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(759002304)))]; + tensor hidden_states_329_cast = mul(x = var_8229_cast, y = var_8231_to_fp16)[name = tensor("hidden_states_329_cast")]; + tensor var_8238 = const()[name = tensor("op_8238"), val = tensor([1, 1])]; + tensor var_8240 = const()[name = tensor("op_8240"), val = tensor([1, 1])]; + tensor q_165_pad_type_0 = const()[name = tensor("q_165_pad_type_0"), val = tensor("custom")]; + tensor q_165_pad_0 = const()[name = tensor("q_165_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(759004928))), lut = tensor([-0x1.67p-5, -0x1.afp-7, 0x1.afcp-7, 0x1.668p-5]), name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_165_cast = conv(dilations = var_8240, groups = var_6865, pad = q_165_pad_0, pad_type = q_165_pad_type_0, strides = var_8238, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_329_cast)[name = tensor("q_165_cast")]; + tensor var_8244 = const()[name = tensor("op_8244"), val = tensor([1, 1])]; + tensor var_8246 = const()[name = tensor("op_8246"), val = tensor([1, 1])]; + tensor k_165_pad_type_0 = const()[name = tensor("k_165_pad_type_0"), val = tensor("custom")]; + tensor k_165_pad_0 = const()[name = tensor("k_165_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(759414592))), lut = tensor([-0x1.678p-5, -0x1.b0cp-7, 0x1.b04p-7, 0x1.678p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_165_cast = conv(dilations = var_8246, groups = var_6865, pad = k_165_pad_0, pad_type = k_165_pad_type_0, strides = var_8244, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_329_cast)[name = tensor("k_165_cast")]; + tensor var_8250 = const()[name = tensor("op_8250"), val = tensor([1, 1])]; + tensor var_8252 = const()[name = tensor("op_8252"), val = tensor([1, 1])]; + tensor v_165_pad_type_0 = const()[name = tensor("v_165_pad_type_0"), val = tensor("custom")]; + tensor v_165_pad_0 = const()[name = tensor("v_165_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(759824256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(760643520))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_165_cast = conv(dilations = var_8252, groups = var_6865, pad = v_165_pad_0, pad_type = v_165_pad_type_0, strides = var_8250, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_329_cast)[name = tensor("v_165_cast")]; + tensor var_8256 = const()[name = tensor("op_8256"), val = tensor([2, 20, 64, -1])]; + tensor var_8257_cast = reshape(shape = var_8256, x = q_165_cast)[name = tensor("op_8257_cast")]; + tensor var_8258 = const()[name = tensor("op_8258"), val = tensor([2, 20, 64, -1])]; + tensor var_8259_cast = reshape(shape = var_8258, x = k_165_cast)[name = tensor("op_8259_cast")]; + tensor var_8260 = const()[name = tensor("op_8260"), val = tensor([2, 20, 64, -1])]; + tensor var_8261_cast = reshape(shape = var_8260, x = v_165_cast)[name = tensor("op_8261_cast")]; + tensor attn_weights_329_transpose_x_0 = const()[name = tensor("attn_weights_329_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_329_transpose_y_0 = const()[name = tensor("attn_weights_329_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_329_cast = matmul(transpose_x = attn_weights_329_transpose_x_0, transpose_y = attn_weights_329_transpose_y_0, x = var_8257_cast, y = var_8259_cast)[name = 
tensor("attn_weights_329_cast")]; + tensor attn_weights_331_cast = mul(x = attn_weights_329_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_331_cast")]; + tensor var_8265_cast = softmax(axis = var_6849, x = attn_weights_331_cast)[name = tensor("op_8265_cast")]; + tensor attn_165_transpose_x_0 = const()[name = tensor("attn_165_transpose_x_0"), val = tensor(false)]; + tensor attn_165_transpose_y_0 = const()[name = tensor("attn_165_transpose_y_0"), val = tensor(true)]; + tensor attn_165_cast = matmul(transpose_x = attn_165_transpose_x_0, transpose_y = attn_165_transpose_y_0, x = var_8261_cast, y = var_8265_cast)[name = tensor("attn_165_cast")]; + tensor var_8269 = const()[name = tensor("op_8269"), val = tensor([2, 1280, 1, -1])]; + tensor input_495_cast = reshape(shape = var_8269, x = attn_165_cast)[name = tensor("input_495_cast")]; + tensor var_8274 = const()[name = tensor("op_8274"), val = tensor([1, 1])]; + tensor var_8276 = const()[name = tensor("op_8276"), val = tensor([1, 1])]; + tensor var_8278_pad_type_0 = const()[name = tensor("op_8278_pad_type_0"), val = tensor("custom")]; + tensor var_8278_pad_0 = const()[name = tensor("op_8278_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(760643648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(761462912))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(761463040)))]; + tensor var_8278_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_8276, groups = var_6865, pad = var_8278_pad_0, pad_type = var_8278_pad_type_0, strides = var_8274, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_495_cast)[name = tensor("op_8278_cast")]; + tensor inputs_249_cast = add(x = var_8278_cast, y = inputs_247_cast)[name = tensor("inputs_249_cast")]; + tensor var_8282 = const()[name = tensor("op_8282"), val = tensor([1])]; + tensor channels_mean_249_cast = reduce_mean(axes = var_8282, keep_dims = var_6860, x = inputs_249_cast)[name = tensor("channels_mean_249_cast")]; + tensor zero_mean_249_cast = sub(x = inputs_249_cast, y = channels_mean_249_cast)[name = tensor("zero_mean_249_cast")]; + tensor zero_mean_sq_249_cast = mul(x = zero_mean_249_cast, y = zero_mean_249_cast)[name = tensor("zero_mean_sq_249_cast")]; + tensor var_8286 = const()[name = tensor("op_8286"), val = tensor([1])]; + tensor var_8287_cast = reduce_mean(axes = var_8286, keep_dims = var_6860, x = zero_mean_sq_249_cast)[name = tensor("op_8287_cast")]; + tensor var_8288_to_fp16 = const()[name = tensor("op_8288_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8289_cast = add(x = var_8287_cast, y = var_8288_to_fp16)[name = tensor("op_8289_cast")]; + tensor denom_249_epsilon_0_to_fp16 = const()[name = tensor("denom_249_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_249_cast = rsqrt(epsilon = denom_249_epsilon_0_to_fp16, x = var_8289_cast)[name = 
tensor("denom_249_cast")]; + tensor out_249_cast = mul(x = zero_mean_249_cast, y = denom_249_cast)[name = tensor("out_249_cast")]; + tensor var_8293_to_fp16 = const()[name = tensor("op_8293_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(761465664)))]; + tensor var_8294_cast = add(x = out_249_cast, y = var_8293_to_fp16)[name = tensor("op_8294_cast")]; + tensor var_8296_to_fp16 = const()[name = tensor("op_8296_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(761468288)))]; + tensor hidden_states_331_cast = mul(x = var_8294_cast, y = var_8296_to_fp16)[name = tensor("hidden_states_331_cast")]; + tensor var_8303 = const()[name = tensor("op_8303"), val = tensor([1, 1])]; + tensor var_8305 = const()[name = tensor("op_8305"), val = tensor([1, 1])]; + tensor q_167_pad_type_0 = const()[name = tensor("q_167_pad_type_0"), val = tensor("custom")]; + tensor q_167_pad_0 = const()[name = tensor("q_167_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(761470912))), lut = tensor([-0x1.d44p-7, 0x1.d3cp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_167_cast = conv(dilations = var_8305, groups = var_6865, pad = q_167_pad_0, pad_type = q_167_pad_type_0, strides = var_8303, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_331_cast)[name = tensor("q_167_cast")]; + tensor var_8309 = const()[name = tensor("op_8309"), val = tensor([1, 1])]; + tensor var_8311 = const()[name = tensor("op_8311"), val = tensor([1, 1])]; + tensor k_167_pad_type_0 = const()[name = tensor("k_167_pad_type_0"), val = tensor("custom")]; + tensor k_167_pad_0 = const()[name = tensor("k_167_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(761675776))), lut = tensor([-0x1.59cp-7, 0x1.59p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_167_cast = conv(dilations = var_8311, groups = var_6865, pad = k_167_pad_0, pad_type = k_167_pad_type_0, strides = var_8309, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_167_cast")]; + tensor var_8315 = const()[name = tensor("op_8315"), val = tensor([1, 1])]; + tensor var_8317 = const()[name = tensor("op_8317"), val = tensor([1, 1])]; + tensor v_167_pad_type_0 = const()[name = tensor("v_167_pad_type_0"), val = tensor("custom")]; + tensor v_167_pad_0 = const()[name = tensor("v_167_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762003520))), lut = tensor([-0x1.c78p-7, 0x1.c88p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_167_cast = conv(dilations = var_8317, groups 
= var_6865, pad = v_167_pad_0, pad_type = v_167_pad_type_0, strides = var_8315, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_167_cast")]; + tensor var_8321 = const()[name = tensor("op_8321"), val = tensor([2, 20, 64, -1])]; + tensor var_8322_cast = reshape(shape = var_8321, x = q_167_cast)[name = tensor("op_8322_cast")]; + tensor var_8323 = const()[name = tensor("op_8323"), val = tensor([2, 20, 64, -1])]; + tensor var_8324_cast = reshape(shape = var_8323, x = k_167_cast)[name = tensor("op_8324_cast")]; + tensor var_8325 = const()[name = tensor("op_8325"), val = tensor([2, 20, 64, -1])]; + tensor var_8326_cast = reshape(shape = var_8325, x = v_167_cast)[name = tensor("op_8326_cast")]; + tensor attn_weights_333_transpose_x_0 = const()[name = tensor("attn_weights_333_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_333_transpose_y_0 = const()[name = tensor("attn_weights_333_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_333_cast = matmul(transpose_x = attn_weights_333_transpose_x_0, transpose_y = attn_weights_333_transpose_y_0, x = var_8322_cast, y = var_8324_cast)[name = tensor("attn_weights_333_cast")]; + tensor attn_weights_335_cast = mul(x = attn_weights_333_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_335_cast")]; + tensor var_8330_cast = softmax(axis = var_6849, x = attn_weights_335_cast)[name = tensor("op_8330_cast")]; + tensor attn_167_transpose_x_0 = const()[name = tensor("attn_167_transpose_x_0"), val = tensor(false)]; + tensor attn_167_transpose_y_0 = const()[name = tensor("attn_167_transpose_y_0"), val = tensor(true)]; + tensor attn_167_cast = matmul(transpose_x = attn_167_transpose_x_0, transpose_y = attn_167_transpose_y_0, x = var_8326_cast, y = var_8330_cast)[name = tensor("attn_167_cast")]; + tensor var_8334 = const()[name = tensor("op_8334"), val = tensor([2, 1280, 1, -1])]; + tensor input_497_cast = reshape(shape = var_8334, x = attn_167_cast)[name = tensor("input_497_cast")]; + tensor var_8339 = const()[name = tensor("op_8339"), val = tensor([1, 1])]; + tensor var_8341 = const()[name = tensor("op_8341"), val = tensor([1, 1])]; + tensor var_8343_pad_type_0 = const()[name = tensor("op_8343_pad_type_0"), val = tensor("custom")]; + tensor var_8343_pad_0 = const()[name = tensor("op_8343_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762331264))), lut = tensor([-0x1.1a4p-7, 0x1.194p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762536128)))]; + tensor var_8343_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_8341, groups = var_6865, pad = var_8343_pad_0, pad_type = var_8343_pad_type_0, strides = var_8339, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_497_cast)[name = tensor("op_8343_cast")]; + tensor inputs_251_cast = add(x = var_8343_cast, y = inputs_249_cast)[name = 
tensor("inputs_251_cast")]; + tensor var_8347 = const()[name = tensor("op_8347"), val = tensor([1])]; + tensor channels_mean_251_cast = reduce_mean(axes = var_8347, keep_dims = var_6860, x = inputs_251_cast)[name = tensor("channels_mean_251_cast")]; + tensor zero_mean_251_cast = sub(x = inputs_251_cast, y = channels_mean_251_cast)[name = tensor("zero_mean_251_cast")]; + tensor zero_mean_sq_251_cast = mul(x = zero_mean_251_cast, y = zero_mean_251_cast)[name = tensor("zero_mean_sq_251_cast")]; + tensor var_8351 = const()[name = tensor("op_8351"), val = tensor([1])]; + tensor var_8352_cast = reduce_mean(axes = var_8351, keep_dims = var_6860, x = zero_mean_sq_251_cast)[name = tensor("op_8352_cast")]; + tensor var_8353_to_fp16 = const()[name = tensor("op_8353_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8354_cast = add(x = var_8352_cast, y = var_8353_to_fp16)[name = tensor("op_8354_cast")]; + tensor denom_251_epsilon_0_to_fp16 = const()[name = tensor("denom_251_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_251_cast = rsqrt(epsilon = denom_251_epsilon_0_to_fp16, x = var_8354_cast)[name = tensor("denom_251_cast")]; + tensor out_251_cast = mul(x = zero_mean_251_cast, y = denom_251_cast)[name = tensor("out_251_cast")]; + tensor var_8358_to_fp16 = const()[name = tensor("op_8358_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762538752)))]; + tensor var_8359_cast = add(x = out_251_cast, y = var_8358_to_fp16)[name = tensor("op_8359_cast")]; + tensor var_8361_to_fp16 = const()[name = tensor("op_8361_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762541376)))]; + tensor input_499_cast = mul(x = var_8359_cast, y = var_8361_to_fp16)[name = tensor("input_499_cast")]; + tensor var_8369 = const()[name = tensor("op_8369"), val = tensor([1, 1])]; + tensor var_8371 = const()[name = tensor("op_8371"), val = tensor([1, 1])]; + tensor var_8373_pad_type_0 = const()[name = tensor("op_8373_pad_type_0"), val = tensor("custom")]; + tensor var_8373_pad_0 = const()[name = tensor("op_8373_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(762544000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769097664))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769097792)))]; + tensor var_8373_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_8371, groups = var_6865, pad = var_8373_pad_0, pad_type = var_8373_pad_type_0, strides = var_8369, weight = up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_499_cast)[name = tensor("op_8373_cast")]; + tensor var_8374_split_sizes_0 = const()[name = tensor("op_8374_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8374_axis_0 = const()[name = tensor("op_8374_axis_0"), val = tensor(1)]; + tensor var_8374_cast_0, tensor var_8374_cast_1 = split(axis = 
var_8374_axis_0, split_sizes = var_8374_split_sizes_0, x = var_8373_cast)[name = tensor("op_8374_cast")]; + tensor var_8376_mode_0 = const()[name = tensor("op_8376_mode_0"), val = tensor("EXACT")]; + tensor var_8376_cast = gelu(mode = var_8376_mode_0, x = var_8374_cast_1)[name = tensor("op_8376_cast")]; + tensor input_501_cast = mul(x = var_8374_cast_0, y = var_8376_cast)[name = tensor("input_501_cast")]; + tensor var_8380 = const()[name = tensor("op_8380"), val = tensor([1, 1])]; + tensor var_8382 = const()[name = tensor("op_8382"), val = tensor([1, 1])]; + tensor var_8384_pad_type_0 = const()[name = tensor("op_8384_pad_type_0"), val = tensor("custom")]; + tensor var_8384_pad_0 = const()[name = tensor("op_8384_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769118336))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(772395200))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(772395328)))]; + tensor var_8384_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_8382, groups = var_6865, pad = var_8384_pad_0, pad_type = var_8384_pad_type_0, strides = var_8380, weight = up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_501_cast)[name = tensor("op_8384_cast")]; + tensor inputs_253_cast = add(x = var_8384_cast, y = inputs_251_cast)[name = tensor("inputs_253_cast")]; + tensor var_8394 = const()[name = tensor("op_8394"), val = tensor([1])]; + tensor channels_mean_253_cast = reduce_mean(axes = var_8394, keep_dims = var_6860, x = inputs_253_cast)[name = tensor("channels_mean_253_cast")]; + tensor zero_mean_253_cast = sub(x = inputs_253_cast, y = channels_mean_253_cast)[name = tensor("zero_mean_253_cast")]; + tensor zero_mean_sq_253_cast = mul(x = zero_mean_253_cast, y = zero_mean_253_cast)[name = tensor("zero_mean_sq_253_cast")]; + tensor var_8398 = const()[name = tensor("op_8398"), val = tensor([1])]; + tensor var_8399_cast = reduce_mean(axes = var_8398, keep_dims = var_6860, x = zero_mean_sq_253_cast)[name = tensor("op_8399_cast")]; + tensor var_8400_to_fp16 = const()[name = tensor("op_8400_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8401_cast = add(x = var_8399_cast, y = var_8400_to_fp16)[name = tensor("op_8401_cast")]; + tensor denom_253_epsilon_0_to_fp16 = const()[name = tensor("denom_253_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_253_cast = rsqrt(epsilon = denom_253_epsilon_0_to_fp16, x = var_8401_cast)[name = tensor("denom_253_cast")]; + tensor out_253_cast = mul(x = zero_mean_253_cast, y = denom_253_cast)[name = tensor("out_253_cast")]; + tensor var_8405_to_fp16 = const()[name = tensor("op_8405_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(772397952)))]; + tensor var_8406_cast = add(x = out_253_cast, y = var_8405_to_fp16)[name = tensor("op_8406_cast")]; + tensor var_8408_to_fp16 = const()[name = tensor("op_8408_to_fp16"), val = tensor(BLOBFILE(path 
= tensor("@model_path/weights/weight.bin"), offset = tensor(772400576)))]; + tensor hidden_states_335_cast = mul(x = var_8406_cast, y = var_8408_to_fp16)[name = tensor("hidden_states_335_cast")]; + tensor var_8415 = const()[name = tensor("op_8415"), val = tensor([1, 1])]; + tensor var_8417 = const()[name = tensor("op_8417"), val = tensor([1, 1])]; + tensor q_169_pad_type_0 = const()[name = tensor("q_169_pad_type_0"), val = tensor("custom")]; + tensor q_169_pad_0 = const()[name = tensor("q_169_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(772403200))), lut = tensor([-0x1.6ap-5, -0x1.b7p-7, 0x1.afp-7, 0x1.684p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_169_cast = conv(dilations = var_8417, groups = var_6865, pad = q_169_pad_0, pad_type = q_169_pad_type_0, strides = var_8415, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_335_cast)[name = tensor("q_169_cast")]; + tensor var_8421 = const()[name = tensor("op_8421"), val = tensor([1, 1])]; + tensor var_8423 = const()[name = tensor("op_8423"), val = tensor([1, 1])]; + tensor k_169_pad_type_0 = const()[name = tensor("k_169_pad_type_0"), val = tensor("custom")]; + tensor k_169_pad_0 = const()[name = tensor("k_169_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(772812864))), lut = tensor([-0x1.69p-5, -0x1.b34p-7, 0x1.b2cp-7, 0x1.688p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_169_cast = conv(dilations = var_8423, groups = var_6865, pad = k_169_pad_0, pad_type = k_169_pad_type_0, strides = var_8421, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_335_cast)[name = tensor("k_169_cast")]; + tensor var_8427 = const()[name = tensor("op_8427"), val = tensor([1, 1])]; + tensor var_8429 = const()[name = tensor("op_8429"), val = tensor([1, 1])]; + tensor v_169_pad_type_0 = const()[name = tensor("v_169_pad_type_0"), val = tensor("custom")]; + tensor v_169_pad_0 = const()[name = tensor("v_169_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(773222528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(774041792))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_169_cast = conv(dilations = var_8429, groups = var_6865, pad = v_169_pad_0, pad_type = v_169_pad_type_0, strides = var_8427, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_335_cast)[name = tensor("v_169_cast")]; + tensor var_8433 = const()[name = tensor("op_8433"), val = tensor([2, 20, 64, -1])]; + tensor var_8434_cast = reshape(shape = var_8433, x = q_169_cast)[name = 
tensor("op_8434_cast")]; + tensor var_8435 = const()[name = tensor("op_8435"), val = tensor([2, 20, 64, -1])]; + tensor var_8436_cast = reshape(shape = var_8435, x = k_169_cast)[name = tensor("op_8436_cast")]; + tensor var_8437 = const()[name = tensor("op_8437"), val = tensor([2, 20, 64, -1])]; + tensor var_8438_cast = reshape(shape = var_8437, x = v_169_cast)[name = tensor("op_8438_cast")]; + tensor attn_weights_337_transpose_x_0 = const()[name = tensor("attn_weights_337_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_337_transpose_y_0 = const()[name = tensor("attn_weights_337_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_337_cast = matmul(transpose_x = attn_weights_337_transpose_x_0, transpose_y = attn_weights_337_transpose_y_0, x = var_8434_cast, y = var_8436_cast)[name = tensor("attn_weights_337_cast")]; + tensor attn_weights_339_cast = mul(x = attn_weights_337_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_339_cast")]; + tensor var_8442_cast = softmax(axis = var_6849, x = attn_weights_339_cast)[name = tensor("op_8442_cast")]; + tensor attn_169_transpose_x_0 = const()[name = tensor("attn_169_transpose_x_0"), val = tensor(false)]; + tensor attn_169_transpose_y_0 = const()[name = tensor("attn_169_transpose_y_0"), val = tensor(true)]; + tensor attn_169_cast = matmul(transpose_x = attn_169_transpose_x_0, transpose_y = attn_169_transpose_y_0, x = var_8438_cast, y = var_8442_cast)[name = tensor("attn_169_cast")]; + tensor var_8446 = const()[name = tensor("op_8446"), val = tensor([2, 1280, 1, -1])]; + tensor input_503_cast = reshape(shape = var_8446, x = attn_169_cast)[name = tensor("input_503_cast")]; + tensor var_8451 = const()[name = tensor("op_8451"), val = tensor([1, 1])]; + tensor var_8453 = const()[name = tensor("op_8453"), val = tensor([1, 1])]; + tensor var_8455_pad_type_0 = const()[name = tensor("op_8455_pad_type_0"), val = tensor("custom")]; + tensor var_8455_pad_0 = const()[name = tensor("op_8455_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(774041920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(774861184))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(774861312)))]; + tensor var_8455_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_8453, groups = var_6865, pad = var_8455_pad_0, pad_type = var_8455_pad_type_0, strides = var_8451, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_503_cast)[name = tensor("op_8455_cast")]; + tensor inputs_255_cast = add(x = var_8455_cast, y = inputs_253_cast)[name = tensor("inputs_255_cast")]; + tensor var_8459 = const()[name = tensor("op_8459"), val = tensor([1])]; + tensor channels_mean_255_cast = reduce_mean(axes = var_8459, keep_dims = var_6860, x = inputs_255_cast)[name = tensor("channels_mean_255_cast")]; + tensor zero_mean_255_cast = sub(x = inputs_255_cast, y = 
channels_mean_255_cast)[name = tensor("zero_mean_255_cast")]; + tensor zero_mean_sq_255_cast = mul(x = zero_mean_255_cast, y = zero_mean_255_cast)[name = tensor("zero_mean_sq_255_cast")]; + tensor var_8463 = const()[name = tensor("op_8463"), val = tensor([1])]; + tensor var_8464_cast = reduce_mean(axes = var_8463, keep_dims = var_6860, x = zero_mean_sq_255_cast)[name = tensor("op_8464_cast")]; + tensor var_8465_to_fp16 = const()[name = tensor("op_8465_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8466_cast = add(x = var_8464_cast, y = var_8465_to_fp16)[name = tensor("op_8466_cast")]; + tensor denom_255_epsilon_0_to_fp16 = const()[name = tensor("denom_255_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_255_cast = rsqrt(epsilon = denom_255_epsilon_0_to_fp16, x = var_8466_cast)[name = tensor("denom_255_cast")]; + tensor out_255_cast = mul(x = zero_mean_255_cast, y = denom_255_cast)[name = tensor("out_255_cast")]; + tensor var_8470_to_fp16 = const()[name = tensor("op_8470_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(774863936)))]; + tensor var_8471_cast = add(x = out_255_cast, y = var_8470_to_fp16)[name = tensor("op_8471_cast")]; + tensor var_8473_to_fp16 = const()[name = tensor("op_8473_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(774866560)))]; + tensor hidden_states_337_cast = mul(x = var_8471_cast, y = var_8473_to_fp16)[name = tensor("hidden_states_337_cast")]; + tensor var_8480 = const()[name = tensor("op_8480"), val = tensor([1, 1])]; + tensor var_8482 = const()[name = tensor("op_8482"), val = tensor([1, 1])]; + tensor q_171_pad_type_0 = const()[name = tensor("q_171_pad_type_0"), val = tensor("custom")]; + tensor q_171_pad_0 = const()[name = tensor("q_171_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(774869184))), lut = tensor([-0x1.b48p-7, 0x1.b54p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_171_cast = conv(dilations = var_8482, groups = var_6865, pad = q_171_pad_0, pad_type = q_171_pad_type_0, strides = var_8480, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_337_cast)[name = tensor("q_171_cast")]; + tensor var_8486 = const()[name = tensor("op_8486"), val = tensor([1, 1])]; + tensor var_8488 = const()[name = tensor("op_8488"), val = tensor([1, 1])]; + tensor k_171_pad_type_0 = const()[name = tensor("k_171_pad_type_0"), val = tensor("custom")]; + tensor k_171_pad_0 = const()[name = tensor("k_171_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(775074048))), lut = tensor([-0x1.38p-7, 0x1.37p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_171_cast = conv(dilations = var_8488, groups = var_6865, pad = k_171_pad_0, pad_type = k_171_pad_type_0, strides = var_8486, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = 
tensor("k_171_cast")]; + tensor var_8492 = const()[name = tensor("op_8492"), val = tensor([1, 1])]; + tensor var_8494 = const()[name = tensor("op_8494"), val = tensor([1, 1])]; + tensor v_171_pad_type_0 = const()[name = tensor("v_171_pad_type_0"), val = tensor("custom")]; + tensor v_171_pad_0 = const()[name = tensor("v_171_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(775401792))), lut = tensor([-0x1.a1p-7, 0x1.a28p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_171_cast = conv(dilations = var_8494, groups = var_6865, pad = v_171_pad_0, pad_type = v_171_pad_type_0, strides = var_8492, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_171_cast")]; + tensor var_8498 = const()[name = tensor("op_8498"), val = tensor([2, 20, 64, -1])]; + tensor var_8499_cast = reshape(shape = var_8498, x = q_171_cast)[name = tensor("op_8499_cast")]; + tensor var_8500 = const()[name = tensor("op_8500"), val = tensor([2, 20, 64, -1])]; + tensor var_8501_cast = reshape(shape = var_8500, x = k_171_cast)[name = tensor("op_8501_cast")]; + tensor var_8502 = const()[name = tensor("op_8502"), val = tensor([2, 20, 64, -1])]; + tensor var_8503_cast = reshape(shape = var_8502, x = v_171_cast)[name = tensor("op_8503_cast")]; + tensor attn_weights_341_transpose_x_0 = const()[name = tensor("attn_weights_341_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_341_transpose_y_0 = const()[name = tensor("attn_weights_341_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_341_cast = matmul(transpose_x = attn_weights_341_transpose_x_0, transpose_y = attn_weights_341_transpose_y_0, x = var_8499_cast, y = var_8501_cast)[name = tensor("attn_weights_341_cast")]; + tensor attn_weights_343_cast = mul(x = attn_weights_341_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_343_cast")]; + tensor var_8507_cast = softmax(axis = var_6849, x = attn_weights_343_cast)[name = tensor("op_8507_cast")]; + tensor attn_171_transpose_x_0 = const()[name = tensor("attn_171_transpose_x_0"), val = tensor(false)]; + tensor attn_171_transpose_y_0 = const()[name = tensor("attn_171_transpose_y_0"), val = tensor(true)]; + tensor attn_171_cast = matmul(transpose_x = attn_171_transpose_x_0, transpose_y = attn_171_transpose_y_0, x = var_8503_cast, y = var_8507_cast)[name = tensor("attn_171_cast")]; + tensor var_8511 = const()[name = tensor("op_8511"), val = tensor([2, 1280, 1, -1])]; + tensor input_505_cast = reshape(shape = var_8511, x = attn_171_cast)[name = tensor("input_505_cast")]; + tensor var_8516 = const()[name = tensor("op_8516"), val = tensor([1, 1])]; + tensor var_8518 = const()[name = tensor("op_8518"), val = tensor([1, 1])]; + tensor var_8520_pad_type_0 = const()[name = tensor("op_8520_pad_type_0"), val = tensor("custom")]; + tensor var_8520_pad_0 = const()[name = tensor("op_8520_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(775729536))), lut = tensor([-0x1.048p-7, 0x1.04cp-7]), name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(775934400)))]; + tensor var_8520_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_8518, groups = var_6865, pad = var_8520_pad_0, pad_type = var_8520_pad_type_0, strides = var_8516, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_505_cast)[name = tensor("op_8520_cast")]; + tensor inputs_257_cast = add(x = var_8520_cast, y = inputs_255_cast)[name = tensor("inputs_257_cast")]; + tensor var_8524 = const()[name = tensor("op_8524"), val = tensor([1])]; + tensor channels_mean_257_cast = reduce_mean(axes = var_8524, keep_dims = var_6860, x = inputs_257_cast)[name = tensor("channels_mean_257_cast")]; + tensor zero_mean_257_cast = sub(x = inputs_257_cast, y = channels_mean_257_cast)[name = tensor("zero_mean_257_cast")]; + tensor zero_mean_sq_257_cast = mul(x = zero_mean_257_cast, y = zero_mean_257_cast)[name = tensor("zero_mean_sq_257_cast")]; + tensor var_8528 = const()[name = tensor("op_8528"), val = tensor([1])]; + tensor var_8529_cast = reduce_mean(axes = var_8528, keep_dims = var_6860, x = zero_mean_sq_257_cast)[name = tensor("op_8529_cast")]; + tensor var_8530_to_fp16 = const()[name = tensor("op_8530_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8531_cast = add(x = var_8529_cast, y = var_8530_to_fp16)[name = tensor("op_8531_cast")]; + tensor denom_257_epsilon_0_to_fp16 = const()[name = tensor("denom_257_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_257_cast = rsqrt(epsilon = denom_257_epsilon_0_to_fp16, x = var_8531_cast)[name = tensor("denom_257_cast")]; + tensor out_257_cast = mul(x = zero_mean_257_cast, y = denom_257_cast)[name = tensor("out_257_cast")]; + tensor var_8535_to_fp16 = const()[name = tensor("op_8535_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(775937024)))]; + tensor var_8536_cast = add(x = out_257_cast, y = var_8535_to_fp16)[name = tensor("op_8536_cast")]; + tensor var_8538_to_fp16 = const()[name = tensor("op_8538_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(775939648)))]; + tensor input_507_cast = mul(x = var_8536_cast, y = var_8538_to_fp16)[name = tensor("input_507_cast")]; + tensor var_8546 = const()[name = tensor("op_8546"), val = tensor([1, 1])]; + tensor var_8548 = const()[name = tensor("op_8548"), val = tensor([1, 1])]; + tensor var_8550_pad_type_0 = const()[name = tensor("op_8550_pad_type_0"), val = tensor("custom")]; + tensor var_8550_pad_0 = const()[name = tensor("op_8550_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(775942272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(782495936))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor 
up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(782496064)))]; + tensor var_8550_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_8548, groups = var_6865, pad = var_8550_pad_0, pad_type = var_8550_pad_type_0, strides = var_8546, weight = up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_507_cast)[name = tensor("op_8550_cast")]; + tensor var_8551_split_sizes_0 = const()[name = tensor("op_8551_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8551_axis_0 = const()[name = tensor("op_8551_axis_0"), val = tensor(1)]; + tensor var_8551_cast_0, tensor var_8551_cast_1 = split(axis = var_8551_axis_0, split_sizes = var_8551_split_sizes_0, x = var_8550_cast)[name = tensor("op_8551_cast")]; + tensor var_8553_mode_0 = const()[name = tensor("op_8553_mode_0"), val = tensor("EXACT")]; + tensor var_8553_cast = gelu(mode = var_8553_mode_0, x = var_8551_cast_1)[name = tensor("op_8553_cast")]; + tensor input_509_cast = mul(x = var_8551_cast_0, y = var_8553_cast)[name = tensor("input_509_cast")]; + tensor var_8557 = const()[name = tensor("op_8557"), val = tensor([1, 1])]; + tensor var_8559 = const()[name = tensor("op_8559"), val = tensor([1, 1])]; + tensor var_8561_pad_type_0 = const()[name = tensor("op_8561_pad_type_0"), val = tensor("custom")]; + tensor var_8561_pad_0 = const()[name = tensor("op_8561_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(782516608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(785793472))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(785793600)))]; + tensor var_8561_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_8559, groups = var_6865, pad = var_8561_pad_0, pad_type = var_8561_pad_type_0, strides = var_8557, weight = up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_509_cast)[name = tensor("op_8561_cast")]; + tensor inputs_259_cast = add(x = var_8561_cast, y = inputs_257_cast)[name = tensor("inputs_259_cast")]; + tensor var_8571 = const()[name = tensor("op_8571"), val = tensor([1])]; + tensor channels_mean_259_cast = reduce_mean(axes = var_8571, keep_dims = var_6860, x = inputs_259_cast)[name = tensor("channels_mean_259_cast")]; + tensor zero_mean_259_cast = sub(x = inputs_259_cast, y = channels_mean_259_cast)[name = tensor("zero_mean_259_cast")]; + tensor zero_mean_sq_259_cast = mul(x = zero_mean_259_cast, y = zero_mean_259_cast)[name = tensor("zero_mean_sq_259_cast")]; + tensor var_8575 = const()[name = tensor("op_8575"), val = tensor([1])]; + tensor var_8576_cast = reduce_mean(axes = var_8575, keep_dims = var_6860, x = zero_mean_sq_259_cast)[name = tensor("op_8576_cast")]; + 
tensor var_8577_to_fp16 = const()[name = tensor("op_8577_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8578_cast = add(x = var_8576_cast, y = var_8577_to_fp16)[name = tensor("op_8578_cast")]; + tensor denom_259_epsilon_0_to_fp16 = const()[name = tensor("denom_259_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_259_cast = rsqrt(epsilon = denom_259_epsilon_0_to_fp16, x = var_8578_cast)[name = tensor("denom_259_cast")]; + tensor out_259_cast = mul(x = zero_mean_259_cast, y = denom_259_cast)[name = tensor("out_259_cast")]; + tensor var_8582_to_fp16 = const()[name = tensor("op_8582_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(785796224)))]; + tensor var_8583_cast = add(x = out_259_cast, y = var_8582_to_fp16)[name = tensor("op_8583_cast")]; + tensor var_8585_to_fp16 = const()[name = tensor("op_8585_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(785798848)))]; + tensor hidden_states_341_cast = mul(x = var_8583_cast, y = var_8585_to_fp16)[name = tensor("hidden_states_341_cast")]; + tensor var_8592 = const()[name = tensor("op_8592"), val = tensor([1, 1])]; + tensor var_8594 = const()[name = tensor("op_8594"), val = tensor([1, 1])]; + tensor q_173_pad_type_0 = const()[name = tensor("q_173_pad_type_0"), val = tensor("custom")]; + tensor q_173_pad_0 = const()[name = tensor("q_173_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(785801472))), lut = tensor([-0x1.6b4p-5, -0x1.b6cp-7, 0x1.b3cp-7, 0x1.6a4p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_173_cast = conv(dilations = var_8594, groups = var_6865, pad = q_173_pad_0, pad_type = q_173_pad_type_0, strides = var_8592, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_341_cast)[name = tensor("q_173_cast")]; + tensor var_8598 = const()[name = tensor("op_8598"), val = tensor([1, 1])]; + tensor var_8600 = const()[name = tensor("op_8600"), val = tensor([1, 1])]; + tensor k_173_pad_type_0 = const()[name = tensor("k_173_pad_type_0"), val = tensor("custom")]; + tensor k_173_pad_0 = const()[name = tensor("k_173_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(786211136))), lut = tensor([-0x1.69cp-5, -0x1.b24p-7, 0x1.b88p-7, 0x1.6b8p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_173_cast = conv(dilations = var_8600, groups = var_6865, pad = k_173_pad_0, pad_type = k_173_pad_type_0, strides = var_8598, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_341_cast)[name = tensor("k_173_cast")]; + tensor var_8604 = const()[name = tensor("op_8604"), val = tensor([1, 1])]; + tensor var_8606 = const()[name = tensor("op_8606"), val = tensor([1, 1])]; + tensor v_173_pad_type_0 = const()[name = tensor("v_173_pad_type_0"), val = tensor("custom")]; + tensor v_173_pad_0 = const()[name = tensor("v_173_pad_0"), val = tensor([0, 0, 0, 
0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(786620800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(787440064))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_173_cast = conv(dilations = var_8606, groups = var_6865, pad = v_173_pad_0, pad_type = v_173_pad_type_0, strides = var_8604, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_341_cast)[name = tensor("v_173_cast")]; + tensor var_8610 = const()[name = tensor("op_8610"), val = tensor([2, 20, 64, -1])]; + tensor var_8611_cast = reshape(shape = var_8610, x = q_173_cast)[name = tensor("op_8611_cast")]; + tensor var_8612 = const()[name = tensor("op_8612"), val = tensor([2, 20, 64, -1])]; + tensor var_8613_cast = reshape(shape = var_8612, x = k_173_cast)[name = tensor("op_8613_cast")]; + tensor var_8614 = const()[name = tensor("op_8614"), val = tensor([2, 20, 64, -1])]; + tensor var_8615_cast = reshape(shape = var_8614, x = v_173_cast)[name = tensor("op_8615_cast")]; + tensor attn_weights_345_transpose_x_0 = const()[name = tensor("attn_weights_345_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_345_transpose_y_0 = const()[name = tensor("attn_weights_345_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_345_cast = matmul(transpose_x = attn_weights_345_transpose_x_0, transpose_y = attn_weights_345_transpose_y_0, x = var_8611_cast, y = var_8613_cast)[name = tensor("attn_weights_345_cast")]; + tensor attn_weights_347_cast = mul(x = attn_weights_345_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_347_cast")]; + tensor var_8619_cast = softmax(axis = var_6849, x = attn_weights_347_cast)[name = tensor("op_8619_cast")]; + tensor attn_173_transpose_x_0 = const()[name = tensor("attn_173_transpose_x_0"), val = tensor(false)]; + tensor attn_173_transpose_y_0 = const()[name = tensor("attn_173_transpose_y_0"), val = tensor(true)]; + tensor attn_173_cast = matmul(transpose_x = attn_173_transpose_x_0, transpose_y = attn_173_transpose_y_0, x = var_8615_cast, y = var_8619_cast)[name = tensor("attn_173_cast")]; + tensor var_8623 = const()[name = tensor("op_8623"), val = tensor([2, 1280, 1, -1])]; + tensor input_511_cast = reshape(shape = var_8623, x = attn_173_cast)[name = tensor("input_511_cast")]; + tensor var_8628 = const()[name = tensor("op_8628"), val = tensor([1, 1])]; + tensor var_8630 = const()[name = tensor("op_8630"), val = tensor([1, 1])]; + tensor var_8632_pad_type_0 = const()[name = tensor("op_8632_pad_type_0"), val = tensor("custom")]; + tensor var_8632_pad_0 = const()[name = tensor("op_8632_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(787440192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788669056))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788669248)))]; + tensor var_8632_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_8630, groups = var_6865, pad = var_8632_pad_0, pad_type = var_8632_pad_type_0, strides = var_8628, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_511_cast)[name = tensor("op_8632_cast")]; + tensor inputs_261_cast = add(x = var_8632_cast, y = inputs_259_cast)[name = tensor("inputs_261_cast")]; + tensor var_8636 = const()[name = tensor("op_8636"), val = tensor([1])]; + tensor channels_mean_261_cast = reduce_mean(axes = var_8636, keep_dims = var_6860, x = inputs_261_cast)[name = tensor("channels_mean_261_cast")]; + tensor zero_mean_261_cast = sub(x = inputs_261_cast, y = channels_mean_261_cast)[name = tensor("zero_mean_261_cast")]; + tensor zero_mean_sq_261_cast = mul(x = zero_mean_261_cast, y = zero_mean_261_cast)[name = tensor("zero_mean_sq_261_cast")]; + tensor var_8640 = const()[name = tensor("op_8640"), val = tensor([1])]; + tensor var_8641_cast = reduce_mean(axes = var_8640, keep_dims = var_6860, x = zero_mean_sq_261_cast)[name = tensor("op_8641_cast")]; + tensor var_8642_to_fp16 = const()[name = tensor("op_8642_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8643_cast = add(x = var_8641_cast, y = var_8642_to_fp16)[name = tensor("op_8643_cast")]; + tensor denom_261_epsilon_0_to_fp16 = const()[name = tensor("denom_261_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_261_cast = rsqrt(epsilon = denom_261_epsilon_0_to_fp16, x = var_8643_cast)[name = tensor("denom_261_cast")]; + tensor out_261_cast = mul(x = zero_mean_261_cast, y = denom_261_cast)[name = tensor("out_261_cast")]; + tensor var_8647_to_fp16 = const()[name = tensor("op_8647_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788671872)))]; + tensor var_8648_cast = add(x = out_261_cast, y = var_8647_to_fp16)[name = tensor("op_8648_cast")]; + tensor var_8650_to_fp16 = const()[name = tensor("op_8650_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788674496)))]; + tensor hidden_states_343_cast = mul(x = var_8648_cast, y = var_8650_to_fp16)[name = tensor("hidden_states_343_cast")]; + tensor var_8657 = const()[name = tensor("op_8657"), val = tensor([1, 1])]; + tensor var_8659 = const()[name = tensor("op_8659"), val = tensor([1, 1])]; + tensor q_175_pad_type_0 = const()[name = tensor("q_175_pad_type_0"), val = tensor("custom")]; + tensor q_175_pad_0 = const()[name = tensor("q_175_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788677120))), lut = tensor([-0x1.944p-7, 0x1.958p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_175_cast = conv(dilations = var_8659, groups = var_6865, pad = q_175_pad_0, pad_type = q_175_pad_type_0, strides = var_8657, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_343_cast)[name = tensor("q_175_cast")]; + tensor var_8663 = const()[name = tensor("op_8663"), val = 
tensor([1, 1])]; + tensor var_8665 = const()[name = tensor("op_8665"), val = tensor([1, 1])]; + tensor k_175_pad_type_0 = const()[name = tensor("k_175_pad_type_0"), val = tensor("custom")]; + tensor k_175_pad_0 = const()[name = tensor("k_175_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788881984))), lut = tensor([-0x1.128p-7, 0x1.124p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_175_cast = conv(dilations = var_8665, groups = var_6865, pad = k_175_pad_0, pad_type = k_175_pad_type_0, strides = var_8663, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_175_cast")]; + tensor var_8669 = const()[name = tensor("op_8669"), val = tensor([1, 1])]; + tensor var_8671 = const()[name = tensor("op_8671"), val = tensor([1, 1])]; + tensor v_175_pad_type_0 = const()[name = tensor("v_175_pad_type_0"), val = tensor("custom")]; + tensor v_175_pad_0 = const()[name = tensor("v_175_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(789209728))), lut = tensor([-0x1.57p-7, 0x1.57cp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_175_cast = conv(dilations = var_8671, groups = var_6865, pad = v_175_pad_0, pad_type = v_175_pad_type_0, strides = var_8669, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_175_cast")]; + tensor var_8675 = const()[name = tensor("op_8675"), val = tensor([2, 20, 64, -1])]; + tensor var_8676_cast = reshape(shape = var_8675, x = q_175_cast)[name = tensor("op_8676_cast")]; + tensor var_8677 = const()[name = tensor("op_8677"), val = tensor([2, 20, 64, -1])]; + tensor var_8678_cast = reshape(shape = var_8677, x = k_175_cast)[name = tensor("op_8678_cast")]; + tensor var_8679 = const()[name = tensor("op_8679"), val = tensor([2, 20, 64, -1])]; + tensor var_8680_cast = reshape(shape = var_8679, x = v_175_cast)[name = tensor("op_8680_cast")]; + tensor attn_weights_349_transpose_x_0 = const()[name = tensor("attn_weights_349_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_349_transpose_y_0 = const()[name = tensor("attn_weights_349_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_349_cast = matmul(transpose_x = attn_weights_349_transpose_x_0, transpose_y = attn_weights_349_transpose_y_0, x = var_8676_cast, y = var_8678_cast)[name = tensor("attn_weights_349_cast")]; + tensor attn_weights_351_cast = mul(x = attn_weights_349_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_351_cast")]; + tensor var_8684_cast = softmax(axis = var_6849, x = attn_weights_351_cast)[name = tensor("op_8684_cast")]; + tensor attn_175_transpose_x_0 = const()[name = tensor("attn_175_transpose_x_0"), val = tensor(false)]; + tensor attn_175_transpose_y_0 = const()[name = tensor("attn_175_transpose_y_0"), val = tensor(true)]; + tensor attn_175_cast = matmul(transpose_x = attn_175_transpose_x_0, transpose_y = 
attn_175_transpose_y_0, x = var_8680_cast, y = var_8684_cast)[name = tensor("attn_175_cast")]; + tensor var_8688 = const()[name = tensor("op_8688"), val = tensor([2, 1280, 1, -1])]; + tensor input_513_cast = reshape(shape = var_8688, x = attn_175_cast)[name = tensor("input_513_cast")]; + tensor var_8693 = const()[name = tensor("op_8693"), val = tensor([1, 1])]; + tensor var_8695 = const()[name = tensor("op_8695"), val = tensor([1, 1])]; + tensor var_8697_pad_type_0 = const()[name = tensor("op_8697_pad_type_0"), val = tensor("custom")]; + tensor var_8697_pad_0 = const()[name = tensor("op_8697_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(789537472))), lut = tensor([-0x1.bep-8, 0x1.bdcp-8]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(789742336)))]; + tensor var_8697_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_8695, groups = var_6865, pad = var_8697_pad_0, pad_type = var_8697_pad_type_0, strides = var_8693, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_513_cast)[name = tensor("op_8697_cast")]; + tensor inputs_263_cast = add(x = var_8697_cast, y = inputs_261_cast)[name = tensor("inputs_263_cast")]; + tensor var_8701 = const()[name = tensor("op_8701"), val = tensor([1])]; + tensor channels_mean_263_cast = reduce_mean(axes = var_8701, keep_dims = var_6860, x = inputs_263_cast)[name = tensor("channels_mean_263_cast")]; + tensor zero_mean_263_cast = sub(x = inputs_263_cast, y = channels_mean_263_cast)[name = tensor("zero_mean_263_cast")]; + tensor zero_mean_sq_263_cast = mul(x = zero_mean_263_cast, y = zero_mean_263_cast)[name = tensor("zero_mean_sq_263_cast")]; + tensor var_8705 = const()[name = tensor("op_8705"), val = tensor([1])]; + tensor var_8706_cast = reduce_mean(axes = var_8705, keep_dims = var_6860, x = zero_mean_sq_263_cast)[name = tensor("op_8706_cast")]; + tensor var_8707_to_fp16 = const()[name = tensor("op_8707_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8708_cast = add(x = var_8706_cast, y = var_8707_to_fp16)[name = tensor("op_8708_cast")]; + tensor denom_263_epsilon_0_to_fp16 = const()[name = tensor("denom_263_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_263_cast = rsqrt(epsilon = denom_263_epsilon_0_to_fp16, x = var_8708_cast)[name = tensor("denom_263_cast")]; + tensor out_263_cast = mul(x = zero_mean_263_cast, y = denom_263_cast)[name = tensor("out_263_cast")]; + tensor var_8712_to_fp16 = const()[name = tensor("op_8712_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(789744960)))]; + tensor var_8713_cast = add(x = out_263_cast, y = var_8712_to_fp16)[name = tensor("op_8713_cast")]; + tensor var_8715_to_fp16 = const()[name = tensor("op_8715_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(789747584)))]; + tensor input_515_cast = mul(x = var_8713_cast, y = 
var_8715_to_fp16)[name = tensor("input_515_cast")]; + tensor var_8723 = const()[name = tensor("op_8723"), val = tensor([1, 1])]; + tensor var_8725 = const()[name = tensor("op_8725"), val = tensor([1, 1])]; + tensor var_8727_pad_type_0 = const()[name = tensor("op_8727_pad_type_0"), val = tensor("custom")]; + tensor var_8727_pad_0 = const()[name = tensor("op_8727_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(789750208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(796303872))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(796304000)))]; + tensor var_8727_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_8725, groups = var_6865, pad = var_8727_pad_0, pad_type = var_8727_pad_type_0, strides = var_8723, weight = up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_515_cast)[name = tensor("op_8727_cast")]; + tensor var_8728_split_sizes_0 = const()[name = tensor("op_8728_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8728_axis_0 = const()[name = tensor("op_8728_axis_0"), val = tensor(1)]; + tensor var_8728_cast_0, tensor var_8728_cast_1 = split(axis = var_8728_axis_0, split_sizes = var_8728_split_sizes_0, x = var_8727_cast)[name = tensor("op_8728_cast")]; + tensor var_8730_mode_0 = const()[name = tensor("op_8730_mode_0"), val = tensor("EXACT")]; + tensor var_8730_cast = gelu(mode = var_8730_mode_0, x = var_8728_cast_1)[name = tensor("op_8730_cast")]; + tensor input_517_cast = mul(x = var_8728_cast_0, y = var_8730_cast)[name = tensor("input_517_cast")]; + tensor var_8734 = const()[name = tensor("op_8734"), val = tensor([1, 1])]; + tensor var_8736 = const()[name = tensor("op_8736"), val = tensor([1, 1])]; + tensor var_8738_pad_type_0 = const()[name = tensor("op_8738_pad_type_0"), val = tensor("custom")]; + tensor var_8738_pad_0 = const()[name = tensor("op_8738_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(796324544))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(801239808))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(801240000)))]; + tensor var_8738_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_8736, groups = var_6865, pad = var_8738_pad_0, pad_type = var_8738_pad_type_0, strides = var_8734, weight = 
up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_517_cast)[name = tensor("op_8738_cast")]; + tensor hidden_states_347_cast = add(x = var_8738_cast, y = inputs_263_cast)[name = tensor("hidden_states_347_cast")]; + tensor var_8740 = const()[name = tensor("op_8740"), val = tensor([2, 1280, 32, 32])]; + tensor input_519_cast = reshape(shape = var_8740, x = hidden_states_347_cast)[name = tensor("input_519_cast")]; + tensor var_8744 = const()[name = tensor("op_8744"), val = tensor([1, 1])]; + tensor var_8746 = const()[name = tensor("op_8746"), val = tensor([1, 1])]; + tensor hidden_states_349_pad_type_0 = const()[name = tensor("hidden_states_349_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_349_pad_0 = const()[name = tensor("hidden_states_349_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(801242624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(802471488))), name = tensor("up_blocks_0_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(802471680)))]; + tensor hidden_states_349_cast = conv(bias = up_blocks_0_attentions_0_proj_out_bias_to_fp16, dilations = var_8746, groups = var_6865, pad = hidden_states_349_pad_0, pad_type = hidden_states_349_pad_type_0, strides = var_8744, weight = up_blocks_0_attentions_0_proj_out_weight_to_fp16_palettized, x = input_519_cast)[name = tensor("hidden_states_349_cast")]; + tensor hidden_states_351_cast = add(x = hidden_states_349_cast, y = hidden_states_283_cast)[name = tensor("hidden_states_351_cast")]; + tensor input_521_interleave_0 = const()[name = tensor("input_521_interleave_0"), val = tensor(false)]; + tensor input_521_cast = concat(axis = var_6865, interleave = input_521_interleave_0, values = (hidden_states_351_cast, input_213_cast))[name = tensor("input_521_cast")]; + tensor reshape_96_shape_0 = const()[name = tensor("reshape_96_shape_0"), val = tensor([2, 32, 80, 32, 32])]; + tensor reshape_96_cast = reshape(shape = reshape_96_shape_0, x = input_521_cast)[name = tensor("reshape_96_cast")]; + tensor reduce_mean_72_axes_0 = const()[name = tensor("reduce_mean_72_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_72_keep_dims_0 = const()[name = tensor("reduce_mean_72_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_72_cast = reduce_mean(axes = reduce_mean_72_axes_0, keep_dims = reduce_mean_72_keep_dims_0, x = reshape_96_cast)[name = tensor("reduce_mean_72_cast")]; + tensor sub_48_cast = sub(x = reshape_96_cast, y = reduce_mean_72_cast)[name = tensor("sub_48_cast")]; + tensor square_24_cast = square(x = sub_48_cast)[name = tensor("square_24_cast")]; + tensor reduce_mean_74_axes_0 = const()[name = tensor("reduce_mean_74_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_74_keep_dims_0 = const()[name = tensor("reduce_mean_74_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_74_cast = reduce_mean(axes = reduce_mean_74_axes_0, keep_dims = reduce_mean_74_keep_dims_0, x = square_24_cast)[name = tensor("reduce_mean_74_cast")]; + tensor add_48_y_0_to_fp16 = const()[name = tensor("add_48_y_0_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor add_48_cast = add(x = reduce_mean_74_cast, y = add_48_y_0_to_fp16)[name = tensor("add_48_cast")]; + tensor sqrt_24_cast = sqrt(x = add_48_cast)[name = tensor("sqrt_24_cast")]; + tensor real_div_24_cast = real_div(x = sub_48_cast, y = sqrt_24_cast)[name = tensor("real_div_24_cast")]; + tensor reshape_97_shape_0 = const()[name = tensor("reshape_97_shape_0"), val = tensor([2, 2560, 32, 32])]; + tensor reshape_97_cast = reshape(shape = reshape_97_shape_0, x = real_div_24_cast)[name = tensor("reshape_97_cast")]; + tensor add_49_gamma_0_to_fp16 = const()[name = tensor("add_49_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(802474304)))]; + tensor add_49_beta_0_to_fp16 = const()[name = tensor("add_49_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(802479488)))]; + tensor add_49_epsilon_0_to_fp16 = const()[name = tensor("add_49_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_49_cast = batch_norm(beta = add_49_beta_0_to_fp16, epsilon = add_49_epsilon_0_to_fp16, gamma = add_49_gamma_0_to_fp16, mean = add_43_mean_0_to_fp16, variance = add_43_variance_0_to_fp16, x = reshape_97_cast)[name = tensor("add_49_cast")]; + tensor input_525_cast = silu(x = add_49_cast)[name = tensor("input_525_cast")]; + tensor var_8764 = const()[name = tensor("op_8764"), val = tensor([1, 1])]; + tensor var_8766 = const()[name = tensor("op_8766"), val = tensor([1, 1])]; + tensor hidden_states_353_pad_type_0 = const()[name = tensor("hidden_states_353_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_353_pad_0 = const()[name = tensor("hidden_states_353_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(802484672))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(824603136))), name = tensor("up_blocks_0_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 3, 3])]; + tensor up_blocks_0_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(824603328)))]; + tensor hidden_states_353_cast = conv(bias = up_blocks_0_resnets_1_conv1_bias_to_fp16, dilations = var_8766, groups = var_6865, pad = hidden_states_353_pad_0, pad_type = hidden_states_353_pad_type_0, strides = var_8764, weight = up_blocks_0_resnets_1_conv1_weight_to_fp16_palettized, x = input_525_cast)[name = tensor("hidden_states_353_cast")]; + tensor var_8772 = const()[name = tensor("op_8772"), val = tensor([1, 1])]; + tensor var_8774 = const()[name = tensor("op_8774"), val = tensor([1, 1])]; + tensor temb_19_pad_type_0 = const()[name = tensor("temb_19_pad_type_0"), val = tensor("custom")]; + tensor temb_19_pad_0 = const()[name = tensor("temb_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(824605952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(825834816))), name = tensor("up_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_resnets_1_time_emb_proj_bias_to_fp16 = 
const()[name = tensor("up_blocks_0_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(825835008)))]; + tensor temb_19_cast = conv(bias = up_blocks_0_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_8774, groups = var_6865, pad = temb_19_pad_0, pad_type = temb_19_pad_type_0, strides = var_8772, weight = up_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_19_cast")]; + tensor input_529_cast = add(x = hidden_states_353_cast, y = temb_19_cast)[name = tensor("input_529_cast")]; + tensor reshape_100_shape_0 = const()[name = tensor("reshape_100_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_100_cast = reshape(shape = reshape_100_shape_0, x = input_529_cast)[name = tensor("reshape_100_cast")]; + tensor reduce_mean_75_axes_0 = const()[name = tensor("reduce_mean_75_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_75_keep_dims_0 = const()[name = tensor("reduce_mean_75_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_75_cast = reduce_mean(axes = reduce_mean_75_axes_0, keep_dims = reduce_mean_75_keep_dims_0, x = reshape_100_cast)[name = tensor("reduce_mean_75_cast")]; + tensor sub_50_cast = sub(x = reshape_100_cast, y = reduce_mean_75_cast)[name = tensor("sub_50_cast")]; + tensor square_25_cast = square(x = sub_50_cast)[name = tensor("square_25_cast")]; + tensor reduce_mean_77_axes_0 = const()[name = tensor("reduce_mean_77_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_77_keep_dims_0 = const()[name = tensor("reduce_mean_77_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_77_cast = reduce_mean(axes = reduce_mean_77_axes_0, keep_dims = reduce_mean_77_keep_dims_0, x = square_25_cast)[name = tensor("reduce_mean_77_cast")]; + tensor add_50_y_0_to_fp16 = const()[name = tensor("add_50_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_50_cast = add(x = reduce_mean_77_cast, y = add_50_y_0_to_fp16)[name = tensor("add_50_cast")]; + tensor sqrt_25_cast = sqrt(x = add_50_cast)[name = tensor("sqrt_25_cast")]; + tensor real_div_25_cast = real_div(x = sub_50_cast, y = sqrt_25_cast)[name = tensor("real_div_25_cast")]; + tensor reshape_101_shape_0 = const()[name = tensor("reshape_101_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_101_cast = reshape(shape = reshape_101_shape_0, x = real_div_25_cast)[name = tensor("reshape_101_cast")]; + tensor add_51_gamma_0_to_fp16 = const()[name = tensor("add_51_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(825837632)))]; + tensor add_51_beta_0_to_fp16 = const()[name = tensor("add_51_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(825840256)))]; + tensor add_51_epsilon_0_to_fp16 = const()[name = tensor("add_51_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_51_cast = batch_norm(beta = add_51_beta_0_to_fp16, epsilon = add_51_epsilon_0_to_fp16, gamma = add_51_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_101_cast)[name = tensor("add_51_cast")]; + tensor input_533_cast = silu(x = add_51_cast)[name = tensor("input_533_cast")]; + tensor var_8784 = const()[name = tensor("op_8784"), val = tensor([1, 1])]; + tensor var_8786 = const()[name = tensor("op_8786"), val = tensor([1, 1])]; + tensor hidden_states_355_pad_type_0 = const()[name = tensor("hidden_states_355_pad_type_0"), val = tensor("custom")]; + tensor 
hidden_states_355_pad_0 = const()[name = tensor("hidden_states_355_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(825842880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(836902144))), name = tensor("up_blocks_0_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor up_blocks_0_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(836902336)))]; + tensor hidden_states_355_cast = conv(bias = up_blocks_0_resnets_1_conv2_bias_to_fp16, dilations = var_8786, groups = var_6865, pad = hidden_states_355_pad_0, pad_type = hidden_states_355_pad_type_0, strides = var_8784, weight = up_blocks_0_resnets_1_conv2_weight_to_fp16_palettized, x = input_533_cast)[name = tensor("hidden_states_355_cast")]; + tensor var_8791 = const()[name = tensor("op_8791"), val = tensor([1, 1])]; + tensor var_8793 = const()[name = tensor("op_8793"), val = tensor([1, 1])]; + tensor x_7_pad_type_0 = const()[name = tensor("x_7_pad_type_0"), val = tensor("custom")]; + tensor x_7_pad_0 = const()[name = tensor("x_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_1_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(836904960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(839362624))), name = tensor("up_blocks_0_resnets_1_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 1, 1])]; + tensor up_blocks_0_resnets_1_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_1_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(839362816)))]; + tensor x_7_cast = conv(bias = up_blocks_0_resnets_1_conv_shortcut_bias_to_fp16, dilations = var_8793, groups = var_6865, pad = x_7_pad_0, pad_type = x_7_pad_type_0, strides = var_8791, weight = up_blocks_0_resnets_1_conv_shortcut_weight_to_fp16_palettized, x = input_521_cast)[name = tensor("x_7_cast")]; + tensor hidden_states_357_cast = add(x = x_7_cast, y = hidden_states_355_cast)[name = tensor("hidden_states_357_cast")]; + tensor reshape_104_shape_0 = const()[name = tensor("reshape_104_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_104_cast = reshape(shape = reshape_104_shape_0, x = hidden_states_357_cast)[name = tensor("reshape_104_cast")]; + tensor reduce_mean_78_axes_0 = const()[name = tensor("reduce_mean_78_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_78_keep_dims_0 = const()[name = tensor("reduce_mean_78_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_78_cast = reduce_mean(axes = reduce_mean_78_axes_0, keep_dims = reduce_mean_78_keep_dims_0, x = reshape_104_cast)[name = tensor("reduce_mean_78_cast")]; + tensor sub_52_cast = sub(x = reshape_104_cast, y = reduce_mean_78_cast)[name = tensor("sub_52_cast")]; + tensor square_26_cast = square(x = sub_52_cast)[name = tensor("square_26_cast")]; + tensor reduce_mean_80_axes_0 = const()[name = tensor("reduce_mean_80_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_80_keep_dims_0 = const()[name = tensor("reduce_mean_80_keep_dims_0"), val = tensor(true)]; + tensor 
reduce_mean_80_cast = reduce_mean(axes = reduce_mean_80_axes_0, keep_dims = reduce_mean_80_keep_dims_0, x = square_26_cast)[name = tensor("reduce_mean_80_cast")]; + tensor add_52_y_0_to_fp16 = const()[name = tensor("add_52_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_52_cast = add(x = reduce_mean_80_cast, y = add_52_y_0_to_fp16)[name = tensor("add_52_cast")]; + tensor sqrt_26_cast = sqrt(x = add_52_cast)[name = tensor("sqrt_26_cast")]; + tensor real_div_26_cast = real_div(x = sub_52_cast, y = sqrt_26_cast)[name = tensor("real_div_26_cast")]; + tensor reshape_105_shape_0 = const()[name = tensor("reshape_105_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_105_cast = reshape(shape = reshape_105_shape_0, x = real_div_26_cast)[name = tensor("reshape_105_cast")]; + tensor add_53_gamma_0_to_fp16 = const()[name = tensor("add_53_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(839365440)))]; + tensor add_53_beta_0_to_fp16 = const()[name = tensor("add_53_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(839368064)))]; + tensor add_53_epsilon_0_to_fp16 = const()[name = tensor("add_53_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_53_cast = batch_norm(beta = add_53_beta_0_to_fp16, epsilon = add_53_epsilon_0_to_fp16, gamma = add_53_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_105_cast)[name = tensor("add_53_cast")]; + tensor var_8831 = const()[name = tensor("op_8831"), val = tensor([1, 1])]; + tensor var_8833 = const()[name = tensor("op_8833"), val = tensor([1, 1])]; + tensor hidden_states_359_pad_type_0 = const()[name = tensor("hidden_states_359_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_359_pad_0 = const()[name = tensor("hidden_states_359_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(839370688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(840599552))), name = tensor("up_blocks_0_attentions_1_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(840599744)))]; + tensor hidden_states_359_cast = conv(bias = up_blocks_0_attentions_1_proj_in_bias_to_fp16, dilations = var_8833, groups = var_6865, pad = hidden_states_359_pad_0, pad_type = hidden_states_359_pad_type_0, strides = var_8831, weight = up_blocks_0_attentions_1_proj_in_weight_to_fp16_palettized, x = add_53_cast)[name = tensor("hidden_states_359_cast")]; + tensor var_8838 = const()[name = tensor("op_8838"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_265_cast = reshape(shape = var_8838, x = hidden_states_359_cast)[name = tensor("inputs_265_cast")]; + tensor var_8848 = const()[name = tensor("op_8848"), val = tensor([1])]; + tensor channels_mean_265_cast = reduce_mean(axes = var_8848, keep_dims = var_6860, x = inputs_265_cast)[name = tensor("channels_mean_265_cast")]; + tensor zero_mean_265_cast = sub(x = inputs_265_cast, y = channels_mean_265_cast)[name = tensor("zero_mean_265_cast")]; + tensor zero_mean_sq_265_cast = mul(x = zero_mean_265_cast, y = zero_mean_265_cast)[name = 
tensor("zero_mean_sq_265_cast")]; + tensor var_8852 = const()[name = tensor("op_8852"), val = tensor([1])]; + tensor var_8853_cast = reduce_mean(axes = var_8852, keep_dims = var_6860, x = zero_mean_sq_265_cast)[name = tensor("op_8853_cast")]; + tensor var_8854_to_fp16 = const()[name = tensor("op_8854_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8855_cast = add(x = var_8853_cast, y = var_8854_to_fp16)[name = tensor("op_8855_cast")]; + tensor denom_265_epsilon_0_to_fp16 = const()[name = tensor("denom_265_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_265_cast = rsqrt(epsilon = denom_265_epsilon_0_to_fp16, x = var_8855_cast)[name = tensor("denom_265_cast")]; + tensor out_265_cast = mul(x = zero_mean_265_cast, y = denom_265_cast)[name = tensor("out_265_cast")]; + tensor var_8859_to_fp16 = const()[name = tensor("op_8859_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(840602368)))]; + tensor var_8860_cast = add(x = out_265_cast, y = var_8859_to_fp16)[name = tensor("op_8860_cast")]; + tensor var_8862_to_fp16 = const()[name = tensor("op_8862_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(840604992)))]; + tensor hidden_states_361_cast = mul(x = var_8860_cast, y = var_8862_to_fp16)[name = tensor("hidden_states_361_cast")]; + tensor var_8869 = const()[name = tensor("op_8869"), val = tensor([1, 1])]; + tensor var_8871 = const()[name = tensor("op_8871"), val = tensor([1, 1])]; + tensor q_177_pad_type_0 = const()[name = tensor("q_177_pad_type_0"), val = tensor("custom")]; + tensor q_177_pad_0 = const()[name = tensor("q_177_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(840607616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(841426880))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_177_cast = conv(dilations = var_8871, groups = var_6865, pad = q_177_pad_0, pad_type = q_177_pad_type_0, strides = var_8869, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_361_cast)[name = tensor("q_177_cast")]; + tensor var_8875 = const()[name = tensor("op_8875"), val = tensor([1, 1])]; + tensor var_8877 = const()[name = tensor("op_8877"), val = tensor([1, 1])]; + tensor k_177_pad_type_0 = const()[name = tensor("k_177_pad_type_0"), val = tensor("custom")]; + tensor k_177_pad_0 = const()[name = tensor("k_177_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(841427008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(842246272))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_177_cast = conv(dilations = var_8877, groups = var_6865, pad = k_177_pad_0, pad_type = k_177_pad_type_0, strides = var_8875, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_361_cast)[name = tensor("k_177_cast")]; + tensor var_8881 = 
const()[name = tensor("op_8881"), val = tensor([1, 1])]; + tensor var_8883 = const()[name = tensor("op_8883"), val = tensor([1, 1])]; + tensor v_177_pad_type_0 = const()[name = tensor("v_177_pad_type_0"), val = tensor("custom")]; + tensor v_177_pad_0 = const()[name = tensor("v_177_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(842246400))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(843475264))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_177_cast = conv(dilations = var_8883, groups = var_6865, pad = v_177_pad_0, pad_type = v_177_pad_type_0, strides = var_8881, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_361_cast)[name = tensor("v_177_cast")]; + tensor var_8887 = const()[name = tensor("op_8887"), val = tensor([2, 20, 64, -1])]; + tensor var_8888_cast = reshape(shape = var_8887, x = q_177_cast)[name = tensor("op_8888_cast")]; + tensor var_8889 = const()[name = tensor("op_8889"), val = tensor([2, 20, 64, -1])]; + tensor var_8890_cast = reshape(shape = var_8889, x = k_177_cast)[name = tensor("op_8890_cast")]; + tensor var_8891 = const()[name = tensor("op_8891"), val = tensor([2, 20, 64, -1])]; + tensor var_8892_cast = reshape(shape = var_8891, x = v_177_cast)[name = tensor("op_8892_cast")]; + tensor attn_weights_353_transpose_x_0 = const()[name = tensor("attn_weights_353_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_353_transpose_y_0 = const()[name = tensor("attn_weights_353_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_353_cast = matmul(transpose_x = attn_weights_353_transpose_x_0, transpose_y = attn_weights_353_transpose_y_0, x = var_8888_cast, y = var_8890_cast)[name = tensor("attn_weights_353_cast")]; + tensor attn_weights_355_cast = mul(x = attn_weights_353_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_355_cast")]; + tensor var_8896_cast = softmax(axis = var_6849, x = attn_weights_355_cast)[name = tensor("op_8896_cast")]; + tensor attn_177_transpose_x_0 = const()[name = tensor("attn_177_transpose_x_0"), val = tensor(false)]; + tensor attn_177_transpose_y_0 = const()[name = tensor("attn_177_transpose_y_0"), val = tensor(true)]; + tensor attn_177_cast = matmul(transpose_x = attn_177_transpose_x_0, transpose_y = attn_177_transpose_y_0, x = var_8892_cast, y = var_8896_cast)[name = tensor("attn_177_cast")]; + tensor var_8900 = const()[name = tensor("op_8900"), val = tensor([2, 1280, 1, -1])]; + tensor input_537_cast = reshape(shape = var_8900, x = attn_177_cast)[name = tensor("input_537_cast")]; + tensor var_8905 = const()[name = tensor("op_8905"), val = tensor([1, 1])]; + tensor var_8907 = const()[name = tensor("op_8907"), val = tensor([1, 1])]; + tensor var_8909_pad_type_0 = const()[name = tensor("op_8909_pad_type_0"), val = tensor("custom")]; + tensor var_8909_pad_0 = const()[name = tensor("op_8909_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(843475456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(844704320))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844704512)))]; + tensor var_8909_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_8907, groups = var_6865, pad = var_8909_pad_0, pad_type = var_8909_pad_type_0, strides = var_8905, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_537_cast)[name = tensor("op_8909_cast")]; + tensor inputs_267_cast = add(x = var_8909_cast, y = inputs_265_cast)[name = tensor("inputs_267_cast")]; + tensor var_8913 = const()[name = tensor("op_8913"), val = tensor([1])]; + tensor channels_mean_267_cast = reduce_mean(axes = var_8913, keep_dims = var_6860, x = inputs_267_cast)[name = tensor("channels_mean_267_cast")]; + tensor zero_mean_267_cast = sub(x = inputs_267_cast, y = channels_mean_267_cast)[name = tensor("zero_mean_267_cast")]; + tensor zero_mean_sq_267_cast = mul(x = zero_mean_267_cast, y = zero_mean_267_cast)[name = tensor("zero_mean_sq_267_cast")]; + tensor var_8917 = const()[name = tensor("op_8917"), val = tensor([1])]; + tensor var_8918_cast = reduce_mean(axes = var_8917, keep_dims = var_6860, x = zero_mean_sq_267_cast)[name = tensor("op_8918_cast")]; + tensor var_8919_to_fp16 = const()[name = tensor("op_8919_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8920_cast = add(x = var_8918_cast, y = var_8919_to_fp16)[name = tensor("op_8920_cast")]; + tensor denom_267_epsilon_0_to_fp16 = const()[name = tensor("denom_267_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_267_cast = rsqrt(epsilon = denom_267_epsilon_0_to_fp16, x = var_8920_cast)[name = tensor("denom_267_cast")]; + tensor out_267_cast = mul(x = zero_mean_267_cast, y = denom_267_cast)[name = tensor("out_267_cast")]; + tensor var_8924_to_fp16 = const()[name = tensor("op_8924_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844707136)))]; + tensor var_8925_cast = add(x = out_267_cast, y = var_8924_to_fp16)[name = tensor("op_8925_cast")]; + tensor var_8927_to_fp16 = const()[name = tensor("op_8927_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844709760)))]; + tensor hidden_states_363_cast = mul(x = var_8925_cast, y = var_8927_to_fp16)[name = tensor("hidden_states_363_cast")]; + tensor var_8934 = const()[name = tensor("op_8934"), val = tensor([1, 1])]; + tensor var_8936 = const()[name = tensor("op_8936"), val = tensor([1, 1])]; + tensor q_179_pad_type_0 = const()[name = tensor("q_179_pad_type_0"), val = tensor("custom")]; + tensor q_179_pad_0 = const()[name = tensor("q_179_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844712384))), lut = tensor([-0x1.d34p-6, -0x1.1dp-7, 0x1.1c8p-7, 0x1.d38p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_179_cast = conv(dilations = var_8936, groups = var_6865, 
pad = q_179_pad_0, pad_type = q_179_pad_type_0, strides = var_8934, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_363_cast)[name = tensor("q_179_cast")]; + tensor var_8940 = const()[name = tensor("op_8940"), val = tensor([1, 1])]; + tensor var_8942 = const()[name = tensor("op_8942"), val = tensor([1, 1])]; + tensor k_179_pad_type_0 = const()[name = tensor("k_179_pad_type_0"), val = tensor("custom")]; + tensor k_179_pad_0 = const()[name = tensor("k_179_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(845122048))), lut = tensor([-0x1.b84p-6, -0x1.05p-7, 0x1.07p-7, 0x1.b9cp-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_179_cast = conv(dilations = var_8942, groups = var_6865, pad = k_179_pad_0, pad_type = k_179_pad_type_0, strides = var_8940, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_179_cast")]; + tensor var_8946 = const()[name = tensor("op_8946"), val = tensor([1, 1])]; + tensor var_8948 = const()[name = tensor("op_8948"), val = tensor([1, 1])]; + tensor v_179_pad_type_0 = const()[name = tensor("v_179_pad_type_0"), val = tensor("custom")]; + tensor v_179_pad_0 = const()[name = tensor("v_179_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(845777472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847088256))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_179_cast = conv(dilations = var_8948, groups = var_6865, pad = v_179_pad_0, pad_type = v_179_pad_type_0, strides = var_8946, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_179_cast")]; + tensor var_8952 = const()[name = tensor("op_8952"), val = tensor([2, 20, 64, -1])]; + tensor var_8953_cast = reshape(shape = var_8952, x = q_179_cast)[name = tensor("op_8953_cast")]; + tensor var_8954 = const()[name = tensor("op_8954"), val = tensor([2, 20, 64, -1])]; + tensor var_8955_cast = reshape(shape = var_8954, x = k_179_cast)[name = tensor("op_8955_cast")]; + tensor var_8956 = const()[name = tensor("op_8956"), val = tensor([2, 20, 64, -1])]; + tensor var_8957_cast = reshape(shape = var_8956, x = v_179_cast)[name = tensor("op_8957_cast")]; + tensor attn_weights_357_transpose_x_0 = const()[name = tensor("attn_weights_357_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_357_transpose_y_0 = const()[name = tensor("attn_weights_357_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_357_cast = matmul(transpose_x = attn_weights_357_transpose_x_0, transpose_y = attn_weights_357_transpose_y_0, x = var_8953_cast, y = var_8955_cast)[name = tensor("attn_weights_357_cast")]; + tensor attn_weights_359_cast = mul(x = attn_weights_357_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_359_cast")]; + tensor var_8961_cast = softmax(axis = var_6849, x = 
attn_weights_359_cast)[name = tensor("op_8961_cast")]; + tensor attn_179_transpose_x_0 = const()[name = tensor("attn_179_transpose_x_0"), val = tensor(false)]; + tensor attn_179_transpose_y_0 = const()[name = tensor("attn_179_transpose_y_0"), val = tensor(true)]; + tensor attn_179_cast = matmul(transpose_x = attn_179_transpose_x_0, transpose_y = attn_179_transpose_y_0, x = var_8957_cast, y = var_8961_cast)[name = tensor("attn_179_cast")]; + tensor var_8965 = const()[name = tensor("op_8965"), val = tensor([2, 1280, 1, -1])]; + tensor input_539_cast = reshape(shape = var_8965, x = attn_179_cast)[name = tensor("input_539_cast")]; + tensor var_8970 = const()[name = tensor("op_8970"), val = tensor([1, 1])]; + tensor var_8972 = const()[name = tensor("op_8972"), val = tensor([1, 1])]; + tensor var_8974_pad_type_0 = const()[name = tensor("op_8974_pad_type_0"), val = tensor("custom")]; + tensor var_8974_pad_0 = const()[name = tensor("op_8974_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847088384))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847907648))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847907776)))]; + tensor var_8974_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_8972, groups = var_6865, pad = var_8974_pad_0, pad_type = var_8974_pad_type_0, strides = var_8970, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_539_cast)[name = tensor("op_8974_cast")]; + tensor inputs_269_cast = add(x = var_8974_cast, y = inputs_267_cast)[name = tensor("inputs_269_cast")]; + tensor var_8978 = const()[name = tensor("op_8978"), val = tensor([1])]; + tensor channels_mean_269_cast = reduce_mean(axes = var_8978, keep_dims = var_6860, x = inputs_269_cast)[name = tensor("channels_mean_269_cast")]; + tensor zero_mean_269_cast = sub(x = inputs_269_cast, y = channels_mean_269_cast)[name = tensor("zero_mean_269_cast")]; + tensor zero_mean_sq_269_cast = mul(x = zero_mean_269_cast, y = zero_mean_269_cast)[name = tensor("zero_mean_sq_269_cast")]; + tensor var_8982 = const()[name = tensor("op_8982"), val = tensor([1])]; + tensor var_8983_cast = reduce_mean(axes = var_8982, keep_dims = var_6860, x = zero_mean_sq_269_cast)[name = tensor("op_8983_cast")]; + tensor var_8984_to_fp16 = const()[name = tensor("op_8984_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8985_cast = add(x = var_8983_cast, y = var_8984_to_fp16)[name = tensor("op_8985_cast")]; + tensor denom_269_epsilon_0_to_fp16 = const()[name = tensor("denom_269_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_269_cast = rsqrt(epsilon = denom_269_epsilon_0_to_fp16, x = var_8985_cast)[name = tensor("denom_269_cast")]; + tensor out_269_cast = mul(x = zero_mean_269_cast, y = denom_269_cast)[name = tensor("out_269_cast")]; + tensor var_8989_to_fp16 = const()[name = tensor("op_8989_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(847910400)))]; + tensor var_8990_cast = add(x = out_269_cast, y = var_8989_to_fp16)[name = tensor("op_8990_cast")]; + tensor var_8992_to_fp16 = const()[name = tensor("op_8992_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847913024)))]; + tensor input_541_cast = mul(x = var_8990_cast, y = var_8992_to_fp16)[name = tensor("input_541_cast")]; + tensor var_9000 = const()[name = tensor("op_9000"), val = tensor([1, 1])]; + tensor var_9002 = const()[name = tensor("op_9002"), val = tensor([1, 1])]; + tensor var_9004_pad_type_0 = const()[name = tensor("op_9004_pad_type_0"), val = tensor("custom")]; + tensor var_9004_pad_0 = const()[name = tensor("op_9004_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847915648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(857746112))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(857746304)))]; + tensor var_9004_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_9002, groups = var_6865, pad = var_9004_pad_0, pad_type = var_9004_pad_type_0, strides = var_9000, weight = up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_541_cast)[name = tensor("op_9004_cast")]; + tensor var_9005_split_sizes_0 = const()[name = tensor("op_9005_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9005_axis_0 = const()[name = tensor("op_9005_axis_0"), val = tensor(1)]; + tensor var_9005_cast_0, tensor var_9005_cast_1 = split(axis = var_9005_axis_0, split_sizes = var_9005_split_sizes_0, x = var_9004_cast)[name = tensor("op_9005_cast")]; + tensor var_9007_mode_0 = const()[name = tensor("op_9007_mode_0"), val = tensor("EXACT")]; + tensor var_9007_cast = gelu(mode = var_9007_mode_0, x = var_9005_cast_1)[name = tensor("op_9007_cast")]; + tensor input_543_cast = mul(x = var_9005_cast_0, y = var_9007_cast)[name = tensor("input_543_cast")]; + tensor var_9011 = const()[name = tensor("op_9011"), val = tensor([1, 1])]; + tensor var_9013 = const()[name = tensor("op_9013"), val = tensor([1, 1])]; + tensor var_9015_pad_type_0 = const()[name = tensor("op_9015_pad_type_0"), val = tensor("custom")]; + tensor var_9015_pad_0 = const()[name = tensor("op_9015_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(857766848))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(862682112))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(862682304)))]; + tensor var_9015_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_9013, groups = var_6865, pad = var_9015_pad_0, pad_type = var_9015_pad_type_0, strides = var_9011, weight = up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_543_cast)[name = tensor("op_9015_cast")]; + tensor inputs_271_cast = add(x = var_9015_cast, y = inputs_269_cast)[name = tensor("inputs_271_cast")]; + tensor var_9025 = const()[name = tensor("op_9025"), val = tensor([1])]; + tensor channels_mean_271_cast = reduce_mean(axes = var_9025, keep_dims = var_6860, x = inputs_271_cast)[name = tensor("channels_mean_271_cast")]; + tensor zero_mean_271_cast = sub(x = inputs_271_cast, y = channels_mean_271_cast)[name = tensor("zero_mean_271_cast")]; + tensor zero_mean_sq_271_cast = mul(x = zero_mean_271_cast, y = zero_mean_271_cast)[name = tensor("zero_mean_sq_271_cast")]; + tensor var_9029 = const()[name = tensor("op_9029"), val = tensor([1])]; + tensor var_9030_cast = reduce_mean(axes = var_9029, keep_dims = var_6860, x = zero_mean_sq_271_cast)[name = tensor("op_9030_cast")]; + tensor var_9031_to_fp16 = const()[name = tensor("op_9031_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9032_cast = add(x = var_9030_cast, y = var_9031_to_fp16)[name = tensor("op_9032_cast")]; + tensor denom_271_epsilon_0_to_fp16 = const()[name = tensor("denom_271_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_271_cast = rsqrt(epsilon = denom_271_epsilon_0_to_fp16, x = var_9032_cast)[name = tensor("denom_271_cast")]; + tensor out_271_cast = mul(x = zero_mean_271_cast, y = denom_271_cast)[name = tensor("out_271_cast")]; + tensor var_9036_to_fp16 = const()[name = tensor("op_9036_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(862684928)))]; + tensor var_9037_cast = add(x = out_271_cast, y = var_9036_to_fp16)[name = tensor("op_9037_cast")]; + tensor var_9039_to_fp16 = const()[name = tensor("op_9039_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(862687552)))]; + tensor hidden_states_367_cast = mul(x = var_9037_cast, y = var_9039_to_fp16)[name = tensor("hidden_states_367_cast")]; + tensor var_9046 = const()[name = tensor("op_9046"), val = tensor([1, 1])]; + tensor var_9048 = const()[name = tensor("op_9048"), val = tensor([1, 1])]; + tensor q_181_pad_type_0 = const()[name = tensor("q_181_pad_type_0"), val = tensor("custom")]; + tensor q_181_pad_0 = const()[name = tensor("q_181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(862690176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(863509440))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_181_cast = conv(dilations = var_9048, groups = var_6865, pad = q_181_pad_0, pad_type = q_181_pad_type_0, strides = var_9046, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_367_cast)[name = tensor("q_181_cast")]; + tensor var_9052 = 
const()[name = tensor("op_9052"), val = tensor([1, 1])]; + tensor var_9054 = const()[name = tensor("op_9054"), val = tensor([1, 1])]; + tensor k_181_pad_type_0 = const()[name = tensor("k_181_pad_type_0"), val = tensor("custom")]; + tensor k_181_pad_0 = const()[name = tensor("k_181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(863509568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(864328832))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_181_cast = conv(dilations = var_9054, groups = var_6865, pad = k_181_pad_0, pad_type = k_181_pad_type_0, strides = var_9052, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_367_cast)[name = tensor("k_181_cast")]; + tensor var_9058 = const()[name = tensor("op_9058"), val = tensor([1, 1])]; + tensor var_9060 = const()[name = tensor("op_9060"), val = tensor([1, 1])]; + tensor v_181_pad_type_0 = const()[name = tensor("v_181_pad_type_0"), val = tensor("custom")]; + tensor v_181_pad_0 = const()[name = tensor("v_181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(864328960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(865557824))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_181_cast = conv(dilations = var_9060, groups = var_6865, pad = v_181_pad_0, pad_type = v_181_pad_type_0, strides = var_9058, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_367_cast)[name = tensor("v_181_cast")]; + tensor var_9064 = const()[name = tensor("op_9064"), val = tensor([2, 20, 64, -1])]; + tensor var_9065_cast = reshape(shape = var_9064, x = q_181_cast)[name = tensor("op_9065_cast")]; + tensor var_9066 = const()[name = tensor("op_9066"), val = tensor([2, 20, 64, -1])]; + tensor var_9067_cast = reshape(shape = var_9066, x = k_181_cast)[name = tensor("op_9067_cast")]; + tensor var_9068 = const()[name = tensor("op_9068"), val = tensor([2, 20, 64, -1])]; + tensor var_9069_cast = reshape(shape = var_9068, x = v_181_cast)[name = tensor("op_9069_cast")]; + tensor attn_weights_361_transpose_x_0 = const()[name = tensor("attn_weights_361_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_361_transpose_y_0 = const()[name = tensor("attn_weights_361_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_361_cast = matmul(transpose_x = attn_weights_361_transpose_x_0, transpose_y = attn_weights_361_transpose_y_0, x = var_9065_cast, y = var_9067_cast)[name = tensor("attn_weights_361_cast")]; + tensor attn_weights_363_cast = mul(x = attn_weights_361_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_363_cast")]; + tensor var_9073_cast = softmax(axis = var_6849, x = attn_weights_363_cast)[name = tensor("op_9073_cast")]; + tensor attn_181_transpose_x_0 = const()[name = tensor("attn_181_transpose_x_0"), val = tensor(false)]; + tensor attn_181_transpose_y_0 = const()[name 
= tensor("attn_181_transpose_y_0"), val = tensor(true)]; + tensor attn_181_cast = matmul(transpose_x = attn_181_transpose_x_0, transpose_y = attn_181_transpose_y_0, x = var_9069_cast, y = var_9073_cast)[name = tensor("attn_181_cast")]; + tensor var_9077 = const()[name = tensor("op_9077"), val = tensor([2, 1280, 1, -1])]; + tensor input_545_cast = reshape(shape = var_9077, x = attn_181_cast)[name = tensor("input_545_cast")]; + tensor var_9082 = const()[name = tensor("op_9082"), val = tensor([1, 1])]; + tensor var_9084 = const()[name = tensor("op_9084"), val = tensor([1, 1])]; + tensor var_9086_pad_type_0 = const()[name = tensor("op_9086_pad_type_0"), val = tensor("custom")]; + tensor var_9086_pad_0 = const()[name = tensor("op_9086_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(865558016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(866786880))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(866787072)))]; + tensor var_9086_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_9084, groups = var_6865, pad = var_9086_pad_0, pad_type = var_9086_pad_type_0, strides = var_9082, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_545_cast)[name = tensor("op_9086_cast")]; + tensor inputs_273_cast = add(x = var_9086_cast, y = inputs_271_cast)[name = tensor("inputs_273_cast")]; + tensor var_9090 = const()[name = tensor("op_9090"), val = tensor([1])]; + tensor channels_mean_273_cast = reduce_mean(axes = var_9090, keep_dims = var_6860, x = inputs_273_cast)[name = tensor("channels_mean_273_cast")]; + tensor zero_mean_273_cast = sub(x = inputs_273_cast, y = channels_mean_273_cast)[name = tensor("zero_mean_273_cast")]; + tensor zero_mean_sq_273_cast = mul(x = zero_mean_273_cast, y = zero_mean_273_cast)[name = tensor("zero_mean_sq_273_cast")]; + tensor var_9094 = const()[name = tensor("op_9094"), val = tensor([1])]; + tensor var_9095_cast = reduce_mean(axes = var_9094, keep_dims = var_6860, x = zero_mean_sq_273_cast)[name = tensor("op_9095_cast")]; + tensor var_9096_to_fp16 = const()[name = tensor("op_9096_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9097_cast = add(x = var_9095_cast, y = var_9096_to_fp16)[name = tensor("op_9097_cast")]; + tensor denom_273_epsilon_0_to_fp16 = const()[name = tensor("denom_273_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_273_cast = rsqrt(epsilon = denom_273_epsilon_0_to_fp16, x = var_9097_cast)[name = tensor("denom_273_cast")]; + tensor out_273_cast = mul(x = zero_mean_273_cast, y = denom_273_cast)[name = tensor("out_273_cast")]; + tensor var_9101_to_fp16 = const()[name = tensor("op_9101_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(866789696)))]; + tensor var_9102_cast = add(x = out_273_cast, y = var_9101_to_fp16)[name = tensor("op_9102_cast")]; + tensor var_9104_to_fp16 = const()[name = 
tensor("op_9104_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(866792320)))]; + tensor hidden_states_369_cast = mul(x = var_9102_cast, y = var_9104_to_fp16)[name = tensor("hidden_states_369_cast")]; + tensor var_9111 = const()[name = tensor("op_9111"), val = tensor([1, 1])]; + tensor var_9113 = const()[name = tensor("op_9113"), val = tensor([1, 1])]; + tensor q_183_pad_type_0 = const()[name = tensor("q_183_pad_type_0"), val = tensor("custom")]; + tensor q_183_pad_0 = const()[name = tensor("q_183_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(866794944))), lut = tensor([-0x1.2bp-5, -0x1.64cp-7, 0x1.60cp-7, 0x1.2ap-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_183_cast = conv(dilations = var_9113, groups = var_6865, pad = q_183_pad_0, pad_type = q_183_pad_type_0, strides = var_9111, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_369_cast)[name = tensor("q_183_cast")]; + tensor var_9117 = const()[name = tensor("op_9117"), val = tensor([1, 1])]; + tensor var_9119 = const()[name = tensor("op_9119"), val = tensor([1, 1])]; + tensor k_183_pad_type_0 = const()[name = tensor("k_183_pad_type_0"), val = tensor("custom")]; + tensor k_183_pad_0 = const()[name = tensor("k_183_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(867204608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(868515392))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_183_cast = conv(dilations = var_9119, groups = var_6865, pad = k_183_pad_0, pad_type = k_183_pad_type_0, strides = var_9117, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_183_cast")]; + tensor var_9123 = const()[name = tensor("op_9123"), val = tensor([1, 1])]; + tensor var_9125 = const()[name = tensor("op_9125"), val = tensor([1, 1])]; + tensor v_183_pad_type_0 = const()[name = tensor("v_183_pad_type_0"), val = tensor("custom")]; + tensor v_183_pad_0 = const()[name = tensor("v_183_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(868515520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(869826304))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_183_cast = conv(dilations = var_9125, groups = var_6865, pad = v_183_pad_0, pad_type = v_183_pad_type_0, strides = var_9123, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_183_cast")]; + tensor var_9129 = const()[name = tensor("op_9129"), val = tensor([2, 
20, 64, -1])]; + tensor var_9130_cast = reshape(shape = var_9129, x = q_183_cast)[name = tensor("op_9130_cast")]; + tensor var_9131 = const()[name = tensor("op_9131"), val = tensor([2, 20, 64, -1])]; + tensor var_9132_cast = reshape(shape = var_9131, x = k_183_cast)[name = tensor("op_9132_cast")]; + tensor var_9133 = const()[name = tensor("op_9133"), val = tensor([2, 20, 64, -1])]; + tensor var_9134_cast = reshape(shape = var_9133, x = v_183_cast)[name = tensor("op_9134_cast")]; + tensor attn_weights_365_transpose_x_0 = const()[name = tensor("attn_weights_365_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_365_transpose_y_0 = const()[name = tensor("attn_weights_365_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_365_cast = matmul(transpose_x = attn_weights_365_transpose_x_0, transpose_y = attn_weights_365_transpose_y_0, x = var_9130_cast, y = var_9132_cast)[name = tensor("attn_weights_365_cast")]; + tensor attn_weights_367_cast = mul(x = attn_weights_365_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_367_cast")]; + tensor var_9138_cast = softmax(axis = var_6849, x = attn_weights_367_cast)[name = tensor("op_9138_cast")]; + tensor attn_183_transpose_x_0 = const()[name = tensor("attn_183_transpose_x_0"), val = tensor(false)]; + tensor attn_183_transpose_y_0 = const()[name = tensor("attn_183_transpose_y_0"), val = tensor(true)]; + tensor attn_183_cast = matmul(transpose_x = attn_183_transpose_x_0, transpose_y = attn_183_transpose_y_0, x = var_9134_cast, y = var_9138_cast)[name = tensor("attn_183_cast")]; + tensor var_9142 = const()[name = tensor("op_9142"), val = tensor([2, 1280, 1, -1])]; + tensor input_547_cast = reshape(shape = var_9142, x = attn_183_cast)[name = tensor("input_547_cast")]; + tensor var_9147 = const()[name = tensor("op_9147"), val = tensor([1, 1])]; + tensor var_9149 = const()[name = tensor("op_9149"), val = tensor([1, 1])]; + tensor var_9151_pad_type_0 = const()[name = tensor("op_9151_pad_type_0"), val = tensor("custom")]; + tensor var_9151_pad_0 = const()[name = tensor("op_9151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(869826432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(870645696))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(870645824)))]; + tensor var_9151_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_9149, groups = var_6865, pad = var_9151_pad_0, pad_type = var_9151_pad_type_0, strides = var_9147, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_547_cast)[name = tensor("op_9151_cast")]; + tensor inputs_275_cast = add(x = var_9151_cast, y = inputs_273_cast)[name = tensor("inputs_275_cast")]; + tensor var_9155 = const()[name = tensor("op_9155"), val = tensor([1])]; + tensor channels_mean_275_cast = reduce_mean(axes = var_9155, keep_dims = var_6860, x = inputs_275_cast)[name = 
tensor("channels_mean_275_cast")]; + tensor zero_mean_275_cast = sub(x = inputs_275_cast, y = channels_mean_275_cast)[name = tensor("zero_mean_275_cast")]; + tensor zero_mean_sq_275_cast = mul(x = zero_mean_275_cast, y = zero_mean_275_cast)[name = tensor("zero_mean_sq_275_cast")]; + tensor var_9159 = const()[name = tensor("op_9159"), val = tensor([1])]; + tensor var_9160_cast = reduce_mean(axes = var_9159, keep_dims = var_6860, x = zero_mean_sq_275_cast)[name = tensor("op_9160_cast")]; + tensor var_9161_to_fp16 = const()[name = tensor("op_9161_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9162_cast = add(x = var_9160_cast, y = var_9161_to_fp16)[name = tensor("op_9162_cast")]; + tensor denom_275_epsilon_0_to_fp16 = const()[name = tensor("denom_275_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_275_cast = rsqrt(epsilon = denom_275_epsilon_0_to_fp16, x = var_9162_cast)[name = tensor("denom_275_cast")]; + tensor out_275_cast = mul(x = zero_mean_275_cast, y = denom_275_cast)[name = tensor("out_275_cast")]; + tensor var_9166_to_fp16 = const()[name = tensor("op_9166_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(870648448)))]; + tensor var_9167_cast = add(x = out_275_cast, y = var_9166_to_fp16)[name = tensor("op_9167_cast")]; + tensor var_9169_to_fp16 = const()[name = tensor("op_9169_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(870651072)))]; + tensor input_549_cast = mul(x = var_9167_cast, y = var_9169_to_fp16)[name = tensor("input_549_cast")]; + tensor var_9177 = const()[name = tensor("op_9177"), val = tensor([1, 1])]; + tensor var_9179 = const()[name = tensor("op_9179"), val = tensor([1, 1])]; + tensor var_9181_pad_type_0 = const()[name = tensor("op_9181_pad_type_0"), val = tensor("custom")]; + tensor var_9181_pad_0 = const()[name = tensor("op_9181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(870653696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(880484160))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(880484352)))]; + tensor var_9181_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_9179, groups = var_6865, pad = var_9181_pad_0, pad_type = var_9181_pad_type_0, strides = var_9177, weight = up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_549_cast)[name = tensor("op_9181_cast")]; + tensor var_9182_split_sizes_0 = const()[name = tensor("op_9182_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9182_axis_0 = const()[name = tensor("op_9182_axis_0"), val = tensor(1)]; + tensor var_9182_cast_0, tensor var_9182_cast_1 = split(axis = var_9182_axis_0, split_sizes = var_9182_split_sizes_0, x = var_9181_cast)[name = tensor("op_9182_cast")]; + tensor var_9184_mode_0 = const()[name = tensor("op_9184_mode_0"), val = tensor("EXACT")]; + tensor var_9184_cast = 
gelu(mode = var_9184_mode_0, x = var_9182_cast_1)[name = tensor("op_9184_cast")]; + tensor input_551_cast = mul(x = var_9182_cast_0, y = var_9184_cast)[name = tensor("input_551_cast")]; + tensor var_9188 = const()[name = tensor("op_9188"), val = tensor([1, 1])]; + tensor var_9190 = const()[name = tensor("op_9190"), val = tensor([1, 1])]; + tensor var_9192_pad_type_0 = const()[name = tensor("op_9192_pad_type_0"), val = tensor("custom")]; + tensor var_9192_pad_0 = const()[name = tensor("op_9192_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(880504896))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(885420160))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(885420352)))]; + tensor var_9192_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_9190, groups = var_6865, pad = var_9192_pad_0, pad_type = var_9192_pad_type_0, strides = var_9188, weight = up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_551_cast)[name = tensor("op_9192_cast")]; + tensor inputs_277_cast = add(x = var_9192_cast, y = inputs_275_cast)[name = tensor("inputs_277_cast")]; + tensor var_9202 = const()[name = tensor("op_9202"), val = tensor([1])]; + tensor channels_mean_277_cast = reduce_mean(axes = var_9202, keep_dims = var_6860, x = inputs_277_cast)[name = tensor("channels_mean_277_cast")]; + tensor zero_mean_277_cast = sub(x = inputs_277_cast, y = channels_mean_277_cast)[name = tensor("zero_mean_277_cast")]; + tensor zero_mean_sq_277_cast = mul(x = zero_mean_277_cast, y = zero_mean_277_cast)[name = tensor("zero_mean_sq_277_cast")]; + tensor var_9206 = const()[name = tensor("op_9206"), val = tensor([1])]; + tensor var_9207_cast = reduce_mean(axes = var_9206, keep_dims = var_6860, x = zero_mean_sq_277_cast)[name = tensor("op_9207_cast")]; + tensor var_9208_to_fp16 = const()[name = tensor("op_9208_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9209_cast = add(x = var_9207_cast, y = var_9208_to_fp16)[name = tensor("op_9209_cast")]; + tensor denom_277_epsilon_0_to_fp16 = const()[name = tensor("denom_277_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_277_cast = rsqrt(epsilon = denom_277_epsilon_0_to_fp16, x = var_9209_cast)[name = tensor("denom_277_cast")]; + tensor out_277_cast = mul(x = zero_mean_277_cast, y = denom_277_cast)[name = tensor("out_277_cast")]; + tensor var_9213_to_fp16 = const()[name = tensor("op_9213_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(885422976)))]; + tensor var_9214_cast = add(x = out_277_cast, y = var_9213_to_fp16)[name = tensor("op_9214_cast")]; + tensor var_9216_to_fp16 = const()[name = tensor("op_9216_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(885425600)))]; + tensor hidden_states_373_cast = mul(x = var_9214_cast, y = var_9216_to_fp16)[name = tensor("hidden_states_373_cast")]; + tensor var_9223 = 
const()[name = tensor("op_9223"), val = tensor([1, 1])]; + tensor var_9225 = const()[name = tensor("op_9225"), val = tensor([1, 1])]; + tensor q_185_pad_type_0 = const()[name = tensor("q_185_pad_type_0"), val = tensor("custom")]; + tensor q_185_pad_0 = const()[name = tensor("q_185_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(885428224))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(886247488))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_185_cast = conv(dilations = var_9225, groups = var_6865, pad = q_185_pad_0, pad_type = q_185_pad_type_0, strides = var_9223, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_373_cast)[name = tensor("q_185_cast")]; + tensor var_9229 = const()[name = tensor("op_9229"), val = tensor([1, 1])]; + tensor var_9231 = const()[name = tensor("op_9231"), val = tensor([1, 1])]; + tensor k_185_pad_type_0 = const()[name = tensor("k_185_pad_type_0"), val = tensor("custom")]; + tensor k_185_pad_0 = const()[name = tensor("k_185_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(886247616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887066880))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_185_cast = conv(dilations = var_9231, groups = var_6865, pad = k_185_pad_0, pad_type = k_185_pad_type_0, strides = var_9229, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_373_cast)[name = tensor("k_185_cast")]; + tensor var_9235 = const()[name = tensor("op_9235"), val = tensor([1, 1])]; + tensor var_9237 = const()[name = tensor("op_9237"), val = tensor([1, 1])]; + tensor v_185_pad_type_0 = const()[name = tensor("v_185_pad_type_0"), val = tensor("custom")]; + tensor v_185_pad_0 = const()[name = tensor("v_185_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(887067008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(888295872))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_185_cast = conv(dilations = var_9237, groups = var_6865, pad = v_185_pad_0, pad_type = v_185_pad_type_0, strides = var_9235, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_373_cast)[name = tensor("v_185_cast")]; + tensor var_9241 = const()[name = tensor("op_9241"), val = tensor([2, 20, 64, -1])]; + tensor var_9242_cast = reshape(shape = var_9241, x = q_185_cast)[name = tensor("op_9242_cast")]; + tensor var_9243 = const()[name = tensor("op_9243"), val = tensor([2, 20, 64, -1])]; + tensor var_9244_cast = 
reshape(shape = var_9243, x = k_185_cast)[name = tensor("op_9244_cast")]; + tensor var_9245 = const()[name = tensor("op_9245"), val = tensor([2, 20, 64, -1])]; + tensor var_9246_cast = reshape(shape = var_9245, x = v_185_cast)[name = tensor("op_9246_cast")]; + tensor attn_weights_369_transpose_x_0 = const()[name = tensor("attn_weights_369_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_369_transpose_y_0 = const()[name = tensor("attn_weights_369_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_369_cast = matmul(transpose_x = attn_weights_369_transpose_x_0, transpose_y = attn_weights_369_transpose_y_0, x = var_9242_cast, y = var_9244_cast)[name = tensor("attn_weights_369_cast")]; + tensor attn_weights_371_cast = mul(x = attn_weights_369_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_371_cast")]; + tensor var_9250_cast = softmax(axis = var_6849, x = attn_weights_371_cast)[name = tensor("op_9250_cast")]; + tensor attn_185_transpose_x_0 = const()[name = tensor("attn_185_transpose_x_0"), val = tensor(false)]; + tensor attn_185_transpose_y_0 = const()[name = tensor("attn_185_transpose_y_0"), val = tensor(true)]; + tensor attn_185_cast = matmul(transpose_x = attn_185_transpose_x_0, transpose_y = attn_185_transpose_y_0, x = var_9246_cast, y = var_9250_cast)[name = tensor("attn_185_cast")]; + tensor var_9254 = const()[name = tensor("op_9254"), val = tensor([2, 1280, 1, -1])]; + tensor input_553_cast = reshape(shape = var_9254, x = attn_185_cast)[name = tensor("input_553_cast")]; + tensor var_9259 = const()[name = tensor("op_9259"), val = tensor([1, 1])]; + tensor var_9261 = const()[name = tensor("op_9261"), val = tensor([1, 1])]; + tensor var_9263_pad_type_0 = const()[name = tensor("op_9263_pad_type_0"), val = tensor("custom")]; + tensor var_9263_pad_0 = const()[name = tensor("op_9263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(888296064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(889524928))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(889525120)))]; + tensor var_9263_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_9261, groups = var_6865, pad = var_9263_pad_0, pad_type = var_9263_pad_type_0, strides = var_9259, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_553_cast)[name = tensor("op_9263_cast")]; + tensor inputs_279_cast = add(x = var_9263_cast, y = inputs_277_cast)[name = tensor("inputs_279_cast")]; + tensor var_9267 = const()[name = tensor("op_9267"), val = tensor([1])]; + tensor channels_mean_279_cast = reduce_mean(axes = var_9267, keep_dims = var_6860, x = inputs_279_cast)[name = tensor("channels_mean_279_cast")]; + tensor zero_mean_279_cast = sub(x = inputs_279_cast, y = channels_mean_279_cast)[name = tensor("zero_mean_279_cast")]; + tensor zero_mean_sq_279_cast = mul(x = zero_mean_279_cast, y = zero_mean_279_cast)[name = 
tensor("zero_mean_sq_279_cast")]; + tensor var_9271 = const()[name = tensor("op_9271"), val = tensor([1])]; + tensor var_9272_cast = reduce_mean(axes = var_9271, keep_dims = var_6860, x = zero_mean_sq_279_cast)[name = tensor("op_9272_cast")]; + tensor var_9273_to_fp16 = const()[name = tensor("op_9273_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9274_cast = add(x = var_9272_cast, y = var_9273_to_fp16)[name = tensor("op_9274_cast")]; + tensor denom_279_epsilon_0_to_fp16 = const()[name = tensor("denom_279_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_279_cast = rsqrt(epsilon = denom_279_epsilon_0_to_fp16, x = var_9274_cast)[name = tensor("denom_279_cast")]; + tensor out_279_cast = mul(x = zero_mean_279_cast, y = denom_279_cast)[name = tensor("out_279_cast")]; + tensor var_9278_to_fp16 = const()[name = tensor("op_9278_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(889527744)))]; + tensor var_9279_cast = add(x = out_279_cast, y = var_9278_to_fp16)[name = tensor("op_9279_cast")]; + tensor var_9281_to_fp16 = const()[name = tensor("op_9281_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(889530368)))]; + tensor hidden_states_375_cast = mul(x = var_9279_cast, y = var_9281_to_fp16)[name = tensor("hidden_states_375_cast")]; + tensor var_9288 = const()[name = tensor("op_9288"), val = tensor([1, 1])]; + tensor var_9290 = const()[name = tensor("op_9290"), val = tensor([1, 1])]; + tensor q_187_pad_type_0 = const()[name = tensor("q_187_pad_type_0"), val = tensor("custom")]; + tensor q_187_pad_0 = const()[name = tensor("q_187_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(889532992))), lut = tensor([-0x1.348p-5, -0x1.6fcp-7, 0x1.71cp-7, 0x1.354p-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_187_cast = conv(dilations = var_9290, groups = var_6865, pad = q_187_pad_0, pad_type = q_187_pad_type_0, strides = var_9288, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_375_cast)[name = tensor("q_187_cast")]; + tensor var_9294 = const()[name = tensor("op_9294"), val = tensor([1, 1])]; + tensor var_9296 = const()[name = tensor("op_9296"), val = tensor([1, 1])]; + tensor k_187_pad_type_0 = const()[name = tensor("k_187_pad_type_0"), val = tensor("custom")]; + tensor k_187_pad_0 = const()[name = tensor("k_187_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(889942656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(891253440))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_187_cast = conv(dilations = var_9296, groups = var_6865, pad = k_187_pad_0, pad_type = k_187_pad_type_0, strides = var_9294, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_187_cast")]; + tensor var_9300 = const()[name = tensor("op_9300"), val 
= tensor([1, 1])]; + tensor var_9302 = const()[name = tensor("op_9302"), val = tensor([1, 1])]; + tensor v_187_pad_type_0 = const()[name = tensor("v_187_pad_type_0"), val = tensor("custom")]; + tensor v_187_pad_0 = const()[name = tensor("v_187_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(891253568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(892564352))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_187_cast = conv(dilations = var_9302, groups = var_6865, pad = v_187_pad_0, pad_type = v_187_pad_type_0, strides = var_9300, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_187_cast")]; + tensor var_9306 = const()[name = tensor("op_9306"), val = tensor([2, 20, 64, -1])]; + tensor var_9307_cast = reshape(shape = var_9306, x = q_187_cast)[name = tensor("op_9307_cast")]; + tensor var_9308 = const()[name = tensor("op_9308"), val = tensor([2, 20, 64, -1])]; + tensor var_9309_cast = reshape(shape = var_9308, x = k_187_cast)[name = tensor("op_9309_cast")]; + tensor var_9310 = const()[name = tensor("op_9310"), val = tensor([2, 20, 64, -1])]; + tensor var_9311_cast = reshape(shape = var_9310, x = v_187_cast)[name = tensor("op_9311_cast")]; + tensor attn_weights_373_transpose_x_0 = const()[name = tensor("attn_weights_373_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_373_transpose_y_0 = const()[name = tensor("attn_weights_373_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_373_cast = matmul(transpose_x = attn_weights_373_transpose_x_0, transpose_y = attn_weights_373_transpose_y_0, x = var_9307_cast, y = var_9309_cast)[name = tensor("attn_weights_373_cast")]; + tensor attn_weights_375_cast = mul(x = attn_weights_373_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_375_cast")]; + tensor var_9315_cast = softmax(axis = var_6849, x = attn_weights_375_cast)[name = tensor("op_9315_cast")]; + tensor attn_187_transpose_x_0 = const()[name = tensor("attn_187_transpose_x_0"), val = tensor(false)]; + tensor attn_187_transpose_y_0 = const()[name = tensor("attn_187_transpose_y_0"), val = tensor(true)]; + tensor attn_187_cast = matmul(transpose_x = attn_187_transpose_x_0, transpose_y = attn_187_transpose_y_0, x = var_9311_cast, y = var_9315_cast)[name = tensor("attn_187_cast")]; + tensor var_9319 = const()[name = tensor("op_9319"), val = tensor([2, 1280, 1, -1])]; + tensor input_555_cast = reshape(shape = var_9319, x = attn_187_cast)[name = tensor("input_555_cast")]; + tensor var_9324 = const()[name = tensor("op_9324"), val = tensor([1, 1])]; + tensor var_9326 = const()[name = tensor("op_9326"), val = tensor([1, 1])]; + tensor var_9328_pad_type_0 = const()[name = tensor("op_9328_pad_type_0"), val = tensor("custom")]; + tensor var_9328_pad_0 = const()[name = tensor("op_9328_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(892564480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893383744))), name = 
tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893383872)))]; + tensor var_9328_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_9326, groups = var_6865, pad = var_9328_pad_0, pad_type = var_9328_pad_type_0, strides = var_9324, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_555_cast)[name = tensor("op_9328_cast")]; + tensor inputs_281_cast = add(x = var_9328_cast, y = inputs_279_cast)[name = tensor("inputs_281_cast")]; + tensor var_9332 = const()[name = tensor("op_9332"), val = tensor([1])]; + tensor channels_mean_281_cast = reduce_mean(axes = var_9332, keep_dims = var_6860, x = inputs_281_cast)[name = tensor("channels_mean_281_cast")]; + tensor zero_mean_281_cast = sub(x = inputs_281_cast, y = channels_mean_281_cast)[name = tensor("zero_mean_281_cast")]; + tensor zero_mean_sq_281_cast = mul(x = zero_mean_281_cast, y = zero_mean_281_cast)[name = tensor("zero_mean_sq_281_cast")]; + tensor var_9336 = const()[name = tensor("op_9336"), val = tensor([1])]; + tensor var_9337_cast = reduce_mean(axes = var_9336, keep_dims = var_6860, x = zero_mean_sq_281_cast)[name = tensor("op_9337_cast")]; + tensor var_9338_to_fp16 = const()[name = tensor("op_9338_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9339_cast = add(x = var_9337_cast, y = var_9338_to_fp16)[name = tensor("op_9339_cast")]; + tensor denom_281_epsilon_0_to_fp16 = const()[name = tensor("denom_281_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_281_cast = rsqrt(epsilon = denom_281_epsilon_0_to_fp16, x = var_9339_cast)[name = tensor("denom_281_cast")]; + tensor out_281_cast = mul(x = zero_mean_281_cast, y = denom_281_cast)[name = tensor("out_281_cast")]; + tensor var_9343_to_fp16 = const()[name = tensor("op_9343_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893386496)))]; + tensor var_9344_cast = add(x = out_281_cast, y = var_9343_to_fp16)[name = tensor("op_9344_cast")]; + tensor var_9346_to_fp16 = const()[name = tensor("op_9346_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893389120)))]; + tensor input_557_cast = mul(x = var_9344_cast, y = var_9346_to_fp16)[name = tensor("input_557_cast")]; + tensor var_9354 = const()[name = tensor("op_9354"), val = tensor([1, 1])]; + tensor var_9356 = const()[name = tensor("op_9356"), val = tensor([1, 1])]; + tensor var_9358_pad_type_0 = const()[name = tensor("op_9358_pad_type_0"), val = tensor("custom")]; + tensor var_9358_pad_0 = const()[name = tensor("op_9358_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893391744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(903222208))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor 
up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(903222400)))]; + tensor var_9358_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_9356, groups = var_6865, pad = var_9358_pad_0, pad_type = var_9358_pad_type_0, strides = var_9354, weight = up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_557_cast)[name = tensor("op_9358_cast")]; + tensor var_9359_split_sizes_0 = const()[name = tensor("op_9359_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9359_axis_0 = const()[name = tensor("op_9359_axis_0"), val = tensor(1)]; + tensor var_9359_cast_0, tensor var_9359_cast_1 = split(axis = var_9359_axis_0, split_sizes = var_9359_split_sizes_0, x = var_9358_cast)[name = tensor("op_9359_cast")]; + tensor var_9361_mode_0 = const()[name = tensor("op_9361_mode_0"), val = tensor("EXACT")]; + tensor var_9361_cast = gelu(mode = var_9361_mode_0, x = var_9359_cast_1)[name = tensor("op_9361_cast")]; + tensor input_559_cast = mul(x = var_9359_cast_0, y = var_9361_cast)[name = tensor("input_559_cast")]; + tensor var_9365 = const()[name = tensor("op_9365"), val = tensor([1, 1])]; + tensor var_9367 = const()[name = tensor("op_9367"), val = tensor([1, 1])]; + tensor var_9369_pad_type_0 = const()[name = tensor("op_9369_pad_type_0"), val = tensor("custom")]; + tensor var_9369_pad_0 = const()[name = tensor("op_9369_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(903242944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908158208))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908158400)))]; + tensor var_9369_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_9367, groups = var_6865, pad = var_9369_pad_0, pad_type = var_9369_pad_type_0, strides = var_9365, weight = up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_559_cast)[name = tensor("op_9369_cast")]; + tensor inputs_283_cast = add(x = var_9369_cast, y = inputs_281_cast)[name = tensor("inputs_283_cast")]; + tensor var_9379 = const()[name = tensor("op_9379"), val = tensor([1])]; + tensor channels_mean_283_cast = reduce_mean(axes = var_9379, keep_dims = var_6860, x = inputs_283_cast)[name = tensor("channels_mean_283_cast")]; + tensor zero_mean_283_cast = sub(x = inputs_283_cast, y = channels_mean_283_cast)[name = tensor("zero_mean_283_cast")]; + tensor zero_mean_sq_283_cast = mul(x = zero_mean_283_cast, y = zero_mean_283_cast)[name = tensor("zero_mean_sq_283_cast")]; + tensor var_9383 = const()[name = tensor("op_9383"), val = tensor([1])]; + tensor var_9384_cast = reduce_mean(axes = var_9383, keep_dims = var_6860, x = zero_mean_sq_283_cast)[name = tensor("op_9384_cast")]; + 
tensor var_9385_to_fp16 = const()[name = tensor("op_9385_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9386_cast = add(x = var_9384_cast, y = var_9385_to_fp16)[name = tensor("op_9386_cast")]; + tensor denom_283_epsilon_0_to_fp16 = const()[name = tensor("denom_283_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_283_cast = rsqrt(epsilon = denom_283_epsilon_0_to_fp16, x = var_9386_cast)[name = tensor("denom_283_cast")]; + tensor out_283_cast = mul(x = zero_mean_283_cast, y = denom_283_cast)[name = tensor("out_283_cast")]; + tensor var_9390_to_fp16 = const()[name = tensor("op_9390_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908161024)))]; + tensor var_9391_cast = add(x = out_283_cast, y = var_9390_to_fp16)[name = tensor("op_9391_cast")]; + tensor var_9393_to_fp16 = const()[name = tensor("op_9393_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908163648)))]; + tensor hidden_states_379_cast = mul(x = var_9391_cast, y = var_9393_to_fp16)[name = tensor("hidden_states_379_cast")]; + tensor var_9400 = const()[name = tensor("op_9400"), val = tensor([1, 1])]; + tensor var_9402 = const()[name = tensor("op_9402"), val = tensor([1, 1])]; + tensor q_189_pad_type_0 = const()[name = tensor("q_189_pad_type_0"), val = tensor("custom")]; + tensor q_189_pad_0 = const()[name = tensor("q_189_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908166272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908985536))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_189_cast = conv(dilations = var_9402, groups = var_6865, pad = q_189_pad_0, pad_type = q_189_pad_type_0, strides = var_9400, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_379_cast)[name = tensor("q_189_cast")]; + tensor var_9406 = const()[name = tensor("op_9406"), val = tensor([1, 1])]; + tensor var_9408 = const()[name = tensor("op_9408"), val = tensor([1, 1])]; + tensor k_189_pad_type_0 = const()[name = tensor("k_189_pad_type_0"), val = tensor("custom")]; + tensor k_189_pad_0 = const()[name = tensor("k_189_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(908985664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(909804928))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_189_cast = conv(dilations = var_9408, groups = var_6865, pad = k_189_pad_0, pad_type = k_189_pad_type_0, strides = var_9406, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_379_cast)[name = tensor("k_189_cast")]; + tensor var_9412 = const()[name = tensor("op_9412"), val = tensor([1, 1])]; + tensor var_9414 = const()[name = tensor("op_9414"), val = tensor([1, 1])]; + tensor v_189_pad_type_0 = const()[name = tensor("v_189_pad_type_0"), val = tensor("custom")]; + tensor 
v_189_pad_0 = const()[name = tensor("v_189_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(909805056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(911033920))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_189_cast = conv(dilations = var_9414, groups = var_6865, pad = v_189_pad_0, pad_type = v_189_pad_type_0, strides = var_9412, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_379_cast)[name = tensor("v_189_cast")]; + tensor var_9418 = const()[name = tensor("op_9418"), val = tensor([2, 20, 64, -1])]; + tensor var_9419_cast = reshape(shape = var_9418, x = q_189_cast)[name = tensor("op_9419_cast")]; + tensor var_9420 = const()[name = tensor("op_9420"), val = tensor([2, 20, 64, -1])]; + tensor var_9421_cast = reshape(shape = var_9420, x = k_189_cast)[name = tensor("op_9421_cast")]; + tensor var_9422 = const()[name = tensor("op_9422"), val = tensor([2, 20, 64, -1])]; + tensor var_9423_cast = reshape(shape = var_9422, x = v_189_cast)[name = tensor("op_9423_cast")]; + tensor attn_weights_377_transpose_x_0 = const()[name = tensor("attn_weights_377_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_377_transpose_y_0 = const()[name = tensor("attn_weights_377_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_377_cast = matmul(transpose_x = attn_weights_377_transpose_x_0, transpose_y = attn_weights_377_transpose_y_0, x = var_9419_cast, y = var_9421_cast)[name = tensor("attn_weights_377_cast")]; + tensor attn_weights_379_cast = mul(x = attn_weights_377_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_379_cast")]; + tensor var_9427_cast = softmax(axis = var_6849, x = attn_weights_379_cast)[name = tensor("op_9427_cast")]; + tensor attn_189_transpose_x_0 = const()[name = tensor("attn_189_transpose_x_0"), val = tensor(false)]; + tensor attn_189_transpose_y_0 = const()[name = tensor("attn_189_transpose_y_0"), val = tensor(true)]; + tensor attn_189_cast = matmul(transpose_x = attn_189_transpose_x_0, transpose_y = attn_189_transpose_y_0, x = var_9423_cast, y = var_9427_cast)[name = tensor("attn_189_cast")]; + tensor var_9431 = const()[name = tensor("op_9431"), val = tensor([2, 1280, 1, -1])]; + tensor input_561_cast = reshape(shape = var_9431, x = attn_189_cast)[name = tensor("input_561_cast")]; + tensor var_9436 = const()[name = tensor("op_9436"), val = tensor([1, 1])]; + tensor var_9438 = const()[name = tensor("op_9438"), val = tensor([1, 1])]; + tensor var_9440_pad_type_0 = const()[name = tensor("op_9440_pad_type_0"), val = tensor("custom")]; + tensor var_9440_pad_0 = const()[name = tensor("op_9440_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(911034112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(912262976))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor 
up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(912263168)))]; + tensor var_9440_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_9438, groups = var_6865, pad = var_9440_pad_0, pad_type = var_9440_pad_type_0, strides = var_9436, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_561_cast)[name = tensor("op_9440_cast")]; + tensor inputs_285_cast = add(x = var_9440_cast, y = inputs_283_cast)[name = tensor("inputs_285_cast")]; + tensor var_9444 = const()[name = tensor("op_9444"), val = tensor([1])]; + tensor channels_mean_285_cast = reduce_mean(axes = var_9444, keep_dims = var_6860, x = inputs_285_cast)[name = tensor("channels_mean_285_cast")]; + tensor zero_mean_285_cast = sub(x = inputs_285_cast, y = channels_mean_285_cast)[name = tensor("zero_mean_285_cast")]; + tensor zero_mean_sq_285_cast = mul(x = zero_mean_285_cast, y = zero_mean_285_cast)[name = tensor("zero_mean_sq_285_cast")]; + tensor var_9448 = const()[name = tensor("op_9448"), val = tensor([1])]; + tensor var_9449_cast = reduce_mean(axes = var_9448, keep_dims = var_6860, x = zero_mean_sq_285_cast)[name = tensor("op_9449_cast")]; + tensor var_9450_to_fp16 = const()[name = tensor("op_9450_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9451_cast = add(x = var_9449_cast, y = var_9450_to_fp16)[name = tensor("op_9451_cast")]; + tensor denom_285_epsilon_0_to_fp16 = const()[name = tensor("denom_285_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_285_cast = rsqrt(epsilon = denom_285_epsilon_0_to_fp16, x = var_9451_cast)[name = tensor("denom_285_cast")]; + tensor out_285_cast = mul(x = zero_mean_285_cast, y = denom_285_cast)[name = tensor("out_285_cast")]; + tensor var_9455_to_fp16 = const()[name = tensor("op_9455_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(912265792)))]; + tensor var_9456_cast = add(x = out_285_cast, y = var_9455_to_fp16)[name = tensor("op_9456_cast")]; + tensor var_9458_to_fp16 = const()[name = tensor("op_9458_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(912268416)))]; + tensor hidden_states_381_cast = mul(x = var_9456_cast, y = var_9458_to_fp16)[name = tensor("hidden_states_381_cast")]; + tensor var_9465 = const()[name = tensor("op_9465"), val = tensor([1, 1])]; + tensor var_9467 = const()[name = tensor("op_9467"), val = tensor([1, 1])]; + tensor q_191_pad_type_0 = const()[name = tensor("q_191_pad_type_0"), val = tensor("custom")]; + tensor q_191_pad_0 = const()[name = tensor("q_191_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(912271040))), lut = tensor([-0x1.42cp-5, -0x1.84p-7, 0x1.774p-7, 0x1.3fp-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_191_cast = conv(dilations = var_9467, groups = var_6865, pad = q_191_pad_0, pad_type = q_191_pad_type_0, strides = var_9465, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = 
hidden_states_381_cast)[name = tensor("q_191_cast")]; + tensor var_9471 = const()[name = tensor("op_9471"), val = tensor([1, 1])]; + tensor var_9473 = const()[name = tensor("op_9473"), val = tensor([1, 1])]; + tensor k_191_pad_type_0 = const()[name = tensor("k_191_pad_type_0"), val = tensor("custom")]; + tensor k_191_pad_0 = const()[name = tensor("k_191_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(912680704))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(913991488))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_191_cast = conv(dilations = var_9473, groups = var_6865, pad = k_191_pad_0, pad_type = k_191_pad_type_0, strides = var_9471, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_191_cast")]; + tensor var_9477 = const()[name = tensor("op_9477"), val = tensor([1, 1])]; + tensor var_9479 = const()[name = tensor("op_9479"), val = tensor([1, 1])]; + tensor v_191_pad_type_0 = const()[name = tensor("v_191_pad_type_0"), val = tensor("custom")]; + tensor v_191_pad_0 = const()[name = tensor("v_191_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(913991616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(915302400))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_191_cast = conv(dilations = var_9479, groups = var_6865, pad = v_191_pad_0, pad_type = v_191_pad_type_0, strides = var_9477, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_191_cast")]; + tensor var_9483 = const()[name = tensor("op_9483"), val = tensor([2, 20, 64, -1])]; + tensor var_9484_cast = reshape(shape = var_9483, x = q_191_cast)[name = tensor("op_9484_cast")]; + tensor var_9485 = const()[name = tensor("op_9485"), val = tensor([2, 20, 64, -1])]; + tensor var_9486_cast = reshape(shape = var_9485, x = k_191_cast)[name = tensor("op_9486_cast")]; + tensor var_9487 = const()[name = tensor("op_9487"), val = tensor([2, 20, 64, -1])]; + tensor var_9488_cast = reshape(shape = var_9487, x = v_191_cast)[name = tensor("op_9488_cast")]; + tensor attn_weights_381_transpose_x_0 = const()[name = tensor("attn_weights_381_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_381_transpose_y_0 = const()[name = tensor("attn_weights_381_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_381_cast = matmul(transpose_x = attn_weights_381_transpose_x_0, transpose_y = attn_weights_381_transpose_y_0, x = var_9484_cast, y = var_9486_cast)[name = tensor("attn_weights_381_cast")]; + tensor attn_weights_383_cast = mul(x = attn_weights_381_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_383_cast")]; + tensor var_9492_cast = softmax(axis = var_6849, x = attn_weights_383_cast)[name = tensor("op_9492_cast")]; + tensor attn_191_transpose_x_0 = const()[name = 
tensor("attn_191_transpose_x_0"), val = tensor(false)]; + tensor attn_191_transpose_y_0 = const()[name = tensor("attn_191_transpose_y_0"), val = tensor(true)]; + tensor attn_191_cast = matmul(transpose_x = attn_191_transpose_x_0, transpose_y = attn_191_transpose_y_0, x = var_9488_cast, y = var_9492_cast)[name = tensor("attn_191_cast")]; + tensor var_9496 = const()[name = tensor("op_9496"), val = tensor([2, 1280, 1, -1])]; + tensor input_563_cast = reshape(shape = var_9496, x = attn_191_cast)[name = tensor("input_563_cast")]; + tensor var_9501 = const()[name = tensor("op_9501"), val = tensor([1, 1])]; + tensor var_9503 = const()[name = tensor("op_9503"), val = tensor([1, 1])]; + tensor var_9505_pad_type_0 = const()[name = tensor("op_9505_pad_type_0"), val = tensor("custom")]; + tensor var_9505_pad_0 = const()[name = tensor("op_9505_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(915302528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(916121792))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(916121920)))]; + tensor var_9505_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_9503, groups = var_6865, pad = var_9505_pad_0, pad_type = var_9505_pad_type_0, strides = var_9501, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_563_cast)[name = tensor("op_9505_cast")]; + tensor inputs_287_cast = add(x = var_9505_cast, y = inputs_285_cast)[name = tensor("inputs_287_cast")]; + tensor var_9509 = const()[name = tensor("op_9509"), val = tensor([1])]; + tensor channels_mean_287_cast = reduce_mean(axes = var_9509, keep_dims = var_6860, x = inputs_287_cast)[name = tensor("channels_mean_287_cast")]; + tensor zero_mean_287_cast = sub(x = inputs_287_cast, y = channels_mean_287_cast)[name = tensor("zero_mean_287_cast")]; + tensor zero_mean_sq_287_cast = mul(x = zero_mean_287_cast, y = zero_mean_287_cast)[name = tensor("zero_mean_sq_287_cast")]; + tensor var_9513 = const()[name = tensor("op_9513"), val = tensor([1])]; + tensor var_9514_cast = reduce_mean(axes = var_9513, keep_dims = var_6860, x = zero_mean_sq_287_cast)[name = tensor("op_9514_cast")]; + tensor var_9515_to_fp16 = const()[name = tensor("op_9515_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9516_cast = add(x = var_9514_cast, y = var_9515_to_fp16)[name = tensor("op_9516_cast")]; + tensor denom_287_epsilon_0_to_fp16 = const()[name = tensor("denom_287_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_287_cast = rsqrt(epsilon = denom_287_epsilon_0_to_fp16, x = var_9516_cast)[name = tensor("denom_287_cast")]; + tensor out_287_cast = mul(x = zero_mean_287_cast, y = denom_287_cast)[name = tensor("out_287_cast")]; + tensor var_9520_to_fp16 = const()[name = tensor("op_9520_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(916124544)))]; + tensor var_9521_cast = add(x = 
out_287_cast, y = var_9520_to_fp16)[name = tensor("op_9521_cast")]; + tensor var_9523_to_fp16 = const()[name = tensor("op_9523_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(916127168)))]; + tensor input_565_cast = mul(x = var_9521_cast, y = var_9523_to_fp16)[name = tensor("input_565_cast")]; + tensor var_9531 = const()[name = tensor("op_9531"), val = tensor([1, 1])]; + tensor var_9533 = const()[name = tensor("op_9533"), val = tensor([1, 1])]; + tensor var_9535_pad_type_0 = const()[name = tensor("op_9535_pad_type_0"), val = tensor("custom")]; + tensor var_9535_pad_0 = const()[name = tensor("op_9535_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(916129792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(925960256))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(925960448)))]; + tensor var_9535_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_9533, groups = var_6865, pad = var_9535_pad_0, pad_type = var_9535_pad_type_0, strides = var_9531, weight = up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_565_cast)[name = tensor("op_9535_cast")]; + tensor var_9536_split_sizes_0 = const()[name = tensor("op_9536_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9536_axis_0 = const()[name = tensor("op_9536_axis_0"), val = tensor(1)]; + tensor var_9536_cast_0, tensor var_9536_cast_1 = split(axis = var_9536_axis_0, split_sizes = var_9536_split_sizes_0, x = var_9535_cast)[name = tensor("op_9536_cast")]; + tensor var_9538_mode_0 = const()[name = tensor("op_9538_mode_0"), val = tensor("EXACT")]; + tensor var_9538_cast = gelu(mode = var_9538_mode_0, x = var_9536_cast_1)[name = tensor("op_9538_cast")]; + tensor input_567_cast = mul(x = var_9536_cast_0, y = var_9538_cast)[name = tensor("input_567_cast")]; + tensor var_9542 = const()[name = tensor("op_9542"), val = tensor([1, 1])]; + tensor var_9544 = const()[name = tensor("op_9544"), val = tensor([1, 1])]; + tensor var_9546_pad_type_0 = const()[name = tensor("op_9546_pad_type_0"), val = tensor("custom")]; + tensor var_9546_pad_0 = const()[name = tensor("op_9546_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(925980992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(930896256))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset 
= tensor(930896448)))]; + tensor var_9546_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_9544, groups = var_6865, pad = var_9546_pad_0, pad_type = var_9546_pad_type_0, strides = var_9542, weight = up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_567_cast)[name = tensor("op_9546_cast")]; + tensor inputs_289_cast = add(x = var_9546_cast, y = inputs_287_cast)[name = tensor("inputs_289_cast")]; + tensor var_9556 = const()[name = tensor("op_9556"), val = tensor([1])]; + tensor channels_mean_289_cast = reduce_mean(axes = var_9556, keep_dims = var_6860, x = inputs_289_cast)[name = tensor("channels_mean_289_cast")]; + tensor zero_mean_289_cast = sub(x = inputs_289_cast, y = channels_mean_289_cast)[name = tensor("zero_mean_289_cast")]; + tensor zero_mean_sq_289_cast = mul(x = zero_mean_289_cast, y = zero_mean_289_cast)[name = tensor("zero_mean_sq_289_cast")]; + tensor var_9560 = const()[name = tensor("op_9560"), val = tensor([1])]; + tensor var_9561_cast = reduce_mean(axes = var_9560, keep_dims = var_6860, x = zero_mean_sq_289_cast)[name = tensor("op_9561_cast")]; + tensor var_9562_to_fp16 = const()[name = tensor("op_9562_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9563_cast = add(x = var_9561_cast, y = var_9562_to_fp16)[name = tensor("op_9563_cast")]; + tensor denom_289_epsilon_0_to_fp16 = const()[name = tensor("denom_289_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_289_cast = rsqrt(epsilon = denom_289_epsilon_0_to_fp16, x = var_9563_cast)[name = tensor("denom_289_cast")]; + tensor out_289_cast = mul(x = zero_mean_289_cast, y = denom_289_cast)[name = tensor("out_289_cast")]; + tensor var_9567_to_fp16 = const()[name = tensor("op_9567_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(930899072)))]; + tensor var_9568_cast = add(x = out_289_cast, y = var_9567_to_fp16)[name = tensor("op_9568_cast")]; + tensor var_9570_to_fp16 = const()[name = tensor("op_9570_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(930901696)))]; + tensor hidden_states_385_cast = mul(x = var_9568_cast, y = var_9570_to_fp16)[name = tensor("hidden_states_385_cast")]; + tensor var_9577 = const()[name = tensor("op_9577"), val = tensor([1, 1])]; + tensor var_9579 = const()[name = tensor("op_9579"), val = tensor([1, 1])]; + tensor q_193_pad_type_0 = const()[name = tensor("q_193_pad_type_0"), val = tensor("custom")]; + tensor q_193_pad_0 = const()[name = tensor("q_193_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(930904320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(931723584))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_193_cast = conv(dilations = var_9579, groups = var_6865, pad = q_193_pad_0, pad_type = q_193_pad_type_0, strides = var_9577, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_385_cast)[name = tensor("q_193_cast")]; + tensor var_9583 = const()[name = tensor("op_9583"), val = tensor([1, 1])]; + tensor var_9585 = const()[name = tensor("op_9585"), val = tensor([1, 1])]; + tensor k_193_pad_type_0 = 
const()[name = tensor("k_193_pad_type_0"), val = tensor("custom")]; + tensor k_193_pad_0 = const()[name = tensor("k_193_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(931723712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(932542976))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_193_cast = conv(dilations = var_9585, groups = var_6865, pad = k_193_pad_0, pad_type = k_193_pad_type_0, strides = var_9583, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_385_cast)[name = tensor("k_193_cast")]; + tensor var_9589 = const()[name = tensor("op_9589"), val = tensor([1, 1])]; + tensor var_9591 = const()[name = tensor("op_9591"), val = tensor([1, 1])]; + tensor v_193_pad_type_0 = const()[name = tensor("v_193_pad_type_0"), val = tensor("custom")]; + tensor v_193_pad_0 = const()[name = tensor("v_193_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(932543104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(933771968))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_193_cast = conv(dilations = var_9591, groups = var_6865, pad = v_193_pad_0, pad_type = v_193_pad_type_0, strides = var_9589, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_385_cast)[name = tensor("v_193_cast")]; + tensor var_9595 = const()[name = tensor("op_9595"), val = tensor([2, 20, 64, -1])]; + tensor var_9596_cast = reshape(shape = var_9595, x = q_193_cast)[name = tensor("op_9596_cast")]; + tensor var_9597 = const()[name = tensor("op_9597"), val = tensor([2, 20, 64, -1])]; + tensor var_9598_cast = reshape(shape = var_9597, x = k_193_cast)[name = tensor("op_9598_cast")]; + tensor var_9599 = const()[name = tensor("op_9599"), val = tensor([2, 20, 64, -1])]; + tensor var_9600_cast = reshape(shape = var_9599, x = v_193_cast)[name = tensor("op_9600_cast")]; + tensor attn_weights_385_transpose_x_0 = const()[name = tensor("attn_weights_385_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_385_transpose_y_0 = const()[name = tensor("attn_weights_385_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_385_cast = matmul(transpose_x = attn_weights_385_transpose_x_0, transpose_y = attn_weights_385_transpose_y_0, x = var_9596_cast, y = var_9598_cast)[name = tensor("attn_weights_385_cast")]; + tensor attn_weights_387_cast = mul(x = attn_weights_385_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_387_cast")]; + tensor var_9604_cast = softmax(axis = var_6849, x = attn_weights_387_cast)[name = tensor("op_9604_cast")]; + tensor attn_193_transpose_x_0 = const()[name = tensor("attn_193_transpose_x_0"), val = tensor(false)]; + tensor attn_193_transpose_y_0 = const()[name = tensor("attn_193_transpose_y_0"), val = tensor(true)]; + tensor attn_193_cast = matmul(transpose_x = attn_193_transpose_x_0, transpose_y = 
attn_193_transpose_y_0, x = var_9600_cast, y = var_9604_cast)[name = tensor("attn_193_cast")]; + tensor var_9608 = const()[name = tensor("op_9608"), val = tensor([2, 1280, 1, -1])]; + tensor input_569_cast = reshape(shape = var_9608, x = attn_193_cast)[name = tensor("input_569_cast")]; + tensor var_9613 = const()[name = tensor("op_9613"), val = tensor([1, 1])]; + tensor var_9615 = const()[name = tensor("op_9615"), val = tensor([1, 1])]; + tensor var_9617_pad_type_0 = const()[name = tensor("op_9617_pad_type_0"), val = tensor("custom")]; + tensor var_9617_pad_0 = const()[name = tensor("op_9617_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(933772160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935001024))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935001216)))]; + tensor var_9617_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_9615, groups = var_6865, pad = var_9617_pad_0, pad_type = var_9617_pad_type_0, strides = var_9613, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_569_cast)[name = tensor("op_9617_cast")]; + tensor inputs_291_cast = add(x = var_9617_cast, y = inputs_289_cast)[name = tensor("inputs_291_cast")]; + tensor var_9621 = const()[name = tensor("op_9621"), val = tensor([1])]; + tensor channels_mean_291_cast = reduce_mean(axes = var_9621, keep_dims = var_6860, x = inputs_291_cast)[name = tensor("channels_mean_291_cast")]; + tensor zero_mean_291_cast = sub(x = inputs_291_cast, y = channels_mean_291_cast)[name = tensor("zero_mean_291_cast")]; + tensor zero_mean_sq_291_cast = mul(x = zero_mean_291_cast, y = zero_mean_291_cast)[name = tensor("zero_mean_sq_291_cast")]; + tensor var_9625 = const()[name = tensor("op_9625"), val = tensor([1])]; + tensor var_9626_cast = reduce_mean(axes = var_9625, keep_dims = var_6860, x = zero_mean_sq_291_cast)[name = tensor("op_9626_cast")]; + tensor var_9627_to_fp16 = const()[name = tensor("op_9627_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9628_cast = add(x = var_9626_cast, y = var_9627_to_fp16)[name = tensor("op_9628_cast")]; + tensor denom_291_epsilon_0_to_fp16 = const()[name = tensor("denom_291_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_291_cast = rsqrt(epsilon = denom_291_epsilon_0_to_fp16, x = var_9628_cast)[name = tensor("denom_291_cast")]; + tensor out_291_cast = mul(x = zero_mean_291_cast, y = denom_291_cast)[name = tensor("out_291_cast")]; + tensor var_9632_to_fp16 = const()[name = tensor("op_9632_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935003840)))]; + tensor var_9633_cast = add(x = out_291_cast, y = var_9632_to_fp16)[name = tensor("op_9633_cast")]; + tensor var_9635_to_fp16 = const()[name = tensor("op_9635_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935006464)))]; + tensor 
hidden_states_387_cast = mul(x = var_9633_cast, y = var_9635_to_fp16)[name = tensor("hidden_states_387_cast")]; + tensor var_9642 = const()[name = tensor("op_9642"), val = tensor([1, 1])]; + tensor var_9644 = const()[name = tensor("op_9644"), val = tensor([1, 1])]; + tensor q_195_pad_type_0 = const()[name = tensor("q_195_pad_type_0"), val = tensor("custom")]; + tensor q_195_pad_0 = const()[name = tensor("q_195_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935009088))), lut = tensor([-0x1.264p-5, -0x1.604p-7, 0x1.658p-7, 0x1.278p-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_195_cast = conv(dilations = var_9644, groups = var_6865, pad = q_195_pad_0, pad_type = q_195_pad_type_0, strides = var_9642, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_387_cast)[name = tensor("q_195_cast")]; + tensor var_9648 = const()[name = tensor("op_9648"), val = tensor([1, 1])]; + tensor var_9650 = const()[name = tensor("op_9650"), val = tensor([1, 1])]; + tensor k_195_pad_type_0 = const()[name = tensor("k_195_pad_type_0"), val = tensor("custom")]; + tensor k_195_pad_0 = const()[name = tensor("k_195_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(935418752))), lut = tensor([-0x1.fa4p-6, -0x1.2dcp-7, 0x1.274p-7, 0x1.f7p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_195_cast = conv(dilations = var_9650, groups = var_6865, pad = k_195_pad_0, pad_type = k_195_pad_type_0, strides = var_9648, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_195_cast")]; + tensor var_9654 = const()[name = tensor("op_9654"), val = tensor([1, 1])]; + tensor var_9656 = const()[name = tensor("op_9656"), val = tensor([1, 1])]; + tensor v_195_pad_type_0 = const()[name = tensor("v_195_pad_type_0"), val = tensor("custom")]; + tensor v_195_pad_0 = const()[name = tensor("v_195_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(936074176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(937384960))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_195_cast = conv(dilations = var_9656, groups = var_6865, pad = v_195_pad_0, pad_type = v_195_pad_type_0, strides = var_9654, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_195_cast")]; + tensor var_9660 = const()[name = tensor("op_9660"), val = tensor([2, 20, 64, -1])]; + tensor var_9661_cast = reshape(shape = var_9660, x = q_195_cast)[name = tensor("op_9661_cast")]; + tensor var_9662 = const()[name = tensor("op_9662"), val = 
tensor([2, 20, 64, -1])]; + tensor var_9663_cast = reshape(shape = var_9662, x = k_195_cast)[name = tensor("op_9663_cast")]; + tensor var_9664 = const()[name = tensor("op_9664"), val = tensor([2, 20, 64, -1])]; + tensor var_9665_cast = reshape(shape = var_9664, x = v_195_cast)[name = tensor("op_9665_cast")]; + tensor attn_weights_389_transpose_x_0 = const()[name = tensor("attn_weights_389_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_389_transpose_y_0 = const()[name = tensor("attn_weights_389_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_389_cast = matmul(transpose_x = attn_weights_389_transpose_x_0, transpose_y = attn_weights_389_transpose_y_0, x = var_9661_cast, y = var_9663_cast)[name = tensor("attn_weights_389_cast")]; + tensor attn_weights_391_cast = mul(x = attn_weights_389_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_391_cast")]; + tensor var_9669_cast = softmax(axis = var_6849, x = attn_weights_391_cast)[name = tensor("op_9669_cast")]; + tensor attn_195_transpose_x_0 = const()[name = tensor("attn_195_transpose_x_0"), val = tensor(false)]; + tensor attn_195_transpose_y_0 = const()[name = tensor("attn_195_transpose_y_0"), val = tensor(true)]; + tensor attn_195_cast = matmul(transpose_x = attn_195_transpose_x_0, transpose_y = attn_195_transpose_y_0, x = var_9665_cast, y = var_9669_cast)[name = tensor("attn_195_cast")]; + tensor var_9673 = const()[name = tensor("op_9673"), val = tensor([2, 1280, 1, -1])]; + tensor input_571_cast = reshape(shape = var_9673, x = attn_195_cast)[name = tensor("input_571_cast")]; + tensor var_9678 = const()[name = tensor("op_9678"), val = tensor([1, 1])]; + tensor var_9680 = const()[name = tensor("op_9680"), val = tensor([1, 1])]; + tensor var_9682_pad_type_0 = const()[name = tensor("op_9682_pad_type_0"), val = tensor("custom")]; + tensor var_9682_pad_0 = const()[name = tensor("op_9682_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(937385088))), lut = tensor([-0x1.3a8p-6, -0x1.79p-8, 0x1.798p-8, 0x1.3bp-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(937794752)))]; + tensor var_9682_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_9680, groups = var_6865, pad = var_9682_pad_0, pad_type = var_9682_pad_type_0, strides = var_9678, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_571_cast)[name = tensor("op_9682_cast")]; + tensor inputs_293_cast = add(x = var_9682_cast, y = inputs_291_cast)[name = tensor("inputs_293_cast")]; + tensor var_9686 = const()[name = tensor("op_9686"), val = tensor([1])]; + tensor channels_mean_293_cast = reduce_mean(axes = var_9686, keep_dims = var_6860, x = inputs_293_cast)[name = tensor("channels_mean_293_cast")]; + tensor zero_mean_293_cast = sub(x = inputs_293_cast, y = channels_mean_293_cast)[name = tensor("zero_mean_293_cast")]; + tensor zero_mean_sq_293_cast = mul(x = zero_mean_293_cast, y = 
zero_mean_293_cast)[name = tensor("zero_mean_sq_293_cast")]; + tensor var_9690 = const()[name = tensor("op_9690"), val = tensor([1])]; + tensor var_9691_cast = reduce_mean(axes = var_9690, keep_dims = var_6860, x = zero_mean_sq_293_cast)[name = tensor("op_9691_cast")]; + tensor var_9692_to_fp16 = const()[name = tensor("op_9692_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9693_cast = add(x = var_9691_cast, y = var_9692_to_fp16)[name = tensor("op_9693_cast")]; + tensor denom_293_epsilon_0_to_fp16 = const()[name = tensor("denom_293_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_293_cast = rsqrt(epsilon = denom_293_epsilon_0_to_fp16, x = var_9693_cast)[name = tensor("denom_293_cast")]; + tensor out_293_cast = mul(x = zero_mean_293_cast, y = denom_293_cast)[name = tensor("out_293_cast")]; + tensor var_9697_to_fp16 = const()[name = tensor("op_9697_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(937797376)))]; + tensor var_9698_cast = add(x = out_293_cast, y = var_9697_to_fp16)[name = tensor("op_9698_cast")]; + tensor var_9700_to_fp16 = const()[name = tensor("op_9700_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(937800000)))]; + tensor input_573_cast = mul(x = var_9698_cast, y = var_9700_to_fp16)[name = tensor("input_573_cast")]; + tensor var_9708 = const()[name = tensor("op_9708"), val = tensor([1, 1])]; + tensor var_9710 = const()[name = tensor("op_9710"), val = tensor([1, 1])]; + tensor var_9712_pad_type_0 = const()[name = tensor("op_9712_pad_type_0"), val = tensor("custom")]; + tensor var_9712_pad_0 = const()[name = tensor("op_9712_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(937802624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(947633088))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(947633280)))]; + tensor var_9712_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_9710, groups = var_6865, pad = var_9712_pad_0, pad_type = var_9712_pad_type_0, strides = var_9708, weight = up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_573_cast)[name = tensor("op_9712_cast")]; + tensor var_9713_split_sizes_0 = const()[name = tensor("op_9713_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9713_axis_0 = const()[name = tensor("op_9713_axis_0"), val = tensor(1)]; + tensor var_9713_cast_0, tensor var_9713_cast_1 = split(axis = var_9713_axis_0, split_sizes = var_9713_split_sizes_0, x = var_9712_cast)[name = tensor("op_9713_cast")]; + tensor var_9715_mode_0 = const()[name = tensor("op_9715_mode_0"), val = tensor("EXACT")]; + tensor var_9715_cast = gelu(mode = var_9715_mode_0, x = var_9713_cast_1)[name = tensor("op_9715_cast")]; + tensor input_575_cast = mul(x = var_9713_cast_0, y = var_9715_cast)[name = tensor("input_575_cast")]; + tensor var_9719 = const()[name = 
tensor("op_9719"), val = tensor([1, 1])]; + tensor var_9721 = const()[name = tensor("op_9721"), val = tensor([1, 1])]; + tensor var_9723_pad_type_0 = const()[name = tensor("op_9723_pad_type_0"), val = tensor("custom")]; + tensor var_9723_pad_0 = const()[name = tensor("op_9723_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(947653824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(952569088))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(952569280)))]; + tensor var_9723_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_9721, groups = var_6865, pad = var_9723_pad_0, pad_type = var_9723_pad_type_0, strides = var_9719, weight = up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_575_cast)[name = tensor("op_9723_cast")]; + tensor inputs_295_cast = add(x = var_9723_cast, y = inputs_293_cast)[name = tensor("inputs_295_cast")]; + tensor var_9733 = const()[name = tensor("op_9733"), val = tensor([1])]; + tensor channels_mean_295_cast = reduce_mean(axes = var_9733, keep_dims = var_6860, x = inputs_295_cast)[name = tensor("channels_mean_295_cast")]; + tensor zero_mean_295_cast = sub(x = inputs_295_cast, y = channels_mean_295_cast)[name = tensor("zero_mean_295_cast")]; + tensor zero_mean_sq_295_cast = mul(x = zero_mean_295_cast, y = zero_mean_295_cast)[name = tensor("zero_mean_sq_295_cast")]; + tensor var_9737 = const()[name = tensor("op_9737"), val = tensor([1])]; + tensor var_9738_cast = reduce_mean(axes = var_9737, keep_dims = var_6860, x = zero_mean_sq_295_cast)[name = tensor("op_9738_cast")]; + tensor var_9739_to_fp16 = const()[name = tensor("op_9739_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9740_cast = add(x = var_9738_cast, y = var_9739_to_fp16)[name = tensor("op_9740_cast")]; + tensor denom_295_epsilon_0_to_fp16 = const()[name = tensor("denom_295_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_295_cast = rsqrt(epsilon = denom_295_epsilon_0_to_fp16, x = var_9740_cast)[name = tensor("denom_295_cast")]; + tensor out_295_cast = mul(x = zero_mean_295_cast, y = denom_295_cast)[name = tensor("out_295_cast")]; + tensor var_9744_to_fp16 = const()[name = tensor("op_9744_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(952571904)))]; + tensor var_9745_cast = add(x = out_295_cast, y = var_9744_to_fp16)[name = tensor("op_9745_cast")]; + tensor var_9747_to_fp16 = const()[name = tensor("op_9747_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(952574528)))]; + tensor hidden_states_391_cast = mul(x = var_9745_cast, y = var_9747_to_fp16)[name = tensor("hidden_states_391_cast")]; + tensor var_9754 = const()[name = tensor("op_9754"), val = tensor([1, 1])]; + tensor var_9756 = const()[name = tensor("op_9756"), val = tensor([1, 1])]; + tensor q_197_pad_type_0 = const()[name = tensor("q_197_pad_type_0"), val = 
tensor("custom")]; + tensor q_197_pad_0 = const()[name = tensor("q_197_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(952577152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953396416))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_197_cast = conv(dilations = var_9756, groups = var_6865, pad = q_197_pad_0, pad_type = q_197_pad_type_0, strides = var_9754, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_391_cast)[name = tensor("q_197_cast")]; + tensor var_9760 = const()[name = tensor("op_9760"), val = tensor([1, 1])]; + tensor var_9762 = const()[name = tensor("op_9762"), val = tensor([1, 1])]; + tensor k_197_pad_type_0 = const()[name = tensor("k_197_pad_type_0"), val = tensor("custom")]; + tensor k_197_pad_0 = const()[name = tensor("k_197_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(953396544))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(954215808))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_197_cast = conv(dilations = var_9762, groups = var_6865, pad = k_197_pad_0, pad_type = k_197_pad_type_0, strides = var_9760, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_391_cast)[name = tensor("k_197_cast")]; + tensor var_9766 = const()[name = tensor("op_9766"), val = tensor([1, 1])]; + tensor var_9768 = const()[name = tensor("op_9768"), val = tensor([1, 1])]; + tensor v_197_pad_type_0 = const()[name = tensor("v_197_pad_type_0"), val = tensor("custom")]; + tensor v_197_pad_0 = const()[name = tensor("v_197_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(954215936))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(955444800))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_197_cast = conv(dilations = var_9768, groups = var_6865, pad = v_197_pad_0, pad_type = v_197_pad_type_0, strides = var_9766, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_391_cast)[name = tensor("v_197_cast")]; + tensor var_9772 = const()[name = tensor("op_9772"), val = tensor([2, 20, 64, -1])]; + tensor var_9773_cast = reshape(shape = var_9772, x = q_197_cast)[name = tensor("op_9773_cast")]; + tensor var_9774 = const()[name = tensor("op_9774"), val = tensor([2, 20, 64, -1])]; + tensor var_9775_cast = reshape(shape = var_9774, x = k_197_cast)[name = tensor("op_9775_cast")]; + tensor var_9776 = const()[name = tensor("op_9776"), val = tensor([2, 20, 64, -1])]; + tensor var_9777_cast = reshape(shape = var_9776, x = 
v_197_cast)[name = tensor("op_9777_cast")]; + tensor attn_weights_393_transpose_x_0 = const()[name = tensor("attn_weights_393_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_393_transpose_y_0 = const()[name = tensor("attn_weights_393_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_393_cast = matmul(transpose_x = attn_weights_393_transpose_x_0, transpose_y = attn_weights_393_transpose_y_0, x = var_9773_cast, y = var_9775_cast)[name = tensor("attn_weights_393_cast")]; + tensor attn_weights_395_cast = mul(x = attn_weights_393_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_395_cast")]; + tensor var_9781_cast = softmax(axis = var_6849, x = attn_weights_395_cast)[name = tensor("op_9781_cast")]; + tensor attn_197_transpose_x_0 = const()[name = tensor("attn_197_transpose_x_0"), val = tensor(false)]; + tensor attn_197_transpose_y_0 = const()[name = tensor("attn_197_transpose_y_0"), val = tensor(true)]; + tensor attn_197_cast = matmul(transpose_x = attn_197_transpose_x_0, transpose_y = attn_197_transpose_y_0, x = var_9777_cast, y = var_9781_cast)[name = tensor("attn_197_cast")]; + tensor var_9785 = const()[name = tensor("op_9785"), val = tensor([2, 1280, 1, -1])]; + tensor input_577_cast = reshape(shape = var_9785, x = attn_197_cast)[name = tensor("input_577_cast")]; + tensor var_9790 = const()[name = tensor("op_9790"), val = tensor([1, 1])]; + tensor var_9792 = const()[name = tensor("op_9792"), val = tensor([1, 1])]; + tensor var_9794_pad_type_0 = const()[name = tensor("op_9794_pad_type_0"), val = tensor("custom")]; + tensor var_9794_pad_0 = const()[name = tensor("op_9794_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(955444992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956673856))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956674048)))]; + tensor var_9794_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_9792, groups = var_6865, pad = var_9794_pad_0, pad_type = var_9794_pad_type_0, strides = var_9790, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_577_cast)[name = tensor("op_9794_cast")]; + tensor inputs_297_cast = add(x = var_9794_cast, y = inputs_295_cast)[name = tensor("inputs_297_cast")]; + tensor var_9798 = const()[name = tensor("op_9798"), val = tensor([1])]; + tensor channels_mean_297_cast = reduce_mean(axes = var_9798, keep_dims = var_6860, x = inputs_297_cast)[name = tensor("channels_mean_297_cast")]; + tensor zero_mean_297_cast = sub(x = inputs_297_cast, y = channels_mean_297_cast)[name = tensor("zero_mean_297_cast")]; + tensor zero_mean_sq_297_cast = mul(x = zero_mean_297_cast, y = zero_mean_297_cast)[name = tensor("zero_mean_sq_297_cast")]; + tensor var_9802 = const()[name = tensor("op_9802"), val = tensor([1])]; + tensor var_9803_cast = reduce_mean(axes = var_9802, keep_dims = var_6860, x = zero_mean_sq_297_cast)[name = 
tensor("op_9803_cast")]; + tensor var_9804_to_fp16 = const()[name = tensor("op_9804_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9805_cast = add(x = var_9803_cast, y = var_9804_to_fp16)[name = tensor("op_9805_cast")]; + tensor denom_297_epsilon_0_to_fp16 = const()[name = tensor("denom_297_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_297_cast = rsqrt(epsilon = denom_297_epsilon_0_to_fp16, x = var_9805_cast)[name = tensor("denom_297_cast")]; + tensor out_297_cast = mul(x = zero_mean_297_cast, y = denom_297_cast)[name = tensor("out_297_cast")]; + tensor var_9809_to_fp16 = const()[name = tensor("op_9809_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956676672)))]; + tensor var_9810_cast = add(x = out_297_cast, y = var_9809_to_fp16)[name = tensor("op_9810_cast")]; + tensor var_9812_to_fp16 = const()[name = tensor("op_9812_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956679296)))]; + tensor hidden_states_393_cast = mul(x = var_9810_cast, y = var_9812_to_fp16)[name = tensor("hidden_states_393_cast")]; + tensor var_9819 = const()[name = tensor("op_9819"), val = tensor([1, 1])]; + tensor var_9821 = const()[name = tensor("op_9821"), val = tensor([1, 1])]; + tensor q_199_pad_type_0 = const()[name = tensor("q_199_pad_type_0"), val = tensor("custom")]; + tensor q_199_pad_0 = const()[name = tensor("q_199_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(956681920))), lut = tensor([-0x1.164p-5, -0x1.4ecp-7, 0x1.5p-7, 0x1.16p-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_199_cast = conv(dilations = var_9821, groups = var_6865, pad = q_199_pad_0, pad_type = q_199_pad_type_0, strides = var_9819, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_393_cast)[name = tensor("q_199_cast")]; + tensor var_9825 = const()[name = tensor("op_9825"), val = tensor([1, 1])]; + tensor var_9827 = const()[name = tensor("op_9827"), val = tensor([1, 1])]; + tensor k_199_pad_type_0 = const()[name = tensor("k_199_pad_type_0"), val = tensor("custom")]; + tensor k_199_pad_0 = const()[name = tensor("k_199_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(957091584))), lut = tensor([-0x1.cfcp-6, -0x1.12p-7, 0x1.0f8p-7, 0x1.cep-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_199_cast = conv(dilations = var_9827, groups = var_6865, pad = k_199_pad_0, pad_type = k_199_pad_type_0, strides = var_9825, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_199_cast")]; + tensor var_9831 = const()[name = tensor("op_9831"), val = tensor([1, 1])]; + tensor var_9833 = const()[name = tensor("op_9833"), val = tensor([1, 1])]; + tensor v_199_pad_type_0 = const()[name = tensor("v_199_pad_type_0"), val = tensor("custom")]; + tensor v_199_pad_0 = const()[name = tensor("v_199_pad_0"), val = 
tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(957747008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(959057792))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_199_cast = conv(dilations = var_9833, groups = var_6865, pad = v_199_pad_0, pad_type = v_199_pad_type_0, strides = var_9831, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_199_cast")]; + tensor var_9837 = const()[name = tensor("op_9837"), val = tensor([2, 20, 64, -1])]; + tensor var_9838_cast = reshape(shape = var_9837, x = q_199_cast)[name = tensor("op_9838_cast")]; + tensor var_9839 = const()[name = tensor("op_9839"), val = tensor([2, 20, 64, -1])]; + tensor var_9840_cast = reshape(shape = var_9839, x = k_199_cast)[name = tensor("op_9840_cast")]; + tensor var_9841 = const()[name = tensor("op_9841"), val = tensor([2, 20, 64, -1])]; + tensor var_9842_cast = reshape(shape = var_9841, x = v_199_cast)[name = tensor("op_9842_cast")]; + tensor attn_weights_397_transpose_x_0 = const()[name = tensor("attn_weights_397_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_397_transpose_y_0 = const()[name = tensor("attn_weights_397_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_397_cast = matmul(transpose_x = attn_weights_397_transpose_x_0, transpose_y = attn_weights_397_transpose_y_0, x = var_9838_cast, y = var_9840_cast)[name = tensor("attn_weights_397_cast")]; + tensor attn_weights_399_cast = mul(x = attn_weights_397_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_399_cast")]; + tensor var_9846_cast = softmax(axis = var_6849, x = attn_weights_399_cast)[name = tensor("op_9846_cast")]; + tensor attn_199_transpose_x_0 = const()[name = tensor("attn_199_transpose_x_0"), val = tensor(false)]; + tensor attn_199_transpose_y_0 = const()[name = tensor("attn_199_transpose_y_0"), val = tensor(true)]; + tensor attn_199_cast = matmul(transpose_x = attn_199_transpose_x_0, transpose_y = attn_199_transpose_y_0, x = var_9842_cast, y = var_9846_cast)[name = tensor("attn_199_cast")]; + tensor var_9850 = const()[name = tensor("op_9850"), val = tensor([2, 1280, 1, -1])]; + tensor input_579_cast = reshape(shape = var_9850, x = attn_199_cast)[name = tensor("input_579_cast")]; + tensor var_9855 = const()[name = tensor("op_9855"), val = tensor([1, 1])]; + tensor var_9857 = const()[name = tensor("op_9857"), val = tensor([1, 1])]; + tensor var_9859_pad_type_0 = const()[name = tensor("op_9859_pad_type_0"), val = tensor("custom")]; + tensor var_9859_pad_0 = const()[name = tensor("op_9859_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(959057920))), lut = tensor([-0x1.2b8p-6, -0x1.664p-8, 0x1.644p-8, 0x1.2b4p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(959467584)))]; + tensor var_9859_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_9857, groups = var_6865, pad = var_9859_pad_0, pad_type = var_9859_pad_type_0, strides = var_9855, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_579_cast)[name = tensor("op_9859_cast")]; + tensor inputs_299_cast = add(x = var_9859_cast, y = inputs_297_cast)[name = tensor("inputs_299_cast")]; + tensor var_9863 = const()[name = tensor("op_9863"), val = tensor([1])]; + tensor channels_mean_299_cast = reduce_mean(axes = var_9863, keep_dims = var_6860, x = inputs_299_cast)[name = tensor("channels_mean_299_cast")]; + tensor zero_mean_299_cast = sub(x = inputs_299_cast, y = channels_mean_299_cast)[name = tensor("zero_mean_299_cast")]; + tensor zero_mean_sq_299_cast = mul(x = zero_mean_299_cast, y = zero_mean_299_cast)[name = tensor("zero_mean_sq_299_cast")]; + tensor var_9867 = const()[name = tensor("op_9867"), val = tensor([1])]; + tensor var_9868_cast = reduce_mean(axes = var_9867, keep_dims = var_6860, x = zero_mean_sq_299_cast)[name = tensor("op_9868_cast")]; + tensor var_9869_to_fp16 = const()[name = tensor("op_9869_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9870_cast = add(x = var_9868_cast, y = var_9869_to_fp16)[name = tensor("op_9870_cast")]; + tensor denom_299_epsilon_0_to_fp16 = const()[name = tensor("denom_299_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_299_cast = rsqrt(epsilon = denom_299_epsilon_0_to_fp16, x = var_9870_cast)[name = tensor("denom_299_cast")]; + tensor out_299_cast = mul(x = zero_mean_299_cast, y = denom_299_cast)[name = tensor("out_299_cast")]; + tensor var_9874_to_fp16 = const()[name = tensor("op_9874_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(959470208)))]; + tensor var_9875_cast = add(x = out_299_cast, y = var_9874_to_fp16)[name = tensor("op_9875_cast")]; + tensor var_9877_to_fp16 = const()[name = tensor("op_9877_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(959472832)))]; + tensor input_581_cast = mul(x = var_9875_cast, y = var_9877_to_fp16)[name = tensor("input_581_cast")]; + tensor var_9885 = const()[name = tensor("op_9885"), val = tensor([1, 1])]; + tensor var_9887 = const()[name = tensor("op_9887"), val = tensor([1, 1])]; + tensor var_9889_pad_type_0 = const()[name = tensor("op_9889_pad_type_0"), val = tensor("custom")]; + tensor var_9889_pad_0 = const()[name = tensor("op_9889_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(959475456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(969305920))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(969306112)))]; + tensor 
var_9889_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_9887, groups = var_6865, pad = var_9889_pad_0, pad_type = var_9889_pad_type_0, strides = var_9885, weight = up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_581_cast)[name = tensor("op_9889_cast")]; + tensor var_9890_split_sizes_0 = const()[name = tensor("op_9890_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9890_axis_0 = const()[name = tensor("op_9890_axis_0"), val = tensor(1)]; + tensor var_9890_cast_0, tensor var_9890_cast_1 = split(axis = var_9890_axis_0, split_sizes = var_9890_split_sizes_0, x = var_9889_cast)[name = tensor("op_9890_cast")]; + tensor var_9892_mode_0 = const()[name = tensor("op_9892_mode_0"), val = tensor("EXACT")]; + tensor var_9892_cast = gelu(mode = var_9892_mode_0, x = var_9890_cast_1)[name = tensor("op_9892_cast")]; + tensor input_583_cast = mul(x = var_9890_cast_0, y = var_9892_cast)[name = tensor("input_583_cast")]; + tensor var_9896 = const()[name = tensor("op_9896"), val = tensor([1, 1])]; + tensor var_9898 = const()[name = tensor("op_9898"), val = tensor([1, 1])]; + tensor var_9900_pad_type_0 = const()[name = tensor("op_9900_pad_type_0"), val = tensor("custom")]; + tensor var_9900_pad_0 = const()[name = tensor("op_9900_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(969326656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(974241920))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(974242112)))]; + tensor var_9900_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_9898, groups = var_6865, pad = var_9900_pad_0, pad_type = var_9900_pad_type_0, strides = var_9896, weight = up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_583_cast)[name = tensor("op_9900_cast")]; + tensor inputs_301_cast = add(x = var_9900_cast, y = inputs_299_cast)[name = tensor("inputs_301_cast")]; + tensor var_9910 = const()[name = tensor("op_9910"), val = tensor([1])]; + tensor channels_mean_301_cast = reduce_mean(axes = var_9910, keep_dims = var_6860, x = inputs_301_cast)[name = tensor("channels_mean_301_cast")]; + tensor zero_mean_301_cast = sub(x = inputs_301_cast, y = channels_mean_301_cast)[name = tensor("zero_mean_301_cast")]; + tensor zero_mean_sq_301_cast = mul(x = zero_mean_301_cast, y = zero_mean_301_cast)[name = tensor("zero_mean_sq_301_cast")]; + tensor var_9914 = const()[name = tensor("op_9914"), val = tensor([1])]; + tensor var_9915_cast = reduce_mean(axes = var_9914, keep_dims = var_6860, x = zero_mean_sq_301_cast)[name = tensor("op_9915_cast")]; + tensor var_9916_to_fp16 = const()[name = tensor("op_9916_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9917_cast = add(x = var_9915_cast, y = var_9916_to_fp16)[name = tensor("op_9917_cast")]; + tensor denom_301_epsilon_0_to_fp16 = const()[name = 
tensor("denom_301_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_301_cast = rsqrt(epsilon = denom_301_epsilon_0_to_fp16, x = var_9917_cast)[name = tensor("denom_301_cast")]; + tensor out_301_cast = mul(x = zero_mean_301_cast, y = denom_301_cast)[name = tensor("out_301_cast")]; + tensor var_9921_to_fp16 = const()[name = tensor("op_9921_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(974244736)))]; + tensor var_9922_cast = add(x = out_301_cast, y = var_9921_to_fp16)[name = tensor("op_9922_cast")]; + tensor var_9924_to_fp16 = const()[name = tensor("op_9924_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(974247360)))]; + tensor hidden_states_397_cast = mul(x = var_9922_cast, y = var_9924_to_fp16)[name = tensor("hidden_states_397_cast")]; + tensor var_9931 = const()[name = tensor("op_9931"), val = tensor([1, 1])]; + tensor var_9933 = const()[name = tensor("op_9933"), val = tensor([1, 1])]; + tensor q_201_pad_type_0 = const()[name = tensor("q_201_pad_type_0"), val = tensor("custom")]; + tensor q_201_pad_0 = const()[name = tensor("q_201_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(974249984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(975069248))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_201_cast = conv(dilations = var_9933, groups = var_6865, pad = q_201_pad_0, pad_type = q_201_pad_type_0, strides = var_9931, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_397_cast)[name = tensor("q_201_cast")]; + tensor var_9937 = const()[name = tensor("op_9937"), val = tensor([1, 1])]; + tensor var_9939 = const()[name = tensor("op_9939"), val = tensor([1, 1])]; + tensor k_201_pad_type_0 = const()[name = tensor("k_201_pad_type_0"), val = tensor("custom")]; + tensor k_201_pad_0 = const()[name = tensor("k_201_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(975069376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(975888640))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_201_cast = conv(dilations = var_9939, groups = var_6865, pad = k_201_pad_0, pad_type = k_201_pad_type_0, strides = var_9937, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_397_cast)[name = tensor("k_201_cast")]; + tensor var_9943 = const()[name = tensor("op_9943"), val = tensor([1, 1])]; + tensor var_9945 = const()[name = tensor("op_9945"), val = tensor([1, 1])]; + tensor v_201_pad_type_0 = const()[name = tensor("v_201_pad_type_0"), val = tensor("custom")]; + tensor v_201_pad_0 = const()[name = tensor("v_201_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(975888768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(977117632))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_201_cast = conv(dilations = var_9945, groups = var_6865, pad = v_201_pad_0, pad_type = v_201_pad_type_0, strides = var_9943, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_397_cast)[name = tensor("v_201_cast")]; + tensor var_9949 = const()[name = tensor("op_9949"), val = tensor([2, 20, 64, -1])]; + tensor var_9950_cast = reshape(shape = var_9949, x = q_201_cast)[name = tensor("op_9950_cast")]; + tensor var_9951 = const()[name = tensor("op_9951"), val = tensor([2, 20, 64, -1])]; + tensor var_9952_cast = reshape(shape = var_9951, x = k_201_cast)[name = tensor("op_9952_cast")]; + tensor var_9953 = const()[name = tensor("op_9953"), val = tensor([2, 20, 64, -1])]; + tensor var_9954_cast = reshape(shape = var_9953, x = v_201_cast)[name = tensor("op_9954_cast")]; + tensor attn_weights_401_transpose_x_0 = const()[name = tensor("attn_weights_401_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_401_transpose_y_0 = const()[name = tensor("attn_weights_401_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_401_cast = matmul(transpose_x = attn_weights_401_transpose_x_0, transpose_y = attn_weights_401_transpose_y_0, x = var_9950_cast, y = var_9952_cast)[name = tensor("attn_weights_401_cast")]; + tensor attn_weights_403_cast = mul(x = attn_weights_401_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_403_cast")]; + tensor var_9958_cast = softmax(axis = var_6849, x = attn_weights_403_cast)[name = tensor("op_9958_cast")]; + tensor attn_201_transpose_x_0 = const()[name = tensor("attn_201_transpose_x_0"), val = tensor(false)]; + tensor attn_201_transpose_y_0 = const()[name = tensor("attn_201_transpose_y_0"), val = tensor(true)]; + tensor attn_201_cast = matmul(transpose_x = attn_201_transpose_x_0, transpose_y = attn_201_transpose_y_0, x = var_9954_cast, y = var_9958_cast)[name = tensor("attn_201_cast")]; + tensor var_9962 = const()[name = tensor("op_9962"), val = tensor([2, 1280, 1, -1])]; + tensor input_585_cast = reshape(shape = var_9962, x = attn_201_cast)[name = tensor("input_585_cast")]; + tensor var_9967 = const()[name = tensor("op_9967"), val = tensor([1, 1])]; + tensor var_9969 = const()[name = tensor("op_9969"), val = tensor([1, 1])]; + tensor var_9971_pad_type_0 = const()[name = tensor("op_9971_pad_type_0"), val = tensor("custom")]; + tensor var_9971_pad_0 = const()[name = tensor("op_9971_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(977117824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(978346688))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(978346880)))]; + tensor var_9971_cast = 
conv(bias = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_9969, groups = var_6865, pad = var_9971_pad_0, pad_type = var_9971_pad_type_0, strides = var_9967, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_585_cast)[name = tensor("op_9971_cast")]; + tensor inputs_303_cast = add(x = var_9971_cast, y = inputs_301_cast)[name = tensor("inputs_303_cast")]; + tensor var_9975 = const()[name = tensor("op_9975"), val = tensor([1])]; + tensor channels_mean_303_cast = reduce_mean(axes = var_9975, keep_dims = var_6860, x = inputs_303_cast)[name = tensor("channels_mean_303_cast")]; + tensor zero_mean_303_cast = sub(x = inputs_303_cast, y = channels_mean_303_cast)[name = tensor("zero_mean_303_cast")]; + tensor zero_mean_sq_303_cast = mul(x = zero_mean_303_cast, y = zero_mean_303_cast)[name = tensor("zero_mean_sq_303_cast")]; + tensor var_9979 = const()[name = tensor("op_9979"), val = tensor([1])]; + tensor var_9980_cast = reduce_mean(axes = var_9979, keep_dims = var_6860, x = zero_mean_sq_303_cast)[name = tensor("op_9980_cast")]; + tensor var_9981_to_fp16 = const()[name = tensor("op_9981_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9982_cast = add(x = var_9980_cast, y = var_9981_to_fp16)[name = tensor("op_9982_cast")]; + tensor denom_303_epsilon_0_to_fp16 = const()[name = tensor("denom_303_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_303_cast = rsqrt(epsilon = denom_303_epsilon_0_to_fp16, x = var_9982_cast)[name = tensor("denom_303_cast")]; + tensor out_303_cast = mul(x = zero_mean_303_cast, y = denom_303_cast)[name = tensor("out_303_cast")]; + tensor var_9986_to_fp16 = const()[name = tensor("op_9986_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(978349504)))]; + tensor var_9987_cast = add(x = out_303_cast, y = var_9986_to_fp16)[name = tensor("op_9987_cast")]; + tensor var_9989_to_fp16 = const()[name = tensor("op_9989_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(978352128)))]; + tensor hidden_states_399_cast = mul(x = var_9987_cast, y = var_9989_to_fp16)[name = tensor("hidden_states_399_cast")]; + tensor var_9996 = const()[name = tensor("op_9996"), val = tensor([1, 1])]; + tensor var_9998 = const()[name = tensor("op_9998"), val = tensor([1, 1])]; + tensor q_203_pad_type_0 = const()[name = tensor("q_203_pad_type_0"), val = tensor("custom")]; + tensor q_203_pad_0 = const()[name = tensor("q_203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(978354752))), lut = tensor([-0x1.048p-5, -0x1.3ap-7, 0x1.3cp-7, 0x1.05p-5]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_203_cast = conv(dilations = var_9998, groups = var_6865, pad = q_203_pad_0, pad_type = q_203_pad_type_0, strides = var_9996, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_399_cast)[name = tensor("q_203_cast")]; + tensor var_10002 = const()[name = tensor("op_10002"), val = tensor([1, 1])]; + tensor var_10004 = const()[name = tensor("op_10004"), val = tensor([1, 1])]; + tensor k_203_pad_type_0 = const()[name = tensor("k_203_pad_type_0"), val = tensor("custom")]; + 
tensor k_203_pad_0 = const()[name = tensor("k_203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(978764416))), lut = tensor([-0x1.ac4p-6, -0x1.f84p-8, 0x1.f88p-8, 0x1.ac4p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_203_cast = conv(dilations = var_10004, groups = var_6865, pad = k_203_pad_0, pad_type = k_203_pad_type_0, strides = var_10002, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_203_cast")]; + tensor var_10008 = const()[name = tensor("op_10008"), val = tensor([1, 1])]; + tensor var_10010 = const()[name = tensor("op_10010"), val = tensor([1, 1])]; + tensor v_203_pad_type_0 = const()[name = tensor("v_203_pad_type_0"), val = tensor("custom")]; + tensor v_203_pad_0 = const()[name = tensor("v_203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(979419840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(980730624))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_203_cast = conv(dilations = var_10010, groups = var_6865, pad = v_203_pad_0, pad_type = v_203_pad_type_0, strides = var_10008, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_203_cast")]; + tensor var_10014 = const()[name = tensor("op_10014"), val = tensor([2, 20, 64, -1])]; + tensor var_10015_cast = reshape(shape = var_10014, x = q_203_cast)[name = tensor("op_10015_cast")]; + tensor var_10016 = const()[name = tensor("op_10016"), val = tensor([2, 20, 64, -1])]; + tensor var_10017_cast = reshape(shape = var_10016, x = k_203_cast)[name = tensor("op_10017_cast")]; + tensor var_10018 = const()[name = tensor("op_10018"), val = tensor([2, 20, 64, -1])]; + tensor var_10019_cast = reshape(shape = var_10018, x = v_203_cast)[name = tensor("op_10019_cast")]; + tensor attn_weights_405_transpose_x_0 = const()[name = tensor("attn_weights_405_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_405_transpose_y_0 = const()[name = tensor("attn_weights_405_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_405_cast = matmul(transpose_x = attn_weights_405_transpose_x_0, transpose_y = attn_weights_405_transpose_y_0, x = var_10015_cast, y = var_10017_cast)[name = tensor("attn_weights_405_cast")]; + tensor attn_weights_407_cast = mul(x = attn_weights_405_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_407_cast")]; + tensor var_10023_cast = softmax(axis = var_6849, x = attn_weights_407_cast)[name = tensor("op_10023_cast")]; + tensor attn_203_transpose_x_0 = const()[name = tensor("attn_203_transpose_x_0"), val = tensor(false)]; + tensor attn_203_transpose_y_0 = const()[name = tensor("attn_203_transpose_y_0"), val = tensor(true)]; + tensor attn_203_cast = matmul(transpose_x = attn_203_transpose_x_0, transpose_y = attn_203_transpose_y_0, x = var_10019_cast, y = var_10023_cast)[name = tensor("attn_203_cast")]; + 
tensor var_10027 = const()[name = tensor("op_10027"), val = tensor([2, 1280, 1, -1])]; + tensor input_587_cast = reshape(shape = var_10027, x = attn_203_cast)[name = tensor("input_587_cast")]; + tensor var_10032 = const()[name = tensor("op_10032"), val = tensor([1, 1])]; + tensor var_10034 = const()[name = tensor("op_10034"), val = tensor([1, 1])]; + tensor var_10036_pad_type_0 = const()[name = tensor("op_10036_pad_type_0"), val = tensor("custom")]; + tensor var_10036_pad_0 = const()[name = tensor("op_10036_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(980730752))), lut = tensor([-0x1.1bcp-6, -0x1.55p-8, 0x1.52p-8, 0x1.1acp-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(981140416)))]; + tensor var_10036_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_10034, groups = var_6865, pad = var_10036_pad_0, pad_type = var_10036_pad_type_0, strides = var_10032, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_587_cast)[name = tensor("op_10036_cast")]; + tensor inputs_305_cast = add(x = var_10036_cast, y = inputs_303_cast)[name = tensor("inputs_305_cast")]; + tensor var_10040 = const()[name = tensor("op_10040"), val = tensor([1])]; + tensor channels_mean_305_cast = reduce_mean(axes = var_10040, keep_dims = var_6860, x = inputs_305_cast)[name = tensor("channels_mean_305_cast")]; + tensor zero_mean_305_cast = sub(x = inputs_305_cast, y = channels_mean_305_cast)[name = tensor("zero_mean_305_cast")]; + tensor zero_mean_sq_305_cast = mul(x = zero_mean_305_cast, y = zero_mean_305_cast)[name = tensor("zero_mean_sq_305_cast")]; + tensor var_10044 = const()[name = tensor("op_10044"), val = tensor([1])]; + tensor var_10045_cast = reduce_mean(axes = var_10044, keep_dims = var_6860, x = zero_mean_sq_305_cast)[name = tensor("op_10045_cast")]; + tensor var_10046_to_fp16 = const()[name = tensor("op_10046_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10047_cast = add(x = var_10045_cast, y = var_10046_to_fp16)[name = tensor("op_10047_cast")]; + tensor denom_305_epsilon_0_to_fp16 = const()[name = tensor("denom_305_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_305_cast = rsqrt(epsilon = denom_305_epsilon_0_to_fp16, x = var_10047_cast)[name = tensor("denom_305_cast")]; + tensor out_305_cast = mul(x = zero_mean_305_cast, y = denom_305_cast)[name = tensor("out_305_cast")]; + tensor var_10051_to_fp16 = const()[name = tensor("op_10051_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(981143040)))]; + tensor var_10052_cast = add(x = out_305_cast, y = var_10051_to_fp16)[name = tensor("op_10052_cast")]; + tensor var_10054_to_fp16 = const()[name = tensor("op_10054_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(981145664)))]; + tensor input_589_cast = mul(x = var_10052_cast, y = var_10054_to_fp16)[name = 
tensor("input_589_cast")]; + tensor var_10062 = const()[name = tensor("op_10062"), val = tensor([1, 1])]; + tensor var_10064 = const()[name = tensor("op_10064"), val = tensor([1, 1])]; + tensor var_10066_pad_type_0 = const()[name = tensor("op_10066_pad_type_0"), val = tensor("custom")]; + tensor var_10066_pad_0 = const()[name = tensor("op_10066_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(981148288))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(990978752))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(990978944)))]; + tensor var_10066_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_10064, groups = var_6865, pad = var_10066_pad_0, pad_type = var_10066_pad_type_0, strides = var_10062, weight = up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_589_cast)[name = tensor("op_10066_cast")]; + tensor var_10067_split_sizes_0 = const()[name = tensor("op_10067_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10067_axis_0 = const()[name = tensor("op_10067_axis_0"), val = tensor(1)]; + tensor var_10067_cast_0, tensor var_10067_cast_1 = split(axis = var_10067_axis_0, split_sizes = var_10067_split_sizes_0, x = var_10066_cast)[name = tensor("op_10067_cast")]; + tensor var_10069_mode_0 = const()[name = tensor("op_10069_mode_0"), val = tensor("EXACT")]; + tensor var_10069_cast = gelu(mode = var_10069_mode_0, x = var_10067_cast_1)[name = tensor("op_10069_cast")]; + tensor input_591_cast = mul(x = var_10067_cast_0, y = var_10069_cast)[name = tensor("input_591_cast")]; + tensor var_10073 = const()[name = tensor("op_10073"), val = tensor([1, 1])]; + tensor var_10075 = const()[name = tensor("op_10075"), val = tensor([1, 1])]; + tensor var_10077_pad_type_0 = const()[name = tensor("op_10077_pad_type_0"), val = tensor("custom")]; + tensor var_10077_pad_0 = const()[name = tensor("op_10077_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(990999488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(995914752))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(995914944)))]; + tensor var_10077_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_10075, groups = var_6865, pad = var_10077_pad_0, pad_type = var_10077_pad_type_0, strides = var_10073, weight = 
up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_591_cast)[name = tensor("op_10077_cast")]; + tensor inputs_307_cast = add(x = var_10077_cast, y = inputs_305_cast)[name = tensor("inputs_307_cast")]; + tensor var_10087 = const()[name = tensor("op_10087"), val = tensor([1])]; + tensor channels_mean_307_cast = reduce_mean(axes = var_10087, keep_dims = var_6860, x = inputs_307_cast)[name = tensor("channels_mean_307_cast")]; + tensor zero_mean_307_cast = sub(x = inputs_307_cast, y = channels_mean_307_cast)[name = tensor("zero_mean_307_cast")]; + tensor zero_mean_sq_307_cast = mul(x = zero_mean_307_cast, y = zero_mean_307_cast)[name = tensor("zero_mean_sq_307_cast")]; + tensor var_10091 = const()[name = tensor("op_10091"), val = tensor([1])]; + tensor var_10092_cast = reduce_mean(axes = var_10091, keep_dims = var_6860, x = zero_mean_sq_307_cast)[name = tensor("op_10092_cast")]; + tensor var_10093_to_fp16 = const()[name = tensor("op_10093_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10094_cast = add(x = var_10092_cast, y = var_10093_to_fp16)[name = tensor("op_10094_cast")]; + tensor denom_307_epsilon_0_to_fp16 = const()[name = tensor("denom_307_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_307_cast = rsqrt(epsilon = denom_307_epsilon_0_to_fp16, x = var_10094_cast)[name = tensor("denom_307_cast")]; + tensor out_307_cast = mul(x = zero_mean_307_cast, y = denom_307_cast)[name = tensor("out_307_cast")]; + tensor var_10098_to_fp16 = const()[name = tensor("op_10098_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(995917568)))]; + tensor var_10099_cast = add(x = out_307_cast, y = var_10098_to_fp16)[name = tensor("op_10099_cast")]; + tensor var_10101_to_fp16 = const()[name = tensor("op_10101_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(995920192)))]; + tensor hidden_states_403_cast = mul(x = var_10099_cast, y = var_10101_to_fp16)[name = tensor("hidden_states_403_cast")]; + tensor var_10108 = const()[name = tensor("op_10108"), val = tensor([1, 1])]; + tensor var_10110 = const()[name = tensor("op_10110"), val = tensor([1, 1])]; + tensor q_205_pad_type_0 = const()[name = tensor("q_205_pad_type_0"), val = tensor("custom")]; + tensor q_205_pad_0 = const()[name = tensor("q_205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(995922816))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(996742080))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_205_cast = conv(dilations = var_10110, groups = var_6865, pad = q_205_pad_0, pad_type = q_205_pad_type_0, strides = var_10108, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_403_cast)[name = tensor("q_205_cast")]; + tensor var_10114 = const()[name = tensor("op_10114"), val = tensor([1, 1])]; + tensor var_10116 = const()[name = tensor("op_10116"), val = tensor([1, 1])]; + tensor k_205_pad_type_0 = const()[name = tensor("k_205_pad_type_0"), val = tensor("custom")]; + tensor k_205_pad_0 = const()[name = tensor("k_205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(996742208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(997561472))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_205_cast = conv(dilations = var_10116, groups = var_6865, pad = k_205_pad_0, pad_type = k_205_pad_type_0, strides = var_10114, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_403_cast)[name = tensor("k_205_cast")]; + tensor var_10120 = const()[name = tensor("op_10120"), val = tensor([1, 1])]; + tensor var_10122 = const()[name = tensor("op_10122"), val = tensor([1, 1])]; + tensor v_205_pad_type_0 = const()[name = tensor("v_205_pad_type_0"), val = tensor("custom")]; + tensor v_205_pad_0 = const()[name = tensor("v_205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(997561600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(998790464))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_205_cast = conv(dilations = var_10122, groups = var_6865, pad = v_205_pad_0, pad_type = v_205_pad_type_0, strides = var_10120, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_403_cast)[name = tensor("v_205_cast")]; + tensor var_10126 = const()[name = tensor("op_10126"), val = tensor([2, 20, 64, -1])]; + tensor var_10127_cast = reshape(shape = var_10126, x = q_205_cast)[name = tensor("op_10127_cast")]; + tensor var_10128 = const()[name = tensor("op_10128"), val = tensor([2, 20, 64, -1])]; + tensor var_10129_cast = reshape(shape = var_10128, x = k_205_cast)[name = tensor("op_10129_cast")]; + tensor var_10130 = const()[name = tensor("op_10130"), val = tensor([2, 20, 64, -1])]; + tensor var_10131_cast = reshape(shape = var_10130, x = v_205_cast)[name = tensor("op_10131_cast")]; + tensor attn_weights_409_transpose_x_0 = const()[name = tensor("attn_weights_409_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_409_transpose_y_0 = const()[name = tensor("attn_weights_409_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_409_cast = matmul(transpose_x = attn_weights_409_transpose_x_0, transpose_y = attn_weights_409_transpose_y_0, x = var_10127_cast, y = var_10129_cast)[name = tensor("attn_weights_409_cast")]; + tensor attn_weights_411_cast = mul(x = attn_weights_409_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_411_cast")]; + tensor var_10135_cast = softmax(axis = var_6849, x = attn_weights_411_cast)[name = tensor("op_10135_cast")]; + tensor attn_205_transpose_x_0 = const()[name = tensor("attn_205_transpose_x_0"), val = tensor(false)]; + tensor attn_205_transpose_y_0 = const()[name = tensor("attn_205_transpose_y_0"), val = tensor(true)]; + tensor attn_205_cast = matmul(transpose_x = attn_205_transpose_x_0, transpose_y = attn_205_transpose_y_0, x = var_10131_cast, y = var_10135_cast)[name = tensor("attn_205_cast")]; + tensor var_10139 = const()[name = tensor("op_10139"), val = 
tensor([2, 1280, 1, -1])]; + tensor input_593_cast = reshape(shape = var_10139, x = attn_205_cast)[name = tensor("input_593_cast")]; + tensor var_10144 = const()[name = tensor("op_10144"), val = tensor([1, 1])]; + tensor var_10146 = const()[name = tensor("op_10146"), val = tensor([1, 1])]; + tensor var_10148_pad_type_0 = const()[name = tensor("op_10148_pad_type_0"), val = tensor("custom")]; + tensor var_10148_pad_0 = const()[name = tensor("op_10148_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(998790656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1000019520))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1000019712)))]; + tensor var_10148_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_10146, groups = var_6865, pad = var_10148_pad_0, pad_type = var_10148_pad_type_0, strides = var_10144, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_593_cast)[name = tensor("op_10148_cast")]; + tensor inputs_309_cast = add(x = var_10148_cast, y = inputs_307_cast)[name = tensor("inputs_309_cast")]; + tensor var_10152 = const()[name = tensor("op_10152"), val = tensor([1])]; + tensor channels_mean_309_cast = reduce_mean(axes = var_10152, keep_dims = var_6860, x = inputs_309_cast)[name = tensor("channels_mean_309_cast")]; + tensor zero_mean_309_cast = sub(x = inputs_309_cast, y = channels_mean_309_cast)[name = tensor("zero_mean_309_cast")]; + tensor zero_mean_sq_309_cast = mul(x = zero_mean_309_cast, y = zero_mean_309_cast)[name = tensor("zero_mean_sq_309_cast")]; + tensor var_10156 = const()[name = tensor("op_10156"), val = tensor([1])]; + tensor var_10157_cast = reduce_mean(axes = var_10156, keep_dims = var_6860, x = zero_mean_sq_309_cast)[name = tensor("op_10157_cast")]; + tensor var_10158_to_fp16 = const()[name = tensor("op_10158_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10159_cast = add(x = var_10157_cast, y = var_10158_to_fp16)[name = tensor("op_10159_cast")]; + tensor denom_309_epsilon_0_to_fp16 = const()[name = tensor("denom_309_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_309_cast = rsqrt(epsilon = denom_309_epsilon_0_to_fp16, x = var_10159_cast)[name = tensor("denom_309_cast")]; + tensor out_309_cast = mul(x = zero_mean_309_cast, y = denom_309_cast)[name = tensor("out_309_cast")]; + tensor var_10163_to_fp16 = const()[name = tensor("op_10163_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1000022336)))]; + tensor var_10164_cast = add(x = out_309_cast, y = var_10163_to_fp16)[name = tensor("op_10164_cast")]; + tensor var_10166_to_fp16 = const()[name = tensor("op_10166_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1000024960)))]; + tensor hidden_states_405_cast = mul(x = var_10164_cast, y = var_10166_to_fp16)[name = tensor("hidden_states_405_cast")]; + 
tensor var_10173 = const()[name = tensor("op_10173"), val = tensor([1, 1])]; + tensor var_10175 = const()[name = tensor("op_10175"), val = tensor([1, 1])]; + tensor q_207_pad_type_0 = const()[name = tensor("q_207_pad_type_0"), val = tensor("custom")]; + tensor q_207_pad_0 = const()[name = tensor("q_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1000027584))), lut = tensor([-0x1.d34p-6, -0x1.1dp-7, 0x1.1ep-7, 0x1.d2cp-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_207_cast = conv(dilations = var_10175, groups = var_6865, pad = q_207_pad_0, pad_type = q_207_pad_type_0, strides = var_10173, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_405_cast)[name = tensor("q_207_cast")]; + tensor var_10179 = const()[name = tensor("op_10179"), val = tensor([1, 1])]; + tensor var_10181 = const()[name = tensor("op_10181"), val = tensor([1, 1])]; + tensor k_207_pad_type_0 = const()[name = tensor("k_207_pad_type_0"), val = tensor("custom")]; + tensor k_207_pad_0 = const()[name = tensor("k_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1000437248))), lut = tensor([-0x1.664p-6, -0x1.a54p-8, 0x1.b0cp-8, 0x1.69p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_207_cast = conv(dilations = var_10181, groups = var_6865, pad = k_207_pad_0, pad_type = k_207_pad_type_0, strides = var_10179, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_207_cast")]; + tensor var_10185 = const()[name = tensor("op_10185"), val = tensor([1, 1])]; + tensor var_10187 = const()[name = tensor("op_10187"), val = tensor([1, 1])]; + tensor v_207_pad_type_0 = const()[name = tensor("v_207_pad_type_0"), val = tensor("custom")]; + tensor v_207_pad_0 = const()[name = tensor("v_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1001092672))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002403456))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_207_cast = conv(dilations = var_10187, groups = var_6865, pad = v_207_pad_0, pad_type = v_207_pad_type_0, strides = var_10185, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_207_cast")]; + tensor var_10191 = const()[name = tensor("op_10191"), val = tensor([2, 20, 64, -1])]; + tensor var_10192_cast = reshape(shape = var_10191, x = q_207_cast)[name = tensor("op_10192_cast")]; + tensor var_10193 = const()[name = tensor("op_10193"), val = tensor([2, 20, 64, -1])]; + tensor var_10194_cast = reshape(shape = var_10193, x = 
k_207_cast)[name = tensor("op_10194_cast")]; + tensor var_10195 = const()[name = tensor("op_10195"), val = tensor([2, 20, 64, -1])]; + tensor var_10196_cast = reshape(shape = var_10195, x = v_207_cast)[name = tensor("op_10196_cast")]; + tensor attn_weights_413_transpose_x_0 = const()[name = tensor("attn_weights_413_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_413_transpose_y_0 = const()[name = tensor("attn_weights_413_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_413_cast = matmul(transpose_x = attn_weights_413_transpose_x_0, transpose_y = attn_weights_413_transpose_y_0, x = var_10192_cast, y = var_10194_cast)[name = tensor("attn_weights_413_cast")]; + tensor attn_weights_415_cast = mul(x = attn_weights_413_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_415_cast")]; + tensor var_10200_cast = softmax(axis = var_6849, x = attn_weights_415_cast)[name = tensor("op_10200_cast")]; + tensor attn_207_transpose_x_0 = const()[name = tensor("attn_207_transpose_x_0"), val = tensor(false)]; + tensor attn_207_transpose_y_0 = const()[name = tensor("attn_207_transpose_y_0"), val = tensor(true)]; + tensor attn_207_cast = matmul(transpose_x = attn_207_transpose_x_0, transpose_y = attn_207_transpose_y_0, x = var_10196_cast, y = var_10200_cast)[name = tensor("attn_207_cast")]; + tensor var_10204 = const()[name = tensor("op_10204"), val = tensor([2, 1280, 1, -1])]; + tensor input_595_cast = reshape(shape = var_10204, x = attn_207_cast)[name = tensor("input_595_cast")]; + tensor var_10209 = const()[name = tensor("op_10209"), val = tensor([1, 1])]; + tensor var_10211 = const()[name = tensor("op_10211"), val = tensor([1, 1])]; + tensor var_10213_pad_type_0 = const()[name = tensor("op_10213_pad_type_0"), val = tensor("custom")]; + tensor var_10213_pad_0 = const()[name = tensor("op_10213_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002403584))), lut = tensor([-0x1.09p-6, -0x1.3ep-8, 0x1.3b8p-8, 0x1.084p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002813248)))]; + tensor var_10213_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_10211, groups = var_6865, pad = var_10213_pad_0, pad_type = var_10213_pad_type_0, strides = var_10209, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_595_cast)[name = tensor("op_10213_cast")]; + tensor inputs_311_cast = add(x = var_10213_cast, y = inputs_309_cast)[name = tensor("inputs_311_cast")]; + tensor var_10217 = const()[name = tensor("op_10217"), val = tensor([1])]; + tensor channels_mean_311_cast = reduce_mean(axes = var_10217, keep_dims = var_6860, x = inputs_311_cast)[name = tensor("channels_mean_311_cast")]; + tensor zero_mean_311_cast = sub(x = inputs_311_cast, y = channels_mean_311_cast)[name = tensor("zero_mean_311_cast")]; + tensor zero_mean_sq_311_cast = mul(x = zero_mean_311_cast, y = zero_mean_311_cast)[name = tensor("zero_mean_sq_311_cast")]; 
+ tensor var_10221 = const()[name = tensor("op_10221"), val = tensor([1])]; + tensor var_10222_cast = reduce_mean(axes = var_10221, keep_dims = var_6860, x = zero_mean_sq_311_cast)[name = tensor("op_10222_cast")]; + tensor var_10223_to_fp16 = const()[name = tensor("op_10223_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10224_cast = add(x = var_10222_cast, y = var_10223_to_fp16)[name = tensor("op_10224_cast")]; + tensor denom_311_epsilon_0_to_fp16 = const()[name = tensor("denom_311_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_311_cast = rsqrt(epsilon = denom_311_epsilon_0_to_fp16, x = var_10224_cast)[name = tensor("denom_311_cast")]; + tensor out_311_cast = mul(x = zero_mean_311_cast, y = denom_311_cast)[name = tensor("out_311_cast")]; + tensor var_10228_to_fp16 = const()[name = tensor("op_10228_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002815872)))]; + tensor var_10229_cast = add(x = out_311_cast, y = var_10228_to_fp16)[name = tensor("op_10229_cast")]; + tensor var_10231_to_fp16 = const()[name = tensor("op_10231_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002818496)))]; + tensor input_597_cast = mul(x = var_10229_cast, y = var_10231_to_fp16)[name = tensor("input_597_cast")]; + tensor var_10239 = const()[name = tensor("op_10239"), val = tensor([1, 1])]; + tensor var_10241 = const()[name = tensor("op_10241"), val = tensor([1, 1])]; + tensor var_10243_pad_type_0 = const()[name = tensor("op_10243_pad_type_0"), val = tensor("custom")]; + tensor var_10243_pad_0 = const()[name = tensor("op_10243_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1002821120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1012651584))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1012651776)))]; + tensor var_10243_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_10241, groups = var_6865, pad = var_10243_pad_0, pad_type = var_10243_pad_type_0, strides = var_10239, weight = up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_597_cast)[name = tensor("op_10243_cast")]; + tensor var_10244_split_sizes_0 = const()[name = tensor("op_10244_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10244_axis_0 = const()[name = tensor("op_10244_axis_0"), val = tensor(1)]; + tensor var_10244_cast_0, tensor var_10244_cast_1 = split(axis = var_10244_axis_0, split_sizes = var_10244_split_sizes_0, x = var_10243_cast)[name = tensor("op_10244_cast")]; + tensor var_10246_mode_0 = const()[name = tensor("op_10246_mode_0"), val = tensor("EXACT")]; + tensor var_10246_cast = gelu(mode = var_10246_mode_0, x = var_10244_cast_1)[name = tensor("op_10246_cast")]; + tensor input_599_cast = mul(x = var_10244_cast_0, y = var_10246_cast)[name = tensor("input_599_cast")]; + tensor var_10250 = const()[name = 
tensor("op_10250"), val = tensor([1, 1])]; + tensor var_10252 = const()[name = tensor("op_10252"), val = tensor([1, 1])]; + tensor var_10254_pad_type_0 = const()[name = tensor("op_10254_pad_type_0"), val = tensor("custom")]; + tensor var_10254_pad_0 = const()[name = tensor("op_10254_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1012672320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1017587584))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1017587776)))]; + tensor var_10254_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_10252, groups = var_6865, pad = var_10254_pad_0, pad_type = var_10254_pad_type_0, strides = var_10250, weight = up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_599_cast)[name = tensor("op_10254_cast")]; + tensor inputs_313_cast = add(x = var_10254_cast, y = inputs_311_cast)[name = tensor("inputs_313_cast")]; + tensor var_10264 = const()[name = tensor("op_10264"), val = tensor([1])]; + tensor channels_mean_313_cast = reduce_mean(axes = var_10264, keep_dims = var_6860, x = inputs_313_cast)[name = tensor("channels_mean_313_cast")]; + tensor zero_mean_313_cast = sub(x = inputs_313_cast, y = channels_mean_313_cast)[name = tensor("zero_mean_313_cast")]; + tensor zero_mean_sq_313_cast = mul(x = zero_mean_313_cast, y = zero_mean_313_cast)[name = tensor("zero_mean_sq_313_cast")]; + tensor var_10268 = const()[name = tensor("op_10268"), val = tensor([1])]; + tensor var_10269_cast = reduce_mean(axes = var_10268, keep_dims = var_6860, x = zero_mean_sq_313_cast)[name = tensor("op_10269_cast")]; + tensor var_10270_to_fp16 = const()[name = tensor("op_10270_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10271_cast = add(x = var_10269_cast, y = var_10270_to_fp16)[name = tensor("op_10271_cast")]; + tensor denom_313_epsilon_0_to_fp16 = const()[name = tensor("denom_313_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_313_cast = rsqrt(epsilon = denom_313_epsilon_0_to_fp16, x = var_10271_cast)[name = tensor("denom_313_cast")]; + tensor out_313_cast = mul(x = zero_mean_313_cast, y = denom_313_cast)[name = tensor("out_313_cast")]; + tensor var_10275_to_fp16 = const()[name = tensor("op_10275_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1017590400)))]; + tensor var_10276_cast = add(x = out_313_cast, y = var_10275_to_fp16)[name = tensor("op_10276_cast")]; + tensor var_10278_to_fp16 = const()[name = tensor("op_10278_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1017593024)))]; + tensor hidden_states_409_cast = mul(x = var_10276_cast, y = var_10278_to_fp16)[name = tensor("hidden_states_409_cast")]; + tensor var_10285 = const()[name = tensor("op_10285"), val = tensor([1, 1])]; + tensor var_10287 = const()[name = tensor("op_10287"), val = tensor([1, 1])]; + tensor q_209_pad_type_0 = const()[name = 
tensor("q_209_pad_type_0"), val = tensor("custom")]; + tensor q_209_pad_0 = const()[name = tensor("q_209_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1017595648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1018414912))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_209_cast = conv(dilations = var_10287, groups = var_6865, pad = q_209_pad_0, pad_type = q_209_pad_type_0, strides = var_10285, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_409_cast)[name = tensor("q_209_cast")]; + tensor var_10291 = const()[name = tensor("op_10291"), val = tensor([1, 1])]; + tensor var_10293 = const()[name = tensor("op_10293"), val = tensor([1, 1])]; + tensor k_209_pad_type_0 = const()[name = tensor("k_209_pad_type_0"), val = tensor("custom")]; + tensor k_209_pad_0 = const()[name = tensor("k_209_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1018415040))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1019234304))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_209_cast = conv(dilations = var_10293, groups = var_6865, pad = k_209_pad_0, pad_type = k_209_pad_type_0, strides = var_10291, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_409_cast)[name = tensor("k_209_cast")]; + tensor var_10297 = const()[name = tensor("op_10297"), val = tensor([1, 1])]; + tensor var_10299 = const()[name = tensor("op_10299"), val = tensor([1, 1])]; + tensor v_209_pad_type_0 = const()[name = tensor("v_209_pad_type_0"), val = tensor("custom")]; + tensor v_209_pad_0 = const()[name = tensor("v_209_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1019234432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1020463296))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_209_cast = conv(dilations = var_10299, groups = var_6865, pad = v_209_pad_0, pad_type = v_209_pad_type_0, strides = var_10297, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_409_cast)[name = tensor("v_209_cast")]; + tensor var_10303 = const()[name = tensor("op_10303"), val = tensor([2, 20, 64, -1])]; + tensor var_10304_cast = reshape(shape = var_10303, x = q_209_cast)[name = tensor("op_10304_cast")]; + tensor var_10305 = const()[name = tensor("op_10305"), val = tensor([2, 20, 64, -1])]; + tensor var_10306_cast = reshape(shape = var_10305, x = k_209_cast)[name = tensor("op_10306_cast")]; + tensor var_10307 = const()[name = tensor("op_10307"), val = tensor([2, 20, 64, 
-1])]; + tensor var_10308_cast = reshape(shape = var_10307, x = v_209_cast)[name = tensor("op_10308_cast")]; + tensor attn_weights_417_transpose_x_0 = const()[name = tensor("attn_weights_417_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_417_transpose_y_0 = const()[name = tensor("attn_weights_417_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_417_cast = matmul(transpose_x = attn_weights_417_transpose_x_0, transpose_y = attn_weights_417_transpose_y_0, x = var_10304_cast, y = var_10306_cast)[name = tensor("attn_weights_417_cast")]; + tensor attn_weights_419_cast = mul(x = attn_weights_417_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_419_cast")]; + tensor var_10312_cast = softmax(axis = var_6849, x = attn_weights_419_cast)[name = tensor("op_10312_cast")]; + tensor attn_209_transpose_x_0 = const()[name = tensor("attn_209_transpose_x_0"), val = tensor(false)]; + tensor attn_209_transpose_y_0 = const()[name = tensor("attn_209_transpose_y_0"), val = tensor(true)]; + tensor attn_209_cast = matmul(transpose_x = attn_209_transpose_x_0, transpose_y = attn_209_transpose_y_0, x = var_10308_cast, y = var_10312_cast)[name = tensor("attn_209_cast")]; + tensor var_10316 = const()[name = tensor("op_10316"), val = tensor([2, 1280, 1, -1])]; + tensor input_601_cast = reshape(shape = var_10316, x = attn_209_cast)[name = tensor("input_601_cast")]; + tensor var_10321 = const()[name = tensor("op_10321"), val = tensor([1, 1])]; + tensor var_10323 = const()[name = tensor("op_10323"), val = tensor([1, 1])]; + tensor var_10325_pad_type_0 = const()[name = tensor("op_10325_pad_type_0"), val = tensor("custom")]; + tensor var_10325_pad_0 = const()[name = tensor("op_10325_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1020463488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021692352))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021692544)))]; + tensor var_10325_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_10323, groups = var_6865, pad = var_10325_pad_0, pad_type = var_10325_pad_type_0, strides = var_10321, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_601_cast)[name = tensor("op_10325_cast")]; + tensor inputs_315_cast = add(x = var_10325_cast, y = inputs_313_cast)[name = tensor("inputs_315_cast")]; + tensor var_10329 = const()[name = tensor("op_10329"), val = tensor([1])]; + tensor channels_mean_315_cast = reduce_mean(axes = var_10329, keep_dims = var_6860, x = inputs_315_cast)[name = tensor("channels_mean_315_cast")]; + tensor zero_mean_315_cast = sub(x = inputs_315_cast, y = channels_mean_315_cast)[name = tensor("zero_mean_315_cast")]; + tensor zero_mean_sq_315_cast = mul(x = zero_mean_315_cast, y = zero_mean_315_cast)[name = tensor("zero_mean_sq_315_cast")]; + tensor var_10333 = const()[name = tensor("op_10333"), val = tensor([1])]; + tensor 
var_10334_cast = reduce_mean(axes = var_10333, keep_dims = var_6860, x = zero_mean_sq_315_cast)[name = tensor("op_10334_cast")]; + tensor var_10335_to_fp16 = const()[name = tensor("op_10335_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10336_cast = add(x = var_10334_cast, y = var_10335_to_fp16)[name = tensor("op_10336_cast")]; + tensor denom_315_epsilon_0_to_fp16 = const()[name = tensor("denom_315_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_315_cast = rsqrt(epsilon = denom_315_epsilon_0_to_fp16, x = var_10336_cast)[name = tensor("denom_315_cast")]; + tensor out_315_cast = mul(x = zero_mean_315_cast, y = denom_315_cast)[name = tensor("out_315_cast")]; + tensor var_10340_to_fp16 = const()[name = tensor("op_10340_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021695168)))]; + tensor var_10341_cast = add(x = out_315_cast, y = var_10340_to_fp16)[name = tensor("op_10341_cast")]; + tensor var_10343_to_fp16 = const()[name = tensor("op_10343_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021697792)))]; + tensor hidden_states_411_cast = mul(x = var_10341_cast, y = var_10343_to_fp16)[name = tensor("hidden_states_411_cast")]; + tensor var_10350 = const()[name = tensor("op_10350"), val = tensor([1, 1])]; + tensor var_10352 = const()[name = tensor("op_10352"), val = tensor([1, 1])]; + tensor q_211_pad_type_0 = const()[name = tensor("q_211_pad_type_0"), val = tensor("custom")]; + tensor q_211_pad_0 = const()[name = tensor("q_211_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021700416))), lut = tensor([-0x1.a5p-6, -0x1.048p-7, 0x1.054p-7, 0x1.a54p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_211_cast = conv(dilations = var_10352, groups = var_6865, pad = q_211_pad_0, pad_type = q_211_pad_type_0, strides = var_10350, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_411_cast)[name = tensor("q_211_cast")]; + tensor var_10356 = const()[name = tensor("op_10356"), val = tensor([1, 1])]; + tensor var_10358 = const()[name = tensor("op_10358"), val = tensor([1, 1])]; + tensor k_211_pad_type_0 = const()[name = tensor("k_211_pad_type_0"), val = tensor("custom")]; + tensor k_211_pad_0 = const()[name = tensor("k_211_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1022110080))), lut = tensor([-0x1.404p-6, -0x1.808p-8, 0x1.7ccp-8, 0x1.3f4p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_211_cast = conv(dilations = var_10358, groups = var_6865, pad = k_211_pad_0, pad_type = k_211_pad_type_0, strides = var_10356, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_211_cast")]; + tensor var_10362 = const()[name = tensor("op_10362"), val = tensor([1, 1])]; + tensor var_10364 = const()[name = tensor("op_10364"), val = tensor([1, 1])]; + tensor 
v_211_pad_type_0 = const()[name = tensor("v_211_pad_type_0"), val = tensor("custom")]; + tensor v_211_pad_0 = const()[name = tensor("v_211_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1022765504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1024076288))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_211_cast = conv(dilations = var_10364, groups = var_6865, pad = v_211_pad_0, pad_type = v_211_pad_type_0, strides = var_10362, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_211_cast")]; + tensor var_10368 = const()[name = tensor("op_10368"), val = tensor([2, 20, 64, -1])]; + tensor var_10369_cast = reshape(shape = var_10368, x = q_211_cast)[name = tensor("op_10369_cast")]; + tensor var_10370 = const()[name = tensor("op_10370"), val = tensor([2, 20, 64, -1])]; + tensor var_10371_cast = reshape(shape = var_10370, x = k_211_cast)[name = tensor("op_10371_cast")]; + tensor var_10372 = const()[name = tensor("op_10372"), val = tensor([2, 20, 64, -1])]; + tensor var_10373_cast = reshape(shape = var_10372, x = v_211_cast)[name = tensor("op_10373_cast")]; + tensor attn_weights_421_transpose_x_0 = const()[name = tensor("attn_weights_421_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_421_transpose_y_0 = const()[name = tensor("attn_weights_421_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_421_cast = matmul(transpose_x = attn_weights_421_transpose_x_0, transpose_y = attn_weights_421_transpose_y_0, x = var_10369_cast, y = var_10371_cast)[name = tensor("attn_weights_421_cast")]; + tensor attn_weights_423_cast = mul(x = attn_weights_421_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_423_cast")]; + tensor var_10377_cast = softmax(axis = var_6849, x = attn_weights_423_cast)[name = tensor("op_10377_cast")]; + tensor attn_211_transpose_x_0 = const()[name = tensor("attn_211_transpose_x_0"), val = tensor(false)]; + tensor attn_211_transpose_y_0 = const()[name = tensor("attn_211_transpose_y_0"), val = tensor(true)]; + tensor attn_211_cast = matmul(transpose_x = attn_211_transpose_x_0, transpose_y = attn_211_transpose_y_0, x = var_10373_cast, y = var_10377_cast)[name = tensor("attn_211_cast")]; + tensor var_10381 = const()[name = tensor("op_10381"), val = tensor([2, 1280, 1, -1])]; + tensor input_603_cast = reshape(shape = var_10381, x = attn_211_cast)[name = tensor("input_603_cast")]; + tensor var_10386 = const()[name = tensor("op_10386"), val = tensor([1, 1])]; + tensor var_10388 = const()[name = tensor("op_10388"), val = tensor([1, 1])]; + tensor var_10390_pad_type_0 = const()[name = tensor("op_10390_pad_type_0"), val = tensor("custom")]; + tensor var_10390_pad_0 = const()[name = tensor("op_10390_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1024076416))), lut = tensor([-0x1.f0cp-7, -0x1.2bp-8, 0x1.2b8p-8, 0x1.f08p-7]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = 
tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1024486080)))]; + tensor var_10390_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_10388, groups = var_6865, pad = var_10390_pad_0, pad_type = var_10390_pad_type_0, strides = var_10386, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_603_cast)[name = tensor("op_10390_cast")]; + tensor inputs_317_cast = add(x = var_10390_cast, y = inputs_315_cast)[name = tensor("inputs_317_cast")]; + tensor var_10394 = const()[name = tensor("op_10394"), val = tensor([1])]; + tensor channels_mean_317_cast = reduce_mean(axes = var_10394, keep_dims = var_6860, x = inputs_317_cast)[name = tensor("channels_mean_317_cast")]; + tensor zero_mean_317_cast = sub(x = inputs_317_cast, y = channels_mean_317_cast)[name = tensor("zero_mean_317_cast")]; + tensor zero_mean_sq_317_cast = mul(x = zero_mean_317_cast, y = zero_mean_317_cast)[name = tensor("zero_mean_sq_317_cast")]; + tensor var_10398 = const()[name = tensor("op_10398"), val = tensor([1])]; + tensor var_10399_cast = reduce_mean(axes = var_10398, keep_dims = var_6860, x = zero_mean_sq_317_cast)[name = tensor("op_10399_cast")]; + tensor var_10400_to_fp16 = const()[name = tensor("op_10400_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10401_cast = add(x = var_10399_cast, y = var_10400_to_fp16)[name = tensor("op_10401_cast")]; + tensor denom_317_epsilon_0_to_fp16 = const()[name = tensor("denom_317_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_317_cast = rsqrt(epsilon = denom_317_epsilon_0_to_fp16, x = var_10401_cast)[name = tensor("denom_317_cast")]; + tensor out_317_cast = mul(x = zero_mean_317_cast, y = denom_317_cast)[name = tensor("out_317_cast")]; + tensor var_10405_to_fp16 = const()[name = tensor("op_10405_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1024488704)))]; + tensor var_10406_cast = add(x = out_317_cast, y = var_10405_to_fp16)[name = tensor("op_10406_cast")]; + tensor var_10408_to_fp16 = const()[name = tensor("op_10408_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1024491328)))]; + tensor input_605_cast = mul(x = var_10406_cast, y = var_10408_to_fp16)[name = tensor("input_605_cast")]; + tensor var_10416 = const()[name = tensor("op_10416"), val = tensor([1, 1])]; + tensor var_10418 = const()[name = tensor("op_10418"), val = tensor([1, 1])]; + tensor var_10420_pad_type_0 = const()[name = tensor("op_10420_pad_type_0"), val = tensor("custom")]; + tensor var_10420_pad_0 = const()[name = tensor("op_10420_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1024493952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1034324416))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1034324608)))]; + tensor var_10420_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_10418, groups = var_6865, pad = var_10420_pad_0, pad_type = var_10420_pad_type_0, strides = var_10416, weight = up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_605_cast)[name = tensor("op_10420_cast")]; + tensor var_10421_split_sizes_0 = const()[name = tensor("op_10421_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10421_axis_0 = const()[name = tensor("op_10421_axis_0"), val = tensor(1)]; + tensor var_10421_cast_0, tensor var_10421_cast_1 = split(axis = var_10421_axis_0, split_sizes = var_10421_split_sizes_0, x = var_10420_cast)[name = tensor("op_10421_cast")]; + tensor var_10423_mode_0 = const()[name = tensor("op_10423_mode_0"), val = tensor("EXACT")]; + tensor var_10423_cast = gelu(mode = var_10423_mode_0, x = var_10421_cast_1)[name = tensor("op_10423_cast")]; + tensor input_607_cast = mul(x = var_10421_cast_0, y = var_10423_cast)[name = tensor("input_607_cast")]; + tensor var_10427 = const()[name = tensor("op_10427"), val = tensor([1, 1])]; + tensor var_10429 = const()[name = tensor("op_10429"), val = tensor([1, 1])]; + tensor var_10431_pad_type_0 = const()[name = tensor("op_10431_pad_type_0"), val = tensor("custom")]; + tensor var_10431_pad_0 = const()[name = tensor("op_10431_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1034345152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039260416))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039260608)))]; + tensor var_10431_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_10429, groups = var_6865, pad = var_10431_pad_0, pad_type = var_10431_pad_type_0, strides = var_10427, weight = up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_607_cast)[name = tensor("op_10431_cast")]; + tensor inputs_319_cast = add(x = var_10431_cast, y = inputs_317_cast)[name = tensor("inputs_319_cast")]; + tensor var_10441 = const()[name = tensor("op_10441"), val = tensor([1])]; + tensor channels_mean_319_cast = reduce_mean(axes = var_10441, keep_dims = var_6860, x = inputs_319_cast)[name = tensor("channels_mean_319_cast")]; + tensor zero_mean_319_cast = sub(x = inputs_319_cast, y = channels_mean_319_cast)[name = tensor("zero_mean_319_cast")]; + tensor zero_mean_sq_319_cast = mul(x = zero_mean_319_cast, y = zero_mean_319_cast)[name = tensor("zero_mean_sq_319_cast")]; + tensor var_10445 = const()[name = tensor("op_10445"), val = tensor([1])]; + tensor var_10446_cast = reduce_mean(axes = var_10445, keep_dims = var_6860, x = zero_mean_sq_319_cast)[name = tensor("op_10446_cast")]; + tensor var_10447_to_fp16 = const()[name 
= tensor("op_10447_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10448_cast = add(x = var_10446_cast, y = var_10447_to_fp16)[name = tensor("op_10448_cast")]; + tensor denom_319_epsilon_0_to_fp16 = const()[name = tensor("denom_319_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_319_cast = rsqrt(epsilon = denom_319_epsilon_0_to_fp16, x = var_10448_cast)[name = tensor("denom_319_cast")]; + tensor out_319_cast = mul(x = zero_mean_319_cast, y = denom_319_cast)[name = tensor("out_319_cast")]; + tensor var_10452_to_fp16 = const()[name = tensor("op_10452_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039263232)))]; + tensor var_10453_cast = add(x = out_319_cast, y = var_10452_to_fp16)[name = tensor("op_10453_cast")]; + tensor var_10455_to_fp16 = const()[name = tensor("op_10455_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039265856)))]; + tensor hidden_states_415_cast = mul(x = var_10453_cast, y = var_10455_to_fp16)[name = tensor("hidden_states_415_cast")]; + tensor var_10462 = const()[name = tensor("op_10462"), val = tensor([1, 1])]; + tensor var_10464 = const()[name = tensor("op_10464"), val = tensor([1, 1])]; + tensor q_213_pad_type_0 = const()[name = tensor("q_213_pad_type_0"), val = tensor("custom")]; + tensor q_213_pad_0 = const()[name = tensor("q_213_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1039268480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040087744))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_213_cast = conv(dilations = var_10464, groups = var_6865, pad = q_213_pad_0, pad_type = q_213_pad_type_0, strides = var_10462, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_415_cast)[name = tensor("q_213_cast")]; + tensor var_10468 = const()[name = tensor("op_10468"), val = tensor([1, 1])]; + tensor var_10470 = const()[name = tensor("op_10470"), val = tensor([1, 1])]; + tensor k_213_pad_type_0 = const()[name = tensor("k_213_pad_type_0"), val = tensor("custom")]; + tensor k_213_pad_0 = const()[name = tensor("k_213_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040087872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040907136))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_213_cast = conv(dilations = var_10470, groups = var_6865, pad = k_213_pad_0, pad_type = k_213_pad_type_0, strides = var_10468, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_415_cast)[name = tensor("k_213_cast")]; + tensor var_10474 = const()[name = tensor("op_10474"), val = tensor([1, 1])]; + tensor var_10476 = const()[name = tensor("op_10476"), val = tensor([1, 1])]; + tensor v_213_pad_type_0 = const()[name = tensor("v_213_pad_type_0"), val = tensor("custom")]; + tensor 
v_213_pad_0 = const()[name = tensor("v_213_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040907264))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1042136128))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_213_cast = conv(dilations = var_10476, groups = var_6865, pad = v_213_pad_0, pad_type = v_213_pad_type_0, strides = var_10474, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_415_cast)[name = tensor("v_213_cast")]; + tensor var_10480 = const()[name = tensor("op_10480"), val = tensor([2, 20, 64, -1])]; + tensor var_10481_cast = reshape(shape = var_10480, x = q_213_cast)[name = tensor("op_10481_cast")]; + tensor var_10482 = const()[name = tensor("op_10482"), val = tensor([2, 20, 64, -1])]; + tensor var_10483_cast = reshape(shape = var_10482, x = k_213_cast)[name = tensor("op_10483_cast")]; + tensor var_10484 = const()[name = tensor("op_10484"), val = tensor([2, 20, 64, -1])]; + tensor var_10485_cast = reshape(shape = var_10484, x = v_213_cast)[name = tensor("op_10485_cast")]; + tensor attn_weights_425_transpose_x_0 = const()[name = tensor("attn_weights_425_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_425_transpose_y_0 = const()[name = tensor("attn_weights_425_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_425_cast = matmul(transpose_x = attn_weights_425_transpose_x_0, transpose_y = attn_weights_425_transpose_y_0, x = var_10481_cast, y = var_10483_cast)[name = tensor("attn_weights_425_cast")]; + tensor attn_weights_427_cast = mul(x = attn_weights_425_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_427_cast")]; + tensor var_10489_cast = softmax(axis = var_6849, x = attn_weights_427_cast)[name = tensor("op_10489_cast")]; + tensor attn_213_transpose_x_0 = const()[name = tensor("attn_213_transpose_x_0"), val = tensor(false)]; + tensor attn_213_transpose_y_0 = const()[name = tensor("attn_213_transpose_y_0"), val = tensor(true)]; + tensor attn_213_cast = matmul(transpose_x = attn_213_transpose_x_0, transpose_y = attn_213_transpose_y_0, x = var_10485_cast, y = var_10489_cast)[name = tensor("attn_213_cast")]; + tensor var_10493 = const()[name = tensor("op_10493"), val = tensor([2, 1280, 1, -1])]; + tensor input_609_cast = reshape(shape = var_10493, x = attn_213_cast)[name = tensor("input_609_cast")]; + tensor var_10498 = const()[name = tensor("op_10498"), val = tensor([1, 1])]; + tensor var_10500 = const()[name = tensor("op_10500"), val = tensor([1, 1])]; + tensor var_10502_pad_type_0 = const()[name = tensor("op_10502_pad_type_0"), val = tensor("custom")]; + tensor var_10502_pad_0 = const()[name = tensor("op_10502_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1042136320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1043365184))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor 
up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1043365376)))]; + tensor var_10502_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_10500, groups = var_6865, pad = var_10502_pad_0, pad_type = var_10502_pad_type_0, strides = var_10498, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_609_cast)[name = tensor("op_10502_cast")]; + tensor inputs_321_cast = add(x = var_10502_cast, y = inputs_319_cast)[name = tensor("inputs_321_cast")]; + tensor var_10506 = const()[name = tensor("op_10506"), val = tensor([1])]; + tensor channels_mean_321_cast = reduce_mean(axes = var_10506, keep_dims = var_6860, x = inputs_321_cast)[name = tensor("channels_mean_321_cast")]; + tensor zero_mean_321_cast = sub(x = inputs_321_cast, y = channels_mean_321_cast)[name = tensor("zero_mean_321_cast")]; + tensor zero_mean_sq_321_cast = mul(x = zero_mean_321_cast, y = zero_mean_321_cast)[name = tensor("zero_mean_sq_321_cast")]; + tensor var_10510 = const()[name = tensor("op_10510"), val = tensor([1])]; + tensor var_10511_cast = reduce_mean(axes = var_10510, keep_dims = var_6860, x = zero_mean_sq_321_cast)[name = tensor("op_10511_cast")]; + tensor var_10512_to_fp16 = const()[name = tensor("op_10512_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10513_cast = add(x = var_10511_cast, y = var_10512_to_fp16)[name = tensor("op_10513_cast")]; + tensor denom_321_epsilon_0_to_fp16 = const()[name = tensor("denom_321_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_321_cast = rsqrt(epsilon = denom_321_epsilon_0_to_fp16, x = var_10513_cast)[name = tensor("denom_321_cast")]; + tensor out_321_cast = mul(x = zero_mean_321_cast, y = denom_321_cast)[name = tensor("out_321_cast")]; + tensor var_10517_to_fp16 = const()[name = tensor("op_10517_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1043368000)))]; + tensor var_10518_cast = add(x = out_321_cast, y = var_10517_to_fp16)[name = tensor("op_10518_cast")]; + tensor var_10520_to_fp16 = const()[name = tensor("op_10520_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1043370624)))]; + tensor hidden_states_417_cast = mul(x = var_10518_cast, y = var_10520_to_fp16)[name = tensor("hidden_states_417_cast")]; + tensor var_10527 = const()[name = tensor("op_10527"), val = tensor([1, 1])]; + tensor var_10529 = const()[name = tensor("op_10529"), val = tensor([1, 1])]; + tensor q_215_pad_type_0 = const()[name = tensor("q_215_pad_type_0"), val = tensor("custom")]; + tensor q_215_pad_0 = const()[name = tensor("q_215_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1043373248))), lut = tensor([-0x1.8bcp-6, -0x1.eep-8, 0x1.ef4p-8, 0x1.8c4p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_215_cast = conv(dilations = var_10529, groups = var_6865, pad = q_215_pad_0, pad_type = q_215_pad_type_0, strides = var_10527, weight = 
up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_417_cast)[name = tensor("q_215_cast")]; + tensor var_10533 = const()[name = tensor("op_10533"), val = tensor([1, 1])]; + tensor var_10535 = const()[name = tensor("op_10535"), val = tensor([1, 1])]; + tensor k_215_pad_type_0 = const()[name = tensor("k_215_pad_type_0"), val = tensor("custom")]; + tensor k_215_pad_0 = const()[name = tensor("k_215_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1043782912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1045093696))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_215_cast = conv(dilations = var_10535, groups = var_6865, pad = k_215_pad_0, pad_type = k_215_pad_type_0, strides = var_10533, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_215_cast")]; + tensor var_10539 = const()[name = tensor("op_10539"), val = tensor([1, 1])]; + tensor var_10541 = const()[name = tensor("op_10541"), val = tensor([1, 1])]; + tensor v_215_pad_type_0 = const()[name = tensor("v_215_pad_type_0"), val = tensor("custom")]; + tensor v_215_pad_0 = const()[name = tensor("v_215_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1045093824))), lut = tensor([-0x1.45cp-6, -0x1.8p-8, 0x1.7fp-8, 0x1.454p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_215_cast = conv(dilations = var_10541, groups = var_6865, pad = v_215_pad_0, pad_type = v_215_pad_type_0, strides = var_10539, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_215_cast")]; + tensor var_10545 = const()[name = tensor("op_10545"), val = tensor([2, 20, 64, -1])]; + tensor var_10546_cast = reshape(shape = var_10545, x = q_215_cast)[name = tensor("op_10546_cast")]; + tensor var_10547 = const()[name = tensor("op_10547"), val = tensor([2, 20, 64, -1])]; + tensor var_10548_cast = reshape(shape = var_10547, x = k_215_cast)[name = tensor("op_10548_cast")]; + tensor var_10549 = const()[name = tensor("op_10549"), val = tensor([2, 20, 64, -1])]; + tensor var_10550_cast = reshape(shape = var_10549, x = v_215_cast)[name = tensor("op_10550_cast")]; + tensor attn_weights_429_transpose_x_0 = const()[name = tensor("attn_weights_429_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_429_transpose_y_0 = const()[name = tensor("attn_weights_429_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_429_cast = matmul(transpose_x = attn_weights_429_transpose_x_0, transpose_y = attn_weights_429_transpose_y_0, x = var_10546_cast, y = var_10548_cast)[name = tensor("attn_weights_429_cast")]; + tensor attn_weights_431_cast = mul(x = attn_weights_429_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_431_cast")]; + tensor var_10554_cast = softmax(axis = var_6849, x = attn_weights_431_cast)[name = 
tensor("op_10554_cast")]; + tensor attn_215_transpose_x_0 = const()[name = tensor("attn_215_transpose_x_0"), val = tensor(false)]; + tensor attn_215_transpose_y_0 = const()[name = tensor("attn_215_transpose_y_0"), val = tensor(true)]; + tensor attn_215_cast = matmul(transpose_x = attn_215_transpose_x_0, transpose_y = attn_215_transpose_y_0, x = var_10550_cast, y = var_10554_cast)[name = tensor("attn_215_cast")]; + tensor var_10558 = const()[name = tensor("op_10558"), val = tensor([2, 1280, 1, -1])]; + tensor input_611_cast = reshape(shape = var_10558, x = attn_215_cast)[name = tensor("input_611_cast")]; + tensor var_10563 = const()[name = tensor("op_10563"), val = tensor([1, 1])]; + tensor var_10565 = const()[name = tensor("op_10565"), val = tensor([1, 1])]; + tensor var_10567_pad_type_0 = const()[name = tensor("op_10567_pad_type_0"), val = tensor("custom")]; + tensor var_10567_pad_0 = const()[name = tensor("op_10567_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1045749248))), lut = tensor([-0x1.9fcp-7, -0x1.f98p-9, 0x1.f8cp-9, 0x1.ap-7]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1046158912)))]; + tensor var_10567_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_10565, groups = var_6865, pad = var_10567_pad_0, pad_type = var_10567_pad_type_0, strides = var_10563, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_611_cast)[name = tensor("op_10567_cast")]; + tensor inputs_323_cast = add(x = var_10567_cast, y = inputs_321_cast)[name = tensor("inputs_323_cast")]; + tensor var_10571 = const()[name = tensor("op_10571"), val = tensor([1])]; + tensor channels_mean_323_cast = reduce_mean(axes = var_10571, keep_dims = var_6860, x = inputs_323_cast)[name = tensor("channels_mean_323_cast")]; + tensor zero_mean_323_cast = sub(x = inputs_323_cast, y = channels_mean_323_cast)[name = tensor("zero_mean_323_cast")]; + tensor zero_mean_sq_323_cast = mul(x = zero_mean_323_cast, y = zero_mean_323_cast)[name = tensor("zero_mean_sq_323_cast")]; + tensor var_10575 = const()[name = tensor("op_10575"), val = tensor([1])]; + tensor var_10576_cast = reduce_mean(axes = var_10575, keep_dims = var_6860, x = zero_mean_sq_323_cast)[name = tensor("op_10576_cast")]; + tensor var_10577_to_fp16 = const()[name = tensor("op_10577_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10578_cast = add(x = var_10576_cast, y = var_10577_to_fp16)[name = tensor("op_10578_cast")]; + tensor denom_323_epsilon_0_to_fp16 = const()[name = tensor("denom_323_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_323_cast = rsqrt(epsilon = denom_323_epsilon_0_to_fp16, x = var_10578_cast)[name = tensor("denom_323_cast")]; + tensor out_323_cast = mul(x = zero_mean_323_cast, y = denom_323_cast)[name = tensor("out_323_cast")]; + tensor var_10582_to_fp16 = const()[name = tensor("op_10582_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1046161536)))]; + tensor var_10583_cast = add(x = out_323_cast, y = var_10582_to_fp16)[name = tensor("op_10583_cast")]; + tensor var_10585_to_fp16 = const()[name = tensor("op_10585_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1046164160)))]; + tensor input_613_cast = mul(x = var_10583_cast, y = var_10585_to_fp16)[name = tensor("input_613_cast")]; + tensor var_10593 = const()[name = tensor("op_10593"), val = tensor([1, 1])]; + tensor var_10595 = const()[name = tensor("op_10595"), val = tensor([1, 1])]; + tensor var_10597_pad_type_0 = const()[name = tensor("op_10597_pad_type_0"), val = tensor("custom")]; + tensor var_10597_pad_0 = const()[name = tensor("op_10597_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1046166784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1055997248))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1055997440)))]; + tensor var_10597_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_10595, groups = var_6865, pad = var_10597_pad_0, pad_type = var_10597_pad_type_0, strides = var_10593, weight = up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_613_cast)[name = tensor("op_10597_cast")]; + tensor var_10598_split_sizes_0 = const()[name = tensor("op_10598_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10598_axis_0 = const()[name = tensor("op_10598_axis_0"), val = tensor(1)]; + tensor var_10598_cast_0, tensor var_10598_cast_1 = split(axis = var_10598_axis_0, split_sizes = var_10598_split_sizes_0, x = var_10597_cast)[name = tensor("op_10598_cast")]; + tensor var_10600_mode_0 = const()[name = tensor("op_10600_mode_0"), val = tensor("EXACT")]; + tensor var_10600_cast = gelu(mode = var_10600_mode_0, x = var_10598_cast_1)[name = tensor("op_10600_cast")]; + tensor input_615_cast = mul(x = var_10598_cast_0, y = var_10600_cast)[name = tensor("input_615_cast")]; + tensor var_10604 = const()[name = tensor("op_10604"), val = tensor([1, 1])]; + tensor var_10606 = const()[name = tensor("op_10606"), val = tensor([1, 1])]; + tensor var_10608_pad_type_0 = const()[name = tensor("op_10608_pad_type_0"), val = tensor("custom")]; + tensor var_10608_pad_0 = const()[name = tensor("op_10608_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1056017984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1060933248))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name 
= tensor("up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1060933440)))]; + tensor var_10608_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_10606, groups = var_6865, pad = var_10608_pad_0, pad_type = var_10608_pad_type_0, strides = var_10604, weight = up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_615_cast)[name = tensor("op_10608_cast")]; + tensor hidden_states_421_cast = add(x = var_10608_cast, y = inputs_323_cast)[name = tensor("hidden_states_421_cast")]; + tensor var_10610 = const()[name = tensor("op_10610"), val = tensor([2, 1280, 32, 32])]; + tensor input_617_cast = reshape(shape = var_10610, x = hidden_states_421_cast)[name = tensor("input_617_cast")]; + tensor var_10614 = const()[name = tensor("op_10614"), val = tensor([1, 1])]; + tensor var_10616 = const()[name = tensor("op_10616"), val = tensor([1, 1])]; + tensor hidden_states_423_pad_type_0 = const()[name = tensor("hidden_states_423_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_423_pad_0 = const()[name = tensor("hidden_states_423_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1060936064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062574528))), name = tensor("up_blocks_0_attentions_1_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062575104)))]; + tensor hidden_states_423_cast = conv(bias = up_blocks_0_attentions_1_proj_out_bias_to_fp16, dilations = var_10616, groups = var_6865, pad = hidden_states_423_pad_0, pad_type = hidden_states_423_pad_type_0, strides = var_10614, weight = up_blocks_0_attentions_1_proj_out_weight_to_fp16_palettized, x = input_617_cast)[name = tensor("hidden_states_423_cast")]; + tensor hidden_states_425_cast = add(x = hidden_states_423_cast, y = hidden_states_357_cast)[name = tensor("hidden_states_425_cast")]; + tensor input_619_interleave_0 = const()[name = tensor("input_619_interleave_0"), val = tensor(false)]; + tensor input_619_cast = concat(axis = var_6865, interleave = input_619_interleave_0, values = (hidden_states_425_cast, input_115_cast))[name = tensor("input_619_cast")]; + tensor reshape_108_shape_0 = const()[name = tensor("reshape_108_shape_0"), val = tensor([2, 32, 60, 32, 32])]; + tensor reshape_108_cast = reshape(shape = reshape_108_shape_0, x = input_619_cast)[name = tensor("reshape_108_cast")]; + tensor reduce_mean_81_axes_0 = const()[name = tensor("reduce_mean_81_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_81_keep_dims_0 = const()[name = tensor("reduce_mean_81_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_81_cast = reduce_mean(axes = reduce_mean_81_axes_0, keep_dims = reduce_mean_81_keep_dims_0, x = reshape_108_cast)[name = tensor("reduce_mean_81_cast")]; + tensor sub_54_cast = sub(x = reshape_108_cast, y = reduce_mean_81_cast)[name = tensor("sub_54_cast")]; + tensor square_27_cast = square(x = sub_54_cast)[name = tensor("square_27_cast")]; + tensor reduce_mean_83_axes_0 = 
const()[name = tensor("reduce_mean_83_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_83_keep_dims_0 = const()[name = tensor("reduce_mean_83_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_83_cast = reduce_mean(axes = reduce_mean_83_axes_0, keep_dims = reduce_mean_83_keep_dims_0, x = square_27_cast)[name = tensor("reduce_mean_83_cast")]; + tensor add_54_y_0_to_fp16 = const()[name = tensor("add_54_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_54_cast = add(x = reduce_mean_83_cast, y = add_54_y_0_to_fp16)[name = tensor("add_54_cast")]; + tensor sqrt_27_cast = sqrt(x = add_54_cast)[name = tensor("sqrt_27_cast")]; + tensor real_div_27_cast = real_div(x = sub_54_cast, y = sqrt_27_cast)[name = tensor("real_div_27_cast")]; + tensor reshape_109_shape_0 = const()[name = tensor("reshape_109_shape_0"), val = tensor([2, 1920, 32, 32])]; + tensor reshape_109_cast = reshape(shape = reshape_109_shape_0, x = real_div_27_cast)[name = tensor("reshape_109_cast")]; + tensor add_55_mean_0_to_fp16 = const()[name = tensor("add_55_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062577728)))]; + tensor add_55_variance_0_to_fp16 = const()[name = tensor("add_55_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062581632)))]; + tensor add_55_gamma_0_to_fp16 = const()[name = tensor("add_55_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062585536)))]; + tensor add_55_beta_0_to_fp16 = const()[name = tensor("add_55_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062589440)))]; + tensor add_55_epsilon_0_to_fp16 = const()[name = tensor("add_55_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_55_cast = batch_norm(beta = add_55_beta_0_to_fp16, epsilon = add_55_epsilon_0_to_fp16, gamma = add_55_gamma_0_to_fp16, mean = add_55_mean_0_to_fp16, variance = add_55_variance_0_to_fp16, x = reshape_109_cast)[name = tensor("add_55_cast")]; + tensor input_623_cast = silu(x = add_55_cast)[name = tensor("input_623_cast")]; + tensor var_10634 = const()[name = tensor("op_10634"), val = tensor([1, 1])]; + tensor var_10636 = const()[name = tensor("op_10636"), val = tensor([1, 1])]; + tensor hidden_states_427_pad_type_0 = const()[name = tensor("hidden_states_427_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_427_pad_0 = const()[name = tensor("hidden_states_427_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_2_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062593344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1079182208))), name = tensor("up_blocks_0_resnets_2_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1920, 3, 3])]; + tensor up_blocks_0_resnets_2_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_2_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1079182400)))]; + tensor hidden_states_427_cast = conv(bias = up_blocks_0_resnets_2_conv1_bias_to_fp16, dilations = var_10636, groups = var_6865, pad = hidden_states_427_pad_0, pad_type = hidden_states_427_pad_type_0, strides = var_10634, weight = up_blocks_0_resnets_2_conv1_weight_to_fp16_palettized, x = input_623_cast)[name = tensor("hidden_states_427_cast")]; + 
tensor var_10642 = const()[name = tensor("op_10642"), val = tensor([1, 1])]; + tensor var_10644 = const()[name = tensor("op_10644"), val = tensor([1, 1])]; + tensor temb_21_pad_type_0 = const()[name = tensor("temb_21_pad_type_0"), val = tensor("custom")]; + tensor temb_21_pad_0 = const()[name = tensor("temb_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_2_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1079185024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080413888))), name = tensor("up_blocks_0_resnets_2_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_resnets_2_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_2_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080414080)))]; + tensor temb_21_cast = conv(bias = up_blocks_0_resnets_2_time_emb_proj_bias_to_fp16, dilations = var_10644, groups = var_6865, pad = temb_21_pad_0, pad_type = temb_21_pad_type_0, strides = var_10642, weight = up_blocks_0_resnets_2_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_21_cast")]; + tensor input_627_cast = add(x = hidden_states_427_cast, y = temb_21_cast)[name = tensor("input_627_cast")]; + tensor reshape_112_shape_0 = const()[name = tensor("reshape_112_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_112_cast = reshape(shape = reshape_112_shape_0, x = input_627_cast)[name = tensor("reshape_112_cast")]; + tensor reduce_mean_84_axes_0 = const()[name = tensor("reduce_mean_84_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_84_keep_dims_0 = const()[name = tensor("reduce_mean_84_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_84_cast = reduce_mean(axes = reduce_mean_84_axes_0, keep_dims = reduce_mean_84_keep_dims_0, x = reshape_112_cast)[name = tensor("reduce_mean_84_cast")]; + tensor sub_56_cast = sub(x = reshape_112_cast, y = reduce_mean_84_cast)[name = tensor("sub_56_cast")]; + tensor square_28_cast = square(x = sub_56_cast)[name = tensor("square_28_cast")]; + tensor reduce_mean_86_axes_0 = const()[name = tensor("reduce_mean_86_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_86_keep_dims_0 = const()[name = tensor("reduce_mean_86_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_86_cast = reduce_mean(axes = reduce_mean_86_axes_0, keep_dims = reduce_mean_86_keep_dims_0, x = square_28_cast)[name = tensor("reduce_mean_86_cast")]; + tensor add_56_y_0_to_fp16 = const()[name = tensor("add_56_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_56_cast = add(x = reduce_mean_86_cast, y = add_56_y_0_to_fp16)[name = tensor("add_56_cast")]; + tensor sqrt_28_cast = sqrt(x = add_56_cast)[name = tensor("sqrt_28_cast")]; + tensor real_div_28_cast = real_div(x = sub_56_cast, y = sqrt_28_cast)[name = tensor("real_div_28_cast")]; + tensor reshape_113_shape_0 = const()[name = tensor("reshape_113_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_113_cast = reshape(shape = reshape_113_shape_0, x = real_div_28_cast)[name = tensor("reshape_113_cast")]; + tensor add_57_gamma_0_to_fp16 = const()[name = tensor("add_57_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080416704)))]; + tensor add_57_beta_0_to_fp16 = const()[name = tensor("add_57_beta_0_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080419328)))]; + tensor add_57_epsilon_0_to_fp16 = const()[name = tensor("add_57_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_57_cast = batch_norm(beta = add_57_beta_0_to_fp16, epsilon = add_57_epsilon_0_to_fp16, gamma = add_57_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_113_cast)[name = tensor("add_57_cast")]; + tensor input_631_cast = silu(x = add_57_cast)[name = tensor("input_631_cast")]; + tensor var_10654 = const()[name = tensor("op_10654"), val = tensor([1, 1])]; + tensor var_10656 = const()[name = tensor("op_10656"), val = tensor([1, 1])]; + tensor hidden_states_429_pad_type_0 = const()[name = tensor("hidden_states_429_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_429_pad_0 = const()[name = tensor("hidden_states_429_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_2_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1080421952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1091481216))), name = tensor("up_blocks_0_resnets_2_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor up_blocks_0_resnets_2_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_2_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1091481408)))]; + tensor hidden_states_429_cast = conv(bias = up_blocks_0_resnets_2_conv2_bias_to_fp16, dilations = var_10656, groups = var_6865, pad = hidden_states_429_pad_0, pad_type = hidden_states_429_pad_type_0, strides = var_10654, weight = up_blocks_0_resnets_2_conv2_weight_to_fp16_palettized, x = input_631_cast)[name = tensor("hidden_states_429_cast")]; + tensor var_10661 = const()[name = tensor("op_10661"), val = tensor([1, 1])]; + tensor var_10663 = const()[name = tensor("op_10663"), val = tensor([1, 1])]; + tensor x_9_pad_type_0 = const()[name = tensor("x_9_pad_type_0"), val = tensor("custom")]; + tensor x_9_pad_0 = const()[name = tensor("x_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_2_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1091484032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1093941696))), name = tensor("up_blocks_0_resnets_2_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([1280, 1920, 1, 1])]; + tensor up_blocks_0_resnets_2_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_2_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1093942272)))]; + tensor x_9_cast = conv(bias = up_blocks_0_resnets_2_conv_shortcut_bias_to_fp16, dilations = var_10663, groups = var_6865, pad = x_9_pad_0, pad_type = x_9_pad_type_0, strides = var_10661, weight = up_blocks_0_resnets_2_conv_shortcut_weight_to_fp16_palettized, x = input_619_cast)[name = tensor("x_9_cast")]; + tensor hidden_states_431_cast = add(x = x_9_cast, y = hidden_states_429_cast)[name = tensor("hidden_states_431_cast")]; + tensor reshape_116_shape_0 = const()[name = tensor("reshape_116_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_116_cast = reshape(shape = reshape_116_shape_0, x = hidden_states_431_cast)[name 
= tensor("reshape_116_cast")]; + tensor reduce_mean_87_axes_0 = const()[name = tensor("reduce_mean_87_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_87_keep_dims_0 = const()[name = tensor("reduce_mean_87_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_87_cast = reduce_mean(axes = reduce_mean_87_axes_0, keep_dims = reduce_mean_87_keep_dims_0, x = reshape_116_cast)[name = tensor("reduce_mean_87_cast")]; + tensor sub_58_cast = sub(x = reshape_116_cast, y = reduce_mean_87_cast)[name = tensor("sub_58_cast")]; + tensor square_29_cast = square(x = sub_58_cast)[name = tensor("square_29_cast")]; + tensor reduce_mean_89_axes_0 = const()[name = tensor("reduce_mean_89_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_89_keep_dims_0 = const()[name = tensor("reduce_mean_89_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_89_cast = reduce_mean(axes = reduce_mean_89_axes_0, keep_dims = reduce_mean_89_keep_dims_0, x = square_29_cast)[name = tensor("reduce_mean_89_cast")]; + tensor add_58_y_0_to_fp16 = const()[name = tensor("add_58_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_58_cast = add(x = reduce_mean_89_cast, y = add_58_y_0_to_fp16)[name = tensor("add_58_cast")]; + tensor sqrt_29_cast = sqrt(x = add_58_cast)[name = tensor("sqrt_29_cast")]; + tensor real_div_29_cast = real_div(x = sub_58_cast, y = sqrt_29_cast)[name = tensor("real_div_29_cast")]; + tensor reshape_117_shape_0 = const()[name = tensor("reshape_117_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_117_cast = reshape(shape = reshape_117_shape_0, x = real_div_29_cast)[name = tensor("reshape_117_cast")]; + tensor add_59_gamma_0_to_fp16 = const()[name = tensor("add_59_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1093944896)))]; + tensor add_59_beta_0_to_fp16 = const()[name = tensor("add_59_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1093947520)))]; + tensor add_59_epsilon_0_to_fp16 = const()[name = tensor("add_59_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_59_cast = batch_norm(beta = add_59_beta_0_to_fp16, epsilon = add_59_epsilon_0_to_fp16, gamma = add_59_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_117_cast)[name = tensor("add_59_cast")]; + tensor var_10701 = const()[name = tensor("op_10701"), val = tensor([1, 1])]; + tensor var_10703 = const()[name = tensor("op_10703"), val = tensor([1, 1])]; + tensor hidden_states_433_pad_type_0 = const()[name = tensor("hidden_states_433_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_433_pad_0 = const()[name = tensor("hidden_states_433_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1093950144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1095179008))), name = tensor("up_blocks_0_attentions_2_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1095179200)))]; + tensor hidden_states_433_cast = conv(bias = up_blocks_0_attentions_2_proj_in_bias_to_fp16, dilations = var_10703, groups = var_6865, pad = 
hidden_states_433_pad_0, pad_type = hidden_states_433_pad_type_0, strides = var_10701, weight = up_blocks_0_attentions_2_proj_in_weight_to_fp16_palettized, x = add_59_cast)[name = tensor("hidden_states_433_cast")]; + tensor var_10708 = const()[name = tensor("op_10708"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_325_cast = reshape(shape = var_10708, x = hidden_states_433_cast)[name = tensor("inputs_325_cast")]; + tensor var_10718 = const()[name = tensor("op_10718"), val = tensor([1])]; + tensor channels_mean_325_cast = reduce_mean(axes = var_10718, keep_dims = var_6860, x = inputs_325_cast)[name = tensor("channels_mean_325_cast")]; + tensor zero_mean_325_cast = sub(x = inputs_325_cast, y = channels_mean_325_cast)[name = tensor("zero_mean_325_cast")]; + tensor zero_mean_sq_325_cast = mul(x = zero_mean_325_cast, y = zero_mean_325_cast)[name = tensor("zero_mean_sq_325_cast")]; + tensor var_10722 = const()[name = tensor("op_10722"), val = tensor([1])]; + tensor var_10723_cast = reduce_mean(axes = var_10722, keep_dims = var_6860, x = zero_mean_sq_325_cast)[name = tensor("op_10723_cast")]; + tensor var_10724_to_fp16 = const()[name = tensor("op_10724_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10725_cast = add(x = var_10723_cast, y = var_10724_to_fp16)[name = tensor("op_10725_cast")]; + tensor denom_325_epsilon_0_to_fp16 = const()[name = tensor("denom_325_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_325_cast = rsqrt(epsilon = denom_325_epsilon_0_to_fp16, x = var_10725_cast)[name = tensor("denom_325_cast")]; + tensor out_325_cast = mul(x = zero_mean_325_cast, y = denom_325_cast)[name = tensor("out_325_cast")]; + tensor var_10729_to_fp16 = const()[name = tensor("op_10729_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1095181824)))]; + tensor var_10730_cast = add(x = out_325_cast, y = var_10729_to_fp16)[name = tensor("op_10730_cast")]; + tensor var_10732_to_fp16 = const()[name = tensor("op_10732_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1095184448)))]; + tensor hidden_states_435_cast = mul(x = var_10730_cast, y = var_10732_to_fp16)[name = tensor("hidden_states_435_cast")]; + tensor var_10739 = const()[name = tensor("op_10739"), val = tensor([1, 1])]; + tensor var_10741 = const()[name = tensor("op_10741"), val = tensor([1, 1])]; + tensor q_217_pad_type_0 = const()[name = tensor("q_217_pad_type_0"), val = tensor("custom")]; + tensor q_217_pad_0 = const()[name = tensor("q_217_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1095187072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096006336))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_217_cast = conv(dilations = var_10741, groups = var_6865, pad = q_217_pad_0, pad_type = q_217_pad_type_0, strides = var_10739, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_435_cast)[name = tensor("q_217_cast")]; + tensor var_10745 = const()[name = tensor("op_10745"), val = tensor([1, 1])]; + tensor var_10747 = const()[name = tensor("op_10747"), val = tensor([1, 1])]; + tensor k_217_pad_type_0 = const()[name = 
tensor("k_217_pad_type_0"), val = tensor("custom")]; + tensor k_217_pad_0 = const()[name = tensor("k_217_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096006464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096825728))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_217_cast = conv(dilations = var_10747, groups = var_6865, pad = k_217_pad_0, pad_type = k_217_pad_type_0, strides = var_10745, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_435_cast)[name = tensor("k_217_cast")]; + tensor var_10751 = const()[name = tensor("op_10751"), val = tensor([1, 1])]; + tensor var_10753 = const()[name = tensor("op_10753"), val = tensor([1, 1])]; + tensor v_217_pad_type_0 = const()[name = tensor("v_217_pad_type_0"), val = tensor("custom")]; + tensor v_217_pad_0 = const()[name = tensor("v_217_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1096825856))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1098054720))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_217_cast = conv(dilations = var_10753, groups = var_6865, pad = v_217_pad_0, pad_type = v_217_pad_type_0, strides = var_10751, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_435_cast)[name = tensor("v_217_cast")]; + tensor var_10757 = const()[name = tensor("op_10757"), val = tensor([2, 20, 64, -1])]; + tensor var_10758_cast = reshape(shape = var_10757, x = q_217_cast)[name = tensor("op_10758_cast")]; + tensor var_10759 = const()[name = tensor("op_10759"), val = tensor([2, 20, 64, -1])]; + tensor var_10760_cast = reshape(shape = var_10759, x = k_217_cast)[name = tensor("op_10760_cast")]; + tensor var_10761 = const()[name = tensor("op_10761"), val = tensor([2, 20, 64, -1])]; + tensor var_10762_cast = reshape(shape = var_10761, x = v_217_cast)[name = tensor("op_10762_cast")]; + tensor attn_weights_433_transpose_x_0 = const()[name = tensor("attn_weights_433_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_433_transpose_y_0 = const()[name = tensor("attn_weights_433_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_433_cast = matmul(transpose_x = attn_weights_433_transpose_x_0, transpose_y = attn_weights_433_transpose_y_0, x = var_10758_cast, y = var_10760_cast)[name = tensor("attn_weights_433_cast")]; + tensor attn_weights_435_cast = mul(x = attn_weights_433_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_435_cast")]; + tensor var_10766_cast = softmax(axis = var_6849, x = attn_weights_435_cast)[name = tensor("op_10766_cast")]; + tensor attn_217_transpose_x_0 = const()[name = tensor("attn_217_transpose_x_0"), val = tensor(false)]; + tensor attn_217_transpose_y_0 = const()[name = tensor("attn_217_transpose_y_0"), val = tensor(true)]; + tensor attn_217_cast = matmul(transpose_x = attn_217_transpose_x_0, transpose_y = 
attn_217_transpose_y_0, x = var_10762_cast, y = var_10766_cast)[name = tensor("attn_217_cast")]; + tensor var_10770 = const()[name = tensor("op_10770"), val = tensor([2, 1280, 1, -1])]; + tensor input_635_cast = reshape(shape = var_10770, x = attn_217_cast)[name = tensor("input_635_cast")]; + tensor var_10775 = const()[name = tensor("op_10775"), val = tensor([1, 1])]; + tensor var_10777 = const()[name = tensor("op_10777"), val = tensor([1, 1])]; + tensor var_10779_pad_type_0 = const()[name = tensor("op_10779_pad_type_0"), val = tensor("custom")]; + tensor var_10779_pad_0 = const()[name = tensor("op_10779_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1098054912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1099283776))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1099283968)))]; + tensor var_10779_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_10777, groups = var_6865, pad = var_10779_pad_0, pad_type = var_10779_pad_type_0, strides = var_10775, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_635_cast)[name = tensor("op_10779_cast")]; + tensor inputs_327_cast = add(x = var_10779_cast, y = inputs_325_cast)[name = tensor("inputs_327_cast")]; + tensor var_10783 = const()[name = tensor("op_10783"), val = tensor([1])]; + tensor channels_mean_327_cast = reduce_mean(axes = var_10783, keep_dims = var_6860, x = inputs_327_cast)[name = tensor("channels_mean_327_cast")]; + tensor zero_mean_327_cast = sub(x = inputs_327_cast, y = channels_mean_327_cast)[name = tensor("zero_mean_327_cast")]; + tensor zero_mean_sq_327_cast = mul(x = zero_mean_327_cast, y = zero_mean_327_cast)[name = tensor("zero_mean_sq_327_cast")]; + tensor var_10787 = const()[name = tensor("op_10787"), val = tensor([1])]; + tensor var_10788_cast = reduce_mean(axes = var_10787, keep_dims = var_6860, x = zero_mean_sq_327_cast)[name = tensor("op_10788_cast")]; + tensor var_10789_to_fp16 = const()[name = tensor("op_10789_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10790_cast = add(x = var_10788_cast, y = var_10789_to_fp16)[name = tensor("op_10790_cast")]; + tensor denom_327_epsilon_0_to_fp16 = const()[name = tensor("denom_327_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_327_cast = rsqrt(epsilon = denom_327_epsilon_0_to_fp16, x = var_10790_cast)[name = tensor("denom_327_cast")]; + tensor out_327_cast = mul(x = zero_mean_327_cast, y = denom_327_cast)[name = tensor("out_327_cast")]; + tensor var_10794_to_fp16 = const()[name = tensor("op_10794_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1099286592)))]; + tensor var_10795_cast = add(x = out_327_cast, y = var_10794_to_fp16)[name = tensor("op_10795_cast")]; + tensor var_10797_to_fp16 = const()[name = tensor("op_10797_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1099289216)))]; + tensor hidden_states_437_cast = mul(x = var_10795_cast, y = var_10797_to_fp16)[name = tensor("hidden_states_437_cast")]; + tensor var_10804 = const()[name = tensor("op_10804"), val = tensor([1, 1])]; + tensor var_10806 = const()[name = tensor("op_10806"), val = tensor([1, 1])]; + tensor q_219_pad_type_0 = const()[name = tensor("q_219_pad_type_0"), val = tensor("custom")]; + tensor q_219_pad_0 = const()[name = tensor("q_219_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1099291840))), lut = tensor([-0x1.c78p-6, -0x1.154p-7, 0x1.148p-7, 0x1.c7p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_219_cast = conv(dilations = var_10806, groups = var_6865, pad = q_219_pad_0, pad_type = q_219_pad_type_0, strides = var_10804, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_437_cast)[name = tensor("q_219_cast")]; + tensor var_10810 = const()[name = tensor("op_10810"), val = tensor([1, 1])]; + tensor var_10812 = const()[name = tensor("op_10812"), val = tensor([1, 1])]; + tensor k_219_pad_type_0 = const()[name = tensor("k_219_pad_type_0"), val = tensor("custom")]; + tensor k_219_pad_0 = const()[name = tensor("k_219_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1099701504))), lut = tensor([-0x1.a8p-6, -0x1.ef8p-8, 0x1.f2cp-8, 0x1.a9p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_219_cast = conv(dilations = var_10812, groups = var_6865, pad = k_219_pad_0, pad_type = k_219_pad_type_0, strides = var_10810, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_219_cast")]; + tensor var_10816 = const()[name = tensor("op_10816"), val = tensor([1, 1])]; + tensor var_10818 = const()[name = tensor("op_10818"), val = tensor([1, 1])]; + tensor v_219_pad_type_0 = const()[name = tensor("v_219_pad_type_0"), val = tensor("custom")]; + tensor v_219_pad_0 = const()[name = tensor("v_219_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1100356928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1101667712))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_219_cast = conv(dilations = var_10818, groups = var_6865, pad = v_219_pad_0, pad_type = v_219_pad_type_0, strides = var_10816, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_219_cast")]; + tensor var_10822 = const()[name = tensor("op_10822"), val = tensor([2, 20, 64, -1])]; + tensor var_10823_cast = reshape(shape = var_10822, 
x = q_219_cast)[name = tensor("op_10823_cast")]; + tensor var_10824 = const()[name = tensor("op_10824"), val = tensor([2, 20, 64, -1])]; + tensor var_10825_cast = reshape(shape = var_10824, x = k_219_cast)[name = tensor("op_10825_cast")]; + tensor var_10826 = const()[name = tensor("op_10826"), val = tensor([2, 20, 64, -1])]; + tensor var_10827_cast = reshape(shape = var_10826, x = v_219_cast)[name = tensor("op_10827_cast")]; + tensor attn_weights_437_transpose_x_0 = const()[name = tensor("attn_weights_437_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_437_transpose_y_0 = const()[name = tensor("attn_weights_437_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_437_cast = matmul(transpose_x = attn_weights_437_transpose_x_0, transpose_y = attn_weights_437_transpose_y_0, x = var_10823_cast, y = var_10825_cast)[name = tensor("attn_weights_437_cast")]; + tensor attn_weights_439_cast = mul(x = attn_weights_437_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_439_cast")]; + tensor var_10831_cast = softmax(axis = var_6849, x = attn_weights_439_cast)[name = tensor("op_10831_cast")]; + tensor attn_219_transpose_x_0 = const()[name = tensor("attn_219_transpose_x_0"), val = tensor(false)]; + tensor attn_219_transpose_y_0 = const()[name = tensor("attn_219_transpose_y_0"), val = tensor(true)]; + tensor attn_219_cast = matmul(transpose_x = attn_219_transpose_x_0, transpose_y = attn_219_transpose_y_0, x = var_10827_cast, y = var_10831_cast)[name = tensor("attn_219_cast")]; + tensor var_10835 = const()[name = tensor("op_10835"), val = tensor([2, 1280, 1, -1])]; + tensor input_637_cast = reshape(shape = var_10835, x = attn_219_cast)[name = tensor("input_637_cast")]; + tensor var_10840 = const()[name = tensor("op_10840"), val = tensor([1, 1])]; + tensor var_10842 = const()[name = tensor("op_10842"), val = tensor([1, 1])]; + tensor var_10844_pad_type_0 = const()[name = tensor("op_10844_pad_type_0"), val = tensor("custom")]; + tensor var_10844_pad_0 = const()[name = tensor("op_10844_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1101667840))), lut = tensor([-0x1.48cp-7, -0x1.828p-9, 0x1.83cp-9, 0x1.49cp-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1102077504)))]; + tensor var_10844_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_10842, groups = var_6865, pad = var_10844_pad_0, pad_type = var_10844_pad_type_0, strides = var_10840, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_637_cast)[name = tensor("op_10844_cast")]; + tensor inputs_329_cast = add(x = var_10844_cast, y = inputs_327_cast)[name = tensor("inputs_329_cast")]; + tensor var_10848 = const()[name = tensor("op_10848"), val = tensor([1])]; + tensor channels_mean_329_cast = reduce_mean(axes = var_10848, keep_dims = var_6860, x = inputs_329_cast)[name = tensor("channels_mean_329_cast")]; + tensor zero_mean_329_cast = sub(x = 
inputs_329_cast, y = channels_mean_329_cast)[name = tensor("zero_mean_329_cast")]; + tensor zero_mean_sq_329_cast = mul(x = zero_mean_329_cast, y = zero_mean_329_cast)[name = tensor("zero_mean_sq_329_cast")]; + tensor var_10852 = const()[name = tensor("op_10852"), val = tensor([1])]; + tensor var_10853_cast = reduce_mean(axes = var_10852, keep_dims = var_6860, x = zero_mean_sq_329_cast)[name = tensor("op_10853_cast")]; + tensor var_10854_to_fp16 = const()[name = tensor("op_10854_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10855_cast = add(x = var_10853_cast, y = var_10854_to_fp16)[name = tensor("op_10855_cast")]; + tensor denom_329_epsilon_0_to_fp16 = const()[name = tensor("denom_329_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_329_cast = rsqrt(epsilon = denom_329_epsilon_0_to_fp16, x = var_10855_cast)[name = tensor("denom_329_cast")]; + tensor out_329_cast = mul(x = zero_mean_329_cast, y = denom_329_cast)[name = tensor("out_329_cast")]; + tensor var_10859_to_fp16 = const()[name = tensor("op_10859_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1102080128)))]; + tensor var_10860_cast = add(x = out_329_cast, y = var_10859_to_fp16)[name = tensor("op_10860_cast")]; + tensor var_10862_to_fp16 = const()[name = tensor("op_10862_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1102082752)))]; + tensor input_639_cast = mul(x = var_10860_cast, y = var_10862_to_fp16)[name = tensor("input_639_cast")]; + tensor var_10870 = const()[name = tensor("op_10870"), val = tensor([1, 1])]; + tensor var_10872 = const()[name = tensor("op_10872"), val = tensor([1, 1])]; + tensor var_10874_pad_type_0 = const()[name = tensor("op_10874_pad_type_0"), val = tensor("custom")]; + tensor var_10874_pad_0 = const()[name = tensor("op_10874_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1102085376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1111915840))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1111916032)))]; + tensor var_10874_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_10872, groups = var_6865, pad = var_10874_pad_0, pad_type = var_10874_pad_type_0, strides = var_10870, weight = up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_639_cast)[name = tensor("op_10874_cast")]; + tensor var_10875_split_sizes_0 = const()[name = tensor("op_10875_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10875_axis_0 = const()[name = tensor("op_10875_axis_0"), val = tensor(1)]; + tensor var_10875_cast_0, tensor var_10875_cast_1 = split(axis = var_10875_axis_0, split_sizes = var_10875_split_sizes_0, x = var_10874_cast)[name = tensor("op_10875_cast")]; + tensor var_10877_mode_0 = const()[name = tensor("op_10877_mode_0"), val = tensor("EXACT")]; + tensor var_10877_cast = gelu(mode = 
var_10877_mode_0, x = var_10875_cast_1)[name = tensor("op_10877_cast")]; + tensor input_641_cast = mul(x = var_10875_cast_0, y = var_10877_cast)[name = tensor("input_641_cast")]; + tensor var_10881 = const()[name = tensor("op_10881"), val = tensor([1, 1])]; + tensor var_10883 = const()[name = tensor("op_10883"), val = tensor([1, 1])]; + tensor var_10885_pad_type_0 = const()[name = tensor("op_10885_pad_type_0"), val = tensor("custom")]; + tensor var_10885_pad_0 = const()[name = tensor("op_10885_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1111936576))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1116851840))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1116852032)))]; + tensor var_10885_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_10883, groups = var_6865, pad = var_10885_pad_0, pad_type = var_10885_pad_type_0, strides = var_10881, weight = up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_641_cast)[name = tensor("op_10885_cast")]; + tensor inputs_331_cast = add(x = var_10885_cast, y = inputs_329_cast)[name = tensor("inputs_331_cast")]; + tensor var_10895 = const()[name = tensor("op_10895"), val = tensor([1])]; + tensor channels_mean_331_cast = reduce_mean(axes = var_10895, keep_dims = var_6860, x = inputs_331_cast)[name = tensor("channels_mean_331_cast")]; + tensor zero_mean_331_cast = sub(x = inputs_331_cast, y = channels_mean_331_cast)[name = tensor("zero_mean_331_cast")]; + tensor zero_mean_sq_331_cast = mul(x = zero_mean_331_cast, y = zero_mean_331_cast)[name = tensor("zero_mean_sq_331_cast")]; + tensor var_10899 = const()[name = tensor("op_10899"), val = tensor([1])]; + tensor var_10900_cast = reduce_mean(axes = var_10899, keep_dims = var_6860, x = zero_mean_sq_331_cast)[name = tensor("op_10900_cast")]; + tensor var_10901_to_fp16 = const()[name = tensor("op_10901_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10902_cast = add(x = var_10900_cast, y = var_10901_to_fp16)[name = tensor("op_10902_cast")]; + tensor denom_331_epsilon_0_to_fp16 = const()[name = tensor("denom_331_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_331_cast = rsqrt(epsilon = denom_331_epsilon_0_to_fp16, x = var_10902_cast)[name = tensor("denom_331_cast")]; + tensor out_331_cast = mul(x = zero_mean_331_cast, y = denom_331_cast)[name = tensor("out_331_cast")]; + tensor var_10906_to_fp16 = const()[name = tensor("op_10906_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1116854656)))]; + tensor var_10907_cast = add(x = out_331_cast, y = var_10906_to_fp16)[name = tensor("op_10907_cast")]; + tensor var_10909_to_fp16 = const()[name = tensor("op_10909_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1116857280)))]; + tensor hidden_states_441_cast = mul(x = var_10907_cast, y = var_10909_to_fp16)[name = 
tensor("hidden_states_441_cast")]; + tensor var_10916 = const()[name = tensor("op_10916"), val = tensor([1, 1])]; + tensor var_10918 = const()[name = tensor("op_10918"), val = tensor([1, 1])]; + tensor q_221_pad_type_0 = const()[name = tensor("q_221_pad_type_0"), val = tensor("custom")]; + tensor q_221_pad_0 = const()[name = tensor("q_221_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1116859904))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1117679168))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_221_cast = conv(dilations = var_10918, groups = var_6865, pad = q_221_pad_0, pad_type = q_221_pad_type_0, strides = var_10916, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_441_cast)[name = tensor("q_221_cast")]; + tensor var_10922 = const()[name = tensor("op_10922"), val = tensor([1, 1])]; + tensor var_10924 = const()[name = tensor("op_10924"), val = tensor([1, 1])]; + tensor k_221_pad_type_0 = const()[name = tensor("k_221_pad_type_0"), val = tensor("custom")]; + tensor k_221_pad_0 = const()[name = tensor("k_221_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1117679296))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1118498560))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_221_cast = conv(dilations = var_10924, groups = var_6865, pad = k_221_pad_0, pad_type = k_221_pad_type_0, strides = var_10922, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_441_cast)[name = tensor("k_221_cast")]; + tensor var_10928 = const()[name = tensor("op_10928"), val = tensor([1, 1])]; + tensor var_10930 = const()[name = tensor("op_10930"), val = tensor([1, 1])]; + tensor v_221_pad_type_0 = const()[name = tensor("v_221_pad_type_0"), val = tensor("custom")]; + tensor v_221_pad_0 = const()[name = tensor("v_221_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1118498688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1119727552))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_221_cast = conv(dilations = var_10930, groups = var_6865, pad = v_221_pad_0, pad_type = v_221_pad_type_0, strides = var_10928, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_441_cast)[name = tensor("v_221_cast")]; + tensor var_10934 = const()[name = tensor("op_10934"), val = tensor([2, 20, 64, -1])]; + tensor var_10935_cast = reshape(shape = var_10934, x = q_221_cast)[name = tensor("op_10935_cast")]; + tensor var_10936 = const()[name = 
tensor("op_10936"), val = tensor([2, 20, 64, -1])]; + tensor var_10937_cast = reshape(shape = var_10936, x = k_221_cast)[name = tensor("op_10937_cast")]; + tensor var_10938 = const()[name = tensor("op_10938"), val = tensor([2, 20, 64, -1])]; + tensor var_10939_cast = reshape(shape = var_10938, x = v_221_cast)[name = tensor("op_10939_cast")]; + tensor attn_weights_441_transpose_x_0 = const()[name = tensor("attn_weights_441_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_441_transpose_y_0 = const()[name = tensor("attn_weights_441_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_441_cast = matmul(transpose_x = attn_weights_441_transpose_x_0, transpose_y = attn_weights_441_transpose_y_0, x = var_10935_cast, y = var_10937_cast)[name = tensor("attn_weights_441_cast")]; + tensor attn_weights_443_cast = mul(x = attn_weights_441_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_443_cast")]; + tensor var_10943_cast = softmax(axis = var_6849, x = attn_weights_443_cast)[name = tensor("op_10943_cast")]; + tensor attn_221_transpose_x_0 = const()[name = tensor("attn_221_transpose_x_0"), val = tensor(false)]; + tensor attn_221_transpose_y_0 = const()[name = tensor("attn_221_transpose_y_0"), val = tensor(true)]; + tensor attn_221_cast = matmul(transpose_x = attn_221_transpose_x_0, transpose_y = attn_221_transpose_y_0, x = var_10939_cast, y = var_10943_cast)[name = tensor("attn_221_cast")]; + tensor var_10947 = const()[name = tensor("op_10947"), val = tensor([2, 1280, 1, -1])]; + tensor input_643_cast = reshape(shape = var_10947, x = attn_221_cast)[name = tensor("input_643_cast")]; + tensor var_10952 = const()[name = tensor("op_10952"), val = tensor([1, 1])]; + tensor var_10954 = const()[name = tensor("op_10954"), val = tensor([1, 1])]; + tensor var_10956_pad_type_0 = const()[name = tensor("op_10956_pad_type_0"), val = tensor("custom")]; + tensor var_10956_pad_0 = const()[name = tensor("op_10956_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1119727744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1120956608))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1120956800)))]; + tensor var_10956_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_10954, groups = var_6865, pad = var_10956_pad_0, pad_type = var_10956_pad_type_0, strides = var_10952, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_643_cast)[name = tensor("op_10956_cast")]; + tensor inputs_333_cast = add(x = var_10956_cast, y = inputs_331_cast)[name = tensor("inputs_333_cast")]; + tensor var_10960 = const()[name = tensor("op_10960"), val = tensor([1])]; + tensor channels_mean_333_cast = reduce_mean(axes = var_10960, keep_dims = var_6860, x = inputs_333_cast)[name = tensor("channels_mean_333_cast")]; + tensor zero_mean_333_cast = sub(x = inputs_333_cast, y = channels_mean_333_cast)[name = 
tensor("zero_mean_333_cast")]; + tensor zero_mean_sq_333_cast = mul(x = zero_mean_333_cast, y = zero_mean_333_cast)[name = tensor("zero_mean_sq_333_cast")]; + tensor var_10964 = const()[name = tensor("op_10964"), val = tensor([1])]; + tensor var_10965_cast = reduce_mean(axes = var_10964, keep_dims = var_6860, x = zero_mean_sq_333_cast)[name = tensor("op_10965_cast")]; + tensor var_10966_to_fp16 = const()[name = tensor("op_10966_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10967_cast = add(x = var_10965_cast, y = var_10966_to_fp16)[name = tensor("op_10967_cast")]; + tensor denom_333_epsilon_0_to_fp16 = const()[name = tensor("denom_333_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_333_cast = rsqrt(epsilon = denom_333_epsilon_0_to_fp16, x = var_10967_cast)[name = tensor("denom_333_cast")]; + tensor out_333_cast = mul(x = zero_mean_333_cast, y = denom_333_cast)[name = tensor("out_333_cast")]; + tensor var_10971_to_fp16 = const()[name = tensor("op_10971_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1120959424)))]; + tensor var_10972_cast = add(x = out_333_cast, y = var_10971_to_fp16)[name = tensor("op_10972_cast")]; + tensor var_10974_to_fp16 = const()[name = tensor("op_10974_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1120962048)))]; + tensor hidden_states_443_cast = mul(x = var_10972_cast, y = var_10974_to_fp16)[name = tensor("hidden_states_443_cast")]; + tensor var_10981 = const()[name = tensor("op_10981"), val = tensor([1, 1])]; + tensor var_10983 = const()[name = tensor("op_10983"), val = tensor([1, 1])]; + tensor q_223_pad_type_0 = const()[name = tensor("q_223_pad_type_0"), val = tensor("custom")]; + tensor q_223_pad_0 = const()[name = tensor("q_223_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1120964672))), lut = tensor([-0x1.304p-5, -0x1.668p-7, 0x1.624p-7, 0x1.2f8p-5]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_223_cast = conv(dilations = var_10983, groups = var_6865, pad = q_223_pad_0, pad_type = q_223_pad_type_0, strides = var_10981, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_443_cast)[name = tensor("q_223_cast")]; + tensor var_10987 = const()[name = tensor("op_10987"), val = tensor([1, 1])]; + tensor var_10989 = const()[name = tensor("op_10989"), val = tensor([1, 1])]; + tensor k_223_pad_type_0 = const()[name = tensor("k_223_pad_type_0"), val = tensor("custom")]; + tensor k_223_pad_0 = const()[name = tensor("k_223_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1121374336))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1122685120))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_223_cast = conv(dilations = var_10989, groups = var_6865, pad = k_223_pad_0, pad_type = k_223_pad_type_0, strides = var_10987, weight = 
up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_223_cast")]; + tensor var_10993 = const()[name = tensor("op_10993"), val = tensor([1, 1])]; + tensor var_10995 = const()[name = tensor("op_10995"), val = tensor([1, 1])]; + tensor v_223_pad_type_0 = const()[name = tensor("v_223_pad_type_0"), val = tensor("custom")]; + tensor v_223_pad_0 = const()[name = tensor("v_223_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1122685248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1123996032))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_223_cast = conv(dilations = var_10995, groups = var_6865, pad = v_223_pad_0, pad_type = v_223_pad_type_0, strides = var_10993, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_223_cast")]; + tensor var_10999 = const()[name = tensor("op_10999"), val = tensor([2, 20, 64, -1])]; + tensor var_11000_cast = reshape(shape = var_10999, x = q_223_cast)[name = tensor("op_11000_cast")]; + tensor var_11001 = const()[name = tensor("op_11001"), val = tensor([2, 20, 64, -1])]; + tensor var_11002_cast = reshape(shape = var_11001, x = k_223_cast)[name = tensor("op_11002_cast")]; + tensor var_11003 = const()[name = tensor("op_11003"), val = tensor([2, 20, 64, -1])]; + tensor var_11004_cast = reshape(shape = var_11003, x = v_223_cast)[name = tensor("op_11004_cast")]; + tensor attn_weights_445_transpose_x_0 = const()[name = tensor("attn_weights_445_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_445_transpose_y_0 = const()[name = tensor("attn_weights_445_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_445_cast = matmul(transpose_x = attn_weights_445_transpose_x_0, transpose_y = attn_weights_445_transpose_y_0, x = var_11000_cast, y = var_11002_cast)[name = tensor("attn_weights_445_cast")]; + tensor attn_weights_447_cast = mul(x = attn_weights_445_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_447_cast")]; + tensor var_11008_cast = softmax(axis = var_6849, x = attn_weights_447_cast)[name = tensor("op_11008_cast")]; + tensor attn_223_transpose_x_0 = const()[name = tensor("attn_223_transpose_x_0"), val = tensor(false)]; + tensor attn_223_transpose_y_0 = const()[name = tensor("attn_223_transpose_y_0"), val = tensor(true)]; + tensor attn_223_cast = matmul(transpose_x = attn_223_transpose_x_0, transpose_y = attn_223_transpose_y_0, x = var_11004_cast, y = var_11008_cast)[name = tensor("attn_223_cast")]; + tensor var_11012 = const()[name = tensor("op_11012"), val = tensor([2, 1280, 1, -1])]; + tensor input_645_cast = reshape(shape = var_11012, x = attn_223_cast)[name = tensor("input_645_cast")]; + tensor var_11017 = const()[name = tensor("op_11017"), val = tensor([1, 1])]; + tensor var_11019 = const()[name = tensor("op_11019"), val = tensor([1, 1])]; + tensor var_11021_pad_type_0 = const()[name = tensor("op_11021_pad_type_0"), val = tensor("custom")]; + tensor var_11021_pad_0 = const()[name = tensor("op_11021_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1123996160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1124815424))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1124815552)))]; + tensor var_11021_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_11019, groups = var_6865, pad = var_11021_pad_0, pad_type = var_11021_pad_type_0, strides = var_11017, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_645_cast)[name = tensor("op_11021_cast")]; + tensor inputs_335_cast = add(x = var_11021_cast, y = inputs_333_cast)[name = tensor("inputs_335_cast")]; + tensor var_11025 = const()[name = tensor("op_11025"), val = tensor([1])]; + tensor channels_mean_335_cast = reduce_mean(axes = var_11025, keep_dims = var_6860, x = inputs_335_cast)[name = tensor("channels_mean_335_cast")]; + tensor zero_mean_335_cast = sub(x = inputs_335_cast, y = channels_mean_335_cast)[name = tensor("zero_mean_335_cast")]; + tensor zero_mean_sq_335_cast = mul(x = zero_mean_335_cast, y = zero_mean_335_cast)[name = tensor("zero_mean_sq_335_cast")]; + tensor var_11029 = const()[name = tensor("op_11029"), val = tensor([1])]; + tensor var_11030_cast = reduce_mean(axes = var_11029, keep_dims = var_6860, x = zero_mean_sq_335_cast)[name = tensor("op_11030_cast")]; + tensor var_11031_to_fp16 = const()[name = tensor("op_11031_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11032_cast = add(x = var_11030_cast, y = var_11031_to_fp16)[name = tensor("op_11032_cast")]; + tensor denom_335_epsilon_0_to_fp16 = const()[name = tensor("denom_335_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_335_cast = rsqrt(epsilon = denom_335_epsilon_0_to_fp16, x = var_11032_cast)[name = tensor("denom_335_cast")]; + tensor out_335_cast = mul(x = zero_mean_335_cast, y = denom_335_cast)[name = tensor("out_335_cast")]; + tensor var_11036_to_fp16 = const()[name = tensor("op_11036_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1124818176)))]; + tensor var_11037_cast = add(x = out_335_cast, y = var_11036_to_fp16)[name = tensor("op_11037_cast")]; + tensor var_11039_to_fp16 = const()[name = tensor("op_11039_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1124820800)))]; + tensor input_647_cast = mul(x = var_11037_cast, y = var_11039_to_fp16)[name = tensor("input_647_cast")]; + tensor var_11047 = const()[name = tensor("op_11047"), val = tensor([1, 1])]; + tensor var_11049 = const()[name = tensor("op_11049"), val = tensor([1, 1])]; + tensor var_11051_pad_type_0 = const()[name = tensor("op_11051_pad_type_0"), val = tensor("custom")]; + tensor var_11051_pad_0 = const()[name = tensor("op_11051_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1124823424))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1134653888))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1134654080)))]; + tensor var_11051_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_11049, groups = var_6865, pad = var_11051_pad_0, pad_type = var_11051_pad_type_0, strides = var_11047, weight = up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_647_cast)[name = tensor("op_11051_cast")]; + tensor var_11052_split_sizes_0 = const()[name = tensor("op_11052_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11052_axis_0 = const()[name = tensor("op_11052_axis_0"), val = tensor(1)]; + tensor var_11052_cast_0, tensor var_11052_cast_1 = split(axis = var_11052_axis_0, split_sizes = var_11052_split_sizes_0, x = var_11051_cast)[name = tensor("op_11052_cast")]; + tensor var_11054_mode_0 = const()[name = tensor("op_11054_mode_0"), val = tensor("EXACT")]; + tensor var_11054_cast = gelu(mode = var_11054_mode_0, x = var_11052_cast_1)[name = tensor("op_11054_cast")]; + tensor input_649_cast = mul(x = var_11052_cast_0, y = var_11054_cast)[name = tensor("input_649_cast")]; + tensor var_11058 = const()[name = tensor("op_11058"), val = tensor([1, 1])]; + tensor var_11060 = const()[name = tensor("op_11060"), val = tensor([1, 1])]; + tensor var_11062_pad_type_0 = const()[name = tensor("op_11062_pad_type_0"), val = tensor("custom")]; + tensor var_11062_pad_0 = const()[name = tensor("op_11062_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1134674624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1139589888))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1139590080)))]; + tensor var_11062_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_11060, groups = var_6865, pad = var_11062_pad_0, pad_type = var_11062_pad_type_0, strides = var_11058, weight = up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_649_cast)[name = tensor("op_11062_cast")]; + tensor inputs_337_cast = add(x = var_11062_cast, y = inputs_335_cast)[name = tensor("inputs_337_cast")]; + tensor var_11072 = const()[name = tensor("op_11072"), val = tensor([1])]; + tensor channels_mean_337_cast = reduce_mean(axes = var_11072, keep_dims = var_6860, x = inputs_337_cast)[name = tensor("channels_mean_337_cast")]; + tensor zero_mean_337_cast = sub(x = inputs_337_cast, y = channels_mean_337_cast)[name = tensor("zero_mean_337_cast")]; + tensor zero_mean_sq_337_cast = mul(x = 
zero_mean_337_cast, y = zero_mean_337_cast)[name = tensor("zero_mean_sq_337_cast")]; + tensor var_11076 = const()[name = tensor("op_11076"), val = tensor([1])]; + tensor var_11077_cast = reduce_mean(axes = var_11076, keep_dims = var_6860, x = zero_mean_sq_337_cast)[name = tensor("op_11077_cast")]; + tensor var_11078_to_fp16 = const()[name = tensor("op_11078_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11079_cast = add(x = var_11077_cast, y = var_11078_to_fp16)[name = tensor("op_11079_cast")]; + tensor denom_337_epsilon_0_to_fp16 = const()[name = tensor("denom_337_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_337_cast = rsqrt(epsilon = denom_337_epsilon_0_to_fp16, x = var_11079_cast)[name = tensor("denom_337_cast")]; + tensor out_337_cast = mul(x = zero_mean_337_cast, y = denom_337_cast)[name = tensor("out_337_cast")]; + tensor var_11083_to_fp16 = const()[name = tensor("op_11083_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1139592704)))]; + tensor var_11084_cast = add(x = out_337_cast, y = var_11083_to_fp16)[name = tensor("op_11084_cast")]; + tensor var_11086_to_fp16 = const()[name = tensor("op_11086_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1139595328)))]; + tensor hidden_states_447_cast = mul(x = var_11084_cast, y = var_11086_to_fp16)[name = tensor("hidden_states_447_cast")]; + tensor var_11093 = const()[name = tensor("op_11093"), val = tensor([1, 1])]; + tensor var_11095 = const()[name = tensor("op_11095"), val = tensor([1, 1])]; + tensor q_225_pad_type_0 = const()[name = tensor("q_225_pad_type_0"), val = tensor("custom")]; + tensor q_225_pad_0 = const()[name = tensor("q_225_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1139597952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1140417216))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_225_cast = conv(dilations = var_11095, groups = var_6865, pad = q_225_pad_0, pad_type = q_225_pad_type_0, strides = var_11093, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_447_cast)[name = tensor("q_225_cast")]; + tensor var_11099 = const()[name = tensor("op_11099"), val = tensor([1, 1])]; + tensor var_11101 = const()[name = tensor("op_11101"), val = tensor([1, 1])]; + tensor k_225_pad_type_0 = const()[name = tensor("k_225_pad_type_0"), val = tensor("custom")]; + tensor k_225_pad_0 = const()[name = tensor("k_225_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1140417344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1141236608))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_225_cast = conv(dilations = var_11101, groups = var_6865, pad = k_225_pad_0, pad_type = k_225_pad_type_0, strides = var_11099, weight = 
up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_447_cast)[name = tensor("k_225_cast")]; + tensor var_11105 = const()[name = tensor("op_11105"), val = tensor([1, 1])]; + tensor var_11107 = const()[name = tensor("op_11107"), val = tensor([1, 1])]; + tensor v_225_pad_type_0 = const()[name = tensor("v_225_pad_type_0"), val = tensor("custom")]; + tensor v_225_pad_0 = const()[name = tensor("v_225_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1141236736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1142465600))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_225_cast = conv(dilations = var_11107, groups = var_6865, pad = v_225_pad_0, pad_type = v_225_pad_type_0, strides = var_11105, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_447_cast)[name = tensor("v_225_cast")]; + tensor var_11111 = const()[name = tensor("op_11111"), val = tensor([2, 20, 64, -1])]; + tensor var_11112_cast = reshape(shape = var_11111, x = q_225_cast)[name = tensor("op_11112_cast")]; + tensor var_11113 = const()[name = tensor("op_11113"), val = tensor([2, 20, 64, -1])]; + tensor var_11114_cast = reshape(shape = var_11113, x = k_225_cast)[name = tensor("op_11114_cast")]; + tensor var_11115 = const()[name = tensor("op_11115"), val = tensor([2, 20, 64, -1])]; + tensor var_11116_cast = reshape(shape = var_11115, x = v_225_cast)[name = tensor("op_11116_cast")]; + tensor attn_weights_449_transpose_x_0 = const()[name = tensor("attn_weights_449_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_449_transpose_y_0 = const()[name = tensor("attn_weights_449_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_449_cast = matmul(transpose_x = attn_weights_449_transpose_x_0, transpose_y = attn_weights_449_transpose_y_0, x = var_11112_cast, y = var_11114_cast)[name = tensor("attn_weights_449_cast")]; + tensor attn_weights_451_cast = mul(x = attn_weights_449_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_451_cast")]; + tensor var_11120_cast = softmax(axis = var_6849, x = attn_weights_451_cast)[name = tensor("op_11120_cast")]; + tensor attn_225_transpose_x_0 = const()[name = tensor("attn_225_transpose_x_0"), val = tensor(false)]; + tensor attn_225_transpose_y_0 = const()[name = tensor("attn_225_transpose_y_0"), val = tensor(true)]; + tensor attn_225_cast = matmul(transpose_x = attn_225_transpose_x_0, transpose_y = attn_225_transpose_y_0, x = var_11116_cast, y = var_11120_cast)[name = tensor("attn_225_cast")]; + tensor var_11124 = const()[name = tensor("op_11124"), val = tensor([2, 1280, 1, -1])]; + tensor input_651_cast = reshape(shape = var_11124, x = attn_225_cast)[name = tensor("input_651_cast")]; + tensor var_11129 = const()[name = tensor("op_11129"), val = tensor([1, 1])]; + tensor var_11131 = const()[name = tensor("op_11131"), val = tensor([1, 1])]; + tensor var_11133_pad_type_0 = const()[name = tensor("op_11133_pad_type_0"), val = tensor("custom")]; + tensor var_11133_pad_0 = const()[name = tensor("op_11133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1142465792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1143694656))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1143694848)))]; + tensor var_11133_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_11131, groups = var_6865, pad = var_11133_pad_0, pad_type = var_11133_pad_type_0, strides = var_11129, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_651_cast)[name = tensor("op_11133_cast")]; + tensor inputs_339_cast = add(x = var_11133_cast, y = inputs_337_cast)[name = tensor("inputs_339_cast")]; + tensor var_11137 = const()[name = tensor("op_11137"), val = tensor([1])]; + tensor channels_mean_339_cast = reduce_mean(axes = var_11137, keep_dims = var_6860, x = inputs_339_cast)[name = tensor("channels_mean_339_cast")]; + tensor zero_mean_339_cast = sub(x = inputs_339_cast, y = channels_mean_339_cast)[name = tensor("zero_mean_339_cast")]; + tensor zero_mean_sq_339_cast = mul(x = zero_mean_339_cast, y = zero_mean_339_cast)[name = tensor("zero_mean_sq_339_cast")]; + tensor var_11141 = const()[name = tensor("op_11141"), val = tensor([1])]; + tensor var_11142_cast = reduce_mean(axes = var_11141, keep_dims = var_6860, x = zero_mean_sq_339_cast)[name = tensor("op_11142_cast")]; + tensor var_11143_to_fp16 = const()[name = tensor("op_11143_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11144_cast = add(x = var_11142_cast, y = var_11143_to_fp16)[name = tensor("op_11144_cast")]; + tensor denom_339_epsilon_0_to_fp16 = const()[name = tensor("denom_339_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_339_cast = rsqrt(epsilon = denom_339_epsilon_0_to_fp16, x = var_11144_cast)[name = tensor("denom_339_cast")]; + tensor out_339_cast = mul(x = zero_mean_339_cast, y = denom_339_cast)[name = tensor("out_339_cast")]; + tensor var_11148_to_fp16 = const()[name = tensor("op_11148_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1143697472)))]; + tensor var_11149_cast = add(x = out_339_cast, y = var_11148_to_fp16)[name = tensor("op_11149_cast")]; + tensor var_11151_to_fp16 = const()[name = tensor("op_11151_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1143700096)))]; + tensor hidden_states_449_cast = mul(x = var_11149_cast, y = var_11151_to_fp16)[name = tensor("hidden_states_449_cast")]; + tensor var_11158 = const()[name = tensor("op_11158"), val = tensor([1, 1])]; + tensor var_11160 = const()[name = tensor("op_11160"), val = tensor([1, 1])]; + tensor q_227_pad_type_0 = const()[name = tensor("q_227_pad_type_0"), val = tensor("custom")]; + tensor q_227_pad_0 = const()[name = tensor("q_227_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1143702720))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1144521984))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_227_cast = conv(dilations = var_11160, groups = var_6865, pad = q_227_pad_0, pad_type = q_227_pad_type_0, strides = var_11158, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_449_cast)[name = tensor("q_227_cast")]; + tensor var_11164 = const()[name = tensor("op_11164"), val = tensor([1, 1])]; + tensor var_11166 = const()[name = tensor("op_11166"), val = tensor([1, 1])]; + tensor k_227_pad_type_0 = const()[name = tensor("k_227_pad_type_0"), val = tensor("custom")]; + tensor k_227_pad_0 = const()[name = tensor("k_227_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1144522112))), lut = tensor([-0x1.fdcp-6, -0x1.22cp-7, 0x1.24cp-7, 0x1.ff4p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_227_cast = conv(dilations = var_11166, groups = var_6865, pad = k_227_pad_0, pad_type = k_227_pad_type_0, strides = var_11164, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_227_cast")]; + tensor var_11170 = const()[name = tensor("op_11170"), val = tensor([1, 1])]; + tensor var_11172 = const()[name = tensor("op_11172"), val = tensor([1, 1])]; + tensor v_227_pad_type_0 = const()[name = tensor("v_227_pad_type_0"), val = tensor("custom")]; + tensor v_227_pad_0 = const()[name = tensor("v_227_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1145177536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1146488320))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_227_cast = conv(dilations = var_11172, groups = var_6865, pad = v_227_pad_0, pad_type = v_227_pad_type_0, strides = var_11170, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_227_cast")]; + tensor var_11176 = const()[name = tensor("op_11176"), val = tensor([2, 20, 64, -1])]; + tensor var_11177_cast = reshape(shape = var_11176, x = q_227_cast)[name = tensor("op_11177_cast")]; + tensor var_11178 = const()[name = tensor("op_11178"), val = tensor([2, 20, 64, -1])]; + tensor var_11179_cast = reshape(shape = var_11178, x = k_227_cast)[name = tensor("op_11179_cast")]; + tensor var_11180 = const()[name = tensor("op_11180"), val = tensor([2, 20, 64, -1])]; + tensor var_11181_cast = reshape(shape = var_11180, x = v_227_cast)[name = tensor("op_11181_cast")]; + tensor attn_weights_453_transpose_x_0 = const()[name = tensor("attn_weights_453_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_453_transpose_y_0 = const()[name = tensor("attn_weights_453_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_453_cast = matmul(transpose_x = 
attn_weights_453_transpose_x_0, transpose_y = attn_weights_453_transpose_y_0, x = var_11177_cast, y = var_11179_cast)[name = tensor("attn_weights_453_cast")]; + tensor attn_weights_455_cast = mul(x = attn_weights_453_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_455_cast")]; + tensor var_11185_cast = softmax(axis = var_6849, x = attn_weights_455_cast)[name = tensor("op_11185_cast")]; + tensor attn_227_transpose_x_0 = const()[name = tensor("attn_227_transpose_x_0"), val = tensor(false)]; + tensor attn_227_transpose_y_0 = const()[name = tensor("attn_227_transpose_y_0"), val = tensor(true)]; + tensor attn_227_cast = matmul(transpose_x = attn_227_transpose_x_0, transpose_y = attn_227_transpose_y_0, x = var_11181_cast, y = var_11185_cast)[name = tensor("attn_227_cast")]; + tensor var_11189 = const()[name = tensor("op_11189"), val = tensor([2, 1280, 1, -1])]; + tensor input_653_cast = reshape(shape = var_11189, x = attn_227_cast)[name = tensor("input_653_cast")]; + tensor var_11194 = const()[name = tensor("op_11194"), val = tensor([1, 1])]; + tensor var_11196 = const()[name = tensor("op_11196"), val = tensor([1, 1])]; + tensor var_11198_pad_type_0 = const()[name = tensor("op_11198_pad_type_0"), val = tensor("custom")]; + tensor var_11198_pad_0 = const()[name = tensor("op_11198_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1146488448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1147307712))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1147307840)))]; + tensor var_11198_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_11196, groups = var_6865, pad = var_11198_pad_0, pad_type = var_11198_pad_type_0, strides = var_11194, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_653_cast)[name = tensor("op_11198_cast")]; + tensor inputs_341_cast = add(x = var_11198_cast, y = inputs_339_cast)[name = tensor("inputs_341_cast")]; + tensor var_11202 = const()[name = tensor("op_11202"), val = tensor([1])]; + tensor channels_mean_341_cast = reduce_mean(axes = var_11202, keep_dims = var_6860, x = inputs_341_cast)[name = tensor("channels_mean_341_cast")]; + tensor zero_mean_341_cast = sub(x = inputs_341_cast, y = channels_mean_341_cast)[name = tensor("zero_mean_341_cast")]; + tensor zero_mean_sq_341_cast = mul(x = zero_mean_341_cast, y = zero_mean_341_cast)[name = tensor("zero_mean_sq_341_cast")]; + tensor var_11206 = const()[name = tensor("op_11206"), val = tensor([1])]; + tensor var_11207_cast = reduce_mean(axes = var_11206, keep_dims = var_6860, x = zero_mean_sq_341_cast)[name = tensor("op_11207_cast")]; + tensor var_11208_to_fp16 = const()[name = tensor("op_11208_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11209_cast = add(x = var_11207_cast, y = var_11208_to_fp16)[name = tensor("op_11209_cast")]; + tensor denom_341_epsilon_0_to_fp16 = const()[name = 
tensor("denom_341_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_341_cast = rsqrt(epsilon = denom_341_epsilon_0_to_fp16, x = var_11209_cast)[name = tensor("denom_341_cast")]; + tensor out_341_cast = mul(x = zero_mean_341_cast, y = denom_341_cast)[name = tensor("out_341_cast")]; + tensor var_11213_to_fp16 = const()[name = tensor("op_11213_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1147310464)))]; + tensor var_11214_cast = add(x = out_341_cast, y = var_11213_to_fp16)[name = tensor("op_11214_cast")]; + tensor var_11216_to_fp16 = const()[name = tensor("op_11216_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1147313088)))]; + tensor input_655_cast = mul(x = var_11214_cast, y = var_11216_to_fp16)[name = tensor("input_655_cast")]; + tensor var_11224 = const()[name = tensor("op_11224"), val = tensor([1, 1])]; + tensor var_11226 = const()[name = tensor("op_11226"), val = tensor([1, 1])]; + tensor var_11228_pad_type_0 = const()[name = tensor("op_11228_pad_type_0"), val = tensor("custom")]; + tensor var_11228_pad_0 = const()[name = tensor("op_11228_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1147315712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1157146176))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1157146368)))]; + tensor var_11228_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_11226, groups = var_6865, pad = var_11228_pad_0, pad_type = var_11228_pad_type_0, strides = var_11224, weight = up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_655_cast)[name = tensor("op_11228_cast")]; + tensor var_11229_split_sizes_0 = const()[name = tensor("op_11229_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11229_axis_0 = const()[name = tensor("op_11229_axis_0"), val = tensor(1)]; + tensor var_11229_cast_0, tensor var_11229_cast_1 = split(axis = var_11229_axis_0, split_sizes = var_11229_split_sizes_0, x = var_11228_cast)[name = tensor("op_11229_cast")]; + tensor var_11231_mode_0 = const()[name = tensor("op_11231_mode_0"), val = tensor("EXACT")]; + tensor var_11231_cast = gelu(mode = var_11231_mode_0, x = var_11229_cast_1)[name = tensor("op_11231_cast")]; + tensor input_657_cast = mul(x = var_11229_cast_0, y = var_11231_cast)[name = tensor("input_657_cast")]; + tensor var_11235 = const()[name = tensor("op_11235"), val = tensor([1, 1])]; + tensor var_11237 = const()[name = tensor("op_11237"), val = tensor([1, 1])]; + tensor var_11239_pad_type_0 = const()[name = tensor("op_11239_pad_type_0"), val = tensor("custom")]; + tensor var_11239_pad_0 = const()[name = tensor("op_11239_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1157166912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1162082176))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1162082368)))]; + tensor var_11239_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_11237, groups = var_6865, pad = var_11239_pad_0, pad_type = var_11239_pad_type_0, strides = var_11235, weight = up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_657_cast)[name = tensor("op_11239_cast")]; + tensor inputs_343_cast = add(x = var_11239_cast, y = inputs_341_cast)[name = tensor("inputs_343_cast")]; + tensor var_11249 = const()[name = tensor("op_11249"), val = tensor([1])]; + tensor channels_mean_343_cast = reduce_mean(axes = var_11249, keep_dims = var_6860, x = inputs_343_cast)[name = tensor("channels_mean_343_cast")]; + tensor zero_mean_343_cast = sub(x = inputs_343_cast, y = channels_mean_343_cast)[name = tensor("zero_mean_343_cast")]; + tensor zero_mean_sq_343_cast = mul(x = zero_mean_343_cast, y = zero_mean_343_cast)[name = tensor("zero_mean_sq_343_cast")]; + tensor var_11253 = const()[name = tensor("op_11253"), val = tensor([1])]; + tensor var_11254_cast = reduce_mean(axes = var_11253, keep_dims = var_6860, x = zero_mean_sq_343_cast)[name = tensor("op_11254_cast")]; + tensor var_11255_to_fp16 = const()[name = tensor("op_11255_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11256_cast = add(x = var_11254_cast, y = var_11255_to_fp16)[name = tensor("op_11256_cast")]; + tensor denom_343_epsilon_0_to_fp16 = const()[name = tensor("denom_343_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_343_cast = rsqrt(epsilon = denom_343_epsilon_0_to_fp16, x = var_11256_cast)[name = tensor("denom_343_cast")]; + tensor out_343_cast = mul(x = zero_mean_343_cast, y = denom_343_cast)[name = tensor("out_343_cast")]; + tensor var_11260_to_fp16 = const()[name = tensor("op_11260_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1162084992)))]; + tensor var_11261_cast = add(x = out_343_cast, y = var_11260_to_fp16)[name = tensor("op_11261_cast")]; + tensor var_11263_to_fp16 = const()[name = tensor("op_11263_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1162087616)))]; + tensor hidden_states_453_cast = mul(x = var_11261_cast, y = var_11263_to_fp16)[name = tensor("hidden_states_453_cast")]; + tensor var_11270 = const()[name = tensor("op_11270"), val = tensor([1, 1])]; + tensor var_11272 = const()[name = tensor("op_11272"), val = tensor([1, 1])]; + tensor q_229_pad_type_0 = const()[name = tensor("q_229_pad_type_0"), val = tensor("custom")]; + tensor q_229_pad_0 = const()[name = tensor("q_229_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1162090240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1162909504))), name = 
tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_229_cast = conv(dilations = var_11272, groups = var_6865, pad = q_229_pad_0, pad_type = q_229_pad_type_0, strides = var_11270, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_453_cast)[name = tensor("q_229_cast")]; + tensor var_11276 = const()[name = tensor("op_11276"), val = tensor([1, 1])]; + tensor var_11278 = const()[name = tensor("op_11278"), val = tensor([1, 1])]; + tensor k_229_pad_type_0 = const()[name = tensor("k_229_pad_type_0"), val = tensor("custom")]; + tensor k_229_pad_0 = const()[name = tensor("k_229_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1162909632))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1163728896))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_229_cast = conv(dilations = var_11278, groups = var_6865, pad = k_229_pad_0, pad_type = k_229_pad_type_0, strides = var_11276, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_453_cast)[name = tensor("k_229_cast")]; + tensor var_11282 = const()[name = tensor("op_11282"), val = tensor([1, 1])]; + tensor var_11284 = const()[name = tensor("op_11284"), val = tensor([1, 1])]; + tensor v_229_pad_type_0 = const()[name = tensor("v_229_pad_type_0"), val = tensor("custom")]; + tensor v_229_pad_0 = const()[name = tensor("v_229_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1163729024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1164957888))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_229_cast = conv(dilations = var_11284, groups = var_6865, pad = v_229_pad_0, pad_type = v_229_pad_type_0, strides = var_11282, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_453_cast)[name = tensor("v_229_cast")]; + tensor var_11288 = const()[name = tensor("op_11288"), val = tensor([2, 20, 64, -1])]; + tensor var_11289_cast = reshape(shape = var_11288, x = q_229_cast)[name = tensor("op_11289_cast")]; + tensor var_11290 = const()[name = tensor("op_11290"), val = tensor([2, 20, 64, -1])]; + tensor var_11291_cast = reshape(shape = var_11290, x = k_229_cast)[name = tensor("op_11291_cast")]; + tensor var_11292 = const()[name = tensor("op_11292"), val = tensor([2, 20, 64, -1])]; + tensor var_11293_cast = reshape(shape = var_11292, x = v_229_cast)[name = tensor("op_11293_cast")]; + tensor attn_weights_457_transpose_x_0 = const()[name = tensor("attn_weights_457_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_457_transpose_y_0 = const()[name = tensor("attn_weights_457_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_457_cast = matmul(transpose_x = attn_weights_457_transpose_x_0, transpose_y = 
attn_weights_457_transpose_y_0, x = var_11289_cast, y = var_11291_cast)[name = tensor("attn_weights_457_cast")]; + tensor attn_weights_459_cast = mul(x = attn_weights_457_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_459_cast")]; + tensor var_11297_cast = softmax(axis = var_6849, x = attn_weights_459_cast)[name = tensor("op_11297_cast")]; + tensor attn_229_transpose_x_0 = const()[name = tensor("attn_229_transpose_x_0"), val = tensor(false)]; + tensor attn_229_transpose_y_0 = const()[name = tensor("attn_229_transpose_y_0"), val = tensor(true)]; + tensor attn_229_cast = matmul(transpose_x = attn_229_transpose_x_0, transpose_y = attn_229_transpose_y_0, x = var_11293_cast, y = var_11297_cast)[name = tensor("attn_229_cast")]; + tensor var_11301 = const()[name = tensor("op_11301"), val = tensor([2, 1280, 1, -1])]; + tensor input_659_cast = reshape(shape = var_11301, x = attn_229_cast)[name = tensor("input_659_cast")]; + tensor var_11306 = const()[name = tensor("op_11306"), val = tensor([1, 1])]; + tensor var_11308 = const()[name = tensor("op_11308"), val = tensor([1, 1])]; + tensor var_11310_pad_type_0 = const()[name = tensor("op_11310_pad_type_0"), val = tensor("custom")]; + tensor var_11310_pad_0 = const()[name = tensor("op_11310_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1164958080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1166186944))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1166187136)))]; + tensor var_11310_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_11308, groups = var_6865, pad = var_11310_pad_0, pad_type = var_11310_pad_type_0, strides = var_11306, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_659_cast)[name = tensor("op_11310_cast")]; + tensor inputs_345_cast = add(x = var_11310_cast, y = inputs_343_cast)[name = tensor("inputs_345_cast")]; + tensor var_11314 = const()[name = tensor("op_11314"), val = tensor([1])]; + tensor channels_mean_345_cast = reduce_mean(axes = var_11314, keep_dims = var_6860, x = inputs_345_cast)[name = tensor("channels_mean_345_cast")]; + tensor zero_mean_345_cast = sub(x = inputs_345_cast, y = channels_mean_345_cast)[name = tensor("zero_mean_345_cast")]; + tensor zero_mean_sq_345_cast = mul(x = zero_mean_345_cast, y = zero_mean_345_cast)[name = tensor("zero_mean_sq_345_cast")]; + tensor var_11318 = const()[name = tensor("op_11318"), val = tensor([1])]; + tensor var_11319_cast = reduce_mean(axes = var_11318, keep_dims = var_6860, x = zero_mean_sq_345_cast)[name = tensor("op_11319_cast")]; + tensor var_11320_to_fp16 = const()[name = tensor("op_11320_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11321_cast = add(x = var_11319_cast, y = var_11320_to_fp16)[name = tensor("op_11321_cast")]; + tensor denom_345_epsilon_0_to_fp16 = const()[name = tensor("denom_345_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + 
tensor denom_345_cast = rsqrt(epsilon = denom_345_epsilon_0_to_fp16, x = var_11321_cast)[name = tensor("denom_345_cast")]; + tensor out_345_cast = mul(x = zero_mean_345_cast, y = denom_345_cast)[name = tensor("out_345_cast")]; + tensor var_11325_to_fp16 = const()[name = tensor("op_11325_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1166189760)))]; + tensor var_11326_cast = add(x = out_345_cast, y = var_11325_to_fp16)[name = tensor("op_11326_cast")]; + tensor var_11328_to_fp16 = const()[name = tensor("op_11328_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1166192384)))]; + tensor hidden_states_455_cast = mul(x = var_11326_cast, y = var_11328_to_fp16)[name = tensor("hidden_states_455_cast")]; + tensor var_11335 = const()[name = tensor("op_11335"), val = tensor([1, 1])]; + tensor var_11337 = const()[name = tensor("op_11337"), val = tensor([1, 1])]; + tensor q_231_pad_type_0 = const()[name = tensor("q_231_pad_type_0"), val = tensor("custom")]; + tensor q_231_pad_0 = const()[name = tensor("q_231_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1166195008))), lut = tensor([-0x1.028p-5, -0x1.36cp-7, 0x1.348p-7, 0x1.024p-5]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_231_cast = conv(dilations = var_11337, groups = var_6865, pad = q_231_pad_0, pad_type = q_231_pad_type_0, strides = var_11335, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_455_cast)[name = tensor("q_231_cast")]; + tensor var_11341 = const()[name = tensor("op_11341"), val = tensor([1, 1])]; + tensor var_11343 = const()[name = tensor("op_11343"), val = tensor([1, 1])]; + tensor k_231_pad_type_0 = const()[name = tensor("k_231_pad_type_0"), val = tensor("custom")]; + tensor k_231_pad_0 = const()[name = tensor("k_231_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1166604672))), lut = tensor([-0x1.b3cp-6, -0x1.f7cp-8, 0x1.fb8p-8, 0x1.b6p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_231_cast = conv(dilations = var_11343, groups = var_6865, pad = k_231_pad_0, pad_type = k_231_pad_type_0, strides = var_11341, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_231_cast")]; + tensor var_11347 = const()[name = tensor("op_11347"), val = tensor([1, 1])]; + tensor var_11349 = const()[name = tensor("op_11349"), val = tensor([1, 1])]; + tensor v_231_pad_type_0 = const()[name = tensor("v_231_pad_type_0"), val = tensor("custom")]; + tensor v_231_pad_0 = const()[name = tensor("v_231_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1167260096))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1168570880))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_231_cast = conv(dilations = var_11349, groups = var_6865, pad = v_231_pad_0, pad_type = v_231_pad_type_0, strides = var_11347, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_231_cast")]; + tensor var_11353 = const()[name = tensor("op_11353"), val = tensor([2, 20, 64, -1])]; + tensor var_11354_cast = reshape(shape = var_11353, x = q_231_cast)[name = tensor("op_11354_cast")]; + tensor var_11355 = const()[name = tensor("op_11355"), val = tensor([2, 20, 64, -1])]; + tensor var_11356_cast = reshape(shape = var_11355, x = k_231_cast)[name = tensor("op_11356_cast")]; + tensor var_11357 = const()[name = tensor("op_11357"), val = tensor([2, 20, 64, -1])]; + tensor var_11358_cast = reshape(shape = var_11357, x = v_231_cast)[name = tensor("op_11358_cast")]; + tensor attn_weights_461_transpose_x_0 = const()[name = tensor("attn_weights_461_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_461_transpose_y_0 = const()[name = tensor("attn_weights_461_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_461_cast = matmul(transpose_x = attn_weights_461_transpose_x_0, transpose_y = attn_weights_461_transpose_y_0, x = var_11354_cast, y = var_11356_cast)[name = tensor("attn_weights_461_cast")]; + tensor attn_weights_463_cast = mul(x = attn_weights_461_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_463_cast")]; + tensor var_11362_cast = softmax(axis = var_6849, x = attn_weights_463_cast)[name = tensor("op_11362_cast")]; + tensor attn_231_transpose_x_0 = const()[name = tensor("attn_231_transpose_x_0"), val = tensor(false)]; + tensor attn_231_transpose_y_0 = const()[name = tensor("attn_231_transpose_y_0"), val = tensor(true)]; + tensor attn_231_cast = matmul(transpose_x = attn_231_transpose_x_0, transpose_y = attn_231_transpose_y_0, x = var_11358_cast, y = var_11362_cast)[name = tensor("attn_231_cast")]; + tensor var_11366 = const()[name = tensor("op_11366"), val = tensor([2, 1280, 1, -1])]; + tensor input_661_cast = reshape(shape = var_11366, x = attn_231_cast)[name = tensor("input_661_cast")]; + tensor var_11371 = const()[name = tensor("op_11371"), val = tensor([1, 1])]; + tensor var_11373 = const()[name = tensor("op_11373"), val = tensor([1, 1])]; + tensor var_11375_pad_type_0 = const()[name = tensor("op_11375_pad_type_0"), val = tensor("custom")]; + tensor var_11375_pad_0 = const()[name = tensor("op_11375_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1168571008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1169390272))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1169390400)))]; + tensor var_11375_cast = conv(bias = 
up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_11373, groups = var_6865, pad = var_11375_pad_0, pad_type = var_11375_pad_type_0, strides = var_11371, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_661_cast)[name = tensor("op_11375_cast")]; + tensor inputs_347_cast = add(x = var_11375_cast, y = inputs_345_cast)[name = tensor("inputs_347_cast")]; + tensor var_11379 = const()[name = tensor("op_11379"), val = tensor([1])]; + tensor channels_mean_347_cast = reduce_mean(axes = var_11379, keep_dims = var_6860, x = inputs_347_cast)[name = tensor("channels_mean_347_cast")]; + tensor zero_mean_347_cast = sub(x = inputs_347_cast, y = channels_mean_347_cast)[name = tensor("zero_mean_347_cast")]; + tensor zero_mean_sq_347_cast = mul(x = zero_mean_347_cast, y = zero_mean_347_cast)[name = tensor("zero_mean_sq_347_cast")]; + tensor var_11383 = const()[name = tensor("op_11383"), val = tensor([1])]; + tensor var_11384_cast = reduce_mean(axes = var_11383, keep_dims = var_6860, x = zero_mean_sq_347_cast)[name = tensor("op_11384_cast")]; + tensor var_11385_to_fp16 = const()[name = tensor("op_11385_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11386_cast = add(x = var_11384_cast, y = var_11385_to_fp16)[name = tensor("op_11386_cast")]; + tensor denom_347_epsilon_0_to_fp16 = const()[name = tensor("denom_347_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_347_cast = rsqrt(epsilon = denom_347_epsilon_0_to_fp16, x = var_11386_cast)[name = tensor("denom_347_cast")]; + tensor out_347_cast = mul(x = zero_mean_347_cast, y = denom_347_cast)[name = tensor("out_347_cast")]; + tensor var_11390_to_fp16 = const()[name = tensor("op_11390_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1169393024)))]; + tensor var_11391_cast = add(x = out_347_cast, y = var_11390_to_fp16)[name = tensor("op_11391_cast")]; + tensor var_11393_to_fp16 = const()[name = tensor("op_11393_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1169395648)))]; + tensor input_663_cast = mul(x = var_11391_cast, y = var_11393_to_fp16)[name = tensor("input_663_cast")]; + tensor var_11401 = const()[name = tensor("op_11401"), val = tensor([1, 1])]; + tensor var_11403 = const()[name = tensor("op_11403"), val = tensor([1, 1])]; + tensor var_11405_pad_type_0 = const()[name = tensor("op_11405_pad_type_0"), val = tensor("custom")]; + tensor var_11405_pad_0 = const()[name = tensor("op_11405_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1169398272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1179228736))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1179228928)))]; + tensor var_11405_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_11403, groups = var_6865, pad = var_11405_pad_0, pad_type = 
var_11405_pad_type_0, strides = var_11401, weight = up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_663_cast)[name = tensor("op_11405_cast")]; + tensor var_11406_split_sizes_0 = const()[name = tensor("op_11406_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11406_axis_0 = const()[name = tensor("op_11406_axis_0"), val = tensor(1)]; + tensor var_11406_cast_0, tensor var_11406_cast_1 = split(axis = var_11406_axis_0, split_sizes = var_11406_split_sizes_0, x = var_11405_cast)[name = tensor("op_11406_cast")]; + tensor var_11408_mode_0 = const()[name = tensor("op_11408_mode_0"), val = tensor("EXACT")]; + tensor var_11408_cast = gelu(mode = var_11408_mode_0, x = var_11406_cast_1)[name = tensor("op_11408_cast")]; + tensor input_665_cast = mul(x = var_11406_cast_0, y = var_11408_cast)[name = tensor("input_665_cast")]; + tensor var_11412 = const()[name = tensor("op_11412"), val = tensor([1, 1])]; + tensor var_11414 = const()[name = tensor("op_11414"), val = tensor([1, 1])]; + tensor var_11416_pad_type_0 = const()[name = tensor("op_11416_pad_type_0"), val = tensor("custom")]; + tensor var_11416_pad_0 = const()[name = tensor("op_11416_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1179249472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1184164736))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1184164928)))]; + tensor var_11416_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_11414, groups = var_6865, pad = var_11416_pad_0, pad_type = var_11416_pad_type_0, strides = var_11412, weight = up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_665_cast)[name = tensor("op_11416_cast")]; + tensor inputs_349_cast = add(x = var_11416_cast, y = inputs_347_cast)[name = tensor("inputs_349_cast")]; + tensor var_11426 = const()[name = tensor("op_11426"), val = tensor([1])]; + tensor channels_mean_349_cast = reduce_mean(axes = var_11426, keep_dims = var_6860, x = inputs_349_cast)[name = tensor("channels_mean_349_cast")]; + tensor zero_mean_349_cast = sub(x = inputs_349_cast, y = channels_mean_349_cast)[name = tensor("zero_mean_349_cast")]; + tensor zero_mean_sq_349_cast = mul(x = zero_mean_349_cast, y = zero_mean_349_cast)[name = tensor("zero_mean_sq_349_cast")]; + tensor var_11430 = const()[name = tensor("op_11430"), val = tensor([1])]; + tensor var_11431_cast = reduce_mean(axes = var_11430, keep_dims = var_6860, x = zero_mean_sq_349_cast)[name = tensor("op_11431_cast")]; + tensor var_11432_to_fp16 = const()[name = tensor("op_11432_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11433_cast = add(x = var_11431_cast, y = var_11432_to_fp16)[name = tensor("op_11433_cast")]; + tensor denom_349_epsilon_0_to_fp16 = const()[name = tensor("denom_349_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_349_cast = rsqrt(epsilon = denom_349_epsilon_0_to_fp16, x = var_11433_cast)[name 
= tensor("denom_349_cast")]; + tensor out_349_cast = mul(x = zero_mean_349_cast, y = denom_349_cast)[name = tensor("out_349_cast")]; + tensor var_11437_to_fp16 = const()[name = tensor("op_11437_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1184167552)))]; + tensor var_11438_cast = add(x = out_349_cast, y = var_11437_to_fp16)[name = tensor("op_11438_cast")]; + tensor var_11440_to_fp16 = const()[name = tensor("op_11440_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1184170176)))]; + tensor hidden_states_459_cast = mul(x = var_11438_cast, y = var_11440_to_fp16)[name = tensor("hidden_states_459_cast")]; + tensor var_11447 = const()[name = tensor("op_11447"), val = tensor([1, 1])]; + tensor var_11449 = const()[name = tensor("op_11449"), val = tensor([1, 1])]; + tensor q_233_pad_type_0 = const()[name = tensor("q_233_pad_type_0"), val = tensor("custom")]; + tensor q_233_pad_0 = const()[name = tensor("q_233_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1184172800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1184992064))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_233_cast = conv(dilations = var_11449, groups = var_6865, pad = q_233_pad_0, pad_type = q_233_pad_type_0, strides = var_11447, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_459_cast)[name = tensor("q_233_cast")]; + tensor var_11453 = const()[name = tensor("op_11453"), val = tensor([1, 1])]; + tensor var_11455 = const()[name = tensor("op_11455"), val = tensor([1, 1])]; + tensor k_233_pad_type_0 = const()[name = tensor("k_233_pad_type_0"), val = tensor("custom")]; + tensor k_233_pad_0 = const()[name = tensor("k_233_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1184992192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1185811456))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_233_cast = conv(dilations = var_11455, groups = var_6865, pad = k_233_pad_0, pad_type = k_233_pad_type_0, strides = var_11453, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_459_cast)[name = tensor("k_233_cast")]; + tensor var_11459 = const()[name = tensor("op_11459"), val = tensor([1, 1])]; + tensor var_11461 = const()[name = tensor("op_11461"), val = tensor([1, 1])]; + tensor v_233_pad_type_0 = const()[name = tensor("v_233_pad_type_0"), val = tensor("custom")]; + tensor v_233_pad_0 = const()[name = tensor("v_233_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1185811584))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1187040448))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_233_cast = conv(dilations = var_11461, groups = var_6865, pad = v_233_pad_0, pad_type = v_233_pad_type_0, strides = var_11459, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_459_cast)[name = tensor("v_233_cast")]; + tensor var_11465 = const()[name = tensor("op_11465"), val = tensor([2, 20, 64, -1])]; + tensor var_11466_cast = reshape(shape = var_11465, x = q_233_cast)[name = tensor("op_11466_cast")]; + tensor var_11467 = const()[name = tensor("op_11467"), val = tensor([2, 20, 64, -1])]; + tensor var_11468_cast = reshape(shape = var_11467, x = k_233_cast)[name = tensor("op_11468_cast")]; + tensor var_11469 = const()[name = tensor("op_11469"), val = tensor([2, 20, 64, -1])]; + tensor var_11470_cast = reshape(shape = var_11469, x = v_233_cast)[name = tensor("op_11470_cast")]; + tensor attn_weights_465_transpose_x_0 = const()[name = tensor("attn_weights_465_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_465_transpose_y_0 = const()[name = tensor("attn_weights_465_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_465_cast = matmul(transpose_x = attn_weights_465_transpose_x_0, transpose_y = attn_weights_465_transpose_y_0, x = var_11466_cast, y = var_11468_cast)[name = tensor("attn_weights_465_cast")]; + tensor attn_weights_467_cast = mul(x = attn_weights_465_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_467_cast")]; + tensor var_11474_cast = softmax(axis = var_6849, x = attn_weights_467_cast)[name = tensor("op_11474_cast")]; + tensor attn_233_transpose_x_0 = const()[name = tensor("attn_233_transpose_x_0"), val = tensor(false)]; + tensor attn_233_transpose_y_0 = const()[name = tensor("attn_233_transpose_y_0"), val = tensor(true)]; + tensor attn_233_cast = matmul(transpose_x = attn_233_transpose_x_0, transpose_y = attn_233_transpose_y_0, x = var_11470_cast, y = var_11474_cast)[name = tensor("attn_233_cast")]; + tensor var_11478 = const()[name = tensor("op_11478"), val = tensor([2, 1280, 1, -1])]; + tensor input_667_cast = reshape(shape = var_11478, x = attn_233_cast)[name = tensor("input_667_cast")]; + tensor var_11483 = const()[name = tensor("op_11483"), val = tensor([1, 1])]; + tensor var_11485 = const()[name = tensor("op_11485"), val = tensor([1, 1])]; + tensor var_11487_pad_type_0 = const()[name = tensor("op_11487_pad_type_0"), val = tensor("custom")]; + tensor var_11487_pad_0 = const()[name = tensor("op_11487_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1187040640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1188269504))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1188269696)))]; + tensor var_11487_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_11485, 
groups = var_6865, pad = var_11487_pad_0, pad_type = var_11487_pad_type_0, strides = var_11483, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_667_cast)[name = tensor("op_11487_cast")]; + tensor inputs_351_cast = add(x = var_11487_cast, y = inputs_349_cast)[name = tensor("inputs_351_cast")]; + tensor var_11491 = const()[name = tensor("op_11491"), val = tensor([1])]; + tensor channels_mean_351_cast = reduce_mean(axes = var_11491, keep_dims = var_6860, x = inputs_351_cast)[name = tensor("channels_mean_351_cast")]; + tensor zero_mean_351_cast = sub(x = inputs_351_cast, y = channels_mean_351_cast)[name = tensor("zero_mean_351_cast")]; + tensor zero_mean_sq_351_cast = mul(x = zero_mean_351_cast, y = zero_mean_351_cast)[name = tensor("zero_mean_sq_351_cast")]; + tensor var_11495 = const()[name = tensor("op_11495"), val = tensor([1])]; + tensor var_11496_cast = reduce_mean(axes = var_11495, keep_dims = var_6860, x = zero_mean_sq_351_cast)[name = tensor("op_11496_cast")]; + tensor var_11497_to_fp16 = const()[name = tensor("op_11497_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11498_cast = add(x = var_11496_cast, y = var_11497_to_fp16)[name = tensor("op_11498_cast")]; + tensor denom_351_epsilon_0_to_fp16 = const()[name = tensor("denom_351_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_351_cast = rsqrt(epsilon = denom_351_epsilon_0_to_fp16, x = var_11498_cast)[name = tensor("denom_351_cast")]; + tensor out_351_cast = mul(x = zero_mean_351_cast, y = denom_351_cast)[name = tensor("out_351_cast")]; + tensor var_11502_to_fp16 = const()[name = tensor("op_11502_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1188272320)))]; + tensor var_11503_cast = add(x = out_351_cast, y = var_11502_to_fp16)[name = tensor("op_11503_cast")]; + tensor var_11505_to_fp16 = const()[name = tensor("op_11505_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1188274944)))]; + tensor hidden_states_461_cast = mul(x = var_11503_cast, y = var_11505_to_fp16)[name = tensor("hidden_states_461_cast")]; + tensor var_11512 = const()[name = tensor("op_11512"), val = tensor([1, 1])]; + tensor var_11514 = const()[name = tensor("op_11514"), val = tensor([1, 1])]; + tensor q_235_pad_type_0 = const()[name = tensor("q_235_pad_type_0"), val = tensor("custom")]; + tensor q_235_pad_0 = const()[name = tensor("q_235_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1188277568))), lut = tensor([-0x1.c8cp-6, -0x1.17p-7, 0x1.178p-7, 0x1.c9p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_235_cast = conv(dilations = var_11514, groups = var_6865, pad = q_235_pad_0, pad_type = q_235_pad_type_0, strides = var_11512, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_461_cast)[name = tensor("q_235_cast")]; + tensor var_11518 = const()[name = tensor("op_11518"), val = tensor([1, 1])]; + tensor var_11520 = const()[name = tensor("op_11520"), val = tensor([1, 1])]; + tensor k_235_pad_type_0 = const()[name = tensor("k_235_pad_type_0"), val = tensor("custom")]; + tensor k_235_pad_0 = const()[name = tensor("k_235_pad_0"), val = 
tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1188687232))), lut = tensor([-0x1.718p-6, -0x1.b18p-8, 0x1.b68p-8, 0x1.728p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_235_cast = conv(dilations = var_11520, groups = var_6865, pad = k_235_pad_0, pad_type = k_235_pad_type_0, strides = var_11518, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_235_cast")]; + tensor var_11524 = const()[name = tensor("op_11524"), val = tensor([1, 1])]; + tensor var_11526 = const()[name = tensor("op_11526"), val = tensor([1, 1])]; + tensor v_235_pad_type_0 = const()[name = tensor("v_235_pad_type_0"), val = tensor("custom")]; + tensor v_235_pad_0 = const()[name = tensor("v_235_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1189342656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1190653440))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_235_cast = conv(dilations = var_11526, groups = var_6865, pad = v_235_pad_0, pad_type = v_235_pad_type_0, strides = var_11524, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_235_cast")]; + tensor var_11530 = const()[name = tensor("op_11530"), val = tensor([2, 20, 64, -1])]; + tensor var_11531_cast = reshape(shape = var_11530, x = q_235_cast)[name = tensor("op_11531_cast")]; + tensor var_11532 = const()[name = tensor("op_11532"), val = tensor([2, 20, 64, -1])]; + tensor var_11533_cast = reshape(shape = var_11532, x = k_235_cast)[name = tensor("op_11533_cast")]; + tensor var_11534 = const()[name = tensor("op_11534"), val = tensor([2, 20, 64, -1])]; + tensor var_11535_cast = reshape(shape = var_11534, x = v_235_cast)[name = tensor("op_11535_cast")]; + tensor attn_weights_469_transpose_x_0 = const()[name = tensor("attn_weights_469_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_469_transpose_y_0 = const()[name = tensor("attn_weights_469_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_469_cast = matmul(transpose_x = attn_weights_469_transpose_x_0, transpose_y = attn_weights_469_transpose_y_0, x = var_11531_cast, y = var_11533_cast)[name = tensor("attn_weights_469_cast")]; + tensor attn_weights_471_cast = mul(x = attn_weights_469_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_471_cast")]; + tensor var_11539_cast = softmax(axis = var_6849, x = attn_weights_471_cast)[name = tensor("op_11539_cast")]; + tensor attn_235_transpose_x_0 = const()[name = tensor("attn_235_transpose_x_0"), val = tensor(false)]; + tensor attn_235_transpose_y_0 = const()[name = tensor("attn_235_transpose_y_0"), val = tensor(true)]; + tensor attn_235_cast = matmul(transpose_x = attn_235_transpose_x_0, transpose_y = attn_235_transpose_y_0, x = var_11535_cast, y = var_11539_cast)[name = tensor("attn_235_cast")]; + tensor var_11543 = const()[name = tensor("op_11543"), val = 
tensor([2, 1280, 1, -1])]; + tensor input_669_cast = reshape(shape = var_11543, x = attn_235_cast)[name = tensor("input_669_cast")]; + tensor var_11548 = const()[name = tensor("op_11548"), val = tensor([1, 1])]; + tensor var_11550 = const()[name = tensor("op_11550"), val = tensor([1, 1])]; + tensor var_11552_pad_type_0 = const()[name = tensor("op_11552_pad_type_0"), val = tensor("custom")]; + tensor var_11552_pad_0 = const()[name = tensor("op_11552_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1190653568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1191472832))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1191472960)))]; + tensor var_11552_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_11550, groups = var_6865, pad = var_11552_pad_0, pad_type = var_11552_pad_type_0, strides = var_11548, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_669_cast)[name = tensor("op_11552_cast")]; + tensor inputs_353_cast = add(x = var_11552_cast, y = inputs_351_cast)[name = tensor("inputs_353_cast")]; + tensor var_11556 = const()[name = tensor("op_11556"), val = tensor([1])]; + tensor channels_mean_353_cast = reduce_mean(axes = var_11556, keep_dims = var_6860, x = inputs_353_cast)[name = tensor("channels_mean_353_cast")]; + tensor zero_mean_353_cast = sub(x = inputs_353_cast, y = channels_mean_353_cast)[name = tensor("zero_mean_353_cast")]; + tensor zero_mean_sq_353_cast = mul(x = zero_mean_353_cast, y = zero_mean_353_cast)[name = tensor("zero_mean_sq_353_cast")]; + tensor var_11560 = const()[name = tensor("op_11560"), val = tensor([1])]; + tensor var_11561_cast = reduce_mean(axes = var_11560, keep_dims = var_6860, x = zero_mean_sq_353_cast)[name = tensor("op_11561_cast")]; + tensor var_11562_to_fp16 = const()[name = tensor("op_11562_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11563_cast = add(x = var_11561_cast, y = var_11562_to_fp16)[name = tensor("op_11563_cast")]; + tensor denom_353_epsilon_0_to_fp16 = const()[name = tensor("denom_353_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_353_cast = rsqrt(epsilon = denom_353_epsilon_0_to_fp16, x = var_11563_cast)[name = tensor("denom_353_cast")]; + tensor out_353_cast = mul(x = zero_mean_353_cast, y = denom_353_cast)[name = tensor("out_353_cast")]; + tensor var_11567_to_fp16 = const()[name = tensor("op_11567_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1191475584)))]; + tensor var_11568_cast = add(x = out_353_cast, y = var_11567_to_fp16)[name = tensor("op_11568_cast")]; + tensor var_11570_to_fp16 = const()[name = tensor("op_11570_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1191478208)))]; + tensor input_671_cast = mul(x = var_11568_cast, y = var_11570_to_fp16)[name = tensor("input_671_cast")]; + tensor 
var_11578 = const()[name = tensor("op_11578"), val = tensor([1, 1])]; + tensor var_11580 = const()[name = tensor("op_11580"), val = tensor([1, 1])]; + tensor var_11582_pad_type_0 = const()[name = tensor("op_11582_pad_type_0"), val = tensor("custom")]; + tensor var_11582_pad_0 = const()[name = tensor("op_11582_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1191480832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1201311296))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1201311488)))]; + tensor var_11582_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_11580, groups = var_6865, pad = var_11582_pad_0, pad_type = var_11582_pad_type_0, strides = var_11578, weight = up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_671_cast)[name = tensor("op_11582_cast")]; + tensor var_11583_split_sizes_0 = const()[name = tensor("op_11583_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11583_axis_0 = const()[name = tensor("op_11583_axis_0"), val = tensor(1)]; + tensor var_11583_cast_0, tensor var_11583_cast_1 = split(axis = var_11583_axis_0, split_sizes = var_11583_split_sizes_0, x = var_11582_cast)[name = tensor("op_11583_cast")]; + tensor var_11585_mode_0 = const()[name = tensor("op_11585_mode_0"), val = tensor("EXACT")]; + tensor var_11585_cast = gelu(mode = var_11585_mode_0, x = var_11583_cast_1)[name = tensor("op_11585_cast")]; + tensor input_673_cast = mul(x = var_11583_cast_0, y = var_11585_cast)[name = tensor("input_673_cast")]; + tensor var_11589 = const()[name = tensor("op_11589"), val = tensor([1, 1])]; + tensor var_11591 = const()[name = tensor("op_11591"), val = tensor([1, 1])]; + tensor var_11593_pad_type_0 = const()[name = tensor("op_11593_pad_type_0"), val = tensor("custom")]; + tensor var_11593_pad_0 = const()[name = tensor("op_11593_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1201332032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1206247296))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1206247488)))]; + tensor var_11593_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_11591, groups = var_6865, pad = var_11593_pad_0, pad_type = var_11593_pad_type_0, strides = var_11589, weight = 
up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_673_cast)[name = tensor("op_11593_cast")]; + tensor inputs_355_cast = add(x = var_11593_cast, y = inputs_353_cast)[name = tensor("inputs_355_cast")]; + tensor var_11603 = const()[name = tensor("op_11603"), val = tensor([1])]; + tensor channels_mean_355_cast = reduce_mean(axes = var_11603, keep_dims = var_6860, x = inputs_355_cast)[name = tensor("channels_mean_355_cast")]; + tensor zero_mean_355_cast = sub(x = inputs_355_cast, y = channels_mean_355_cast)[name = tensor("zero_mean_355_cast")]; + tensor zero_mean_sq_355_cast = mul(x = zero_mean_355_cast, y = zero_mean_355_cast)[name = tensor("zero_mean_sq_355_cast")]; + tensor var_11607 = const()[name = tensor("op_11607"), val = tensor([1])]; + tensor var_11608_cast = reduce_mean(axes = var_11607, keep_dims = var_6860, x = zero_mean_sq_355_cast)[name = tensor("op_11608_cast")]; + tensor var_11609_to_fp16 = const()[name = tensor("op_11609_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11610_cast = add(x = var_11608_cast, y = var_11609_to_fp16)[name = tensor("op_11610_cast")]; + tensor denom_355_epsilon_0_to_fp16 = const()[name = tensor("denom_355_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_355_cast = rsqrt(epsilon = denom_355_epsilon_0_to_fp16, x = var_11610_cast)[name = tensor("denom_355_cast")]; + tensor out_355_cast = mul(x = zero_mean_355_cast, y = denom_355_cast)[name = tensor("out_355_cast")]; + tensor var_11614_to_fp16 = const()[name = tensor("op_11614_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1206250112)))]; + tensor var_11615_cast = add(x = out_355_cast, y = var_11614_to_fp16)[name = tensor("op_11615_cast")]; + tensor var_11617_to_fp16 = const()[name = tensor("op_11617_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1206252736)))]; + tensor hidden_states_465_cast = mul(x = var_11615_cast, y = var_11617_to_fp16)[name = tensor("hidden_states_465_cast")]; + tensor var_11624 = const()[name = tensor("op_11624"), val = tensor([1, 1])]; + tensor var_11626 = const()[name = tensor("op_11626"), val = tensor([1, 1])]; + tensor q_237_pad_type_0 = const()[name = tensor("q_237_pad_type_0"), val = tensor("custom")]; + tensor q_237_pad_0 = const()[name = tensor("q_237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1206255360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1207074624))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_237_cast = conv(dilations = var_11626, groups = var_6865, pad = q_237_pad_0, pad_type = q_237_pad_type_0, strides = var_11624, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_465_cast)[name = tensor("q_237_cast")]; + tensor var_11630 = const()[name = tensor("op_11630"), val = tensor([1, 1])]; + tensor var_11632 = const()[name = tensor("op_11632"), val = tensor([1, 1])]; + tensor k_237_pad_type_0 = const()[name = tensor("k_237_pad_type_0"), val = tensor("custom")]; + tensor k_237_pad_0 = const()[name = tensor("k_237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1207074752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1207894016))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_237_cast = conv(dilations = var_11632, groups = var_6865, pad = k_237_pad_0, pad_type = k_237_pad_type_0, strides = var_11630, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_465_cast)[name = tensor("k_237_cast")]; + tensor var_11636 = const()[name = tensor("op_11636"), val = tensor([1, 1])]; + tensor var_11638 = const()[name = tensor("op_11638"), val = tensor([1, 1])]; + tensor v_237_pad_type_0 = const()[name = tensor("v_237_pad_type_0"), val = tensor("custom")]; + tensor v_237_pad_0 = const()[name = tensor("v_237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1207894144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1209123008))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_237_cast = conv(dilations = var_11638, groups = var_6865, pad = v_237_pad_0, pad_type = v_237_pad_type_0, strides = var_11636, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_465_cast)[name = tensor("v_237_cast")]; + tensor var_11642 = const()[name = tensor("op_11642"), val = tensor([2, 20, 64, -1])]; + tensor var_11643_cast = reshape(shape = var_11642, x = q_237_cast)[name = tensor("op_11643_cast")]; + tensor var_11644 = const()[name = tensor("op_11644"), val = tensor([2, 20, 64, -1])]; + tensor var_11645_cast = reshape(shape = var_11644, x = k_237_cast)[name = tensor("op_11645_cast")]; + tensor var_11646 = const()[name = tensor("op_11646"), val = tensor([2, 20, 64, -1])]; + tensor var_11647_cast = reshape(shape = var_11646, x = v_237_cast)[name = tensor("op_11647_cast")]; + tensor attn_weights_473_transpose_x_0 = const()[name = tensor("attn_weights_473_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_473_transpose_y_0 = const()[name = tensor("attn_weights_473_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_473_cast = matmul(transpose_x = attn_weights_473_transpose_x_0, transpose_y = attn_weights_473_transpose_y_0, x = var_11643_cast, y = var_11645_cast)[name = tensor("attn_weights_473_cast")]; + tensor attn_weights_475_cast = mul(x = attn_weights_473_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_475_cast")]; + tensor var_11651_cast = softmax(axis = var_6849, x = attn_weights_475_cast)[name = tensor("op_11651_cast")]; + tensor attn_237_transpose_x_0 = const()[name = tensor("attn_237_transpose_x_0"), val = tensor(false)]; + tensor attn_237_transpose_y_0 = const()[name = tensor("attn_237_transpose_y_0"), val = tensor(true)]; + tensor attn_237_cast = matmul(transpose_x = attn_237_transpose_x_0, transpose_y = attn_237_transpose_y_0, x = var_11647_cast, y = var_11651_cast)[name = tensor("attn_237_cast")]; + tensor var_11655 = const()[name = tensor("op_11655"), val 
= tensor([2, 1280, 1, -1])]; + tensor input_675_cast = reshape(shape = var_11655, x = attn_237_cast)[name = tensor("input_675_cast")]; + tensor var_11660 = const()[name = tensor("op_11660"), val = tensor([1, 1])]; + tensor var_11662 = const()[name = tensor("op_11662"), val = tensor([1, 1])]; + tensor var_11664_pad_type_0 = const()[name = tensor("op_11664_pad_type_0"), val = tensor("custom")]; + tensor var_11664_pad_0 = const()[name = tensor("op_11664_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1209123200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1210352064))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1210352256)))]; + tensor var_11664_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_11662, groups = var_6865, pad = var_11664_pad_0, pad_type = var_11664_pad_type_0, strides = var_11660, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_675_cast)[name = tensor("op_11664_cast")]; + tensor inputs_357_cast = add(x = var_11664_cast, y = inputs_355_cast)[name = tensor("inputs_357_cast")]; + tensor var_11668 = const()[name = tensor("op_11668"), val = tensor([1])]; + tensor channels_mean_357_cast = reduce_mean(axes = var_11668, keep_dims = var_6860, x = inputs_357_cast)[name = tensor("channels_mean_357_cast")]; + tensor zero_mean_357_cast = sub(x = inputs_357_cast, y = channels_mean_357_cast)[name = tensor("zero_mean_357_cast")]; + tensor zero_mean_sq_357_cast = mul(x = zero_mean_357_cast, y = zero_mean_357_cast)[name = tensor("zero_mean_sq_357_cast")]; + tensor var_11672 = const()[name = tensor("op_11672"), val = tensor([1])]; + tensor var_11673_cast = reduce_mean(axes = var_11672, keep_dims = var_6860, x = zero_mean_sq_357_cast)[name = tensor("op_11673_cast")]; + tensor var_11674_to_fp16 = const()[name = tensor("op_11674_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11675_cast = add(x = var_11673_cast, y = var_11674_to_fp16)[name = tensor("op_11675_cast")]; + tensor denom_357_epsilon_0_to_fp16 = const()[name = tensor("denom_357_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_357_cast = rsqrt(epsilon = denom_357_epsilon_0_to_fp16, x = var_11675_cast)[name = tensor("denom_357_cast")]; + tensor out_357_cast = mul(x = zero_mean_357_cast, y = denom_357_cast)[name = tensor("out_357_cast")]; + tensor var_11679_to_fp16 = const()[name = tensor("op_11679_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1210354880)))]; + tensor var_11680_cast = add(x = out_357_cast, y = var_11679_to_fp16)[name = tensor("op_11680_cast")]; + tensor var_11682_to_fp16 = const()[name = tensor("op_11682_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1210357504)))]; + tensor hidden_states_467_cast = mul(x = var_11680_cast, y = var_11682_to_fp16)[name = 
tensor("hidden_states_467_cast")]; + tensor var_11689 = const()[name = tensor("op_11689"), val = tensor([1, 1])]; + tensor var_11691 = const()[name = tensor("op_11691"), val = tensor([1, 1])]; + tensor q_239_pad_type_0 = const()[name = tensor("q_239_pad_type_0"), val = tensor("custom")]; + tensor q_239_pad_0 = const()[name = tensor("q_239_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1210360128))), lut = tensor([-0x1.b08p-6, -0x1.0c4p-7, 0x1.09p-7, 0x1.af4p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_239_cast = conv(dilations = var_11691, groups = var_6865, pad = q_239_pad_0, pad_type = q_239_pad_type_0, strides = var_11689, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_467_cast)[name = tensor("q_239_cast")]; + tensor var_11695 = const()[name = tensor("op_11695"), val = tensor([1, 1])]; + tensor var_11697 = const()[name = tensor("op_11697"), val = tensor([1, 1])]; + tensor k_239_pad_type_0 = const()[name = tensor("k_239_pad_type_0"), val = tensor("custom")]; + tensor k_239_pad_0 = const()[name = tensor("k_239_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1210769792))), lut = tensor([-0x1.50cp-6, -0x1.90cp-8, 0x1.8e4p-8, 0x1.504p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_239_cast = conv(dilations = var_11697, groups = var_6865, pad = k_239_pad_0, pad_type = k_239_pad_type_0, strides = var_11695, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_239_cast")]; + tensor var_11701 = const()[name = tensor("op_11701"), val = tensor([1, 1])]; + tensor var_11703 = const()[name = tensor("op_11703"), val = tensor([1, 1])]; + tensor v_239_pad_type_0 = const()[name = tensor("v_239_pad_type_0"), val = tensor("custom")]; + tensor v_239_pad_0 = const()[name = tensor("v_239_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1211425216))), lut = tensor([-0x1.80cp-6, -0x1.ac8p-8, 0x1.b34p-8, 0x1.82cp-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_239_cast = conv(dilations = var_11703, groups = var_6865, pad = v_239_pad_0, pad_type = v_239_pad_type_0, strides = var_11701, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_239_cast")]; + tensor var_11707 = const()[name = tensor("op_11707"), val = tensor([2, 20, 64, -1])]; + tensor var_11708_cast = reshape(shape = var_11707, x = q_239_cast)[name = tensor("op_11708_cast")]; + tensor var_11709 = const()[name = tensor("op_11709"), val = tensor([2, 20, 64, -1])]; + tensor var_11710_cast = reshape(shape = var_11709, x = 
k_239_cast)[name = tensor("op_11710_cast")]; + tensor var_11711 = const()[name = tensor("op_11711"), val = tensor([2, 20, 64, -1])]; + tensor var_11712_cast = reshape(shape = var_11711, x = v_239_cast)[name = tensor("op_11712_cast")]; + tensor attn_weights_477_transpose_x_0 = const()[name = tensor("attn_weights_477_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_477_transpose_y_0 = const()[name = tensor("attn_weights_477_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_477_cast = matmul(transpose_x = attn_weights_477_transpose_x_0, transpose_y = attn_weights_477_transpose_y_0, x = var_11708_cast, y = var_11710_cast)[name = tensor("attn_weights_477_cast")]; + tensor attn_weights_479_cast = mul(x = attn_weights_477_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_479_cast")]; + tensor var_11716_cast = softmax(axis = var_6849, x = attn_weights_479_cast)[name = tensor("op_11716_cast")]; + tensor attn_239_transpose_x_0 = const()[name = tensor("attn_239_transpose_x_0"), val = tensor(false)]; + tensor attn_239_transpose_y_0 = const()[name = tensor("attn_239_transpose_y_0"), val = tensor(true)]; + tensor attn_239_cast = matmul(transpose_x = attn_239_transpose_x_0, transpose_y = attn_239_transpose_y_0, x = var_11712_cast, y = var_11716_cast)[name = tensor("attn_239_cast")]; + tensor var_11720 = const()[name = tensor("op_11720"), val = tensor([2, 1280, 1, -1])]; + tensor input_677_cast = reshape(shape = var_11720, x = attn_239_cast)[name = tensor("input_677_cast")]; + tensor var_11725 = const()[name = tensor("op_11725"), val = tensor([1, 1])]; + tensor var_11727 = const()[name = tensor("op_11727"), val = tensor([1, 1])]; + tensor var_11729_pad_type_0 = const()[name = tensor("op_11729_pad_type_0"), val = tensor("custom")]; + tensor var_11729_pad_0 = const()[name = tensor("op_11729_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1212080640))), lut = tensor([-0x1.7e8p-8, 0x1.7fp-8]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1212285504)))]; + tensor var_11729_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_11727, groups = var_6865, pad = var_11729_pad_0, pad_type = var_11729_pad_type_0, strides = var_11725, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_677_cast)[name = tensor("op_11729_cast")]; + tensor inputs_359_cast = add(x = var_11729_cast, y = inputs_357_cast)[name = tensor("inputs_359_cast")]; + tensor var_11733 = const()[name = tensor("op_11733"), val = tensor([1])]; + tensor channels_mean_359_cast = reduce_mean(axes = var_11733, keep_dims = var_6860, x = inputs_359_cast)[name = tensor("channels_mean_359_cast")]; + tensor zero_mean_359_cast = sub(x = inputs_359_cast, y = channels_mean_359_cast)[name = tensor("zero_mean_359_cast")]; + tensor zero_mean_sq_359_cast = mul(x = zero_mean_359_cast, y = zero_mean_359_cast)[name = tensor("zero_mean_sq_359_cast")]; + tensor var_11737 = 
const()[name = tensor("op_11737"), val = tensor([1])]; + tensor var_11738_cast = reduce_mean(axes = var_11737, keep_dims = var_6860, x = zero_mean_sq_359_cast)[name = tensor("op_11738_cast")]; + tensor var_11739_to_fp16 = const()[name = tensor("op_11739_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11740_cast = add(x = var_11738_cast, y = var_11739_to_fp16)[name = tensor("op_11740_cast")]; + tensor denom_359_epsilon_0_to_fp16 = const()[name = tensor("denom_359_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_359_cast = rsqrt(epsilon = denom_359_epsilon_0_to_fp16, x = var_11740_cast)[name = tensor("denom_359_cast")]; + tensor out_359_cast = mul(x = zero_mean_359_cast, y = denom_359_cast)[name = tensor("out_359_cast")]; + tensor var_11744_to_fp16 = const()[name = tensor("op_11744_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1212288128)))]; + tensor var_11745_cast = add(x = out_359_cast, y = var_11744_to_fp16)[name = tensor("op_11745_cast")]; + tensor var_11747_to_fp16 = const()[name = tensor("op_11747_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1212290752)))]; + tensor input_679_cast = mul(x = var_11745_cast, y = var_11747_to_fp16)[name = tensor("input_679_cast")]; + tensor var_11755 = const()[name = tensor("op_11755"), val = tensor([1, 1])]; + tensor var_11757 = const()[name = tensor("op_11757"), val = tensor([1, 1])]; + tensor var_11759_pad_type_0 = const()[name = tensor("op_11759_pad_type_0"), val = tensor("custom")]; + tensor var_11759_pad_0 = const()[name = tensor("op_11759_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1212293376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1222123840))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1222124032)))]; + tensor var_11759_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_11757, groups = var_6865, pad = var_11759_pad_0, pad_type = var_11759_pad_type_0, strides = var_11755, weight = up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_679_cast)[name = tensor("op_11759_cast")]; + tensor var_11760_split_sizes_0 = const()[name = tensor("op_11760_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11760_axis_0 = const()[name = tensor("op_11760_axis_0"), val = tensor(1)]; + tensor var_11760_cast_0, tensor var_11760_cast_1 = split(axis = var_11760_axis_0, split_sizes = var_11760_split_sizes_0, x = var_11759_cast)[name = tensor("op_11760_cast")]; + tensor var_11762_mode_0 = const()[name = tensor("op_11762_mode_0"), val = tensor("EXACT")]; + tensor var_11762_cast = gelu(mode = var_11762_mode_0, x = var_11760_cast_1)[name = tensor("op_11762_cast")]; + tensor input_681_cast = mul(x = var_11760_cast_0, y = var_11762_cast)[name = tensor("input_681_cast")]; + tensor var_11766 = const()[name = tensor("op_11766"), val = 
tensor([1, 1])]; + tensor var_11768 = const()[name = tensor("op_11768"), val = tensor([1, 1])]; + tensor var_11770_pad_type_0 = const()[name = tensor("op_11770_pad_type_0"), val = tensor("custom")]; + tensor var_11770_pad_0 = const()[name = tensor("op_11770_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1222144576))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1227059840))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1227060032)))]; + tensor var_11770_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_11768, groups = var_6865, pad = var_11770_pad_0, pad_type = var_11770_pad_type_0, strides = var_11766, weight = up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_681_cast)[name = tensor("op_11770_cast")]; + tensor inputs_361_cast = add(x = var_11770_cast, y = inputs_359_cast)[name = tensor("inputs_361_cast")]; + tensor var_11780 = const()[name = tensor("op_11780"), val = tensor([1])]; + tensor channels_mean_361_cast = reduce_mean(axes = var_11780, keep_dims = var_6860, x = inputs_361_cast)[name = tensor("channels_mean_361_cast")]; + tensor zero_mean_361_cast = sub(x = inputs_361_cast, y = channels_mean_361_cast)[name = tensor("zero_mean_361_cast")]; + tensor zero_mean_sq_361_cast = mul(x = zero_mean_361_cast, y = zero_mean_361_cast)[name = tensor("zero_mean_sq_361_cast")]; + tensor var_11784 = const()[name = tensor("op_11784"), val = tensor([1])]; + tensor var_11785_cast = reduce_mean(axes = var_11784, keep_dims = var_6860, x = zero_mean_sq_361_cast)[name = tensor("op_11785_cast")]; + tensor var_11786_to_fp16 = const()[name = tensor("op_11786_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11787_cast = add(x = var_11785_cast, y = var_11786_to_fp16)[name = tensor("op_11787_cast")]; + tensor denom_361_epsilon_0_to_fp16 = const()[name = tensor("denom_361_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_361_cast = rsqrt(epsilon = denom_361_epsilon_0_to_fp16, x = var_11787_cast)[name = tensor("denom_361_cast")]; + tensor out_361_cast = mul(x = zero_mean_361_cast, y = denom_361_cast)[name = tensor("out_361_cast")]; + tensor var_11791_to_fp16 = const()[name = tensor("op_11791_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1227062656)))]; + tensor var_11792_cast = add(x = out_361_cast, y = var_11791_to_fp16)[name = tensor("op_11792_cast")]; + tensor var_11794_to_fp16 = const()[name = tensor("op_11794_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1227065280)))]; + tensor hidden_states_471_cast = mul(x = var_11792_cast, y = var_11794_to_fp16)[name = tensor("hidden_states_471_cast")]; + tensor var_11801 = const()[name = tensor("op_11801"), val = tensor([1, 1])]; + tensor var_11803 = const()[name = tensor("op_11803"), val = tensor([1, 1])]; + tensor q_241_pad_type_0 = const()[name = tensor("q_241_pad_type_0"), 
val = tensor("custom")]; + tensor q_241_pad_0 = const()[name = tensor("q_241_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1227067904))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1227887168))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_241_cast = conv(dilations = var_11803, groups = var_6865, pad = q_241_pad_0, pad_type = q_241_pad_type_0, strides = var_11801, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_471_cast)[name = tensor("q_241_cast")]; + tensor var_11807 = const()[name = tensor("op_11807"), val = tensor([1, 1])]; + tensor var_11809 = const()[name = tensor("op_11809"), val = tensor([1, 1])]; + tensor k_241_pad_type_0 = const()[name = tensor("k_241_pad_type_0"), val = tensor("custom")]; + tensor k_241_pad_0 = const()[name = tensor("k_241_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1227887296))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1228706560))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_241_cast = conv(dilations = var_11809, groups = var_6865, pad = k_241_pad_0, pad_type = k_241_pad_type_0, strides = var_11807, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_471_cast)[name = tensor("k_241_cast")]; + tensor var_11813 = const()[name = tensor("op_11813"), val = tensor([1, 1])]; + tensor var_11815 = const()[name = tensor("op_11815"), val = tensor([1, 1])]; + tensor v_241_pad_type_0 = const()[name = tensor("v_241_pad_type_0"), val = tensor("custom")]; + tensor v_241_pad_0 = const()[name = tensor("v_241_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1228706688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1229935552))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_241_cast = conv(dilations = var_11815, groups = var_6865, pad = v_241_pad_0, pad_type = v_241_pad_type_0, strides = var_11813, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_471_cast)[name = tensor("v_241_cast")]; + tensor var_11819 = const()[name = tensor("op_11819"), val = tensor([2, 20, 64, -1])]; + tensor var_11820_cast = reshape(shape = var_11819, x = q_241_cast)[name = tensor("op_11820_cast")]; + tensor var_11821 = const()[name = tensor("op_11821"), val = tensor([2, 20, 64, -1])]; + tensor var_11822_cast = reshape(shape = var_11821, x = k_241_cast)[name = tensor("op_11822_cast")]; + tensor var_11823 = const()[name = tensor("op_11823"), val = tensor([2, 20, 64, -1])]; + tensor 
var_11824_cast = reshape(shape = var_11823, x = v_241_cast)[name = tensor("op_11824_cast")]; + tensor attn_weights_481_transpose_x_0 = const()[name = tensor("attn_weights_481_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_481_transpose_y_0 = const()[name = tensor("attn_weights_481_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_481_cast = matmul(transpose_x = attn_weights_481_transpose_x_0, transpose_y = attn_weights_481_transpose_y_0, x = var_11820_cast, y = var_11822_cast)[name = tensor("attn_weights_481_cast")]; + tensor attn_weights_483_cast = mul(x = attn_weights_481_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_483_cast")]; + tensor var_11828_cast = softmax(axis = var_6849, x = attn_weights_483_cast)[name = tensor("op_11828_cast")]; + tensor attn_241_transpose_x_0 = const()[name = tensor("attn_241_transpose_x_0"), val = tensor(false)]; + tensor attn_241_transpose_y_0 = const()[name = tensor("attn_241_transpose_y_0"), val = tensor(true)]; + tensor attn_241_cast = matmul(transpose_x = attn_241_transpose_x_0, transpose_y = attn_241_transpose_y_0, x = var_11824_cast, y = var_11828_cast)[name = tensor("attn_241_cast")]; + tensor var_11832 = const()[name = tensor("op_11832"), val = tensor([2, 1280, 1, -1])]; + tensor input_683_cast = reshape(shape = var_11832, x = attn_241_cast)[name = tensor("input_683_cast")]; + tensor var_11837 = const()[name = tensor("op_11837"), val = tensor([1, 1])]; + tensor var_11839 = const()[name = tensor("op_11839"), val = tensor([1, 1])]; + tensor var_11841_pad_type_0 = const()[name = tensor("op_11841_pad_type_0"), val = tensor("custom")]; + tensor var_11841_pad_0 = const()[name = tensor("op_11841_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1229935744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231164608))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231164800)))]; + tensor var_11841_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_11839, groups = var_6865, pad = var_11841_pad_0, pad_type = var_11841_pad_type_0, strides = var_11837, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_683_cast)[name = tensor("op_11841_cast")]; + tensor inputs_363_cast = add(x = var_11841_cast, y = inputs_361_cast)[name = tensor("inputs_363_cast")]; + tensor var_11845 = const()[name = tensor("op_11845"), val = tensor([1])]; + tensor channels_mean_363_cast = reduce_mean(axes = var_11845, keep_dims = var_6860, x = inputs_363_cast)[name = tensor("channels_mean_363_cast")]; + tensor zero_mean_363_cast = sub(x = inputs_363_cast, y = channels_mean_363_cast)[name = tensor("zero_mean_363_cast")]; + tensor zero_mean_sq_363_cast = mul(x = zero_mean_363_cast, y = zero_mean_363_cast)[name = tensor("zero_mean_sq_363_cast")]; + tensor var_11849 = const()[name = tensor("op_11849"), val = tensor([1])]; + tensor var_11850_cast = 
reduce_mean(axes = var_11849, keep_dims = var_6860, x = zero_mean_sq_363_cast)[name = tensor("op_11850_cast")]; + tensor var_11851_to_fp16 = const()[name = tensor("op_11851_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11852_cast = add(x = var_11850_cast, y = var_11851_to_fp16)[name = tensor("op_11852_cast")]; + tensor denom_363_epsilon_0_to_fp16 = const()[name = tensor("denom_363_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_363_cast = rsqrt(epsilon = denom_363_epsilon_0_to_fp16, x = var_11852_cast)[name = tensor("denom_363_cast")]; + tensor out_363_cast = mul(x = zero_mean_363_cast, y = denom_363_cast)[name = tensor("out_363_cast")]; + tensor var_11856_to_fp16 = const()[name = tensor("op_11856_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231167424)))]; + tensor var_11857_cast = add(x = out_363_cast, y = var_11856_to_fp16)[name = tensor("op_11857_cast")]; + tensor var_11859_to_fp16 = const()[name = tensor("op_11859_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231170048)))]; + tensor hidden_states_473_cast = mul(x = var_11857_cast, y = var_11859_to_fp16)[name = tensor("hidden_states_473_cast")]; + tensor var_11866 = const()[name = tensor("op_11866"), val = tensor([1, 1])]; + tensor var_11868 = const()[name = tensor("op_11868"), val = tensor([1, 1])]; + tensor q_243_pad_type_0 = const()[name = tensor("q_243_pad_type_0"), val = tensor("custom")]; + tensor q_243_pad_0 = const()[name = tensor("q_243_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231172672))), lut = tensor([-0x1.81cp-6, -0x1.e1p-8, 0x1.e7p-8, 0x1.83cp-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_243_cast = conv(dilations = var_11868, groups = var_6865, pad = q_243_pad_0, pad_type = q_243_pad_type_0, strides = var_11866, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_473_cast)[name = tensor("q_243_cast")]; + tensor var_11872 = const()[name = tensor("op_11872"), val = tensor([1, 1])]; + tensor var_11874 = const()[name = tensor("op_11874"), val = tensor([1, 1])]; + tensor k_243_pad_type_0 = const()[name = tensor("k_243_pad_type_0"), val = tensor("custom")]; + tensor k_243_pad_0 = const()[name = tensor("k_243_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231582336))), lut = tensor([-0x1.22p-6, -0x1.5dp-8, 0x1.5f8p-8, 0x1.224p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_243_cast = conv(dilations = var_11874, groups = var_6865, pad = k_243_pad_0, pad_type = k_243_pad_type_0, strides = var_11872, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_243_cast")]; + tensor var_11878 = const()[name = tensor("op_11878"), val = tensor([1, 1])]; + tensor var_11880 = const()[name = tensor("op_11880"), val = tensor([1, 1])]; + tensor v_243_pad_type_0 = 
const()[name = tensor("v_243_pad_type_0"), val = tensor("custom")]; + tensor v_243_pad_0 = const()[name = tensor("v_243_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1232237760))), lut = tensor([-0x1.4cp-6, -0x1.7bp-8, 0x1.798p-8, 0x1.4b8p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_243_cast = conv(dilations = var_11880, groups = var_6865, pad = v_243_pad_0, pad_type = v_243_pad_type_0, strides = var_11878, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_243_cast")]; + tensor var_11884 = const()[name = tensor("op_11884"), val = tensor([2, 20, 64, -1])]; + tensor var_11885_cast = reshape(shape = var_11884, x = q_243_cast)[name = tensor("op_11885_cast")]; + tensor var_11886 = const()[name = tensor("op_11886"), val = tensor([2, 20, 64, -1])]; + tensor var_11887_cast = reshape(shape = var_11886, x = k_243_cast)[name = tensor("op_11887_cast")]; + tensor var_11888 = const()[name = tensor("op_11888"), val = tensor([2, 20, 64, -1])]; + tensor var_11889_cast = reshape(shape = var_11888, x = v_243_cast)[name = tensor("op_11889_cast")]; + tensor attn_weights_485_transpose_x_0 = const()[name = tensor("attn_weights_485_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_485_transpose_y_0 = const()[name = tensor("attn_weights_485_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_485_cast = matmul(transpose_x = attn_weights_485_transpose_x_0, transpose_y = attn_weights_485_transpose_y_0, x = var_11885_cast, y = var_11887_cast)[name = tensor("attn_weights_485_cast")]; + tensor attn_weights_487_cast = mul(x = attn_weights_485_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_487_cast")]; + tensor var_11893_cast = softmax(axis = var_6849, x = attn_weights_487_cast)[name = tensor("op_11893_cast")]; + tensor attn_243_transpose_x_0 = const()[name = tensor("attn_243_transpose_x_0"), val = tensor(false)]; + tensor attn_243_transpose_y_0 = const()[name = tensor("attn_243_transpose_y_0"), val = tensor(true)]; + tensor attn_243_cast = matmul(transpose_x = attn_243_transpose_x_0, transpose_y = attn_243_transpose_y_0, x = var_11889_cast, y = var_11893_cast)[name = tensor("attn_243_cast")]; + tensor var_11897 = const()[name = tensor("op_11897"), val = tensor([2, 1280, 1, -1])]; + tensor input_685_cast = reshape(shape = var_11897, x = attn_243_cast)[name = tensor("input_685_cast")]; + tensor var_11902 = const()[name = tensor("op_11902"), val = tensor([1, 1])]; + tensor var_11904 = const()[name = tensor("op_11904"), val = tensor([1, 1])]; + tensor var_11906_pad_type_0 = const()[name = tensor("op_11906_pad_type_0"), val = tensor("custom")]; + tensor var_11906_pad_0 = const()[name = tensor("op_11906_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1232893184))), lut = tensor([-0x1.4bcp-7, -0x1.8a8p-9, 0x1.84cp-9, 0x1.4a8p-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor 
up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1233302848)))]; + tensor var_11906_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_11904, groups = var_6865, pad = var_11906_pad_0, pad_type = var_11906_pad_type_0, strides = var_11902, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_685_cast)[name = tensor("op_11906_cast")]; + tensor inputs_365_cast = add(x = var_11906_cast, y = inputs_363_cast)[name = tensor("inputs_365_cast")]; + tensor var_11910 = const()[name = tensor("op_11910"), val = tensor([1])]; + tensor channels_mean_365_cast = reduce_mean(axes = var_11910, keep_dims = var_6860, x = inputs_365_cast)[name = tensor("channels_mean_365_cast")]; + tensor zero_mean_365_cast = sub(x = inputs_365_cast, y = channels_mean_365_cast)[name = tensor("zero_mean_365_cast")]; + tensor zero_mean_sq_365_cast = mul(x = zero_mean_365_cast, y = zero_mean_365_cast)[name = tensor("zero_mean_sq_365_cast")]; + tensor var_11914 = const()[name = tensor("op_11914"), val = tensor([1])]; + tensor var_11915_cast = reduce_mean(axes = var_11914, keep_dims = var_6860, x = zero_mean_sq_365_cast)[name = tensor("op_11915_cast")]; + tensor var_11916_to_fp16 = const()[name = tensor("op_11916_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11917_cast = add(x = var_11915_cast, y = var_11916_to_fp16)[name = tensor("op_11917_cast")]; + tensor denom_365_epsilon_0_to_fp16 = const()[name = tensor("denom_365_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_365_cast = rsqrt(epsilon = denom_365_epsilon_0_to_fp16, x = var_11917_cast)[name = tensor("denom_365_cast")]; + tensor out_365_cast = mul(x = zero_mean_365_cast, y = denom_365_cast)[name = tensor("out_365_cast")]; + tensor var_11921_to_fp16 = const()[name = tensor("op_11921_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1233305472)))]; + tensor var_11922_cast = add(x = out_365_cast, y = var_11921_to_fp16)[name = tensor("op_11922_cast")]; + tensor var_11924_to_fp16 = const()[name = tensor("op_11924_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1233308096)))]; + tensor input_687_cast = mul(x = var_11922_cast, y = var_11924_to_fp16)[name = tensor("input_687_cast")]; + tensor var_11932 = const()[name = tensor("op_11932"), val = tensor([1, 1])]; + tensor var_11934 = const()[name = tensor("op_11934"), val = tensor([1, 1])]; + tensor var_11936_pad_type_0 = const()[name = tensor("op_11936_pad_type_0"), val = tensor("custom")]; + tensor var_11936_pad_0 = const()[name = tensor("op_11936_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1233310720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1243141184))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1243141376)))]; + tensor var_11936_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_11934, groups = var_6865, pad = var_11936_pad_0, pad_type = var_11936_pad_type_0, strides = var_11932, weight = up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_687_cast)[name = tensor("op_11936_cast")]; + tensor var_11937_split_sizes_0 = const()[name = tensor("op_11937_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11937_axis_0 = const()[name = tensor("op_11937_axis_0"), val = tensor(1)]; + tensor var_11937_cast_0, tensor var_11937_cast_1 = split(axis = var_11937_axis_0, split_sizes = var_11937_split_sizes_0, x = var_11936_cast)[name = tensor("op_11937_cast")]; + tensor var_11939_mode_0 = const()[name = tensor("op_11939_mode_0"), val = tensor("EXACT")]; + tensor var_11939_cast = gelu(mode = var_11939_mode_0, x = var_11937_cast_1)[name = tensor("op_11939_cast")]; + tensor input_689_cast = mul(x = var_11937_cast_0, y = var_11939_cast)[name = tensor("input_689_cast")]; + tensor var_11943 = const()[name = tensor("op_11943"), val = tensor([1, 1])]; + tensor var_11945 = const()[name = tensor("op_11945"), val = tensor([1, 1])]; + tensor var_11947_pad_type_0 = const()[name = tensor("op_11947_pad_type_0"), val = tensor("custom")]; + tensor var_11947_pad_0 = const()[name = tensor("op_11947_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1243161920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1248077184))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1248077376)))]; + tensor var_11947_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_11945, groups = var_6865, pad = var_11947_pad_0, pad_type = var_11947_pad_type_0, strides = var_11943, weight = up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_689_cast)[name = tensor("op_11947_cast")]; + tensor inputs_367_cast = add(x = var_11947_cast, y = inputs_365_cast)[name = tensor("inputs_367_cast")]; + tensor var_11957 = const()[name = tensor("op_11957"), val = tensor([1])]; + tensor channels_mean_367_cast = reduce_mean(axes = var_11957, keep_dims = var_6860, x = inputs_367_cast)[name = tensor("channels_mean_367_cast")]; + tensor zero_mean_367_cast = sub(x = inputs_367_cast, y = channels_mean_367_cast)[name = tensor("zero_mean_367_cast")]; + tensor zero_mean_sq_367_cast = mul(x = zero_mean_367_cast, y = zero_mean_367_cast)[name = tensor("zero_mean_sq_367_cast")]; + tensor var_11961 = const()[name = tensor("op_11961"), val = tensor([1])]; + tensor var_11962_cast = reduce_mean(axes = var_11961, keep_dims = var_6860, x = zero_mean_sq_367_cast)[name = tensor("op_11962_cast")]; + tensor var_11963_to_fp16 = const()[name 
= tensor("op_11963_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11964_cast = add(x = var_11962_cast, y = var_11963_to_fp16)[name = tensor("op_11964_cast")]; + tensor denom_367_epsilon_0_to_fp16 = const()[name = tensor("denom_367_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_367_cast = rsqrt(epsilon = denom_367_epsilon_0_to_fp16, x = var_11964_cast)[name = tensor("denom_367_cast")]; + tensor out_367_cast = mul(x = zero_mean_367_cast, y = denom_367_cast)[name = tensor("out_367_cast")]; + tensor var_11968_to_fp16 = const()[name = tensor("op_11968_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1248080000)))]; + tensor var_11969_cast = add(x = out_367_cast, y = var_11968_to_fp16)[name = tensor("op_11969_cast")]; + tensor var_11971_to_fp16 = const()[name = tensor("op_11971_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1248082624)))]; + tensor hidden_states_477_cast = mul(x = var_11969_cast, y = var_11971_to_fp16)[name = tensor("hidden_states_477_cast")]; + tensor var_11978 = const()[name = tensor("op_11978"), val = tensor([1, 1])]; + tensor var_11980 = const()[name = tensor("op_11980"), val = tensor([1, 1])]; + tensor q_245_pad_type_0 = const()[name = tensor("q_245_pad_type_0"), val = tensor("custom")]; + tensor q_245_pad_0 = const()[name = tensor("q_245_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1248085248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1248904512))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_245_cast = conv(dilations = var_11980, groups = var_6865, pad = q_245_pad_0, pad_type = q_245_pad_type_0, strides = var_11978, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_477_cast)[name = tensor("q_245_cast")]; + tensor var_11984 = const()[name = tensor("op_11984"), val = tensor([1, 1])]; + tensor var_11986 = const()[name = tensor("op_11986"), val = tensor([1, 1])]; + tensor k_245_pad_type_0 = const()[name = tensor("k_245_pad_type_0"), val = tensor("custom")]; + tensor k_245_pad_0 = const()[name = tensor("k_245_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1248904640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1249723904))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_245_cast = conv(dilations = var_11986, groups = var_6865, pad = k_245_pad_0, pad_type = k_245_pad_type_0, strides = var_11984, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_477_cast)[name = tensor("k_245_cast")]; + tensor var_11990 = const()[name = tensor("op_11990"), val = tensor([1, 1])]; + tensor var_11992 = const()[name = tensor("op_11992"), val = tensor([1, 1])]; + tensor v_245_pad_type_0 = const()[name = tensor("v_245_pad_type_0"), val = tensor("custom")]; + tensor 
v_245_pad_0 = const()[name = tensor("v_245_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1249724032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1250952896))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_245_cast = conv(dilations = var_11992, groups = var_6865, pad = v_245_pad_0, pad_type = v_245_pad_type_0, strides = var_11990, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_477_cast)[name = tensor("v_245_cast")]; + tensor var_11996 = const()[name = tensor("op_11996"), val = tensor([2, 20, 64, -1])]; + tensor var_11997_cast = reshape(shape = var_11996, x = q_245_cast)[name = tensor("op_11997_cast")]; + tensor var_11998 = const()[name = tensor("op_11998"), val = tensor([2, 20, 64, -1])]; + tensor var_11999_cast = reshape(shape = var_11998, x = k_245_cast)[name = tensor("op_11999_cast")]; + tensor var_12000 = const()[name = tensor("op_12000"), val = tensor([2, 20, 64, -1])]; + tensor var_12001_cast = reshape(shape = var_12000, x = v_245_cast)[name = tensor("op_12001_cast")]; + tensor attn_weights_489_transpose_x_0 = const()[name = tensor("attn_weights_489_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_489_transpose_y_0 = const()[name = tensor("attn_weights_489_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_489_cast = matmul(transpose_x = attn_weights_489_transpose_x_0, transpose_y = attn_weights_489_transpose_y_0, x = var_11997_cast, y = var_11999_cast)[name = tensor("attn_weights_489_cast")]; + tensor attn_weights_491_cast = mul(x = attn_weights_489_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_491_cast")]; + tensor var_12005_cast = softmax(axis = var_6849, x = attn_weights_491_cast)[name = tensor("op_12005_cast")]; + tensor attn_245_transpose_x_0 = const()[name = tensor("attn_245_transpose_x_0"), val = tensor(false)]; + tensor attn_245_transpose_y_0 = const()[name = tensor("attn_245_transpose_y_0"), val = tensor(true)]; + tensor attn_245_cast = matmul(transpose_x = attn_245_transpose_x_0, transpose_y = attn_245_transpose_y_0, x = var_12001_cast, y = var_12005_cast)[name = tensor("attn_245_cast")]; + tensor var_12009 = const()[name = tensor("op_12009"), val = tensor([2, 1280, 1, -1])]; + tensor input_691_cast = reshape(shape = var_12009, x = attn_245_cast)[name = tensor("input_691_cast")]; + tensor var_12014 = const()[name = tensor("op_12014"), val = tensor([1, 1])]; + tensor var_12016 = const()[name = tensor("op_12016"), val = tensor([1, 1])]; + tensor var_12018_pad_type_0 = const()[name = tensor("op_12018_pad_type_0"), val = tensor("custom")]; + tensor var_12018_pad_0 = const()[name = tensor("op_12018_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1250953088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1252181952))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor 
up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1252182144)))]; + tensor var_12018_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_12016, groups = var_6865, pad = var_12018_pad_0, pad_type = var_12018_pad_type_0, strides = var_12014, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_691_cast)[name = tensor("op_12018_cast")]; + tensor inputs_369_cast = add(x = var_12018_cast, y = inputs_367_cast)[name = tensor("inputs_369_cast")]; + tensor var_12022 = const()[name = tensor("op_12022"), val = tensor([1])]; + tensor channels_mean_369_cast = reduce_mean(axes = var_12022, keep_dims = var_6860, x = inputs_369_cast)[name = tensor("channels_mean_369_cast")]; + tensor zero_mean_369_cast = sub(x = inputs_369_cast, y = channels_mean_369_cast)[name = tensor("zero_mean_369_cast")]; + tensor zero_mean_sq_369_cast = mul(x = zero_mean_369_cast, y = zero_mean_369_cast)[name = tensor("zero_mean_sq_369_cast")]; + tensor var_12026 = const()[name = tensor("op_12026"), val = tensor([1])]; + tensor var_12027_cast = reduce_mean(axes = var_12026, keep_dims = var_6860, x = zero_mean_sq_369_cast)[name = tensor("op_12027_cast")]; + tensor var_12028_to_fp16 = const()[name = tensor("op_12028_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12029_cast = add(x = var_12027_cast, y = var_12028_to_fp16)[name = tensor("op_12029_cast")]; + tensor denom_369_epsilon_0_to_fp16 = const()[name = tensor("denom_369_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_369_cast = rsqrt(epsilon = denom_369_epsilon_0_to_fp16, x = var_12029_cast)[name = tensor("denom_369_cast")]; + tensor out_369_cast = mul(x = zero_mean_369_cast, y = denom_369_cast)[name = tensor("out_369_cast")]; + tensor var_12033_to_fp16 = const()[name = tensor("op_12033_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1252184768)))]; + tensor var_12034_cast = add(x = out_369_cast, y = var_12033_to_fp16)[name = tensor("op_12034_cast")]; + tensor var_12036_to_fp16 = const()[name = tensor("op_12036_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1252187392)))]; + tensor hidden_states_479_cast = mul(x = var_12034_cast, y = var_12036_to_fp16)[name = tensor("hidden_states_479_cast")]; + tensor var_12043 = const()[name = tensor("op_12043"), val = tensor([1, 1])]; + tensor var_12045 = const()[name = tensor("op_12045"), val = tensor([1, 1])]; + tensor q_247_pad_type_0 = const()[name = tensor("q_247_pad_type_0"), val = tensor("custom")]; + tensor q_247_pad_0 = const()[name = tensor("q_247_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1252190016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1253418880))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_247_cast = conv(dilations = var_12045, groups = var_6865, pad = q_247_pad_0, pad_type = q_247_pad_type_0, strides = var_12043, weight = 
up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_479_cast)[name = tensor("q_247_cast")]; + tensor var_12049 = const()[name = tensor("op_12049"), val = tensor([1, 1])]; + tensor var_12051 = const()[name = tensor("op_12051"), val = tensor([1, 1])]; + tensor k_247_pad_type_0 = const()[name = tensor("k_247_pad_type_0"), val = tensor("custom")]; + tensor k_247_pad_0 = const()[name = tensor("k_247_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1253419072))), lut = tensor([-0x1.128p-6, -0x1.4dp-8, 0x1.4e8p-8, 0x1.12cp-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_247_cast = conv(dilations = var_12051, groups = var_6865, pad = k_247_pad_0, pad_type = k_247_pad_type_0, strides = var_12049, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_247_cast")]; + tensor var_12055 = const()[name = tensor("op_12055"), val = tensor([1, 1])]; + tensor var_12057 = const()[name = tensor("op_12057"), val = tensor([1, 1])]; + tensor v_247_pad_type_0 = const()[name = tensor("v_247_pad_type_0"), val = tensor("custom")]; + tensor v_247_pad_0 = const()[name = tensor("v_247_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1254074496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1255385280))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_247_cast = conv(dilations = var_12057, groups = var_6865, pad = v_247_pad_0, pad_type = v_247_pad_type_0, strides = var_12055, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_247_cast")]; + tensor var_12061 = const()[name = tensor("op_12061"), val = tensor([2, 20, 64, -1])]; + tensor var_12062_cast = reshape(shape = var_12061, x = q_247_cast)[name = tensor("op_12062_cast")]; + tensor var_12063 = const()[name = tensor("op_12063"), val = tensor([2, 20, 64, -1])]; + tensor var_12064_cast = reshape(shape = var_12063, x = k_247_cast)[name = tensor("op_12064_cast")]; + tensor var_12065 = const()[name = tensor("op_12065"), val = tensor([2, 20, 64, -1])]; + tensor var_12066_cast = reshape(shape = var_12065, x = v_247_cast)[name = tensor("op_12066_cast")]; + tensor attn_weights_493_transpose_x_0 = const()[name = tensor("attn_weights_493_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_493_transpose_y_0 = const()[name = tensor("attn_weights_493_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_493_cast = matmul(transpose_x = attn_weights_493_transpose_x_0, transpose_y = attn_weights_493_transpose_y_0, x = var_12062_cast, y = var_12064_cast)[name = tensor("attn_weights_493_cast")]; + tensor attn_weights_495_cast = mul(x = attn_weights_493_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_495_cast")]; + tensor var_12070_cast = softmax(axis = var_6849, x = attn_weights_495_cast)[name = 
tensor("op_12070_cast")]; + tensor attn_247_transpose_x_0 = const()[name = tensor("attn_247_transpose_x_0"), val = tensor(false)]; + tensor attn_247_transpose_y_0 = const()[name = tensor("attn_247_transpose_y_0"), val = tensor(true)]; + tensor attn_247_cast = matmul(transpose_x = attn_247_transpose_x_0, transpose_y = attn_247_transpose_y_0, x = var_12066_cast, y = var_12070_cast)[name = tensor("attn_247_cast")]; + tensor var_12074 = const()[name = tensor("op_12074"), val = tensor([2, 1280, 1, -1])]; + tensor input_693_cast = reshape(shape = var_12074, x = attn_247_cast)[name = tensor("input_693_cast")]; + tensor var_12079 = const()[name = tensor("op_12079"), val = tensor([1, 1])]; + tensor var_12081 = const()[name = tensor("op_12081"), val = tensor([1, 1])]; + tensor var_12083_pad_type_0 = const()[name = tensor("op_12083_pad_type_0"), val = tensor("custom")]; + tensor var_12083_pad_0 = const()[name = tensor("op_12083_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1255385408))), lut = tensor([-0x1.614p-8, 0x1.6p-8]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1255590272)))]; + tensor var_12083_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_12081, groups = var_6865, pad = var_12083_pad_0, pad_type = var_12083_pad_type_0, strides = var_12079, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_693_cast)[name = tensor("op_12083_cast")]; + tensor inputs_371_cast = add(x = var_12083_cast, y = inputs_369_cast)[name = tensor("inputs_371_cast")]; + tensor var_12087 = const()[name = tensor("op_12087"), val = tensor([1])]; + tensor channels_mean_371_cast = reduce_mean(axes = var_12087, keep_dims = var_6860, x = inputs_371_cast)[name = tensor("channels_mean_371_cast")]; + tensor zero_mean_371_cast = sub(x = inputs_371_cast, y = channels_mean_371_cast)[name = tensor("zero_mean_371_cast")]; + tensor zero_mean_sq_371_cast = mul(x = zero_mean_371_cast, y = zero_mean_371_cast)[name = tensor("zero_mean_sq_371_cast")]; + tensor var_12091 = const()[name = tensor("op_12091"), val = tensor([1])]; + tensor var_12092_cast = reduce_mean(axes = var_12091, keep_dims = var_6860, x = zero_mean_sq_371_cast)[name = tensor("op_12092_cast")]; + tensor var_12093_to_fp16 = const()[name = tensor("op_12093_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12094_cast = add(x = var_12092_cast, y = var_12093_to_fp16)[name = tensor("op_12094_cast")]; + tensor denom_371_epsilon_0_to_fp16 = const()[name = tensor("denom_371_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_371_cast = rsqrt(epsilon = denom_371_epsilon_0_to_fp16, x = var_12094_cast)[name = tensor("denom_371_cast")]; + tensor out_371_cast = mul(x = zero_mean_371_cast, y = denom_371_cast)[name = tensor("out_371_cast")]; + tensor var_12098_to_fp16 = const()[name = tensor("op_12098_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1255592896)))]; + tensor var_12099_cast = add(x = out_371_cast, y = var_12098_to_fp16)[name = tensor("op_12099_cast")]; + tensor var_12101_to_fp16 = const()[name = tensor("op_12101_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1255595520)))]; + tensor input_695_cast = mul(x = var_12099_cast, y = var_12101_to_fp16)[name = tensor("input_695_cast")]; + tensor var_12109 = const()[name = tensor("op_12109"), val = tensor([1, 1])]; + tensor var_12111 = const()[name = tensor("op_12111"), val = tensor([1, 1])]; + tensor var_12113_pad_type_0 = const()[name = tensor("op_12113_pad_type_0"), val = tensor("custom")]; + tensor var_12113_pad_0 = const()[name = tensor("op_12113_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1255598144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1265428608))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1265428800)))]; + tensor var_12113_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_12111, groups = var_6865, pad = var_12113_pad_0, pad_type = var_12113_pad_type_0, strides = var_12109, weight = up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_695_cast)[name = tensor("op_12113_cast")]; + tensor var_12114_split_sizes_0 = const()[name = tensor("op_12114_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_12114_axis_0 = const()[name = tensor("op_12114_axis_0"), val = tensor(1)]; + tensor var_12114_cast_0, tensor var_12114_cast_1 = split(axis = var_12114_axis_0, split_sizes = var_12114_split_sizes_0, x = var_12113_cast)[name = tensor("op_12114_cast")]; + tensor var_12116_mode_0 = const()[name = tensor("op_12116_mode_0"), val = tensor("EXACT")]; + tensor var_12116_cast = gelu(mode = var_12116_mode_0, x = var_12114_cast_1)[name = tensor("op_12116_cast")]; + tensor input_697_cast = mul(x = var_12114_cast_0, y = var_12116_cast)[name = tensor("input_697_cast")]; + tensor var_12120 = const()[name = tensor("op_12120"), val = tensor([1, 1])]; + tensor var_12122 = const()[name = tensor("op_12122"), val = tensor([1, 1])]; + tensor var_12124_pad_type_0 = const()[name = tensor("op_12124_pad_type_0"), val = tensor("custom")]; + tensor var_12124_pad_0 = const()[name = tensor("op_12124_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1265449344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1270364608))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1270364800)))]; + tensor var_12124_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_12122, groups = var_6865, pad = var_12124_pad_0, pad_type = var_12124_pad_type_0, strides = var_12120, weight = up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_697_cast)[name = tensor("op_12124_cast")]; + tensor inputs_373_cast = add(x = var_12124_cast, y = inputs_371_cast)[name = tensor("inputs_373_cast")]; + tensor var_12134 = const()[name = tensor("op_12134"), val = tensor([1])]; + tensor channels_mean_373_cast = reduce_mean(axes = var_12134, keep_dims = var_6860, x = inputs_373_cast)[name = tensor("channels_mean_373_cast")]; + tensor zero_mean_373_cast = sub(x = inputs_373_cast, y = channels_mean_373_cast)[name = tensor("zero_mean_373_cast")]; + tensor zero_mean_sq_373_cast = mul(x = zero_mean_373_cast, y = zero_mean_373_cast)[name = tensor("zero_mean_sq_373_cast")]; + tensor var_12138 = const()[name = tensor("op_12138"), val = tensor([1])]; + tensor var_12139_cast = reduce_mean(axes = var_12138, keep_dims = var_6860, x = zero_mean_sq_373_cast)[name = tensor("op_12139_cast")]; + tensor var_12140_to_fp16 = const()[name = tensor("op_12140_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12141_cast = add(x = var_12139_cast, y = var_12140_to_fp16)[name = tensor("op_12141_cast")]; + tensor denom_373_epsilon_0_to_fp16 = const()[name = tensor("denom_373_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_373_cast = rsqrt(epsilon = denom_373_epsilon_0_to_fp16, x = var_12141_cast)[name = tensor("denom_373_cast")]; + tensor out_373_cast = mul(x = zero_mean_373_cast, y = denom_373_cast)[name = tensor("out_373_cast")]; + tensor var_12145_to_fp16 = const()[name = tensor("op_12145_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1270367424)))]; + tensor var_12146_cast = add(x = out_373_cast, y = var_12145_to_fp16)[name = tensor("op_12146_cast")]; + tensor var_12148_to_fp16 = const()[name = tensor("op_12148_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1270370048)))]; + tensor hidden_states_483_cast = mul(x = var_12146_cast, y = var_12148_to_fp16)[name = tensor("hidden_states_483_cast")]; + tensor var_12155 = const()[name = tensor("op_12155"), val = tensor([1, 1])]; + tensor var_12157 = const()[name = tensor("op_12157"), val = tensor([1, 1])]; + tensor q_249_pad_type_0 = const()[name = tensor("q_249_pad_type_0"), val = tensor("custom")]; + tensor q_249_pad_0 = const()[name = tensor("q_249_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1270372672))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1271191936))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_249_cast = conv(dilations = var_12157, groups = var_6865, pad = q_249_pad_0, pad_type = q_249_pad_type_0, strides = var_12155, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_483_cast)[name = 
tensor("q_249_cast")]; + tensor var_12161 = const()[name = tensor("op_12161"), val = tensor([1, 1])]; + tensor var_12163 = const()[name = tensor("op_12163"), val = tensor([1, 1])]; + tensor k_249_pad_type_0 = const()[name = tensor("k_249_pad_type_0"), val = tensor("custom")]; + tensor k_249_pad_0 = const()[name = tensor("k_249_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1271192064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1272011328))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_249_cast = conv(dilations = var_12163, groups = var_6865, pad = k_249_pad_0, pad_type = k_249_pad_type_0, strides = var_12161, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_483_cast)[name = tensor("k_249_cast")]; + tensor var_12167 = const()[name = tensor("op_12167"), val = tensor([1, 1])]; + tensor var_12169 = const()[name = tensor("op_12169"), val = tensor([1, 1])]; + tensor v_249_pad_type_0 = const()[name = tensor("v_249_pad_type_0"), val = tensor("custom")]; + tensor v_249_pad_0 = const()[name = tensor("v_249_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1272011456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1273240320))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_249_cast = conv(dilations = var_12169, groups = var_6865, pad = v_249_pad_0, pad_type = v_249_pad_type_0, strides = var_12167, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_483_cast)[name = tensor("v_249_cast")]; + tensor var_12173 = const()[name = tensor("op_12173"), val = tensor([2, 20, 64, -1])]; + tensor var_12174_cast = reshape(shape = var_12173, x = q_249_cast)[name = tensor("op_12174_cast")]; + tensor var_12175 = const()[name = tensor("op_12175"), val = tensor([2, 20, 64, -1])]; + tensor var_12176_cast = reshape(shape = var_12175, x = k_249_cast)[name = tensor("op_12176_cast")]; + tensor var_12177 = const()[name = tensor("op_12177"), val = tensor([2, 20, 64, -1])]; + tensor var_12178_cast = reshape(shape = var_12177, x = v_249_cast)[name = tensor("op_12178_cast")]; + tensor attn_weights_497_transpose_x_0 = const()[name = tensor("attn_weights_497_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_497_transpose_y_0 = const()[name = tensor("attn_weights_497_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_497_cast = matmul(transpose_x = attn_weights_497_transpose_x_0, transpose_y = attn_weights_497_transpose_y_0, x = var_12174_cast, y = var_12176_cast)[name = tensor("attn_weights_497_cast")]; + tensor attn_weights_499_cast = mul(x = attn_weights_497_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_499_cast")]; + tensor var_12182_cast = softmax(axis = var_6849, x = attn_weights_499_cast)[name = tensor("op_12182_cast")]; + tensor attn_249_transpose_x_0 = const()[name = 
tensor("attn_249_transpose_x_0"), val = tensor(false)]; + tensor attn_249_transpose_y_0 = const()[name = tensor("attn_249_transpose_y_0"), val = tensor(true)]; + tensor attn_249_cast = matmul(transpose_x = attn_249_transpose_x_0, transpose_y = attn_249_transpose_y_0, x = var_12178_cast, y = var_12182_cast)[name = tensor("attn_249_cast")]; + tensor var_12186 = const()[name = tensor("op_12186"), val = tensor([2, 1280, 1, -1])]; + tensor input_699_cast = reshape(shape = var_12186, x = attn_249_cast)[name = tensor("input_699_cast")]; + tensor var_12191 = const()[name = tensor("op_12191"), val = tensor([1, 1])]; + tensor var_12193 = const()[name = tensor("op_12193"), val = tensor([1, 1])]; + tensor var_12195_pad_type_0 = const()[name = tensor("op_12195_pad_type_0"), val = tensor("custom")]; + tensor var_12195_pad_0 = const()[name = tensor("op_12195_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1273240512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1274469376))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1274469568)))]; + tensor var_12195_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_12193, groups = var_6865, pad = var_12195_pad_0, pad_type = var_12195_pad_type_0, strides = var_12191, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_699_cast)[name = tensor("op_12195_cast")]; + tensor inputs_375_cast = add(x = var_12195_cast, y = inputs_373_cast)[name = tensor("inputs_375_cast")]; + tensor var_12199 = const()[name = tensor("op_12199"), val = tensor([1])]; + tensor channels_mean_375_cast = reduce_mean(axes = var_12199, keep_dims = var_6860, x = inputs_375_cast)[name = tensor("channels_mean_375_cast")]; + tensor zero_mean_375_cast = sub(x = inputs_375_cast, y = channels_mean_375_cast)[name = tensor("zero_mean_375_cast")]; + tensor zero_mean_sq_375_cast = mul(x = zero_mean_375_cast, y = zero_mean_375_cast)[name = tensor("zero_mean_sq_375_cast")]; + tensor var_12203 = const()[name = tensor("op_12203"), val = tensor([1])]; + tensor var_12204_cast = reduce_mean(axes = var_12203, keep_dims = var_6860, x = zero_mean_sq_375_cast)[name = tensor("op_12204_cast")]; + tensor var_12205_to_fp16 = const()[name = tensor("op_12205_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12206_cast = add(x = var_12204_cast, y = var_12205_to_fp16)[name = tensor("op_12206_cast")]; + tensor denom_375_epsilon_0_to_fp16 = const()[name = tensor("denom_375_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_375_cast = rsqrt(epsilon = denom_375_epsilon_0_to_fp16, x = var_12206_cast)[name = tensor("denom_375_cast")]; + tensor out_375_cast = mul(x = zero_mean_375_cast, y = denom_375_cast)[name = tensor("out_375_cast")]; + tensor var_12210_to_fp16 = const()[name = tensor("op_12210_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1274472192)))]; 
+ tensor var_12211_cast = add(x = out_375_cast, y = var_12210_to_fp16)[name = tensor("op_12211_cast")]; + tensor var_12213_to_fp16 = const()[name = tensor("op_12213_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1274474816)))]; + tensor hidden_states_485_cast = mul(x = var_12211_cast, y = var_12213_to_fp16)[name = tensor("hidden_states_485_cast")]; + tensor var_12220 = const()[name = tensor("op_12220"), val = tensor([1, 1])]; + tensor var_12222 = const()[name = tensor("op_12222"), val = tensor([1, 1])]; + tensor q_251_pad_type_0 = const()[name = tensor("q_251_pad_type_0"), val = tensor("custom")]; + tensor q_251_pad_0 = const()[name = tensor("q_251_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1274477440))), lut = tensor([-0x1.648p-6, -0x1.c48p-8, 0x1.c1p-8, 0x1.644p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_251_cast = conv(dilations = var_12222, groups = var_6865, pad = q_251_pad_0, pad_type = q_251_pad_type_0, strides = var_12220, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_485_cast)[name = tensor("q_251_cast")]; + tensor var_12226 = const()[name = tensor("op_12226"), val = tensor([1, 1])]; + tensor var_12228 = const()[name = tensor("op_12228"), val = tensor([1, 1])]; + tensor k_251_pad_type_0 = const()[name = tensor("k_251_pad_type_0"), val = tensor("custom")]; + tensor k_251_pad_0 = const()[name = tensor("k_251_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1274887104))), lut = tensor([-0x1.efp-7, -0x1.328p-8, 0x1.318p-8, 0x1.ee8p-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_251_cast = conv(dilations = var_12228, groups = var_6865, pad = k_251_pad_0, pad_type = k_251_pad_type_0, strides = var_12226, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_251_cast")]; + tensor var_12232 = const()[name = tensor("op_12232"), val = tensor([1, 1])]; + tensor var_12234 = const()[name = tensor("op_12234"), val = tensor([1, 1])]; + tensor v_251_pad_type_0 = const()[name = tensor("v_251_pad_type_0"), val = tensor("custom")]; + tensor v_251_pad_0 = const()[name = tensor("v_251_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1275542528))), lut = tensor([-0x1.1b8p-6, -0x1.4bcp-8, 0x1.478p-8, 0x1.1a8p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_251_cast = conv(dilations = var_12234, groups = var_6865, pad = v_251_pad_0, pad_type = v_251_pad_type_0, strides = var_12232, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = 
encoder_hidden_states)[name = tensor("v_251_cast")]; + tensor var_12238 = const()[name = tensor("op_12238"), val = tensor([2, 20, 64, -1])]; + tensor var_12239_cast = reshape(shape = var_12238, x = q_251_cast)[name = tensor("op_12239_cast")]; + tensor var_12240 = const()[name = tensor("op_12240"), val = tensor([2, 20, 64, -1])]; + tensor var_12241_cast = reshape(shape = var_12240, x = k_251_cast)[name = tensor("op_12241_cast")]; + tensor var_12242 = const()[name = tensor("op_12242"), val = tensor([2, 20, 64, -1])]; + tensor var_12243_cast = reshape(shape = var_12242, x = v_251_cast)[name = tensor("op_12243_cast")]; + tensor attn_weights_501_transpose_x_0 = const()[name = tensor("attn_weights_501_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_501_transpose_y_0 = const()[name = tensor("attn_weights_501_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_501_cast = matmul(transpose_x = attn_weights_501_transpose_x_0, transpose_y = attn_weights_501_transpose_y_0, x = var_12239_cast, y = var_12241_cast)[name = tensor("attn_weights_501_cast")]; + tensor attn_weights_503_cast = mul(x = attn_weights_501_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_503_cast")]; + tensor var_12247_cast = softmax(axis = var_6849, x = attn_weights_503_cast)[name = tensor("op_12247_cast")]; + tensor attn_251_transpose_x_0 = const()[name = tensor("attn_251_transpose_x_0"), val = tensor(false)]; + tensor attn_251_transpose_y_0 = const()[name = tensor("attn_251_transpose_y_0"), val = tensor(true)]; + tensor attn_251_cast = matmul(transpose_x = attn_251_transpose_x_0, transpose_y = attn_251_transpose_y_0, x = var_12243_cast, y = var_12247_cast)[name = tensor("attn_251_cast")]; + tensor var_12251 = const()[name = tensor("op_12251"), val = tensor([2, 1280, 1, -1])]; + tensor input_701_cast = reshape(shape = var_12251, x = attn_251_cast)[name = tensor("input_701_cast")]; + tensor var_12256 = const()[name = tensor("op_12256"), val = tensor([1, 1])]; + tensor var_12258 = const()[name = tensor("op_12258"), val = tensor([1, 1])]; + tensor var_12260_pad_type_0 = const()[name = tensor("op_12260_pad_type_0"), val = tensor("custom")]; + tensor var_12260_pad_0 = const()[name = tensor("op_12260_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1276197952))), lut = tensor([-0x1.478p-8, 0x1.488p-8]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1276402816)))]; + tensor var_12260_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_12258, groups = var_6865, pad = var_12260_pad_0, pad_type = var_12260_pad_type_0, strides = var_12256, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_701_cast)[name = tensor("op_12260_cast")]; + tensor inputs_377_cast = add(x = var_12260_cast, y = inputs_375_cast)[name = tensor("inputs_377_cast")]; + tensor var_12264 = const()[name = tensor("op_12264"), val = tensor([1])]; + tensor channels_mean_377_cast 
= reduce_mean(axes = var_12264, keep_dims = var_6860, x = inputs_377_cast)[name = tensor("channels_mean_377_cast")]; + tensor zero_mean_377_cast = sub(x = inputs_377_cast, y = channels_mean_377_cast)[name = tensor("zero_mean_377_cast")]; + tensor zero_mean_sq_377_cast = mul(x = zero_mean_377_cast, y = zero_mean_377_cast)[name = tensor("zero_mean_sq_377_cast")]; + tensor var_12268 = const()[name = tensor("op_12268"), val = tensor([1])]; + tensor var_12269_cast = reduce_mean(axes = var_12268, keep_dims = var_6860, x = zero_mean_sq_377_cast)[name = tensor("op_12269_cast")]; + tensor var_12270_to_fp16 = const()[name = tensor("op_12270_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12271_cast = add(x = var_12269_cast, y = var_12270_to_fp16)[name = tensor("op_12271_cast")]; + tensor denom_377_epsilon_0_to_fp16 = const()[name = tensor("denom_377_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_377_cast = rsqrt(epsilon = denom_377_epsilon_0_to_fp16, x = var_12271_cast)[name = tensor("denom_377_cast")]; + tensor out_377_cast = mul(x = zero_mean_377_cast, y = denom_377_cast)[name = tensor("out_377_cast")]; + tensor var_12275_to_fp16 = const()[name = tensor("op_12275_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1276405440)))]; + tensor var_12276_cast = add(x = out_377_cast, y = var_12275_to_fp16)[name = tensor("op_12276_cast")]; + tensor var_12278_to_fp16 = const()[name = tensor("op_12278_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1276408064)))]; + tensor input_703_cast = mul(x = var_12276_cast, y = var_12278_to_fp16)[name = tensor("input_703_cast")]; + tensor var_12286 = const()[name = tensor("op_12286"), val = tensor([1, 1])]; + tensor var_12288 = const()[name = tensor("op_12288"), val = tensor([1, 1])]; + tensor var_12290_pad_type_0 = const()[name = tensor("op_12290_pad_type_0"), val = tensor("custom")]; + tensor var_12290_pad_0 = const()[name = tensor("op_12290_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1276410688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1286241152))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1286241344)))]; + tensor var_12290_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_12288, groups = var_6865, pad = var_12290_pad_0, pad_type = var_12290_pad_type_0, strides = var_12286, weight = up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_703_cast)[name = tensor("op_12290_cast")]; + tensor var_12291_split_sizes_0 = const()[name = tensor("op_12291_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_12291_axis_0 = const()[name = tensor("op_12291_axis_0"), val = tensor(1)]; + tensor var_12291_cast_0, tensor var_12291_cast_1 = split(axis = var_12291_axis_0, split_sizes = var_12291_split_sizes_0, x = var_12290_cast)[name = 
tensor("op_12291_cast")]; + tensor var_12293_mode_0 = const()[name = tensor("op_12293_mode_0"), val = tensor("EXACT")]; + tensor var_12293_cast = gelu(mode = var_12293_mode_0, x = var_12291_cast_1)[name = tensor("op_12293_cast")]; + tensor input_705_cast = mul(x = var_12291_cast_0, y = var_12293_cast)[name = tensor("input_705_cast")]; + tensor var_12297 = const()[name = tensor("op_12297"), val = tensor([1, 1])]; + tensor var_12299 = const()[name = tensor("op_12299"), val = tensor([1, 1])]; + tensor var_12301_pad_type_0 = const()[name = tensor("op_12301_pad_type_0"), val = tensor("custom")]; + tensor var_12301_pad_0 = const()[name = tensor("op_12301_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1286261888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1291177152))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1291177344)))]; + tensor var_12301_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_12299, groups = var_6865, pad = var_12301_pad_0, pad_type = var_12301_pad_type_0, strides = var_12297, weight = up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_705_cast)[name = tensor("op_12301_cast")]; + tensor inputs_379_cast = add(x = var_12301_cast, y = inputs_377_cast)[name = tensor("inputs_379_cast")]; + tensor var_12311 = const()[name = tensor("op_12311"), val = tensor([1])]; + tensor channels_mean_379_cast = reduce_mean(axes = var_12311, keep_dims = var_6860, x = inputs_379_cast)[name = tensor("channels_mean_379_cast")]; + tensor zero_mean_379_cast = sub(x = inputs_379_cast, y = channels_mean_379_cast)[name = tensor("zero_mean_379_cast")]; + tensor zero_mean_sq_379_cast = mul(x = zero_mean_379_cast, y = zero_mean_379_cast)[name = tensor("zero_mean_sq_379_cast")]; + tensor var_12315 = const()[name = tensor("op_12315"), val = tensor([1])]; + tensor var_12316_cast = reduce_mean(axes = var_12315, keep_dims = var_6860, x = zero_mean_sq_379_cast)[name = tensor("op_12316_cast")]; + tensor var_12317_to_fp16 = const()[name = tensor("op_12317_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12318_cast = add(x = var_12316_cast, y = var_12317_to_fp16)[name = tensor("op_12318_cast")]; + tensor denom_379_epsilon_0_to_fp16 = const()[name = tensor("denom_379_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_379_cast = rsqrt(epsilon = denom_379_epsilon_0_to_fp16, x = var_12318_cast)[name = tensor("denom_379_cast")]; + tensor out_379_cast = mul(x = zero_mean_379_cast, y = denom_379_cast)[name = tensor("out_379_cast")]; + tensor var_12322_to_fp16 = const()[name = tensor("op_12322_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1291179968)))]; + tensor var_12323_cast = add(x = out_379_cast, y = var_12322_to_fp16)[name = tensor("op_12323_cast")]; + tensor var_12325_to_fp16 = const()[name = tensor("op_12325_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1291182592)))]; + tensor hidden_states_489_cast = mul(x = var_12323_cast, y = var_12325_to_fp16)[name = tensor("hidden_states_489_cast")]; + tensor var_12332 = const()[name = tensor("op_12332"), val = tensor([1, 1])]; + tensor var_12334 = const()[name = tensor("op_12334"), val = tensor([1, 1])]; + tensor q_253_pad_type_0 = const()[name = tensor("q_253_pad_type_0"), val = tensor("custom")]; + tensor q_253_pad_0 = const()[name = tensor("q_253_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1291185216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1292004480))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_253_cast = conv(dilations = var_12334, groups = var_6865, pad = q_253_pad_0, pad_type = q_253_pad_type_0, strides = var_12332, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_489_cast)[name = tensor("q_253_cast")]; + tensor var_12338 = const()[name = tensor("op_12338"), val = tensor([1, 1])]; + tensor var_12340 = const()[name = tensor("op_12340"), val = tensor([1, 1])]; + tensor k_253_pad_type_0 = const()[name = tensor("k_253_pad_type_0"), val = tensor("custom")]; + tensor k_253_pad_0 = const()[name = tensor("k_253_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1292004608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1292823872))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_253_cast = conv(dilations = var_12340, groups = var_6865, pad = k_253_pad_0, pad_type = k_253_pad_type_0, strides = var_12338, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_489_cast)[name = tensor("k_253_cast")]; + tensor var_12344 = const()[name = tensor("op_12344"), val = tensor([1, 1])]; + tensor var_12346 = const()[name = tensor("op_12346"), val = tensor([1, 1])]; + tensor v_253_pad_type_0 = const()[name = tensor("v_253_pad_type_0"), val = tensor("custom")]; + tensor v_253_pad_0 = const()[name = tensor("v_253_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1292824000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1294052864))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_253_cast = conv(dilations = var_12346, groups = var_6865, pad = v_253_pad_0, pad_type = v_253_pad_type_0, strides = var_12344, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_489_cast)[name = tensor("v_253_cast")]; + tensor var_12350 = const()[name = tensor("op_12350"), val = 
tensor([2, 20, 64, -1])]; + tensor var_12351_cast = reshape(shape = var_12350, x = q_253_cast)[name = tensor("op_12351_cast")]; + tensor var_12352 = const()[name = tensor("op_12352"), val = tensor([2, 20, 64, -1])]; + tensor var_12353_cast = reshape(shape = var_12352, x = k_253_cast)[name = tensor("op_12353_cast")]; + tensor var_12354 = const()[name = tensor("op_12354"), val = tensor([2, 20, 64, -1])]; + tensor var_12355_cast = reshape(shape = var_12354, x = v_253_cast)[name = tensor("op_12355_cast")]; + tensor attn_weights_505_transpose_x_0 = const()[name = tensor("attn_weights_505_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_505_transpose_y_0 = const()[name = tensor("attn_weights_505_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_505_cast = matmul(transpose_x = attn_weights_505_transpose_x_0, transpose_y = attn_weights_505_transpose_y_0, x = var_12351_cast, y = var_12353_cast)[name = tensor("attn_weights_505_cast")]; + tensor attn_weights_507_cast = mul(x = attn_weights_505_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_507_cast")]; + tensor var_12359_cast = softmax(axis = var_6849, x = attn_weights_507_cast)[name = tensor("op_12359_cast")]; + tensor attn_253_transpose_x_0 = const()[name = tensor("attn_253_transpose_x_0"), val = tensor(false)]; + tensor attn_253_transpose_y_0 = const()[name = tensor("attn_253_transpose_y_0"), val = tensor(true)]; + tensor attn_253_cast = matmul(transpose_x = attn_253_transpose_x_0, transpose_y = attn_253_transpose_y_0, x = var_12355_cast, y = var_12359_cast)[name = tensor("attn_253_cast")]; + tensor var_12363 = const()[name = tensor("op_12363"), val = tensor([2, 1280, 1, -1])]; + tensor input_707_cast = reshape(shape = var_12363, x = attn_253_cast)[name = tensor("input_707_cast")]; + tensor var_12368 = const()[name = tensor("op_12368"), val = tensor([1, 1])]; + tensor var_12370 = const()[name = tensor("op_12370"), val = tensor([1, 1])]; + tensor var_12372_pad_type_0 = const()[name = tensor("op_12372_pad_type_0"), val = tensor("custom")]; + tensor var_12372_pad_0 = const()[name = tensor("op_12372_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1294053056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1295281920))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1295282112)))]; + tensor var_12372_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_12370, groups = var_6865, pad = var_12372_pad_0, pad_type = var_12372_pad_type_0, strides = var_12368, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_707_cast)[name = tensor("op_12372_cast")]; + tensor inputs_381_cast = add(x = var_12372_cast, y = inputs_379_cast)[name = tensor("inputs_381_cast")]; + tensor var_12376 = const()[name = tensor("op_12376"), val = tensor([1])]; + tensor channels_mean_381_cast = reduce_mean(axes = var_12376, keep_dims = var_6860, 
x = inputs_381_cast)[name = tensor("channels_mean_381_cast")]; + tensor zero_mean_381_cast = sub(x = inputs_381_cast, y = channels_mean_381_cast)[name = tensor("zero_mean_381_cast")]; + tensor zero_mean_sq_381_cast = mul(x = zero_mean_381_cast, y = zero_mean_381_cast)[name = tensor("zero_mean_sq_381_cast")]; + tensor var_12380 = const()[name = tensor("op_12380"), val = tensor([1])]; + tensor var_12381_cast = reduce_mean(axes = var_12380, keep_dims = var_6860, x = zero_mean_sq_381_cast)[name = tensor("op_12381_cast")]; + tensor var_12382_to_fp16 = const()[name = tensor("op_12382_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12383_cast = add(x = var_12381_cast, y = var_12382_to_fp16)[name = tensor("op_12383_cast")]; + tensor denom_381_epsilon_0_to_fp16 = const()[name = tensor("denom_381_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_381_cast = rsqrt(epsilon = denom_381_epsilon_0_to_fp16, x = var_12383_cast)[name = tensor("denom_381_cast")]; + tensor out_381_cast = mul(x = zero_mean_381_cast, y = denom_381_cast)[name = tensor("out_381_cast")]; + tensor var_12387_to_fp16 = const()[name = tensor("op_12387_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1295284736)))]; + tensor var_12388_cast = add(x = out_381_cast, y = var_12387_to_fp16)[name = tensor("op_12388_cast")]; + tensor var_12390_to_fp16 = const()[name = tensor("op_12390_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1295287360)))]; + tensor hidden_states_491_cast = mul(x = var_12388_cast, y = var_12390_to_fp16)[name = tensor("hidden_states_491_cast")]; + tensor var_12397 = const()[name = tensor("op_12397"), val = tensor([1, 1])]; + tensor var_12399 = const()[name = tensor("op_12399"), val = tensor([1, 1])]; + tensor q_255_pad_type_0 = const()[name = tensor("q_255_pad_type_0"), val = tensor("custom")]; + tensor q_255_pad_0 = const()[name = tensor("q_255_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1295289984))), lut = tensor([-0x1.698p-6, -0x1.c78p-8, 0x1.c54p-8, 0x1.688p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_255_cast = conv(dilations = var_12399, groups = var_6865, pad = q_255_pad_0, pad_type = q_255_pad_type_0, strides = var_12397, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_491_cast)[name = tensor("q_255_cast")]; + tensor var_12403 = const()[name = tensor("op_12403"), val = tensor([1, 1])]; + tensor var_12405 = const()[name = tensor("op_12405"), val = tensor([1, 1])]; + tensor k_255_pad_type_0 = const()[name = tensor("k_255_pad_type_0"), val = tensor("custom")]; + tensor k_255_pad_0 = const()[name = tensor("k_255_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1295699648))), lut = tensor([-0x1.fdcp-8, 0x1.fep-8]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_255_cast = conv(dilations = var_12405, groups = var_6865, pad = k_255_pad_0, 
pad_type = k_255_pad_type_0, strides = var_12403, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_255_cast")]; + tensor var_12409 = const()[name = tensor("op_12409"), val = tensor([1, 1])]; + tensor var_12411 = const()[name = tensor("op_12411"), val = tensor([1, 1])]; + tensor v_255_pad_type_0 = const()[name = tensor("v_255_pad_type_0"), val = tensor("custom")]; + tensor v_255_pad_0 = const()[name = tensor("v_255_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1296027392))), lut = tensor([-0x1.e5cp-7, -0x1.21cp-8, 0x1.20cp-8, 0x1.e54p-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_255_cast = conv(dilations = var_12411, groups = var_6865, pad = v_255_pad_0, pad_type = v_255_pad_type_0, strides = var_12409, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_255_cast")]; + tensor var_12415 = const()[name = tensor("op_12415"), val = tensor([2, 20, 64, -1])]; + tensor var_12416_cast = reshape(shape = var_12415, x = q_255_cast)[name = tensor("op_12416_cast")]; + tensor var_12417 = const()[name = tensor("op_12417"), val = tensor([2, 20, 64, -1])]; + tensor var_12418_cast = reshape(shape = var_12417, x = k_255_cast)[name = tensor("op_12418_cast")]; + tensor var_12419 = const()[name = tensor("op_12419"), val = tensor([2, 20, 64, -1])]; + tensor var_12420_cast = reshape(shape = var_12419, x = v_255_cast)[name = tensor("op_12420_cast")]; + tensor attn_weights_509_transpose_x_0 = const()[name = tensor("attn_weights_509_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_509_transpose_y_0 = const()[name = tensor("attn_weights_509_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_509_cast = matmul(transpose_x = attn_weights_509_transpose_x_0, transpose_y = attn_weights_509_transpose_y_0, x = var_12416_cast, y = var_12418_cast)[name = tensor("attn_weights_509_cast")]; + tensor attn_weights_511_cast = mul(x = attn_weights_509_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_511_cast")]; + tensor var_12424_cast = softmax(axis = var_6849, x = attn_weights_511_cast)[name = tensor("op_12424_cast")]; + tensor attn_255_transpose_x_0 = const()[name = tensor("attn_255_transpose_x_0"), val = tensor(false)]; + tensor attn_255_transpose_y_0 = const()[name = tensor("attn_255_transpose_y_0"), val = tensor(true)]; + tensor attn_255_cast = matmul(transpose_x = attn_255_transpose_x_0, transpose_y = attn_255_transpose_y_0, x = var_12420_cast, y = var_12424_cast)[name = tensor("attn_255_cast")]; + tensor var_12428 = const()[name = tensor("op_12428"), val = tensor([2, 1280, 1, -1])]; + tensor input_709_cast = reshape(shape = var_12428, x = attn_255_cast)[name = tensor("input_709_cast")]; + tensor var_12433 = const()[name = tensor("op_12433"), val = tensor([1, 1])]; + tensor var_12435 = const()[name = tensor("op_12435"), val = tensor([1, 1])]; + tensor var_12437_pad_type_0 = const()[name = tensor("op_12437_pad_type_0"), val = tensor("custom")]; + tensor var_12437_pad_0 = const()[name = tensor("op_12437_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1296682816))), lut = tensor([-0x1.414p-8, 0x1.42p-8]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1296887680)))]; + tensor var_12437_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_12435, groups = var_6865, pad = var_12437_pad_0, pad_type = var_12437_pad_type_0, strides = var_12433, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_709_cast)[name = tensor("op_12437_cast")]; + tensor inputs_383_cast = add(x = var_12437_cast, y = inputs_381_cast)[name = tensor("inputs_383_cast")]; + tensor var_12441 = const()[name = tensor("op_12441"), val = tensor([1])]; + tensor channels_mean_383_cast = reduce_mean(axes = var_12441, keep_dims = var_6860, x = inputs_383_cast)[name = tensor("channels_mean_383_cast")]; + tensor zero_mean_383_cast = sub(x = inputs_383_cast, y = channels_mean_383_cast)[name = tensor("zero_mean_383_cast")]; + tensor zero_mean_sq_383_cast = mul(x = zero_mean_383_cast, y = zero_mean_383_cast)[name = tensor("zero_mean_sq_383_cast")]; + tensor var_12445 = const()[name = tensor("op_12445"), val = tensor([1])]; + tensor var_12446_cast = reduce_mean(axes = var_12445, keep_dims = var_6860, x = zero_mean_sq_383_cast)[name = tensor("op_12446_cast")]; + tensor var_12447_to_fp16 = const()[name = tensor("op_12447_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12448_cast = add(x = var_12446_cast, y = var_12447_to_fp16)[name = tensor("op_12448_cast")]; + tensor denom_383_epsilon_0_to_fp16 = const()[name = tensor("denom_383_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_383_cast = rsqrt(epsilon = denom_383_epsilon_0_to_fp16, x = var_12448_cast)[name = tensor("denom_383_cast")]; + tensor out_383_cast = mul(x = zero_mean_383_cast, y = denom_383_cast)[name = tensor("out_383_cast")]; + tensor var_12452_to_fp16 = const()[name = tensor("op_12452_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1296890304)))]; + tensor var_12453_cast = add(x = out_383_cast, y = var_12452_to_fp16)[name = tensor("op_12453_cast")]; + tensor var_12455_to_fp16 = const()[name = tensor("op_12455_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1296892928)))]; + tensor input_711_cast = mul(x = var_12453_cast, y = var_12455_to_fp16)[name = tensor("input_711_cast")]; + tensor var_12463 = const()[name = tensor("op_12463"), val = tensor([1, 1])]; + tensor var_12465 = const()[name = tensor("op_12465"), val = tensor([1, 1])]; + tensor var_12467_pad_type_0 = const()[name = tensor("op_12467_pad_type_0"), val = tensor("custom")]; + tensor var_12467_pad_0 = const()[name = tensor("op_12467_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1296895552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1306726016))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1306726208)))]; + tensor var_12467_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_12465, groups = var_6865, pad = var_12467_pad_0, pad_type = var_12467_pad_type_0, strides = var_12463, weight = up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_711_cast)[name = tensor("op_12467_cast")]; + tensor var_12468_split_sizes_0 = const()[name = tensor("op_12468_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_12468_axis_0 = const()[name = tensor("op_12468_axis_0"), val = tensor(1)]; + tensor var_12468_cast_0, tensor var_12468_cast_1 = split(axis = var_12468_axis_0, split_sizes = var_12468_split_sizes_0, x = var_12467_cast)[name = tensor("op_12468_cast")]; + tensor var_12470_mode_0 = const()[name = tensor("op_12470_mode_0"), val = tensor("EXACT")]; + tensor var_12470_cast = gelu(mode = var_12470_mode_0, x = var_12468_cast_1)[name = tensor("op_12470_cast")]; + tensor input_713_cast = mul(x = var_12468_cast_0, y = var_12470_cast)[name = tensor("input_713_cast")]; + tensor var_12474 = const()[name = tensor("op_12474"), val = tensor([1, 1])]; + tensor var_12476 = const()[name = tensor("op_12476"), val = tensor([1, 1])]; + tensor var_12478_pad_type_0 = const()[name = tensor("op_12478_pad_type_0"), val = tensor("custom")]; + tensor var_12478_pad_0 = const()[name = tensor("op_12478_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1306746752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1311662016))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1311662208)))]; + tensor var_12478_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_12476, groups = var_6865, pad = var_12478_pad_0, pad_type = var_12478_pad_type_0, strides = var_12474, weight = up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_713_cast)[name = tensor("op_12478_cast")]; + tensor hidden_states_495_cast = add(x = var_12478_cast, y = inputs_383_cast)[name = tensor("hidden_states_495_cast")]; + tensor var_12480 = const()[name = tensor("op_12480"), val = tensor([2, 1280, 32, 32])]; + tensor input_715_cast = reshape(shape = var_12480, x = hidden_states_495_cast)[name = tensor("input_715_cast")]; + tensor var_12484 = const()[name = tensor("op_12484"), val = tensor([1, 1])]; + tensor var_12486 = const()[name = tensor("op_12486"), val = 
tensor([1, 1])]; + tensor hidden_states_497_pad_type_0 = const()[name = tensor("hidden_states_497_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_497_pad_0 = const()[name = tensor("hidden_states_497_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1311664832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1312893696))), name = tensor("up_blocks_0_attentions_2_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1312893888)))]; + tensor hidden_states_497_cast = conv(bias = up_blocks_0_attentions_2_proj_out_bias_to_fp16, dilations = var_12486, groups = var_6865, pad = hidden_states_497_pad_0, pad_type = hidden_states_497_pad_type_0, strides = var_12484, weight = up_blocks_0_attentions_2_proj_out_weight_to_fp16_palettized, x = input_715_cast)[name = tensor("hidden_states_497_cast")]; + tensor input_717_cast = add(x = hidden_states_497_cast, y = hidden_states_431_cast)[name = tensor("input_717_cast")]; + tensor input_719_scale_factor_height_0 = const()[name = tensor("input_719_scale_factor_height_0"), val = tensor(0x1p+1)]; + tensor input_719_scale_factor_width_0 = const()[name = tensor("input_719_scale_factor_width_0"), val = tensor(0x1p+1)]; + tensor input_719_cast = upsample_nearest_neighbor(scale_factor_height = input_719_scale_factor_height_0, scale_factor_width = input_719_scale_factor_width_0, x = input_717_cast)[name = tensor("input_719_cast")]; + tensor var_12495 = const()[name = tensor("op_12495"), val = tensor([1, 1])]; + tensor var_12497 = const()[name = tensor("op_12497"), val = tensor([1, 1])]; + tensor hidden_states_499_pad_type_0 = const()[name = tensor("hidden_states_499_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_499_pad_0 = const()[name = tensor("hidden_states_499_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_upsamplers_0_conv_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1312896512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1327642176))), name = tensor("up_blocks_0_upsamplers_0_conv_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor up_blocks_0_upsamplers_0_conv_bias_to_fp16 = const()[name = tensor("up_blocks_0_upsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1327642752)))]; + tensor hidden_states_499_cast = conv(bias = up_blocks_0_upsamplers_0_conv_bias_to_fp16, dilations = var_12497, groups = var_6865, pad = hidden_states_499_pad_0, pad_type = hidden_states_499_pad_type_0, strides = var_12495, weight = up_blocks_0_upsamplers_0_conv_weight_to_fp16_palettized, x = input_719_cast)[name = tensor("hidden_states_499_cast")]; + tensor var_12502 = const()[name = tensor("op_12502"), val = tensor(3)]; + tensor var_12513 = const()[name = tensor("op_12513"), val = tensor(true)]; + tensor var_12518 = const()[name = tensor("op_12518"), val = tensor(1)]; + tensor input_721_interleave_0 = const()[name = tensor("input_721_interleave_0"), val = 
tensor(false)]; + tensor input_721_cast = concat(axis = var_12518, interleave = input_721_interleave_0, values = (hidden_states_499_cast, input_113_cast))[name = tensor("input_721_cast")]; + tensor reshape_120_shape_0 = const()[name = tensor("reshape_120_shape_0"), val = tensor([2, 32, 60, 64, 64])]; + tensor reshape_120_cast = reshape(shape = reshape_120_shape_0, x = input_721_cast)[name = tensor("reshape_120_cast")]; + tensor reduce_mean_90_axes_0 = const()[name = tensor("reduce_mean_90_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_90_keep_dims_0 = const()[name = tensor("reduce_mean_90_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_90_cast = reduce_mean(axes = reduce_mean_90_axes_0, keep_dims = reduce_mean_90_keep_dims_0, x = reshape_120_cast)[name = tensor("reduce_mean_90_cast")]; + tensor sub_60_cast = sub(x = reshape_120_cast, y = reduce_mean_90_cast)[name = tensor("sub_60_cast")]; + tensor square_30_cast = square(x = sub_60_cast)[name = tensor("square_30_cast")]; + tensor reduce_mean_92_axes_0 = const()[name = tensor("reduce_mean_92_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_92_keep_dims_0 = const()[name = tensor("reduce_mean_92_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_92_cast = reduce_mean(axes = reduce_mean_92_axes_0, keep_dims = reduce_mean_92_keep_dims_0, x = square_30_cast)[name = tensor("reduce_mean_92_cast")]; + tensor add_60_y_0_to_fp16 = const()[name = tensor("add_60_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_60_cast = add(x = reduce_mean_92_cast, y = add_60_y_0_to_fp16)[name = tensor("add_60_cast")]; + tensor sqrt_30_cast = sqrt(x = add_60_cast)[name = tensor("sqrt_30_cast")]; + tensor real_div_30_cast = real_div(x = sub_60_cast, y = sqrt_30_cast)[name = tensor("real_div_30_cast")]; + tensor reshape_121_shape_0 = const()[name = tensor("reshape_121_shape_0"), val = tensor([2, 1920, 64, 64])]; + tensor reshape_121_cast = reshape(shape = reshape_121_shape_0, x = real_div_30_cast)[name = tensor("reshape_121_cast")]; + tensor add_61_gamma_0_to_fp16 = const()[name = tensor("add_61_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1327645376)))]; + tensor add_61_beta_0_to_fp16 = const()[name = tensor("add_61_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1327649280)))]; + tensor add_61_epsilon_0_to_fp16 = const()[name = tensor("add_61_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_61_cast = batch_norm(beta = add_61_beta_0_to_fp16, epsilon = add_61_epsilon_0_to_fp16, gamma = add_61_gamma_0_to_fp16, mean = add_55_mean_0_to_fp16, variance = add_55_variance_0_to_fp16, x = reshape_121_cast)[name = tensor("add_61_cast")]; + tensor input_725_cast = silu(x = add_61_cast)[name = tensor("input_725_cast")]; + tensor var_12547 = const()[name = tensor("op_12547"), val = tensor([1, 1])]; + tensor var_12549 = const()[name = tensor("op_12549"), val = tensor([1, 1])]; + tensor hidden_states_501_pad_type_0 = const()[name = tensor("hidden_states_501_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_501_pad_0 = const()[name = tensor("hidden_states_501_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1327653184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1335947648))), name = 
tensor("up_blocks_1_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([640, 1920, 3, 3])]; + tensor up_blocks_1_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1335947840)))]; + tensor hidden_states_501_cast = conv(bias = up_blocks_1_resnets_0_conv1_bias_to_fp16, dilations = var_12549, groups = var_12518, pad = hidden_states_501_pad_0, pad_type = hidden_states_501_pad_type_0, strides = var_12547, weight = up_blocks_1_resnets_0_conv1_weight_to_fp16_palettized, x = input_725_cast)[name = tensor("hidden_states_501_cast")]; + tensor var_12555 = const()[name = tensor("op_12555"), val = tensor([1, 1])]; + tensor var_12557 = const()[name = tensor("op_12557"), val = tensor([1, 1])]; + tensor temb_23_pad_type_0 = const()[name = tensor("temb_23_pad_type_0"), val = tensor("custom")]; + tensor temb_23_pad_0 = const()[name = tensor("temb_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1335949184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1336563648))), name = tensor("up_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor up_blocks_1_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1336563840)))]; + tensor temb_23_cast = conv(bias = up_blocks_1_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_12557, groups = var_12518, pad = temb_23_pad_0, pad_type = temb_23_pad_type_0, strides = var_12555, weight = up_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_23_cast")]; + tensor input_729_cast = add(x = hidden_states_501_cast, y = temb_23_cast)[name = tensor("input_729_cast")]; + tensor reshape_124_shape_0 = const()[name = tensor("reshape_124_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_124_cast = reshape(shape = reshape_124_shape_0, x = input_729_cast)[name = tensor("reshape_124_cast")]; + tensor reduce_mean_93_axes_0 = const()[name = tensor("reduce_mean_93_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_93_keep_dims_0 = const()[name = tensor("reduce_mean_93_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_93_cast = reduce_mean(axes = reduce_mean_93_axes_0, keep_dims = reduce_mean_93_keep_dims_0, x = reshape_124_cast)[name = tensor("reduce_mean_93_cast")]; + tensor sub_62_cast = sub(x = reshape_124_cast, y = reduce_mean_93_cast)[name = tensor("sub_62_cast")]; + tensor square_31_cast = square(x = sub_62_cast)[name = tensor("square_31_cast")]; + tensor reduce_mean_95_axes_0 = const()[name = tensor("reduce_mean_95_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_95_keep_dims_0 = const()[name = tensor("reduce_mean_95_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_95_cast = reduce_mean(axes = reduce_mean_95_axes_0, keep_dims = reduce_mean_95_keep_dims_0, x = square_31_cast)[name = tensor("reduce_mean_95_cast")]; + tensor add_62_y_0_to_fp16 = const()[name = tensor("add_62_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_62_cast = add(x = reduce_mean_95_cast, y = add_62_y_0_to_fp16)[name = tensor("add_62_cast")]; + tensor 
sqrt_31_cast = sqrt(x = add_62_cast)[name = tensor("sqrt_31_cast")]; + tensor real_div_31_cast = real_div(x = sub_62_cast, y = sqrt_31_cast)[name = tensor("real_div_31_cast")]; + tensor reshape_125_shape_0 = const()[name = tensor("reshape_125_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_125_cast = reshape(shape = reshape_125_shape_0, x = real_div_31_cast)[name = tensor("reshape_125_cast")]; + tensor add_63_gamma_0_to_fp16 = const()[name = tensor("add_63_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1336565184)))]; + tensor add_63_beta_0_to_fp16 = const()[name = tensor("add_63_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1336566528)))]; + tensor add_63_epsilon_0_to_fp16 = const()[name = tensor("add_63_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_63_cast = batch_norm(beta = add_63_beta_0_to_fp16, epsilon = add_63_epsilon_0_to_fp16, gamma = add_63_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_125_cast)[name = tensor("add_63_cast")]; + tensor input_733_cast = silu(x = add_63_cast)[name = tensor("input_733_cast")]; + tensor var_12567 = const()[name = tensor("op_12567"), val = tensor([1, 1])]; + tensor var_12569 = const()[name = tensor("op_12569"), val = tensor([1, 1])]; + tensor hidden_states_503_pad_type_0 = const()[name = tensor("hidden_states_503_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_503_pad_0 = const()[name = tensor("hidden_states_503_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1336567872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1340254336))), name = tensor("up_blocks_1_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor up_blocks_1_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1340254912)))]; + tensor hidden_states_503_cast = conv(bias = up_blocks_1_resnets_0_conv2_bias_to_fp16, dilations = var_12569, groups = var_12518, pad = hidden_states_503_pad_0, pad_type = hidden_states_503_pad_type_0, strides = var_12567, weight = up_blocks_1_resnets_0_conv2_weight_to_fp16_palettized, x = input_733_cast)[name = tensor("hidden_states_503_cast")]; + tensor var_12574 = const()[name = tensor("op_12574"), val = tensor([1, 1])]; + tensor var_12576 = const()[name = tensor("op_12576"), val = tensor([1, 1])]; + tensor x_11_pad_type_0 = const()[name = tensor("x_11_pad_type_0"), val = tensor("custom")]; + tensor x_11_pad_0 = const()[name = tensor("x_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1340256256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1341485120))), name = tensor("up_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([640, 1920, 1, 1])]; + tensor up_blocks_1_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1341485696)))]; + tensor x_11_cast = conv(bias = up_blocks_1_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_12576, groups = var_12518, pad = x_11_pad_0, pad_type = x_11_pad_type_0, strides = var_12574, weight = up_blocks_1_resnets_0_conv_shortcut_weight_to_fp16_palettized, x = input_721_cast)[name = tensor("x_11_cast")]; + tensor hidden_states_505_cast = add(x = x_11_cast, y = hidden_states_503_cast)[name = tensor("hidden_states_505_cast")]; + tensor reshape_128_shape_0 = const()[name = tensor("reshape_128_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_128_cast = reshape(shape = reshape_128_shape_0, x = hidden_states_505_cast)[name = tensor("reshape_128_cast")]; + tensor reduce_mean_96_axes_0 = const()[name = tensor("reduce_mean_96_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_96_keep_dims_0 = const()[name = tensor("reduce_mean_96_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_96_cast = reduce_mean(axes = reduce_mean_96_axes_0, keep_dims = reduce_mean_96_keep_dims_0, x = reshape_128_cast)[name = tensor("reduce_mean_96_cast")]; + tensor sub_64_cast = sub(x = reshape_128_cast, y = reduce_mean_96_cast)[name = tensor("sub_64_cast")]; + tensor square_32_cast = square(x = sub_64_cast)[name = tensor("square_32_cast")]; + tensor reduce_mean_98_axes_0 = const()[name = tensor("reduce_mean_98_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_98_keep_dims_0 = const()[name = tensor("reduce_mean_98_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_98_cast = reduce_mean(axes = reduce_mean_98_axes_0, keep_dims = reduce_mean_98_keep_dims_0, x = square_32_cast)[name = tensor("reduce_mean_98_cast")]; + tensor add_64_y_0_to_fp16 = const()[name = tensor("add_64_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_64_cast = add(x = reduce_mean_98_cast, y = add_64_y_0_to_fp16)[name = tensor("add_64_cast")]; + tensor sqrt_32_cast = sqrt(x = add_64_cast)[name = tensor("sqrt_32_cast")]; + tensor real_div_32_cast = real_div(x = sub_64_cast, y = sqrt_32_cast)[name = tensor("real_div_32_cast")]; + tensor reshape_129_shape_0 = const()[name = tensor("reshape_129_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_129_cast = reshape(shape = reshape_129_shape_0, x = real_div_32_cast)[name = tensor("reshape_129_cast")]; + tensor add_65_gamma_0_to_fp16 = const()[name = tensor("add_65_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1341487040)))]; + tensor add_65_beta_0_to_fp16 = const()[name = tensor("add_65_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1341488384)))]; + tensor add_65_epsilon_0_to_fp16 = const()[name = tensor("add_65_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_65_cast = batch_norm(beta = add_65_beta_0_to_fp16, epsilon = add_65_epsilon_0_to_fp16, gamma = add_65_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_129_cast)[name = tensor("add_65_cast")]; + tensor var_12598 = const()[name = tensor("op_12598"), val = tensor([1, 1])]; + tensor var_12600 = const()[name = tensor("op_12600"), val = tensor([1, 1])]; + tensor hidden_states_507_pad_type_0 = const()[name = tensor("hidden_states_507_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_507_pad_0 = const()[name = tensor("hidden_states_507_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1341489728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1341899392))), name = tensor("up_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1341899968)))]; + tensor hidden_states_507_cast = conv(bias = up_blocks_1_attentions_0_proj_in_bias_to_fp16, dilations = var_12600, groups = var_12518, pad = hidden_states_507_pad_0, pad_type = hidden_states_507_pad_type_0, strides = var_12598, weight = up_blocks_1_attentions_0_proj_in_weight_to_fp16_palettized, x = add_65_cast)[name = tensor("hidden_states_507_cast")]; + tensor var_12605 = const()[name = tensor("op_12605"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_385_cast = reshape(shape = var_12605, x = hidden_states_507_cast)[name = tensor("inputs_385_cast")]; + tensor var_12615 = const()[name = tensor("op_12615"), val = tensor([1])]; + tensor channels_mean_385_cast = reduce_mean(axes = var_12615, keep_dims = var_12513, x = inputs_385_cast)[name = tensor("channels_mean_385_cast")]; + tensor zero_mean_385_cast = sub(x = inputs_385_cast, y = channels_mean_385_cast)[name = tensor("zero_mean_385_cast")]; + tensor zero_mean_sq_385_cast = mul(x = zero_mean_385_cast, y = zero_mean_385_cast)[name = tensor("zero_mean_sq_385_cast")]; + tensor var_12619 = const()[name = tensor("op_12619"), val = tensor([1])]; + tensor var_12620_cast = reduce_mean(axes = var_12619, keep_dims = var_12513, x = zero_mean_sq_385_cast)[name = tensor("op_12620_cast")]; + tensor var_12621_to_fp16 = const()[name = tensor("op_12621_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12622_cast = add(x = var_12620_cast, y = var_12621_to_fp16)[name = tensor("op_12622_cast")]; + tensor denom_385_epsilon_0_to_fp16 = const()[name = tensor("denom_385_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_385_cast = rsqrt(epsilon = denom_385_epsilon_0_to_fp16, x = var_12622_cast)[name = tensor("denom_385_cast")]; + tensor out_385_cast = mul(x = zero_mean_385_cast, y = denom_385_cast)[name = tensor("out_385_cast")]; + tensor var_12626_to_fp16 = const()[name = tensor("op_12626_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1341901312)))]; + tensor var_12627_cast = add(x = out_385_cast, y = var_12626_to_fp16)[name = tensor("op_12627_cast")]; + tensor var_12629_to_fp16 = const()[name = tensor("op_12629_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1341902656)))]; + tensor hidden_states_509_cast = mul(x = var_12627_cast, y = var_12629_to_fp16)[name = tensor("hidden_states_509_cast")]; + tensor var_12636 = const()[name = tensor("op_12636"), val = tensor([1, 1])]; + tensor var_12638 = const()[name = tensor("op_12638"), val = tensor([1, 1])]; + tensor q_257_pad_type_0 = const()[name = tensor("q_257_pad_type_0"), val = tensor("custom")]; + tensor q_257_pad_0 = const()[name = tensor("q_257_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1341904000))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1342211264))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_257_cast = conv(dilations = var_12638, groups = var_12518, pad = q_257_pad_0, pad_type = q_257_pad_type_0, strides = var_12636, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_509_cast)[name = tensor("q_257_cast")]; + tensor var_12642 = const()[name = tensor("op_12642"), val = tensor([1, 1])]; + tensor var_12644 = const()[name = tensor("op_12644"), val = tensor([1, 1])]; + tensor k_257_pad_type_0 = const()[name = tensor("k_257_pad_type_0"), val = tensor("custom")]; + tensor k_257_pad_0 = const()[name = tensor("k_257_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1342211456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1342518720))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_257_cast = conv(dilations = var_12644, groups = var_12518, pad = k_257_pad_0, pad_type = k_257_pad_type_0, strides = var_12642, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_509_cast)[name = tensor("k_257_cast")]; + tensor var_12648 = const()[name = tensor("op_12648"), val = tensor([1, 1])]; + tensor var_12650 = const()[name = tensor("op_12650"), val = tensor([1, 1])]; + tensor v_257_pad_type_0 = const()[name = tensor("v_257_pad_type_0"), val = tensor("custom")]; + tensor v_257_pad_0 = const()[name = tensor("v_257_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1342518912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1342928576))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_257_cast = conv(dilations = var_12650, groups = var_12518, pad = v_257_pad_0, pad_type = v_257_pad_type_0, strides = var_12648, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_509_cast)[name = tensor("v_257_cast")]; + tensor var_12654 = const()[name = tensor("op_12654"), val = tensor([2, 10, 64, -1])]; + tensor var_12655_cast = reshape(shape = var_12654, x = q_257_cast)[name = tensor("op_12655_cast")]; + tensor var_12656 = const()[name = tensor("op_12656"), val = tensor([2, 10, 64, -1])]; + tensor var_12657_cast = reshape(shape = var_12656, x = k_257_cast)[name = tensor("op_12657_cast")]; + tensor var_12658 = const()[name = tensor("op_12658"), val = tensor([2, 10, 64, -1])]; + tensor var_12659_cast = reshape(shape = var_12658, x = v_257_cast)[name = tensor("op_12659_cast")]; + tensor attn_weights_513_transpose_x_0 = const()[name = tensor("attn_weights_513_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_513_transpose_y_0 = const()[name = tensor("attn_weights_513_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_513_cast = 
matmul(transpose_x = attn_weights_513_transpose_x_0, transpose_y = attn_weights_513_transpose_y_0, x = var_12655_cast, y = var_12657_cast)[name = tensor("attn_weights_513_cast")]; + tensor var_12509_to_fp16 = const()[name = tensor("op_12509_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_515_cast = mul(x = attn_weights_513_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_515_cast")]; + tensor var_12663_cast = softmax(axis = var_12502, x = attn_weights_515_cast)[name = tensor("op_12663_cast")]; + tensor attn_257_transpose_x_0 = const()[name = tensor("attn_257_transpose_x_0"), val = tensor(false)]; + tensor attn_257_transpose_y_0 = const()[name = tensor("attn_257_transpose_y_0"), val = tensor(true)]; + tensor attn_257_cast = matmul(transpose_x = attn_257_transpose_x_0, transpose_y = attn_257_transpose_y_0, x = var_12659_cast, y = var_12663_cast)[name = tensor("attn_257_cast")]; + tensor var_12667 = const()[name = tensor("op_12667"), val = tensor([2, 640, 1, -1])]; + tensor input_737_cast = reshape(shape = var_12667, x = attn_257_cast)[name = tensor("input_737_cast")]; + tensor var_12672 = const()[name = tensor("op_12672"), val = tensor([1, 1])]; + tensor var_12674 = const()[name = tensor("op_12674"), val = tensor([1, 1])]; + tensor var_12676_pad_type_0 = const()[name = tensor("op_12676_pad_type_0"), val = tensor("custom")]; + tensor var_12676_pad_0 = const()[name = tensor("op_12676_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1342929152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1343338816))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1343339392)))]; + tensor var_12676_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_12674, groups = var_12518, pad = var_12676_pad_0, pad_type = var_12676_pad_type_0, strides = var_12672, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_737_cast)[name = tensor("op_12676_cast")]; + tensor inputs_387_cast = add(x = var_12676_cast, y = inputs_385_cast)[name = tensor("inputs_387_cast")]; + tensor var_12680 = const()[name = tensor("op_12680"), val = tensor([1])]; + tensor channels_mean_387_cast = reduce_mean(axes = var_12680, keep_dims = var_12513, x = inputs_387_cast)[name = tensor("channels_mean_387_cast")]; + tensor zero_mean_387_cast = sub(x = inputs_387_cast, y = channels_mean_387_cast)[name = tensor("zero_mean_387_cast")]; + tensor zero_mean_sq_387_cast = mul(x = zero_mean_387_cast, y = zero_mean_387_cast)[name = tensor("zero_mean_sq_387_cast")]; + tensor var_12684 = const()[name = tensor("op_12684"), val = tensor([1])]; + tensor var_12685_cast = reduce_mean(axes = var_12684, keep_dims = var_12513, x = zero_mean_sq_387_cast)[name = tensor("op_12685_cast")]; + tensor var_12686_to_fp16 = const()[name = tensor("op_12686_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12687_cast = add(x = var_12685_cast, y = 
var_12686_to_fp16)[name = tensor("op_12687_cast")]; + tensor denom_387_epsilon_0_to_fp16 = const()[name = tensor("denom_387_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_387_cast = rsqrt(epsilon = denom_387_epsilon_0_to_fp16, x = var_12687_cast)[name = tensor("denom_387_cast")]; + tensor out_387_cast = mul(x = zero_mean_387_cast, y = denom_387_cast)[name = tensor("out_387_cast")]; + tensor var_12691_to_fp16 = const()[name = tensor("op_12691_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1343340736)))]; + tensor var_12692_cast = add(x = out_387_cast, y = var_12691_to_fp16)[name = tensor("op_12692_cast")]; + tensor var_12694_to_fp16 = const()[name = tensor("op_12694_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1343342080)))]; + tensor hidden_states_511_cast = mul(x = var_12692_cast, y = var_12694_to_fp16)[name = tensor("hidden_states_511_cast")]; + tensor var_12701 = const()[name = tensor("op_12701"), val = tensor([1, 1])]; + tensor var_12703 = const()[name = tensor("op_12703"), val = tensor([1, 1])]; + tensor q_259_pad_type_0 = const()[name = tensor("q_259_pad_type_0"), val = tensor("custom")]; + tensor q_259_pad_0 = const()[name = tensor("q_259_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1343343424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1343753088))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_259_cast = conv(dilations = var_12703, groups = var_12518, pad = q_259_pad_0, pad_type = q_259_pad_type_0, strides = var_12701, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_511_cast)[name = tensor("q_259_cast")]; + tensor var_12707 = const()[name = tensor("op_12707"), val = tensor([1, 1])]; + tensor var_12709 = const()[name = tensor("op_12709"), val = tensor([1, 1])]; + tensor k_259_pad_type_0 = const()[name = tensor("k_259_pad_type_0"), val = tensor("custom")]; + tensor k_259_pad_0 = const()[name = tensor("k_259_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1343753664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1344736768))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_259_cast = conv(dilations = var_12709, groups = var_12518, pad = k_259_pad_0, pad_type = k_259_pad_type_0, strides = var_12707, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_259_cast")]; + tensor var_12713 = const()[name = tensor("op_12713"), val = tensor([1, 1])]; + tensor var_12715 = const()[name = tensor("op_12715"), val = tensor([1, 1])]; + tensor v_259_pad_type_0 = const()[name = tensor("v_259_pad_type_0"), val = tensor("custom")]; + tensor v_259_pad_0 = const()[name = tensor("v_259_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1344736960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1345392384))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_259_cast = conv(dilations = var_12715, groups = var_12518, pad = v_259_pad_0, pad_type = v_259_pad_type_0, strides = var_12713, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_259_cast")]; + tensor var_12719 = const()[name = tensor("op_12719"), val = tensor([2, 10, 64, -1])]; + tensor var_12720_cast = reshape(shape = var_12719, x = q_259_cast)[name = tensor("op_12720_cast")]; + tensor var_12721 = const()[name = tensor("op_12721"), val = tensor([2, 10, 64, -1])]; + tensor var_12722_cast = reshape(shape = var_12721, x = k_259_cast)[name = tensor("op_12722_cast")]; + tensor var_12723 = const()[name = tensor("op_12723"), val = tensor([2, 10, 64, -1])]; + tensor var_12724_cast = reshape(shape = var_12723, x = v_259_cast)[name = tensor("op_12724_cast")]; + tensor attn_weights_517_transpose_x_0 = const()[name = tensor("attn_weights_517_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_517_transpose_y_0 = const()[name = tensor("attn_weights_517_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_517_cast = matmul(transpose_x = attn_weights_517_transpose_x_0, transpose_y = attn_weights_517_transpose_y_0, x = var_12720_cast, y = var_12722_cast)[name = tensor("attn_weights_517_cast")]; + tensor attn_weights_519_cast = mul(x = attn_weights_517_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_519_cast")]; + tensor var_12728_cast = softmax(axis = var_12502, x = attn_weights_519_cast)[name = tensor("op_12728_cast")]; + tensor attn_259_transpose_x_0 = const()[name = tensor("attn_259_transpose_x_0"), val = tensor(false)]; + tensor attn_259_transpose_y_0 = const()[name = tensor("attn_259_transpose_y_0"), val = tensor(true)]; + tensor attn_259_cast = matmul(transpose_x = attn_259_transpose_x_0, transpose_y = attn_259_transpose_y_0, x = var_12724_cast, y = var_12728_cast)[name = tensor("attn_259_cast")]; + tensor var_12732 = const()[name = tensor("op_12732"), val = tensor([2, 640, 1, -1])]; + tensor input_739_cast = reshape(shape = var_12732, x = attn_259_cast)[name = tensor("input_739_cast")]; + tensor var_12737 = const()[name = tensor("op_12737"), val = tensor([1, 1])]; + tensor var_12739 = const()[name = tensor("op_12739"), val = tensor([1, 1])]; + tensor var_12741_pad_type_0 = const()[name = tensor("op_12741_pad_type_0"), val = tensor("custom")]; + tensor var_12741_pad_0 = const()[name = tensor("op_12741_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1345392512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1345699776))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = 
tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1345699968)))]; + tensor var_12741_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_12739, groups = var_12518, pad = var_12741_pad_0, pad_type = var_12741_pad_type_0, strides = var_12737, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_739_cast)[name = tensor("op_12741_cast")]; + tensor inputs_389_cast = add(x = var_12741_cast, y = inputs_387_cast)[name = tensor("inputs_389_cast")]; + tensor var_12745 = const()[name = tensor("op_12745"), val = tensor([1])]; + tensor channels_mean_389_cast = reduce_mean(axes = var_12745, keep_dims = var_12513, x = inputs_389_cast)[name = tensor("channels_mean_389_cast")]; + tensor zero_mean_389_cast = sub(x = inputs_389_cast, y = channels_mean_389_cast)[name = tensor("zero_mean_389_cast")]; + tensor zero_mean_sq_389_cast = mul(x = zero_mean_389_cast, y = zero_mean_389_cast)[name = tensor("zero_mean_sq_389_cast")]; + tensor var_12749 = const()[name = tensor("op_12749"), val = tensor([1])]; + tensor var_12750_cast = reduce_mean(axes = var_12749, keep_dims = var_12513, x = zero_mean_sq_389_cast)[name = tensor("op_12750_cast")]; + tensor var_12751_to_fp16 = const()[name = tensor("op_12751_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12752_cast = add(x = var_12750_cast, y = var_12751_to_fp16)[name = tensor("op_12752_cast")]; + tensor denom_389_epsilon_0_to_fp16 = const()[name = tensor("denom_389_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_389_cast = rsqrt(epsilon = denom_389_epsilon_0_to_fp16, x = var_12752_cast)[name = tensor("denom_389_cast")]; + tensor out_389_cast = mul(x = zero_mean_389_cast, y = denom_389_cast)[name = tensor("out_389_cast")]; + tensor var_12756_to_fp16 = const()[name = tensor("op_12756_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1345701312)))]; + tensor var_12757_cast = add(x = out_389_cast, y = var_12756_to_fp16)[name = tensor("op_12757_cast")]; + tensor var_12759_to_fp16 = const()[name = tensor("op_12759_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1345702656)))]; + tensor input_741_cast = mul(x = var_12757_cast, y = var_12759_to_fp16)[name = tensor("input_741_cast")]; + tensor var_12767 = const()[name = tensor("op_12767"), val = tensor([1, 1])]; + tensor var_12769 = const()[name = tensor("op_12769"), val = tensor([1, 1])]; + tensor var_12771_pad_type_0 = const()[name = tensor("op_12771_pad_type_0"), val = tensor("custom")]; + tensor var_12771_pad_0 = const()[name = tensor("op_12771_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1345704000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1348980864))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1348981440)))]; + tensor var_12771_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_12769, groups = var_12518, pad = var_12771_pad_0, pad_type = var_12771_pad_type_0, strides = var_12767, weight = up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_741_cast)[name = tensor("op_12771_cast")]; + tensor var_12772_split_sizes_0 = const()[name = tensor("op_12772_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_12772_axis_0 = const()[name = tensor("op_12772_axis_0"), val = tensor(1)]; + tensor var_12772_cast_0, tensor var_12772_cast_1 = split(axis = var_12772_axis_0, split_sizes = var_12772_split_sizes_0, x = var_12771_cast)[name = tensor("op_12772_cast")]; + tensor var_12774_mode_0 = const()[name = tensor("op_12774_mode_0"), val = tensor("EXACT")]; + tensor var_12774_cast = gelu(mode = var_12774_mode_0, x = var_12772_cast_1)[name = tensor("op_12774_cast")]; + tensor input_743_cast = mul(x = var_12772_cast_0, y = var_12774_cast)[name = tensor("input_743_cast")]; + tensor var_12778 = const()[name = tensor("op_12778"), val = tensor([1, 1])]; + tensor var_12780 = const()[name = tensor("op_12780"), val = tensor([1, 1])]; + tensor var_12782_pad_type_0 = const()[name = tensor("op_12782_pad_type_0"), val = tensor("custom")]; + tensor var_12782_pad_0 = const()[name = tensor("op_12782_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1348991744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350630208))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350630784)))]; + tensor var_12782_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_12780, groups = var_12518, pad = var_12782_pad_0, pad_type = var_12782_pad_type_0, strides = var_12778, weight = up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_743_cast)[name = tensor("op_12782_cast")]; + tensor inputs_391_cast = add(x = var_12782_cast, y = inputs_389_cast)[name = tensor("inputs_391_cast")]; + tensor var_12792 = const()[name = tensor("op_12792"), val = tensor([1])]; + tensor channels_mean_391_cast = reduce_mean(axes = var_12792, keep_dims = var_12513, x = inputs_391_cast)[name = tensor("channels_mean_391_cast")]; + tensor zero_mean_391_cast = sub(x = inputs_391_cast, y = channels_mean_391_cast)[name = tensor("zero_mean_391_cast")]; + tensor zero_mean_sq_391_cast = mul(x = zero_mean_391_cast, y = zero_mean_391_cast)[name = tensor("zero_mean_sq_391_cast")]; + tensor var_12796 = const()[name = tensor("op_12796"), val = tensor([1])]; + tensor var_12797_cast = reduce_mean(axes = var_12796, keep_dims = var_12513, x = zero_mean_sq_391_cast)[name = tensor("op_12797_cast")]; + tensor var_12798_to_fp16 = const()[name = tensor("op_12798_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12799_cast = add(x = var_12797_cast, y = 
var_12798_to_fp16)[name = tensor("op_12799_cast")]; + tensor denom_391_epsilon_0_to_fp16 = const()[name = tensor("denom_391_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_391_cast = rsqrt(epsilon = denom_391_epsilon_0_to_fp16, x = var_12799_cast)[name = tensor("denom_391_cast")]; + tensor out_391_cast = mul(x = zero_mean_391_cast, y = denom_391_cast)[name = tensor("out_391_cast")]; + tensor var_12803_to_fp16 = const()[name = tensor("op_12803_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350632128)))]; + tensor var_12804_cast = add(x = out_391_cast, y = var_12803_to_fp16)[name = tensor("op_12804_cast")]; + tensor var_12806_to_fp16 = const()[name = tensor("op_12806_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350633472)))]; + tensor hidden_states_515_cast = mul(x = var_12804_cast, y = var_12806_to_fp16)[name = tensor("hidden_states_515_cast")]; + tensor var_12813 = const()[name = tensor("op_12813"), val = tensor([1, 1])]; + tensor var_12815 = const()[name = tensor("op_12815"), val = tensor([1, 1])]; + tensor q_261_pad_type_0 = const()[name = tensor("q_261_pad_type_0"), val = tensor("custom")]; + tensor q_261_pad_0 = const()[name = tensor("q_261_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350634816))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350942080))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_261_cast = conv(dilations = var_12815, groups = var_12518, pad = q_261_pad_0, pad_type = q_261_pad_type_0, strides = var_12813, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_515_cast)[name = tensor("q_261_cast")]; + tensor var_12819 = const()[name = tensor("op_12819"), val = tensor([1, 1])]; + tensor var_12821 = const()[name = tensor("op_12821"), val = tensor([1, 1])]; + tensor k_261_pad_type_0 = const()[name = tensor("k_261_pad_type_0"), val = tensor("custom")]; + tensor k_261_pad_0 = const()[name = tensor("k_261_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1350942272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1351249536))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_261_cast = conv(dilations = var_12821, groups = var_12518, pad = k_261_pad_0, pad_type = k_261_pad_type_0, strides = var_12819, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_515_cast)[name = tensor("k_261_cast")]; + tensor var_12825 = const()[name = tensor("op_12825"), val = tensor([1, 1])]; + tensor var_12827 = const()[name = tensor("op_12827"), val = tensor([1, 1])]; + tensor v_261_pad_type_0 = const()[name = tensor("v_261_pad_type_0"), val = tensor("custom")]; + tensor v_261_pad_0 = const()[name = tensor("v_261_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1351249728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1351659392))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_261_cast = conv(dilations = var_12827, groups = var_12518, pad = v_261_pad_0, pad_type = v_261_pad_type_0, strides = var_12825, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_515_cast)[name = tensor("v_261_cast")]; + tensor var_12831 = const()[name = tensor("op_12831"), val = tensor([2, 10, 64, -1])]; + tensor var_12832_cast = reshape(shape = var_12831, x = q_261_cast)[name = tensor("op_12832_cast")]; + tensor var_12833 = const()[name = tensor("op_12833"), val = tensor([2, 10, 64, -1])]; + tensor var_12834_cast = reshape(shape = var_12833, x = k_261_cast)[name = tensor("op_12834_cast")]; + tensor var_12835 = const()[name = tensor("op_12835"), val = tensor([2, 10, 64, -1])]; + tensor var_12836_cast = reshape(shape = var_12835, x = v_261_cast)[name = tensor("op_12836_cast")]; + tensor attn_weights_521_transpose_x_0 = const()[name = tensor("attn_weights_521_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_521_transpose_y_0 = const()[name = tensor("attn_weights_521_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_521_cast = matmul(transpose_x = attn_weights_521_transpose_x_0, transpose_y = attn_weights_521_transpose_y_0, x = var_12832_cast, y = var_12834_cast)[name = tensor("attn_weights_521_cast")]; + tensor attn_weights_523_cast = mul(x = attn_weights_521_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_523_cast")]; + tensor var_12840_cast = softmax(axis = var_12502, x = attn_weights_523_cast)[name = tensor("op_12840_cast")]; + tensor attn_261_transpose_x_0 = const()[name = tensor("attn_261_transpose_x_0"), val = tensor(false)]; + tensor attn_261_transpose_y_0 = const()[name = tensor("attn_261_transpose_y_0"), val = tensor(true)]; + tensor attn_261_cast = matmul(transpose_x = attn_261_transpose_x_0, transpose_y = attn_261_transpose_y_0, x = var_12836_cast, y = var_12840_cast)[name = tensor("attn_261_cast")]; + tensor var_12844 = const()[name = tensor("op_12844"), val = tensor([2, 640, 1, -1])]; + tensor input_745_cast = reshape(shape = var_12844, x = attn_261_cast)[name = tensor("input_745_cast")]; + tensor var_12849 = const()[name = tensor("op_12849"), val = tensor([1, 1])]; + tensor var_12851 = const()[name = tensor("op_12851"), val = tensor([1, 1])]; + tensor var_12853_pad_type_0 = const()[name = tensor("op_12853_pad_type_0"), val = tensor("custom")]; + tensor var_12853_pad_0 = const()[name = tensor("op_12853_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1351659968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352069632))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = 
tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352070208)))]; + tensor var_12853_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_12851, groups = var_12518, pad = var_12853_pad_0, pad_type = var_12853_pad_type_0, strides = var_12849, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_745_cast)[name = tensor("op_12853_cast")]; + tensor inputs_393_cast = add(x = var_12853_cast, y = inputs_391_cast)[name = tensor("inputs_393_cast")]; + tensor var_12857 = const()[name = tensor("op_12857"), val = tensor([1])]; + tensor channels_mean_393_cast = reduce_mean(axes = var_12857, keep_dims = var_12513, x = inputs_393_cast)[name = tensor("channels_mean_393_cast")]; + tensor zero_mean_393_cast = sub(x = inputs_393_cast, y = channels_mean_393_cast)[name = tensor("zero_mean_393_cast")]; + tensor zero_mean_sq_393_cast = mul(x = zero_mean_393_cast, y = zero_mean_393_cast)[name = tensor("zero_mean_sq_393_cast")]; + tensor var_12861 = const()[name = tensor("op_12861"), val = tensor([1])]; + tensor var_12862_cast = reduce_mean(axes = var_12861, keep_dims = var_12513, x = zero_mean_sq_393_cast)[name = tensor("op_12862_cast")]; + tensor var_12863_to_fp16 = const()[name = tensor("op_12863_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12864_cast = add(x = var_12862_cast, y = var_12863_to_fp16)[name = tensor("op_12864_cast")]; + tensor denom_393_epsilon_0_to_fp16 = const()[name = tensor("denom_393_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_393_cast = rsqrt(epsilon = denom_393_epsilon_0_to_fp16, x = var_12864_cast)[name = tensor("denom_393_cast")]; + tensor out_393_cast = mul(x = zero_mean_393_cast, y = denom_393_cast)[name = tensor("out_393_cast")]; + tensor var_12868_to_fp16 = const()[name = tensor("op_12868_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352071552)))]; + tensor var_12869_cast = add(x = out_393_cast, y = var_12868_to_fp16)[name = tensor("op_12869_cast")]; + tensor var_12871_to_fp16 = const()[name = tensor("op_12871_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352072896)))]; + tensor hidden_states_517_cast = mul(x = var_12869_cast, y = var_12871_to_fp16)[name = tensor("hidden_states_517_cast")]; + tensor var_12878 = const()[name = tensor("op_12878"), val = tensor([1, 1])]; + tensor var_12880 = const()[name = tensor("op_12880"), val = tensor([1, 1])]; + tensor q_263_pad_type_0 = const()[name = tensor("q_263_pad_type_0"), val = tensor("custom")]; + tensor q_263_pad_0 = const()[name = tensor("q_263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352074240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352279104))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_263_cast = conv(dilations = var_12880, groups = var_12518, pad = q_263_pad_0, pad_type = q_263_pad_type_0, strides = var_12878, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = 
hidden_states_517_cast)[name = tensor("q_263_cast")]; + tensor var_12884 = const()[name = tensor("op_12884"), val = tensor([1, 1])]; + tensor var_12886 = const()[name = tensor("op_12886"), val = tensor([1, 1])]; + tensor k_263_pad_type_0 = const()[name = tensor("k_263_pad_type_0"), val = tensor("custom")]; + tensor k_263_pad_0 = const()[name = tensor("k_263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352279232))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352934656))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_263_cast = conv(dilations = var_12886, groups = var_12518, pad = k_263_pad_0, pad_type = k_263_pad_type_0, strides = var_12884, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_263_cast")]; + tensor var_12890 = const()[name = tensor("op_12890"), val = tensor([1, 1])]; + tensor var_12892 = const()[name = tensor("op_12892"), val = tensor([1, 1])]; + tensor v_263_pad_type_0 = const()[name = tensor("v_263_pad_type_0"), val = tensor("custom")]; + tensor v_263_pad_0 = const()[name = tensor("v_263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1352934784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353590208))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_263_cast = conv(dilations = var_12892, groups = var_12518, pad = v_263_pad_0, pad_type = v_263_pad_type_0, strides = var_12890, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_263_cast")]; + tensor var_12896 = const()[name = tensor("op_12896"), val = tensor([2, 10, 64, -1])]; + tensor var_12897_cast = reshape(shape = var_12896, x = q_263_cast)[name = tensor("op_12897_cast")]; + tensor var_12898 = const()[name = tensor("op_12898"), val = tensor([2, 10, 64, -1])]; + tensor var_12899_cast = reshape(shape = var_12898, x = k_263_cast)[name = tensor("op_12899_cast")]; + tensor var_12900 = const()[name = tensor("op_12900"), val = tensor([2, 10, 64, -1])]; + tensor var_12901_cast = reshape(shape = var_12900, x = v_263_cast)[name = tensor("op_12901_cast")]; + tensor attn_weights_525_transpose_x_0 = const()[name = tensor("attn_weights_525_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_525_transpose_y_0 = const()[name = tensor("attn_weights_525_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_525_cast = matmul(transpose_x = attn_weights_525_transpose_x_0, transpose_y = attn_weights_525_transpose_y_0, x = var_12897_cast, y = var_12899_cast)[name = tensor("attn_weights_525_cast")]; + tensor attn_weights_527_cast = mul(x = attn_weights_525_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_527_cast")]; + tensor var_12905_cast = softmax(axis = var_12502, x = attn_weights_527_cast)[name = tensor("op_12905_cast")]; + tensor attn_263_transpose_x_0 = 
const()[name = tensor("attn_263_transpose_x_0"), val = tensor(false)]; + tensor attn_263_transpose_y_0 = const()[name = tensor("attn_263_transpose_y_0"), val = tensor(true)]; + tensor attn_263_cast = matmul(transpose_x = attn_263_transpose_x_0, transpose_y = attn_263_transpose_y_0, x = var_12901_cast, y = var_12905_cast)[name = tensor("attn_263_cast")]; + tensor var_12909 = const()[name = tensor("op_12909"), val = tensor([2, 640, 1, -1])]; + tensor input_747_cast = reshape(shape = var_12909, x = attn_263_cast)[name = tensor("input_747_cast")]; + tensor var_12914 = const()[name = tensor("op_12914"), val = tensor([1, 1])]; + tensor var_12916 = const()[name = tensor("op_12916"), val = tensor([1, 1])]; + tensor var_12918_pad_type_0 = const()[name = tensor("op_12918_pad_type_0"), val = tensor("custom")]; + tensor var_12918_pad_0 = const()[name = tensor("op_12918_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353590336))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353897600))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353897792)))]; + tensor var_12918_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_12916, groups = var_12518, pad = var_12918_pad_0, pad_type = var_12918_pad_type_0, strides = var_12914, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_747_cast)[name = tensor("op_12918_cast")]; + tensor inputs_395_cast = add(x = var_12918_cast, y = inputs_393_cast)[name = tensor("inputs_395_cast")]; + tensor var_12922 = const()[name = tensor("op_12922"), val = tensor([1])]; + tensor channels_mean_395_cast = reduce_mean(axes = var_12922, keep_dims = var_12513, x = inputs_395_cast)[name = tensor("channels_mean_395_cast")]; + tensor zero_mean_395_cast = sub(x = inputs_395_cast, y = channels_mean_395_cast)[name = tensor("zero_mean_395_cast")]; + tensor zero_mean_sq_395_cast = mul(x = zero_mean_395_cast, y = zero_mean_395_cast)[name = tensor("zero_mean_sq_395_cast")]; + tensor var_12926 = const()[name = tensor("op_12926"), val = tensor([1])]; + tensor var_12927_cast = reduce_mean(axes = var_12926, keep_dims = var_12513, x = zero_mean_sq_395_cast)[name = tensor("op_12927_cast")]; + tensor var_12928_to_fp16 = const()[name = tensor("op_12928_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12929_cast = add(x = var_12927_cast, y = var_12928_to_fp16)[name = tensor("op_12929_cast")]; + tensor denom_395_epsilon_0_to_fp16 = const()[name = tensor("denom_395_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_395_cast = rsqrt(epsilon = denom_395_epsilon_0_to_fp16, x = var_12929_cast)[name = tensor("denom_395_cast")]; + tensor out_395_cast = mul(x = zero_mean_395_cast, y = denom_395_cast)[name = tensor("out_395_cast")]; + tensor var_12933_to_fp16 = const()[name = tensor("op_12933_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1353899136)))]; + tensor var_12934_cast = add(x = out_395_cast, y = var_12933_to_fp16)[name = tensor("op_12934_cast")]; + tensor var_12936_to_fp16 = const()[name = tensor("op_12936_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353900480)))]; + tensor input_749_cast = mul(x = var_12934_cast, y = var_12936_to_fp16)[name = tensor("input_749_cast")]; + tensor var_12944 = const()[name = tensor("op_12944"), val = tensor([1, 1])]; + tensor var_12946 = const()[name = tensor("op_12946"), val = tensor([1, 1])]; + tensor var_12948_pad_type_0 = const()[name = tensor("op_12948_pad_type_0"), val = tensor("custom")]; + tensor var_12948_pad_0 = const()[name = tensor("op_12948_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353901824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1357178688))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1357179264)))]; + tensor var_12948_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_12946, groups = var_12518, pad = var_12948_pad_0, pad_type = var_12948_pad_type_0, strides = var_12944, weight = up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_749_cast)[name = tensor("op_12948_cast")]; + tensor var_12949_split_sizes_0 = const()[name = tensor("op_12949_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_12949_axis_0 = const()[name = tensor("op_12949_axis_0"), val = tensor(1)]; + tensor var_12949_cast_0, tensor var_12949_cast_1 = split(axis = var_12949_axis_0, split_sizes = var_12949_split_sizes_0, x = var_12948_cast)[name = tensor("op_12949_cast")]; + tensor var_12951_mode_0 = const()[name = tensor("op_12951_mode_0"), val = tensor("EXACT")]; + tensor var_12951_cast = gelu(mode = var_12951_mode_0, x = var_12949_cast_1)[name = tensor("op_12951_cast")]; + tensor input_751_cast = mul(x = var_12949_cast_0, y = var_12951_cast)[name = tensor("input_751_cast")]; + tensor var_12955 = const()[name = tensor("op_12955"), val = tensor([1, 1])]; + tensor var_12957 = const()[name = tensor("op_12957"), val = tensor([1, 1])]; + tensor var_12959_pad_type_0 = const()[name = tensor("op_12959_pad_type_0"), val = tensor("custom")]; + tensor var_12959_pad_0 = const()[name = tensor("op_12959_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1357189568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1358828032))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = 
tensor("up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1358828608)))]; + tensor var_12959_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_12957, groups = var_12518, pad = var_12959_pad_0, pad_type = var_12959_pad_type_0, strides = var_12955, weight = up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_751_cast)[name = tensor("op_12959_cast")]; + tensor hidden_states_521_cast = add(x = var_12959_cast, y = inputs_395_cast)[name = tensor("hidden_states_521_cast")]; + tensor var_12961 = const()[name = tensor("op_12961"), val = tensor([2, 640, 64, 64])]; + tensor input_753_cast = reshape(shape = var_12961, x = hidden_states_521_cast)[name = tensor("input_753_cast")]; + tensor var_12965 = const()[name = tensor("op_12965"), val = tensor([1, 1])]; + tensor var_12967 = const()[name = tensor("op_12967"), val = tensor([1, 1])]; + tensor hidden_states_523_pad_type_0 = const()[name = tensor("hidden_states_523_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_523_pad_0 = const()[name = tensor("hidden_states_523_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1358829952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359239616))), name = tensor("up_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359240192)))]; + tensor hidden_states_523_cast = conv(bias = up_blocks_1_attentions_0_proj_out_bias_to_fp16, dilations = var_12967, groups = var_12518, pad = hidden_states_523_pad_0, pad_type = hidden_states_523_pad_type_0, strides = var_12965, weight = up_blocks_1_attentions_0_proj_out_weight_to_fp16_palettized, x = input_753_cast)[name = tensor("hidden_states_523_cast")]; + tensor hidden_states_525_cast = add(x = hidden_states_523_cast, y = hidden_states_505_cast)[name = tensor("hidden_states_525_cast")]; + tensor input_755_interleave_0 = const()[name = tensor("input_755_interleave_0"), val = tensor(false)]; + tensor input_755_cast = concat(axis = var_12518, interleave = input_755_interleave_0, values = (hidden_states_525_cast, input_79_cast))[name = tensor("input_755_cast")]; + tensor reshape_132_shape_0 = const()[name = tensor("reshape_132_shape_0"), val = tensor([2, 32, 40, 64, 64])]; + tensor reshape_132_cast = reshape(shape = reshape_132_shape_0, x = input_755_cast)[name = tensor("reshape_132_cast")]; + tensor reduce_mean_99_axes_0 = const()[name = tensor("reduce_mean_99_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_99_keep_dims_0 = const()[name = tensor("reduce_mean_99_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_99_cast = reduce_mean(axes = reduce_mean_99_axes_0, keep_dims = reduce_mean_99_keep_dims_0, x = reshape_132_cast)[name = tensor("reduce_mean_99_cast")]; + tensor sub_66_cast = sub(x = reshape_132_cast, y = reduce_mean_99_cast)[name = tensor("sub_66_cast")]; + tensor square_33_cast = square(x = sub_66_cast)[name = tensor("square_33_cast")]; + tensor reduce_mean_101_axes_0 = 
const()[name = tensor("reduce_mean_101_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_101_keep_dims_0 = const()[name = tensor("reduce_mean_101_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_101_cast = reduce_mean(axes = reduce_mean_101_axes_0, keep_dims = reduce_mean_101_keep_dims_0, x = square_33_cast)[name = tensor("reduce_mean_101_cast")]; + tensor add_66_y_0_to_fp16 = const()[name = tensor("add_66_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_66_cast = add(x = reduce_mean_101_cast, y = add_66_y_0_to_fp16)[name = tensor("add_66_cast")]; + tensor sqrt_33_cast = sqrt(x = add_66_cast)[name = tensor("sqrt_33_cast")]; + tensor real_div_33_cast = real_div(x = sub_66_cast, y = sqrt_33_cast)[name = tensor("real_div_33_cast")]; + tensor reshape_133_shape_0 = const()[name = tensor("reshape_133_shape_0"), val = tensor([2, 1280, 64, 64])]; + tensor reshape_133_cast = reshape(shape = reshape_133_shape_0, x = real_div_33_cast)[name = tensor("reshape_133_cast")]; + tensor add_67_gamma_0_to_fp16 = const()[name = tensor("add_67_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359241536)))]; + tensor add_67_beta_0_to_fp16 = const()[name = tensor("add_67_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359244160)))]; + tensor add_67_epsilon_0_to_fp16 = const()[name = tensor("add_67_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_67_cast = batch_norm(beta = add_67_beta_0_to_fp16, epsilon = add_67_epsilon_0_to_fp16, gamma = add_67_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_133_cast)[name = tensor("add_67_cast")]; + tensor input_759_cast = silu(x = add_67_cast)[name = tensor("input_759_cast")]; + tensor var_12985 = const()[name = tensor("op_12985"), val = tensor([1, 1])]; + tensor var_12987 = const()[name = tensor("op_12987"), val = tensor([1, 1])]; + tensor hidden_states_527_pad_type_0 = const()[name = tensor("hidden_states_527_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_527_pad_0 = const()[name = tensor("hidden_states_527_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359246784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1366619648))), name = tensor("up_blocks_1_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([640, 1280, 3, 3])]; + tensor up_blocks_1_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1366620224)))]; + tensor hidden_states_527_cast = conv(bias = up_blocks_1_resnets_1_conv1_bias_to_fp16, dilations = var_12987, groups = var_12518, pad = hidden_states_527_pad_0, pad_type = hidden_states_527_pad_type_0, strides = var_12985, weight = up_blocks_1_resnets_1_conv1_weight_to_fp16_palettized, x = input_759_cast)[name = tensor("hidden_states_527_cast")]; + tensor var_12993 = const()[name = tensor("op_12993"), val = tensor([1, 1])]; + tensor var_12995 = const()[name = tensor("op_12995"), val = tensor([1, 1])]; + tensor temb_25_pad_type_0 = const()[name = tensor("temb_25_pad_type_0"), val = tensor("custom")]; + tensor temb_25_pad_0 = const()[name = tensor("temb_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1366621568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1367440832))), name = tensor("up_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor up_blocks_1_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1367441408)))]; + tensor temb_25_cast = conv(bias = up_blocks_1_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_12995, groups = var_12518, pad = temb_25_pad_0, pad_type = temb_25_pad_type_0, strides = var_12993, weight = up_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_25_cast")]; + tensor input_763_cast = add(x = hidden_states_527_cast, y = temb_25_cast)[name = tensor("input_763_cast")]; + tensor reshape_136_shape_0 = const()[name = tensor("reshape_136_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_136_cast = reshape(shape = reshape_136_shape_0, x = input_763_cast)[name = tensor("reshape_136_cast")]; + tensor reduce_mean_102_axes_0 = const()[name = tensor("reduce_mean_102_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_102_keep_dims_0 = const()[name = tensor("reduce_mean_102_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_102_cast = reduce_mean(axes = reduce_mean_102_axes_0, keep_dims = reduce_mean_102_keep_dims_0, x = reshape_136_cast)[name = tensor("reduce_mean_102_cast")]; + tensor sub_68_cast = sub(x = reshape_136_cast, y = reduce_mean_102_cast)[name = tensor("sub_68_cast")]; + tensor square_34_cast = square(x = sub_68_cast)[name = tensor("square_34_cast")]; + tensor reduce_mean_104_axes_0 = const()[name = tensor("reduce_mean_104_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_104_keep_dims_0 = const()[name = tensor("reduce_mean_104_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_104_cast = reduce_mean(axes = reduce_mean_104_axes_0, keep_dims = reduce_mean_104_keep_dims_0, x = square_34_cast)[name = tensor("reduce_mean_104_cast")]; + tensor add_68_y_0_to_fp16 = const()[name = tensor("add_68_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_68_cast = add(x = reduce_mean_104_cast, y = add_68_y_0_to_fp16)[name = tensor("add_68_cast")]; + tensor sqrt_34_cast = sqrt(x = add_68_cast)[name = tensor("sqrt_34_cast")]; + tensor real_div_34_cast = real_div(x = sub_68_cast, y = sqrt_34_cast)[name = tensor("real_div_34_cast")]; + tensor reshape_137_shape_0 = const()[name = tensor("reshape_137_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_137_cast = reshape(shape = reshape_137_shape_0, x = real_div_34_cast)[name = tensor("reshape_137_cast")]; + tensor add_69_gamma_0_to_fp16 = const()[name = tensor("add_69_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1367442752)))]; + tensor add_69_beta_0_to_fp16 = const()[name = tensor("add_69_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1367444096)))]; + tensor add_69_epsilon_0_to_fp16 = const()[name = tensor("add_69_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_69_cast = batch_norm(beta = add_69_beta_0_to_fp16, epsilon = add_69_epsilon_0_to_fp16, gamma = add_69_gamma_0_to_fp16, 
mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_137_cast)[name = tensor("add_69_cast")]; + tensor input_767_cast = silu(x = add_69_cast)[name = tensor("input_767_cast")]; + tensor var_13005 = const()[name = tensor("op_13005"), val = tensor([1, 1])]; + tensor var_13007 = const()[name = tensor("op_13007"), val = tensor([1, 1])]; + tensor hidden_states_529_pad_type_0 = const()[name = tensor("hidden_states_529_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_529_pad_0 = const()[name = tensor("hidden_states_529_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1367445440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1371131904))), name = tensor("up_blocks_1_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor up_blocks_1_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1371132480)))]; + tensor hidden_states_529_cast = conv(bias = up_blocks_1_resnets_1_conv2_bias_to_fp16, dilations = var_13007, groups = var_12518, pad = hidden_states_529_pad_0, pad_type = hidden_states_529_pad_type_0, strides = var_13005, weight = up_blocks_1_resnets_1_conv2_weight_to_fp16_palettized, x = input_767_cast)[name = tensor("hidden_states_529_cast")]; + tensor var_13012 = const()[name = tensor("op_13012"), val = tensor([1, 1])]; + tensor var_13014 = const()[name = tensor("op_13014"), val = tensor([1, 1])]; + tensor x_13_pad_type_0 = const()[name = tensor("x_13_pad_type_0"), val = tensor("custom")]; + tensor x_13_pad_0 = const()[name = tensor("x_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_1_conv_shortcut_weight_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_conv_shortcut_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1371133824)))]; + tensor up_blocks_1_resnets_1_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1372772288)))]; + tensor x_13_cast = conv(bias = up_blocks_1_resnets_1_conv_shortcut_bias_to_fp16, dilations = var_13014, groups = var_12518, pad = x_13_pad_0, pad_type = x_13_pad_type_0, strides = var_13012, weight = up_blocks_1_resnets_1_conv_shortcut_weight_to_fp16, x = input_755_cast)[name = tensor("x_13_cast")]; + tensor hidden_states_531_cast = add(x = x_13_cast, y = hidden_states_529_cast)[name = tensor("hidden_states_531_cast")]; + tensor reshape_140_shape_0 = const()[name = tensor("reshape_140_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_140_cast = reshape(shape = reshape_140_shape_0, x = hidden_states_531_cast)[name = tensor("reshape_140_cast")]; + tensor reduce_mean_105_axes_0 = const()[name = tensor("reduce_mean_105_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_105_keep_dims_0 = const()[name = tensor("reduce_mean_105_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_105_cast = reduce_mean(axes = reduce_mean_105_axes_0, keep_dims = reduce_mean_105_keep_dims_0, x = reshape_140_cast)[name = tensor("reduce_mean_105_cast")]; + tensor sub_70_cast = sub(x = reshape_140_cast, y = reduce_mean_105_cast)[name = 
tensor("sub_70_cast")]; + tensor square_35_cast = square(x = sub_70_cast)[name = tensor("square_35_cast")]; + tensor reduce_mean_107_axes_0 = const()[name = tensor("reduce_mean_107_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_107_keep_dims_0 = const()[name = tensor("reduce_mean_107_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_107_cast = reduce_mean(axes = reduce_mean_107_axes_0, keep_dims = reduce_mean_107_keep_dims_0, x = square_35_cast)[name = tensor("reduce_mean_107_cast")]; + tensor add_70_y_0_to_fp16 = const()[name = tensor("add_70_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_70_cast = add(x = reduce_mean_107_cast, y = add_70_y_0_to_fp16)[name = tensor("add_70_cast")]; + tensor sqrt_35_cast = sqrt(x = add_70_cast)[name = tensor("sqrt_35_cast")]; + tensor real_div_35_cast = real_div(x = sub_70_cast, y = sqrt_35_cast)[name = tensor("real_div_35_cast")]; + tensor reshape_141_shape_0 = const()[name = tensor("reshape_141_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_141_cast = reshape(shape = reshape_141_shape_0, x = real_div_35_cast)[name = tensor("reshape_141_cast")]; + tensor add_71_gamma_0_to_fp16 = const()[name = tensor("add_71_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1372773632)))]; + tensor add_71_beta_0_to_fp16 = const()[name = tensor("add_71_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1372774976)))]; + tensor add_71_epsilon_0_to_fp16 = const()[name = tensor("add_71_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_71_cast = batch_norm(beta = add_71_beta_0_to_fp16, epsilon = add_71_epsilon_0_to_fp16, gamma = add_71_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_141_cast)[name = tensor("add_71_cast")]; + tensor var_13036 = const()[name = tensor("op_13036"), val = tensor([1, 1])]; + tensor var_13038 = const()[name = tensor("op_13038"), val = tensor([1, 1])]; + tensor hidden_states_533_pad_type_0 = const()[name = tensor("hidden_states_533_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_533_pad_0 = const()[name = tensor("hidden_states_533_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1372776320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1373185984))), name = tensor("up_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1373186560)))]; + tensor hidden_states_533_cast = conv(bias = up_blocks_1_attentions_1_proj_in_bias_to_fp16, dilations = var_13038, groups = var_12518, pad = hidden_states_533_pad_0, pad_type = hidden_states_533_pad_type_0, strides = var_13036, weight = up_blocks_1_attentions_1_proj_in_weight_to_fp16_palettized, x = add_71_cast)[name = tensor("hidden_states_533_cast")]; + tensor var_13043 = const()[name = tensor("op_13043"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_397_cast = reshape(shape = var_13043, x = hidden_states_533_cast)[name = tensor("inputs_397_cast")]; + tensor var_13053 = const()[name = tensor("op_13053"), val = tensor([1])]; + 
tensor channels_mean_397_cast = reduce_mean(axes = var_13053, keep_dims = var_12513, x = inputs_397_cast)[name = tensor("channels_mean_397_cast")]; + tensor zero_mean_397_cast = sub(x = inputs_397_cast, y = channels_mean_397_cast)[name = tensor("zero_mean_397_cast")]; + tensor zero_mean_sq_397_cast = mul(x = zero_mean_397_cast, y = zero_mean_397_cast)[name = tensor("zero_mean_sq_397_cast")]; + tensor var_13057 = const()[name = tensor("op_13057"), val = tensor([1])]; + tensor var_13058_cast = reduce_mean(axes = var_13057, keep_dims = var_12513, x = zero_mean_sq_397_cast)[name = tensor("op_13058_cast")]; + tensor var_13059_to_fp16 = const()[name = tensor("op_13059_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13060_cast = add(x = var_13058_cast, y = var_13059_to_fp16)[name = tensor("op_13060_cast")]; + tensor denom_397_epsilon_0_to_fp16 = const()[name = tensor("denom_397_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_397_cast = rsqrt(epsilon = denom_397_epsilon_0_to_fp16, x = var_13060_cast)[name = tensor("denom_397_cast")]; + tensor out_397_cast = mul(x = zero_mean_397_cast, y = denom_397_cast)[name = tensor("out_397_cast")]; + tensor var_13064_to_fp16 = const()[name = tensor("op_13064_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1373187904)))]; + tensor var_13065_cast = add(x = out_397_cast, y = var_13064_to_fp16)[name = tensor("op_13065_cast")]; + tensor var_13067_to_fp16 = const()[name = tensor("op_13067_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1373189248)))]; + tensor hidden_states_535_cast = mul(x = var_13065_cast, y = var_13067_to_fp16)[name = tensor("hidden_states_535_cast")]; + tensor var_13074 = const()[name = tensor("op_13074"), val = tensor([1, 1])]; + tensor var_13076 = const()[name = tensor("op_13076"), val = tensor([1, 1])]; + tensor q_265_pad_type_0 = const()[name = tensor("q_265_pad_type_0"), val = tensor("custom")]; + tensor q_265_pad_0 = const()[name = tensor("q_265_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1373190592))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1373497856))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_265_cast = conv(dilations = var_13076, groups = var_12518, pad = q_265_pad_0, pad_type = q_265_pad_type_0, strides = var_13074, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_535_cast)[name = tensor("q_265_cast")]; + tensor var_13080 = const()[name = tensor("op_13080"), val = tensor([1, 1])]; + tensor var_13082 = const()[name = tensor("op_13082"), val = tensor([1, 1])]; + tensor k_265_pad_type_0 = const()[name = tensor("k_265_pad_type_0"), val = tensor("custom")]; + tensor k_265_pad_0 = const()[name = tensor("k_265_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1373498048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1373805312))), name = 
tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_265_cast = conv(dilations = var_13082, groups = var_12518, pad = k_265_pad_0, pad_type = k_265_pad_type_0, strides = var_13080, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_535_cast)[name = tensor("k_265_cast")]; + tensor var_13086 = const()[name = tensor("op_13086"), val = tensor([1, 1])]; + tensor var_13088 = const()[name = tensor("op_13088"), val = tensor([1, 1])]; + tensor v_265_pad_type_0 = const()[name = tensor("v_265_pad_type_0"), val = tensor("custom")]; + tensor v_265_pad_0 = const()[name = tensor("v_265_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1373805504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374215168))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_265_cast = conv(dilations = var_13088, groups = var_12518, pad = v_265_pad_0, pad_type = v_265_pad_type_0, strides = var_13086, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_535_cast)[name = tensor("v_265_cast")]; + tensor var_13092 = const()[name = tensor("op_13092"), val = tensor([2, 10, 64, -1])]; + tensor var_13093_cast = reshape(shape = var_13092, x = q_265_cast)[name = tensor("op_13093_cast")]; + tensor var_13094 = const()[name = tensor("op_13094"), val = tensor([2, 10, 64, -1])]; + tensor var_13095_cast = reshape(shape = var_13094, x = k_265_cast)[name = tensor("op_13095_cast")]; + tensor var_13096 = const()[name = tensor("op_13096"), val = tensor([2, 10, 64, -1])]; + tensor var_13097_cast = reshape(shape = var_13096, x = v_265_cast)[name = tensor("op_13097_cast")]; + tensor attn_weights_529_transpose_x_0 = const()[name = tensor("attn_weights_529_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_529_transpose_y_0 = const()[name = tensor("attn_weights_529_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_529_cast = matmul(transpose_x = attn_weights_529_transpose_x_0, transpose_y = attn_weights_529_transpose_y_0, x = var_13093_cast, y = var_13095_cast)[name = tensor("attn_weights_529_cast")]; + tensor attn_weights_531_cast = mul(x = attn_weights_529_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_531_cast")]; + tensor var_13101_cast = softmax(axis = var_12502, x = attn_weights_531_cast)[name = tensor("op_13101_cast")]; + tensor attn_265_transpose_x_0 = const()[name = tensor("attn_265_transpose_x_0"), val = tensor(false)]; + tensor attn_265_transpose_y_0 = const()[name = tensor("attn_265_transpose_y_0"), val = tensor(true)]; + tensor attn_265_cast = matmul(transpose_x = attn_265_transpose_x_0, transpose_y = attn_265_transpose_y_0, x = var_13097_cast, y = var_13101_cast)[name = tensor("attn_265_cast")]; + tensor var_13105 = const()[name = tensor("op_13105"), val = tensor([2, 640, 1, -1])]; + tensor input_771_cast = reshape(shape = var_13105, x = attn_265_cast)[name = tensor("input_771_cast")]; + tensor var_13110 = const()[name = tensor("op_13110"), val = tensor([1, 1])]; + tensor var_13112 = const()[name = tensor("op_13112"), val = tensor([1, 1])]; + tensor var_13114_pad_type_0 = 
const()[name = tensor("op_13114_pad_type_0"), val = tensor("custom")]; + tensor var_13114_pad_0 = const()[name = tensor("op_13114_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374215744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374625408))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374625984)))]; + tensor var_13114_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_13112, groups = var_12518, pad = var_13114_pad_0, pad_type = var_13114_pad_type_0, strides = var_13110, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_771_cast)[name = tensor("op_13114_cast")]; + tensor inputs_399_cast = add(x = var_13114_cast, y = inputs_397_cast)[name = tensor("inputs_399_cast")]; + tensor var_13118 = const()[name = tensor("op_13118"), val = tensor([1])]; + tensor channels_mean_399_cast = reduce_mean(axes = var_13118, keep_dims = var_12513, x = inputs_399_cast)[name = tensor("channels_mean_399_cast")]; + tensor zero_mean_399_cast = sub(x = inputs_399_cast, y = channels_mean_399_cast)[name = tensor("zero_mean_399_cast")]; + tensor zero_mean_sq_399_cast = mul(x = zero_mean_399_cast, y = zero_mean_399_cast)[name = tensor("zero_mean_sq_399_cast")]; + tensor var_13122 = const()[name = tensor("op_13122"), val = tensor([1])]; + tensor var_13123_cast = reduce_mean(axes = var_13122, keep_dims = var_12513, x = zero_mean_sq_399_cast)[name = tensor("op_13123_cast")]; + tensor var_13124_to_fp16 = const()[name = tensor("op_13124_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13125_cast = add(x = var_13123_cast, y = var_13124_to_fp16)[name = tensor("op_13125_cast")]; + tensor denom_399_epsilon_0_to_fp16 = const()[name = tensor("denom_399_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_399_cast = rsqrt(epsilon = denom_399_epsilon_0_to_fp16, x = var_13125_cast)[name = tensor("denom_399_cast")]; + tensor out_399_cast = mul(x = zero_mean_399_cast, y = denom_399_cast)[name = tensor("out_399_cast")]; + tensor var_13129_to_fp16 = const()[name = tensor("op_13129_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374627328)))]; + tensor var_13130_cast = add(x = out_399_cast, y = var_13129_to_fp16)[name = tensor("op_13130_cast")]; + tensor var_13132_to_fp16 = const()[name = tensor("op_13132_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374628672)))]; + tensor hidden_states_537_cast = mul(x = var_13130_cast, y = var_13132_to_fp16)[name = tensor("hidden_states_537_cast")]; + tensor var_13139 = const()[name = tensor("op_13139"), val = tensor([1, 1])]; + tensor var_13141 = const()[name = tensor("op_13141"), val = tensor([1, 1])]; + tensor q_267_pad_type_0 = const()[name = tensor("q_267_pad_type_0"), val = tensor("custom")]; + tensor q_267_pad_0 = const()[name = tensor("q_267_pad_0"), val = 
tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374630016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374937280))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_267_cast = conv(dilations = var_13141, groups = var_12518, pad = q_267_pad_0, pad_type = q_267_pad_type_0, strides = var_13139, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_537_cast)[name = tensor("q_267_cast")]; + tensor var_13145 = const()[name = tensor("op_13145"), val = tensor([1, 1])]; + tensor var_13147 = const()[name = tensor("op_13147"), val = tensor([1, 1])]; + tensor k_267_pad_type_0 = const()[name = tensor("k_267_pad_type_0"), val = tensor("custom")]; + tensor k_267_pad_0 = const()[name = tensor("k_267_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1374937472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1375920576))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_267_cast = conv(dilations = var_13147, groups = var_12518, pad = k_267_pad_0, pad_type = k_267_pad_type_0, strides = var_13145, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_267_cast")]; + tensor var_13151 = const()[name = tensor("op_13151"), val = tensor([1, 1])]; + tensor var_13153 = const()[name = tensor("op_13153"), val = tensor([1, 1])]; + tensor v_267_pad_type_0 = const()[name = tensor("v_267_pad_type_0"), val = tensor("custom")]; + tensor v_267_pad_0 = const()[name = tensor("v_267_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1375920768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1376903872))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_267_cast = conv(dilations = var_13153, groups = var_12518, pad = v_267_pad_0, pad_type = v_267_pad_type_0, strides = var_13151, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_267_cast")]; + tensor var_13157 = const()[name = tensor("op_13157"), val = tensor([2, 10, 64, -1])]; + tensor var_13158_cast = reshape(shape = var_13157, x = q_267_cast)[name = tensor("op_13158_cast")]; + tensor var_13159 = const()[name = tensor("op_13159"), val = tensor([2, 10, 64, -1])]; + tensor var_13160_cast = reshape(shape = var_13159, x = k_267_cast)[name = tensor("op_13160_cast")]; + tensor var_13161 = const()[name = tensor("op_13161"), val = tensor([2, 10, 64, -1])]; + tensor var_13162_cast = reshape(shape = var_13161, x = v_267_cast)[name = tensor("op_13162_cast")]; + tensor 
attn_weights_533_transpose_x_0 = const()[name = tensor("attn_weights_533_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_533_transpose_y_0 = const()[name = tensor("attn_weights_533_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_533_cast = matmul(transpose_x = attn_weights_533_transpose_x_0, transpose_y = attn_weights_533_transpose_y_0, x = var_13158_cast, y = var_13160_cast)[name = tensor("attn_weights_533_cast")]; + tensor attn_weights_535_cast = mul(x = attn_weights_533_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_535_cast")]; + tensor var_13166_cast = softmax(axis = var_12502, x = attn_weights_535_cast)[name = tensor("op_13166_cast")]; + tensor attn_267_transpose_x_0 = const()[name = tensor("attn_267_transpose_x_0"), val = tensor(false)]; + tensor attn_267_transpose_y_0 = const()[name = tensor("attn_267_transpose_y_0"), val = tensor(true)]; + tensor attn_267_cast = matmul(transpose_x = attn_267_transpose_x_0, transpose_y = attn_267_transpose_y_0, x = var_13162_cast, y = var_13166_cast)[name = tensor("attn_267_cast")]; + tensor var_13170 = const()[name = tensor("op_13170"), val = tensor([2, 640, 1, -1])]; + tensor input_773_cast = reshape(shape = var_13170, x = attn_267_cast)[name = tensor("input_773_cast")]; + tensor var_13175 = const()[name = tensor("op_13175"), val = tensor([1, 1])]; + tensor var_13177 = const()[name = tensor("op_13177"), val = tensor([1, 1])]; + tensor var_13179_pad_type_0 = const()[name = tensor("op_13179_pad_type_0"), val = tensor("custom")]; + tensor var_13179_pad_0 = const()[name = tensor("op_13179_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1376904064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1377211328))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1377211520)))]; + tensor var_13179_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_13177, groups = var_12518, pad = var_13179_pad_0, pad_type = var_13179_pad_type_0, strides = var_13175, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_773_cast)[name = tensor("op_13179_cast")]; + tensor inputs_401_cast = add(x = var_13179_cast, y = inputs_399_cast)[name = tensor("inputs_401_cast")]; + tensor var_13183 = const()[name = tensor("op_13183"), val = tensor([1])]; + tensor channels_mean_401_cast = reduce_mean(axes = var_13183, keep_dims = var_12513, x = inputs_401_cast)[name = tensor("channels_mean_401_cast")]; + tensor zero_mean_401_cast = sub(x = inputs_401_cast, y = channels_mean_401_cast)[name = tensor("zero_mean_401_cast")]; + tensor zero_mean_sq_401_cast = mul(x = zero_mean_401_cast, y = zero_mean_401_cast)[name = tensor("zero_mean_sq_401_cast")]; + tensor var_13187 = const()[name = tensor("op_13187"), val = tensor([1])]; + tensor var_13188_cast = reduce_mean(axes = var_13187, keep_dims = var_12513, x = zero_mean_sq_401_cast)[name = 
tensor("op_13188_cast")]; + tensor var_13189_to_fp16 = const()[name = tensor("op_13189_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13190_cast = add(x = var_13188_cast, y = var_13189_to_fp16)[name = tensor("op_13190_cast")]; + tensor denom_401_epsilon_0_to_fp16 = const()[name = tensor("denom_401_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_401_cast = rsqrt(epsilon = denom_401_epsilon_0_to_fp16, x = var_13190_cast)[name = tensor("denom_401_cast")]; + tensor out_401_cast = mul(x = zero_mean_401_cast, y = denom_401_cast)[name = tensor("out_401_cast")]; + tensor var_13194_to_fp16 = const()[name = tensor("op_13194_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1377212864)))]; + tensor var_13195_cast = add(x = out_401_cast, y = var_13194_to_fp16)[name = tensor("op_13195_cast")]; + tensor var_13197_to_fp16 = const()[name = tensor("op_13197_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1377214208)))]; + tensor input_775_cast = mul(x = var_13195_cast, y = var_13197_to_fp16)[name = tensor("input_775_cast")]; + tensor var_13205 = const()[name = tensor("op_13205"), val = tensor([1, 1])]; + tensor var_13207 = const()[name = tensor("op_13207"), val = tensor([1, 1])]; + tensor var_13209_pad_type_0 = const()[name = tensor("op_13209_pad_type_0"), val = tensor("custom")]; + tensor var_13209_pad_0 = const()[name = tensor("op_13209_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1377215552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1380492416))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1380492992)))]; + tensor var_13209_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_13207, groups = var_12518, pad = var_13209_pad_0, pad_type = var_13209_pad_type_0, strides = var_13205, weight = up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_775_cast)[name = tensor("op_13209_cast")]; + tensor var_13210_split_sizes_0 = const()[name = tensor("op_13210_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_13210_axis_0 = const()[name = tensor("op_13210_axis_0"), val = tensor(1)]; + tensor var_13210_cast_0, tensor var_13210_cast_1 = split(axis = var_13210_axis_0, split_sizes = var_13210_split_sizes_0, x = var_13209_cast)[name = tensor("op_13210_cast")]; + tensor var_13212_mode_0 = const()[name = tensor("op_13212_mode_0"), val = tensor("EXACT")]; + tensor var_13212_cast = gelu(mode = var_13212_mode_0, x = var_13210_cast_1)[name = tensor("op_13212_cast")]; + tensor input_777_cast = mul(x = var_13210_cast_0, y = var_13212_cast)[name = tensor("input_777_cast")]; + tensor var_13216 = const()[name = tensor("op_13216"), val = tensor([1, 1])]; + tensor var_13218 = const()[name = tensor("op_13218"), val = tensor([1, 1])]; + tensor var_13220_pad_type_0 = const()[name = 
tensor("op_13220_pad_type_0"), val = tensor("custom")]; + tensor var_13220_pad_0 = const()[name = tensor("op_13220_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1380503296))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382141760))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382142336)))]; + tensor var_13220_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_13218, groups = var_12518, pad = var_13220_pad_0, pad_type = var_13220_pad_type_0, strides = var_13216, weight = up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_777_cast)[name = tensor("op_13220_cast")]; + tensor inputs_403_cast = add(x = var_13220_cast, y = inputs_401_cast)[name = tensor("inputs_403_cast")]; + tensor var_13230 = const()[name = tensor("op_13230"), val = tensor([1])]; + tensor channels_mean_403_cast = reduce_mean(axes = var_13230, keep_dims = var_12513, x = inputs_403_cast)[name = tensor("channels_mean_403_cast")]; + tensor zero_mean_403_cast = sub(x = inputs_403_cast, y = channels_mean_403_cast)[name = tensor("zero_mean_403_cast")]; + tensor zero_mean_sq_403_cast = mul(x = zero_mean_403_cast, y = zero_mean_403_cast)[name = tensor("zero_mean_sq_403_cast")]; + tensor var_13234 = const()[name = tensor("op_13234"), val = tensor([1])]; + tensor var_13235_cast = reduce_mean(axes = var_13234, keep_dims = var_12513, x = zero_mean_sq_403_cast)[name = tensor("op_13235_cast")]; + tensor var_13236_to_fp16 = const()[name = tensor("op_13236_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13237_cast = add(x = var_13235_cast, y = var_13236_to_fp16)[name = tensor("op_13237_cast")]; + tensor denom_403_epsilon_0_to_fp16 = const()[name = tensor("denom_403_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_403_cast = rsqrt(epsilon = denom_403_epsilon_0_to_fp16, x = var_13237_cast)[name = tensor("denom_403_cast")]; + tensor out_403_cast = mul(x = zero_mean_403_cast, y = denom_403_cast)[name = tensor("out_403_cast")]; + tensor var_13241_to_fp16 = const()[name = tensor("op_13241_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382143680)))]; + tensor var_13242_cast = add(x = out_403_cast, y = var_13241_to_fp16)[name = tensor("op_13242_cast")]; + tensor var_13244_to_fp16 = const()[name = tensor("op_13244_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382145024)))]; + tensor hidden_states_541_cast = mul(x = var_13242_cast, y = var_13244_to_fp16)[name = tensor("hidden_states_541_cast")]; + tensor var_13251 = const()[name = tensor("op_13251"), val = tensor([1, 1])]; + tensor var_13253 = const()[name = tensor("op_13253"), val = tensor([1, 1])]; + tensor q_269_pad_type_0 = const()[name = tensor("q_269_pad_type_0"), val = tensor("custom")]; + tensor q_269_pad_0 = const()[name = tensor("q_269_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382146368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382453632))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_269_cast = conv(dilations = var_13253, groups = var_12518, pad = q_269_pad_0, pad_type = q_269_pad_type_0, strides = var_13251, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_541_cast)[name = tensor("q_269_cast")]; + tensor var_13257 = const()[name = tensor("op_13257"), val = tensor([1, 1])]; + tensor var_13259 = const()[name = tensor("op_13259"), val = tensor([1, 1])]; + tensor k_269_pad_type_0 = const()[name = tensor("k_269_pad_type_0"), val = tensor("custom")]; + tensor k_269_pad_0 = const()[name = tensor("k_269_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382453824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382863488))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_269_cast = conv(dilations = var_13259, groups = var_12518, pad = k_269_pad_0, pad_type = k_269_pad_type_0, strides = var_13257, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_541_cast)[name = tensor("k_269_cast")]; + tensor var_13263 = const()[name = tensor("op_13263"), val = tensor([1, 1])]; + tensor var_13265 = const()[name = tensor("op_13265"), val = tensor([1, 1])]; + tensor v_269_pad_type_0 = const()[name = tensor("v_269_pad_type_0"), val = tensor("custom")]; + tensor v_269_pad_0 = const()[name = tensor("v_269_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382864064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1383273728))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_269_cast = conv(dilations = var_13265, groups = var_12518, pad = v_269_pad_0, pad_type = v_269_pad_type_0, strides = var_13263, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_541_cast)[name = tensor("v_269_cast")]; + tensor var_13269 = const()[name = tensor("op_13269"), val = tensor([2, 10, 64, -1])]; + tensor var_13270_cast = reshape(shape = var_13269, x = q_269_cast)[name = tensor("op_13270_cast")]; + tensor var_13271 = const()[name = tensor("op_13271"), val = tensor([2, 10, 64, -1])]; + tensor var_13272_cast = reshape(shape = var_13271, x = k_269_cast)[name = tensor("op_13272_cast")]; + tensor var_13273 = const()[name = tensor("op_13273"), val = tensor([2, 10, 64, -1])]; + tensor var_13274_cast = reshape(shape = var_13273, x = v_269_cast)[name = tensor("op_13274_cast")]; + tensor attn_weights_537_transpose_x_0 = 
const()[name = tensor("attn_weights_537_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_537_transpose_y_0 = const()[name = tensor("attn_weights_537_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_537_cast = matmul(transpose_x = attn_weights_537_transpose_x_0, transpose_y = attn_weights_537_transpose_y_0, x = var_13270_cast, y = var_13272_cast)[name = tensor("attn_weights_537_cast")]; + tensor attn_weights_539_cast = mul(x = attn_weights_537_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_539_cast")]; + tensor var_13278_cast = softmax(axis = var_12502, x = attn_weights_539_cast)[name = tensor("op_13278_cast")]; + tensor attn_269_transpose_x_0 = const()[name = tensor("attn_269_transpose_x_0"), val = tensor(false)]; + tensor attn_269_transpose_y_0 = const()[name = tensor("attn_269_transpose_y_0"), val = tensor(true)]; + tensor attn_269_cast = matmul(transpose_x = attn_269_transpose_x_0, transpose_y = attn_269_transpose_y_0, x = var_13274_cast, y = var_13278_cast)[name = tensor("attn_269_cast")]; + tensor var_13282 = const()[name = tensor("op_13282"), val = tensor([2, 640, 1, -1])]; + tensor input_779_cast = reshape(shape = var_13282, x = attn_269_cast)[name = tensor("input_779_cast")]; + tensor var_13287 = const()[name = tensor("op_13287"), val = tensor([1, 1])]; + tensor var_13289 = const()[name = tensor("op_13289"), val = tensor([1, 1])]; + tensor var_13291_pad_type_0 = const()[name = tensor("op_13291_pad_type_0"), val = tensor("custom")]; + tensor var_13291_pad_0 = const()[name = tensor("op_13291_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1383274304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1383683968))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1383684544)))]; + tensor var_13291_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_13289, groups = var_12518, pad = var_13291_pad_0, pad_type = var_13291_pad_type_0, strides = var_13287, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_779_cast)[name = tensor("op_13291_cast")]; + tensor inputs_405_cast = add(x = var_13291_cast, y = inputs_403_cast)[name = tensor("inputs_405_cast")]; + tensor var_13295 = const()[name = tensor("op_13295"), val = tensor([1])]; + tensor channels_mean_405_cast = reduce_mean(axes = var_13295, keep_dims = var_12513, x = inputs_405_cast)[name = tensor("channels_mean_405_cast")]; + tensor zero_mean_405_cast = sub(x = inputs_405_cast, y = channels_mean_405_cast)[name = tensor("zero_mean_405_cast")]; + tensor zero_mean_sq_405_cast = mul(x = zero_mean_405_cast, y = zero_mean_405_cast)[name = tensor("zero_mean_sq_405_cast")]; + tensor var_13299 = const()[name = tensor("op_13299"), val = tensor([1])]; + tensor var_13300_cast = reduce_mean(axes = var_13299, keep_dims = var_12513, x = zero_mean_sq_405_cast)[name = tensor("op_13300_cast")]; + tensor 
var_13301_to_fp16 = const()[name = tensor("op_13301_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13302_cast = add(x = var_13300_cast, y = var_13301_to_fp16)[name = tensor("op_13302_cast")]; + tensor denom_405_epsilon_0_to_fp16 = const()[name = tensor("denom_405_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_405_cast = rsqrt(epsilon = denom_405_epsilon_0_to_fp16, x = var_13302_cast)[name = tensor("denom_405_cast")]; + tensor out_405_cast = mul(x = zero_mean_405_cast, y = denom_405_cast)[name = tensor("out_405_cast")]; + tensor var_13306_to_fp16 = const()[name = tensor("op_13306_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1383685888)))]; + tensor var_13307_cast = add(x = out_405_cast, y = var_13306_to_fp16)[name = tensor("op_13307_cast")]; + tensor var_13309_to_fp16 = const()[name = tensor("op_13309_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1383687232)))]; + tensor hidden_states_543_cast = mul(x = var_13307_cast, y = var_13309_to_fp16)[name = tensor("hidden_states_543_cast")]; + tensor var_13316 = const()[name = tensor("op_13316"), val = tensor([1, 1])]; + tensor var_13318 = const()[name = tensor("op_13318"), val = tensor([1, 1])]; + tensor q_271_pad_type_0 = const()[name = tensor("q_271_pad_type_0"), val = tensor("custom")]; + tensor q_271_pad_0 = const()[name = tensor("q_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1383688576))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1383893440))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_271_cast = conv(dilations = var_13318, groups = var_12518, pad = q_271_pad_0, pad_type = q_271_pad_type_0, strides = var_13316, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_543_cast)[name = tensor("q_271_cast")]; + tensor var_13322 = const()[name = tensor("op_13322"), val = tensor([1, 1])]; + tensor var_13324 = const()[name = tensor("op_13324"), val = tensor([1, 1])]; + tensor k_271_pad_type_0 = const()[name = tensor("k_271_pad_type_0"), val = tensor("custom")]; + tensor k_271_pad_0 = const()[name = tensor("k_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1383893568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1384876672))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_271_cast = conv(dilations = var_13324, groups = var_12518, pad = k_271_pad_0, pad_type = k_271_pad_type_0, strides = var_13322, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_271_cast")]; + tensor var_13328 = const()[name = tensor("op_13328"), val = tensor([1, 1])]; + tensor var_13330 = const()[name = tensor("op_13330"), val = tensor([1, 1])]; + tensor v_271_pad_type_0 = const()[name = tensor("v_271_pad_type_0"), val = 
tensor("custom")]; + tensor v_271_pad_0 = const()[name = tensor("v_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1384876864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1385859968))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_271_cast = conv(dilations = var_13330, groups = var_12518, pad = v_271_pad_0, pad_type = v_271_pad_type_0, strides = var_13328, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_271_cast")]; + tensor var_13334 = const()[name = tensor("op_13334"), val = tensor([2, 10, 64, -1])]; + tensor var_13335_cast = reshape(shape = var_13334, x = q_271_cast)[name = tensor("op_13335_cast")]; + tensor var_13336 = const()[name = tensor("op_13336"), val = tensor([2, 10, 64, -1])]; + tensor var_13337_cast = reshape(shape = var_13336, x = k_271_cast)[name = tensor("op_13337_cast")]; + tensor var_13338 = const()[name = tensor("op_13338"), val = tensor([2, 10, 64, -1])]; + tensor var_13339_cast = reshape(shape = var_13338, x = v_271_cast)[name = tensor("op_13339_cast")]; + tensor attn_weights_541_transpose_x_0 = const()[name = tensor("attn_weights_541_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_541_transpose_y_0 = const()[name = tensor("attn_weights_541_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_541_cast = matmul(transpose_x = attn_weights_541_transpose_x_0, transpose_y = attn_weights_541_transpose_y_0, x = var_13335_cast, y = var_13337_cast)[name = tensor("attn_weights_541_cast")]; + tensor attn_weights_543_cast = mul(x = attn_weights_541_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_543_cast")]; + tensor var_13343_cast = softmax(axis = var_12502, x = attn_weights_543_cast)[name = tensor("op_13343_cast")]; + tensor attn_271_transpose_x_0 = const()[name = tensor("attn_271_transpose_x_0"), val = tensor(false)]; + tensor attn_271_transpose_y_0 = const()[name = tensor("attn_271_transpose_y_0"), val = tensor(true)]; + tensor attn_271_cast = matmul(transpose_x = attn_271_transpose_x_0, transpose_y = attn_271_transpose_y_0, x = var_13339_cast, y = var_13343_cast)[name = tensor("attn_271_cast")]; + tensor var_13347 = const()[name = tensor("op_13347"), val = tensor([2, 640, 1, -1])]; + tensor input_781_cast = reshape(shape = var_13347, x = attn_271_cast)[name = tensor("input_781_cast")]; + tensor var_13352 = const()[name = tensor("op_13352"), val = tensor([1, 1])]; + tensor var_13354 = const()[name = tensor("op_13354"), val = tensor([1, 1])]; + tensor var_13356_pad_type_0 = const()[name = tensor("op_13356_pad_type_0"), val = tensor("custom")]; + tensor var_13356_pad_0 = const()[name = tensor("op_13356_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1385860160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386167424))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor 
up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386167616)))]; + tensor var_13356_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_13354, groups = var_12518, pad = var_13356_pad_0, pad_type = var_13356_pad_type_0, strides = var_13352, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_781_cast)[name = tensor("op_13356_cast")]; + tensor inputs_407_cast = add(x = var_13356_cast, y = inputs_405_cast)[name = tensor("inputs_407_cast")]; + tensor var_13360 = const()[name = tensor("op_13360"), val = tensor([1])]; + tensor channels_mean_407_cast = reduce_mean(axes = var_13360, keep_dims = var_12513, x = inputs_407_cast)[name = tensor("channels_mean_407_cast")]; + tensor zero_mean_407_cast = sub(x = inputs_407_cast, y = channels_mean_407_cast)[name = tensor("zero_mean_407_cast")]; + tensor zero_mean_sq_407_cast = mul(x = zero_mean_407_cast, y = zero_mean_407_cast)[name = tensor("zero_mean_sq_407_cast")]; + tensor var_13364 = const()[name = tensor("op_13364"), val = tensor([1])]; + tensor var_13365_cast = reduce_mean(axes = var_13364, keep_dims = var_12513, x = zero_mean_sq_407_cast)[name = tensor("op_13365_cast")]; + tensor var_13366_to_fp16 = const()[name = tensor("op_13366_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13367_cast = add(x = var_13365_cast, y = var_13366_to_fp16)[name = tensor("op_13367_cast")]; + tensor denom_407_epsilon_0_to_fp16 = const()[name = tensor("denom_407_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_407_cast = rsqrt(epsilon = denom_407_epsilon_0_to_fp16, x = var_13367_cast)[name = tensor("denom_407_cast")]; + tensor out_407_cast = mul(x = zero_mean_407_cast, y = denom_407_cast)[name = tensor("out_407_cast")]; + tensor var_13371_to_fp16 = const()[name = tensor("op_13371_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386168960)))]; + tensor var_13372_cast = add(x = out_407_cast, y = var_13371_to_fp16)[name = tensor("op_13372_cast")]; + tensor var_13374_to_fp16 = const()[name = tensor("op_13374_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386170304)))]; + tensor input_783_cast = mul(x = var_13372_cast, y = var_13374_to_fp16)[name = tensor("input_783_cast")]; + tensor var_13382 = const()[name = tensor("op_13382"), val = tensor([1, 1])]; + tensor var_13384 = const()[name = tensor("op_13384"), val = tensor([1, 1])]; + tensor var_13386_pad_type_0 = const()[name = tensor("op_13386_pad_type_0"), val = tensor("custom")]; + tensor var_13386_pad_0 = const()[name = tensor("op_13386_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1386171648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1389448512))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = 
tensor("up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1389449088)))]; + tensor var_13386_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_13384, groups = var_12518, pad = var_13386_pad_0, pad_type = var_13386_pad_type_0, strides = var_13382, weight = up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_783_cast)[name = tensor("op_13386_cast")]; + tensor var_13387_split_sizes_0 = const()[name = tensor("op_13387_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_13387_axis_0 = const()[name = tensor("op_13387_axis_0"), val = tensor(1)]; + tensor var_13387_cast_0, tensor var_13387_cast_1 = split(axis = var_13387_axis_0, split_sizes = var_13387_split_sizes_0, x = var_13386_cast)[name = tensor("op_13387_cast")]; + tensor var_13389_mode_0 = const()[name = tensor("op_13389_mode_0"), val = tensor("EXACT")]; + tensor var_13389_cast = gelu(mode = var_13389_mode_0, x = var_13387_cast_1)[name = tensor("op_13389_cast")]; + tensor input_785_cast = mul(x = var_13387_cast_0, y = var_13389_cast)[name = tensor("input_785_cast")]; + tensor var_13393 = const()[name = tensor("op_13393"), val = tensor([1, 1])]; + tensor var_13395 = const()[name = tensor("op_13395"), val = tensor([1, 1])]; + tensor var_13397_pad_type_0 = const()[name = tensor("op_13397_pad_type_0"), val = tensor("custom")]; + tensor var_13397_pad_0 = const()[name = tensor("op_13397_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1389459392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391097856))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391098432)))]; + tensor var_13397_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_13395, groups = var_12518, pad = var_13397_pad_0, pad_type = var_13397_pad_type_0, strides = var_13393, weight = up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_785_cast)[name = tensor("op_13397_cast")]; + tensor hidden_states_547_cast = add(x = var_13397_cast, y = inputs_407_cast)[name = tensor("hidden_states_547_cast")]; + tensor var_13399 = const()[name = tensor("op_13399"), val = tensor([2, 640, 64, 64])]; + tensor input_787_cast = reshape(shape = var_13399, x = hidden_states_547_cast)[name = tensor("input_787_cast")]; + tensor var_13403 = const()[name = tensor("op_13403"), val = tensor([1, 1])]; + tensor var_13405 = const()[name = tensor("op_13405"), val = tensor([1, 1])]; + tensor hidden_states_549_pad_type_0 = const()[name = tensor("hidden_states_549_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_549_pad_0 = const()[name = tensor("hidden_states_549_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391099776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391509440))), name = tensor("up_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391510016)))]; + tensor hidden_states_549_cast = conv(bias = up_blocks_1_attentions_1_proj_out_bias_to_fp16, dilations = var_13405, groups = var_12518, pad = hidden_states_549_pad_0, pad_type = hidden_states_549_pad_type_0, strides = var_13403, weight = up_blocks_1_attentions_1_proj_out_weight_to_fp16_palettized, x = input_787_cast)[name = tensor("hidden_states_549_cast")]; + tensor hidden_states_551_cast = add(x = hidden_states_549_cast, y = hidden_states_531_cast)[name = tensor("hidden_states_551_cast")]; + tensor input_789_interleave_0 = const()[name = tensor("input_789_interleave_0"), val = tensor(false)]; + tensor input_789_cast = concat(axis = var_12518, interleave = input_789_interleave_0, values = (hidden_states_551_cast, input_45_cast))[name = tensor("input_789_cast")]; + tensor reshape_144_shape_0 = const()[name = tensor("reshape_144_shape_0"), val = tensor([2, 32, 30, 64, 64])]; + tensor reshape_144_cast = reshape(shape = reshape_144_shape_0, x = input_789_cast)[name = tensor("reshape_144_cast")]; + tensor reduce_mean_108_axes_0 = const()[name = tensor("reduce_mean_108_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_108_keep_dims_0 = const()[name = tensor("reduce_mean_108_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_108_cast = reduce_mean(axes = reduce_mean_108_axes_0, keep_dims = reduce_mean_108_keep_dims_0, x = reshape_144_cast)[name = tensor("reduce_mean_108_cast")]; + tensor sub_72_cast = sub(x = reshape_144_cast, y = reduce_mean_108_cast)[name = tensor("sub_72_cast")]; + tensor square_36_cast = square(x = sub_72_cast)[name = tensor("square_36_cast")]; + tensor reduce_mean_110_axes_0 = const()[name = tensor("reduce_mean_110_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_110_keep_dims_0 = const()[name = tensor("reduce_mean_110_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_110_cast = reduce_mean(axes = reduce_mean_110_axes_0, keep_dims = reduce_mean_110_keep_dims_0, x = square_36_cast)[name = tensor("reduce_mean_110_cast")]; + tensor add_72_y_0_to_fp16 = const()[name = tensor("add_72_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_72_cast = add(x = reduce_mean_110_cast, y = add_72_y_0_to_fp16)[name = tensor("add_72_cast")]; + tensor sqrt_36_cast = sqrt(x = add_72_cast)[name = tensor("sqrt_36_cast")]; + tensor real_div_36_cast = real_div(x = sub_72_cast, y = sqrt_36_cast)[name = tensor("real_div_36_cast")]; + tensor reshape_145_shape_0 = const()[name = tensor("reshape_145_shape_0"), val = tensor([2, 960, 64, 64])]; + tensor reshape_145_cast = reshape(shape = reshape_145_shape_0, x = real_div_36_cast)[name = tensor("reshape_145_cast")]; + tensor add_73_mean_0_to_fp16 = const()[name = tensor("add_73_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391511360)))]; + tensor add_73_variance_0_to_fp16 = const()[name = tensor("add_73_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391513344)))]; + 
tensor add_73_gamma_0_to_fp16 = const()[name = tensor("add_73_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391515328)))]; + tensor add_73_beta_0_to_fp16 = const()[name = tensor("add_73_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391517312)))]; + tensor add_73_epsilon_0_to_fp16 = const()[name = tensor("add_73_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_73_cast = batch_norm(beta = add_73_beta_0_to_fp16, epsilon = add_73_epsilon_0_to_fp16, gamma = add_73_gamma_0_to_fp16, mean = add_73_mean_0_to_fp16, variance = add_73_variance_0_to_fp16, x = reshape_145_cast)[name = tensor("add_73_cast")]; + tensor input_793_cast = silu(x = add_73_cast)[name = tensor("input_793_cast")]; + tensor var_13423 = const()[name = tensor("op_13423"), val = tensor([1, 1])]; + tensor var_13425 = const()[name = tensor("op_13425"), val = tensor([1, 1])]; + tensor hidden_states_553_pad_type_0 = const()[name = tensor("hidden_states_553_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_553_pad_0 = const()[name = tensor("hidden_states_553_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_2_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1391519296))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397048960))), name = tensor("up_blocks_1_resnets_2_conv1_weight_to_fp16_palettized"), shape = tensor([640, 960, 3, 3])]; + tensor up_blocks_1_resnets_2_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397049536)))]; + tensor hidden_states_553_cast = conv(bias = up_blocks_1_resnets_2_conv1_bias_to_fp16, dilations = var_13425, groups = var_12518, pad = hidden_states_553_pad_0, pad_type = hidden_states_553_pad_type_0, strides = var_13423, weight = up_blocks_1_resnets_2_conv1_weight_to_fp16_palettized, x = input_793_cast)[name = tensor("hidden_states_553_cast")]; + tensor var_13431 = const()[name = tensor("op_13431"), val = tensor([1, 1])]; + tensor var_13433 = const()[name = tensor("op_13433"), val = tensor([1, 1])]; + tensor temb_27_pad_type_0 = const()[name = tensor("temb_27_pad_type_0"), val = tensor("custom")]; + tensor temb_27_pad_0 = const()[name = tensor("temb_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_2_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397050880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397870144))), name = tensor("up_blocks_1_resnets_2_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor up_blocks_1_resnets_2_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397870720)))]; + tensor temb_27_cast = conv(bias = up_blocks_1_resnets_2_time_emb_proj_bias_to_fp16, dilations = var_13433, groups = var_12518, pad = temb_27_pad_0, pad_type = temb_27_pad_type_0, strides = var_13431, weight = up_blocks_1_resnets_2_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_27_cast")]; + tensor input_797_cast = add(x 
= hidden_states_553_cast, y = temb_27_cast)[name = tensor("input_797_cast")]; + tensor reshape_148_shape_0 = const()[name = tensor("reshape_148_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_148_cast = reshape(shape = reshape_148_shape_0, x = input_797_cast)[name = tensor("reshape_148_cast")]; + tensor reduce_mean_111_axes_0 = const()[name = tensor("reduce_mean_111_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_111_keep_dims_0 = const()[name = tensor("reduce_mean_111_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_111_cast = reduce_mean(axes = reduce_mean_111_axes_0, keep_dims = reduce_mean_111_keep_dims_0, x = reshape_148_cast)[name = tensor("reduce_mean_111_cast")]; + tensor sub_74_cast = sub(x = reshape_148_cast, y = reduce_mean_111_cast)[name = tensor("sub_74_cast")]; + tensor square_37_cast = square(x = sub_74_cast)[name = tensor("square_37_cast")]; + tensor reduce_mean_113_axes_0 = const()[name = tensor("reduce_mean_113_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_113_keep_dims_0 = const()[name = tensor("reduce_mean_113_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_113_cast = reduce_mean(axes = reduce_mean_113_axes_0, keep_dims = reduce_mean_113_keep_dims_0, x = square_37_cast)[name = tensor("reduce_mean_113_cast")]; + tensor add_74_y_0_to_fp16 = const()[name = tensor("add_74_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_74_cast = add(x = reduce_mean_113_cast, y = add_74_y_0_to_fp16)[name = tensor("add_74_cast")]; + tensor sqrt_37_cast = sqrt(x = add_74_cast)[name = tensor("sqrt_37_cast")]; + tensor real_div_37_cast = real_div(x = sub_74_cast, y = sqrt_37_cast)[name = tensor("real_div_37_cast")]; + tensor reshape_149_shape_0 = const()[name = tensor("reshape_149_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_149_cast = reshape(shape = reshape_149_shape_0, x = real_div_37_cast)[name = tensor("reshape_149_cast")]; + tensor add_75_gamma_0_to_fp16 = const()[name = tensor("add_75_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397872064)))]; + tensor add_75_beta_0_to_fp16 = const()[name = tensor("add_75_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397873408)))]; + tensor add_75_epsilon_0_to_fp16 = const()[name = tensor("add_75_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_75_cast = batch_norm(beta = add_75_beta_0_to_fp16, epsilon = add_75_epsilon_0_to_fp16, gamma = add_75_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_149_cast)[name = tensor("add_75_cast")]; + tensor input_801_cast = silu(x = add_75_cast)[name = tensor("input_801_cast")]; + tensor var_13443 = const()[name = tensor("op_13443"), val = tensor([1, 1])]; + tensor var_13445 = const()[name = tensor("op_13445"), val = tensor([1, 1])]; + tensor hidden_states_555_pad_type_0 = const()[name = tensor("hidden_states_555_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_555_pad_0 = const()[name = tensor("hidden_states_555_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_2_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1397874752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1401561216))), name = tensor("up_blocks_1_resnets_2_conv2_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor 
up_blocks_1_resnets_2_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1401561792)))]; + tensor hidden_states_555_cast = conv(bias = up_blocks_1_resnets_2_conv2_bias_to_fp16, dilations = var_13445, groups = var_12518, pad = hidden_states_555_pad_0, pad_type = hidden_states_555_pad_type_0, strides = var_13443, weight = up_blocks_1_resnets_2_conv2_weight_to_fp16_palettized, x = input_801_cast)[name = tensor("hidden_states_555_cast")]; + tensor var_13450 = const()[name = tensor("op_13450"), val = tensor([1, 1])]; + tensor var_13452 = const()[name = tensor("op_13452"), val = tensor([1, 1])]; + tensor x_15_pad_type_0 = const()[name = tensor("x_15_pad_type_0"), val = tensor("custom")]; + tensor x_15_pad_0 = const()[name = tensor("x_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_2_conv_shortcut_weight_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_conv_shortcut_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1401563136)))]; + tensor up_blocks_1_resnets_2_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1402792000)))]; + tensor x_15_cast = conv(bias = up_blocks_1_resnets_2_conv_shortcut_bias_to_fp16, dilations = var_13452, groups = var_12518, pad = x_15_pad_0, pad_type = x_15_pad_type_0, strides = var_13450, weight = up_blocks_1_resnets_2_conv_shortcut_weight_to_fp16, x = input_789_cast)[name = tensor("x_15_cast")]; + tensor hidden_states_557_cast = add(x = x_15_cast, y = hidden_states_555_cast)[name = tensor("hidden_states_557_cast")]; + tensor reshape_152_shape_0 = const()[name = tensor("reshape_152_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_152_cast = reshape(shape = reshape_152_shape_0, x = hidden_states_557_cast)[name = tensor("reshape_152_cast")]; + tensor reduce_mean_114_axes_0 = const()[name = tensor("reduce_mean_114_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_114_keep_dims_0 = const()[name = tensor("reduce_mean_114_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_114_cast = reduce_mean(axes = reduce_mean_114_axes_0, keep_dims = reduce_mean_114_keep_dims_0, x = reshape_152_cast)[name = tensor("reduce_mean_114_cast")]; + tensor sub_76_cast = sub(x = reshape_152_cast, y = reduce_mean_114_cast)[name = tensor("sub_76_cast")]; + tensor square_38_cast = square(x = sub_76_cast)[name = tensor("square_38_cast")]; + tensor reduce_mean_116_axes_0 = const()[name = tensor("reduce_mean_116_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_116_keep_dims_0 = const()[name = tensor("reduce_mean_116_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_116_cast = reduce_mean(axes = reduce_mean_116_axes_0, keep_dims = reduce_mean_116_keep_dims_0, x = square_38_cast)[name = tensor("reduce_mean_116_cast")]; + tensor add_76_y_0_to_fp16 = const()[name = tensor("add_76_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_76_cast = add(x = reduce_mean_116_cast, y = add_76_y_0_to_fp16)[name = tensor("add_76_cast")]; + tensor sqrt_38_cast = sqrt(x = add_76_cast)[name = tensor("sqrt_38_cast")]; + tensor real_div_38_cast = real_div(x = sub_76_cast, y = sqrt_38_cast)[name = tensor("real_div_38_cast")]; + tensor reshape_153_shape_0 = const()[name = tensor("reshape_153_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor 
reshape_153_cast = reshape(shape = reshape_153_shape_0, x = real_div_38_cast)[name = tensor("reshape_153_cast")]; + tensor add_77_gamma_0_to_fp16 = const()[name = tensor("add_77_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1402793344)))]; + tensor add_77_beta_0_to_fp16 = const()[name = tensor("add_77_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1402794688)))]; + tensor add_77_epsilon_0_to_fp16 = const()[name = tensor("add_77_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_77_cast = batch_norm(beta = add_77_beta_0_to_fp16, epsilon = add_77_epsilon_0_to_fp16, gamma = add_77_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_153_cast)[name = tensor("add_77_cast")]; + tensor var_13474 = const()[name = tensor("op_13474"), val = tensor([1, 1])]; + tensor var_13476 = const()[name = tensor("op_13476"), val = tensor([1, 1])]; + tensor hidden_states_559_pad_type_0 = const()[name = tensor("hidden_states_559_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_559_pad_0 = const()[name = tensor("hidden_states_559_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1402796032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403205696))), name = tensor("up_blocks_1_attentions_2_proj_in_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403206272)))]; + tensor hidden_states_559_cast = conv(bias = up_blocks_1_attentions_2_proj_in_bias_to_fp16, dilations = var_13476, groups = var_12518, pad = hidden_states_559_pad_0, pad_type = hidden_states_559_pad_type_0, strides = var_13474, weight = up_blocks_1_attentions_2_proj_in_weight_to_fp16_palettized, x = add_77_cast)[name = tensor("hidden_states_559_cast")]; + tensor var_13481 = const()[name = tensor("op_13481"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_409_cast = reshape(shape = var_13481, x = hidden_states_559_cast)[name = tensor("inputs_409_cast")]; + tensor var_13491 = const()[name = tensor("op_13491"), val = tensor([1])]; + tensor channels_mean_409_cast = reduce_mean(axes = var_13491, keep_dims = var_12513, x = inputs_409_cast)[name = tensor("channels_mean_409_cast")]; + tensor zero_mean_409_cast = sub(x = inputs_409_cast, y = channels_mean_409_cast)[name = tensor("zero_mean_409_cast")]; + tensor zero_mean_sq_409_cast = mul(x = zero_mean_409_cast, y = zero_mean_409_cast)[name = tensor("zero_mean_sq_409_cast")]; + tensor var_13495 = const()[name = tensor("op_13495"), val = tensor([1])]; + tensor var_13496_cast = reduce_mean(axes = var_13495, keep_dims = var_12513, x = zero_mean_sq_409_cast)[name = tensor("op_13496_cast")]; + tensor var_13497_to_fp16 = const()[name = tensor("op_13497_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13498_cast = add(x = var_13496_cast, y = var_13497_to_fp16)[name = tensor("op_13498_cast")]; + tensor denom_409_epsilon_0_to_fp16 = const()[name = tensor("denom_409_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_409_cast = rsqrt(epsilon = denom_409_epsilon_0_to_fp16, x = 
var_13498_cast)[name = tensor("denom_409_cast")]; + tensor out_409_cast = mul(x = zero_mean_409_cast, y = denom_409_cast)[name = tensor("out_409_cast")]; + tensor var_13502_to_fp16 = const()[name = tensor("op_13502_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403207616)))]; + tensor var_13503_cast = add(x = out_409_cast, y = var_13502_to_fp16)[name = tensor("op_13503_cast")]; + tensor var_13505_to_fp16 = const()[name = tensor("op_13505_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403208960)))]; + tensor hidden_states_561_cast = mul(x = var_13503_cast, y = var_13505_to_fp16)[name = tensor("hidden_states_561_cast")]; + tensor var_13512 = const()[name = tensor("op_13512"), val = tensor([1, 1])]; + tensor var_13514 = const()[name = tensor("op_13514"), val = tensor([1, 1])]; + tensor q_273_pad_type_0 = const()[name = tensor("q_273_pad_type_0"), val = tensor("custom")]; + tensor q_273_pad_0 = const()[name = tensor("q_273_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403210304))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403517568))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_273_cast = conv(dilations = var_13514, groups = var_12518, pad = q_273_pad_0, pad_type = q_273_pad_type_0, strides = var_13512, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_561_cast)[name = tensor("q_273_cast")]; + tensor var_13518 = const()[name = tensor("op_13518"), val = tensor([1, 1])]; + tensor var_13520 = const()[name = tensor("op_13520"), val = tensor([1, 1])]; + tensor k_273_pad_type_0 = const()[name = tensor("k_273_pad_type_0"), val = tensor("custom")]; + tensor k_273_pad_0 = const()[name = tensor("k_273_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403517760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403825024))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_273_cast = conv(dilations = var_13520, groups = var_12518, pad = k_273_pad_0, pad_type = k_273_pad_type_0, strides = var_13518, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_561_cast)[name = tensor("k_273_cast")]; + tensor var_13524 = const()[name = tensor("op_13524"), val = tensor([1, 1])]; + tensor var_13526 = const()[name = tensor("op_13526"), val = tensor([1, 1])]; + tensor v_273_pad_type_0 = const()[name = tensor("v_273_pad_type_0"), val = tensor("custom")]; + tensor v_273_pad_0 = const()[name = tensor("v_273_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403825216))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1404234880))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_273_cast = conv(dilations = var_13526, groups = var_12518, pad = v_273_pad_0, pad_type = v_273_pad_type_0, strides = var_13524, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_561_cast)[name = tensor("v_273_cast")]; + tensor var_13530 = const()[name = tensor("op_13530"), val = tensor([2, 10, 64, -1])]; + tensor var_13531_cast = reshape(shape = var_13530, x = q_273_cast)[name = tensor("op_13531_cast")]; + tensor var_13532 = const()[name = tensor("op_13532"), val = tensor([2, 10, 64, -1])]; + tensor var_13533_cast = reshape(shape = var_13532, x = k_273_cast)[name = tensor("op_13533_cast")]; + tensor var_13534 = const()[name = tensor("op_13534"), val = tensor([2, 10, 64, -1])]; + tensor var_13535_cast = reshape(shape = var_13534, x = v_273_cast)[name = tensor("op_13535_cast")]; + tensor attn_weights_545_transpose_x_0 = const()[name = tensor("attn_weights_545_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_545_transpose_y_0 = const()[name = tensor("attn_weights_545_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_545_cast = matmul(transpose_x = attn_weights_545_transpose_x_0, transpose_y = attn_weights_545_transpose_y_0, x = var_13531_cast, y = var_13533_cast)[name = tensor("attn_weights_545_cast")]; + tensor attn_weights_547_cast = mul(x = attn_weights_545_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_547_cast")]; + tensor var_13539_cast = softmax(axis = var_12502, x = attn_weights_547_cast)[name = tensor("op_13539_cast")]; + tensor attn_273_transpose_x_0 = const()[name = tensor("attn_273_transpose_x_0"), val = tensor(false)]; + tensor attn_273_transpose_y_0 = const()[name = tensor("attn_273_transpose_y_0"), val = tensor(true)]; + tensor attn_273_cast = matmul(transpose_x = attn_273_transpose_x_0, transpose_y = attn_273_transpose_y_0, x = var_13535_cast, y = var_13539_cast)[name = tensor("attn_273_cast")]; + tensor var_13543 = const()[name = tensor("op_13543"), val = tensor([2, 640, 1, -1])]; + tensor input_805_cast = reshape(shape = var_13543, x = attn_273_cast)[name = tensor("input_805_cast")]; + tensor var_13548 = const()[name = tensor("op_13548"), val = tensor([1, 1])]; + tensor var_13550 = const()[name = tensor("op_13550"), val = tensor([1, 1])]; + tensor var_13552_pad_type_0 = const()[name = tensor("op_13552_pad_type_0"), val = tensor("custom")]; + tensor var_13552_pad_0 = const()[name = tensor("op_13552_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1404235456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1404645120))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1404645696)))]; + tensor var_13552_cast = conv(bias = 
up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_13550, groups = var_12518, pad = var_13552_pad_0, pad_type = var_13552_pad_type_0, strides = var_13548, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_805_cast)[name = tensor("op_13552_cast")]; + tensor inputs_411_cast = add(x = var_13552_cast, y = inputs_409_cast)[name = tensor("inputs_411_cast")]; + tensor var_13556 = const()[name = tensor("op_13556"), val = tensor([1])]; + tensor channels_mean_411_cast = reduce_mean(axes = var_13556, keep_dims = var_12513, x = inputs_411_cast)[name = tensor("channels_mean_411_cast")]; + tensor zero_mean_411_cast = sub(x = inputs_411_cast, y = channels_mean_411_cast)[name = tensor("zero_mean_411_cast")]; + tensor zero_mean_sq_411_cast = mul(x = zero_mean_411_cast, y = zero_mean_411_cast)[name = tensor("zero_mean_sq_411_cast")]; + tensor var_13560 = const()[name = tensor("op_13560"), val = tensor([1])]; + tensor var_13561_cast = reduce_mean(axes = var_13560, keep_dims = var_12513, x = zero_mean_sq_411_cast)[name = tensor("op_13561_cast")]; + tensor var_13562_to_fp16 = const()[name = tensor("op_13562_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13563_cast = add(x = var_13561_cast, y = var_13562_to_fp16)[name = tensor("op_13563_cast")]; + tensor denom_411_epsilon_0_to_fp16 = const()[name = tensor("denom_411_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_411_cast = rsqrt(epsilon = denom_411_epsilon_0_to_fp16, x = var_13563_cast)[name = tensor("denom_411_cast")]; + tensor out_411_cast = mul(x = zero_mean_411_cast, y = denom_411_cast)[name = tensor("out_411_cast")]; + tensor var_13567_to_fp16 = const()[name = tensor("op_13567_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1404647040)))]; + tensor var_13568_cast = add(x = out_411_cast, y = var_13567_to_fp16)[name = tensor("op_13568_cast")]; + tensor var_13570_to_fp16 = const()[name = tensor("op_13570_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1404648384)))]; + tensor hidden_states_563_cast = mul(x = var_13568_cast, y = var_13570_to_fp16)[name = tensor("hidden_states_563_cast")]; + tensor var_13577 = const()[name = tensor("op_13577"), val = tensor([1, 1])]; + tensor var_13579 = const()[name = tensor("op_13579"), val = tensor([1, 1])]; + tensor q_275_pad_type_0 = const()[name = tensor("q_275_pad_type_0"), val = tensor("custom")]; + tensor q_275_pad_0 = const()[name = tensor("q_275_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1404649728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1404956992))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_275_cast = conv(dilations = var_13579, groups = var_12518, pad = q_275_pad_0, pad_type = q_275_pad_type_0, strides = var_13577, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_563_cast)[name = tensor("q_275_cast")]; + tensor var_13583 = const()[name = tensor("op_13583"), val = tensor([1, 1])]; + tensor var_13585 = const()[name = tensor("op_13585"), val = tensor([1, 1])]; + tensor k_275_pad_type_0 = 
const()[name = tensor("k_275_pad_type_0"), val = tensor("custom")]; + tensor k_275_pad_0 = const()[name = tensor("k_275_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1404957184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1405940288))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_275_cast = conv(dilations = var_13585, groups = var_12518, pad = k_275_pad_0, pad_type = k_275_pad_type_0, strides = var_13583, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_275_cast")]; + tensor var_13589 = const()[name = tensor("op_13589"), val = tensor([1, 1])]; + tensor var_13591 = const()[name = tensor("op_13591"), val = tensor([1, 1])]; + tensor v_275_pad_type_0 = const()[name = tensor("v_275_pad_type_0"), val = tensor("custom")]; + tensor v_275_pad_0 = const()[name = tensor("v_275_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1405940480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1406923584))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_275_cast = conv(dilations = var_13591, groups = var_12518, pad = v_275_pad_0, pad_type = v_275_pad_type_0, strides = var_13589, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_275_cast")]; + tensor var_13595 = const()[name = tensor("op_13595"), val = tensor([2, 10, 64, -1])]; + tensor var_13596_cast = reshape(shape = var_13595, x = q_275_cast)[name = tensor("op_13596_cast")]; + tensor var_13597 = const()[name = tensor("op_13597"), val = tensor([2, 10, 64, -1])]; + tensor var_13598_cast = reshape(shape = var_13597, x = k_275_cast)[name = tensor("op_13598_cast")]; + tensor var_13599 = const()[name = tensor("op_13599"), val = tensor([2, 10, 64, -1])]; + tensor var_13600_cast = reshape(shape = var_13599, x = v_275_cast)[name = tensor("op_13600_cast")]; + tensor attn_weights_549_transpose_x_0 = const()[name = tensor("attn_weights_549_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_549_transpose_y_0 = const()[name = tensor("attn_weights_549_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_549_cast = matmul(transpose_x = attn_weights_549_transpose_x_0, transpose_y = attn_weights_549_transpose_y_0, x = var_13596_cast, y = var_13598_cast)[name = tensor("attn_weights_549_cast")]; + tensor attn_weights_551_cast = mul(x = attn_weights_549_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_551_cast")]; + tensor var_13604_cast = softmax(axis = var_12502, x = attn_weights_551_cast)[name = tensor("op_13604_cast")]; + tensor attn_275_transpose_x_0 = const()[name = tensor("attn_275_transpose_x_0"), val = tensor(false)]; + tensor attn_275_transpose_y_0 = const()[name = tensor("attn_275_transpose_y_0"), val = tensor(true)]; + tensor attn_275_cast = matmul(transpose_x = attn_275_transpose_x_0, 
transpose_y = attn_275_transpose_y_0, x = var_13600_cast, y = var_13604_cast)[name = tensor("attn_275_cast")]; + tensor var_13608 = const()[name = tensor("op_13608"), val = tensor([2, 640, 1, -1])]; + tensor input_807_cast = reshape(shape = var_13608, x = attn_275_cast)[name = tensor("input_807_cast")]; + tensor var_13613 = const()[name = tensor("op_13613"), val = tensor([1, 1])]; + tensor var_13615 = const()[name = tensor("op_13615"), val = tensor([1, 1])]; + tensor var_13617_pad_type_0 = const()[name = tensor("op_13617_pad_type_0"), val = tensor("custom")]; + tensor var_13617_pad_0 = const()[name = tensor("op_13617_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1406923776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1407231040))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1407231232)))]; + tensor var_13617_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_13615, groups = var_12518, pad = var_13617_pad_0, pad_type = var_13617_pad_type_0, strides = var_13613, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_807_cast)[name = tensor("op_13617_cast")]; + tensor inputs_413_cast = add(x = var_13617_cast, y = inputs_411_cast)[name = tensor("inputs_413_cast")]; + tensor var_13621 = const()[name = tensor("op_13621"), val = tensor([1])]; + tensor channels_mean_413_cast = reduce_mean(axes = var_13621, keep_dims = var_12513, x = inputs_413_cast)[name = tensor("channels_mean_413_cast")]; + tensor zero_mean_413_cast = sub(x = inputs_413_cast, y = channels_mean_413_cast)[name = tensor("zero_mean_413_cast")]; + tensor zero_mean_sq_413_cast = mul(x = zero_mean_413_cast, y = zero_mean_413_cast)[name = tensor("zero_mean_sq_413_cast")]; + tensor var_13625 = const()[name = tensor("op_13625"), val = tensor([1])]; + tensor var_13626_cast = reduce_mean(axes = var_13625, keep_dims = var_12513, x = zero_mean_sq_413_cast)[name = tensor("op_13626_cast")]; + tensor var_13627_to_fp16 = const()[name = tensor("op_13627_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13628_cast = add(x = var_13626_cast, y = var_13627_to_fp16)[name = tensor("op_13628_cast")]; + tensor denom_413_epsilon_0_to_fp16 = const()[name = tensor("denom_413_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_413_cast = rsqrt(epsilon = denom_413_epsilon_0_to_fp16, x = var_13628_cast)[name = tensor("denom_413_cast")]; + tensor out_413_cast = mul(x = zero_mean_413_cast, y = denom_413_cast)[name = tensor("out_413_cast")]; + tensor var_13632_to_fp16 = const()[name = tensor("op_13632_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1407232576)))]; + tensor var_13633_cast = add(x = out_413_cast, y = var_13632_to_fp16)[name = tensor("op_13633_cast")]; + tensor var_13635_to_fp16 = const()[name = tensor("op_13635_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1407233920)))]; + tensor input_809_cast = mul(x = var_13633_cast, y = var_13635_to_fp16)[name = tensor("input_809_cast")]; + tensor var_13643 = const()[name = tensor("op_13643"), val = tensor([1, 1])]; + tensor var_13645 = const()[name = tensor("op_13645"), val = tensor([1, 1])]; + tensor var_13647_pad_type_0 = const()[name = tensor("op_13647_pad_type_0"), val = tensor("custom")]; + tensor var_13647_pad_0 = const()[name = tensor("op_13647_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1407235264))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1410512128))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1410512704)))]; + tensor var_13647_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_13645, groups = var_12518, pad = var_13647_pad_0, pad_type = var_13647_pad_type_0, strides = var_13643, weight = up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_809_cast)[name = tensor("op_13647_cast")]; + tensor var_13648_split_sizes_0 = const()[name = tensor("op_13648_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_13648_axis_0 = const()[name = tensor("op_13648_axis_0"), val = tensor(1)]; + tensor var_13648_cast_0, tensor var_13648_cast_1 = split(axis = var_13648_axis_0, split_sizes = var_13648_split_sizes_0, x = var_13647_cast)[name = tensor("op_13648_cast")]; + tensor var_13650_mode_0 = const()[name = tensor("op_13650_mode_0"), val = tensor("EXACT")]; + tensor var_13650_cast = gelu(mode = var_13650_mode_0, x = var_13648_cast_1)[name = tensor("op_13650_cast")]; + tensor input_811_cast = mul(x = var_13648_cast_0, y = var_13650_cast)[name = tensor("input_811_cast")]; + tensor var_13654 = const()[name = tensor("op_13654"), val = tensor([1, 1])]; + tensor var_13656 = const()[name = tensor("op_13656"), val = tensor([1, 1])]; + tensor var_13658_pad_type_0 = const()[name = tensor("op_13658_pad_type_0"), val = tensor("custom")]; + tensor var_13658_pad_0 = const()[name = tensor("op_13658_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1410523008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412161472))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412162048)))]; + tensor var_13658_cast = conv(bias = 
up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_13656, groups = var_12518, pad = var_13658_pad_0, pad_type = var_13658_pad_type_0, strides = var_13654, weight = up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_811_cast)[name = tensor("op_13658_cast")]; + tensor inputs_415_cast = add(x = var_13658_cast, y = inputs_413_cast)[name = tensor("inputs_415_cast")]; + tensor var_13668 = const()[name = tensor("op_13668"), val = tensor([1])]; + tensor channels_mean_415_cast = reduce_mean(axes = var_13668, keep_dims = var_12513, x = inputs_415_cast)[name = tensor("channels_mean_415_cast")]; + tensor zero_mean_415_cast = sub(x = inputs_415_cast, y = channels_mean_415_cast)[name = tensor("zero_mean_415_cast")]; + tensor zero_mean_sq_415_cast = mul(x = zero_mean_415_cast, y = zero_mean_415_cast)[name = tensor("zero_mean_sq_415_cast")]; + tensor var_13672 = const()[name = tensor("op_13672"), val = tensor([1])]; + tensor var_13673_cast = reduce_mean(axes = var_13672, keep_dims = var_12513, x = zero_mean_sq_415_cast)[name = tensor("op_13673_cast")]; + tensor var_13674_to_fp16 = const()[name = tensor("op_13674_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13675_cast = add(x = var_13673_cast, y = var_13674_to_fp16)[name = tensor("op_13675_cast")]; + tensor denom_415_epsilon_0_to_fp16 = const()[name = tensor("denom_415_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_415_cast = rsqrt(epsilon = denom_415_epsilon_0_to_fp16, x = var_13675_cast)[name = tensor("denom_415_cast")]; + tensor out_415_cast = mul(x = zero_mean_415_cast, y = denom_415_cast)[name = tensor("out_415_cast")]; + tensor var_13679_to_fp16 = const()[name = tensor("op_13679_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412163392)))]; + tensor var_13680_cast = add(x = out_415_cast, y = var_13679_to_fp16)[name = tensor("op_13680_cast")]; + tensor var_13682_to_fp16 = const()[name = tensor("op_13682_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412164736)))]; + tensor hidden_states_567_cast = mul(x = var_13680_cast, y = var_13682_to_fp16)[name = tensor("hidden_states_567_cast")]; + tensor var_13689 = const()[name = tensor("op_13689"), val = tensor([1, 1])]; + tensor var_13691 = const()[name = tensor("op_13691"), val = tensor([1, 1])]; + tensor q_277_pad_type_0 = const()[name = tensor("q_277_pad_type_0"), val = tensor("custom")]; + tensor q_277_pad_0 = const()[name = tensor("q_277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412166080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412575744))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_277_cast = conv(dilations = var_13691, groups = var_12518, pad = q_277_pad_0, pad_type = q_277_pad_type_0, strides = var_13689, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_567_cast)[name = tensor("q_277_cast")]; + tensor var_13695 = const()[name = tensor("op_13695"), val = tensor([1, 1])]; + tensor var_13697 = const()[name = tensor("op_13697"), val = tensor([1, 1])]; + tensor k_277_pad_type_0 = const()[name = 
tensor("k_277_pad_type_0"), val = tensor("custom")]; + tensor k_277_pad_0 = const()[name = tensor("k_277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412576320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412985984))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_277_cast = conv(dilations = var_13697, groups = var_12518, pad = k_277_pad_0, pad_type = k_277_pad_type_0, strides = var_13695, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_567_cast)[name = tensor("k_277_cast")]; + tensor var_13701 = const()[name = tensor("op_13701"), val = tensor([1, 1])]; + tensor var_13703 = const()[name = tensor("op_13703"), val = tensor([1, 1])]; + tensor v_277_pad_type_0 = const()[name = tensor("v_277_pad_type_0"), val = tensor("custom")]; + tensor v_277_pad_0 = const()[name = tensor("v_277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412986560))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1413396224))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_277_cast = conv(dilations = var_13703, groups = var_12518, pad = v_277_pad_0, pad_type = v_277_pad_type_0, strides = var_13701, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_567_cast)[name = tensor("v_277_cast")]; + tensor var_13707 = const()[name = tensor("op_13707"), val = tensor([2, 10, 64, -1])]; + tensor var_13708_cast = reshape(shape = var_13707, x = q_277_cast)[name = tensor("op_13708_cast")]; + tensor var_13709 = const()[name = tensor("op_13709"), val = tensor([2, 10, 64, -1])]; + tensor var_13710_cast = reshape(shape = var_13709, x = k_277_cast)[name = tensor("op_13710_cast")]; + tensor var_13711 = const()[name = tensor("op_13711"), val = tensor([2, 10, 64, -1])]; + tensor var_13712_cast = reshape(shape = var_13711, x = v_277_cast)[name = tensor("op_13712_cast")]; + tensor attn_weights_553_transpose_x_0 = const()[name = tensor("attn_weights_553_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_553_transpose_y_0 = const()[name = tensor("attn_weights_553_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_553_cast = matmul(transpose_x = attn_weights_553_transpose_x_0, transpose_y = attn_weights_553_transpose_y_0, x = var_13708_cast, y = var_13710_cast)[name = tensor("attn_weights_553_cast")]; + tensor attn_weights_555_cast = mul(x = attn_weights_553_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_555_cast")]; + tensor var_13716_cast = softmax(axis = var_12502, x = attn_weights_555_cast)[name = tensor("op_13716_cast")]; + tensor attn_277_transpose_x_0 = const()[name = tensor("attn_277_transpose_x_0"), val = tensor(false)]; + tensor attn_277_transpose_y_0 = const()[name = tensor("attn_277_transpose_y_0"), val = tensor(true)]; + tensor attn_277_cast = matmul(transpose_x = attn_277_transpose_x_0, transpose_y = 
attn_277_transpose_y_0, x = var_13712_cast, y = var_13716_cast)[name = tensor("attn_277_cast")]; + tensor var_13720 = const()[name = tensor("op_13720"), val = tensor([2, 640, 1, -1])]; + tensor input_813_cast = reshape(shape = var_13720, x = attn_277_cast)[name = tensor("input_813_cast")]; + tensor var_13725 = const()[name = tensor("op_13725"), val = tensor([1, 1])]; + tensor var_13727 = const()[name = tensor("op_13727"), val = tensor([1, 1])]; + tensor var_13729_pad_type_0 = const()[name = tensor("op_13729_pad_type_0"), val = tensor("custom")]; + tensor var_13729_pad_0 = const()[name = tensor("op_13729_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1413396800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1413806464))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1413807040)))]; + tensor var_13729_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_13727, groups = var_12518, pad = var_13729_pad_0, pad_type = var_13729_pad_type_0, strides = var_13725, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_813_cast)[name = tensor("op_13729_cast")]; + tensor inputs_417_cast = add(x = var_13729_cast, y = inputs_415_cast)[name = tensor("inputs_417_cast")]; + tensor var_13733 = const()[name = tensor("op_13733"), val = tensor([1])]; + tensor channels_mean_417_cast = reduce_mean(axes = var_13733, keep_dims = var_12513, x = inputs_417_cast)[name = tensor("channels_mean_417_cast")]; + tensor zero_mean_417_cast = sub(x = inputs_417_cast, y = channels_mean_417_cast)[name = tensor("zero_mean_417_cast")]; + tensor zero_mean_sq_417_cast = mul(x = zero_mean_417_cast, y = zero_mean_417_cast)[name = tensor("zero_mean_sq_417_cast")]; + tensor var_13737 = const()[name = tensor("op_13737"), val = tensor([1])]; + tensor var_13738_cast = reduce_mean(axes = var_13737, keep_dims = var_12513, x = zero_mean_sq_417_cast)[name = tensor("op_13738_cast")]; + tensor var_13739_to_fp16 = const()[name = tensor("op_13739_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13740_cast = add(x = var_13738_cast, y = var_13739_to_fp16)[name = tensor("op_13740_cast")]; + tensor denom_417_epsilon_0_to_fp16 = const()[name = tensor("denom_417_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_417_cast = rsqrt(epsilon = denom_417_epsilon_0_to_fp16, x = var_13740_cast)[name = tensor("denom_417_cast")]; + tensor out_417_cast = mul(x = zero_mean_417_cast, y = denom_417_cast)[name = tensor("out_417_cast")]; + tensor var_13744_to_fp16 = const()[name = tensor("op_13744_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1413808384)))]; + tensor var_13745_cast = add(x = out_417_cast, y = var_13744_to_fp16)[name = tensor("op_13745_cast")]; + tensor var_13747_to_fp16 = const()[name = tensor("op_13747_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1413809728)))]; + tensor hidden_states_569_cast = mul(x = var_13745_cast, y = var_13747_to_fp16)[name = tensor("hidden_states_569_cast")]; + tensor var_13754 = const()[name = tensor("op_13754"), val = tensor([1, 1])]; + tensor var_13756 = const()[name = tensor("op_13756"), val = tensor([1, 1])]; + tensor q_pad_type_0 = const()[name = tensor("q_pad_type_0"), val = tensor("custom")]; + tensor q_pad_0 = const()[name = tensor("q_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1413811072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1414118336))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_cast = conv(dilations = var_13756, groups = var_12518, pad = q_pad_0, pad_type = q_pad_type_0, strides = var_13754, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_569_cast)[name = tensor("q_cast")]; + tensor var_13760 = const()[name = tensor("op_13760"), val = tensor([1, 1])]; + tensor var_13762 = const()[name = tensor("op_13762"), val = tensor([1, 1])]; + tensor k_pad_type_0 = const()[name = tensor("k_pad_type_0"), val = tensor("custom")]; + tensor k_pad_0 = const()[name = tensor("k_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1414118528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1415101632))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_cast = conv(dilations = var_13762, groups = var_12518, pad = k_pad_0, pad_type = k_pad_type_0, strides = var_13760, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_cast")]; + tensor var_13766 = const()[name = tensor("op_13766"), val = tensor([1, 1])]; + tensor var_13768 = const()[name = tensor("op_13768"), val = tensor([1, 1])]; + tensor v_pad_type_0 = const()[name = tensor("v_pad_type_0"), val = tensor("custom")]; + tensor v_pad_0 = const()[name = tensor("v_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1415101824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416084928))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_cast = conv(dilations = var_13768, groups = var_12518, pad = v_pad_0, pad_type = v_pad_type_0, strides = var_13766, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_cast")]; + tensor var_13772 = const()[name = tensor("op_13772"), val = tensor([2, 10, 64, -1])]; + tensor var_13773_cast = reshape(shape = var_13772, x = q_cast)[name = 
tensor("op_13773_cast")]; + tensor var_13774 = const()[name = tensor("op_13774"), val = tensor([2, 10, 64, -1])]; + tensor var_13775_cast = reshape(shape = var_13774, x = k_cast)[name = tensor("op_13775_cast")]; + tensor var_13776 = const()[name = tensor("op_13776"), val = tensor([2, 10, 64, -1])]; + tensor var_13777_cast = reshape(shape = var_13776, x = v_cast)[name = tensor("op_13777_cast")]; + tensor attn_weights_557_transpose_x_0 = const()[name = tensor("attn_weights_557_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_557_transpose_y_0 = const()[name = tensor("attn_weights_557_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_557_cast = matmul(transpose_x = attn_weights_557_transpose_x_0, transpose_y = attn_weights_557_transpose_y_0, x = var_13773_cast, y = var_13775_cast)[name = tensor("attn_weights_557_cast")]; + tensor attn_weights_cast = mul(x = attn_weights_557_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_cast")]; + tensor var_13781_cast = softmax(axis = var_12502, x = attn_weights_cast)[name = tensor("op_13781_cast")]; + tensor attn_transpose_x_0 = const()[name = tensor("attn_transpose_x_0"), val = tensor(false)]; + tensor attn_transpose_y_0 = const()[name = tensor("attn_transpose_y_0"), val = tensor(true)]; + tensor attn_cast = matmul(transpose_x = attn_transpose_x_0, transpose_y = attn_transpose_y_0, x = var_13777_cast, y = var_13781_cast)[name = tensor("attn_cast")]; + tensor var_13785 = const()[name = tensor("op_13785"), val = tensor([2, 640, 1, -1])]; + tensor input_815_cast = reshape(shape = var_13785, x = attn_cast)[name = tensor("input_815_cast")]; + tensor var_13790 = const()[name = tensor("op_13790"), val = tensor([1, 1])]; + tensor var_13792 = const()[name = tensor("op_13792"), val = tensor([1, 1])]; + tensor var_13794_pad_type_0 = const()[name = tensor("op_13794_pad_type_0"), val = tensor("custom")]; + tensor var_13794_pad_0 = const()[name = tensor("op_13794_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416085120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416392384))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416392576)))]; + tensor var_13794_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_13792, groups = var_12518, pad = var_13794_pad_0, pad_type = var_13794_pad_type_0, strides = var_13790, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_815_cast)[name = tensor("op_13794_cast")]; + tensor inputs_cast = add(x = var_13794_cast, y = inputs_417_cast)[name = tensor("inputs_cast")]; + tensor var_13798 = const()[name = tensor("op_13798"), val = tensor([1])]; + tensor channels_mean_cast = reduce_mean(axes = var_13798, keep_dims = var_12513, x = inputs_cast)[name = tensor("channels_mean_cast")]; + tensor zero_mean_cast = sub(x = inputs_cast, y = channels_mean_cast)[name = tensor("zero_mean_cast")]; + tensor 
zero_mean_sq_cast = mul(x = zero_mean_cast, y = zero_mean_cast)[name = tensor("zero_mean_sq_cast")]; + tensor var_13802 = const()[name = tensor("op_13802"), val = tensor([1])]; + tensor var_13803_cast = reduce_mean(axes = var_13802, keep_dims = var_12513, x = zero_mean_sq_cast)[name = tensor("op_13803_cast")]; + tensor var_13804_to_fp16 = const()[name = tensor("op_13804_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13805_cast = add(x = var_13803_cast, y = var_13804_to_fp16)[name = tensor("op_13805_cast")]; + tensor denom_epsilon_0_to_fp16 = const()[name = tensor("denom_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_cast = rsqrt(epsilon = denom_epsilon_0_to_fp16, x = var_13805_cast)[name = tensor("denom_cast")]; + tensor out_cast = mul(x = zero_mean_cast, y = denom_cast)[name = tensor("out_cast")]; + tensor var_13809_to_fp16 = const()[name = tensor("op_13809_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416393920)))]; + tensor var_13810_cast = add(x = out_cast, y = var_13809_to_fp16)[name = tensor("op_13810_cast")]; + tensor var_13812_to_fp16 = const()[name = tensor("op_13812_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416395264)))]; + tensor input_817_cast = mul(x = var_13810_cast, y = var_13812_to_fp16)[name = tensor("input_817_cast")]; + tensor var_13820 = const()[name = tensor("op_13820"), val = tensor([1, 1])]; + tensor var_13822 = const()[name = tensor("op_13822"), val = tensor([1, 1])]; + tensor var_13824_pad_type_0 = const()[name = tensor("op_13824_pad_type_0"), val = tensor("custom")]; + tensor var_13824_pad_0 = const()[name = tensor("op_13824_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416396608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1419673472))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([5120, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1419674048)))]; + tensor var_13824_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_13822, groups = var_12518, pad = var_13824_pad_0, pad_type = var_13824_pad_type_0, strides = var_13820, weight = up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_817_cast)[name = tensor("op_13824_cast")]; + tensor var_13825_split_sizes_0 = const()[name = tensor("op_13825_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_13825_axis_0 = const()[name = tensor("op_13825_axis_0"), val = tensor(1)]; + tensor var_13825_cast_0, tensor var_13825_cast_1 = split(axis = var_13825_axis_0, split_sizes = var_13825_split_sizes_0, x = var_13824_cast)[name = tensor("op_13825_cast")]; + tensor var_13827_mode_0 = const()[name = tensor("op_13827_mode_0"), val = tensor("EXACT")]; + tensor var_13827_cast = gelu(mode = var_13827_mode_0, x = var_13825_cast_1)[name = tensor("op_13827_cast")]; + tensor input_819_cast = mul(x = var_13825_cast_0, y = var_13827_cast)[name = 
tensor("input_819_cast")]; + tensor var_13831 = const()[name = tensor("op_13831"), val = tensor([1, 1])]; + tensor var_13833 = const()[name = tensor("op_13833"), val = tensor([1, 1])]; + tensor var_13835_pad_type_0 = const()[name = tensor("op_13835_pad_type_0"), val = tensor("custom")]; + tensor var_13835_pad_0 = const()[name = tensor("op_13835_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1419684352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1421322816))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([640, 2560, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1421323392)))]; + tensor var_13835_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_13833, groups = var_12518, pad = var_13835_pad_0, pad_type = var_13835_pad_type_0, strides = var_13831, weight = up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_819_cast)[name = tensor("op_13835_cast")]; + tensor hidden_states_573_cast = add(x = var_13835_cast, y = inputs_cast)[name = tensor("hidden_states_573_cast")]; + tensor var_13837 = const()[name = tensor("op_13837"), val = tensor([2, 640, 64, 64])]; + tensor input_821_cast = reshape(shape = var_13837, x = hidden_states_573_cast)[name = tensor("input_821_cast")]; + tensor var_13841 = const()[name = tensor("op_13841"), val = tensor([1, 1])]; + tensor var_13843 = const()[name = tensor("op_13843"), val = tensor([1, 1])]; + tensor hidden_states_575_pad_type_0 = const()[name = tensor("hidden_states_575_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_575_pad_0 = const()[name = tensor("hidden_states_575_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1421324736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1421734400))), name = tensor("up_blocks_1_attentions_2_proj_out_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1421734976)))]; + tensor hidden_states_575_cast = conv(bias = up_blocks_1_attentions_2_proj_out_bias_to_fp16, dilations = var_13843, groups = var_12518, pad = hidden_states_575_pad_0, pad_type = hidden_states_575_pad_type_0, strides = var_13841, weight = up_blocks_1_attentions_2_proj_out_weight_to_fp16_palettized, x = input_821_cast)[name = tensor("hidden_states_575_cast")]; + tensor input_823_cast = add(x = hidden_states_575_cast, y = hidden_states_557_cast)[name = tensor("input_823_cast")]; + tensor input_825_scale_factor_height_0 = const()[name = tensor("input_825_scale_factor_height_0"), val = tensor(0x1p+1)]; + tensor input_825_scale_factor_width_0 = const()[name = 
tensor("input_825_scale_factor_width_0"), val = tensor(0x1p+1)]; + tensor input_825_cast = upsample_nearest_neighbor(scale_factor_height = input_825_scale_factor_height_0, scale_factor_width = input_825_scale_factor_width_0, x = input_823_cast)[name = tensor("input_825_cast")]; + tensor var_13852 = const()[name = tensor("op_13852"), val = tensor([1, 1])]; + tensor var_13854 = const()[name = tensor("op_13854"), val = tensor([1, 1])]; + tensor hidden_states_577_pad_type_0 = const()[name = tensor("hidden_states_577_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_577_pad_0 = const()[name = tensor("hidden_states_577_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_upsamplers_0_conv_weight_to_fp16 = const()[name = tensor("up_blocks_1_upsamplers_0_conv_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1421736320)))]; + tensor up_blocks_1_upsamplers_0_conv_bias_to_fp16 = const()[name = tensor("up_blocks_1_upsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1429109184)))]; + tensor hidden_states_577_cast = conv(bias = up_blocks_1_upsamplers_0_conv_bias_to_fp16, dilations = var_13854, groups = var_12518, pad = hidden_states_577_pad_0, pad_type = hidden_states_577_pad_type_0, strides = var_13852, weight = up_blocks_1_upsamplers_0_conv_weight_to_fp16, x = input_825_cast)[name = tensor("hidden_states_577_cast")]; + tensor var_13862 = const()[name = tensor("op_13862"), val = tensor(1)]; + tensor input_827_interleave_0 = const()[name = tensor("input_827_interleave_0"), val = tensor(false)]; + tensor input_827_cast = concat(axis = var_13862, interleave = input_827_interleave_0, values = (hidden_states_577_cast, input_43_cast))[name = tensor("input_827_cast")]; + tensor reshape_156_shape_0 = const()[name = tensor("reshape_156_shape_0"), val = tensor([2, 32, 30, 128, 128])]; + tensor reshape_156_cast = reshape(shape = reshape_156_shape_0, x = input_827_cast)[name = tensor("reshape_156_cast")]; + tensor reduce_mean_117_axes_0 = const()[name = tensor("reduce_mean_117_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_117_keep_dims_0 = const()[name = tensor("reduce_mean_117_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_117_cast = reduce_mean(axes = reduce_mean_117_axes_0, keep_dims = reduce_mean_117_keep_dims_0, x = reshape_156_cast)[name = tensor("reduce_mean_117_cast")]; + tensor sub_78_cast = sub(x = reshape_156_cast, y = reduce_mean_117_cast)[name = tensor("sub_78_cast")]; + tensor square_39_cast = square(x = sub_78_cast)[name = tensor("square_39_cast")]; + tensor reduce_mean_119_axes_0 = const()[name = tensor("reduce_mean_119_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_119_keep_dims_0 = const()[name = tensor("reduce_mean_119_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_119_cast = reduce_mean(axes = reduce_mean_119_axes_0, keep_dims = reduce_mean_119_keep_dims_0, x = square_39_cast)[name = tensor("reduce_mean_119_cast")]; + tensor add_78_y_0_to_fp16 = const()[name = tensor("add_78_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_78_cast = add(x = reduce_mean_119_cast, y = add_78_y_0_to_fp16)[name = tensor("add_78_cast")]; + tensor sqrt_39_cast = sqrt(x = add_78_cast)[name = tensor("sqrt_39_cast")]; + tensor real_div_39_cast = real_div(x = sub_78_cast, y = sqrt_39_cast)[name = tensor("real_div_39_cast")]; + tensor reshape_157_shape_0 = const()[name = tensor("reshape_157_shape_0"), val = tensor([2, 960, 128, 128])]; 
+ tensor reshape_157_cast = reshape(shape = reshape_157_shape_0, x = real_div_39_cast)[name = tensor("reshape_157_cast")]; + tensor add_79_gamma_0_to_fp16 = const()[name = tensor("add_79_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1429110528)))]; + tensor add_79_beta_0_to_fp16 = const()[name = tensor("add_79_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1429112512)))]; + tensor add_79_epsilon_0_to_fp16 = const()[name = tensor("add_79_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_79_cast = batch_norm(beta = add_79_beta_0_to_fp16, epsilon = add_79_epsilon_0_to_fp16, gamma = add_79_gamma_0_to_fp16, mean = add_73_mean_0_to_fp16, variance = add_73_variance_0_to_fp16, x = reshape_157_cast)[name = tensor("add_79_cast")]; + tensor input_831_cast = silu(x = add_79_cast)[name = tensor("input_831_cast")]; + tensor var_13883 = const()[name = tensor("op_13883"), val = tensor([1, 1])]; + tensor var_13885 = const()[name = tensor("op_13885"), val = tensor([1, 1])]; + tensor hidden_states_579_pad_type_0 = const()[name = tensor("hidden_states_579_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_579_pad_0 = const()[name = tensor("hidden_states_579_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_0_conv1_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1429114496)))]; + tensor up_blocks_2_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1434644160)))]; + tensor hidden_states_579_cast = conv(bias = up_blocks_2_resnets_0_conv1_bias_to_fp16, dilations = var_13885, groups = var_13862, pad = hidden_states_579_pad_0, pad_type = hidden_states_579_pad_type_0, strides = var_13883, weight = up_blocks_2_resnets_0_conv1_weight_to_fp16, x = input_831_cast)[name = tensor("hidden_states_579_cast")]; + tensor var_13891 = const()[name = tensor("op_13891"), val = tensor([1, 1])]; + tensor var_13893 = const()[name = tensor("op_13893"), val = tensor([1, 1])]; + tensor temb_29_pad_type_0 = const()[name = tensor("temb_29_pad_type_0"), val = tensor("custom")]; + tensor temb_29_pad_0 = const()[name = tensor("temb_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1434644864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1435054528))), name = tensor("up_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([320, 1280, 1, 1])]; + tensor up_blocks_2_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1435055104)))]; + tensor temb_29_cast = conv(bias = up_blocks_2_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_13893, groups = var_13862, pad = temb_29_pad_0, pad_type = temb_29_pad_type_0, strides = var_13891, weight = up_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_29_cast")]; + tensor input_835_cast = add(x = hidden_states_579_cast, y = temb_29_cast)[name = 
tensor("input_835_cast")]; + tensor reshape_160_shape_0 = const()[name = tensor("reshape_160_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_160_cast = reshape(shape = reshape_160_shape_0, x = input_835_cast)[name = tensor("reshape_160_cast")]; + tensor reduce_mean_120_axes_0 = const()[name = tensor("reduce_mean_120_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_120_keep_dims_0 = const()[name = tensor("reduce_mean_120_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_120_cast = reduce_mean(axes = reduce_mean_120_axes_0, keep_dims = reduce_mean_120_keep_dims_0, x = reshape_160_cast)[name = tensor("reduce_mean_120_cast")]; + tensor sub_80_cast = sub(x = reshape_160_cast, y = reduce_mean_120_cast)[name = tensor("sub_80_cast")]; + tensor square_40_cast = square(x = sub_80_cast)[name = tensor("square_40_cast")]; + tensor reduce_mean_122_axes_0 = const()[name = tensor("reduce_mean_122_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_122_keep_dims_0 = const()[name = tensor("reduce_mean_122_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_122_cast = reduce_mean(axes = reduce_mean_122_axes_0, keep_dims = reduce_mean_122_keep_dims_0, x = square_40_cast)[name = tensor("reduce_mean_122_cast")]; + tensor add_80_y_0_to_fp16 = const()[name = tensor("add_80_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_80_cast = add(x = reduce_mean_122_cast, y = add_80_y_0_to_fp16)[name = tensor("add_80_cast")]; + tensor sqrt_40_cast = sqrt(x = add_80_cast)[name = tensor("sqrt_40_cast")]; + tensor real_div_40_cast = real_div(x = sub_80_cast, y = sqrt_40_cast)[name = tensor("real_div_40_cast")]; + tensor reshape_161_shape_0 = const()[name = tensor("reshape_161_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_161_cast = reshape(shape = reshape_161_shape_0, x = real_div_40_cast)[name = tensor("reshape_161_cast")]; + tensor add_81_gamma_0_to_fp16 = const()[name = tensor("add_81_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1435055808)))]; + tensor add_81_beta_0_to_fp16 = const()[name = tensor("add_81_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1435056512)))]; + tensor add_81_epsilon_0_to_fp16 = const()[name = tensor("add_81_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_81_cast = batch_norm(beta = add_81_beta_0_to_fp16, epsilon = add_81_epsilon_0_to_fp16, gamma = add_81_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_161_cast)[name = tensor("add_81_cast")]; + tensor input_839_cast = silu(x = add_81_cast)[name = tensor("input_839_cast")]; + tensor var_13903 = const()[name = tensor("op_13903"), val = tensor([1, 1])]; + tensor var_13905 = const()[name = tensor("op_13905"), val = tensor([1, 1])]; + tensor hidden_states_581_pad_type_0 = const()[name = tensor("hidden_states_581_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_581_pad_0 = const()[name = tensor("hidden_states_581_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_0_conv2_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1435057216)))]; + tensor up_blocks_2_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1436900480)))]; + tensor 
hidden_states_581_cast = conv(bias = up_blocks_2_resnets_0_conv2_bias_to_fp16, dilations = var_13905, groups = var_13862, pad = hidden_states_581_pad_0, pad_type = hidden_states_581_pad_type_0, strides = var_13903, weight = up_blocks_2_resnets_0_conv2_weight_to_fp16, x = input_839_cast)[name = tensor("hidden_states_581_cast")]; + tensor var_13910 = const()[name = tensor("op_13910"), val = tensor([1, 1])]; + tensor var_13912 = const()[name = tensor("op_13912"), val = tensor([1, 1])]; + tensor x_17_pad_type_0 = const()[name = tensor("x_17_pad_type_0"), val = tensor("custom")]; + tensor x_17_pad_0 = const()[name = tensor("x_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_0_conv_shortcut_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv_shortcut_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1436901184)))]; + tensor up_blocks_2_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1437515648)))]; + tensor x_17_cast = conv(bias = up_blocks_2_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_13912, groups = var_13862, pad = x_17_pad_0, pad_type = x_17_pad_type_0, strides = var_13910, weight = up_blocks_2_resnets_0_conv_shortcut_weight_to_fp16, x = input_827_cast)[name = tensor("x_17_cast")]; + tensor hidden_states_583_cast = add(x = x_17_cast, y = hidden_states_581_cast)[name = tensor("hidden_states_583_cast")]; + tensor input_841_interleave_0 = const()[name = tensor("input_841_interleave_0"), val = tensor(false)]; + tensor input_841_cast = concat(axis = var_13862, interleave = input_841_interleave_0, values = (hidden_states_583_cast, input_29_cast))[name = tensor("input_841_cast")]; + tensor reshape_164_shape_0 = const()[name = tensor("reshape_164_shape_0"), val = tensor([2, 32, 20, 128, 128])]; + tensor reshape_164_cast = reshape(shape = reshape_164_shape_0, x = input_841_cast)[name = tensor("reshape_164_cast")]; + tensor reduce_mean_123_axes_0 = const()[name = tensor("reduce_mean_123_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_123_keep_dims_0 = const()[name = tensor("reduce_mean_123_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_123_cast = reduce_mean(axes = reduce_mean_123_axes_0, keep_dims = reduce_mean_123_keep_dims_0, x = reshape_164_cast)[name = tensor("reduce_mean_123_cast")]; + tensor sub_82_cast = sub(x = reshape_164_cast, y = reduce_mean_123_cast)[name = tensor("sub_82_cast")]; + tensor square_41_cast = square(x = sub_82_cast)[name = tensor("square_41_cast")]; + tensor reduce_mean_125_axes_0 = const()[name = tensor("reduce_mean_125_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_125_keep_dims_0 = const()[name = tensor("reduce_mean_125_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_125_cast = reduce_mean(axes = reduce_mean_125_axes_0, keep_dims = reduce_mean_125_keep_dims_0, x = square_41_cast)[name = tensor("reduce_mean_125_cast")]; + tensor add_82_y_0_to_fp16 = const()[name = tensor("add_82_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_82_cast = add(x = reduce_mean_125_cast, y = add_82_y_0_to_fp16)[name = tensor("add_82_cast")]; + tensor sqrt_41_cast = sqrt(x = add_82_cast)[name = tensor("sqrt_41_cast")]; + tensor real_div_41_cast = real_div(x = sub_82_cast, y = sqrt_41_cast)[name = tensor("real_div_41_cast")]; + tensor reshape_165_shape_0 = const()[name = tensor("reshape_165_shape_0"), val 
= tensor([2, 640, 128, 128])]; + tensor reshape_165_cast = reshape(shape = reshape_165_shape_0, x = real_div_41_cast)[name = tensor("reshape_165_cast")]; + tensor add_83_gamma_0_to_fp16 = const()[name = tensor("add_83_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1437516352)))]; + tensor add_83_beta_0_to_fp16 = const()[name = tensor("add_83_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1437517696)))]; + tensor add_83_epsilon_0_to_fp16 = const()[name = tensor("add_83_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_83_cast = batch_norm(beta = add_83_beta_0_to_fp16, epsilon = add_83_epsilon_0_to_fp16, gamma = add_83_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_165_cast)[name = tensor("add_83_cast")]; + tensor input_845_cast = silu(x = add_83_cast)[name = tensor("input_845_cast")]; + tensor var_13930 = const()[name = tensor("op_13930"), val = tensor([1, 1])]; + tensor var_13932 = const()[name = tensor("op_13932"), val = tensor([1, 1])]; + tensor hidden_states_585_pad_type_0 = const()[name = tensor("hidden_states_585_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_585_pad_0 = const()[name = tensor("hidden_states_585_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_1_conv1_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1437519040)))]; + tensor up_blocks_2_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1441205504)))]; + tensor hidden_states_585_cast = conv(bias = up_blocks_2_resnets_1_conv1_bias_to_fp16, dilations = var_13932, groups = var_13862, pad = hidden_states_585_pad_0, pad_type = hidden_states_585_pad_type_0, strides = var_13930, weight = up_blocks_2_resnets_1_conv1_weight_to_fp16, x = input_845_cast)[name = tensor("hidden_states_585_cast")]; + tensor var_13938 = const()[name = tensor("op_13938"), val = tensor([1, 1])]; + tensor var_13940 = const()[name = tensor("op_13940"), val = tensor([1, 1])]; + tensor temb_31_pad_type_0 = const()[name = tensor("temb_31_pad_type_0"), val = tensor("custom")]; + tensor temb_31_pad_0 = const()[name = tensor("temb_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1441206208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1441615872))), name = tensor("up_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([320, 1280, 1, 1])]; + tensor up_blocks_2_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1441616448)))]; + tensor temb_31_cast = conv(bias = up_blocks_2_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_13940, groups = var_13862, pad = temb_31_pad_0, pad_type = temb_31_pad_type_0, strides = var_13938, weight = up_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_31_cast")]; + tensor input_849_cast = add(x = hidden_states_585_cast, y = 
temb_31_cast)[name = tensor("input_849_cast")]; + tensor reshape_168_shape_0 = const()[name = tensor("reshape_168_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_168_cast = reshape(shape = reshape_168_shape_0, x = input_849_cast)[name = tensor("reshape_168_cast")]; + tensor reduce_mean_126_axes_0 = const()[name = tensor("reduce_mean_126_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_126_keep_dims_0 = const()[name = tensor("reduce_mean_126_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_126_cast = reduce_mean(axes = reduce_mean_126_axes_0, keep_dims = reduce_mean_126_keep_dims_0, x = reshape_168_cast)[name = tensor("reduce_mean_126_cast")]; + tensor sub_84_cast = sub(x = reshape_168_cast, y = reduce_mean_126_cast)[name = tensor("sub_84_cast")]; + tensor square_42_cast = square(x = sub_84_cast)[name = tensor("square_42_cast")]; + tensor reduce_mean_128_axes_0 = const()[name = tensor("reduce_mean_128_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_128_keep_dims_0 = const()[name = tensor("reduce_mean_128_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_128_cast = reduce_mean(axes = reduce_mean_128_axes_0, keep_dims = reduce_mean_128_keep_dims_0, x = square_42_cast)[name = tensor("reduce_mean_128_cast")]; + tensor add_84_y_0_to_fp16 = const()[name = tensor("add_84_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_84_cast = add(x = reduce_mean_128_cast, y = add_84_y_0_to_fp16)[name = tensor("add_84_cast")]; + tensor sqrt_42_cast = sqrt(x = add_84_cast)[name = tensor("sqrt_42_cast")]; + tensor real_div_42_cast = real_div(x = sub_84_cast, y = sqrt_42_cast)[name = tensor("real_div_42_cast")]; + tensor reshape_169_shape_0 = const()[name = tensor("reshape_169_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_169_cast = reshape(shape = reshape_169_shape_0, x = real_div_42_cast)[name = tensor("reshape_169_cast")]; + tensor add_85_gamma_0_to_fp16 = const()[name = tensor("add_85_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1441617152)))]; + tensor add_85_beta_0_to_fp16 = const()[name = tensor("add_85_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1441617856)))]; + tensor add_85_epsilon_0_to_fp16 = const()[name = tensor("add_85_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_85_cast = batch_norm(beta = add_85_beta_0_to_fp16, epsilon = add_85_epsilon_0_to_fp16, gamma = add_85_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_169_cast)[name = tensor("add_85_cast")]; + tensor input_853_cast = silu(x = add_85_cast)[name = tensor("input_853_cast")]; + tensor var_13950 = const()[name = tensor("op_13950"), val = tensor([1, 1])]; + tensor var_13952 = const()[name = tensor("op_13952"), val = tensor([1, 1])]; + tensor hidden_states_587_pad_type_0 = const()[name = tensor("hidden_states_587_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_587_pad_0 = const()[name = tensor("hidden_states_587_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_1_conv2_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1441618560)))]; + tensor up_blocks_2_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1443461824)))]; + tensor hidden_states_587_cast = conv(bias = up_blocks_2_resnets_1_conv2_bias_to_fp16, dilations = var_13952, groups = var_13862, pad = hidden_states_587_pad_0, pad_type = hidden_states_587_pad_type_0, strides = var_13950, weight = up_blocks_2_resnets_1_conv2_weight_to_fp16, x = input_853_cast)[name = tensor("hidden_states_587_cast")]; + tensor var_13957 = const()[name = tensor("op_13957"), val = tensor([1, 1])]; + tensor var_13959 = const()[name = tensor("op_13959"), val = tensor([1, 1])]; + tensor x_19_pad_type_0 = const()[name = tensor("x_19_pad_type_0"), val = tensor("custom")]; + tensor x_19_pad_0 = const()[name = tensor("x_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_1_conv_shortcut_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_conv_shortcut_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1443462528)))]; + tensor up_blocks_2_resnets_1_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1443872192)))]; + tensor x_19_cast = conv(bias = up_blocks_2_resnets_1_conv_shortcut_bias_to_fp16, dilations = var_13959, groups = var_13862, pad = x_19_pad_0, pad_type = x_19_pad_type_0, strides = var_13957, weight = up_blocks_2_resnets_1_conv_shortcut_weight_to_fp16, x = input_841_cast)[name = tensor("x_19_cast")]; + tensor hidden_states_589_cast = add(x = x_19_cast, y = hidden_states_587_cast)[name = tensor("hidden_states_589_cast")]; + tensor input_855_interleave_0 = const()[name = tensor("input_855_interleave_0"), val = tensor(false)]; + tensor input_855_cast = concat(axis = var_13862, interleave = input_855_interleave_0, values = (hidden_states_589_cast, input_13_cast))[name = tensor("input_855_cast")]; + tensor reshape_172_shape_0 = const()[name = tensor("reshape_172_shape_0"), val = tensor([2, 32, 20, 128, 128])]; + tensor reshape_172_cast = reshape(shape = reshape_172_shape_0, x = input_855_cast)[name = tensor("reshape_172_cast")]; + tensor reduce_mean_129_axes_0 = const()[name = tensor("reduce_mean_129_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_129_keep_dims_0 = const()[name = tensor("reduce_mean_129_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_129_cast = reduce_mean(axes = reduce_mean_129_axes_0, keep_dims = reduce_mean_129_keep_dims_0, x = reshape_172_cast)[name = tensor("reduce_mean_129_cast")]; + tensor sub_86_cast = sub(x = reshape_172_cast, y = reduce_mean_129_cast)[name = tensor("sub_86_cast")]; + tensor square_43_cast = square(x = sub_86_cast)[name = tensor("square_43_cast")]; + tensor reduce_mean_131_axes_0 = const()[name = tensor("reduce_mean_131_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_131_keep_dims_0 = const()[name = tensor("reduce_mean_131_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_131_cast = reduce_mean(axes = reduce_mean_131_axes_0, keep_dims = reduce_mean_131_keep_dims_0, x = square_43_cast)[name = tensor("reduce_mean_131_cast")]; + tensor add_86_y_0_to_fp16 = const()[name = tensor("add_86_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_86_cast = add(x = reduce_mean_131_cast, y = add_86_y_0_to_fp16)[name = tensor("add_86_cast")]; + tensor sqrt_43_cast = sqrt(x = add_86_cast)[name = tensor("sqrt_43_cast")]; + tensor real_div_43_cast = real_div(x = sub_86_cast, y = sqrt_43_cast)[name = tensor("real_div_43_cast")]; + tensor reshape_173_shape_0 = const()[name = 
tensor("reshape_173_shape_0"), val = tensor([2, 640, 128, 128])]; + tensor reshape_173_cast = reshape(shape = reshape_173_shape_0, x = real_div_43_cast)[name = tensor("reshape_173_cast")]; + tensor add_87_gamma_0_to_fp16 = const()[name = tensor("add_87_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1443872896)))]; + tensor add_87_beta_0_to_fp16 = const()[name = tensor("add_87_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1443874240)))]; + tensor add_87_epsilon_0_to_fp16 = const()[name = tensor("add_87_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_87_cast = batch_norm(beta = add_87_beta_0_to_fp16, epsilon = add_87_epsilon_0_to_fp16, gamma = add_87_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_173_cast)[name = tensor("add_87_cast")]; + tensor input_859_cast = silu(x = add_87_cast)[name = tensor("input_859_cast")]; + tensor var_13977 = const()[name = tensor("op_13977"), val = tensor([1, 1])]; + tensor var_13979 = const()[name = tensor("op_13979"), val = tensor([1, 1])]; + tensor hidden_states_591_pad_type_0 = const()[name = tensor("hidden_states_591_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_591_pad_0 = const()[name = tensor("hidden_states_591_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_2_conv1_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1443875584)))]; + tensor up_blocks_2_resnets_2_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1447562048)))]; + tensor hidden_states_591_cast = conv(bias = up_blocks_2_resnets_2_conv1_bias_to_fp16, dilations = var_13979, groups = var_13862, pad = hidden_states_591_pad_0, pad_type = hidden_states_591_pad_type_0, strides = var_13977, weight = up_blocks_2_resnets_2_conv1_weight_to_fp16, x = input_859_cast)[name = tensor("hidden_states_591_cast")]; + tensor var_13985 = const()[name = tensor("op_13985"), val = tensor([1, 1])]; + tensor var_13987 = const()[name = tensor("op_13987"), val = tensor([1, 1])]; + tensor temb_pad_type_0 = const()[name = tensor("temb_pad_type_0"), val = tensor("custom")]; + tensor temb_pad_0 = const()[name = tensor("temb_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_2_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1447562752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1447972416))), name = tensor("up_blocks_2_resnets_2_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([320, 1280, 1, 1])]; + tensor up_blocks_2_resnets_2_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1447972992)))]; + tensor temb_cast = conv(bias = up_blocks_2_resnets_2_time_emb_proj_bias_to_fp16, dilations = var_13987, groups = var_13862, pad = temb_pad_0, pad_type = temb_pad_type_0, strides = var_13985, weight = up_blocks_2_resnets_2_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_cast")]; + tensor input_863_cast = add(x = 
hidden_states_591_cast, y = temb_cast)[name = tensor("input_863_cast")]; + tensor reshape_176_shape_0 = const()[name = tensor("reshape_176_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_176_cast = reshape(shape = reshape_176_shape_0, x = input_863_cast)[name = tensor("reshape_176_cast")]; + tensor reduce_mean_132_axes_0 = const()[name = tensor("reduce_mean_132_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_132_keep_dims_0 = const()[name = tensor("reduce_mean_132_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_132_cast = reduce_mean(axes = reduce_mean_132_axes_0, keep_dims = reduce_mean_132_keep_dims_0, x = reshape_176_cast)[name = tensor("reduce_mean_132_cast")]; + tensor sub_88_cast = sub(x = reshape_176_cast, y = reduce_mean_132_cast)[name = tensor("sub_88_cast")]; + tensor square_44_cast = square(x = sub_88_cast)[name = tensor("square_44_cast")]; + tensor reduce_mean_134_axes_0 = const()[name = tensor("reduce_mean_134_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_134_keep_dims_0 = const()[name = tensor("reduce_mean_134_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_134_cast = reduce_mean(axes = reduce_mean_134_axes_0, keep_dims = reduce_mean_134_keep_dims_0, x = square_44_cast)[name = tensor("reduce_mean_134_cast")]; + tensor add_88_y_0_to_fp16 = const()[name = tensor("add_88_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_88_cast = add(x = reduce_mean_134_cast, y = add_88_y_0_to_fp16)[name = tensor("add_88_cast")]; + tensor sqrt_44_cast = sqrt(x = add_88_cast)[name = tensor("sqrt_44_cast")]; + tensor real_div_44_cast = real_div(x = sub_88_cast, y = sqrt_44_cast)[name = tensor("real_div_44_cast")]; + tensor reshape_177_shape_0 = const()[name = tensor("reshape_177_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_177_cast = reshape(shape = reshape_177_shape_0, x = real_div_44_cast)[name = tensor("reshape_177_cast")]; + tensor add_89_gamma_0_to_fp16 = const()[name = tensor("add_89_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1447973696)))]; + tensor add_89_beta_0_to_fp16 = const()[name = tensor("add_89_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1447974400)))]; + tensor add_89_epsilon_0_to_fp16 = const()[name = tensor("add_89_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_89_cast = batch_norm(beta = add_89_beta_0_to_fp16, epsilon = add_89_epsilon_0_to_fp16, gamma = add_89_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_177_cast)[name = tensor("add_89_cast")]; + tensor input_867_cast = silu(x = add_89_cast)[name = tensor("input_867_cast")]; + tensor var_13997 = const()[name = tensor("op_13997"), val = tensor([1, 1])]; + tensor var_13999 = const()[name = tensor("op_13999"), val = tensor([1, 1])]; + tensor hidden_states_pad_type_0 = const()[name = tensor("hidden_states_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_pad_0 = const()[name = tensor("hidden_states_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_2_conv2_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1447975104)))]; + tensor up_blocks_2_resnets_2_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1449818368)))]; + tensor hidden_states_cast = conv(bias = up_blocks_2_resnets_2_conv2_bias_to_fp16, dilations = var_13999, groups = var_13862, pad = hidden_states_pad_0, pad_type = hidden_states_pad_type_0, strides = var_13997, weight = up_blocks_2_resnets_2_conv2_weight_to_fp16, x = input_867_cast)[name = tensor("hidden_states_cast")]; + tensor var_14004 = const()[name = tensor("op_14004"), val = tensor([1, 1])]; + tensor var_14006 = const()[name = tensor("op_14006"), val = tensor([1, 1])]; + tensor x_pad_type_0 = const()[name = tensor("x_pad_type_0"), val = tensor("custom")]; + tensor x_pad_0 = const()[name = tensor("x_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_2_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1449819072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1450023936))), name = tensor("up_blocks_2_resnets_2_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([320, 640, 1, 1])]; + tensor up_blocks_2_resnets_2_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1450024512)))]; + tensor x_cast = conv(bias = up_blocks_2_resnets_2_conv_shortcut_bias_to_fp16, dilations = var_14006, groups = var_13862, pad = x_pad_0, pad_type = x_pad_type_0, strides = var_14004, weight = up_blocks_2_resnets_2_conv_shortcut_weight_to_fp16_palettized, x = input_855_cast)[name = tensor("x_cast")]; + tensor input_869_cast = add(x = x_cast, y = hidden_states_cast)[name = tensor("input_869_cast")]; + tensor reshape_180_shape_0 = const()[name = tensor("reshape_180_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_180_cast = reshape(shape = reshape_180_shape_0, x = input_869_cast)[name = tensor("reshape_180_cast")]; + tensor reduce_mean_135_axes_0 = const()[name = tensor("reduce_mean_135_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_135_keep_dims_0 = const()[name = tensor("reduce_mean_135_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_135_cast = reduce_mean(axes = reduce_mean_135_axes_0, keep_dims = reduce_mean_135_keep_dims_0, x = reshape_180_cast)[name = tensor("reduce_mean_135_cast")]; + tensor sub_90_cast = sub(x = reshape_180_cast, y = reduce_mean_135_cast)[name = tensor("sub_90_cast")]; + tensor square_45_cast = square(x = sub_90_cast)[name = tensor("square_45_cast")]; + tensor reduce_mean_137_axes_0 = const()[name = tensor("reduce_mean_137_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_137_keep_dims_0 = const()[name = tensor("reduce_mean_137_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_137_cast = reduce_mean(axes = reduce_mean_137_axes_0, keep_dims = reduce_mean_137_keep_dims_0, x = square_45_cast)[name = tensor("reduce_mean_137_cast")]; + tensor add_90_y_0_to_fp16 = const()[name = tensor("add_90_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_90_cast = add(x = reduce_mean_137_cast, y = add_90_y_0_to_fp16)[name = tensor("add_90_cast")]; + tensor sqrt_45_cast = sqrt(x = add_90_cast)[name = tensor("sqrt_45_cast")]; + tensor real_div_45_cast = real_div(x = sub_90_cast, y = sqrt_45_cast)[name = tensor("real_div_45_cast")]; + tensor reshape_181_shape_0 = const()[name = tensor("reshape_181_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_181_cast = reshape(shape = reshape_181_shape_0, x = 
real_div_45_cast)[name = tensor("reshape_181_cast")]; + tensor add_91_gamma_0_to_fp16 = const()[name = tensor("add_91_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1450025216)))]; + tensor add_91_beta_0_to_fp16 = const()[name = tensor("add_91_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1450025920)))]; + tensor add_91_epsilon_0_to_fp16 = const()[name = tensor("add_91_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_91_cast = batch_norm(beta = add_91_beta_0_to_fp16, epsilon = add_91_epsilon_0_to_fp16, gamma = add_91_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_181_cast)[name = tensor("add_91_cast")]; + tensor input_cast = silu(x = add_91_cast)[name = tensor("input_cast")]; + tensor var_14020 = const()[name = tensor("op_14020"), val = tensor(1)]; + tensor var_14023 = const()[name = tensor("op_14023"), val = tensor([1, 1])]; + tensor var_14025 = const()[name = tensor("op_14025"), val = tensor([1, 1])]; + tensor var_14027_pad_type_0 = const()[name = tensor("op_14027_pad_type_0"), val = tensor("custom")]; + tensor var_14027_pad_0 = const()[name = tensor("op_14027_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor conv_out_weight_to_fp16 = const()[name = tensor("conv_out_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1450026624)))]; + tensor conv_out_bias_to_fp16 = const()[name = tensor("conv_out_bias_to_fp16"), val = tensor([0x1.664p-9, -0x1.72p-10, 0x1.06p-9, -0x1.9b8p-9])]; + tensor var_14027_cast = conv(bias = conv_out_bias_to_fp16, dilations = var_14025, groups = var_14020, pad = var_14027_pad_0, pad_type = var_14027_pad_type_0, strides = var_14023, weight = conv_out_weight_to_fp16, x = input_cast)[name = tensor("op_14027_cast")]; + tensor var_14027_cast_to_fp32_dtype_0 = const()[name = tensor("op_14027_cast_to_fp32_dtype_0"), val = tensor("fp32")]; + tensor noise_pred = cast(dtype = var_14027_cast_to_fp32_dtype_0, x = var_14027_cast)[name = tensor("cast_0")]; + } -> (noise_pred); +} \ No newline at end of file diff --git a/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlmodelc/weights/weight.bin b/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlmodelc/weights/weight.bin new file mode 100644 index 0000000000000000000000000000000000000000..11a02126a20d91b2bfba01311502816d2bdfae06 --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlmodelc/weights/weight.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9219e9fcaa60ff76a31ea84658b485264b50f66147c058e18a7cfd456bb0f3a +size 1450049728 diff --git a/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlpackage/Data/com.apple.CoreML/model.mlmodel b/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlpackage/Data/com.apple.CoreML/model.mlmodel new file mode 100644 index 0000000000000000000000000000000000000000..f898d00ada006078d5d01952acfc3913efc3b7db --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlpackage/Data/com.apple.CoreML/model.mlmodel @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d32aa25518ec6d20563c61f0cc8f0b9ecca6d31665e4a5345a4b44d4c7706004 +size 2135914 diff --git a/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlpackage/Data/com.apple.CoreML/weights/weight.bin b/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlpackage/Data/com.apple.CoreML/weights/weight.bin 
new file mode 100644 index 0000000000000000000000000000000000000000..11a02126a20d91b2bfba01311502816d2bdfae06 --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlpackage/Data/com.apple.CoreML/weights/weight.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9219e9fcaa60ff76a31ea84658b485264b50f66147c058e18a7cfd456bb0f3a +size 1450049728 diff --git a/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlpackage/Manifest.json b/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlpackage/Manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..00b631085269f7bb0e00ce95ffe19b59a348d4de --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_4_50_bit_mixedpalette/Unet.mlpackage/Manifest.json @@ -0,0 +1,18 @@ +{ + "fileFormatVersion": "1.0.0", + "itemInfoEntries": { + "4aab2a70-8353-41d1-81e0-1761fcebde0f": { + "author": "com.apple.CoreML", + "description": "CoreML Model Weights", + "name": "weights", + "path": "com.apple.CoreML/weights" + }, + "58e673b4-eef0-4992-a74f-f42d8ec45a72": { + "author": "com.apple.CoreML", + "description": "CoreML Model Specification", + "name": "model.mlmodel", + "path": "com.apple.CoreML/model.mlmodel" + } + }, + "rootModelIdentifier": "58e673b4-eef0-4992-a74f-f42d8ec45a72" +} diff --git a/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlmodelc/analytics/coremldata.bin b/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlmodelc/analytics/coremldata.bin new file mode 100644 index 0000000000000000000000000000000000000000..a52eca0ccec366f56bc5788a31bfb9f6b80bd0ad --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlmodelc/analytics/coremldata.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2d5efe24fc618c686ca0bdf464effe50936a519a1368345391bcd4d393ebc44 +size 243 diff --git a/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlmodelc/coremldata.bin b/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlmodelc/coremldata.bin new file mode 100644 index 0000000000000000000000000000000000000000..6ec4882d93c41f4d859f419572182f8503ec75fc --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlmodelc/coremldata.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8aea3887ffdc8e059925f3981259e1cd3227b827e5f91edff613c73ac0ea16f6 +size 1338 diff --git a/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlmodelc/metadata.json b/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlmodelc/metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..c1f0f15aec1ab77fdc5eebb16987d6c230463f41 --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlmodelc/metadata.json @@ -0,0 +1,124 @@ +[ + { + "shortDescription" : "Stable Diffusion generates images conditioned on text or other images as input through the diffusion process. Please refer to https:\/\/arxiv.org\/abs\/2112.10752 for details.", + "metadataOutputVersion" : "3.0", + "outputSchema" : [ + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float32", + "formattedType" : "MultiArray (Float32)", + "shortDescription" : "Same shape and dtype as the `sample` input. 
The predicted noise to facilitate the reverse diffusion (denoising) process", + "shape" : "[]", + "name" : "noise_pred", + "type" : "MultiArray" + } + ], + "version" : "diffusers\/stable-diffusion-xl-base-1.0", + "modelParameters" : [ + + ], + "author" : "Please refer to the Model Card available at huggingface.co\/diffusers\/stable-diffusion-xl-base-1.0", + "specificationVersion" : 7, + "storagePrecision" : "Mixed (Float16, Palettized (1 bits), Palettized (2 bits), Palettized (4 bits), Palettized (6 bits), Palettized (8 bits))", + "license" : "OpenRAIL (https:\/\/huggingface.co\/spaces\/CompVis\/stable-diffusion-license)", + "mlProgramOperationTypeHistogram" : { + "UpsampleNearestNeighbor" : 2, + "Ios16.reduceMean" : 512, + "Ios16.sin" : 2, + "Ios16.softmax" : 140, + "Split" : 70, + "Ios16.add" : 722, + "Concat" : 14, + "Ios16.realDiv" : 46, + "Ios16.square" : 46, + "ExpandDims" : 6, + "Ios16.sub" : 256, + "Ios16.cast" : 1, + "Ios16.conv" : 794, + "Ios16.constexprLutToDense" : 697, + "Ios16.gelu" : 70, + "Ios16.matmul" : 280, + "Ios16.batchNorm" : 46, + "Ios16.reshape" : 675, + "Ios16.rsqrt" : 210, + "Ios16.silu" : 38, + "Ios16.sqrt" : 46, + "Ios16.mul" : 842, + "Ios16.cos" : 2, + "SliceByIndex" : 4 + }, + "computePrecision" : "Mixed (Float16, Float32, Int32)", + "isUpdatable" : "0", + "availability" : { + "macOS" : "13.0", + "tvOS" : "16.0", + "visionOS" : "1.0", + "watchOS" : "9.0", + "iOS" : "16.0", + "macCatalyst" : "16.0" + }, + "modelType" : { + "name" : "MLModelType_mlProgram" + }, + "inputSchema" : [ + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 2 × 4 × 128 × 128)", + "shortDescription" : "The low resolution latent feature maps being denoised through reverse diffusion", + "shape" : "[2, 4, 128, 128]", + "name" : "sample", + "type" : "MultiArray" + }, + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 2)", + "shortDescription" : "A value emitted by the associated scheduler object to condition the model on a given noise schedule", + "shape" : "[2]", + "name" : "timestep", + "type" : "MultiArray" + }, + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 2 × 2048 × 1 × 77)", + "shortDescription" : "Output embeddings from the associated text_encoder model to condition the generated image on text. A maximum of 77 tokens (~40 words) are allowed. Longer text is truncated.
Shorter text does not reduce computation.", + "shape" : "[2, 2048, 1, 77]", + "name" : "encoder_hidden_states", + "type" : "MultiArray" + }, + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 12)", + "shortDescription" : "", + "shape" : "[12]", + "name" : "time_ids", + "type" : "MultiArray" + }, + { + "hasShapeFlexibility" : "0", + "isOptional" : "0", + "dataType" : "Float16", + "formattedType" : "MultiArray (Float16 2 × 1280)", + "shortDescription" : "", + "shape" : "[2, 1280]", + "name" : "text_embeds", + "type" : "MultiArray" + } + ], + "userDefinedMetadata" : { + "com.github.apple.coremltools.version" : "7.0b1", + "com.github.apple.coremltools.source" : "torch==2.0.1+cu117", + "com.github.apple.ml-stable-diffusion.version" : "1.0.0" + }, + "generatedClassName" : "recipe_6_55_bit_mixedpalette", + "method" : "predict" + } +] \ No newline at end of file diff --git a/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlmodelc/model.mil b/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlmodelc/model.mil new file mode 100644 index 0000000000000000000000000000000000000000..94ae7dcc5dc2fc7529eac13939ff248ad0951e26 --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlmodelc/model.mil @@ -0,0 +1,12327 @@ +program(1.0) +[buildInfo = dict, tensor>({{"coremlc-component-MIL", "5.33.5"}, {"coremlc-version", "1877.0.48"}})] +{ + func main(tensor encoder_hidden_states, tensor sample, tensor text_embeds, tensor time_ids, tensor timestep) { + tensor var_24 = const()[name = tensor("op_24"), val = tensor(-1)]; + tensor var_41_axes_0 = const()[name = tensor("op_41_axes_0"), val = tensor([1])]; + tensor var_41_cast = expand_dims(axes = var_41_axes_0, x = timestep)[name = tensor("op_41_cast")]; + tensor var_43_to_fp16 = const()[name = tensor("op_43_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(64)))]; + tensor emb_3_cast = mul(x = var_41_cast, y = var_43_to_fp16)[name = tensor("emb_3_cast")]; + tensor var_48_cast = sin(x = emb_3_cast)[name = tensor("op_48_cast")]; + tensor var_49_cast = cos(x = emb_3_cast)[name = tensor("op_49_cast")]; + tensor emb_7_interleave_0 = const()[name = tensor("emb_7_interleave_0"), val = tensor(false)]; + tensor emb_7_cast = concat(axis = var_24, interleave = emb_7_interleave_0, values = (var_48_cast, var_49_cast))[name = tensor("emb_7_cast")]; + tensor var_53_begin_0 = const()[name = tensor("op_53_begin_0"), val = tensor([0, 160])]; + tensor var_53_end_0 = const()[name = tensor("op_53_end_0"), val = tensor([2, 320])]; + tensor var_53_end_mask_0 = const()[name = tensor("op_53_end_mask_0"), val = tensor([true, true])]; + tensor var_53_cast = slice_by_index(begin = var_53_begin_0, end = var_53_end_0, end_mask = var_53_end_mask_0, x = emb_7_cast)[name = tensor("op_53_cast")]; + tensor var_55_begin_0 = const()[name = tensor("op_55_begin_0"), val = tensor([0, 0])]; + tensor var_55_end_0 = const()[name = tensor("op_55_end_0"), val = tensor([2, 160])]; + tensor var_55_end_mask_0 = const()[name = tensor("op_55_end_mask_0"), val = tensor([true, false])]; + tensor var_55_cast = slice_by_index(begin = var_55_begin_0, end = var_55_end_0, end_mask = var_55_end_mask_0, x = emb_7_cast)[name = tensor("op_55_cast")]; + tensor sample_3_interleave_0 = const()[name = tensor("sample_3_interleave_0"), val = tensor(false)]; + tensor sample_3_cast = concat(axis = var_24, interleave = sample_3_interleave_0, values = (var_53_cast, var_55_cast))[name 
= tensor("sample_3_cast")]; + tensor var_58 = const()[name = tensor("op_58"), val = tensor(1)]; + tensor var_65_axes_0 = const()[name = tensor("op_65_axes_0"), val = tensor([-1])]; + tensor var_65_cast = expand_dims(axes = var_65_axes_0, x = sample_3_cast)[name = tensor("op_65_cast")]; + tensor input_1_axes_0 = const()[name = tensor("input_1_axes_0"), val = tensor([-1])]; + tensor input_1_cast = expand_dims(axes = input_1_axes_0, x = var_65_cast)[name = tensor("input_1_cast")]; + tensor var_69 = const()[name = tensor("op_69"), val = tensor([1, 1])]; + tensor var_71 = const()[name = tensor("op_71"), val = tensor([1, 1])]; + tensor input_3_pad_type_0 = const()[name = tensor("input_3_pad_type_0"), val = tensor("custom")]; + tensor input_3_pad_0 = const()[name = tensor("input_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor time_embedding_linear_1_weight_to_fp16 = const()[name = tensor("time_embedding_linear_1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(448)))]; + tensor time_embedding_linear_1_bias_to_fp16 = const()[name = tensor("time_embedding_linear_1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(819712)))]; + tensor input_3_cast = conv(bias = time_embedding_linear_1_bias_to_fp16, dilations = var_71, groups = var_58, pad = input_3_pad_0, pad_type = input_3_pad_type_0, strides = var_69, weight = time_embedding_linear_1_weight_to_fp16, x = input_1_cast)[name = tensor("input_3_cast")]; + tensor input_5_cast = silu(x = input_3_cast)[name = tensor("input_5_cast")]; + tensor var_77 = const()[name = tensor("op_77"), val = tensor([1, 1])]; + tensor var_79 = const()[name = tensor("op_79"), val = tensor([1, 1])]; + tensor emb_pad_type_0 = const()[name = tensor("emb_pad_type_0"), val = tensor("custom")]; + tensor emb_pad_0 = const()[name = tensor("emb_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor time_embedding_linear_2_weight_to_fp16 = const()[name = tensor("time_embedding_linear_2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(822336)))]; + tensor time_embedding_linear_2_bias_to_fp16 = const()[name = tensor("time_embedding_linear_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(4099200)))]; + tensor emb_cast = conv(bias = time_embedding_linear_2_bias_to_fp16, dilations = var_79, groups = var_58, pad = emb_pad_0, pad_type = emb_pad_type_0, strides = var_77, weight = time_embedding_linear_2_weight_to_fp16, x = input_5_cast)[name = tensor("emb_cast")]; + tensor var_85 = const()[name = tensor("op_85"), val = tensor(-1)]; + tensor var_102_axes_0 = const()[name = tensor("op_102_axes_0"), val = tensor([1])]; + tensor var_102_cast = expand_dims(axes = var_102_axes_0, x = time_ids)[name = tensor("op_102_cast")]; + tensor var_104_to_fp16 = const()[name = tensor("op_104_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(4101824)))]; + tensor emb_11_cast = mul(x = var_102_cast, y = var_104_to_fp16)[name = tensor("emb_11_cast")]; + tensor var_109_cast = sin(x = emb_11_cast)[name = tensor("op_109_cast")]; + tensor var_110_cast = cos(x = emb_11_cast)[name = tensor("op_110_cast")]; + tensor emb_15_interleave_0 = const()[name = tensor("emb_15_interleave_0"), val = tensor(false)]; + tensor emb_15_cast = concat(axis = var_85, interleave = emb_15_interleave_0, values = (var_109_cast, var_110_cast))[name = tensor("emb_15_cast")]; + tensor 
var_114_begin_0 = const()[name = tensor("op_114_begin_0"), val = tensor([0, 128])]; + tensor var_114_end_0 = const()[name = tensor("op_114_end_0"), val = tensor([12, 256])]; + tensor var_114_end_mask_0 = const()[name = tensor("op_114_end_mask_0"), val = tensor([true, true])]; + tensor var_114_cast = slice_by_index(begin = var_114_begin_0, end = var_114_end_0, end_mask = var_114_end_mask_0, x = emb_15_cast)[name = tensor("op_114_cast")]; + tensor var_116_begin_0 = const()[name = tensor("op_116_begin_0"), val = tensor([0, 0])]; + tensor var_116_end_0 = const()[name = tensor("op_116_end_0"), val = tensor([12, 128])]; + tensor var_116_end_mask_0 = const()[name = tensor("op_116_end_mask_0"), val = tensor([true, false])]; + tensor var_116_cast = slice_by_index(begin = var_116_begin_0, end = var_116_end_0, end_mask = var_116_end_mask_0, x = emb_15_cast)[name = tensor("op_116_cast")]; + tensor time_embeds_1_interleave_0 = const()[name = tensor("time_embeds_1_interleave_0"), val = tensor(false)]; + tensor time_embeds_1_cast = concat(axis = var_85, interleave = time_embeds_1_interleave_0, values = (var_114_cast, var_116_cast))[name = tensor("time_embeds_1_cast")]; + tensor var_124 = const()[name = tensor("op_124"), val = tensor([2, -1])]; + tensor time_embeds_cast = reshape(shape = var_124, x = time_embeds_1_cast)[name = tensor("time_embeds_cast")]; + tensor var_127 = const()[name = tensor("op_127"), val = tensor(-1)]; + tensor sample_interleave_0 = const()[name = tensor("sample_interleave_0"), val = tensor(false)]; + tensor sample_cast = concat(axis = var_127, interleave = sample_interleave_0, values = (text_embeds, time_embeds_cast))[name = tensor("sample_cast")]; + tensor var_129 = const()[name = tensor("op_129"), val = tensor(1)]; + tensor var_136_axes_0 = const()[name = tensor("op_136_axes_0"), val = tensor([-1])]; + tensor var_136_cast = expand_dims(axes = var_136_axes_0, x = sample_cast)[name = tensor("op_136_cast")]; + tensor input_7_axes_0 = const()[name = tensor("input_7_axes_0"), val = tensor([-1])]; + tensor input_7_cast = expand_dims(axes = input_7_axes_0, x = var_136_cast)[name = tensor("input_7_cast")]; + tensor var_140 = const()[name = tensor("op_140"), val = tensor([1, 1])]; + tensor var_142 = const()[name = tensor("op_142"), val = tensor([1, 1])]; + tensor input_9_pad_type_0 = const()[name = tensor("input_9_pad_type_0"), val = tensor("custom")]; + tensor input_9_pad_0 = const()[name = tensor("input_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor add_embedding_linear_1_weight_to_fp16 = const()[name = tensor("add_embedding_linear_1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(4102144)))]; + tensor add_embedding_linear_1_bias_to_fp16 = const()[name = tensor("add_embedding_linear_1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11311168)))]; + tensor input_9_cast = conv(bias = add_embedding_linear_1_bias_to_fp16, dilations = var_142, groups = var_129, pad = input_9_pad_0, pad_type = input_9_pad_type_0, strides = var_140, weight = add_embedding_linear_1_weight_to_fp16, x = input_7_cast)[name = tensor("input_9_cast")]; + tensor input_11_cast = silu(x = input_9_cast)[name = tensor("input_11_cast")]; + tensor var_148 = const()[name = tensor("op_148"), val = tensor([1, 1])]; + tensor var_150 = const()[name = tensor("op_150"), val = tensor([1, 1])]; + tensor aug_emb_pad_type_0 = const()[name = tensor("aug_emb_pad_type_0"), val = tensor("custom")]; + tensor aug_emb_pad_0 = 
const()[name = tensor("aug_emb_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor add_embedding_linear_2_weight_to_fp16 = const()[name = tensor("add_embedding_linear_2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(11313792)))]; + tensor add_embedding_linear_2_bias_to_fp16 = const()[name = tensor("add_embedding_linear_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14590656)))]; + tensor aug_emb_cast = conv(bias = add_embedding_linear_2_bias_to_fp16, dilations = var_150, groups = var_129, pad = aug_emb_pad_0, pad_type = aug_emb_pad_type_0, strides = var_148, weight = add_embedding_linear_2_weight_to_fp16, x = input_11_cast)[name = tensor("aug_emb_cast")]; + tensor input_19_cast = add(x = emb_cast, y = aug_emb_cast)[name = tensor("input_19_cast")]; + tensor var_158 = const()[name = tensor("op_158"), val = tensor(1)]; + tensor var_161 = const()[name = tensor("op_161"), val = tensor([1, 1])]; + tensor var_163 = const()[name = tensor("op_163"), val = tensor([1, 1])]; + tensor input_13_pad_type_0 = const()[name = tensor("input_13_pad_type_0"), val = tensor("custom")]; + tensor input_13_pad_0 = const()[name = tensor("input_13_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor conv_in_weight_to_fp16 = const()[name = tensor("conv_in_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14593280)))]; + tensor conv_in_bias_to_fp16 = const()[name = tensor("conv_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14616384)))]; + tensor input_13_cast = conv(bias = conv_in_bias_to_fp16, dilations = var_163, groups = var_158, pad = input_13_pad_0, pad_type = input_13_pad_type_0, strides = var_161, weight = conv_in_weight_to_fp16, x = sample)[name = tensor("input_13_cast")]; + tensor var_172 = const()[name = tensor("op_172"), val = tensor(1)]; + tensor reshape_0_shape_0 = const()[name = tensor("reshape_0_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_0_cast = reshape(shape = reshape_0_shape_0, x = input_13_cast)[name = tensor("reshape_0_cast")]; + tensor reduce_mean_0_axes_0 = const()[name = tensor("reduce_mean_0_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_0_keep_dims_0 = const()[name = tensor("reduce_mean_0_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_0_cast = reduce_mean(axes = reduce_mean_0_axes_0, keep_dims = reduce_mean_0_keep_dims_0, x = reshape_0_cast)[name = tensor("reduce_mean_0_cast")]; + tensor sub_0_cast = sub(x = reshape_0_cast, y = reduce_mean_0_cast)[name = tensor("sub_0_cast")]; + tensor square_0_cast = square(x = sub_0_cast)[name = tensor("square_0_cast")]; + tensor reduce_mean_2_axes_0 = const()[name = tensor("reduce_mean_2_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_2_keep_dims_0 = const()[name = tensor("reduce_mean_2_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_2_cast = reduce_mean(axes = reduce_mean_2_axes_0, keep_dims = reduce_mean_2_keep_dims_0, x = square_0_cast)[name = tensor("reduce_mean_2_cast")]; + tensor add_0_y_0_to_fp16 = const()[name = tensor("add_0_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_0_cast = add(x = reduce_mean_2_cast, y = add_0_y_0_to_fp16)[name = tensor("add_0_cast")]; + tensor sqrt_0_cast = sqrt(x = add_0_cast)[name = tensor("sqrt_0_cast")]; + tensor real_div_0_cast = real_div(x = sub_0_cast, y = sqrt_0_cast)[name = tensor("real_div_0_cast")]; + tensor reshape_1_shape_0 = 
const()[name = tensor("reshape_1_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_1_cast = reshape(shape = reshape_1_shape_0, x = real_div_0_cast)[name = tensor("reshape_1_cast")]; + tensor add_1_mean_0_to_fp16 = const()[name = tensor("add_1_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14617088)))]; + tensor add_1_variance_0_to_fp16 = const()[name = tensor("add_1_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14617792)))]; + tensor add_1_gamma_0_to_fp16 = const()[name = tensor("add_1_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14618496)))]; + tensor add_1_beta_0_to_fp16 = const()[name = tensor("add_1_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14619200)))]; + tensor add_1_epsilon_0_to_fp16 = const()[name = tensor("add_1_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_1_cast = batch_norm(beta = add_1_beta_0_to_fp16, epsilon = add_1_epsilon_0_to_fp16, gamma = add_1_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_1_cast)[name = tensor("add_1_cast")]; + tensor input_17_cast = silu(x = add_1_cast)[name = tensor("input_17_cast")]; + tensor var_190 = const()[name = tensor("op_190"), val = tensor([1, 1])]; + tensor var_192 = const()[name = tensor("op_192"), val = tensor([1, 1])]; + tensor hidden_states_1_pad_type_0 = const()[name = tensor("hidden_states_1_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_1_pad_0 = const()[name = tensor("hidden_states_1_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_0_resnets_0_conv1_weight_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(14619904)))]; + tensor down_blocks_0_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(16463168)))]; + tensor hidden_states_1_cast = conv(bias = down_blocks_0_resnets_0_conv1_bias_to_fp16, dilations = var_192, groups = var_172, pad = hidden_states_1_pad_0, pad_type = hidden_states_1_pad_type_0, strides = var_190, weight = down_blocks_0_resnets_0_conv1_weight_to_fp16, x = input_17_cast)[name = tensor("hidden_states_1_cast")]; + tensor input_21_cast = silu(x = input_19_cast)[name = tensor("input_21_cast")]; + tensor var_198 = const()[name = tensor("op_198"), val = tensor([1, 1])]; + tensor var_200 = const()[name = tensor("op_200"), val = tensor([1, 1])]; + tensor temb_1_pad_type_0 = const()[name = tensor("temb_1_pad_type_0"), val = tensor("custom")]; + tensor temb_1_pad_0 = const()[name = tensor("temb_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_0_resnets_0_time_emb_proj_weight_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_time_emb_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(16463872)))]; + tensor down_blocks_0_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(17283136)))]; + tensor temb_1_cast = conv(bias = down_blocks_0_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_200, groups = var_172, pad = 
temb_1_pad_0, pad_type = temb_1_pad_type_0, strides = var_198, weight = down_blocks_0_resnets_0_time_emb_proj_weight_to_fp16, x = input_21_cast)[name = tensor("temb_1_cast")]; + tensor input_23_cast = add(x = hidden_states_1_cast, y = temb_1_cast)[name = tensor("input_23_cast")]; + tensor reshape_4_shape_0 = const()[name = tensor("reshape_4_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_4_cast = reshape(shape = reshape_4_shape_0, x = input_23_cast)[name = tensor("reshape_4_cast")]; + tensor reduce_mean_3_axes_0 = const()[name = tensor("reduce_mean_3_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_3_keep_dims_0 = const()[name = tensor("reduce_mean_3_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_3_cast = reduce_mean(axes = reduce_mean_3_axes_0, keep_dims = reduce_mean_3_keep_dims_0, x = reshape_4_cast)[name = tensor("reduce_mean_3_cast")]; + tensor sub_2_cast = sub(x = reshape_4_cast, y = reduce_mean_3_cast)[name = tensor("sub_2_cast")]; + tensor square_1_cast = square(x = sub_2_cast)[name = tensor("square_1_cast")]; + tensor reduce_mean_5_axes_0 = const()[name = tensor("reduce_mean_5_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_5_keep_dims_0 = const()[name = tensor("reduce_mean_5_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_5_cast = reduce_mean(axes = reduce_mean_5_axes_0, keep_dims = reduce_mean_5_keep_dims_0, x = square_1_cast)[name = tensor("reduce_mean_5_cast")]; + tensor add_2_y_0_to_fp16 = const()[name = tensor("add_2_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_2_cast = add(x = reduce_mean_5_cast, y = add_2_y_0_to_fp16)[name = tensor("add_2_cast")]; + tensor sqrt_1_cast = sqrt(x = add_2_cast)[name = tensor("sqrt_1_cast")]; + tensor real_div_1_cast = real_div(x = sub_2_cast, y = sqrt_1_cast)[name = tensor("real_div_1_cast")]; + tensor reshape_5_shape_0 = const()[name = tensor("reshape_5_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_5_cast = reshape(shape = reshape_5_shape_0, x = real_div_1_cast)[name = tensor("reshape_5_cast")]; + tensor add_3_gamma_0_to_fp16 = const()[name = tensor("add_3_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(17283840)))]; + tensor add_3_beta_0_to_fp16 = const()[name = tensor("add_3_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(17284544)))]; + tensor add_3_epsilon_0_to_fp16 = const()[name = tensor("add_3_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_3_cast = batch_norm(beta = add_3_beta_0_to_fp16, epsilon = add_3_epsilon_0_to_fp16, gamma = add_3_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_5_cast)[name = tensor("add_3_cast")]; + tensor input_27_cast = silu(x = add_3_cast)[name = tensor("input_27_cast")]; + tensor var_210 = const()[name = tensor("op_210"), val = tensor([1, 1])]; + tensor var_212 = const()[name = tensor("op_212"), val = tensor([1, 1])]; + tensor hidden_states_3_pad_type_0 = const()[name = tensor("hidden_states_3_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_3_pad_0 = const()[name = tensor("hidden_states_3_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_0_resnets_0_conv2_weight_to_fp16 = const()[name = tensor("down_blocks_0_resnets_0_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(17285248)))]; + tensor down_blocks_0_resnets_0_conv2_bias_to_fp16 = const()[name = 
tensor("down_blocks_0_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(19128512)))]; + tensor hidden_states_3_cast = conv(bias = down_blocks_0_resnets_0_conv2_bias_to_fp16, dilations = var_212, groups = var_172, pad = hidden_states_3_pad_0, pad_type = hidden_states_3_pad_type_0, strides = var_210, weight = down_blocks_0_resnets_0_conv2_weight_to_fp16, x = input_27_cast)[name = tensor("hidden_states_3_cast")]; + tensor input_29_cast = add(x = input_13_cast, y = hidden_states_3_cast)[name = tensor("input_29_cast")]; + tensor reshape_8_shape_0 = const()[name = tensor("reshape_8_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_8_cast = reshape(shape = reshape_8_shape_0, x = input_29_cast)[name = tensor("reshape_8_cast")]; + tensor reduce_mean_6_axes_0 = const()[name = tensor("reduce_mean_6_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_6_keep_dims_0 = const()[name = tensor("reduce_mean_6_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_6_cast = reduce_mean(axes = reduce_mean_6_axes_0, keep_dims = reduce_mean_6_keep_dims_0, x = reshape_8_cast)[name = tensor("reduce_mean_6_cast")]; + tensor sub_4_cast = sub(x = reshape_8_cast, y = reduce_mean_6_cast)[name = tensor("sub_4_cast")]; + tensor square_2_cast = square(x = sub_4_cast)[name = tensor("square_2_cast")]; + tensor reduce_mean_8_axes_0 = const()[name = tensor("reduce_mean_8_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_8_keep_dims_0 = const()[name = tensor("reduce_mean_8_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_8_cast = reduce_mean(axes = reduce_mean_8_axes_0, keep_dims = reduce_mean_8_keep_dims_0, x = square_2_cast)[name = tensor("reduce_mean_8_cast")]; + tensor add_4_y_0_to_fp16 = const()[name = tensor("add_4_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_4_cast = add(x = reduce_mean_8_cast, y = add_4_y_0_to_fp16)[name = tensor("add_4_cast")]; + tensor sqrt_2_cast = sqrt(x = add_4_cast)[name = tensor("sqrt_2_cast")]; + tensor real_div_2_cast = real_div(x = sub_4_cast, y = sqrt_2_cast)[name = tensor("real_div_2_cast")]; + tensor reshape_9_shape_0 = const()[name = tensor("reshape_9_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_9_cast = reshape(shape = reshape_9_shape_0, x = real_div_2_cast)[name = tensor("reshape_9_cast")]; + tensor add_5_gamma_0_to_fp16 = const()[name = tensor("add_5_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(19129216)))]; + tensor add_5_beta_0_to_fp16 = const()[name = tensor("add_5_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(19129920)))]; + tensor add_5_epsilon_0_to_fp16 = const()[name = tensor("add_5_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_5_cast = batch_norm(beta = add_5_beta_0_to_fp16, epsilon = add_5_epsilon_0_to_fp16, gamma = add_5_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_9_cast)[name = tensor("add_5_cast")]; + tensor input_33_cast = silu(x = add_5_cast)[name = tensor("input_33_cast")]; + tensor var_227 = const()[name = tensor("op_227"), val = tensor([1, 1])]; + tensor var_229 = const()[name = tensor("op_229"), val = tensor([1, 1])]; + tensor hidden_states_5_pad_type_0 = const()[name = tensor("hidden_states_5_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_5_pad_0 = const()[name = tensor("hidden_states_5_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor 
down_blocks_0_resnets_1_conv1_weight_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(19130624)))]; + tensor down_blocks_0_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20973888)))]; + tensor hidden_states_5_cast = conv(bias = down_blocks_0_resnets_1_conv1_bias_to_fp16, dilations = var_229, groups = var_172, pad = hidden_states_5_pad_0, pad_type = hidden_states_5_pad_type_0, strides = var_227, weight = down_blocks_0_resnets_1_conv1_weight_to_fp16, x = input_33_cast)[name = tensor("hidden_states_5_cast")]; + tensor var_235 = const()[name = tensor("op_235"), val = tensor([1, 1])]; + tensor var_237 = const()[name = tensor("op_237"), val = tensor([1, 1])]; + tensor temb_3_pad_type_0 = const()[name = tensor("temb_3_pad_type_0"), val = tensor("custom")]; + tensor temb_3_pad_0 = const()[name = tensor("temb_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_0_resnets_1_time_emb_proj_weight_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_time_emb_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(20974592)))]; + tensor down_blocks_0_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(21793856)))]; + tensor temb_3_cast = conv(bias = down_blocks_0_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_237, groups = var_172, pad = temb_3_pad_0, pad_type = temb_3_pad_type_0, strides = var_235, weight = down_blocks_0_resnets_1_time_emb_proj_weight_to_fp16, x = input_21_cast)[name = tensor("temb_3_cast")]; + tensor input_37_cast = add(x = hidden_states_5_cast, y = temb_3_cast)[name = tensor("input_37_cast")]; + tensor reshape_12_shape_0 = const()[name = tensor("reshape_12_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_12_cast = reshape(shape = reshape_12_shape_0, x = input_37_cast)[name = tensor("reshape_12_cast")]; + tensor reduce_mean_9_axes_0 = const()[name = tensor("reduce_mean_9_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_9_keep_dims_0 = const()[name = tensor("reduce_mean_9_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_9_cast = reduce_mean(axes = reduce_mean_9_axes_0, keep_dims = reduce_mean_9_keep_dims_0, x = reshape_12_cast)[name = tensor("reduce_mean_9_cast")]; + tensor sub_6_cast = sub(x = reshape_12_cast, y = reduce_mean_9_cast)[name = tensor("sub_6_cast")]; + tensor square_3_cast = square(x = sub_6_cast)[name = tensor("square_3_cast")]; + tensor reduce_mean_11_axes_0 = const()[name = tensor("reduce_mean_11_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_11_keep_dims_0 = const()[name = tensor("reduce_mean_11_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_11_cast = reduce_mean(axes = reduce_mean_11_axes_0, keep_dims = reduce_mean_11_keep_dims_0, x = square_3_cast)[name = tensor("reduce_mean_11_cast")]; + tensor add_6_y_0_to_fp16 = const()[name = tensor("add_6_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_6_cast = add(x = reduce_mean_11_cast, y = add_6_y_0_to_fp16)[name = tensor("add_6_cast")]; + tensor sqrt_3_cast = sqrt(x = add_6_cast)[name = tensor("sqrt_3_cast")]; + tensor real_div_3_cast = real_div(x = sub_6_cast, y = sqrt_3_cast)[name = 
tensor("real_div_3_cast")]; + tensor reshape_13_shape_0 = const()[name = tensor("reshape_13_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_13_cast = reshape(shape = reshape_13_shape_0, x = real_div_3_cast)[name = tensor("reshape_13_cast")]; + tensor add_7_gamma_0_to_fp16 = const()[name = tensor("add_7_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(21794560)))]; + tensor add_7_beta_0_to_fp16 = const()[name = tensor("add_7_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(21795264)))]; + tensor add_7_epsilon_0_to_fp16 = const()[name = tensor("add_7_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_7_cast = batch_norm(beta = add_7_beta_0_to_fp16, epsilon = add_7_epsilon_0_to_fp16, gamma = add_7_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_13_cast)[name = tensor("add_7_cast")]; + tensor input_41_cast = silu(x = add_7_cast)[name = tensor("input_41_cast")]; + tensor var_247 = const()[name = tensor("op_247"), val = tensor([1, 1])]; + tensor var_249 = const()[name = tensor("op_249"), val = tensor([1, 1])]; + tensor hidden_states_7_pad_type_0 = const()[name = tensor("hidden_states_7_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_7_pad_0 = const()[name = tensor("hidden_states_7_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_0_resnets_1_conv2_weight_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(21795968)))]; + tensor down_blocks_0_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_0_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(23639232)))]; + tensor hidden_states_7_cast = conv(bias = down_blocks_0_resnets_1_conv2_bias_to_fp16, dilations = var_249, groups = var_172, pad = hidden_states_7_pad_0, pad_type = hidden_states_7_pad_type_0, strides = var_247, weight = down_blocks_0_resnets_1_conv2_weight_to_fp16, x = input_41_cast)[name = tensor("hidden_states_7_cast")]; + tensor input_43_cast = add(x = input_29_cast, y = hidden_states_7_cast)[name = tensor("input_43_cast")]; + tensor var_256 = const()[name = tensor("op_256"), val = tensor([2, 2])]; + tensor var_258 = const()[name = tensor("op_258"), val = tensor([1, 1])]; + tensor input_45_pad_type_0 = const()[name = tensor("input_45_pad_type_0"), val = tensor("custom")]; + tensor input_45_pad_0 = const()[name = tensor("input_45_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_0_downsamplers_0_conv_weight_to_fp16 = const()[name = tensor("down_blocks_0_downsamplers_0_conv_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(23639936)))]; + tensor down_blocks_0_downsamplers_0_conv_bias_to_fp16 = const()[name = tensor("down_blocks_0_downsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25483200)))]; + tensor input_45_cast = conv(bias = down_blocks_0_downsamplers_0_conv_bias_to_fp16, dilations = var_258, groups = var_172, pad = input_45_pad_0, pad_type = input_45_pad_type_0, strides = var_256, weight = down_blocks_0_downsamplers_0_conv_weight_to_fp16, x = input_43_cast)[name = tensor("input_45_cast")]; + tensor var_266 = const()[name = tensor("op_266"), val = tensor(3)]; + tensor var_277 = const()[name = 
tensor("op_277"), val = tensor(true)]; + tensor var_282 = const()[name = tensor("op_282"), val = tensor(1)]; + tensor reshape_16_shape_0 = const()[name = tensor("reshape_16_shape_0"), val = tensor([2, 32, 10, 64, 64])]; + tensor reshape_16_cast = reshape(shape = reshape_16_shape_0, x = input_45_cast)[name = tensor("reshape_16_cast")]; + tensor reduce_mean_12_axes_0 = const()[name = tensor("reduce_mean_12_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_12_keep_dims_0 = const()[name = tensor("reduce_mean_12_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_12_cast = reduce_mean(axes = reduce_mean_12_axes_0, keep_dims = reduce_mean_12_keep_dims_0, x = reshape_16_cast)[name = tensor("reduce_mean_12_cast")]; + tensor sub_8_cast = sub(x = reshape_16_cast, y = reduce_mean_12_cast)[name = tensor("sub_8_cast")]; + tensor square_4_cast = square(x = sub_8_cast)[name = tensor("square_4_cast")]; + tensor reduce_mean_14_axes_0 = const()[name = tensor("reduce_mean_14_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_14_keep_dims_0 = const()[name = tensor("reduce_mean_14_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_14_cast = reduce_mean(axes = reduce_mean_14_axes_0, keep_dims = reduce_mean_14_keep_dims_0, x = square_4_cast)[name = tensor("reduce_mean_14_cast")]; + tensor add_8_y_0_to_fp16 = const()[name = tensor("add_8_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_8_cast = add(x = reduce_mean_14_cast, y = add_8_y_0_to_fp16)[name = tensor("add_8_cast")]; + tensor sqrt_4_cast = sqrt(x = add_8_cast)[name = tensor("sqrt_4_cast")]; + tensor real_div_4_cast = real_div(x = sub_8_cast, y = sqrt_4_cast)[name = tensor("real_div_4_cast")]; + tensor reshape_17_shape_0 = const()[name = tensor("reshape_17_shape_0"), val = tensor([2, 320, 64, 64])]; + tensor reshape_17_cast = reshape(shape = reshape_17_shape_0, x = real_div_4_cast)[name = tensor("reshape_17_cast")]; + tensor add_9_gamma_0_to_fp16 = const()[name = tensor("add_9_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25483904)))]; + tensor add_9_beta_0_to_fp16 = const()[name = tensor("add_9_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25484608)))]; + tensor add_9_epsilon_0_to_fp16 = const()[name = tensor("add_9_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_9_cast = batch_norm(beta = add_9_beta_0_to_fp16, epsilon = add_9_epsilon_0_to_fp16, gamma = add_9_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_17_cast)[name = tensor("add_9_cast")]; + tensor input_49_cast = silu(x = add_9_cast)[name = tensor("input_49_cast")]; + tensor var_305 = const()[name = tensor("op_305"), val = tensor([1, 1])]; + tensor var_307 = const()[name = tensor("op_307"), val = tensor([1, 1])]; + tensor hidden_states_9_pad_type_0 = const()[name = tensor("hidden_states_9_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_9_pad_0 = const()[name = tensor("hidden_states_9_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_resnets_0_conv1_weight_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(25485312)))]; + tensor down_blocks_1_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(29171776)))]; + tensor 
hidden_states_9_cast = conv(bias = down_blocks_1_resnets_0_conv1_bias_to_fp16, dilations = var_307, groups = var_282, pad = hidden_states_9_pad_0, pad_type = hidden_states_9_pad_type_0, strides = var_305, weight = down_blocks_1_resnets_0_conv1_weight_to_fp16, x = input_49_cast)[name = tensor("hidden_states_9_cast")]; + tensor var_313 = const()[name = tensor("op_313"), val = tensor([1, 1])]; + tensor var_315 = const()[name = tensor("op_315"), val = tensor([1, 1])]; + tensor temb_5_pad_type_0 = const()[name = tensor("temb_5_pad_type_0"), val = tensor("custom")]; + tensor temb_5_pad_0 = const()[name = tensor("temb_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_resnets_0_time_emb_proj_weight_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_time_emb_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(29173120)))]; + tensor down_blocks_1_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(30811584)))]; + tensor temb_5_cast = conv(bias = down_blocks_1_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_315, groups = var_282, pad = temb_5_pad_0, pad_type = temb_5_pad_type_0, strides = var_313, weight = down_blocks_1_resnets_0_time_emb_proj_weight_to_fp16, x = input_21_cast)[name = tensor("temb_5_cast")]; + tensor input_53_cast = add(x = hidden_states_9_cast, y = temb_5_cast)[name = tensor("input_53_cast")]; + tensor reshape_20_shape_0 = const()[name = tensor("reshape_20_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_20_cast = reshape(shape = reshape_20_shape_0, x = input_53_cast)[name = tensor("reshape_20_cast")]; + tensor reduce_mean_15_axes_0 = const()[name = tensor("reduce_mean_15_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_15_keep_dims_0 = const()[name = tensor("reduce_mean_15_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_15_cast = reduce_mean(axes = reduce_mean_15_axes_0, keep_dims = reduce_mean_15_keep_dims_0, x = reshape_20_cast)[name = tensor("reduce_mean_15_cast")]; + tensor sub_10_cast = sub(x = reshape_20_cast, y = reduce_mean_15_cast)[name = tensor("sub_10_cast")]; + tensor square_5_cast = square(x = sub_10_cast)[name = tensor("square_5_cast")]; + tensor reduce_mean_17_axes_0 = const()[name = tensor("reduce_mean_17_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_17_keep_dims_0 = const()[name = tensor("reduce_mean_17_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_17_cast = reduce_mean(axes = reduce_mean_17_axes_0, keep_dims = reduce_mean_17_keep_dims_0, x = square_5_cast)[name = tensor("reduce_mean_17_cast")]; + tensor add_10_y_0_to_fp16 = const()[name = tensor("add_10_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_10_cast = add(x = reduce_mean_17_cast, y = add_10_y_0_to_fp16)[name = tensor("add_10_cast")]; + tensor sqrt_5_cast = sqrt(x = add_10_cast)[name = tensor("sqrt_5_cast")]; + tensor real_div_5_cast = real_div(x = sub_10_cast, y = sqrt_5_cast)[name = tensor("real_div_5_cast")]; + tensor reshape_21_shape_0 = const()[name = tensor("reshape_21_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_21_cast = reshape(shape = reshape_21_shape_0, x = real_div_5_cast)[name = tensor("reshape_21_cast")]; + tensor add_11_mean_0_to_fp16 = const()[name = tensor("add_11_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(30812928)))]; + 
tensor add_11_variance_0_to_fp16 = const()[name = tensor("add_11_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(30814272)))]; + tensor add_11_gamma_0_to_fp16 = const()[name = tensor("add_11_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(30815616)))]; + tensor add_11_beta_0_to_fp16 = const()[name = tensor("add_11_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(30816960)))]; + tensor add_11_epsilon_0_to_fp16 = const()[name = tensor("add_11_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_11_cast = batch_norm(beta = add_11_beta_0_to_fp16, epsilon = add_11_epsilon_0_to_fp16, gamma = add_11_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_21_cast)[name = tensor("add_11_cast")]; + tensor input_57_cast = silu(x = add_11_cast)[name = tensor("input_57_cast")]; + tensor var_325 = const()[name = tensor("op_325"), val = tensor([1, 1])]; + tensor var_327 = const()[name = tensor("op_327"), val = tensor([1, 1])]; + tensor hidden_states_11_pad_type_0 = const()[name = tensor("hidden_states_11_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_11_pad_0 = const()[name = tensor("hidden_states_11_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_resnets_0_conv2_weight_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(30818304)))]; + tensor down_blocks_1_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38191168)))]; + tensor hidden_states_11_cast = conv(bias = down_blocks_1_resnets_0_conv2_bias_to_fp16, dilations = var_327, groups = var_282, pad = hidden_states_11_pad_0, pad_type = hidden_states_11_pad_type_0, strides = var_325, weight = down_blocks_1_resnets_0_conv2_weight_to_fp16, x = input_57_cast)[name = tensor("hidden_states_11_cast")]; + tensor var_332 = const()[name = tensor("op_332"), val = tensor([1, 1])]; + tensor var_334 = const()[name = tensor("op_334"), val = tensor([1, 1])]; + tensor x_1_pad_type_0 = const()[name = tensor("x_1_pad_type_0"), val = tensor("custom")]; + tensor x_1_pad_0 = const()[name = tensor("x_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_resnets_0_conv_shortcut_weight_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_conv_shortcut_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38192512)))]; + tensor down_blocks_1_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38602176)))]; + tensor x_1_cast = conv(bias = down_blocks_1_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_334, groups = var_282, pad = x_1_pad_0, pad_type = x_1_pad_type_0, strides = var_332, weight = down_blocks_1_resnets_0_conv_shortcut_weight_to_fp16, x = input_45_cast)[name = tensor("x_1_cast")]; + tensor hidden_states_13_cast = add(x = x_1_cast, y = hidden_states_11_cast)[name = tensor("hidden_states_13_cast")]; + tensor reshape_24_shape_0 = const()[name = tensor("reshape_24_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_24_cast = reshape(shape = 
reshape_24_shape_0, x = hidden_states_13_cast)[name = tensor("reshape_24_cast")]; + tensor reduce_mean_18_axes_0 = const()[name = tensor("reduce_mean_18_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_18_keep_dims_0 = const()[name = tensor("reduce_mean_18_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_18_cast = reduce_mean(axes = reduce_mean_18_axes_0, keep_dims = reduce_mean_18_keep_dims_0, x = reshape_24_cast)[name = tensor("reduce_mean_18_cast")]; + tensor sub_12_cast = sub(x = reshape_24_cast, y = reduce_mean_18_cast)[name = tensor("sub_12_cast")]; + tensor square_6_cast = square(x = sub_12_cast)[name = tensor("square_6_cast")]; + tensor reduce_mean_20_axes_0 = const()[name = tensor("reduce_mean_20_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_20_keep_dims_0 = const()[name = tensor("reduce_mean_20_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_20_cast = reduce_mean(axes = reduce_mean_20_axes_0, keep_dims = reduce_mean_20_keep_dims_0, x = square_6_cast)[name = tensor("reduce_mean_20_cast")]; + tensor add_12_y_0_to_fp16 = const()[name = tensor("add_12_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_12_cast = add(x = reduce_mean_20_cast, y = add_12_y_0_to_fp16)[name = tensor("add_12_cast")]; + tensor sqrt_6_cast = sqrt(x = add_12_cast)[name = tensor("sqrt_6_cast")]; + tensor real_div_6_cast = real_div(x = sub_12_cast, y = sqrt_6_cast)[name = tensor("real_div_6_cast")]; + tensor reshape_25_shape_0 = const()[name = tensor("reshape_25_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_25_cast = reshape(shape = reshape_25_shape_0, x = real_div_6_cast)[name = tensor("reshape_25_cast")]; + tensor add_13_gamma_0_to_fp16 = const()[name = tensor("add_13_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38603520)))]; + tensor add_13_beta_0_to_fp16 = const()[name = tensor("add_13_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38604864)))]; + tensor add_13_epsilon_0_to_fp16 = const()[name = tensor("add_13_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_13_cast = batch_norm(beta = add_13_beta_0_to_fp16, epsilon = add_13_epsilon_0_to_fp16, gamma = add_13_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_25_cast)[name = tensor("add_13_cast")]; + tensor var_356 = const()[name = tensor("op_356"), val = tensor([1, 1])]; + tensor var_358 = const()[name = tensor("op_358"), val = tensor([1, 1])]; + tensor hidden_states_15_pad_type_0 = const()[name = tensor("hidden_states_15_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_15_pad_0 = const()[name = tensor("hidden_states_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_proj_in_weight_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_proj_in_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(38606208)))]; + tensor down_blocks_1_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(39425472)))]; + tensor hidden_states_15_cast = conv(bias = down_blocks_1_attentions_0_proj_in_bias_to_fp16, dilations = var_358, groups = var_282, pad = hidden_states_15_pad_0, pad_type = hidden_states_15_pad_type_0, strides = var_356, weight = down_blocks_1_attentions_0_proj_in_weight_to_fp16, x = add_13_cast)[name = 
tensor("hidden_states_15_cast")]; + tensor var_363 = const()[name = tensor("op_363"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_1_cast = reshape(shape = var_363, x = hidden_states_15_cast)[name = tensor("inputs_1_cast")]; + tensor var_373 = const()[name = tensor("op_373"), val = tensor([1])]; + tensor channels_mean_1_cast = reduce_mean(axes = var_373, keep_dims = var_277, x = inputs_1_cast)[name = tensor("channels_mean_1_cast")]; + tensor zero_mean_1_cast = sub(x = inputs_1_cast, y = channels_mean_1_cast)[name = tensor("zero_mean_1_cast")]; + tensor zero_mean_sq_1_cast = mul(x = zero_mean_1_cast, y = zero_mean_1_cast)[name = tensor("zero_mean_sq_1_cast")]; + tensor var_377 = const()[name = tensor("op_377"), val = tensor([1])]; + tensor var_378_cast = reduce_mean(axes = var_377, keep_dims = var_277, x = zero_mean_sq_1_cast)[name = tensor("op_378_cast")]; + tensor var_379_to_fp16 = const()[name = tensor("op_379_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_380_cast = add(x = var_378_cast, y = var_379_to_fp16)[name = tensor("op_380_cast")]; + tensor denom_1_epsilon_0_to_fp16 = const()[name = tensor("denom_1_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_1_cast = rsqrt(epsilon = denom_1_epsilon_0_to_fp16, x = var_380_cast)[name = tensor("denom_1_cast")]; + tensor out_1_cast = mul(x = zero_mean_1_cast, y = denom_1_cast)[name = tensor("out_1_cast")]; + tensor var_384_to_fp16 = const()[name = tensor("op_384_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(39426816)))]; + tensor var_385_cast = add(x = out_1_cast, y = var_384_to_fp16)[name = tensor("op_385_cast")]; + tensor var_387_to_fp16 = const()[name = tensor("op_387_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(39428160)))]; + tensor hidden_states_17_cast = mul(x = var_385_cast, y = var_387_to_fp16)[name = tensor("hidden_states_17_cast")]; + tensor var_394 = const()[name = tensor("op_394"), val = tensor([1, 1])]; + tensor var_396 = const()[name = tensor("op_396"), val = tensor([1, 1])]; + tensor q_1_pad_type_0 = const()[name = tensor("q_1_pad_type_0"), val = tensor("custom")]; + tensor q_1_pad_0 = const()[name = tensor("q_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(39429504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(39839168))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_1_cast = conv(dilations = var_396, groups = var_282, pad = q_1_pad_0, pad_type = q_1_pad_type_0, strides = var_394, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_17_cast)[name = tensor("q_1_cast")]; + tensor var_400 = const()[name = tensor("op_400"), val = tensor([1, 1])]; + tensor var_402 = const()[name = tensor("op_402"), val = tensor([1, 1])]; + tensor k_1_pad_type_0 = const()[name = tensor("k_1_pad_type_0"), val = tensor("custom")]; + tensor k_1_pad_0 = const()[name = tensor("k_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(39839744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(40249408))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_1_cast = conv(dilations = var_402, groups = var_282, pad = k_1_pad_0, pad_type = k_1_pad_type_0, strides = var_400, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_17_cast)[name = tensor("k_1_cast")]; + tensor var_406 = const()[name = tensor("op_406"), val = tensor([1, 1])]; + tensor var_408 = const()[name = tensor("op_408"), val = tensor([1, 1])]; + tensor v_1_pad_type_0 = const()[name = tensor("v_1_pad_type_0"), val = tensor("custom")]; + tensor v_1_pad_0 = const()[name = tensor("v_1_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(40249984)))]; + tensor v_1_cast = conv(dilations = var_408, groups = var_282, pad = v_1_pad_0, pad_type = v_1_pad_type_0, strides = var_406, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16, x = hidden_states_17_cast)[name = tensor("v_1_cast")]; + tensor var_412 = const()[name = tensor("op_412"), val = tensor([2, 10, 64, -1])]; + tensor var_413_cast = reshape(shape = var_412, x = q_1_cast)[name = tensor("op_413_cast")]; + tensor var_414 = const()[name = tensor("op_414"), val = tensor([2, 10, 64, -1])]; + tensor var_415_cast = reshape(shape = var_414, x = k_1_cast)[name = tensor("op_415_cast")]; + tensor var_416 = const()[name = tensor("op_416"), val = tensor([2, 10, 64, -1])]; + tensor var_417_cast = reshape(shape = var_416, x = v_1_cast)[name = tensor("op_417_cast")]; + tensor attn_weights_1_transpose_x_0 = const()[name = tensor("attn_weights_1_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_1_transpose_y_0 = const()[name = tensor("attn_weights_1_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_1_cast = matmul(transpose_x = attn_weights_1_transpose_x_0, transpose_y = attn_weights_1_transpose_y_0, x = var_413_cast, y = var_415_cast)[name = tensor("attn_weights_1_cast")]; + tensor var_273_to_fp16 = const()[name = tensor("op_273_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_3_cast = mul(x = attn_weights_1_cast, y = var_273_to_fp16)[name = tensor("attn_weights_3_cast")]; + tensor var_421_cast = softmax(axis = var_266, x = attn_weights_3_cast)[name = tensor("op_421_cast")]; + tensor attn_1_transpose_x_0 = const()[name = tensor("attn_1_transpose_x_0"), val = tensor(false)]; + tensor attn_1_transpose_y_0 = const()[name = tensor("attn_1_transpose_y_0"), val = tensor(true)]; + tensor attn_1_cast = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = var_417_cast, y = var_421_cast)[name = tensor("attn_1_cast")]; + tensor var_425 = const()[name = tensor("op_425"), val = tensor([2, 640, 1, -1])]; + tensor input_61_cast = reshape(shape = var_425, x = attn_1_cast)[name = tensor("input_61_cast")]; + tensor var_430 = const()[name = tensor("op_430"), val = tensor([1, 1])]; + tensor var_432 = const()[name = tensor("op_432"), val = tensor([1, 1])]; + tensor var_434_pad_type_0 = const()[name = tensor("op_434_pad_type_0"), val = tensor("custom")]; + tensor var_434_pad_0 = const()[name = 
tensor("op_434_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(41069248)))]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(41888512)))]; + tensor var_434_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_432, groups = var_282, pad = var_434_pad_0, pad_type = var_434_pad_type_0, strides = var_430, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16, x = input_61_cast)[name = tensor("op_434_cast")]; + tensor inputs_3_cast = add(x = var_434_cast, y = inputs_1_cast)[name = tensor("inputs_3_cast")]; + tensor var_438 = const()[name = tensor("op_438"), val = tensor([1])]; + tensor channels_mean_3_cast = reduce_mean(axes = var_438, keep_dims = var_277, x = inputs_3_cast)[name = tensor("channels_mean_3_cast")]; + tensor zero_mean_3_cast = sub(x = inputs_3_cast, y = channels_mean_3_cast)[name = tensor("zero_mean_3_cast")]; + tensor zero_mean_sq_3_cast = mul(x = zero_mean_3_cast, y = zero_mean_3_cast)[name = tensor("zero_mean_sq_3_cast")]; + tensor var_442 = const()[name = tensor("op_442"), val = tensor([1])]; + tensor var_443_cast = reduce_mean(axes = var_442, keep_dims = var_277, x = zero_mean_sq_3_cast)[name = tensor("op_443_cast")]; + tensor var_444_to_fp16 = const()[name = tensor("op_444_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_445_cast = add(x = var_443_cast, y = var_444_to_fp16)[name = tensor("op_445_cast")]; + tensor denom_3_epsilon_0_to_fp16 = const()[name = tensor("denom_3_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_3_cast = rsqrt(epsilon = denom_3_epsilon_0_to_fp16, x = var_445_cast)[name = tensor("denom_3_cast")]; + tensor out_3_cast = mul(x = zero_mean_3_cast, y = denom_3_cast)[name = tensor("out_3_cast")]; + tensor var_449_to_fp16 = const()[name = tensor("op_449_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(41889856)))]; + tensor var_450_cast = add(x = out_3_cast, y = var_449_to_fp16)[name = tensor("op_450_cast")]; + tensor var_452_to_fp16 = const()[name = tensor("op_452_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(41891200)))]; + tensor hidden_states_19_cast = mul(x = var_450_cast, y = var_452_to_fp16)[name = tensor("hidden_states_19_cast")]; + tensor var_459 = const()[name = tensor("op_459"), val = tensor([1, 1])]; + tensor var_461 = const()[name = tensor("op_461"), val = tensor([1, 1])]; + tensor q_3_pad_type_0 = const()[name = tensor("q_3_pad_type_0"), val = tensor("custom")]; + tensor q_3_pad_0 = const()[name = tensor("q_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(41892544))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42199808))), name = 
tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_3_cast = conv(dilations = var_461, groups = var_282, pad = q_3_pad_0, pad_type = q_3_pad_type_0, strides = var_459, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_19_cast)[name = tensor("q_3_cast")]; + tensor var_465 = const()[name = tensor("op_465"), val = tensor([1, 1])]; + tensor var_467 = const()[name = tensor("op_467"), val = tensor([1, 1])]; + tensor k_3_pad_type_0 = const()[name = tensor("k_3_pad_type_0"), val = tensor("custom")]; + tensor k_3_pad_0 = const()[name = tensor("k_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(42200000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43510784))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_3_cast = conv(dilations = var_467, groups = var_282, pad = k_3_pad_0, pad_type = k_3_pad_type_0, strides = var_465, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_3_cast")]; + tensor var_471 = const()[name = tensor("op_471"), val = tensor([1, 1])]; + tensor var_473 = const()[name = tensor("op_473"), val = tensor([1, 1])]; + tensor v_3_pad_type_0 = const()[name = tensor("v_3_pad_type_0"), val = tensor("custom")]; + tensor v_3_pad_0 = const()[name = tensor("v_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(43511360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(44822144))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_3_cast = conv(dilations = var_473, groups = var_282, pad = v_3_pad_0, pad_type = v_3_pad_type_0, strides = var_471, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_3_cast")]; + tensor var_477 = const()[name = tensor("op_477"), val = tensor([2, 10, 64, -1])]; + tensor var_478_cast = reshape(shape = var_477, x = q_3_cast)[name = tensor("op_478_cast")]; + tensor var_479 = const()[name = tensor("op_479"), val = tensor([2, 10, 64, -1])]; + tensor var_480_cast = reshape(shape = var_479, x = k_3_cast)[name = tensor("op_480_cast")]; + tensor var_481 = const()[name = tensor("op_481"), val = tensor([2, 10, 64, -1])]; + tensor var_482_cast = reshape(shape = var_481, x = v_3_cast)[name = tensor("op_482_cast")]; + tensor attn_weights_5_transpose_x_0 = const()[name = tensor("attn_weights_5_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_5_transpose_y_0 = const()[name = tensor("attn_weights_5_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_5_cast = matmul(transpose_x = attn_weights_5_transpose_x_0, transpose_y = attn_weights_5_transpose_y_0, x = var_478_cast, y = var_480_cast)[name = tensor("attn_weights_5_cast")]; + tensor attn_weights_7_cast = mul(x = 
attn_weights_5_cast, y = var_273_to_fp16)[name = tensor("attn_weights_7_cast")]; + tensor var_486_cast = softmax(axis = var_266, x = attn_weights_7_cast)[name = tensor("op_486_cast")]; + tensor attn_3_transpose_x_0 = const()[name = tensor("attn_3_transpose_x_0"), val = tensor(false)]; + tensor attn_3_transpose_y_0 = const()[name = tensor("attn_3_transpose_y_0"), val = tensor(true)]; + tensor attn_3_cast = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = var_482_cast, y = var_486_cast)[name = tensor("attn_3_cast")]; + tensor var_490 = const()[name = tensor("op_490"), val = tensor([2, 640, 1, -1])]; + tensor input_63_cast = reshape(shape = var_490, x = attn_3_cast)[name = tensor("input_63_cast")]; + tensor var_495 = const()[name = tensor("op_495"), val = tensor([1, 1])]; + tensor var_497 = const()[name = tensor("op_497"), val = tensor([1, 1])]; + tensor var_499_pad_type_0 = const()[name = tensor("op_499_pad_type_0"), val = tensor("custom")]; + tensor var_499_pad_0 = const()[name = tensor("op_499_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(44822720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45232384))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45232960)))]; + tensor var_499_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_497, groups = var_282, pad = var_499_pad_0, pad_type = var_499_pad_type_0, strides = var_495, weight = down_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_63_cast)[name = tensor("op_499_cast")]; + tensor inputs_5_cast = add(x = var_499_cast, y = inputs_3_cast)[name = tensor("inputs_5_cast")]; + tensor var_503 = const()[name = tensor("op_503"), val = tensor([1])]; + tensor channels_mean_5_cast = reduce_mean(axes = var_503, keep_dims = var_277, x = inputs_5_cast)[name = tensor("channels_mean_5_cast")]; + tensor zero_mean_5_cast = sub(x = inputs_5_cast, y = channels_mean_5_cast)[name = tensor("zero_mean_5_cast")]; + tensor zero_mean_sq_5_cast = mul(x = zero_mean_5_cast, y = zero_mean_5_cast)[name = tensor("zero_mean_sq_5_cast")]; + tensor var_507 = const()[name = tensor("op_507"), val = tensor([1])]; + tensor var_508_cast = reduce_mean(axes = var_507, keep_dims = var_277, x = zero_mean_sq_5_cast)[name = tensor("op_508_cast")]; + tensor var_509_to_fp16 = const()[name = tensor("op_509_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_510_cast = add(x = var_508_cast, y = var_509_to_fp16)[name = tensor("op_510_cast")]; + tensor denom_5_epsilon_0_to_fp16 = const()[name = tensor("denom_5_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_5_cast = rsqrt(epsilon = denom_5_epsilon_0_to_fp16, x = var_510_cast)[name = tensor("denom_5_cast")]; + tensor out_5_cast = mul(x = zero_mean_5_cast, y = denom_5_cast)[name = tensor("out_5_cast")]; + tensor var_514_to_fp16 = const()[name = tensor("op_514_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45234304)))]; + tensor var_515_cast = add(x = out_5_cast, y = var_514_to_fp16)[name = tensor("op_515_cast")]; + tensor var_517_to_fp16 = const()[name = tensor("op_517_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45235648)))]; + tensor input_65_cast = mul(x = var_515_cast, y = var_517_to_fp16)[name = tensor("input_65_cast")]; + tensor var_525 = const()[name = tensor("op_525"), val = tensor([1, 1])]; + tensor var_527 = const()[name = tensor("op_527"), val = tensor([1, 1])]; + tensor var_529_pad_type_0 = const()[name = tensor("op_529_pad_type_0"), val = tensor("custom")]; + tensor var_529_pad_0 = const()[name = tensor("op_529_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(45236992)))]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51790656)))]; + tensor var_529_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_527, groups = var_282, pad = var_529_pad_0, pad_type = var_529_pad_type_0, strides = var_525, weight = down_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16, x = input_65_cast)[name = tensor("op_529_cast")]; + tensor var_530_split_sizes_0 = const()[name = tensor("op_530_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_530_axis_0 = const()[name = tensor("op_530_axis_0"), val = tensor(1)]; + tensor var_530_cast_0, tensor var_530_cast_1 = split(axis = var_530_axis_0, split_sizes = var_530_split_sizes_0, x = var_529_cast)[name = tensor("op_530_cast")]; + tensor var_532_mode_0 = const()[name = tensor("op_532_mode_0"), val = tensor("EXACT")]; + tensor var_532_cast = gelu(mode = var_532_mode_0, x = var_530_cast_1)[name = tensor("op_532_cast")]; + tensor input_67_cast = mul(x = var_530_cast_0, y = var_532_cast)[name = tensor("input_67_cast")]; + tensor var_536 = const()[name = tensor("op_536"), val = tensor([1, 1])]; + tensor var_538 = const()[name = tensor("op_538"), val = tensor([1, 1])]; + tensor var_540_pad_type_0 = const()[name = tensor("op_540_pad_type_0"), val = tensor("custom")]; + tensor var_540_pad_0 = const()[name = tensor("op_540_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(51800960)))]; + tensor down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55077824)))]; + tensor var_540_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_538, groups = var_282, pad = var_540_pad_0, pad_type = var_540_pad_type_0, strides = var_536, weight = 
down_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16, x = input_67_cast)[name = tensor("op_540_cast")]; + tensor inputs_7_cast = add(x = var_540_cast, y = inputs_5_cast)[name = tensor("inputs_7_cast")]; + tensor var_550 = const()[name = tensor("op_550"), val = tensor([1])]; + tensor channels_mean_7_cast = reduce_mean(axes = var_550, keep_dims = var_277, x = inputs_7_cast)[name = tensor("channels_mean_7_cast")]; + tensor zero_mean_7_cast = sub(x = inputs_7_cast, y = channels_mean_7_cast)[name = tensor("zero_mean_7_cast")]; + tensor zero_mean_sq_7_cast = mul(x = zero_mean_7_cast, y = zero_mean_7_cast)[name = tensor("zero_mean_sq_7_cast")]; + tensor var_554 = const()[name = tensor("op_554"), val = tensor([1])]; + tensor var_555_cast = reduce_mean(axes = var_554, keep_dims = var_277, x = zero_mean_sq_7_cast)[name = tensor("op_555_cast")]; + tensor var_556_to_fp16 = const()[name = tensor("op_556_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_557_cast = add(x = var_555_cast, y = var_556_to_fp16)[name = tensor("op_557_cast")]; + tensor denom_7_epsilon_0_to_fp16 = const()[name = tensor("denom_7_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_7_cast = rsqrt(epsilon = denom_7_epsilon_0_to_fp16, x = var_557_cast)[name = tensor("denom_7_cast")]; + tensor out_7_cast = mul(x = zero_mean_7_cast, y = denom_7_cast)[name = tensor("out_7_cast")]; + tensor var_561_to_fp16 = const()[name = tensor("op_561_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55079168)))]; + tensor var_562_cast = add(x = out_7_cast, y = var_561_to_fp16)[name = tensor("op_562_cast")]; + tensor var_564_to_fp16 = const()[name = tensor("op_564_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55080512)))]; + tensor hidden_states_23_cast = mul(x = var_562_cast, y = var_564_to_fp16)[name = tensor("hidden_states_23_cast")]; + tensor var_571 = const()[name = tensor("op_571"), val = tensor([1, 1])]; + tensor var_573 = const()[name = tensor("op_573"), val = tensor([1, 1])]; + tensor q_5_pad_type_0 = const()[name = tensor("q_5_pad_type_0"), val = tensor("custom")]; + tensor q_5_pad_0 = const()[name = tensor("q_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55081856))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55491520))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_5_cast = conv(dilations = var_573, groups = var_282, pad = q_5_pad_0, pad_type = q_5_pad_type_0, strides = var_571, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_23_cast)[name = tensor("q_5_cast")]; + tensor var_577 = const()[name = tensor("op_577"), val = tensor([1, 1])]; + tensor var_579 = const()[name = tensor("op_579"), val = tensor([1, 1])]; + tensor k_5_pad_type_0 = const()[name = tensor("k_5_pad_type_0"), val = tensor("custom")]; + tensor k_5_pad_0 = const()[name = tensor("k_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(55492096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55901760))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_5_cast = conv(dilations = var_579, groups = var_282, pad = k_5_pad_0, pad_type = k_5_pad_type_0, strides = var_577, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_23_cast)[name = tensor("k_5_cast")]; + tensor var_583 = const()[name = tensor("op_583"), val = tensor([1, 1])]; + tensor var_585 = const()[name = tensor("op_585"), val = tensor([1, 1])]; + tensor v_5_pad_type_0 = const()[name = tensor("v_5_pad_type_0"), val = tensor("custom")]; + tensor v_5_pad_0 = const()[name = tensor("v_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(55902336)))]; + tensor v_5_cast = conv(dilations = var_585, groups = var_282, pad = v_5_pad_0, pad_type = v_5_pad_type_0, strides = var_583, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16, x = hidden_states_23_cast)[name = tensor("v_5_cast")]; + tensor var_589 = const()[name = tensor("op_589"), val = tensor([2, 10, 64, -1])]; + tensor var_590_cast = reshape(shape = var_589, x = q_5_cast)[name = tensor("op_590_cast")]; + tensor var_591 = const()[name = tensor("op_591"), val = tensor([2, 10, 64, -1])]; + tensor var_592_cast = reshape(shape = var_591, x = k_5_cast)[name = tensor("op_592_cast")]; + tensor var_593 = const()[name = tensor("op_593"), val = tensor([2, 10, 64, -1])]; + tensor var_594_cast = reshape(shape = var_593, x = v_5_cast)[name = tensor("op_594_cast")]; + tensor attn_weights_9_transpose_x_0 = const()[name = tensor("attn_weights_9_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_9_transpose_y_0 = const()[name = tensor("attn_weights_9_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_9_cast = matmul(transpose_x = attn_weights_9_transpose_x_0, transpose_y = attn_weights_9_transpose_y_0, x = var_590_cast, y = var_592_cast)[name = tensor("attn_weights_9_cast")]; + tensor attn_weights_11_cast = mul(x = attn_weights_9_cast, y = var_273_to_fp16)[name = tensor("attn_weights_11_cast")]; + tensor var_598_cast = softmax(axis = var_266, x = attn_weights_11_cast)[name = tensor("op_598_cast")]; + tensor attn_5_transpose_x_0 = const()[name = tensor("attn_5_transpose_x_0"), val = tensor(false)]; + tensor attn_5_transpose_y_0 = const()[name = tensor("attn_5_transpose_y_0"), val = tensor(true)]; + tensor attn_5_cast = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = var_594_cast, y = var_598_cast)[name = tensor("attn_5_cast")]; + tensor var_602 = const()[name = tensor("op_602"), val = tensor([2, 640, 1, -1])]; + tensor input_69_cast = reshape(shape = var_602, x = attn_5_cast)[name = tensor("input_69_cast")]; + tensor var_607 = const()[name = tensor("op_607"), val = tensor([1, 1])]; + tensor var_609 = const()[name = tensor("op_609"), val = tensor([1, 1])]; + tensor var_611_pad_type_0 = const()[name = tensor("op_611_pad_type_0"), val = tensor("custom")]; + tensor var_611_pad_0 = const()[name = tensor("op_611_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(56721600)))]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(57540864)))]; + tensor var_611_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_609, groups = var_282, pad = var_611_pad_0, pad_type = var_611_pad_type_0, strides = var_607, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16, x = input_69_cast)[name = tensor("op_611_cast")]; + tensor inputs_9_cast = add(x = var_611_cast, y = inputs_7_cast)[name = tensor("inputs_9_cast")]; + tensor var_615 = const()[name = tensor("op_615"), val = tensor([1])]; + tensor channels_mean_9_cast = reduce_mean(axes = var_615, keep_dims = var_277, x = inputs_9_cast)[name = tensor("channels_mean_9_cast")]; + tensor zero_mean_9_cast = sub(x = inputs_9_cast, y = channels_mean_9_cast)[name = tensor("zero_mean_9_cast")]; + tensor zero_mean_sq_9_cast = mul(x = zero_mean_9_cast, y = zero_mean_9_cast)[name = tensor("zero_mean_sq_9_cast")]; + tensor var_619 = const()[name = tensor("op_619"), val = tensor([1])]; + tensor var_620_cast = reduce_mean(axes = var_619, keep_dims = var_277, x = zero_mean_sq_9_cast)[name = tensor("op_620_cast")]; + tensor var_621_to_fp16 = const()[name = tensor("op_621_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_622_cast = add(x = var_620_cast, y = var_621_to_fp16)[name = tensor("op_622_cast")]; + tensor denom_9_epsilon_0_to_fp16 = const()[name = tensor("denom_9_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_9_cast = rsqrt(epsilon = denom_9_epsilon_0_to_fp16, x = var_622_cast)[name = tensor("denom_9_cast")]; + tensor out_9_cast = mul(x = zero_mean_9_cast, y = denom_9_cast)[name = tensor("out_9_cast")]; + tensor var_626_to_fp16 = const()[name = tensor("op_626_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(57542208)))]; + tensor var_627_cast = add(x = out_9_cast, y = var_626_to_fp16)[name = tensor("op_627_cast")]; + tensor var_629_to_fp16 = const()[name = tensor("op_629_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(57543552)))]; + tensor hidden_states_25_cast = mul(x = var_627_cast, y = var_629_to_fp16)[name = tensor("hidden_states_25_cast")]; + tensor var_636 = const()[name = tensor("op_636"), val = tensor([1, 1])]; + tensor var_638 = const()[name = tensor("op_638"), val = tensor([1, 1])]; + tensor q_7_pad_type_0 = const()[name = tensor("q_7_pad_type_0"), val = tensor("custom")]; + tensor q_7_pad_0 = const()[name = tensor("q_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(57544896))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(57954560))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor 
q_7_cast = conv(dilations = var_638, groups = var_282, pad = q_7_pad_0, pad_type = q_7_pad_type_0, strides = var_636, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_25_cast)[name = tensor("q_7_cast")]; + tensor var_642 = const()[name = tensor("op_642"), val = tensor([1, 1])]; + tensor var_644 = const()[name = tensor("op_644"), val = tensor([1, 1])]; + tensor k_7_pad_type_0 = const()[name = tensor("k_7_pad_type_0"), val = tensor("custom")]; + tensor k_7_pad_0 = const()[name = tensor("k_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(57955136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(59265920))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_7_cast = conv(dilations = var_644, groups = var_282, pad = k_7_pad_0, pad_type = k_7_pad_type_0, strides = var_642, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_7_cast")]; + tensor var_648 = const()[name = tensor("op_648"), val = tensor([1, 1])]; + tensor var_650 = const()[name = tensor("op_650"), val = tensor([1, 1])]; + tensor v_7_pad_type_0 = const()[name = tensor("v_7_pad_type_0"), val = tensor("custom")]; + tensor v_7_pad_0 = const()[name = tensor("v_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(59266496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60577280))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_7_cast = conv(dilations = var_650, groups = var_282, pad = v_7_pad_0, pad_type = v_7_pad_type_0, strides = var_648, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_7_cast")]; + tensor var_654 = const()[name = tensor("op_654"), val = tensor([2, 10, 64, -1])]; + tensor var_655_cast = reshape(shape = var_654, x = q_7_cast)[name = tensor("op_655_cast")]; + tensor var_656 = const()[name = tensor("op_656"), val = tensor([2, 10, 64, -1])]; + tensor var_657_cast = reshape(shape = var_656, x = k_7_cast)[name = tensor("op_657_cast")]; + tensor var_658 = const()[name = tensor("op_658"), val = tensor([2, 10, 64, -1])]; + tensor var_659_cast = reshape(shape = var_658, x = v_7_cast)[name = tensor("op_659_cast")]; + tensor attn_weights_13_transpose_x_0 = const()[name = tensor("attn_weights_13_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_13_transpose_y_0 = const()[name = tensor("attn_weights_13_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_13_cast = matmul(transpose_x = attn_weights_13_transpose_x_0, transpose_y = attn_weights_13_transpose_y_0, x = var_655_cast, y = var_657_cast)[name = tensor("attn_weights_13_cast")]; + tensor attn_weights_15_cast = mul(x = attn_weights_13_cast, y = var_273_to_fp16)[name = tensor("attn_weights_15_cast")]; + tensor var_663_cast = softmax(axis = var_266, x 
= attn_weights_15_cast)[name = tensor("op_663_cast")]; + tensor attn_7_transpose_x_0 = const()[name = tensor("attn_7_transpose_x_0"), val = tensor(false)]; + tensor attn_7_transpose_y_0 = const()[name = tensor("attn_7_transpose_y_0"), val = tensor(true)]; + tensor attn_7_cast = matmul(transpose_x = attn_7_transpose_x_0, transpose_y = attn_7_transpose_y_0, x = var_659_cast, y = var_663_cast)[name = tensor("attn_7_cast")]; + tensor var_667 = const()[name = tensor("op_667"), val = tensor([2, 640, 1, -1])]; + tensor input_71_cast = reshape(shape = var_667, x = attn_7_cast)[name = tensor("input_71_cast")]; + tensor var_672 = const()[name = tensor("op_672"), val = tensor([1, 1])]; + tensor var_674 = const()[name = tensor("op_674"), val = tensor([1, 1])]; + tensor var_676_pad_type_0 = const()[name = tensor("op_676_pad_type_0"), val = tensor("custom")]; + tensor var_676_pad_0 = const()[name = tensor("op_676_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60577856))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60987520))), name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60988096)))]; + tensor var_676_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_674, groups = var_282, pad = var_676_pad_0, pad_type = var_676_pad_type_0, strides = var_672, weight = down_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_71_cast)[name = tensor("op_676_cast")]; + tensor inputs_11_cast = add(x = var_676_cast, y = inputs_9_cast)[name = tensor("inputs_11_cast")]; + tensor var_680 = const()[name = tensor("op_680"), val = tensor([1])]; + tensor channels_mean_11_cast = reduce_mean(axes = var_680, keep_dims = var_277, x = inputs_11_cast)[name = tensor("channels_mean_11_cast")]; + tensor zero_mean_11_cast = sub(x = inputs_11_cast, y = channels_mean_11_cast)[name = tensor("zero_mean_11_cast")]; + tensor zero_mean_sq_11_cast = mul(x = zero_mean_11_cast, y = zero_mean_11_cast)[name = tensor("zero_mean_sq_11_cast")]; + tensor var_684 = const()[name = tensor("op_684"), val = tensor([1])]; + tensor var_685_cast = reduce_mean(axes = var_684, keep_dims = var_277, x = zero_mean_sq_11_cast)[name = tensor("op_685_cast")]; + tensor var_686_to_fp16 = const()[name = tensor("op_686_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_687_cast = add(x = var_685_cast, y = var_686_to_fp16)[name = tensor("op_687_cast")]; + tensor denom_11_epsilon_0_to_fp16 = const()[name = tensor("denom_11_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_11_cast = rsqrt(epsilon = denom_11_epsilon_0_to_fp16, x = var_687_cast)[name = tensor("denom_11_cast")]; + tensor out_11_cast = mul(x = zero_mean_11_cast, y = denom_11_cast)[name = tensor("out_11_cast")]; + tensor var_691_to_fp16 = const()[name = tensor("op_691_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60989440)))]; + tensor 
var_692_cast = add(x = out_11_cast, y = var_691_to_fp16)[name = tensor("op_692_cast")]; + tensor var_694_to_fp16 = const()[name = tensor("op_694_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60990784)))]; + tensor input_73_cast = mul(x = var_692_cast, y = var_694_to_fp16)[name = tensor("input_73_cast")]; + tensor var_702 = const()[name = tensor("op_702"), val = tensor([1, 1])]; + tensor var_704 = const()[name = tensor("op_704"), val = tensor([1, 1])]; + tensor var_706_pad_type_0 = const()[name = tensor("op_706_pad_type_0"), val = tensor("custom")]; + tensor var_706_pad_0 = const()[name = tensor("op_706_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(60992128)))]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(67545792)))]; + tensor var_706_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_704, groups = var_282, pad = var_706_pad_0, pad_type = var_706_pad_type_0, strides = var_702, weight = down_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16, x = input_73_cast)[name = tensor("op_706_cast")]; + tensor var_707_split_sizes_0 = const()[name = tensor("op_707_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_707_axis_0 = const()[name = tensor("op_707_axis_0"), val = tensor(1)]; + tensor var_707_cast_0, tensor var_707_cast_1 = split(axis = var_707_axis_0, split_sizes = var_707_split_sizes_0, x = var_706_cast)[name = tensor("op_707_cast")]; + tensor var_709_mode_0 = const()[name = tensor("op_709_mode_0"), val = tensor("EXACT")]; + tensor var_709_cast = gelu(mode = var_709_mode_0, x = var_707_cast_1)[name = tensor("op_709_cast")]; + tensor input_75_cast = mul(x = var_707_cast_0, y = var_709_cast)[name = tensor("input_75_cast")]; + tensor var_713 = const()[name = tensor("op_713"), val = tensor([1, 1])]; + tensor var_715 = const()[name = tensor("op_715"), val = tensor([1, 1])]; + tensor var_717_pad_type_0 = const()[name = tensor("op_717_pad_type_0"), val = tensor("custom")]; + tensor var_717_pad_0 = const()[name = tensor("op_717_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(67556096)))]; + tensor down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(70832960)))]; + tensor var_717_cast = conv(bias = down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_715, groups = var_282, pad = var_717_pad_0, pad_type = var_717_pad_type_0, strides = var_713, weight = down_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16, x = input_75_cast)[name = tensor("op_717_cast")]; + tensor 
hidden_states_29_cast = add(x = var_717_cast, y = inputs_11_cast)[name = tensor("hidden_states_29_cast")]; + tensor var_719 = const()[name = tensor("op_719"), val = tensor([2, 640, 64, 64])]; + tensor input_77_cast = reshape(shape = var_719, x = hidden_states_29_cast)[name = tensor("input_77_cast")]; + tensor var_723 = const()[name = tensor("op_723"), val = tensor([1, 1])]; + tensor var_725 = const()[name = tensor("op_725"), val = tensor([1, 1])]; + tensor hidden_states_31_pad_type_0 = const()[name = tensor("hidden_states_31_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_31_pad_0 = const()[name = tensor("hidden_states_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_0_proj_out_weight_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_proj_out_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(70834304)))]; + tensor down_blocks_1_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(71653568)))]; + tensor hidden_states_31_cast = conv(bias = down_blocks_1_attentions_0_proj_out_bias_to_fp16, dilations = var_725, groups = var_282, pad = hidden_states_31_pad_0, pad_type = hidden_states_31_pad_type_0, strides = var_723, weight = down_blocks_1_attentions_0_proj_out_weight_to_fp16, x = input_77_cast)[name = tensor("hidden_states_31_cast")]; + tensor input_79_cast = add(x = hidden_states_31_cast, y = hidden_states_13_cast)[name = tensor("input_79_cast")]; + tensor reshape_28_shape_0 = const()[name = tensor("reshape_28_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_28_cast = reshape(shape = reshape_28_shape_0, x = input_79_cast)[name = tensor("reshape_28_cast")]; + tensor reduce_mean_21_axes_0 = const()[name = tensor("reduce_mean_21_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_21_keep_dims_0 = const()[name = tensor("reduce_mean_21_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_21_cast = reduce_mean(axes = reduce_mean_21_axes_0, keep_dims = reduce_mean_21_keep_dims_0, x = reshape_28_cast)[name = tensor("reduce_mean_21_cast")]; + tensor sub_14_cast = sub(x = reshape_28_cast, y = reduce_mean_21_cast)[name = tensor("sub_14_cast")]; + tensor square_7_cast = square(x = sub_14_cast)[name = tensor("square_7_cast")]; + tensor reduce_mean_23_axes_0 = const()[name = tensor("reduce_mean_23_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_23_keep_dims_0 = const()[name = tensor("reduce_mean_23_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_23_cast = reduce_mean(axes = reduce_mean_23_axes_0, keep_dims = reduce_mean_23_keep_dims_0, x = square_7_cast)[name = tensor("reduce_mean_23_cast")]; + tensor add_14_y_0_to_fp16 = const()[name = tensor("add_14_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_14_cast = add(x = reduce_mean_23_cast, y = add_14_y_0_to_fp16)[name = tensor("add_14_cast")]; + tensor sqrt_7_cast = sqrt(x = add_14_cast)[name = tensor("sqrt_7_cast")]; + tensor real_div_7_cast = real_div(x = sub_14_cast, y = sqrt_7_cast)[name = tensor("real_div_7_cast")]; + tensor reshape_29_shape_0 = const()[name = tensor("reshape_29_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_29_cast = reshape(shape = reshape_29_shape_0, x = real_div_7_cast)[name = tensor("reshape_29_cast")]; + tensor add_15_gamma_0_to_fp16 = const()[name = tensor("add_15_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(71654912)))]; + tensor add_15_beta_0_to_fp16 = const()[name = tensor("add_15_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(71656256)))]; + tensor add_15_epsilon_0_to_fp16 = const()[name = tensor("add_15_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_15_cast = batch_norm(beta = add_15_beta_0_to_fp16, epsilon = add_15_epsilon_0_to_fp16, gamma = add_15_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_29_cast)[name = tensor("add_15_cast")]; + tensor input_83_cast = silu(x = add_15_cast)[name = tensor("input_83_cast")]; + tensor var_740 = const()[name = tensor("op_740"), val = tensor([1, 1])]; + tensor var_742 = const()[name = tensor("op_742"), val = tensor([1, 1])]; + tensor hidden_states_33_pad_type_0 = const()[name = tensor("hidden_states_33_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_33_pad_0 = const()[name = tensor("hidden_states_33_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(71657600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(75344064))), name = tensor("down_blocks_1_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor down_blocks_1_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(75344640)))]; + tensor hidden_states_33_cast = conv(bias = down_blocks_1_resnets_1_conv1_bias_to_fp16, dilations = var_742, groups = var_282, pad = hidden_states_33_pad_0, pad_type = hidden_states_33_pad_type_0, strides = var_740, weight = down_blocks_1_resnets_1_conv1_weight_to_fp16_palettized, x = input_83_cast)[name = tensor("hidden_states_33_cast")]; + tensor var_748 = const()[name = tensor("op_748"), val = tensor([1, 1])]; + tensor var_750 = const()[name = tensor("op_750"), val = tensor([1, 1])]; + tensor temb_7_pad_type_0 = const()[name = tensor("temb_7_pad_type_0"), val = tensor("custom")]; + tensor temb_7_pad_0 = const()[name = tensor("temb_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(75345984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(76165248))), name = tensor("down_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor down_blocks_1_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(76165824)))]; + tensor temb_7_cast = conv(bias = down_blocks_1_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_750, groups = var_282, pad = temb_7_pad_0, pad_type = temb_7_pad_type_0, strides = var_748, weight = down_blocks_1_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_7_cast")]; + tensor input_87_cast = add(x = hidden_states_33_cast, y = temb_7_cast)[name = tensor("input_87_cast")]; + tensor reshape_32_shape_0 = const()[name = tensor("reshape_32_shape_0"), 
val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_32_cast = reshape(shape = reshape_32_shape_0, x = input_87_cast)[name = tensor("reshape_32_cast")]; + tensor reduce_mean_24_axes_0 = const()[name = tensor("reduce_mean_24_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_24_keep_dims_0 = const()[name = tensor("reduce_mean_24_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_24_cast = reduce_mean(axes = reduce_mean_24_axes_0, keep_dims = reduce_mean_24_keep_dims_0, x = reshape_32_cast)[name = tensor("reduce_mean_24_cast")]; + tensor sub_16_cast = sub(x = reshape_32_cast, y = reduce_mean_24_cast)[name = tensor("sub_16_cast")]; + tensor square_8_cast = square(x = sub_16_cast)[name = tensor("square_8_cast")]; + tensor reduce_mean_26_axes_0 = const()[name = tensor("reduce_mean_26_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_26_keep_dims_0 = const()[name = tensor("reduce_mean_26_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_26_cast = reduce_mean(axes = reduce_mean_26_axes_0, keep_dims = reduce_mean_26_keep_dims_0, x = square_8_cast)[name = tensor("reduce_mean_26_cast")]; + tensor add_16_y_0_to_fp16 = const()[name = tensor("add_16_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_16_cast = add(x = reduce_mean_26_cast, y = add_16_y_0_to_fp16)[name = tensor("add_16_cast")]; + tensor sqrt_8_cast = sqrt(x = add_16_cast)[name = tensor("sqrt_8_cast")]; + tensor real_div_8_cast = real_div(x = sub_16_cast, y = sqrt_8_cast)[name = tensor("real_div_8_cast")]; + tensor reshape_33_shape_0 = const()[name = tensor("reshape_33_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_33_cast = reshape(shape = reshape_33_shape_0, x = real_div_8_cast)[name = tensor("reshape_33_cast")]; + tensor add_17_gamma_0_to_fp16 = const()[name = tensor("add_17_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(76167168)))]; + tensor add_17_beta_0_to_fp16 = const()[name = tensor("add_17_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(76168512)))]; + tensor add_17_epsilon_0_to_fp16 = const()[name = tensor("add_17_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_17_cast = batch_norm(beta = add_17_beta_0_to_fp16, epsilon = add_17_epsilon_0_to_fp16, gamma = add_17_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_33_cast)[name = tensor("add_17_cast")]; + tensor input_91_cast = silu(x = add_17_cast)[name = tensor("input_91_cast")]; + tensor var_760 = const()[name = tensor("op_760"), val = tensor([1, 1])]; + tensor var_762 = const()[name = tensor("op_762"), val = tensor([1, 1])]; + tensor hidden_states_35_pad_type_0 = const()[name = tensor("hidden_states_35_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_35_pad_0 = const()[name = tensor("hidden_states_35_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_resnets_1_conv2_weight_to_fp16 = const()[name = tensor("down_blocks_1_resnets_1_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(76169856)))]; + tensor down_blocks_1_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_1_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(83542720)))]; + tensor hidden_states_35_cast = conv(bias = down_blocks_1_resnets_1_conv2_bias_to_fp16, dilations = var_762, groups = var_282, pad = hidden_states_35_pad_0, pad_type = 
hidden_states_35_pad_type_0, strides = var_760, weight = down_blocks_1_resnets_1_conv2_weight_to_fp16, x = input_91_cast)[name = tensor("hidden_states_35_cast")]; + tensor hidden_states_37_cast = add(x = input_79_cast, y = hidden_states_35_cast)[name = tensor("hidden_states_37_cast")]; + tensor reshape_36_shape_0 = const()[name = tensor("reshape_36_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_36_cast = reshape(shape = reshape_36_shape_0, x = hidden_states_37_cast)[name = tensor("reshape_36_cast")]; + tensor reduce_mean_27_axes_0 = const()[name = tensor("reduce_mean_27_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_27_keep_dims_0 = const()[name = tensor("reduce_mean_27_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_27_cast = reduce_mean(axes = reduce_mean_27_axes_0, keep_dims = reduce_mean_27_keep_dims_0, x = reshape_36_cast)[name = tensor("reduce_mean_27_cast")]; + tensor sub_18_cast = sub(x = reshape_36_cast, y = reduce_mean_27_cast)[name = tensor("sub_18_cast")]; + tensor square_9_cast = square(x = sub_18_cast)[name = tensor("square_9_cast")]; + tensor reduce_mean_29_axes_0 = const()[name = tensor("reduce_mean_29_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_29_keep_dims_0 = const()[name = tensor("reduce_mean_29_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_29_cast = reduce_mean(axes = reduce_mean_29_axes_0, keep_dims = reduce_mean_29_keep_dims_0, x = square_9_cast)[name = tensor("reduce_mean_29_cast")]; + tensor add_18_y_0_to_fp16 = const()[name = tensor("add_18_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_18_cast = add(x = reduce_mean_29_cast, y = add_18_y_0_to_fp16)[name = tensor("add_18_cast")]; + tensor sqrt_9_cast = sqrt(x = add_18_cast)[name = tensor("sqrt_9_cast")]; + tensor real_div_9_cast = real_div(x = sub_18_cast, y = sqrt_9_cast)[name = tensor("real_div_9_cast")]; + tensor reshape_37_shape_0 = const()[name = tensor("reshape_37_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_37_cast = reshape(shape = reshape_37_shape_0, x = real_div_9_cast)[name = tensor("reshape_37_cast")]; + tensor add_19_gamma_0_to_fp16 = const()[name = tensor("add_19_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(83544064)))]; + tensor add_19_beta_0_to_fp16 = const()[name = tensor("add_19_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(83545408)))]; + tensor add_19_epsilon_0_to_fp16 = const()[name = tensor("add_19_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_19_cast = batch_norm(beta = add_19_beta_0_to_fp16, epsilon = add_19_epsilon_0_to_fp16, gamma = add_19_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_37_cast)[name = tensor("add_19_cast")]; + tensor var_784 = const()[name = tensor("op_784"), val = tensor([1, 1])]; + tensor var_786 = const()[name = tensor("op_786"), val = tensor([1, 1])]; + tensor hidden_states_39_pad_type_0 = const()[name = tensor("hidden_states_39_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_39_pad_0 = const()[name = tensor("hidden_states_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_proj_in_weight_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_proj_in_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(83546752)))]; + tensor down_blocks_1_attentions_1_proj_in_bias_to_fp16 = const()[name = 
tensor("down_blocks_1_attentions_1_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(84366016)))]; + tensor hidden_states_39_cast = conv(bias = down_blocks_1_attentions_1_proj_in_bias_to_fp16, dilations = var_786, groups = var_282, pad = hidden_states_39_pad_0, pad_type = hidden_states_39_pad_type_0, strides = var_784, weight = down_blocks_1_attentions_1_proj_in_weight_to_fp16, x = add_19_cast)[name = tensor("hidden_states_39_cast")]; + tensor var_791 = const()[name = tensor("op_791"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_13_cast = reshape(shape = var_791, x = hidden_states_39_cast)[name = tensor("inputs_13_cast")]; + tensor var_801 = const()[name = tensor("op_801"), val = tensor([1])]; + tensor channels_mean_13_cast = reduce_mean(axes = var_801, keep_dims = var_277, x = inputs_13_cast)[name = tensor("channels_mean_13_cast")]; + tensor zero_mean_13_cast = sub(x = inputs_13_cast, y = channels_mean_13_cast)[name = tensor("zero_mean_13_cast")]; + tensor zero_mean_sq_13_cast = mul(x = zero_mean_13_cast, y = zero_mean_13_cast)[name = tensor("zero_mean_sq_13_cast")]; + tensor var_805 = const()[name = tensor("op_805"), val = tensor([1])]; + tensor var_806_cast = reduce_mean(axes = var_805, keep_dims = var_277, x = zero_mean_sq_13_cast)[name = tensor("op_806_cast")]; + tensor var_807_to_fp16 = const()[name = tensor("op_807_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_808_cast = add(x = var_806_cast, y = var_807_to_fp16)[name = tensor("op_808_cast")]; + tensor denom_13_epsilon_0_to_fp16 = const()[name = tensor("denom_13_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_13_cast = rsqrt(epsilon = denom_13_epsilon_0_to_fp16, x = var_808_cast)[name = tensor("denom_13_cast")]; + tensor out_13_cast = mul(x = zero_mean_13_cast, y = denom_13_cast)[name = tensor("out_13_cast")]; + tensor var_812_to_fp16 = const()[name = tensor("op_812_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(84367360)))]; + tensor var_813_cast = add(x = out_13_cast, y = var_812_to_fp16)[name = tensor("op_813_cast")]; + tensor var_815_to_fp16 = const()[name = tensor("op_815_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(84368704)))]; + tensor hidden_states_41_cast = mul(x = var_813_cast, y = var_815_to_fp16)[name = tensor("hidden_states_41_cast")]; + tensor var_822 = const()[name = tensor("op_822"), val = tensor([1, 1])]; + tensor var_824 = const()[name = tensor("op_824"), val = tensor([1, 1])]; + tensor q_9_pad_type_0 = const()[name = tensor("q_9_pad_type_0"), val = tensor("custom")]; + tensor q_9_pad_0 = const()[name = tensor("q_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(84370048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(84779712))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_9_cast = conv(dilations = var_824, groups = var_282, pad = q_9_pad_0, pad_type = q_9_pad_type_0, strides = var_822, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_41_cast)[name = tensor("q_9_cast")]; + tensor var_828 = const()[name = tensor("op_828"), 
val = tensor([1, 1])]; + tensor var_830 = const()[name = tensor("op_830"), val = tensor([1, 1])]; + tensor k_9_pad_type_0 = const()[name = tensor("k_9_pad_type_0"), val = tensor("custom")]; + tensor k_9_pad_0 = const()[name = tensor("k_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(84780288))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(85189952))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_9_cast = conv(dilations = var_830, groups = var_282, pad = k_9_pad_0, pad_type = k_9_pad_type_0, strides = var_828, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_41_cast)[name = tensor("k_9_cast")]; + tensor var_834 = const()[name = tensor("op_834"), val = tensor([1, 1])]; + tensor var_836 = const()[name = tensor("op_836"), val = tensor([1, 1])]; + tensor v_9_pad_type_0 = const()[name = tensor("v_9_pad_type_0"), val = tensor("custom")]; + tensor v_9_pad_0 = const()[name = tensor("v_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(85190528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(85600192))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_9_cast = conv(dilations = var_836, groups = var_282, pad = v_9_pad_0, pad_type = v_9_pad_type_0, strides = var_834, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_41_cast)[name = tensor("v_9_cast")]; + tensor var_840 = const()[name = tensor("op_840"), val = tensor([2, 10, 64, -1])]; + tensor var_841_cast = reshape(shape = var_840, x = q_9_cast)[name = tensor("op_841_cast")]; + tensor var_842 = const()[name = tensor("op_842"), val = tensor([2, 10, 64, -1])]; + tensor var_843_cast = reshape(shape = var_842, x = k_9_cast)[name = tensor("op_843_cast")]; + tensor var_844 = const()[name = tensor("op_844"), val = tensor([2, 10, 64, -1])]; + tensor var_845_cast = reshape(shape = var_844, x = v_9_cast)[name = tensor("op_845_cast")]; + tensor attn_weights_17_transpose_x_0 = const()[name = tensor("attn_weights_17_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_17_transpose_y_0 = const()[name = tensor("attn_weights_17_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_17_cast = matmul(transpose_x = attn_weights_17_transpose_x_0, transpose_y = attn_weights_17_transpose_y_0, x = var_841_cast, y = var_843_cast)[name = tensor("attn_weights_17_cast")]; + tensor attn_weights_19_cast = mul(x = attn_weights_17_cast, y = var_273_to_fp16)[name = tensor("attn_weights_19_cast")]; + tensor var_849_cast = softmax(axis = var_266, x = attn_weights_19_cast)[name = tensor("op_849_cast")]; + tensor attn_9_transpose_x_0 = const()[name = tensor("attn_9_transpose_x_0"), val = tensor(false)]; + tensor attn_9_transpose_y_0 = const()[name = tensor("attn_9_transpose_y_0"), val = tensor(true)]; + tensor attn_9_cast = matmul(transpose_x = attn_9_transpose_x_0, 
transpose_y = attn_9_transpose_y_0, x = var_845_cast, y = var_849_cast)[name = tensor("attn_9_cast")]; + tensor var_853 = const()[name = tensor("op_853"), val = tensor([2, 640, 1, -1])]; + tensor input_95_cast = reshape(shape = var_853, x = attn_9_cast)[name = tensor("input_95_cast")]; + tensor var_858 = const()[name = tensor("op_858"), val = tensor([1, 1])]; + tensor var_860 = const()[name = tensor("op_860"), val = tensor([1, 1])]; + tensor var_862_pad_type_0 = const()[name = tensor("op_862_pad_type_0"), val = tensor("custom")]; + tensor var_862_pad_0 = const()[name = tensor("op_862_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(85600768)))]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(86420032)))]; + tensor var_862_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_860, groups = var_282, pad = var_862_pad_0, pad_type = var_862_pad_type_0, strides = var_858, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16, x = input_95_cast)[name = tensor("op_862_cast")]; + tensor inputs_15_cast = add(x = var_862_cast, y = inputs_13_cast)[name = tensor("inputs_15_cast")]; + tensor var_866 = const()[name = tensor("op_866"), val = tensor([1])]; + tensor channels_mean_15_cast = reduce_mean(axes = var_866, keep_dims = var_277, x = inputs_15_cast)[name = tensor("channels_mean_15_cast")]; + tensor zero_mean_15_cast = sub(x = inputs_15_cast, y = channels_mean_15_cast)[name = tensor("zero_mean_15_cast")]; + tensor zero_mean_sq_15_cast = mul(x = zero_mean_15_cast, y = zero_mean_15_cast)[name = tensor("zero_mean_sq_15_cast")]; + tensor var_870 = const()[name = tensor("op_870"), val = tensor([1])]; + tensor var_871_cast = reduce_mean(axes = var_870, keep_dims = var_277, x = zero_mean_sq_15_cast)[name = tensor("op_871_cast")]; + tensor var_872_to_fp16 = const()[name = tensor("op_872_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_873_cast = add(x = var_871_cast, y = var_872_to_fp16)[name = tensor("op_873_cast")]; + tensor denom_15_epsilon_0_to_fp16 = const()[name = tensor("denom_15_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_15_cast = rsqrt(epsilon = denom_15_epsilon_0_to_fp16, x = var_873_cast)[name = tensor("denom_15_cast")]; + tensor out_15_cast = mul(x = zero_mean_15_cast, y = denom_15_cast)[name = tensor("out_15_cast")]; + tensor var_877_to_fp16 = const()[name = tensor("op_877_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(86421376)))]; + tensor var_878_cast = add(x = out_15_cast, y = var_877_to_fp16)[name = tensor("op_878_cast")]; + tensor var_880_to_fp16 = const()[name = tensor("op_880_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(86422720)))]; + tensor hidden_states_43_cast = mul(x = var_878_cast, y = var_880_to_fp16)[name = tensor("hidden_states_43_cast")]; + tensor var_887 = const()[name = tensor("op_887"), val = tensor([1, 1])]; + tensor var_889 = const()[name = tensor("op_889"), val = tensor([1, 
1])]; + tensor q_11_pad_type_0 = const()[name = tensor("q_11_pad_type_0"), val = tensor("custom")]; + tensor q_11_pad_0 = const()[name = tensor("q_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(86424064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(86731328))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_11_cast = conv(dilations = var_889, groups = var_282, pad = q_11_pad_0, pad_type = q_11_pad_type_0, strides = var_887, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_43_cast)[name = tensor("q_11_cast")]; + tensor var_893 = const()[name = tensor("op_893"), val = tensor([1, 1])]; + tensor var_895 = const()[name = tensor("op_895"), val = tensor([1, 1])]; + tensor k_11_pad_type_0 = const()[name = tensor("k_11_pad_type_0"), val = tensor("custom")]; + tensor k_11_pad_0 = const()[name = tensor("k_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(86731520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(87714624))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_11_cast = conv(dilations = var_895, groups = var_282, pad = k_11_pad_0, pad_type = k_11_pad_type_0, strides = var_893, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_11_cast")]; + tensor var_899 = const()[name = tensor("op_899"), val = tensor([1, 1])]; + tensor var_901 = const()[name = tensor("op_901"), val = tensor([1, 1])]; + tensor v_11_pad_type_0 = const()[name = tensor("v_11_pad_type_0"), val = tensor("custom")]; + tensor v_11_pad_0 = const()[name = tensor("v_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(87714816))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(89025600))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_11_cast = conv(dilations = var_901, groups = var_282, pad = v_11_pad_0, pad_type = v_11_pad_type_0, strides = var_899, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_11_cast")]; + tensor var_905 = const()[name = tensor("op_905"), val = tensor([2, 10, 64, -1])]; + tensor var_906_cast = reshape(shape = var_905, x = q_11_cast)[name = tensor("op_906_cast")]; + tensor var_907 = const()[name = tensor("op_907"), val = tensor([2, 10, 64, -1])]; + tensor var_908_cast = reshape(shape = var_907, x = k_11_cast)[name = tensor("op_908_cast")]; + tensor var_909 = const()[name = tensor("op_909"), val = tensor([2, 10, 64, -1])]; + tensor var_910_cast = 
reshape(shape = var_909, x = v_11_cast)[name = tensor("op_910_cast")]; + tensor attn_weights_21_transpose_x_0 = const()[name = tensor("attn_weights_21_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_21_transpose_y_0 = const()[name = tensor("attn_weights_21_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_21_cast = matmul(transpose_x = attn_weights_21_transpose_x_0, transpose_y = attn_weights_21_transpose_y_0, x = var_906_cast, y = var_908_cast)[name = tensor("attn_weights_21_cast")]; + tensor attn_weights_23_cast = mul(x = attn_weights_21_cast, y = var_273_to_fp16)[name = tensor("attn_weights_23_cast")]; + tensor var_914_cast = softmax(axis = var_266, x = attn_weights_23_cast)[name = tensor("op_914_cast")]; + tensor attn_11_transpose_x_0 = const()[name = tensor("attn_11_transpose_x_0"), val = tensor(false)]; + tensor attn_11_transpose_y_0 = const()[name = tensor("attn_11_transpose_y_0"), val = tensor(true)]; + tensor attn_11_cast = matmul(transpose_x = attn_11_transpose_x_0, transpose_y = attn_11_transpose_y_0, x = var_910_cast, y = var_914_cast)[name = tensor("attn_11_cast")]; + tensor var_918 = const()[name = tensor("op_918"), val = tensor([2, 640, 1, -1])]; + tensor input_97_cast = reshape(shape = var_918, x = attn_11_cast)[name = tensor("input_97_cast")]; + tensor var_923 = const()[name = tensor("op_923"), val = tensor([1, 1])]; + tensor var_925 = const()[name = tensor("op_925"), val = tensor([1, 1])]; + tensor var_927_pad_type_0 = const()[name = tensor("op_927_pad_type_0"), val = tensor("custom")]; + tensor var_927_pad_0 = const()[name = tensor("op_927_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(89026176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(89333440))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(89333632)))]; + tensor var_927_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_925, groups = var_282, pad = var_927_pad_0, pad_type = var_927_pad_type_0, strides = var_923, weight = down_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_97_cast)[name = tensor("op_927_cast")]; + tensor inputs_17_cast = add(x = var_927_cast, y = inputs_15_cast)[name = tensor("inputs_17_cast")]; + tensor var_931 = const()[name = tensor("op_931"), val = tensor([1])]; + tensor channels_mean_17_cast = reduce_mean(axes = var_931, keep_dims = var_277, x = inputs_17_cast)[name = tensor("channels_mean_17_cast")]; + tensor zero_mean_17_cast = sub(x = inputs_17_cast, y = channels_mean_17_cast)[name = tensor("zero_mean_17_cast")]; + tensor zero_mean_sq_17_cast = mul(x = zero_mean_17_cast, y = zero_mean_17_cast)[name = tensor("zero_mean_sq_17_cast")]; + tensor var_935 = const()[name = tensor("op_935"), val = tensor([1])]; + tensor var_936_cast = reduce_mean(axes = var_935, keep_dims = var_277, x = zero_mean_sq_17_cast)[name = tensor("op_936_cast")]; + tensor 
var_937_to_fp16 = const()[name = tensor("op_937_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_938_cast = add(x = var_936_cast, y = var_937_to_fp16)[name = tensor("op_938_cast")]; + tensor denom_17_epsilon_0_to_fp16 = const()[name = tensor("denom_17_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_17_cast = rsqrt(epsilon = denom_17_epsilon_0_to_fp16, x = var_938_cast)[name = tensor("denom_17_cast")]; + tensor out_17_cast = mul(x = zero_mean_17_cast, y = denom_17_cast)[name = tensor("out_17_cast")]; + tensor var_942_to_fp16 = const()[name = tensor("op_942_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(89334976)))]; + tensor var_943_cast = add(x = out_17_cast, y = var_942_to_fp16)[name = tensor("op_943_cast")]; + tensor var_945_to_fp16 = const()[name = tensor("op_945_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(89336320)))]; + tensor input_99_cast = mul(x = var_943_cast, y = var_945_to_fp16)[name = tensor("input_99_cast")]; + tensor var_953 = const()[name = tensor("op_953"), val = tensor([1, 1])]; + tensor var_955 = const()[name = tensor("op_955"), val = tensor([1, 1])]; + tensor var_957_pad_type_0 = const()[name = tensor("op_957_pad_type_0"), val = tensor("custom")]; + tensor var_957_pad_0 = const()[name = tensor("op_957_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(89337664)))]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(95891328)))]; + tensor var_957_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_955, groups = var_282, pad = var_957_pad_0, pad_type = var_957_pad_type_0, strides = var_953, weight = down_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16, x = input_99_cast)[name = tensor("op_957_cast")]; + tensor var_958_split_sizes_0 = const()[name = tensor("op_958_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_958_axis_0 = const()[name = tensor("op_958_axis_0"), val = tensor(1)]; + tensor var_958_cast_0, tensor var_958_cast_1 = split(axis = var_958_axis_0, split_sizes = var_958_split_sizes_0, x = var_957_cast)[name = tensor("op_958_cast")]; + tensor var_960_mode_0 = const()[name = tensor("op_960_mode_0"), val = tensor("EXACT")]; + tensor var_960_cast = gelu(mode = var_960_mode_0, x = var_958_cast_1)[name = tensor("op_960_cast")]; + tensor input_101_cast = mul(x = var_958_cast_0, y = var_960_cast)[name = tensor("input_101_cast")]; + tensor var_964 = const()[name = tensor("op_964"), val = tensor([1, 1])]; + tensor var_966 = const()[name = tensor("op_966"), val = tensor([1, 1])]; + tensor var_968_pad_type_0 = const()[name = tensor("op_968_pad_type_0"), val = tensor("custom")]; + tensor var_968_pad_0 = const()[name = tensor("op_968_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(95901632)))]; + tensor down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(99178496)))]; + tensor var_968_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_966, groups = var_282, pad = var_968_pad_0, pad_type = var_968_pad_type_0, strides = var_964, weight = down_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16, x = input_101_cast)[name = tensor("op_968_cast")]; + tensor inputs_19_cast = add(x = var_968_cast, y = inputs_17_cast)[name = tensor("inputs_19_cast")]; + tensor var_978 = const()[name = tensor("op_978"), val = tensor([1])]; + tensor channels_mean_19_cast = reduce_mean(axes = var_978, keep_dims = var_277, x = inputs_19_cast)[name = tensor("channels_mean_19_cast")]; + tensor zero_mean_19_cast = sub(x = inputs_19_cast, y = channels_mean_19_cast)[name = tensor("zero_mean_19_cast")]; + tensor zero_mean_sq_19_cast = mul(x = zero_mean_19_cast, y = zero_mean_19_cast)[name = tensor("zero_mean_sq_19_cast")]; + tensor var_982 = const()[name = tensor("op_982"), val = tensor([1])]; + tensor var_983_cast = reduce_mean(axes = var_982, keep_dims = var_277, x = zero_mean_sq_19_cast)[name = tensor("op_983_cast")]; + tensor var_984_to_fp16 = const()[name = tensor("op_984_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_985_cast = add(x = var_983_cast, y = var_984_to_fp16)[name = tensor("op_985_cast")]; + tensor denom_19_epsilon_0_to_fp16 = const()[name = tensor("denom_19_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_19_cast = rsqrt(epsilon = denom_19_epsilon_0_to_fp16, x = var_985_cast)[name = tensor("denom_19_cast")]; + tensor out_19_cast = mul(x = zero_mean_19_cast, y = denom_19_cast)[name = tensor("out_19_cast")]; + tensor var_989_to_fp16 = const()[name = tensor("op_989_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(99179840)))]; + tensor var_990_cast = add(x = out_19_cast, y = var_989_to_fp16)[name = tensor("op_990_cast")]; + tensor var_992_to_fp16 = const()[name = tensor("op_992_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(99181184)))]; + tensor hidden_states_47_cast = mul(x = var_990_cast, y = var_992_to_fp16)[name = tensor("hidden_states_47_cast")]; + tensor var_999 = const()[name = tensor("op_999"), val = tensor([1, 1])]; + tensor var_1001 = const()[name = tensor("op_1001"), val = tensor([1, 1])]; + tensor q_13_pad_type_0 = const()[name = tensor("q_13_pad_type_0"), val = tensor("custom")]; + tensor q_13_pad_0 = const()[name = tensor("q_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(99182528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(99592192))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_13_cast = conv(dilations = var_1001, groups = var_282, pad = q_13_pad_0, pad_type = q_13_pad_type_0, strides = var_999, weight = 
down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_47_cast)[name = tensor("q_13_cast")]; + tensor var_1005 = const()[name = tensor("op_1005"), val = tensor([1, 1])]; + tensor var_1007 = const()[name = tensor("op_1007"), val = tensor([1, 1])]; + tensor k_13_pad_type_0 = const()[name = tensor("k_13_pad_type_0"), val = tensor("custom")]; + tensor k_13_pad_0 = const()[name = tensor("k_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(99592768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100002432))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_13_cast = conv(dilations = var_1007, groups = var_282, pad = k_13_pad_0, pad_type = k_13_pad_type_0, strides = var_1005, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_47_cast)[name = tensor("k_13_cast")]; + tensor var_1011 = const()[name = tensor("op_1011"), val = tensor([1, 1])]; + tensor var_1013 = const()[name = tensor("op_1013"), val = tensor([1, 1])]; + tensor v_13_pad_type_0 = const()[name = tensor("v_13_pad_type_0"), val = tensor("custom")]; + tensor v_13_pad_0 = const()[name = tensor("v_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100003008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100412672))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor v_13_cast = conv(dilations = var_1013, groups = var_282, pad = v_13_pad_0, pad_type = v_13_pad_type_0, strides = var_1011, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_47_cast)[name = tensor("v_13_cast")]; + tensor var_1017 = const()[name = tensor("op_1017"), val = tensor([2, 10, 64, -1])]; + tensor var_1018_cast = reshape(shape = var_1017, x = q_13_cast)[name = tensor("op_1018_cast")]; + tensor var_1019 = const()[name = tensor("op_1019"), val = tensor([2, 10, 64, -1])]; + tensor var_1020_cast = reshape(shape = var_1019, x = k_13_cast)[name = tensor("op_1020_cast")]; + tensor var_1021 = const()[name = tensor("op_1021"), val = tensor([2, 10, 64, -1])]; + tensor var_1022_cast = reshape(shape = var_1021, x = v_13_cast)[name = tensor("op_1022_cast")]; + tensor attn_weights_25_transpose_x_0 = const()[name = tensor("attn_weights_25_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_25_transpose_y_0 = const()[name = tensor("attn_weights_25_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_25_cast = matmul(transpose_x = attn_weights_25_transpose_x_0, transpose_y = attn_weights_25_transpose_y_0, x = var_1018_cast, y = var_1020_cast)[name = tensor("attn_weights_25_cast")]; + tensor attn_weights_27_cast = mul(x = attn_weights_25_cast, y = var_273_to_fp16)[name = tensor("attn_weights_27_cast")]; + tensor var_1026_cast = softmax(axis = var_266, x = attn_weights_27_cast)[name = tensor("op_1026_cast")]; + tensor 
attn_13_transpose_x_0 = const()[name = tensor("attn_13_transpose_x_0"), val = tensor(false)]; + tensor attn_13_transpose_y_0 = const()[name = tensor("attn_13_transpose_y_0"), val = tensor(true)]; + tensor attn_13_cast = matmul(transpose_x = attn_13_transpose_x_0, transpose_y = attn_13_transpose_y_0, x = var_1022_cast, y = var_1026_cast)[name = tensor("attn_13_cast")]; + tensor var_1030 = const()[name = tensor("op_1030"), val = tensor([2, 640, 1, -1])]; + tensor input_103_cast = reshape(shape = var_1030, x = attn_13_cast)[name = tensor("input_103_cast")]; + tensor var_1035 = const()[name = tensor("op_1035"), val = tensor([1, 1])]; + tensor var_1037 = const()[name = tensor("op_1037"), val = tensor([1, 1])]; + tensor var_1039_pad_type_0 = const()[name = tensor("op_1039_pad_type_0"), val = tensor("custom")]; + tensor var_1039_pad_0 = const()[name = tensor("op_1039_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100413248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100822912))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100823488)))]; + tensor var_1039_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_1037, groups = var_282, pad = var_1039_pad_0, pad_type = var_1039_pad_type_0, strides = var_1035, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_103_cast)[name = tensor("op_1039_cast")]; + tensor inputs_21_cast = add(x = var_1039_cast, y = inputs_19_cast)[name = tensor("inputs_21_cast")]; + tensor var_1043 = const()[name = tensor("op_1043"), val = tensor([1])]; + tensor channels_mean_21_cast = reduce_mean(axes = var_1043, keep_dims = var_277, x = inputs_21_cast)[name = tensor("channels_mean_21_cast")]; + tensor zero_mean_21_cast = sub(x = inputs_21_cast, y = channels_mean_21_cast)[name = tensor("zero_mean_21_cast")]; + tensor zero_mean_sq_21_cast = mul(x = zero_mean_21_cast, y = zero_mean_21_cast)[name = tensor("zero_mean_sq_21_cast")]; + tensor var_1047 = const()[name = tensor("op_1047"), val = tensor([1])]; + tensor var_1048_cast = reduce_mean(axes = var_1047, keep_dims = var_277, x = zero_mean_sq_21_cast)[name = tensor("op_1048_cast")]; + tensor var_1049_to_fp16 = const()[name = tensor("op_1049_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1050_cast = add(x = var_1048_cast, y = var_1049_to_fp16)[name = tensor("op_1050_cast")]; + tensor denom_21_epsilon_0_to_fp16 = const()[name = tensor("denom_21_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_21_cast = rsqrt(epsilon = denom_21_epsilon_0_to_fp16, x = var_1050_cast)[name = tensor("denom_21_cast")]; + tensor out_21_cast = mul(x = zero_mean_21_cast, y = denom_21_cast)[name = tensor("out_21_cast")]; + tensor var_1054_to_fp16 = const()[name = tensor("op_1054_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100824832)))]; + tensor var_1055_cast = 
add(x = out_21_cast, y = var_1054_to_fp16)[name = tensor("op_1055_cast")]; + tensor var_1057_to_fp16 = const()[name = tensor("op_1057_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100826176)))]; + tensor hidden_states_49_cast = mul(x = var_1055_cast, y = var_1057_to_fp16)[name = tensor("hidden_states_49_cast")]; + tensor var_1064 = const()[name = tensor("op_1064"), val = tensor([1, 1])]; + tensor var_1066 = const()[name = tensor("op_1066"), val = tensor([1, 1])]; + tensor q_15_pad_type_0 = const()[name = tensor("q_15_pad_type_0"), val = tensor("custom")]; + tensor q_15_pad_0 = const()[name = tensor("q_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(100827520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(101134784))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_15_cast = conv(dilations = var_1066, groups = var_282, pad = q_15_pad_0, pad_type = q_15_pad_type_0, strides = var_1064, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_49_cast)[name = tensor("q_15_cast")]; + tensor var_1070 = const()[name = tensor("op_1070"), val = tensor([1, 1])]; + tensor var_1072 = const()[name = tensor("op_1072"), val = tensor([1, 1])]; + tensor k_15_pad_type_0 = const()[name = tensor("k_15_pad_type_0"), val = tensor("custom")]; + tensor k_15_pad_0 = const()[name = tensor("k_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(101134976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(102118080))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_15_cast = conv(dilations = var_1072, groups = var_282, pad = k_15_pad_0, pad_type = k_15_pad_type_0, strides = var_1070, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_15_cast")]; + tensor var_1076 = const()[name = tensor("op_1076"), val = tensor([1, 1])]; + tensor var_1078 = const()[name = tensor("op_1078"), val = tensor([1, 1])]; + tensor v_15_pad_type_0 = const()[name = tensor("v_15_pad_type_0"), val = tensor("custom")]; + tensor v_15_pad_0 = const()[name = tensor("v_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(102118272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(103429056))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_15_cast = conv(dilations = var_1078, groups = var_282, pad = v_15_pad_0, pad_type = v_15_pad_type_0, strides = var_1076, weight = 
down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_15_cast")]; + tensor var_1082 = const()[name = tensor("op_1082"), val = tensor([2, 10, 64, -1])]; + tensor var_1083_cast = reshape(shape = var_1082, x = q_15_cast)[name = tensor("op_1083_cast")]; + tensor var_1084 = const()[name = tensor("op_1084"), val = tensor([2, 10, 64, -1])]; + tensor var_1085_cast = reshape(shape = var_1084, x = k_15_cast)[name = tensor("op_1085_cast")]; + tensor var_1086 = const()[name = tensor("op_1086"), val = tensor([2, 10, 64, -1])]; + tensor var_1087_cast = reshape(shape = var_1086, x = v_15_cast)[name = tensor("op_1087_cast")]; + tensor attn_weights_29_transpose_x_0 = const()[name = tensor("attn_weights_29_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_29_transpose_y_0 = const()[name = tensor("attn_weights_29_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_29_cast = matmul(transpose_x = attn_weights_29_transpose_x_0, transpose_y = attn_weights_29_transpose_y_0, x = var_1083_cast, y = var_1085_cast)[name = tensor("attn_weights_29_cast")]; + tensor attn_weights_31_cast = mul(x = attn_weights_29_cast, y = var_273_to_fp16)[name = tensor("attn_weights_31_cast")]; + tensor var_1091_cast = softmax(axis = var_266, x = attn_weights_31_cast)[name = tensor("op_1091_cast")]; + tensor attn_15_transpose_x_0 = const()[name = tensor("attn_15_transpose_x_0"), val = tensor(false)]; + tensor attn_15_transpose_y_0 = const()[name = tensor("attn_15_transpose_y_0"), val = tensor(true)]; + tensor attn_15_cast = matmul(transpose_x = attn_15_transpose_x_0, transpose_y = attn_15_transpose_y_0, x = var_1087_cast, y = var_1091_cast)[name = tensor("attn_15_cast")]; + tensor var_1095 = const()[name = tensor("op_1095"), val = tensor([2, 640, 1, -1])]; + tensor input_105_cast = reshape(shape = var_1095, x = attn_15_cast)[name = tensor("input_105_cast")]; + tensor var_1100 = const()[name = tensor("op_1100"), val = tensor([1, 1])]; + tensor var_1102 = const()[name = tensor("op_1102"), val = tensor([1, 1])]; + tensor var_1104_pad_type_0 = const()[name = tensor("op_1104_pad_type_0"), val = tensor("custom")]; + tensor var_1104_pad_0 = const()[name = tensor("op_1104_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(103429632))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(103839296))), name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(103839872)))]; + tensor var_1104_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_1102, groups = var_282, pad = var_1104_pad_0, pad_type = var_1104_pad_type_0, strides = var_1100, weight = down_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_105_cast)[name = tensor("op_1104_cast")]; + tensor inputs_23_cast = add(x = var_1104_cast, y = inputs_21_cast)[name = tensor("inputs_23_cast")]; + tensor var_1108 = 
const()[name = tensor("op_1108"), val = tensor([1])]; + tensor channels_mean_23_cast = reduce_mean(axes = var_1108, keep_dims = var_277, x = inputs_23_cast)[name = tensor("channels_mean_23_cast")]; + tensor zero_mean_23_cast = sub(x = inputs_23_cast, y = channels_mean_23_cast)[name = tensor("zero_mean_23_cast")]; + tensor zero_mean_sq_23_cast = mul(x = zero_mean_23_cast, y = zero_mean_23_cast)[name = tensor("zero_mean_sq_23_cast")]; + tensor var_1112 = const()[name = tensor("op_1112"), val = tensor([1])]; + tensor var_1113_cast = reduce_mean(axes = var_1112, keep_dims = var_277, x = zero_mean_sq_23_cast)[name = tensor("op_1113_cast")]; + tensor var_1114_to_fp16 = const()[name = tensor("op_1114_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1115_cast = add(x = var_1113_cast, y = var_1114_to_fp16)[name = tensor("op_1115_cast")]; + tensor denom_23_epsilon_0_to_fp16 = const()[name = tensor("denom_23_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_23_cast = rsqrt(epsilon = denom_23_epsilon_0_to_fp16, x = var_1115_cast)[name = tensor("denom_23_cast")]; + tensor out_23_cast = mul(x = zero_mean_23_cast, y = denom_23_cast)[name = tensor("out_23_cast")]; + tensor var_1119_to_fp16 = const()[name = tensor("op_1119_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(103841216)))]; + tensor var_1120_cast = add(x = out_23_cast, y = var_1119_to_fp16)[name = tensor("op_1120_cast")]; + tensor var_1122_to_fp16 = const()[name = tensor("op_1122_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(103842560)))]; + tensor input_107_cast = mul(x = var_1120_cast, y = var_1122_to_fp16)[name = tensor("input_107_cast")]; + tensor var_1130 = const()[name = tensor("op_1130"), val = tensor([1, 1])]; + tensor var_1132 = const()[name = tensor("op_1132"), val = tensor([1, 1])]; + tensor var_1134_pad_type_0 = const()[name = tensor("op_1134_pad_type_0"), val = tensor("custom")]; + tensor var_1134_pad_0 = const()[name = tensor("op_1134_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(103843904)))]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(110397568)))]; + tensor var_1134_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_1132, groups = var_282, pad = var_1134_pad_0, pad_type = var_1134_pad_type_0, strides = var_1130, weight = down_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16, x = input_107_cast)[name = tensor("op_1134_cast")]; + tensor var_1135_split_sizes_0 = const()[name = tensor("op_1135_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_1135_axis_0 = const()[name = tensor("op_1135_axis_0"), val = tensor(1)]; + tensor var_1135_cast_0, tensor var_1135_cast_1 = split(axis = var_1135_axis_0, split_sizes = var_1135_split_sizes_0, x = var_1134_cast)[name = tensor("op_1135_cast")]; + tensor var_1137_mode_0 = const()[name = tensor("op_1137_mode_0"), val = tensor("EXACT")]; + tensor var_1137_cast = gelu(mode = var_1137_mode_0, x = 
var_1135_cast_1)[name = tensor("op_1137_cast")]; + tensor input_109_cast = mul(x = var_1135_cast_0, y = var_1137_cast)[name = tensor("input_109_cast")]; + tensor var_1141 = const()[name = tensor("op_1141"), val = tensor([1, 1])]; + tensor var_1143 = const()[name = tensor("op_1143"), val = tensor([1, 1])]; + tensor var_1145_pad_type_0 = const()[name = tensor("op_1145_pad_type_0"), val = tensor("custom")]; + tensor var_1145_pad_0 = const()[name = tensor("op_1145_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(110407872)))]; + tensor down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113684736)))]; + tensor var_1145_cast = conv(bias = down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_1143, groups = var_282, pad = var_1145_pad_0, pad_type = var_1145_pad_type_0, strides = var_1141, weight = down_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16, x = input_109_cast)[name = tensor("op_1145_cast")]; + tensor hidden_states_53_cast = add(x = var_1145_cast, y = inputs_23_cast)[name = tensor("hidden_states_53_cast")]; + tensor var_1147 = const()[name = tensor("op_1147"), val = tensor([2, 640, 64, 64])]; + tensor input_111_cast = reshape(shape = var_1147, x = hidden_states_53_cast)[name = tensor("input_111_cast")]; + tensor var_1151 = const()[name = tensor("op_1151"), val = tensor([1, 1])]; + tensor var_1153 = const()[name = tensor("op_1153"), val = tensor([1, 1])]; + tensor hidden_states_55_pad_type_0 = const()[name = tensor("hidden_states_55_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_55_pad_0 = const()[name = tensor("hidden_states_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_1_attentions_1_proj_out_weight_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_proj_out_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(113686080)))]; + tensor down_blocks_1_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("down_blocks_1_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(114505344)))]; + tensor hidden_states_55_cast = conv(bias = down_blocks_1_attentions_1_proj_out_bias_to_fp16, dilations = var_1153, groups = var_282, pad = hidden_states_55_pad_0, pad_type = hidden_states_55_pad_type_0, strides = var_1151, weight = down_blocks_1_attentions_1_proj_out_weight_to_fp16, x = input_111_cast)[name = tensor("hidden_states_55_cast")]; + tensor input_113_cast = add(x = hidden_states_55_cast, y = hidden_states_37_cast)[name = tensor("input_113_cast")]; + tensor var_1160 = const()[name = tensor("op_1160"), val = tensor([2, 2])]; + tensor var_1162 = const()[name = tensor("op_1162"), val = tensor([1, 1])]; + tensor input_115_pad_type_0 = const()[name = tensor("input_115_pad_type_0"), val = tensor("custom")]; + tensor input_115_pad_0 = const()[name = tensor("input_115_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_1_downsamplers_0_conv_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(114506688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(118193152))), name = tensor("down_blocks_1_downsamplers_0_conv_weight_to_fp16_palettized"), shape = tensor([640, 640, 3, 3])]; + tensor down_blocks_1_downsamplers_0_conv_bias_to_fp16 = const()[name = tensor("down_blocks_1_downsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(118193728)))]; + tensor input_115_cast = conv(bias = down_blocks_1_downsamplers_0_conv_bias_to_fp16, dilations = var_1162, groups = var_282, pad = input_115_pad_0, pad_type = input_115_pad_type_0, strides = var_1160, weight = down_blocks_1_downsamplers_0_conv_weight_to_fp16_palettized, x = input_113_cast)[name = tensor("input_115_cast")]; + tensor var_1170 = const()[name = tensor("op_1170"), val = tensor(3)]; + tensor var_1181 = const()[name = tensor("op_1181"), val = tensor(true)]; + tensor var_1186 = const()[name = tensor("op_1186"), val = tensor(1)]; + tensor reshape_40_shape_0 = const()[name = tensor("reshape_40_shape_0"), val = tensor([2, 32, 20, 32, 32])]; + tensor reshape_40_cast = reshape(shape = reshape_40_shape_0, x = input_115_cast)[name = tensor("reshape_40_cast")]; + tensor reduce_mean_30_axes_0 = const()[name = tensor("reduce_mean_30_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_30_keep_dims_0 = const()[name = tensor("reduce_mean_30_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_30_cast = reduce_mean(axes = reduce_mean_30_axes_0, keep_dims = reduce_mean_30_keep_dims_0, x = reshape_40_cast)[name = tensor("reduce_mean_30_cast")]; + tensor sub_20_cast = sub(x = reshape_40_cast, y = reduce_mean_30_cast)[name = tensor("sub_20_cast")]; + tensor square_10_cast = square(x = sub_20_cast)[name = tensor("square_10_cast")]; + tensor reduce_mean_32_axes_0 = const()[name = tensor("reduce_mean_32_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_32_keep_dims_0 = const()[name = tensor("reduce_mean_32_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_32_cast = reduce_mean(axes = reduce_mean_32_axes_0, keep_dims = reduce_mean_32_keep_dims_0, x = square_10_cast)[name = tensor("reduce_mean_32_cast")]; + tensor add_20_y_0_to_fp16 = const()[name = tensor("add_20_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_20_cast = add(x = reduce_mean_32_cast, y = add_20_y_0_to_fp16)[name = tensor("add_20_cast")]; + tensor sqrt_10_cast = sqrt(x = add_20_cast)[name = tensor("sqrt_10_cast")]; + tensor real_div_10_cast = real_div(x = sub_20_cast, y = sqrt_10_cast)[name = tensor("real_div_10_cast")]; + tensor reshape_41_shape_0 = const()[name = tensor("reshape_41_shape_0"), val = tensor([2, 640, 32, 32])]; + tensor reshape_41_cast = reshape(shape = reshape_41_shape_0, x = real_div_10_cast)[name = tensor("reshape_41_cast")]; + tensor add_21_gamma_0_to_fp16 = const()[name = tensor("add_21_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(118195072)))]; + tensor add_21_beta_0_to_fp16 = const()[name = tensor("add_21_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(118196416)))]; + tensor add_21_epsilon_0_to_fp16 = const()[name = tensor("add_21_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_21_cast = batch_norm(beta = add_21_beta_0_to_fp16, epsilon = add_21_epsilon_0_to_fp16, gamma = add_21_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = 
add_11_variance_0_to_fp16, x = reshape_41_cast)[name = tensor("add_21_cast")]; + tensor input_119_cast = silu(x = add_21_cast)[name = tensor("input_119_cast")]; + tensor var_1207 = const()[name = tensor("op_1207"), val = tensor([1, 1])]; + tensor var_1209 = const()[name = tensor("op_1209"), val = tensor([1, 1])]; + tensor hidden_states_57_pad_type_0 = const()[name = tensor("hidden_states_57_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_57_pad_0 = const()[name = tensor("hidden_states_57_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_2_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(118197760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(123727424))), name = tensor("down_blocks_2_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 640, 3, 3])]; + tensor down_blocks_2_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(123727616)))]; + tensor hidden_states_57_cast = conv(bias = down_blocks_2_resnets_0_conv1_bias_to_fp16, dilations = var_1209, groups = var_1186, pad = hidden_states_57_pad_0, pad_type = hidden_states_57_pad_type_0, strides = var_1207, weight = down_blocks_2_resnets_0_conv1_weight_to_fp16_palettized, x = input_119_cast)[name = tensor("hidden_states_57_cast")]; + tensor var_1215 = const()[name = tensor("op_1215"), val = tensor([1, 1])]; + tensor var_1217 = const()[name = tensor("op_1217"), val = tensor([1, 1])]; + tensor temb_9_pad_type_0 = const()[name = tensor("temb_9_pad_type_0"), val = tensor("custom")]; + tensor temb_9_pad_0 = const()[name = tensor("temb_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(123730240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(124959104))), name = tensor("down_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(124959296)))]; + tensor temb_9_cast = conv(bias = down_blocks_2_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_1217, groups = var_1186, pad = temb_9_pad_0, pad_type = temb_9_pad_type_0, strides = var_1215, weight = down_blocks_2_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_9_cast")]; + tensor input_123_cast = add(x = hidden_states_57_cast, y = temb_9_cast)[name = tensor("input_123_cast")]; + tensor reshape_44_shape_0 = const()[name = tensor("reshape_44_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_44_cast = reshape(shape = reshape_44_shape_0, x = input_123_cast)[name = tensor("reshape_44_cast")]; + tensor reduce_mean_33_axes_0 = const()[name = tensor("reduce_mean_33_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_33_keep_dims_0 = const()[name = tensor("reduce_mean_33_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_33_cast = reduce_mean(axes = reduce_mean_33_axes_0, keep_dims = reduce_mean_33_keep_dims_0, x = reshape_44_cast)[name = 
tensor("reduce_mean_33_cast")]; + tensor sub_22_cast = sub(x = reshape_44_cast, y = reduce_mean_33_cast)[name = tensor("sub_22_cast")]; + tensor square_11_cast = square(x = sub_22_cast)[name = tensor("square_11_cast")]; + tensor reduce_mean_35_axes_0 = const()[name = tensor("reduce_mean_35_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_35_keep_dims_0 = const()[name = tensor("reduce_mean_35_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_35_cast = reduce_mean(axes = reduce_mean_35_axes_0, keep_dims = reduce_mean_35_keep_dims_0, x = square_11_cast)[name = tensor("reduce_mean_35_cast")]; + tensor add_22_y_0_to_fp16 = const()[name = tensor("add_22_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_22_cast = add(x = reduce_mean_35_cast, y = add_22_y_0_to_fp16)[name = tensor("add_22_cast")]; + tensor sqrt_11_cast = sqrt(x = add_22_cast)[name = tensor("sqrt_11_cast")]; + tensor real_div_11_cast = real_div(x = sub_22_cast, y = sqrt_11_cast)[name = tensor("real_div_11_cast")]; + tensor reshape_45_shape_0 = const()[name = tensor("reshape_45_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_45_cast = reshape(shape = reshape_45_shape_0, x = real_div_11_cast)[name = tensor("reshape_45_cast")]; + tensor add_23_mean_0_to_fp16 = const()[name = tensor("add_23_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(124961920)))]; + tensor add_23_variance_0_to_fp16 = const()[name = tensor("add_23_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(124964544)))]; + tensor add_23_gamma_0_to_fp16 = const()[name = tensor("add_23_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(124967168)))]; + tensor add_23_beta_0_to_fp16 = const()[name = tensor("add_23_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(124969792)))]; + tensor add_23_epsilon_0_to_fp16 = const()[name = tensor("add_23_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_23_cast = batch_norm(beta = add_23_beta_0_to_fp16, epsilon = add_23_epsilon_0_to_fp16, gamma = add_23_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_45_cast)[name = tensor("add_23_cast")]; + tensor input_127_cast = silu(x = add_23_cast)[name = tensor("input_127_cast")]; + tensor var_1227 = const()[name = tensor("op_1227"), val = tensor([1, 1])]; + tensor var_1229 = const()[name = tensor("op_1229"), val = tensor([1, 1])]; + tensor hidden_states_59_pad_type_0 = const()[name = tensor("hidden_states_59_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_59_pad_0 = const()[name = tensor("hidden_states_59_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_2_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(124972416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139718080))), name = tensor("down_blocks_2_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor down_blocks_2_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139718656)))]; + tensor hidden_states_59_cast = conv(bias = down_blocks_2_resnets_0_conv2_bias_to_fp16, dilations = var_1229, 
groups = var_1186, pad = hidden_states_59_pad_0, pad_type = hidden_states_59_pad_type_0, strides = var_1227, weight = down_blocks_2_resnets_0_conv2_weight_to_fp16_palettized, x = input_127_cast)[name = tensor("hidden_states_59_cast")]; + tensor var_1234 = const()[name = tensor("op_1234"), val = tensor([1, 1])]; + tensor var_1236 = const()[name = tensor("op_1236"), val = tensor([1, 1])]; + tensor x_3_pad_type_0 = const()[name = tensor("x_3_pad_type_0"), val = tensor("custom")]; + tensor x_3_pad_0 = const()[name = tensor("x_3_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_resnets_0_conv_shortcut_weight_to_fp16 = const()[name = tensor("down_blocks_2_resnets_0_conv_shortcut_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(139721280)))]; + tensor down_blocks_2_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(141359744)))]; + tensor x_3_cast = conv(bias = down_blocks_2_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_1236, groups = var_1186, pad = x_3_pad_0, pad_type = x_3_pad_type_0, strides = var_1234, weight = down_blocks_2_resnets_0_conv_shortcut_weight_to_fp16, x = input_115_cast)[name = tensor("x_3_cast")]; + tensor hidden_states_61_cast = add(x = x_3_cast, y = hidden_states_59_cast)[name = tensor("hidden_states_61_cast")]; + tensor reshape_48_shape_0 = const()[name = tensor("reshape_48_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_48_cast = reshape(shape = reshape_48_shape_0, x = hidden_states_61_cast)[name = tensor("reshape_48_cast")]; + tensor reduce_mean_36_axes_0 = const()[name = tensor("reduce_mean_36_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_36_keep_dims_0 = const()[name = tensor("reduce_mean_36_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_36_cast = reduce_mean(axes = reduce_mean_36_axes_0, keep_dims = reduce_mean_36_keep_dims_0, x = reshape_48_cast)[name = tensor("reduce_mean_36_cast")]; + tensor sub_24_cast = sub(x = reshape_48_cast, y = reduce_mean_36_cast)[name = tensor("sub_24_cast")]; + tensor square_12_cast = square(x = sub_24_cast)[name = tensor("square_12_cast")]; + tensor reduce_mean_38_axes_0 = const()[name = tensor("reduce_mean_38_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_38_keep_dims_0 = const()[name = tensor("reduce_mean_38_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_38_cast = reduce_mean(axes = reduce_mean_38_axes_0, keep_dims = reduce_mean_38_keep_dims_0, x = square_12_cast)[name = tensor("reduce_mean_38_cast")]; + tensor add_24_y_0_to_fp16 = const()[name = tensor("add_24_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_24_cast = add(x = reduce_mean_38_cast, y = add_24_y_0_to_fp16)[name = tensor("add_24_cast")]; + tensor sqrt_12_cast = sqrt(x = add_24_cast)[name = tensor("sqrt_12_cast")]; + tensor real_div_12_cast = real_div(x = sub_24_cast, y = sqrt_12_cast)[name = tensor("real_div_12_cast")]; + tensor reshape_49_shape_0 = const()[name = tensor("reshape_49_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_49_cast = reshape(shape = reshape_49_shape_0, x = real_div_12_cast)[name = tensor("reshape_49_cast")]; + tensor add_25_gamma_0_to_fp16 = const()[name = tensor("add_25_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(141362368)))]; + tensor add_25_beta_0_to_fp16 = const()[name = 
tensor("add_25_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(141364992)))]; + tensor add_25_epsilon_0_to_fp16 = const()[name = tensor("add_25_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_25_cast = batch_norm(beta = add_25_beta_0_to_fp16, epsilon = add_25_epsilon_0_to_fp16, gamma = add_25_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_49_cast)[name = tensor("add_25_cast")]; + tensor var_1274 = const()[name = tensor("op_1274"), val = tensor([1, 1])]; + tensor var_1276 = const()[name = tensor("op_1276"), val = tensor([1, 1])]; + tensor hidden_states_63_pad_type_0 = const()[name = tensor("hidden_states_63_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_63_pad_0 = const()[name = tensor("hidden_states_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(141367616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(143006080))), name = tensor("down_blocks_2_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(143006656)))]; + tensor hidden_states_63_cast = conv(bias = down_blocks_2_attentions_0_proj_in_bias_to_fp16, dilations = var_1276, groups = var_1186, pad = hidden_states_63_pad_0, pad_type = hidden_states_63_pad_type_0, strides = var_1274, weight = down_blocks_2_attentions_0_proj_in_weight_to_fp16_palettized, x = add_25_cast)[name = tensor("hidden_states_63_cast")]; + tensor var_1281 = const()[name = tensor("op_1281"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_25_cast = reshape(shape = var_1281, x = hidden_states_63_cast)[name = tensor("inputs_25_cast")]; + tensor var_1291 = const()[name = tensor("op_1291"), val = tensor([1])]; + tensor channels_mean_25_cast = reduce_mean(axes = var_1291, keep_dims = var_1181, x = inputs_25_cast)[name = tensor("channels_mean_25_cast")]; + tensor zero_mean_25_cast = sub(x = inputs_25_cast, y = channels_mean_25_cast)[name = tensor("zero_mean_25_cast")]; + tensor zero_mean_sq_25_cast = mul(x = zero_mean_25_cast, y = zero_mean_25_cast)[name = tensor("zero_mean_sq_25_cast")]; + tensor var_1295 = const()[name = tensor("op_1295"), val = tensor([1])]; + tensor var_1296_cast = reduce_mean(axes = var_1295, keep_dims = var_1181, x = zero_mean_sq_25_cast)[name = tensor("op_1296_cast")]; + tensor var_1297_to_fp16 = const()[name = tensor("op_1297_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1298_cast = add(x = var_1296_cast, y = var_1297_to_fp16)[name = tensor("op_1298_cast")]; + tensor denom_25_epsilon_0_to_fp16 = const()[name = tensor("denom_25_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_25_cast = rsqrt(epsilon = denom_25_epsilon_0_to_fp16, x = var_1298_cast)[name = tensor("denom_25_cast")]; + tensor out_25_cast = mul(x = zero_mean_25_cast, y = denom_25_cast)[name = tensor("out_25_cast")]; + tensor var_1302_to_fp16 = const()[name = tensor("op_1302_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(143009280)))]; + tensor var_1303_cast = add(x = out_25_cast, y = var_1302_to_fp16)[name = 
tensor("op_1303_cast")]; + tensor var_1305_to_fp16 = const()[name = tensor("op_1305_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(143011904)))]; + tensor hidden_states_65_cast = mul(x = var_1303_cast, y = var_1305_to_fp16)[name = tensor("hidden_states_65_cast")]; + tensor var_1312 = const()[name = tensor("op_1312"), val = tensor([1, 1])]; + tensor var_1314 = const()[name = tensor("op_1314"), val = tensor([1, 1])]; + tensor q_17_pad_type_0 = const()[name = tensor("q_17_pad_type_0"), val = tensor("custom")]; + tensor q_17_pad_0 = const()[name = tensor("q_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(143014528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(143833792))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_17_cast = conv(dilations = var_1314, groups = var_1186, pad = q_17_pad_0, pad_type = q_17_pad_type_0, strides = var_1312, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_65_cast)[name = tensor("q_17_cast")]; + tensor var_1318 = const()[name = tensor("op_1318"), val = tensor([1, 1])]; + tensor var_1320 = const()[name = tensor("op_1320"), val = tensor([1, 1])]; + tensor k_17_pad_type_0 = const()[name = tensor("k_17_pad_type_0"), val = tensor("custom")]; + tensor k_17_pad_0 = const()[name = tensor("k_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(143833920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(144653184))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_17_cast = conv(dilations = var_1320, groups = var_1186, pad = k_17_pad_0, pad_type = k_17_pad_type_0, strides = var_1318, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_65_cast)[name = tensor("k_17_cast")]; + tensor var_1324 = const()[name = tensor("op_1324"), val = tensor([1, 1])]; + tensor var_1326 = const()[name = tensor("op_1326"), val = tensor([1, 1])]; + tensor v_17_pad_type_0 = const()[name = tensor("v_17_pad_type_0"), val = tensor("custom")]; + tensor v_17_pad_0 = const()[name = tensor("v_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(144653312))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146291776))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_17_cast = conv(dilations = var_1326, groups = var_1186, pad = v_17_pad_0, pad_type = v_17_pad_type_0, strides = var_1324, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_65_cast)[name = 
tensor("v_17_cast")]; + tensor var_1330 = const()[name = tensor("op_1330"), val = tensor([2, 20, 64, -1])]; + tensor var_1331_cast = reshape(shape = var_1330, x = q_17_cast)[name = tensor("op_1331_cast")]; + tensor var_1332 = const()[name = tensor("op_1332"), val = tensor([2, 20, 64, -1])]; + tensor var_1333_cast = reshape(shape = var_1332, x = k_17_cast)[name = tensor("op_1333_cast")]; + tensor var_1334 = const()[name = tensor("op_1334"), val = tensor([2, 20, 64, -1])]; + tensor var_1335_cast = reshape(shape = var_1334, x = v_17_cast)[name = tensor("op_1335_cast")]; + tensor attn_weights_33_transpose_x_0 = const()[name = tensor("attn_weights_33_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_33_transpose_y_0 = const()[name = tensor("attn_weights_33_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_33_cast = matmul(transpose_x = attn_weights_33_transpose_x_0, transpose_y = attn_weights_33_transpose_y_0, x = var_1331_cast, y = var_1333_cast)[name = tensor("attn_weights_33_cast")]; + tensor var_1177_to_fp16 = const()[name = tensor("op_1177_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_35_cast = mul(x = attn_weights_33_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_35_cast")]; + tensor var_1339_cast = softmax(axis = var_1170, x = attn_weights_35_cast)[name = tensor("op_1339_cast")]; + tensor attn_17_transpose_x_0 = const()[name = tensor("attn_17_transpose_x_0"), val = tensor(false)]; + tensor attn_17_transpose_y_0 = const()[name = tensor("attn_17_transpose_y_0"), val = tensor(true)]; + tensor attn_17_cast = matmul(transpose_x = attn_17_transpose_x_0, transpose_y = attn_17_transpose_y_0, x = var_1335_cast, y = var_1339_cast)[name = tensor("attn_17_cast")]; + tensor var_1343 = const()[name = tensor("op_1343"), val = tensor([2, 1280, 1, -1])]; + tensor input_131_cast = reshape(shape = var_1343, x = attn_17_cast)[name = tensor("input_131_cast")]; + tensor var_1348 = const()[name = tensor("op_1348"), val = tensor([1, 1])]; + tensor var_1350 = const()[name = tensor("op_1350"), val = tensor([1, 1])]; + tensor var_1352_pad_type_0 = const()[name = tensor("op_1352_pad_type_0"), val = tensor("custom")]; + tensor var_1352_pad_0 = const()[name = tensor("op_1352_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(146292352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147930816))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147931392)))]; + tensor var_1352_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_1350, groups = var_1186, pad = var_1352_pad_0, pad_type = var_1352_pad_type_0, strides = var_1348, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_131_cast)[name = tensor("op_1352_cast")]; + tensor inputs_27_cast = add(x = var_1352_cast, y = inputs_25_cast)[name = tensor("inputs_27_cast")]; + tensor var_1356 = const()[name = 
tensor("op_1356"), val = tensor([1])]; + tensor channels_mean_27_cast = reduce_mean(axes = var_1356, keep_dims = var_1181, x = inputs_27_cast)[name = tensor("channels_mean_27_cast")]; + tensor zero_mean_27_cast = sub(x = inputs_27_cast, y = channels_mean_27_cast)[name = tensor("zero_mean_27_cast")]; + tensor zero_mean_sq_27_cast = mul(x = zero_mean_27_cast, y = zero_mean_27_cast)[name = tensor("zero_mean_sq_27_cast")]; + tensor var_1360 = const()[name = tensor("op_1360"), val = tensor([1])]; + tensor var_1361_cast = reduce_mean(axes = var_1360, keep_dims = var_1181, x = zero_mean_sq_27_cast)[name = tensor("op_1361_cast")]; + tensor var_1362_to_fp16 = const()[name = tensor("op_1362_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1363_cast = add(x = var_1361_cast, y = var_1362_to_fp16)[name = tensor("op_1363_cast")]; + tensor denom_27_epsilon_0_to_fp16 = const()[name = tensor("denom_27_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_27_cast = rsqrt(epsilon = denom_27_epsilon_0_to_fp16, x = var_1363_cast)[name = tensor("denom_27_cast")]; + tensor out_27_cast = mul(x = zero_mean_27_cast, y = denom_27_cast)[name = tensor("out_27_cast")]; + tensor var_1367_to_fp16 = const()[name = tensor("op_1367_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147934016)))]; + tensor var_1368_cast = add(x = out_27_cast, y = var_1367_to_fp16)[name = tensor("op_1368_cast")]; + tensor var_1370_to_fp16 = const()[name = tensor("op_1370_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147936640)))]; + tensor hidden_states_67_cast = mul(x = var_1368_cast, y = var_1370_to_fp16)[name = tensor("hidden_states_67_cast")]; + tensor var_1377 = const()[name = tensor("op_1377"), val = tensor([1, 1])]; + tensor var_1379 = const()[name = tensor("op_1379"), val = tensor([1, 1])]; + tensor q_19_pad_type_0 = const()[name = tensor("q_19_pad_type_0"), val = tensor("custom")]; + tensor q_19_pad_0 = const()[name = tensor("q_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(147939264))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(148758528))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_19_cast = conv(dilations = var_1379, groups = var_1186, pad = q_19_pad_0, pad_type = q_19_pad_type_0, strides = var_1377, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_67_cast)[name = tensor("q_19_cast")]; + tensor var_1383 = const()[name = tensor("op_1383"), val = tensor([1, 1])]; + tensor var_1385 = const()[name = tensor("op_1385"), val = tensor([1, 1])]; + tensor k_19_pad_type_0 = const()[name = tensor("k_19_pad_type_0"), val = tensor("custom")]; + tensor k_19_pad_0 = const()[name = tensor("k_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(148758656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(150069440))), name = 
tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_19_cast = conv(dilations = var_1385, groups = var_1186, pad = k_19_pad_0, pad_type = k_19_pad_type_0, strides = var_1383, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_19_cast")]; + tensor var_1389 = const()[name = tensor("op_1389"), val = tensor([1, 1])]; + tensor var_1391 = const()[name = tensor("op_1391"), val = tensor([1, 1])]; + tensor v_19_pad_type_0 = const()[name = tensor("v_19_pad_type_0"), val = tensor("custom")]; + tensor v_19_pad_0 = const()[name = tensor("v_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(150069568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(151380352))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_19_cast = conv(dilations = var_1391, groups = var_1186, pad = v_19_pad_0, pad_type = v_19_pad_type_0, strides = var_1389, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_19_cast")]; + tensor var_1395 = const()[name = tensor("op_1395"), val = tensor([2, 20, 64, -1])]; + tensor var_1396_cast = reshape(shape = var_1395, x = q_19_cast)[name = tensor("op_1396_cast")]; + tensor var_1397 = const()[name = tensor("op_1397"), val = tensor([2, 20, 64, -1])]; + tensor var_1398_cast = reshape(shape = var_1397, x = k_19_cast)[name = tensor("op_1398_cast")]; + tensor var_1399 = const()[name = tensor("op_1399"), val = tensor([2, 20, 64, -1])]; + tensor var_1400_cast = reshape(shape = var_1399, x = v_19_cast)[name = tensor("op_1400_cast")]; + tensor attn_weights_37_transpose_x_0 = const()[name = tensor("attn_weights_37_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_37_transpose_y_0 = const()[name = tensor("attn_weights_37_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_37_cast = matmul(transpose_x = attn_weights_37_transpose_x_0, transpose_y = attn_weights_37_transpose_y_0, x = var_1396_cast, y = var_1398_cast)[name = tensor("attn_weights_37_cast")]; + tensor attn_weights_39_cast = mul(x = attn_weights_37_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_39_cast")]; + tensor var_1404_cast = softmax(axis = var_1170, x = attn_weights_39_cast)[name = tensor("op_1404_cast")]; + tensor attn_19_transpose_x_0 = const()[name = tensor("attn_19_transpose_x_0"), val = tensor(false)]; + tensor attn_19_transpose_y_0 = const()[name = tensor("attn_19_transpose_y_0"), val = tensor(true)]; + tensor attn_19_cast = matmul(transpose_x = attn_19_transpose_x_0, transpose_y = attn_19_transpose_y_0, x = var_1400_cast, y = var_1404_cast)[name = tensor("attn_19_cast")]; + tensor var_1408 = const()[name = tensor("op_1408"), val = tensor([2, 1280, 1, -1])]; + tensor input_133_cast = reshape(shape = var_1408, x = attn_19_cast)[name = tensor("input_133_cast")]; + tensor var_1413 = const()[name = tensor("op_1413"), val = tensor([1, 1])]; + tensor var_1415 = const()[name = tensor("op_1415"), val = tensor([1, 1])]; + tensor var_1417_pad_type_0 = const()[name = tensor("op_1417_pad_type_0"), val = 
tensor("custom")]; + tensor var_1417_pad_0 = const()[name = tensor("op_1417_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(151380480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152199744))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152199872)))]; + tensor var_1417_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_1415, groups = var_1186, pad = var_1417_pad_0, pad_type = var_1417_pad_type_0, strides = var_1413, weight = down_blocks_2_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_133_cast)[name = tensor("op_1417_cast")]; + tensor inputs_29_cast = add(x = var_1417_cast, y = inputs_27_cast)[name = tensor("inputs_29_cast")]; + tensor var_1421 = const()[name = tensor("op_1421"), val = tensor([1])]; + tensor channels_mean_29_cast = reduce_mean(axes = var_1421, keep_dims = var_1181, x = inputs_29_cast)[name = tensor("channels_mean_29_cast")]; + tensor zero_mean_29_cast = sub(x = inputs_29_cast, y = channels_mean_29_cast)[name = tensor("zero_mean_29_cast")]; + tensor zero_mean_sq_29_cast = mul(x = zero_mean_29_cast, y = zero_mean_29_cast)[name = tensor("zero_mean_sq_29_cast")]; + tensor var_1425 = const()[name = tensor("op_1425"), val = tensor([1])]; + tensor var_1426_cast = reduce_mean(axes = var_1425, keep_dims = var_1181, x = zero_mean_sq_29_cast)[name = tensor("op_1426_cast")]; + tensor var_1427_to_fp16 = const()[name = tensor("op_1427_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1428_cast = add(x = var_1426_cast, y = var_1427_to_fp16)[name = tensor("op_1428_cast")]; + tensor denom_29_epsilon_0_to_fp16 = const()[name = tensor("denom_29_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_29_cast = rsqrt(epsilon = denom_29_epsilon_0_to_fp16, x = var_1428_cast)[name = tensor("denom_29_cast")]; + tensor out_29_cast = mul(x = zero_mean_29_cast, y = denom_29_cast)[name = tensor("out_29_cast")]; + tensor var_1432_to_fp16 = const()[name = tensor("op_1432_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152202496)))]; + tensor var_1433_cast = add(x = out_29_cast, y = var_1432_to_fp16)[name = tensor("op_1433_cast")]; + tensor var_1435_to_fp16 = const()[name = tensor("op_1435_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152205120)))]; + tensor input_135_cast = mul(x = var_1433_cast, y = var_1435_to_fp16)[name = tensor("input_135_cast")]; + tensor var_1443 = const()[name = tensor("op_1443"), val = tensor([1, 1])]; + tensor var_1445 = const()[name = tensor("op_1445"), val = tensor([1, 1])]; + tensor var_1447_pad_type_0 = const()[name = tensor("op_1447_pad_type_0"), val = tensor("custom")]; + tensor var_1447_pad_0 = const()[name = tensor("op_1447_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(152207744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165315008))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165315584)))]; + tensor var_1447_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_1445, groups = var_1186, pad = var_1447_pad_0, pad_type = var_1447_pad_type_0, strides = var_1443, weight = down_blocks_2_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_135_cast)[name = tensor("op_1447_cast")]; + tensor var_1448_split_sizes_0 = const()[name = tensor("op_1448_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1448_axis_0 = const()[name = tensor("op_1448_axis_0"), val = tensor(1)]; + tensor var_1448_cast_0, tensor var_1448_cast_1 = split(axis = var_1448_axis_0, split_sizes = var_1448_split_sizes_0, x = var_1447_cast)[name = tensor("op_1448_cast")]; + tensor var_1450_mode_0 = const()[name = tensor("op_1450_mode_0"), val = tensor("EXACT")]; + tensor var_1450_cast = gelu(mode = var_1450_mode_0, x = var_1448_cast_1)[name = tensor("op_1450_cast")]; + tensor input_137_cast = mul(x = var_1448_cast_0, y = var_1450_cast)[name = tensor("input_137_cast")]; + tensor var_1454 = const()[name = tensor("op_1454"), val = tensor([1, 1])]; + tensor var_1456 = const()[name = tensor("op_1456"), val = tensor([1, 1])]; + tensor var_1458_pad_type_0 = const()[name = tensor("op_1458_pad_type_0"), val = tensor("custom")]; + tensor var_1458_pad_0 = const()[name = tensor("op_1458_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(165336128))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(171889792))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(171890368)))]; + tensor var_1458_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_1456, groups = var_1186, pad = var_1458_pad_0, pad_type = var_1458_pad_type_0, strides = var_1454, weight = down_blocks_2_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_137_cast)[name = tensor("op_1458_cast")]; + tensor inputs_31_cast = add(x = var_1458_cast, y = inputs_29_cast)[name = tensor("inputs_31_cast")]; + tensor var_1468 = const()[name = tensor("op_1468"), val = tensor([1])]; + tensor channels_mean_31_cast = reduce_mean(axes = var_1468, keep_dims = var_1181, x = inputs_31_cast)[name = 
tensor("channels_mean_31_cast")]; + tensor zero_mean_31_cast = sub(x = inputs_31_cast, y = channels_mean_31_cast)[name = tensor("zero_mean_31_cast")]; + tensor zero_mean_sq_31_cast = mul(x = zero_mean_31_cast, y = zero_mean_31_cast)[name = tensor("zero_mean_sq_31_cast")]; + tensor var_1472 = const()[name = tensor("op_1472"), val = tensor([1])]; + tensor var_1473_cast = reduce_mean(axes = var_1472, keep_dims = var_1181, x = zero_mean_sq_31_cast)[name = tensor("op_1473_cast")]; + tensor var_1474_to_fp16 = const()[name = tensor("op_1474_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1475_cast = add(x = var_1473_cast, y = var_1474_to_fp16)[name = tensor("op_1475_cast")]; + tensor denom_31_epsilon_0_to_fp16 = const()[name = tensor("denom_31_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_31_cast = rsqrt(epsilon = denom_31_epsilon_0_to_fp16, x = var_1475_cast)[name = tensor("denom_31_cast")]; + tensor out_31_cast = mul(x = zero_mean_31_cast, y = denom_31_cast)[name = tensor("out_31_cast")]; + tensor var_1479_to_fp16 = const()[name = tensor("op_1479_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(171892992)))]; + tensor var_1480_cast = add(x = out_31_cast, y = var_1479_to_fp16)[name = tensor("op_1480_cast")]; + tensor var_1482_to_fp16 = const()[name = tensor("op_1482_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(171895616)))]; + tensor hidden_states_71_cast = mul(x = var_1480_cast, y = var_1482_to_fp16)[name = tensor("hidden_states_71_cast")]; + tensor var_1489 = const()[name = tensor("op_1489"), val = tensor([1, 1])]; + tensor var_1491 = const()[name = tensor("op_1491"), val = tensor([1, 1])]; + tensor q_21_pad_type_0 = const()[name = tensor("q_21_pad_type_0"), val = tensor("custom")]; + tensor q_21_pad_0 = const()[name = tensor("q_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(171898240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(172717504))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_21_cast = conv(dilations = var_1491, groups = var_1186, pad = q_21_pad_0, pad_type = q_21_pad_type_0, strides = var_1489, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_71_cast)[name = tensor("q_21_cast")]; + tensor var_1495 = const()[name = tensor("op_1495"), val = tensor([1, 1])]; + tensor var_1497 = const()[name = tensor("op_1497"), val = tensor([1, 1])]; + tensor k_21_pad_type_0 = const()[name = tensor("k_21_pad_type_0"), val = tensor("custom")]; + tensor k_21_pad_0 = const()[name = tensor("k_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(172717632))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(173536896))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_21_cast = conv(dilations = var_1497, groups = var_1186, pad = k_21_pad_0, 
pad_type = k_21_pad_type_0, strides = var_1495, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_71_cast)[name = tensor("k_21_cast")]; + tensor var_1501 = const()[name = tensor("op_1501"), val = tensor([1, 1])]; + tensor var_1503 = const()[name = tensor("op_1503"), val = tensor([1, 1])]; + tensor v_21_pad_type_0 = const()[name = tensor("v_21_pad_type_0"), val = tensor("custom")]; + tensor v_21_pad_0 = const()[name = tensor("v_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(173537024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(174765888))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_21_cast = conv(dilations = var_1503, groups = var_1186, pad = v_21_pad_0, pad_type = v_21_pad_type_0, strides = var_1501, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_71_cast)[name = tensor("v_21_cast")]; + tensor var_1507 = const()[name = tensor("op_1507"), val = tensor([2, 20, 64, -1])]; + tensor var_1508_cast = reshape(shape = var_1507, x = q_21_cast)[name = tensor("op_1508_cast")]; + tensor var_1509 = const()[name = tensor("op_1509"), val = tensor([2, 20, 64, -1])]; + tensor var_1510_cast = reshape(shape = var_1509, x = k_21_cast)[name = tensor("op_1510_cast")]; + tensor var_1511 = const()[name = tensor("op_1511"), val = tensor([2, 20, 64, -1])]; + tensor var_1512_cast = reshape(shape = var_1511, x = v_21_cast)[name = tensor("op_1512_cast")]; + tensor attn_weights_41_transpose_x_0 = const()[name = tensor("attn_weights_41_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_41_transpose_y_0 = const()[name = tensor("attn_weights_41_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_41_cast = matmul(transpose_x = attn_weights_41_transpose_x_0, transpose_y = attn_weights_41_transpose_y_0, x = var_1508_cast, y = var_1510_cast)[name = tensor("attn_weights_41_cast")]; + tensor attn_weights_43_cast = mul(x = attn_weights_41_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_43_cast")]; + tensor var_1516_cast = softmax(axis = var_1170, x = attn_weights_43_cast)[name = tensor("op_1516_cast")]; + tensor attn_21_transpose_x_0 = const()[name = tensor("attn_21_transpose_x_0"), val = tensor(false)]; + tensor attn_21_transpose_y_0 = const()[name = tensor("attn_21_transpose_y_0"), val = tensor(true)]; + tensor attn_21_cast = matmul(transpose_x = attn_21_transpose_x_0, transpose_y = attn_21_transpose_y_0, x = var_1512_cast, y = var_1516_cast)[name = tensor("attn_21_cast")]; + tensor var_1520 = const()[name = tensor("op_1520"), val = tensor([2, 1280, 1, -1])]; + tensor input_139_cast = reshape(shape = var_1520, x = attn_21_cast)[name = tensor("input_139_cast")]; + tensor var_1525 = const()[name = tensor("op_1525"), val = tensor([1, 1])]; + tensor var_1527 = const()[name = tensor("op_1527"), val = tensor([1, 1])]; + tensor var_1529_pad_type_0 = const()[name = tensor("op_1529_pad_type_0"), val = tensor("custom")]; + tensor var_1529_pad_0 = const()[name = tensor("op_1529_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(174766080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(175994944))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(175995136)))]; + tensor var_1529_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_1527, groups = var_1186, pad = var_1529_pad_0, pad_type = var_1529_pad_type_0, strides = var_1525, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_139_cast)[name = tensor("op_1529_cast")]; + tensor inputs_33_cast = add(x = var_1529_cast, y = inputs_31_cast)[name = tensor("inputs_33_cast")]; + tensor var_1533 = const()[name = tensor("op_1533"), val = tensor([1])]; + tensor channels_mean_33_cast = reduce_mean(axes = var_1533, keep_dims = var_1181, x = inputs_33_cast)[name = tensor("channels_mean_33_cast")]; + tensor zero_mean_33_cast = sub(x = inputs_33_cast, y = channels_mean_33_cast)[name = tensor("zero_mean_33_cast")]; + tensor zero_mean_sq_33_cast = mul(x = zero_mean_33_cast, y = zero_mean_33_cast)[name = tensor("zero_mean_sq_33_cast")]; + tensor var_1537 = const()[name = tensor("op_1537"), val = tensor([1])]; + tensor var_1538_cast = reduce_mean(axes = var_1537, keep_dims = var_1181, x = zero_mean_sq_33_cast)[name = tensor("op_1538_cast")]; + tensor var_1539_to_fp16 = const()[name = tensor("op_1539_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1540_cast = add(x = var_1538_cast, y = var_1539_to_fp16)[name = tensor("op_1540_cast")]; + tensor denom_33_epsilon_0_to_fp16 = const()[name = tensor("denom_33_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_33_cast = rsqrt(epsilon = denom_33_epsilon_0_to_fp16, x = var_1540_cast)[name = tensor("denom_33_cast")]; + tensor out_33_cast = mul(x = zero_mean_33_cast, y = denom_33_cast)[name = tensor("out_33_cast")]; + tensor var_1544_to_fp16 = const()[name = tensor("op_1544_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(175997760)))]; + tensor var_1545_cast = add(x = out_33_cast, y = var_1544_to_fp16)[name = tensor("op_1545_cast")]; + tensor var_1547_to_fp16 = const()[name = tensor("op_1547_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(176000384)))]; + tensor hidden_states_73_cast = mul(x = var_1545_cast, y = var_1547_to_fp16)[name = tensor("hidden_states_73_cast")]; + tensor var_1554 = const()[name = tensor("op_1554"), val = tensor([1, 1])]; + tensor var_1556 = const()[name = tensor("op_1556"), val = tensor([1, 1])]; + tensor q_23_pad_type_0 = const()[name = tensor("q_23_pad_type_0"), val = tensor("custom")]; + tensor q_23_pad_0 = const()[name = tensor("q_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(176003008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(177231872))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_23_cast = conv(dilations = var_1556, groups = var_1186, pad = q_23_pad_0, pad_type = q_23_pad_type_0, strides = var_1554, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_73_cast)[name = tensor("q_23_cast")]; + tensor var_1560 = const()[name = tensor("op_1560"), val = tensor([1, 1])]; + tensor var_1562 = const()[name = tensor("op_1562"), val = tensor([1, 1])]; + tensor k_23_pad_type_0 = const()[name = tensor("k_23_pad_type_0"), val = tensor("custom")]; + tensor k_23_pad_0 = const()[name = tensor("k_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(177232064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(178542848))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_23_cast = conv(dilations = var_1562, groups = var_1186, pad = k_23_pad_0, pad_type = k_23_pad_type_0, strides = var_1560, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_23_cast")]; + tensor var_1566 = const()[name = tensor("op_1566"), val = tensor([1, 1])]; + tensor var_1568 = const()[name = tensor("op_1568"), val = tensor([1, 1])]; + tensor v_23_pad_type_0 = const()[name = tensor("v_23_pad_type_0"), val = tensor("custom")]; + tensor v_23_pad_0 = const()[name = tensor("v_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(178542976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(180509120))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_23_cast = conv(dilations = var_1568, groups = var_1186, pad = v_23_pad_0, pad_type = v_23_pad_type_0, strides = var_1566, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_23_cast")]; + tensor var_1572 = const()[name = tensor("op_1572"), val = tensor([2, 20, 64, -1])]; + tensor var_1573_cast = reshape(shape = var_1572, x = q_23_cast)[name = tensor("op_1573_cast")]; + tensor var_1574 = const()[name = tensor("op_1574"), val = tensor([2, 20, 64, -1])]; + tensor var_1575_cast = reshape(shape = var_1574, x = k_23_cast)[name = tensor("op_1575_cast")]; + tensor var_1576 = const()[name = tensor("op_1576"), val = tensor([2, 20, 64, -1])]; + tensor var_1577_cast = reshape(shape = var_1576, x = v_23_cast)[name = tensor("op_1577_cast")]; + tensor attn_weights_45_transpose_x_0 = const()[name = tensor("attn_weights_45_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_45_transpose_y_0 = const()[name = tensor("attn_weights_45_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_45_cast = matmul(transpose_x = attn_weights_45_transpose_x_0, transpose_y = attn_weights_45_transpose_y_0, x = 
var_1573_cast, y = var_1575_cast)[name = tensor("attn_weights_45_cast")]; + tensor attn_weights_47_cast = mul(x = attn_weights_45_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_47_cast")]; + tensor var_1581_cast = softmax(axis = var_1170, x = attn_weights_47_cast)[name = tensor("op_1581_cast")]; + tensor attn_23_transpose_x_0 = const()[name = tensor("attn_23_transpose_x_0"), val = tensor(false)]; + tensor attn_23_transpose_y_0 = const()[name = tensor("attn_23_transpose_y_0"), val = tensor(true)]; + tensor attn_23_cast = matmul(transpose_x = attn_23_transpose_x_0, transpose_y = attn_23_transpose_y_0, x = var_1577_cast, y = var_1581_cast)[name = tensor("attn_23_cast")]; + tensor var_1585 = const()[name = tensor("op_1585"), val = tensor([2, 1280, 1, -1])]; + tensor input_141_cast = reshape(shape = var_1585, x = attn_23_cast)[name = tensor("input_141_cast")]; + tensor var_1590 = const()[name = tensor("op_1590"), val = tensor([1, 1])]; + tensor var_1592 = const()[name = tensor("op_1592"), val = tensor([1, 1])]; + tensor var_1594_pad_type_0 = const()[name = tensor("op_1594_pad_type_0"), val = tensor("custom")]; + tensor var_1594_pad_0 = const()[name = tensor("op_1594_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(180509312))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181738176))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181738368)))]; + tensor var_1594_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_1592, groups = var_1186, pad = var_1594_pad_0, pad_type = var_1594_pad_type_0, strides = var_1590, weight = down_blocks_2_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_141_cast)[name = tensor("op_1594_cast")]; + tensor inputs_35_cast = add(x = var_1594_cast, y = inputs_33_cast)[name = tensor("inputs_35_cast")]; + tensor var_1598 = const()[name = tensor("op_1598"), val = tensor([1])]; + tensor channels_mean_35_cast = reduce_mean(axes = var_1598, keep_dims = var_1181, x = inputs_35_cast)[name = tensor("channels_mean_35_cast")]; + tensor zero_mean_35_cast = sub(x = inputs_35_cast, y = channels_mean_35_cast)[name = tensor("zero_mean_35_cast")]; + tensor zero_mean_sq_35_cast = mul(x = zero_mean_35_cast, y = zero_mean_35_cast)[name = tensor("zero_mean_sq_35_cast")]; + tensor var_1602 = const()[name = tensor("op_1602"), val = tensor([1])]; + tensor var_1603_cast = reduce_mean(axes = var_1602, keep_dims = var_1181, x = zero_mean_sq_35_cast)[name = tensor("op_1603_cast")]; + tensor var_1604_to_fp16 = const()[name = tensor("op_1604_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1605_cast = add(x = var_1603_cast, y = var_1604_to_fp16)[name = tensor("op_1605_cast")]; + tensor denom_35_epsilon_0_to_fp16 = const()[name = tensor("denom_35_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_35_cast = rsqrt(epsilon = denom_35_epsilon_0_to_fp16, x = var_1605_cast)[name = 
tensor("denom_35_cast")]; + tensor out_35_cast = mul(x = zero_mean_35_cast, y = denom_35_cast)[name = tensor("out_35_cast")]; + tensor var_1609_to_fp16 = const()[name = tensor("op_1609_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181740992)))]; + tensor var_1610_cast = add(x = out_35_cast, y = var_1609_to_fp16)[name = tensor("op_1610_cast")]; + tensor var_1612_to_fp16 = const()[name = tensor("op_1612_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181743616)))]; + tensor input_143_cast = mul(x = var_1610_cast, y = var_1612_to_fp16)[name = tensor("input_143_cast")]; + tensor var_1620 = const()[name = tensor("op_1620"), val = tensor([1, 1])]; + tensor var_1622 = const()[name = tensor("op_1622"), val = tensor([1, 1])]; + tensor var_1624_pad_type_0 = const()[name = tensor("op_1624_pad_type_0"), val = tensor("custom")]; + tensor var_1624_pad_0 = const()[name = tensor("op_1624_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(181746240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(191576704))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(191576896)))]; + tensor var_1624_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_1622, groups = var_1186, pad = var_1624_pad_0, pad_type = var_1624_pad_type_0, strides = var_1620, weight = down_blocks_2_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_143_cast)[name = tensor("op_1624_cast")]; + tensor var_1625_split_sizes_0 = const()[name = tensor("op_1625_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1625_axis_0 = const()[name = tensor("op_1625_axis_0"), val = tensor(1)]; + tensor var_1625_cast_0, tensor var_1625_cast_1 = split(axis = var_1625_axis_0, split_sizes = var_1625_split_sizes_0, x = var_1624_cast)[name = tensor("op_1625_cast")]; + tensor var_1627_mode_0 = const()[name = tensor("op_1627_mode_0"), val = tensor("EXACT")]; + tensor var_1627_cast = gelu(mode = var_1627_mode_0, x = var_1625_cast_1)[name = tensor("op_1627_cast")]; + tensor input_145_cast = mul(x = var_1625_cast_0, y = var_1627_cast)[name = tensor("input_145_cast")]; + tensor var_1631 = const()[name = tensor("op_1631"), val = tensor([1, 1])]; + tensor var_1633 = const()[name = tensor("op_1633"), val = tensor([1, 1])]; + tensor var_1635_pad_type_0 = const()[name = tensor("op_1635_pad_type_0"), val = tensor("custom")]; + tensor var_1635_pad_0 = const()[name = tensor("op_1635_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(191597440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(196512704))), name = 
tensor("down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(196512896)))]; + tensor var_1635_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_1633, groups = var_1186, pad = var_1635_pad_0, pad_type = var_1635_pad_type_0, strides = var_1631, weight = down_blocks_2_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_145_cast)[name = tensor("op_1635_cast")]; + tensor inputs_37_cast = add(x = var_1635_cast, y = inputs_35_cast)[name = tensor("inputs_37_cast")]; + tensor var_1645 = const()[name = tensor("op_1645"), val = tensor([1])]; + tensor channels_mean_37_cast = reduce_mean(axes = var_1645, keep_dims = var_1181, x = inputs_37_cast)[name = tensor("channels_mean_37_cast")]; + tensor zero_mean_37_cast = sub(x = inputs_37_cast, y = channels_mean_37_cast)[name = tensor("zero_mean_37_cast")]; + tensor zero_mean_sq_37_cast = mul(x = zero_mean_37_cast, y = zero_mean_37_cast)[name = tensor("zero_mean_sq_37_cast")]; + tensor var_1649 = const()[name = tensor("op_1649"), val = tensor([1])]; + tensor var_1650_cast = reduce_mean(axes = var_1649, keep_dims = var_1181, x = zero_mean_sq_37_cast)[name = tensor("op_1650_cast")]; + tensor var_1651_to_fp16 = const()[name = tensor("op_1651_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1652_cast = add(x = var_1650_cast, y = var_1651_to_fp16)[name = tensor("op_1652_cast")]; + tensor denom_37_epsilon_0_to_fp16 = const()[name = tensor("denom_37_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_37_cast = rsqrt(epsilon = denom_37_epsilon_0_to_fp16, x = var_1652_cast)[name = tensor("denom_37_cast")]; + tensor out_37_cast = mul(x = zero_mean_37_cast, y = denom_37_cast)[name = tensor("out_37_cast")]; + tensor var_1656_to_fp16 = const()[name = tensor("op_1656_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(196515520)))]; + tensor var_1657_cast = add(x = out_37_cast, y = var_1656_to_fp16)[name = tensor("op_1657_cast")]; + tensor var_1659_to_fp16 = const()[name = tensor("op_1659_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(196518144)))]; + tensor hidden_states_77_cast = mul(x = var_1657_cast, y = var_1659_to_fp16)[name = tensor("hidden_states_77_cast")]; + tensor var_1666 = const()[name = tensor("op_1666"), val = tensor([1, 1])]; + tensor var_1668 = const()[name = tensor("op_1668"), val = tensor([1, 1])]; + tensor q_25_pad_type_0 = const()[name = tensor("q_25_pad_type_0"), val = tensor("custom")]; + tensor q_25_pad_0 = const()[name = tensor("q_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(196520768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197340032))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_25_cast = conv(dilations = var_1668, groups = var_1186, pad = q_25_pad_0, pad_type = 
q_25_pad_type_0, strides = var_1666, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_77_cast)[name = tensor("q_25_cast")]; + tensor var_1672 = const()[name = tensor("op_1672"), val = tensor([1, 1])]; + tensor var_1674 = const()[name = tensor("op_1674"), val = tensor([1, 1])]; + tensor k_25_pad_type_0 = const()[name = tensor("k_25_pad_type_0"), val = tensor("custom")]; + tensor k_25_pad_0 = const()[name = tensor("k_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(197340160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(198159424))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_25_cast = conv(dilations = var_1674, groups = var_1186, pad = k_25_pad_0, pad_type = k_25_pad_type_0, strides = var_1672, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_77_cast)[name = tensor("k_25_cast")]; + tensor var_1678 = const()[name = tensor("op_1678"), val = tensor([1, 1])]; + tensor var_1680 = const()[name = tensor("op_1680"), val = tensor([1, 1])]; + tensor v_25_pad_type_0 = const()[name = tensor("v_25_pad_type_0"), val = tensor("custom")]; + tensor v_25_pad_0 = const()[name = tensor("v_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(198159552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(199388416))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_25_cast = conv(dilations = var_1680, groups = var_1186, pad = v_25_pad_0, pad_type = v_25_pad_type_0, strides = var_1678, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_77_cast)[name = tensor("v_25_cast")]; + tensor var_1684 = const()[name = tensor("op_1684"), val = tensor([2, 20, 64, -1])]; + tensor var_1685_cast = reshape(shape = var_1684, x = q_25_cast)[name = tensor("op_1685_cast")]; + tensor var_1686 = const()[name = tensor("op_1686"), val = tensor([2, 20, 64, -1])]; + tensor var_1687_cast = reshape(shape = var_1686, x = k_25_cast)[name = tensor("op_1687_cast")]; + tensor var_1688 = const()[name = tensor("op_1688"), val = tensor([2, 20, 64, -1])]; + tensor var_1689_cast = reshape(shape = var_1688, x = v_25_cast)[name = tensor("op_1689_cast")]; + tensor attn_weights_49_transpose_x_0 = const()[name = tensor("attn_weights_49_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_49_transpose_y_0 = const()[name = tensor("attn_weights_49_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_49_cast = matmul(transpose_x = attn_weights_49_transpose_x_0, transpose_y = attn_weights_49_transpose_y_0, x = var_1685_cast, y = var_1687_cast)[name = tensor("attn_weights_49_cast")]; + tensor attn_weights_51_cast = mul(x = attn_weights_49_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_51_cast")]; + tensor var_1693_cast = softmax(axis = var_1170, x = 
attn_weights_51_cast)[name = tensor("op_1693_cast")]; + tensor attn_25_transpose_x_0 = const()[name = tensor("attn_25_transpose_x_0"), val = tensor(false)]; + tensor attn_25_transpose_y_0 = const()[name = tensor("attn_25_transpose_y_0"), val = tensor(true)]; + tensor attn_25_cast = matmul(transpose_x = attn_25_transpose_x_0, transpose_y = attn_25_transpose_y_0, x = var_1689_cast, y = var_1693_cast)[name = tensor("attn_25_cast")]; + tensor var_1697 = const()[name = tensor("op_1697"), val = tensor([2, 1280, 1, -1])]; + tensor input_147_cast = reshape(shape = var_1697, x = attn_25_cast)[name = tensor("input_147_cast")]; + tensor var_1702 = const()[name = tensor("op_1702"), val = tensor([1, 1])]; + tensor var_1704 = const()[name = tensor("op_1704"), val = tensor([1, 1])]; + tensor var_1706_pad_type_0 = const()[name = tensor("op_1706_pad_type_0"), val = tensor("custom")]; + tensor var_1706_pad_0 = const()[name = tensor("op_1706_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(199388608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(200617472))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(200617664)))]; + tensor var_1706_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_1704, groups = var_1186, pad = var_1706_pad_0, pad_type = var_1706_pad_type_0, strides = var_1702, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_147_cast)[name = tensor("op_1706_cast")]; + tensor inputs_39_cast = add(x = var_1706_cast, y = inputs_37_cast)[name = tensor("inputs_39_cast")]; + tensor var_1710 = const()[name = tensor("op_1710"), val = tensor([1])]; + tensor channels_mean_39_cast = reduce_mean(axes = var_1710, keep_dims = var_1181, x = inputs_39_cast)[name = tensor("channels_mean_39_cast")]; + tensor zero_mean_39_cast = sub(x = inputs_39_cast, y = channels_mean_39_cast)[name = tensor("zero_mean_39_cast")]; + tensor zero_mean_sq_39_cast = mul(x = zero_mean_39_cast, y = zero_mean_39_cast)[name = tensor("zero_mean_sq_39_cast")]; + tensor var_1714 = const()[name = tensor("op_1714"), val = tensor([1])]; + tensor var_1715_cast = reduce_mean(axes = var_1714, keep_dims = var_1181, x = zero_mean_sq_39_cast)[name = tensor("op_1715_cast")]; + tensor var_1716_to_fp16 = const()[name = tensor("op_1716_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1717_cast = add(x = var_1715_cast, y = var_1716_to_fp16)[name = tensor("op_1717_cast")]; + tensor denom_39_epsilon_0_to_fp16 = const()[name = tensor("denom_39_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_39_cast = rsqrt(epsilon = denom_39_epsilon_0_to_fp16, x = var_1717_cast)[name = tensor("denom_39_cast")]; + tensor out_39_cast = mul(x = zero_mean_39_cast, y = denom_39_cast)[name = tensor("out_39_cast")]; + tensor var_1721_to_fp16 = const()[name = tensor("op_1721_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(200620288)))]; + tensor var_1722_cast = add(x = out_39_cast, y = var_1721_to_fp16)[name = tensor("op_1722_cast")]; + tensor var_1724_to_fp16 = const()[name = tensor("op_1724_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(200622912)))]; + tensor hidden_states_79_cast = mul(x = var_1722_cast, y = var_1724_to_fp16)[name = tensor("hidden_states_79_cast")]; + tensor var_1731 = const()[name = tensor("op_1731"), val = tensor([1, 1])]; + tensor var_1733 = const()[name = tensor("op_1733"), val = tensor([1, 1])]; + tensor q_27_pad_type_0 = const()[name = tensor("q_27_pad_type_0"), val = tensor("custom")]; + tensor q_27_pad_0 = const()[name = tensor("q_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(200625536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201444800))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_27_cast = conv(dilations = var_1733, groups = var_1186, pad = q_27_pad_0, pad_type = q_27_pad_type_0, strides = var_1731, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_79_cast)[name = tensor("q_27_cast")]; + tensor var_1737 = const()[name = tensor("op_1737"), val = tensor([1, 1])]; + tensor var_1739 = const()[name = tensor("op_1739"), val = tensor([1, 1])]; + tensor k_27_pad_type_0 = const()[name = tensor("k_27_pad_type_0"), val = tensor("custom")]; + tensor k_27_pad_0 = const()[name = tensor("k_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(201444928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(202755712))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_27_cast = conv(dilations = var_1739, groups = var_1186, pad = k_27_pad_0, pad_type = k_27_pad_type_0, strides = var_1737, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_27_cast")]; + tensor var_1743 = const()[name = tensor("op_1743"), val = tensor([1, 1])]; + tensor var_1745 = const()[name = tensor("op_1745"), val = tensor([1, 1])]; + tensor v_27_pad_type_0 = const()[name = tensor("v_27_pad_type_0"), val = tensor("custom")]; + tensor v_27_pad_0 = const()[name = tensor("v_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(202755840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(204721984))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_27_cast = conv(dilations = var_1745, groups = var_1186, pad = v_27_pad_0, pad_type = v_27_pad_type_0, 
strides = var_1743, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_27_cast")]; + tensor var_1749 = const()[name = tensor("op_1749"), val = tensor([2, 20, 64, -1])]; + tensor var_1750_cast = reshape(shape = var_1749, x = q_27_cast)[name = tensor("op_1750_cast")]; + tensor var_1751 = const()[name = tensor("op_1751"), val = tensor([2, 20, 64, -1])]; + tensor var_1752_cast = reshape(shape = var_1751, x = k_27_cast)[name = tensor("op_1752_cast")]; + tensor var_1753 = const()[name = tensor("op_1753"), val = tensor([2, 20, 64, -1])]; + tensor var_1754_cast = reshape(shape = var_1753, x = v_27_cast)[name = tensor("op_1754_cast")]; + tensor attn_weights_53_transpose_x_0 = const()[name = tensor("attn_weights_53_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_53_transpose_y_0 = const()[name = tensor("attn_weights_53_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_53_cast = matmul(transpose_x = attn_weights_53_transpose_x_0, transpose_y = attn_weights_53_transpose_y_0, x = var_1750_cast, y = var_1752_cast)[name = tensor("attn_weights_53_cast")]; + tensor attn_weights_55_cast = mul(x = attn_weights_53_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_55_cast")]; + tensor var_1758_cast = softmax(axis = var_1170, x = attn_weights_55_cast)[name = tensor("op_1758_cast")]; + tensor attn_27_transpose_x_0 = const()[name = tensor("attn_27_transpose_x_0"), val = tensor(false)]; + tensor attn_27_transpose_y_0 = const()[name = tensor("attn_27_transpose_y_0"), val = tensor(true)]; + tensor attn_27_cast = matmul(transpose_x = attn_27_transpose_x_0, transpose_y = attn_27_transpose_y_0, x = var_1754_cast, y = var_1758_cast)[name = tensor("attn_27_cast")]; + tensor var_1762 = const()[name = tensor("op_1762"), val = tensor([2, 1280, 1, -1])]; + tensor input_149_cast = reshape(shape = var_1762, x = attn_27_cast)[name = tensor("input_149_cast")]; + tensor var_1767 = const()[name = tensor("op_1767"), val = tensor([1, 1])]; + tensor var_1769 = const()[name = tensor("op_1769"), val = tensor([1, 1])]; + tensor var_1771_pad_type_0 = const()[name = tensor("op_1771_pad_type_0"), val = tensor("custom")]; + tensor var_1771_pad_0 = const()[name = tensor("op_1771_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(204722176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(205541440))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(205541568)))]; + tensor var_1771_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_1769, groups = var_1186, pad = var_1771_pad_0, pad_type = var_1771_pad_type_0, strides = var_1767, weight = down_blocks_2_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_149_cast)[name = tensor("op_1771_cast")]; + tensor inputs_41_cast = add(x = var_1771_cast, y = inputs_39_cast)[name = 
tensor("inputs_41_cast")]; + tensor var_1775 = const()[name = tensor("op_1775"), val = tensor([1])]; + tensor channels_mean_41_cast = reduce_mean(axes = var_1775, keep_dims = var_1181, x = inputs_41_cast)[name = tensor("channels_mean_41_cast")]; + tensor zero_mean_41_cast = sub(x = inputs_41_cast, y = channels_mean_41_cast)[name = tensor("zero_mean_41_cast")]; + tensor zero_mean_sq_41_cast = mul(x = zero_mean_41_cast, y = zero_mean_41_cast)[name = tensor("zero_mean_sq_41_cast")]; + tensor var_1779 = const()[name = tensor("op_1779"), val = tensor([1])]; + tensor var_1780_cast = reduce_mean(axes = var_1779, keep_dims = var_1181, x = zero_mean_sq_41_cast)[name = tensor("op_1780_cast")]; + tensor var_1781_to_fp16 = const()[name = tensor("op_1781_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1782_cast = add(x = var_1780_cast, y = var_1781_to_fp16)[name = tensor("op_1782_cast")]; + tensor denom_41_epsilon_0_to_fp16 = const()[name = tensor("denom_41_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_41_cast = rsqrt(epsilon = denom_41_epsilon_0_to_fp16, x = var_1782_cast)[name = tensor("denom_41_cast")]; + tensor out_41_cast = mul(x = zero_mean_41_cast, y = denom_41_cast)[name = tensor("out_41_cast")]; + tensor var_1786_to_fp16 = const()[name = tensor("op_1786_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(205544192)))]; + tensor var_1787_cast = add(x = out_41_cast, y = var_1786_to_fp16)[name = tensor("op_1787_cast")]; + tensor var_1789_to_fp16 = const()[name = tensor("op_1789_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(205546816)))]; + tensor input_151_cast = mul(x = var_1787_cast, y = var_1789_to_fp16)[name = tensor("input_151_cast")]; + tensor var_1797 = const()[name = tensor("op_1797"), val = tensor([1, 1])]; + tensor var_1799 = const()[name = tensor("op_1799"), val = tensor([1, 1])]; + tensor var_1801_pad_type_0 = const()[name = tensor("op_1801_pad_type_0"), val = tensor("custom")]; + tensor var_1801_pad_0 = const()[name = tensor("op_1801_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(205549440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(218656704))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(218657280)))]; + tensor var_1801_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_1799, groups = var_1186, pad = var_1801_pad_0, pad_type = var_1801_pad_type_0, strides = var_1797, weight = down_blocks_2_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_151_cast)[name = tensor("op_1801_cast")]; + tensor var_1802_split_sizes_0 = const()[name = tensor("op_1802_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1802_axis_0 = const()[name = tensor("op_1802_axis_0"), val = tensor(1)]; + tensor var_1802_cast_0, tensor var_1802_cast_1 = split(axis = var_1802_axis_0, 
split_sizes = var_1802_split_sizes_0, x = var_1801_cast)[name = tensor("op_1802_cast")]; + tensor var_1804_mode_0 = const()[name = tensor("op_1804_mode_0"), val = tensor("EXACT")]; + tensor var_1804_cast = gelu(mode = var_1804_mode_0, x = var_1802_cast_1)[name = tensor("op_1804_cast")]; + tensor input_153_cast = mul(x = var_1802_cast_0, y = var_1804_cast)[name = tensor("input_153_cast")]; + tensor var_1808 = const()[name = tensor("op_1808"), val = tensor([1, 1])]; + tensor var_1810 = const()[name = tensor("op_1810"), val = tensor([1, 1])]; + tensor var_1812_pad_type_0 = const()[name = tensor("op_1812_pad_type_0"), val = tensor("custom")]; + tensor var_1812_pad_0 = const()[name = tensor("op_1812_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(218677824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(223593088))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(223593280)))]; + tensor var_1812_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_1810, groups = var_1186, pad = var_1812_pad_0, pad_type = var_1812_pad_type_0, strides = var_1808, weight = down_blocks_2_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_153_cast)[name = tensor("op_1812_cast")]; + tensor inputs_43_cast = add(x = var_1812_cast, y = inputs_41_cast)[name = tensor("inputs_43_cast")]; + tensor var_1822 = const()[name = tensor("op_1822"), val = tensor([1])]; + tensor channels_mean_43_cast = reduce_mean(axes = var_1822, keep_dims = var_1181, x = inputs_43_cast)[name = tensor("channels_mean_43_cast")]; + tensor zero_mean_43_cast = sub(x = inputs_43_cast, y = channels_mean_43_cast)[name = tensor("zero_mean_43_cast")]; + tensor zero_mean_sq_43_cast = mul(x = zero_mean_43_cast, y = zero_mean_43_cast)[name = tensor("zero_mean_sq_43_cast")]; + tensor var_1826 = const()[name = tensor("op_1826"), val = tensor([1])]; + tensor var_1827_cast = reduce_mean(axes = var_1826, keep_dims = var_1181, x = zero_mean_sq_43_cast)[name = tensor("op_1827_cast")]; + tensor var_1828_to_fp16 = const()[name = tensor("op_1828_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1829_cast = add(x = var_1827_cast, y = var_1828_to_fp16)[name = tensor("op_1829_cast")]; + tensor denom_43_epsilon_0_to_fp16 = const()[name = tensor("denom_43_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_43_cast = rsqrt(epsilon = denom_43_epsilon_0_to_fp16, x = var_1829_cast)[name = tensor("denom_43_cast")]; + tensor out_43_cast = mul(x = zero_mean_43_cast, y = denom_43_cast)[name = tensor("out_43_cast")]; + tensor var_1833_to_fp16 = const()[name = tensor("op_1833_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(223595904)))]; + tensor var_1834_cast = add(x = out_43_cast, y = var_1833_to_fp16)[name = tensor("op_1834_cast")]; + tensor var_1836_to_fp16 = const()[name = tensor("op_1836_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(223598528)))]; + tensor hidden_states_83_cast = mul(x = var_1834_cast, y = var_1836_to_fp16)[name = tensor("hidden_states_83_cast")]; + tensor var_1843 = const()[name = tensor("op_1843"), val = tensor([1, 1])]; + tensor var_1845 = const()[name = tensor("op_1845"), val = tensor([1, 1])]; + tensor q_29_pad_type_0 = const()[name = tensor("q_29_pad_type_0"), val = tensor("custom")]; + tensor q_29_pad_0 = const()[name = tensor("q_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(223601152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(224830016))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_29_cast = conv(dilations = var_1845, groups = var_1186, pad = q_29_pad_0, pad_type = q_29_pad_type_0, strides = var_1843, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_83_cast)[name = tensor("q_29_cast")]; + tensor var_1849 = const()[name = tensor("op_1849"), val = tensor([1, 1])]; + tensor var_1851 = const()[name = tensor("op_1851"), val = tensor([1, 1])]; + tensor k_29_pad_type_0 = const()[name = tensor("k_29_pad_type_0"), val = tensor("custom")]; + tensor k_29_pad_0 = const()[name = tensor("k_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(224830208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(226059072))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_29_cast = conv(dilations = var_1851, groups = var_1186, pad = k_29_pad_0, pad_type = k_29_pad_type_0, strides = var_1849, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_83_cast)[name = tensor("k_29_cast")]; + tensor var_1855 = const()[name = tensor("op_1855"), val = tensor([1, 1])]; + tensor var_1857 = const()[name = tensor("op_1857"), val = tensor([1, 1])]; + tensor v_29_pad_type_0 = const()[name = tensor("v_29_pad_type_0"), val = tensor("custom")]; + tensor v_29_pad_0 = const()[name = tensor("v_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(226059264))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(227288128))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_29_cast = conv(dilations = var_1857, groups = var_1186, pad = v_29_pad_0, pad_type = v_29_pad_type_0, strides = var_1855, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_83_cast)[name = tensor("v_29_cast")]; + tensor var_1861 = const()[name = tensor("op_1861"), val = tensor([2, 20, 64, -1])]; + tensor 
var_1862_cast = reshape(shape = var_1861, x = q_29_cast)[name = tensor("op_1862_cast")]; + tensor var_1863 = const()[name = tensor("op_1863"), val = tensor([2, 20, 64, -1])]; + tensor var_1864_cast = reshape(shape = var_1863, x = k_29_cast)[name = tensor("op_1864_cast")]; + tensor var_1865 = const()[name = tensor("op_1865"), val = tensor([2, 20, 64, -1])]; + tensor var_1866_cast = reshape(shape = var_1865, x = v_29_cast)[name = tensor("op_1866_cast")]; + tensor attn_weights_57_transpose_x_0 = const()[name = tensor("attn_weights_57_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_57_transpose_y_0 = const()[name = tensor("attn_weights_57_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_57_cast = matmul(transpose_x = attn_weights_57_transpose_x_0, transpose_y = attn_weights_57_transpose_y_0, x = var_1862_cast, y = var_1864_cast)[name = tensor("attn_weights_57_cast")]; + tensor attn_weights_59_cast = mul(x = attn_weights_57_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_59_cast")]; + tensor var_1870_cast = softmax(axis = var_1170, x = attn_weights_59_cast)[name = tensor("op_1870_cast")]; + tensor attn_29_transpose_x_0 = const()[name = tensor("attn_29_transpose_x_0"), val = tensor(false)]; + tensor attn_29_transpose_y_0 = const()[name = tensor("attn_29_transpose_y_0"), val = tensor(true)]; + tensor attn_29_cast = matmul(transpose_x = attn_29_transpose_x_0, transpose_y = attn_29_transpose_y_0, x = var_1866_cast, y = var_1870_cast)[name = tensor("attn_29_cast")]; + tensor var_1874 = const()[name = tensor("op_1874"), val = tensor([2, 1280, 1, -1])]; + tensor input_155_cast = reshape(shape = var_1874, x = attn_29_cast)[name = tensor("input_155_cast")]; + tensor var_1879 = const()[name = tensor("op_1879"), val = tensor([1, 1])]; + tensor var_1881 = const()[name = tensor("op_1881"), val = tensor([1, 1])]; + tensor var_1883_pad_type_0 = const()[name = tensor("op_1883_pad_type_0"), val = tensor("custom")]; + tensor var_1883_pad_0 = const()[name = tensor("op_1883_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(227288320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(228517184))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(228517376)))]; + tensor var_1883_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_1881, groups = var_1186, pad = var_1883_pad_0, pad_type = var_1883_pad_type_0, strides = var_1879, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_155_cast)[name = tensor("op_1883_cast")]; + tensor inputs_45_cast = add(x = var_1883_cast, y = inputs_43_cast)[name = tensor("inputs_45_cast")]; + tensor var_1887 = const()[name = tensor("op_1887"), val = tensor([1])]; + tensor channels_mean_45_cast = reduce_mean(axes = var_1887, keep_dims = var_1181, x = inputs_45_cast)[name = tensor("channels_mean_45_cast")]; + tensor zero_mean_45_cast = 
sub(x = inputs_45_cast, y = channels_mean_45_cast)[name = tensor("zero_mean_45_cast")]; + tensor zero_mean_sq_45_cast = mul(x = zero_mean_45_cast, y = zero_mean_45_cast)[name = tensor("zero_mean_sq_45_cast")]; + tensor var_1891 = const()[name = tensor("op_1891"), val = tensor([1])]; + tensor var_1892_cast = reduce_mean(axes = var_1891, keep_dims = var_1181, x = zero_mean_sq_45_cast)[name = tensor("op_1892_cast")]; + tensor var_1893_to_fp16 = const()[name = tensor("op_1893_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1894_cast = add(x = var_1892_cast, y = var_1893_to_fp16)[name = tensor("op_1894_cast")]; + tensor denom_45_epsilon_0_to_fp16 = const()[name = tensor("denom_45_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_45_cast = rsqrt(epsilon = denom_45_epsilon_0_to_fp16, x = var_1894_cast)[name = tensor("denom_45_cast")]; + tensor out_45_cast = mul(x = zero_mean_45_cast, y = denom_45_cast)[name = tensor("out_45_cast")]; + tensor var_1898_to_fp16 = const()[name = tensor("op_1898_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(228520000)))]; + tensor var_1899_cast = add(x = out_45_cast, y = var_1898_to_fp16)[name = tensor("op_1899_cast")]; + tensor var_1901_to_fp16 = const()[name = tensor("op_1901_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(228522624)))]; + tensor hidden_states_85_cast = mul(x = var_1899_cast, y = var_1901_to_fp16)[name = tensor("hidden_states_85_cast")]; + tensor var_1908 = const()[name = tensor("op_1908"), val = tensor([1, 1])]; + tensor var_1910 = const()[name = tensor("op_1910"), val = tensor([1, 1])]; + tensor q_31_pad_type_0 = const()[name = tensor("q_31_pad_type_0"), val = tensor("custom")]; + tensor q_31_pad_0 = const()[name = tensor("q_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(228525248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(229344512))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_31_cast = conv(dilations = var_1910, groups = var_1186, pad = q_31_pad_0, pad_type = q_31_pad_type_0, strides = var_1908, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_85_cast)[name = tensor("q_31_cast")]; + tensor var_1914 = const()[name = tensor("op_1914"), val = tensor([1, 1])]; + tensor var_1916 = const()[name = tensor("op_1916"), val = tensor([1, 1])]; + tensor k_31_pad_type_0 = const()[name = tensor("k_31_pad_type_0"), val = tensor("custom")]; + tensor k_31_pad_0 = const()[name = tensor("k_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(229344640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(230655424))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_31_cast = conv(dilations = var_1916, groups = var_1186, pad = k_31_pad_0, pad_type = k_31_pad_type_0, strides = var_1914, weight = 
down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_31_cast")]; + tensor var_1920 = const()[name = tensor("op_1920"), val = tensor([1, 1])]; + tensor var_1922 = const()[name = tensor("op_1922"), val = tensor([1, 1])]; + tensor v_31_pad_type_0 = const()[name = tensor("v_31_pad_type_0"), val = tensor("custom")]; + tensor v_31_pad_0 = const()[name = tensor("v_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(230655552))), lut = tensor([-0x1.6d4p-6, -0x1.9d8p-8, 0x1.a04p-8, 0x1.6e4p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_31_cast = conv(dilations = var_1922, groups = var_1186, pad = v_31_pad_0, pad_type = v_31_pad_type_0, strides = var_1920, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_31_cast")]; + tensor var_1926 = const()[name = tensor("op_1926"), val = tensor([2, 20, 64, -1])]; + tensor var_1927_cast = reshape(shape = var_1926, x = q_31_cast)[name = tensor("op_1927_cast")]; + tensor var_1928 = const()[name = tensor("op_1928"), val = tensor([2, 20, 64, -1])]; + tensor var_1929_cast = reshape(shape = var_1928, x = k_31_cast)[name = tensor("op_1929_cast")]; + tensor var_1930 = const()[name = tensor("op_1930"), val = tensor([2, 20, 64, -1])]; + tensor var_1931_cast = reshape(shape = var_1930, x = v_31_cast)[name = tensor("op_1931_cast")]; + tensor attn_weights_61_transpose_x_0 = const()[name = tensor("attn_weights_61_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_61_transpose_y_0 = const()[name = tensor("attn_weights_61_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_61_cast = matmul(transpose_x = attn_weights_61_transpose_x_0, transpose_y = attn_weights_61_transpose_y_0, x = var_1927_cast, y = var_1929_cast)[name = tensor("attn_weights_61_cast")]; + tensor attn_weights_63_cast = mul(x = attn_weights_61_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_63_cast")]; + tensor var_1935_cast = softmax(axis = var_1170, x = attn_weights_63_cast)[name = tensor("op_1935_cast")]; + tensor attn_31_transpose_x_0 = const()[name = tensor("attn_31_transpose_x_0"), val = tensor(false)]; + tensor attn_31_transpose_y_0 = const()[name = tensor("attn_31_transpose_y_0"), val = tensor(true)]; + tensor attn_31_cast = matmul(transpose_x = attn_31_transpose_x_0, transpose_y = attn_31_transpose_y_0, x = var_1931_cast, y = var_1935_cast)[name = tensor("attn_31_cast")]; + tensor var_1939 = const()[name = tensor("op_1939"), val = tensor([2, 1280, 1, -1])]; + tensor input_157_cast = reshape(shape = var_1939, x = attn_31_cast)[name = tensor("input_157_cast")]; + tensor var_1944 = const()[name = tensor("op_1944"), val = tensor([1, 1])]; + tensor var_1946 = const()[name = tensor("op_1946"), val = tensor([1, 1])]; + tensor var_1948_pad_type_0 = const()[name = tensor("op_1948_pad_type_0"), val = tensor("custom")]; + tensor var_1948_pad_0 = const()[name = tensor("op_1948_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(231310976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(232130240))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(232130368)))]; + tensor var_1948_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_1946, groups = var_1186, pad = var_1948_pad_0, pad_type = var_1948_pad_type_0, strides = var_1944, weight = down_blocks_2_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_157_cast)[name = tensor("op_1948_cast")]; + tensor inputs_47_cast = add(x = var_1948_cast, y = inputs_45_cast)[name = tensor("inputs_47_cast")]; + tensor var_1952 = const()[name = tensor("op_1952"), val = tensor([1])]; + tensor channels_mean_47_cast = reduce_mean(axes = var_1952, keep_dims = var_1181, x = inputs_47_cast)[name = tensor("channels_mean_47_cast")]; + tensor zero_mean_47_cast = sub(x = inputs_47_cast, y = channels_mean_47_cast)[name = tensor("zero_mean_47_cast")]; + tensor zero_mean_sq_47_cast = mul(x = zero_mean_47_cast, y = zero_mean_47_cast)[name = tensor("zero_mean_sq_47_cast")]; + tensor var_1956 = const()[name = tensor("op_1956"), val = tensor([1])]; + tensor var_1957_cast = reduce_mean(axes = var_1956, keep_dims = var_1181, x = zero_mean_sq_47_cast)[name = tensor("op_1957_cast")]; + tensor var_1958_to_fp16 = const()[name = tensor("op_1958_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_1959_cast = add(x = var_1957_cast, y = var_1958_to_fp16)[name = tensor("op_1959_cast")]; + tensor denom_47_epsilon_0_to_fp16 = const()[name = tensor("denom_47_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_47_cast = rsqrt(epsilon = denom_47_epsilon_0_to_fp16, x = var_1959_cast)[name = tensor("denom_47_cast")]; + tensor out_47_cast = mul(x = zero_mean_47_cast, y = denom_47_cast)[name = tensor("out_47_cast")]; + tensor var_1963_to_fp16 = const()[name = tensor("op_1963_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(232132992)))]; + tensor var_1964_cast = add(x = out_47_cast, y = var_1963_to_fp16)[name = tensor("op_1964_cast")]; + tensor var_1966_to_fp16 = const()[name = tensor("op_1966_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(232135616)))]; + tensor input_159_cast = mul(x = var_1964_cast, y = var_1966_to_fp16)[name = tensor("input_159_cast")]; + tensor var_1974 = const()[name = tensor("op_1974"), val = tensor([1, 1])]; + tensor var_1976 = const()[name = tensor("op_1976"), val = tensor([1, 1])]; + tensor var_1978_pad_type_0 = const()[name = tensor("op_1978_pad_type_0"), val = tensor("custom")]; + tensor var_1978_pad_0 = const()[name = tensor("op_1978_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(232138240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(241968704))), name = 
tensor("down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(241968896)))]; + tensor var_1978_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_1976, groups = var_1186, pad = var_1978_pad_0, pad_type = var_1978_pad_type_0, strides = var_1974, weight = down_blocks_2_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_159_cast)[name = tensor("op_1978_cast")]; + tensor var_1979_split_sizes_0 = const()[name = tensor("op_1979_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_1979_axis_0 = const()[name = tensor("op_1979_axis_0"), val = tensor(1)]; + tensor var_1979_cast_0, tensor var_1979_cast_1 = split(axis = var_1979_axis_0, split_sizes = var_1979_split_sizes_0, x = var_1978_cast)[name = tensor("op_1979_cast")]; + tensor var_1981_mode_0 = const()[name = tensor("op_1981_mode_0"), val = tensor("EXACT")]; + tensor var_1981_cast = gelu(mode = var_1981_mode_0, x = var_1979_cast_1)[name = tensor("op_1981_cast")]; + tensor input_161_cast = mul(x = var_1979_cast_0, y = var_1981_cast)[name = tensor("input_161_cast")]; + tensor var_1985 = const()[name = tensor("op_1985"), val = tensor([1, 1])]; + tensor var_1987 = const()[name = tensor("op_1987"), val = tensor([1, 1])]; + tensor var_1989_pad_type_0 = const()[name = tensor("op_1989_pad_type_0"), val = tensor("custom")]; + tensor var_1989_pad_0 = const()[name = tensor("op_1989_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(241989440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(248543104))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(248543680)))]; + tensor var_1989_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_1987, groups = var_1186, pad = var_1989_pad_0, pad_type = var_1989_pad_type_0, strides = var_1985, weight = down_blocks_2_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_161_cast)[name = tensor("op_1989_cast")]; + tensor inputs_49_cast = add(x = var_1989_cast, y = inputs_47_cast)[name = tensor("inputs_49_cast")]; + tensor var_1999 = const()[name = tensor("op_1999"), val = tensor([1])]; + tensor channels_mean_49_cast = reduce_mean(axes = var_1999, keep_dims = var_1181, x = inputs_49_cast)[name = tensor("channels_mean_49_cast")]; + tensor zero_mean_49_cast = sub(x = inputs_49_cast, y = channels_mean_49_cast)[name = tensor("zero_mean_49_cast")]; + tensor zero_mean_sq_49_cast = mul(x = zero_mean_49_cast, y = zero_mean_49_cast)[name = tensor("zero_mean_sq_49_cast")]; + tensor var_2003 = const()[name = tensor("op_2003"), val = 
tensor([1])]; + tensor var_2004_cast = reduce_mean(axes = var_2003, keep_dims = var_1181, x = zero_mean_sq_49_cast)[name = tensor("op_2004_cast")]; + tensor var_2005_to_fp16 = const()[name = tensor("op_2005_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2006_cast = add(x = var_2004_cast, y = var_2005_to_fp16)[name = tensor("op_2006_cast")]; + tensor denom_49_epsilon_0_to_fp16 = const()[name = tensor("denom_49_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_49_cast = rsqrt(epsilon = denom_49_epsilon_0_to_fp16, x = var_2006_cast)[name = tensor("denom_49_cast")]; + tensor out_49_cast = mul(x = zero_mean_49_cast, y = denom_49_cast)[name = tensor("out_49_cast")]; + tensor var_2010_to_fp16 = const()[name = tensor("op_2010_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(248546304)))]; + tensor var_2011_cast = add(x = out_49_cast, y = var_2010_to_fp16)[name = tensor("op_2011_cast")]; + tensor var_2013_to_fp16 = const()[name = tensor("op_2013_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(248548928)))]; + tensor hidden_states_89_cast = mul(x = var_2011_cast, y = var_2013_to_fp16)[name = tensor("hidden_states_89_cast")]; + tensor var_2020 = const()[name = tensor("op_2020"), val = tensor([1, 1])]; + tensor var_2022 = const()[name = tensor("op_2022"), val = tensor([1, 1])]; + tensor q_33_pad_type_0 = const()[name = tensor("q_33_pad_type_0"), val = tensor("custom")]; + tensor q_33_pad_0 = const()[name = tensor("q_33_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(248551552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(249370816))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_33_cast = conv(dilations = var_2022, groups = var_1186, pad = q_33_pad_0, pad_type = q_33_pad_type_0, strides = var_2020, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_89_cast)[name = tensor("q_33_cast")]; + tensor var_2026 = const()[name = tensor("op_2026"), val = tensor([1, 1])]; + tensor var_2028 = const()[name = tensor("op_2028"), val = tensor([1, 1])]; + tensor k_33_pad_type_0 = const()[name = tensor("k_33_pad_type_0"), val = tensor("custom")]; + tensor k_33_pad_0 = const()[name = tensor("k_33_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(249370944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250599808))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_33_cast = conv(dilations = var_2028, groups = var_1186, pad = k_33_pad_0, pad_type = k_33_pad_type_0, strides = var_2026, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_89_cast)[name = tensor("k_33_cast")]; + tensor var_2032 = const()[name = tensor("op_2032"), val = tensor([1, 1])]; + tensor var_2034 = const()[name = tensor("op_2034"), val 
= tensor([1, 1])]; + tensor v_33_pad_type_0 = const()[name = tensor("v_33_pad_type_0"), val = tensor("custom")]; + tensor v_33_pad_0 = const()[name = tensor("v_33_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(250600000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(251828864))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_33_cast = conv(dilations = var_2034, groups = var_1186, pad = v_33_pad_0, pad_type = v_33_pad_type_0, strides = var_2032, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_89_cast)[name = tensor("v_33_cast")]; + tensor var_2038 = const()[name = tensor("op_2038"), val = tensor([2, 20, 64, -1])]; + tensor var_2039_cast = reshape(shape = var_2038, x = q_33_cast)[name = tensor("op_2039_cast")]; + tensor var_2040 = const()[name = tensor("op_2040"), val = tensor([2, 20, 64, -1])]; + tensor var_2041_cast = reshape(shape = var_2040, x = k_33_cast)[name = tensor("op_2041_cast")]; + tensor var_2042 = const()[name = tensor("op_2042"), val = tensor([2, 20, 64, -1])]; + tensor var_2043_cast = reshape(shape = var_2042, x = v_33_cast)[name = tensor("op_2043_cast")]; + tensor attn_weights_65_transpose_x_0 = const()[name = tensor("attn_weights_65_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_65_transpose_y_0 = const()[name = tensor("attn_weights_65_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_65_cast = matmul(transpose_x = attn_weights_65_transpose_x_0, transpose_y = attn_weights_65_transpose_y_0, x = var_2039_cast, y = var_2041_cast)[name = tensor("attn_weights_65_cast")]; + tensor attn_weights_67_cast = mul(x = attn_weights_65_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_67_cast")]; + tensor var_2047_cast = softmax(axis = var_1170, x = attn_weights_67_cast)[name = tensor("op_2047_cast")]; + tensor attn_33_transpose_x_0 = const()[name = tensor("attn_33_transpose_x_0"), val = tensor(false)]; + tensor attn_33_transpose_y_0 = const()[name = tensor("attn_33_transpose_y_0"), val = tensor(true)]; + tensor attn_33_cast = matmul(transpose_x = attn_33_transpose_x_0, transpose_y = attn_33_transpose_y_0, x = var_2043_cast, y = var_2047_cast)[name = tensor("attn_33_cast")]; + tensor var_2051 = const()[name = tensor("op_2051"), val = tensor([2, 1280, 1, -1])]; + tensor input_163_cast = reshape(shape = var_2051, x = attn_33_cast)[name = tensor("input_163_cast")]; + tensor var_2056 = const()[name = tensor("op_2056"), val = tensor([1, 1])]; + tensor var_2058 = const()[name = tensor("op_2058"), val = tensor([1, 1])]; + tensor var_2060_pad_type_0 = const()[name = tensor("op_2060_pad_type_0"), val = tensor("custom")]; + tensor var_2060_pad_0 = const()[name = tensor("op_2060_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(251829056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(253057920))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = 
tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(253058112)))]; + tensor var_2060_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_2058, groups = var_1186, pad = var_2060_pad_0, pad_type = var_2060_pad_type_0, strides = var_2056, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_163_cast)[name = tensor("op_2060_cast")]; + tensor inputs_51_cast = add(x = var_2060_cast, y = inputs_49_cast)[name = tensor("inputs_51_cast")]; + tensor var_2064 = const()[name = tensor("op_2064"), val = tensor([1])]; + tensor channels_mean_51_cast = reduce_mean(axes = var_2064, keep_dims = var_1181, x = inputs_51_cast)[name = tensor("channels_mean_51_cast")]; + tensor zero_mean_51_cast = sub(x = inputs_51_cast, y = channels_mean_51_cast)[name = tensor("zero_mean_51_cast")]; + tensor zero_mean_sq_51_cast = mul(x = zero_mean_51_cast, y = zero_mean_51_cast)[name = tensor("zero_mean_sq_51_cast")]; + tensor var_2068 = const()[name = tensor("op_2068"), val = tensor([1])]; + tensor var_2069_cast = reduce_mean(axes = var_2068, keep_dims = var_1181, x = zero_mean_sq_51_cast)[name = tensor("op_2069_cast")]; + tensor var_2070_to_fp16 = const()[name = tensor("op_2070_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2071_cast = add(x = var_2069_cast, y = var_2070_to_fp16)[name = tensor("op_2071_cast")]; + tensor denom_51_epsilon_0_to_fp16 = const()[name = tensor("denom_51_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_51_cast = rsqrt(epsilon = denom_51_epsilon_0_to_fp16, x = var_2071_cast)[name = tensor("denom_51_cast")]; + tensor out_51_cast = mul(x = zero_mean_51_cast, y = denom_51_cast)[name = tensor("out_51_cast")]; + tensor var_2075_to_fp16 = const()[name = tensor("op_2075_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(253060736)))]; + tensor var_2076_cast = add(x = out_51_cast, y = var_2075_to_fp16)[name = tensor("op_2076_cast")]; + tensor var_2078_to_fp16 = const()[name = tensor("op_2078_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(253063360)))]; + tensor hidden_states_91_cast = mul(x = var_2076_cast, y = var_2078_to_fp16)[name = tensor("hidden_states_91_cast")]; + tensor var_2085 = const()[name = tensor("op_2085"), val = tensor([1, 1])]; + tensor var_2087 = const()[name = tensor("op_2087"), val = tensor([1, 1])]; + tensor q_35_pad_type_0 = const()[name = tensor("q_35_pad_type_0"), val = tensor("custom")]; + tensor q_35_pad_0 = const()[name = tensor("q_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(253065984))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(253885248))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_35_cast = conv(dilations = var_2087, groups = var_1186, pad = q_35_pad_0, pad_type = q_35_pad_type_0, strides = var_2085, weight = 
down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_91_cast)[name = tensor("q_35_cast")]; + tensor var_2091 = const()[name = tensor("op_2091"), val = tensor([1, 1])]; + tensor var_2093 = const()[name = tensor("op_2093"), val = tensor([1, 1])]; + tensor k_35_pad_type_0 = const()[name = tensor("k_35_pad_type_0"), val = tensor("custom")]; + tensor k_35_pad_0 = const()[name = tensor("k_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(253885376))), lut = tensor([-0x1.064p-6, -0x1.42p-8, 0x1.42cp-8, 0x1.064p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_35_cast = conv(dilations = var_2093, groups = var_1186, pad = k_35_pad_0, pad_type = k_35_pad_type_0, strides = var_2091, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_35_cast")]; + tensor var_2097 = const()[name = tensor("op_2097"), val = tensor([1, 1])]; + tensor var_2099 = const()[name = tensor("op_2099"), val = tensor([1, 1])]; + tensor v_35_pad_type_0 = const()[name = tensor("v_35_pad_type_0"), val = tensor("custom")]; + tensor v_35_pad_0 = const()[name = tensor("v_35_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(254540800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255851584))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_35_cast = conv(dilations = var_2099, groups = var_1186, pad = v_35_pad_0, pad_type = v_35_pad_type_0, strides = var_2097, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_35_cast")]; + tensor var_2103 = const()[name = tensor("op_2103"), val = tensor([2, 20, 64, -1])]; + tensor var_2104_cast = reshape(shape = var_2103, x = q_35_cast)[name = tensor("op_2104_cast")]; + tensor var_2105 = const()[name = tensor("op_2105"), val = tensor([2, 20, 64, -1])]; + tensor var_2106_cast = reshape(shape = var_2105, x = k_35_cast)[name = tensor("op_2106_cast")]; + tensor var_2107 = const()[name = tensor("op_2107"), val = tensor([2, 20, 64, -1])]; + tensor var_2108_cast = reshape(shape = var_2107, x = v_35_cast)[name = tensor("op_2108_cast")]; + tensor attn_weights_69_transpose_x_0 = const()[name = tensor("attn_weights_69_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_69_transpose_y_0 = const()[name = tensor("attn_weights_69_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_69_cast = matmul(transpose_x = attn_weights_69_transpose_x_0, transpose_y = attn_weights_69_transpose_y_0, x = var_2104_cast, y = var_2106_cast)[name = tensor("attn_weights_69_cast")]; + tensor attn_weights_71_cast = mul(x = attn_weights_69_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_71_cast")]; + tensor var_2112_cast = softmax(axis = var_1170, x = attn_weights_71_cast)[name = tensor("op_2112_cast")]; + tensor attn_35_transpose_x_0 = const()[name 
= tensor("attn_35_transpose_x_0"), val = tensor(false)]; + tensor attn_35_transpose_y_0 = const()[name = tensor("attn_35_transpose_y_0"), val = tensor(true)]; + tensor attn_35_cast = matmul(transpose_x = attn_35_transpose_x_0, transpose_y = attn_35_transpose_y_0, x = var_2108_cast, y = var_2112_cast)[name = tensor("attn_35_cast")]; + tensor var_2116 = const()[name = tensor("op_2116"), val = tensor([2, 1280, 1, -1])]; + tensor input_165_cast = reshape(shape = var_2116, x = attn_35_cast)[name = tensor("input_165_cast")]; + tensor var_2121 = const()[name = tensor("op_2121"), val = tensor([1, 1])]; + tensor var_2123 = const()[name = tensor("op_2123"), val = tensor([1, 1])]; + tensor var_2125_pad_type_0 = const()[name = tensor("op_2125_pad_type_0"), val = tensor("custom")]; + tensor var_2125_pad_0 = const()[name = tensor("op_2125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(255851712))), lut = tensor([-0x1.6e4p-7, -0x1.aa8p-9, 0x1.a3p-9, 0x1.6c4p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(256261376)))]; + tensor var_2125_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_2123, groups = var_1186, pad = var_2125_pad_0, pad_type = var_2125_pad_type_0, strides = var_2121, weight = down_blocks_2_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_165_cast)[name = tensor("op_2125_cast")]; + tensor inputs_53_cast = add(x = var_2125_cast, y = inputs_51_cast)[name = tensor("inputs_53_cast")]; + tensor var_2129 = const()[name = tensor("op_2129"), val = tensor([1])]; + tensor channels_mean_53_cast = reduce_mean(axes = var_2129, keep_dims = var_1181, x = inputs_53_cast)[name = tensor("channels_mean_53_cast")]; + tensor zero_mean_53_cast = sub(x = inputs_53_cast, y = channels_mean_53_cast)[name = tensor("zero_mean_53_cast")]; + tensor zero_mean_sq_53_cast = mul(x = zero_mean_53_cast, y = zero_mean_53_cast)[name = tensor("zero_mean_sq_53_cast")]; + tensor var_2133 = const()[name = tensor("op_2133"), val = tensor([1])]; + tensor var_2134_cast = reduce_mean(axes = var_2133, keep_dims = var_1181, x = zero_mean_sq_53_cast)[name = tensor("op_2134_cast")]; + tensor var_2135_to_fp16 = const()[name = tensor("op_2135_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2136_cast = add(x = var_2134_cast, y = var_2135_to_fp16)[name = tensor("op_2136_cast")]; + tensor denom_53_epsilon_0_to_fp16 = const()[name = tensor("denom_53_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_53_cast = rsqrt(epsilon = denom_53_epsilon_0_to_fp16, x = var_2136_cast)[name = tensor("denom_53_cast")]; + tensor out_53_cast = mul(x = zero_mean_53_cast, y = denom_53_cast)[name = tensor("out_53_cast")]; + tensor var_2140_to_fp16 = const()[name = tensor("op_2140_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(256264000)))]; + tensor var_2141_cast = add(x = out_53_cast, y = var_2140_to_fp16)[name = 
tensor("op_2141_cast")]; + tensor var_2143_to_fp16 = const()[name = tensor("op_2143_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(256266624)))]; + tensor input_167_cast = mul(x = var_2141_cast, y = var_2143_to_fp16)[name = tensor("input_167_cast")]; + tensor var_2151 = const()[name = tensor("op_2151"), val = tensor([1, 1])]; + tensor var_2153 = const()[name = tensor("op_2153"), val = tensor([1, 1])]; + tensor var_2155_pad_type_0 = const()[name = tensor("op_2155_pad_type_0"), val = tensor("custom")]; + tensor var_2155_pad_0 = const()[name = tensor("op_2155_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(256269248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(266099712))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(266099904)))]; + tensor var_2155_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_2153, groups = var_1186, pad = var_2155_pad_0, pad_type = var_2155_pad_type_0, strides = var_2151, weight = down_blocks_2_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_167_cast)[name = tensor("op_2155_cast")]; + tensor var_2156_split_sizes_0 = const()[name = tensor("op_2156_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2156_axis_0 = const()[name = tensor("op_2156_axis_0"), val = tensor(1)]; + tensor var_2156_cast_0, tensor var_2156_cast_1 = split(axis = var_2156_axis_0, split_sizes = var_2156_split_sizes_0, x = var_2155_cast)[name = tensor("op_2156_cast")]; + tensor var_2158_mode_0 = const()[name = tensor("op_2158_mode_0"), val = tensor("EXACT")]; + tensor var_2158_cast = gelu(mode = var_2158_mode_0, x = var_2156_cast_1)[name = tensor("op_2158_cast")]; + tensor input_169_cast = mul(x = var_2156_cast_0, y = var_2158_cast)[name = tensor("input_169_cast")]; + tensor var_2162 = const()[name = tensor("op_2162"), val = tensor([1, 1])]; + tensor var_2164 = const()[name = tensor("op_2164"), val = tensor([1, 1])]; + tensor var_2166_pad_type_0 = const()[name = tensor("op_2166_pad_type_0"), val = tensor("custom")]; + tensor var_2166_pad_0 = const()[name = tensor("op_2166_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(266120448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(272674112))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(272674688)))]; 
+ tensor var_2166_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_2164, groups = var_1186, pad = var_2166_pad_0, pad_type = var_2166_pad_type_0, strides = var_2162, weight = down_blocks_2_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_169_cast)[name = tensor("op_2166_cast")]; + tensor inputs_55_cast = add(x = var_2166_cast, y = inputs_53_cast)[name = tensor("inputs_55_cast")]; + tensor var_2176 = const()[name = tensor("op_2176"), val = tensor([1])]; + tensor channels_mean_55_cast = reduce_mean(axes = var_2176, keep_dims = var_1181, x = inputs_55_cast)[name = tensor("channels_mean_55_cast")]; + tensor zero_mean_55_cast = sub(x = inputs_55_cast, y = channels_mean_55_cast)[name = tensor("zero_mean_55_cast")]; + tensor zero_mean_sq_55_cast = mul(x = zero_mean_55_cast, y = zero_mean_55_cast)[name = tensor("zero_mean_sq_55_cast")]; + tensor var_2180 = const()[name = tensor("op_2180"), val = tensor([1])]; + tensor var_2181_cast = reduce_mean(axes = var_2180, keep_dims = var_1181, x = zero_mean_sq_55_cast)[name = tensor("op_2181_cast")]; + tensor var_2182_to_fp16 = const()[name = tensor("op_2182_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2183_cast = add(x = var_2181_cast, y = var_2182_to_fp16)[name = tensor("op_2183_cast")]; + tensor denom_55_epsilon_0_to_fp16 = const()[name = tensor("denom_55_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_55_cast = rsqrt(epsilon = denom_55_epsilon_0_to_fp16, x = var_2183_cast)[name = tensor("denom_55_cast")]; + tensor out_55_cast = mul(x = zero_mean_55_cast, y = denom_55_cast)[name = tensor("out_55_cast")]; + tensor var_2187_to_fp16 = const()[name = tensor("op_2187_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(272677312)))]; + tensor var_2188_cast = add(x = out_55_cast, y = var_2187_to_fp16)[name = tensor("op_2188_cast")]; + tensor var_2190_to_fp16 = const()[name = tensor("op_2190_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(272679936)))]; + tensor hidden_states_95_cast = mul(x = var_2188_cast, y = var_2190_to_fp16)[name = tensor("hidden_states_95_cast")]; + tensor var_2197 = const()[name = tensor("op_2197"), val = tensor([1, 1])]; + tensor var_2199 = const()[name = tensor("op_2199"), val = tensor([1, 1])]; + tensor q_37_pad_type_0 = const()[name = tensor("q_37_pad_type_0"), val = tensor("custom")]; + tensor q_37_pad_0 = const()[name = tensor("q_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(272682560))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(273911424))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_37_cast = conv(dilations = var_2199, groups = var_1186, pad = q_37_pad_0, pad_type = q_37_pad_type_0, strides = var_2197, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_95_cast)[name = tensor("q_37_cast")]; + tensor var_2203 = const()[name = tensor("op_2203"), val = tensor([1, 1])]; + tensor var_2205 = const()[name = tensor("op_2205"), val = tensor([1, 1])]; + tensor k_37_pad_type_0 = const()[name = tensor("k_37_pad_type_0"), val = 
tensor("custom")]; + tensor k_37_pad_0 = const()[name = tensor("k_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(273911616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(275140480))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_37_cast = conv(dilations = var_2205, groups = var_1186, pad = k_37_pad_0, pad_type = k_37_pad_type_0, strides = var_2203, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_95_cast)[name = tensor("k_37_cast")]; + tensor var_2209 = const()[name = tensor("op_2209"), val = tensor([1, 1])]; + tensor var_2211 = const()[name = tensor("op_2211"), val = tensor([1, 1])]; + tensor v_37_pad_type_0 = const()[name = tensor("v_37_pad_type_0"), val = tensor("custom")]; + tensor v_37_pad_0 = const()[name = tensor("v_37_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(275140672))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(276779136))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_37_cast = conv(dilations = var_2211, groups = var_1186, pad = v_37_pad_0, pad_type = v_37_pad_type_0, strides = var_2209, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_95_cast)[name = tensor("v_37_cast")]; + tensor var_2215 = const()[name = tensor("op_2215"), val = tensor([2, 20, 64, -1])]; + tensor var_2216_cast = reshape(shape = var_2215, x = q_37_cast)[name = tensor("op_2216_cast")]; + tensor var_2217 = const()[name = tensor("op_2217"), val = tensor([2, 20, 64, -1])]; + tensor var_2218_cast = reshape(shape = var_2217, x = k_37_cast)[name = tensor("op_2218_cast")]; + tensor var_2219 = const()[name = tensor("op_2219"), val = tensor([2, 20, 64, -1])]; + tensor var_2220_cast = reshape(shape = var_2219, x = v_37_cast)[name = tensor("op_2220_cast")]; + tensor attn_weights_73_transpose_x_0 = const()[name = tensor("attn_weights_73_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_73_transpose_y_0 = const()[name = tensor("attn_weights_73_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_73_cast = matmul(transpose_x = attn_weights_73_transpose_x_0, transpose_y = attn_weights_73_transpose_y_0, x = var_2216_cast, y = var_2218_cast)[name = tensor("attn_weights_73_cast")]; + tensor attn_weights_75_cast = mul(x = attn_weights_73_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_75_cast")]; + tensor var_2224_cast = softmax(axis = var_1170, x = attn_weights_75_cast)[name = tensor("op_2224_cast")]; + tensor attn_37_transpose_x_0 = const()[name = tensor("attn_37_transpose_x_0"), val = tensor(false)]; + tensor attn_37_transpose_y_0 = const()[name = tensor("attn_37_transpose_y_0"), val = tensor(true)]; + tensor attn_37_cast = matmul(transpose_x = attn_37_transpose_x_0, transpose_y = attn_37_transpose_y_0, x = var_2220_cast, y = var_2224_cast)[name = tensor("attn_37_cast")]; + 
tensor var_2228 = const()[name = tensor("op_2228"), val = tensor([2, 1280, 1, -1])]; + tensor input_171_cast = reshape(shape = var_2228, x = attn_37_cast)[name = tensor("input_171_cast")]; + tensor var_2233 = const()[name = tensor("op_2233"), val = tensor([1, 1])]; + tensor var_2235 = const()[name = tensor("op_2235"), val = tensor([1, 1])]; + tensor var_2237_pad_type_0 = const()[name = tensor("op_2237_pad_type_0"), val = tensor("custom")]; + tensor var_2237_pad_0 = const()[name = tensor("op_2237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(276779712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(278008576))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(278008768)))]; + tensor var_2237_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_2235, groups = var_1186, pad = var_2237_pad_0, pad_type = var_2237_pad_type_0, strides = var_2233, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_171_cast)[name = tensor("op_2237_cast")]; + tensor inputs_57_cast = add(x = var_2237_cast, y = inputs_55_cast)[name = tensor("inputs_57_cast")]; + tensor var_2241 = const()[name = tensor("op_2241"), val = tensor([1])]; + tensor channels_mean_57_cast = reduce_mean(axes = var_2241, keep_dims = var_1181, x = inputs_57_cast)[name = tensor("channels_mean_57_cast")]; + tensor zero_mean_57_cast = sub(x = inputs_57_cast, y = channels_mean_57_cast)[name = tensor("zero_mean_57_cast")]; + tensor zero_mean_sq_57_cast = mul(x = zero_mean_57_cast, y = zero_mean_57_cast)[name = tensor("zero_mean_sq_57_cast")]; + tensor var_2245 = const()[name = tensor("op_2245"), val = tensor([1])]; + tensor var_2246_cast = reduce_mean(axes = var_2245, keep_dims = var_1181, x = zero_mean_sq_57_cast)[name = tensor("op_2246_cast")]; + tensor var_2247_to_fp16 = const()[name = tensor("op_2247_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2248_cast = add(x = var_2246_cast, y = var_2247_to_fp16)[name = tensor("op_2248_cast")]; + tensor denom_57_epsilon_0_to_fp16 = const()[name = tensor("denom_57_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_57_cast = rsqrt(epsilon = denom_57_epsilon_0_to_fp16, x = var_2248_cast)[name = tensor("denom_57_cast")]; + tensor out_57_cast = mul(x = zero_mean_57_cast, y = denom_57_cast)[name = tensor("out_57_cast")]; + tensor var_2252_to_fp16 = const()[name = tensor("op_2252_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(278011392)))]; + tensor var_2253_cast = add(x = out_57_cast, y = var_2252_to_fp16)[name = tensor("op_2253_cast")]; + tensor var_2255_to_fp16 = const()[name = tensor("op_2255_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(278014016)))]; + tensor hidden_states_97_cast = mul(x = var_2253_cast, y = var_2255_to_fp16)[name = tensor("hidden_states_97_cast")]; + 
tensor var_2262 = const()[name = tensor("op_2262"), val = tensor([1, 1])]; + tensor var_2264 = const()[name = tensor("op_2264"), val = tensor([1, 1])]; + tensor q_39_pad_type_0 = const()[name = tensor("q_39_pad_type_0"), val = tensor("custom")]; + tensor q_39_pad_0 = const()[name = tensor("q_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(278016640))), lut = tensor([-0x1.318p-6, -0x1.8f8p-8, 0x1.8ep-8, 0x1.31p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_39_cast = conv(dilations = var_2264, groups = var_1186, pad = q_39_pad_0, pad_type = q_39_pad_type_0, strides = var_2262, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_97_cast)[name = tensor("q_39_cast")]; + tensor var_2268 = const()[name = tensor("op_2268"), val = tensor([1, 1])]; + tensor var_2270 = const()[name = tensor("op_2270"), val = tensor([1, 1])]; + tensor k_39_pad_type_0 = const()[name = tensor("k_39_pad_type_0"), val = tensor("custom")]; + tensor k_39_pad_0 = const()[name = tensor("k_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(278426304))), lut = tensor([-0x1.cbcp-7, -0x1.22p-8, 0x1.214p-8, 0x1.cbcp-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_39_cast = conv(dilations = var_2270, groups = var_1186, pad = k_39_pad_0, pad_type = k_39_pad_type_0, strides = var_2268, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_39_cast")]; + tensor var_2274 = const()[name = tensor("op_2274"), val = tensor([1, 1])]; + tensor var_2276 = const()[name = tensor("op_2276"), val = tensor([1, 1])]; + tensor v_39_pad_type_0 = const()[name = tensor("v_39_pad_type_0"), val = tensor("custom")]; + tensor v_39_pad_0 = const()[name = tensor("v_39_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(279081728))), lut = tensor([-0x1.1fcp-6, -0x1.4d4p-8, 0x1.514p-8, 0x1.20cp-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_39_cast = conv(dilations = var_2276, groups = var_1186, pad = v_39_pad_0, pad_type = v_39_pad_type_0, strides = var_2274, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_39_cast")]; + tensor var_2280 = const()[name = tensor("op_2280"), val = tensor([2, 20, 64, -1])]; + tensor var_2281_cast = reshape(shape = var_2280, x = q_39_cast)[name = tensor("op_2281_cast")]; + tensor var_2282 = const()[name = tensor("op_2282"), val = tensor([2, 20, 64, -1])]; + tensor var_2283_cast = reshape(shape = var_2282, x = k_39_cast)[name = tensor("op_2283_cast")]; + tensor var_2284 = const()[name = 
tensor("op_2284"), val = tensor([2, 20, 64, -1])]; + tensor var_2285_cast = reshape(shape = var_2284, x = v_39_cast)[name = tensor("op_2285_cast")]; + tensor attn_weights_77_transpose_x_0 = const()[name = tensor("attn_weights_77_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_77_transpose_y_0 = const()[name = tensor("attn_weights_77_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_77_cast = matmul(transpose_x = attn_weights_77_transpose_x_0, transpose_y = attn_weights_77_transpose_y_0, x = var_2281_cast, y = var_2283_cast)[name = tensor("attn_weights_77_cast")]; + tensor attn_weights_79_cast = mul(x = attn_weights_77_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_79_cast")]; + tensor var_2289_cast = softmax(axis = var_1170, x = attn_weights_79_cast)[name = tensor("op_2289_cast")]; + tensor attn_39_transpose_x_0 = const()[name = tensor("attn_39_transpose_x_0"), val = tensor(false)]; + tensor attn_39_transpose_y_0 = const()[name = tensor("attn_39_transpose_y_0"), val = tensor(true)]; + tensor attn_39_cast = matmul(transpose_x = attn_39_transpose_x_0, transpose_y = attn_39_transpose_y_0, x = var_2285_cast, y = var_2289_cast)[name = tensor("attn_39_cast")]; + tensor var_2293 = const()[name = tensor("op_2293"), val = tensor([2, 1280, 1, -1])]; + tensor input_173_cast = reshape(shape = var_2293, x = attn_39_cast)[name = tensor("input_173_cast")]; + tensor var_2298 = const()[name = tensor("op_2298"), val = tensor([1, 1])]; + tensor var_2300 = const()[name = tensor("op_2300"), val = tensor([1, 1])]; + tensor var_2302_pad_type_0 = const()[name = tensor("op_2302_pad_type_0"), val = tensor("custom")]; + tensor var_2302_pad_0 = const()[name = tensor("op_2302_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(279737152))), lut = tensor([-0x1.46cp-8, 0x1.48p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(279942016)))]; + tensor var_2302_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_2300, groups = var_1186, pad = var_2302_pad_0, pad_type = var_2302_pad_type_0, strides = var_2298, weight = down_blocks_2_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_173_cast)[name = tensor("op_2302_cast")]; + tensor inputs_59_cast = add(x = var_2302_cast, y = inputs_57_cast)[name = tensor("inputs_59_cast")]; + tensor var_2306 = const()[name = tensor("op_2306"), val = tensor([1])]; + tensor channels_mean_59_cast = reduce_mean(axes = var_2306, keep_dims = var_1181, x = inputs_59_cast)[name = tensor("channels_mean_59_cast")]; + tensor zero_mean_59_cast = sub(x = inputs_59_cast, y = channels_mean_59_cast)[name = tensor("zero_mean_59_cast")]; + tensor zero_mean_sq_59_cast = mul(x = zero_mean_59_cast, y = zero_mean_59_cast)[name = tensor("zero_mean_sq_59_cast")]; + tensor var_2310 = const()[name = tensor("op_2310"), val = tensor([1])]; + tensor var_2311_cast = reduce_mean(axes = var_2310, keep_dims = var_1181, x = 
zero_mean_sq_59_cast)[name = tensor("op_2311_cast")]; + tensor var_2312_to_fp16 = const()[name = tensor("op_2312_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2313_cast = add(x = var_2311_cast, y = var_2312_to_fp16)[name = tensor("op_2313_cast")]; + tensor denom_59_epsilon_0_to_fp16 = const()[name = tensor("denom_59_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_59_cast = rsqrt(epsilon = denom_59_epsilon_0_to_fp16, x = var_2313_cast)[name = tensor("denom_59_cast")]; + tensor out_59_cast = mul(x = zero_mean_59_cast, y = denom_59_cast)[name = tensor("out_59_cast")]; + tensor var_2317_to_fp16 = const()[name = tensor("op_2317_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(279944640)))]; + tensor var_2318_cast = add(x = out_59_cast, y = var_2317_to_fp16)[name = tensor("op_2318_cast")]; + tensor var_2320_to_fp16 = const()[name = tensor("op_2320_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(279947264)))]; + tensor input_175_cast = mul(x = var_2318_cast, y = var_2320_to_fp16)[name = tensor("input_175_cast")]; + tensor var_2328 = const()[name = tensor("op_2328"), val = tensor([1, 1])]; + tensor var_2330 = const()[name = tensor("op_2330"), val = tensor([1, 1])]; + tensor var_2332_pad_type_0 = const()[name = tensor("op_2332_pad_type_0"), val = tensor("custom")]; + tensor var_2332_pad_0 = const()[name = tensor("op_2332_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(279949888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293057152))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293057728)))]; + tensor var_2332_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_2330, groups = var_1186, pad = var_2332_pad_0, pad_type = var_2332_pad_type_0, strides = var_2328, weight = down_blocks_2_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_175_cast)[name = tensor("op_2332_cast")]; + tensor var_2333_split_sizes_0 = const()[name = tensor("op_2333_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2333_axis_0 = const()[name = tensor("op_2333_axis_0"), val = tensor(1)]; + tensor var_2333_cast_0, tensor var_2333_cast_1 = split(axis = var_2333_axis_0, split_sizes = var_2333_split_sizes_0, x = var_2332_cast)[name = tensor("op_2333_cast")]; + tensor var_2335_mode_0 = const()[name = tensor("op_2335_mode_0"), val = tensor("EXACT")]; + tensor var_2335_cast = gelu(mode = var_2335_mode_0, x = var_2333_cast_1)[name = tensor("op_2335_cast")]; + tensor input_177_cast = mul(x = var_2333_cast_0, y = var_2335_cast)[name = tensor("input_177_cast")]; + tensor var_2339 = const()[name = tensor("op_2339"), val = tensor([1, 1])]; + tensor var_2341 = const()[name = tensor("op_2341"), val = tensor([1, 1])]; + tensor var_2343_pad_type_0 = const()[name = tensor("op_2343_pad_type_0"), val = tensor("custom")]; + 
tensor var_2343_pad_0 = const()[name = tensor("op_2343_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(293078272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(297993536))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(297993728)))]; + tensor var_2343_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_2341, groups = var_1186, pad = var_2343_pad_0, pad_type = var_2343_pad_type_0, strides = var_2339, weight = down_blocks_2_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_177_cast)[name = tensor("op_2343_cast")]; + tensor inputs_61_cast = add(x = var_2343_cast, y = inputs_59_cast)[name = tensor("inputs_61_cast")]; + tensor var_2353 = const()[name = tensor("op_2353"), val = tensor([1])]; + tensor channels_mean_61_cast = reduce_mean(axes = var_2353, keep_dims = var_1181, x = inputs_61_cast)[name = tensor("channels_mean_61_cast")]; + tensor zero_mean_61_cast = sub(x = inputs_61_cast, y = channels_mean_61_cast)[name = tensor("zero_mean_61_cast")]; + tensor zero_mean_sq_61_cast = mul(x = zero_mean_61_cast, y = zero_mean_61_cast)[name = tensor("zero_mean_sq_61_cast")]; + tensor var_2357 = const()[name = tensor("op_2357"), val = tensor([1])]; + tensor var_2358_cast = reduce_mean(axes = var_2357, keep_dims = var_1181, x = zero_mean_sq_61_cast)[name = tensor("op_2358_cast")]; + tensor var_2359_to_fp16 = const()[name = tensor("op_2359_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2360_cast = add(x = var_2358_cast, y = var_2359_to_fp16)[name = tensor("op_2360_cast")]; + tensor denom_61_epsilon_0_to_fp16 = const()[name = tensor("denom_61_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_61_cast = rsqrt(epsilon = denom_61_epsilon_0_to_fp16, x = var_2360_cast)[name = tensor("denom_61_cast")]; + tensor out_61_cast = mul(x = zero_mean_61_cast, y = denom_61_cast)[name = tensor("out_61_cast")]; + tensor var_2364_to_fp16 = const()[name = tensor("op_2364_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(297996352)))]; + tensor var_2365_cast = add(x = out_61_cast, y = var_2364_to_fp16)[name = tensor("op_2365_cast")]; + tensor var_2367_to_fp16 = const()[name = tensor("op_2367_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(297998976)))]; + tensor hidden_states_101_cast = mul(x = var_2365_cast, y = var_2367_to_fp16)[name = tensor("hidden_states_101_cast")]; + tensor var_2374 = const()[name = tensor("op_2374"), val = tensor([1, 1])]; + tensor var_2376 = const()[name = tensor("op_2376"), val = tensor([1, 1])]; + tensor q_41_pad_type_0 = const()[name = tensor("q_41_pad_type_0"), val = tensor("custom")]; + tensor q_41_pad_0 = const()[name = tensor("q_41_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(298001600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(298820864))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_41_cast = conv(dilations = var_2376, groups = var_1186, pad = q_41_pad_0, pad_type = q_41_pad_type_0, strides = var_2374, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_101_cast)[name = tensor("q_41_cast")]; + tensor var_2380 = const()[name = tensor("op_2380"), val = tensor([1, 1])]; + tensor var_2382 = const()[name = tensor("op_2382"), val = tensor([1, 1])]; + tensor k_41_pad_type_0 = const()[name = tensor("k_41_pad_type_0"), val = tensor("custom")]; + tensor k_41_pad_0 = const()[name = tensor("k_41_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(298820992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(299640256))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_41_cast = conv(dilations = var_2382, groups = var_1186, pad = k_41_pad_0, pad_type = k_41_pad_type_0, strides = var_2380, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_101_cast)[name = tensor("k_41_cast")]; + tensor var_2386 = const()[name = tensor("op_2386"), val = tensor([1, 1])]; + tensor var_2388 = const()[name = tensor("op_2388"), val = tensor([1, 1])]; + tensor v_41_pad_type_0 = const()[name = tensor("v_41_pad_type_0"), val = tensor("custom")]; + tensor v_41_pad_0 = const()[name = tensor("v_41_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(299640384))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(300869248))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_41_cast = conv(dilations = var_2388, groups = var_1186, pad = v_41_pad_0, pad_type = v_41_pad_type_0, strides = var_2386, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_101_cast)[name = tensor("v_41_cast")]; + tensor var_2392 = const()[name = tensor("op_2392"), val = tensor([2, 20, 64, -1])]; + tensor var_2393_cast = reshape(shape = var_2392, x = q_41_cast)[name = tensor("op_2393_cast")]; + tensor var_2394 = const()[name = tensor("op_2394"), val = tensor([2, 20, 64, -1])]; + tensor var_2395_cast = reshape(shape = var_2394, x = k_41_cast)[name = tensor("op_2395_cast")]; + tensor var_2396 = const()[name = tensor("op_2396"), val = tensor([2, 20, 64, -1])]; + tensor var_2397_cast = reshape(shape = var_2396, x = v_41_cast)[name = tensor("op_2397_cast")]; + tensor attn_weights_81_transpose_x_0 = const()[name = tensor("attn_weights_81_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_81_transpose_y_0 = const()[name = 
tensor("attn_weights_81_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_81_cast = matmul(transpose_x = attn_weights_81_transpose_x_0, transpose_y = attn_weights_81_transpose_y_0, x = var_2393_cast, y = var_2395_cast)[name = tensor("attn_weights_81_cast")]; + tensor attn_weights_83_cast = mul(x = attn_weights_81_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_83_cast")]; + tensor var_2401_cast = softmax(axis = var_1170, x = attn_weights_83_cast)[name = tensor("op_2401_cast")]; + tensor attn_41_transpose_x_0 = const()[name = tensor("attn_41_transpose_x_0"), val = tensor(false)]; + tensor attn_41_transpose_y_0 = const()[name = tensor("attn_41_transpose_y_0"), val = tensor(true)]; + tensor attn_41_cast = matmul(transpose_x = attn_41_transpose_x_0, transpose_y = attn_41_transpose_y_0, x = var_2397_cast, y = var_2401_cast)[name = tensor("attn_41_cast")]; + tensor var_2405 = const()[name = tensor("op_2405"), val = tensor([2, 1280, 1, -1])]; + tensor input_179_cast = reshape(shape = var_2405, x = attn_41_cast)[name = tensor("input_179_cast")]; + tensor var_2410 = const()[name = tensor("op_2410"), val = tensor([1, 1])]; + tensor var_2412 = const()[name = tensor("op_2412"), val = tensor([1, 1])]; + tensor var_2414_pad_type_0 = const()[name = tensor("op_2414_pad_type_0"), val = tensor("custom")]; + tensor var_2414_pad_0 = const()[name = tensor("op_2414_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(300869440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(302098304))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(302098496)))]; + tensor var_2414_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_2412, groups = var_1186, pad = var_2414_pad_0, pad_type = var_2414_pad_type_0, strides = var_2410, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_179_cast)[name = tensor("op_2414_cast")]; + tensor inputs_63_cast = add(x = var_2414_cast, y = inputs_61_cast)[name = tensor("inputs_63_cast")]; + tensor var_2418 = const()[name = tensor("op_2418"), val = tensor([1])]; + tensor channels_mean_63_cast = reduce_mean(axes = var_2418, keep_dims = var_1181, x = inputs_63_cast)[name = tensor("channels_mean_63_cast")]; + tensor zero_mean_63_cast = sub(x = inputs_63_cast, y = channels_mean_63_cast)[name = tensor("zero_mean_63_cast")]; + tensor zero_mean_sq_63_cast = mul(x = zero_mean_63_cast, y = zero_mean_63_cast)[name = tensor("zero_mean_sq_63_cast")]; + tensor var_2422 = const()[name = tensor("op_2422"), val = tensor([1])]; + tensor var_2423_cast = reduce_mean(axes = var_2422, keep_dims = var_1181, x = zero_mean_sq_63_cast)[name = tensor("op_2423_cast")]; + tensor var_2424_to_fp16 = const()[name = tensor("op_2424_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2425_cast = add(x = var_2423_cast, y = var_2424_to_fp16)[name = tensor("op_2425_cast")]; + tensor 
denom_63_epsilon_0_to_fp16 = const()[name = tensor("denom_63_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_63_cast = rsqrt(epsilon = denom_63_epsilon_0_to_fp16, x = var_2425_cast)[name = tensor("denom_63_cast")]; + tensor out_63_cast = mul(x = zero_mean_63_cast, y = denom_63_cast)[name = tensor("out_63_cast")]; + tensor var_2429_to_fp16 = const()[name = tensor("op_2429_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(302101120)))]; + tensor var_2430_cast = add(x = out_63_cast, y = var_2429_to_fp16)[name = tensor("op_2430_cast")]; + tensor var_2432_to_fp16 = const()[name = tensor("op_2432_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(302103744)))]; + tensor hidden_states_103_cast = mul(x = var_2430_cast, y = var_2432_to_fp16)[name = tensor("hidden_states_103_cast")]; + tensor var_2439 = const()[name = tensor("op_2439"), val = tensor([1, 1])]; + tensor var_2441 = const()[name = tensor("op_2441"), val = tensor([1, 1])]; + tensor q_43_pad_type_0 = const()[name = tensor("q_43_pad_type_0"), val = tensor("custom")]; + tensor q_43_pad_0 = const()[name = tensor("q_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(302106368))), lut = tensor([-0x1.1a8p-6, -0x1.74cp-8, 0x1.74p-8, 0x1.1a8p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_43_cast = conv(dilations = var_2441, groups = var_1186, pad = q_43_pad_0, pad_type = q_43_pad_type_0, strides = var_2439, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_103_cast)[name = tensor("q_43_cast")]; + tensor var_2445 = const()[name = tensor("op_2445"), val = tensor([1, 1])]; + tensor var_2447 = const()[name = tensor("op_2447"), val = tensor([1, 1])]; + tensor k_43_pad_type_0 = const()[name = tensor("k_43_pad_type_0"), val = tensor("custom")]; + tensor k_43_pad_0 = const()[name = tensor("k_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(302516032))), lut = tensor([-0x1.854p-7, -0x1.f78p-9, 0x1.f7cp-9, 0x1.84cp-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_43_cast = conv(dilations = var_2447, groups = var_1186, pad = k_43_pad_0, pad_type = k_43_pad_type_0, strides = var_2445, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_43_cast")]; + tensor var_2451 = const()[name = tensor("op_2451"), val = tensor([1, 1])]; + tensor var_2453 = const()[name = tensor("op_2453"), val = tensor([1, 1])]; + tensor v_43_pad_type_0 = const()[name = tensor("v_43_pad_type_0"), val = tensor("custom")]; + tensor v_43_pad_0 = const()[name = tensor("v_43_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(303171456))), lut = tensor([-0x1.cb8p-7, -0x1.138p-8, 0x1.15p-8, 0x1.cbcp-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_43_cast = conv(dilations = var_2453, groups = var_1186, pad = v_43_pad_0, pad_type = v_43_pad_type_0, strides = var_2451, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_43_cast")]; + tensor var_2457 = const()[name = tensor("op_2457"), val = tensor([2, 20, 64, -1])]; + tensor var_2458_cast = reshape(shape = var_2457, x = q_43_cast)[name = tensor("op_2458_cast")]; + tensor var_2459 = const()[name = tensor("op_2459"), val = tensor([2, 20, 64, -1])]; + tensor var_2460_cast = reshape(shape = var_2459, x = k_43_cast)[name = tensor("op_2460_cast")]; + tensor var_2461 = const()[name = tensor("op_2461"), val = tensor([2, 20, 64, -1])]; + tensor var_2462_cast = reshape(shape = var_2461, x = v_43_cast)[name = tensor("op_2462_cast")]; + tensor attn_weights_85_transpose_x_0 = const()[name = tensor("attn_weights_85_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_85_transpose_y_0 = const()[name = tensor("attn_weights_85_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_85_cast = matmul(transpose_x = attn_weights_85_transpose_x_0, transpose_y = attn_weights_85_transpose_y_0, x = var_2458_cast, y = var_2460_cast)[name = tensor("attn_weights_85_cast")]; + tensor attn_weights_87_cast = mul(x = attn_weights_85_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_87_cast")]; + tensor var_2466_cast = softmax(axis = var_1170, x = attn_weights_87_cast)[name = tensor("op_2466_cast")]; + tensor attn_43_transpose_x_0 = const()[name = tensor("attn_43_transpose_x_0"), val = tensor(false)]; + tensor attn_43_transpose_y_0 = const()[name = tensor("attn_43_transpose_y_0"), val = tensor(true)]; + tensor attn_43_cast = matmul(transpose_x = attn_43_transpose_x_0, transpose_y = attn_43_transpose_y_0, x = var_2462_cast, y = var_2466_cast)[name = tensor("attn_43_cast")]; + tensor var_2470 = const()[name = tensor("op_2470"), val = tensor([2, 1280, 1, -1])]; + tensor input_181_cast = reshape(shape = var_2470, x = attn_43_cast)[name = tensor("input_181_cast")]; + tensor var_2475 = const()[name = tensor("op_2475"), val = tensor([1, 1])]; + tensor var_2477 = const()[name = tensor("op_2477"), val = tensor([1, 1])]; + tensor var_2479_pad_type_0 = const()[name = tensor("op_2479_pad_type_0"), val = tensor("custom")]; + tensor var_2479_pad_0 = const()[name = tensor("op_2479_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(303826880))), lut = tensor([-0x1.06cp-7, -0x1.3ecp-9, 0x1.3dcp-9, 0x1.068p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(304236544)))]; + tensor var_2479_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_2477, groups = var_1186, pad = 
var_2479_pad_0, pad_type = var_2479_pad_type_0, strides = var_2475, weight = down_blocks_2_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_181_cast)[name = tensor("op_2479_cast")]; + tensor inputs_65_cast = add(x = var_2479_cast, y = inputs_63_cast)[name = tensor("inputs_65_cast")]; + tensor var_2483 = const()[name = tensor("op_2483"), val = tensor([1])]; + tensor channels_mean_65_cast = reduce_mean(axes = var_2483, keep_dims = var_1181, x = inputs_65_cast)[name = tensor("channels_mean_65_cast")]; + tensor zero_mean_65_cast = sub(x = inputs_65_cast, y = channels_mean_65_cast)[name = tensor("zero_mean_65_cast")]; + tensor zero_mean_sq_65_cast = mul(x = zero_mean_65_cast, y = zero_mean_65_cast)[name = tensor("zero_mean_sq_65_cast")]; + tensor var_2487 = const()[name = tensor("op_2487"), val = tensor([1])]; + tensor var_2488_cast = reduce_mean(axes = var_2487, keep_dims = var_1181, x = zero_mean_sq_65_cast)[name = tensor("op_2488_cast")]; + tensor var_2489_to_fp16 = const()[name = tensor("op_2489_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2490_cast = add(x = var_2488_cast, y = var_2489_to_fp16)[name = tensor("op_2490_cast")]; + tensor denom_65_epsilon_0_to_fp16 = const()[name = tensor("denom_65_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_65_cast = rsqrt(epsilon = denom_65_epsilon_0_to_fp16, x = var_2490_cast)[name = tensor("denom_65_cast")]; + tensor out_65_cast = mul(x = zero_mean_65_cast, y = denom_65_cast)[name = tensor("out_65_cast")]; + tensor var_2494_to_fp16 = const()[name = tensor("op_2494_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(304239168)))]; + tensor var_2495_cast = add(x = out_65_cast, y = var_2494_to_fp16)[name = tensor("op_2495_cast")]; + tensor var_2497_to_fp16 = const()[name = tensor("op_2497_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(304241792)))]; + tensor input_183_cast = mul(x = var_2495_cast, y = var_2497_to_fp16)[name = tensor("input_183_cast")]; + tensor var_2505 = const()[name = tensor("op_2505"), val = tensor([1, 1])]; + tensor var_2507 = const()[name = tensor("op_2507"), val = tensor([1, 1])]; + tensor var_2509_pad_type_0 = const()[name = tensor("op_2509_pad_type_0"), val = tensor("custom")]; + tensor var_2509_pad_0 = const()[name = tensor("op_2509_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(304244416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(314074880))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(314075072)))]; + tensor var_2509_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_2507, groups = var_1186, pad = var_2509_pad_0, pad_type = var_2509_pad_type_0, strides = var_2505, weight = down_blocks_2_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_183_cast)[name = 
tensor("op_2509_cast")]; + tensor var_2510_split_sizes_0 = const()[name = tensor("op_2510_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2510_axis_0 = const()[name = tensor("op_2510_axis_0"), val = tensor(1)]; + tensor var_2510_cast_0, tensor var_2510_cast_1 = split(axis = var_2510_axis_0, split_sizes = var_2510_split_sizes_0, x = var_2509_cast)[name = tensor("op_2510_cast")]; + tensor var_2512_mode_0 = const()[name = tensor("op_2512_mode_0"), val = tensor("EXACT")]; + tensor var_2512_cast = gelu(mode = var_2512_mode_0, x = var_2510_cast_1)[name = tensor("op_2512_cast")]; + tensor input_185_cast = mul(x = var_2510_cast_0, y = var_2512_cast)[name = tensor("input_185_cast")]; + tensor var_2516 = const()[name = tensor("op_2516"), val = tensor([1, 1])]; + tensor var_2518 = const()[name = tensor("op_2518"), val = tensor([1, 1])]; + tensor var_2520_pad_type_0 = const()[name = tensor("op_2520_pad_type_0"), val = tensor("custom")]; + tensor var_2520_pad_0 = const()[name = tensor("op_2520_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(314095616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(319010880))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(319011072)))]; + tensor var_2520_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_2518, groups = var_1186, pad = var_2520_pad_0, pad_type = var_2520_pad_type_0, strides = var_2516, weight = down_blocks_2_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_185_cast)[name = tensor("op_2520_cast")]; + tensor inputs_67_cast = add(x = var_2520_cast, y = inputs_65_cast)[name = tensor("inputs_67_cast")]; + tensor var_2530 = const()[name = tensor("op_2530"), val = tensor([1])]; + tensor channels_mean_67_cast = reduce_mean(axes = var_2530, keep_dims = var_1181, x = inputs_67_cast)[name = tensor("channels_mean_67_cast")]; + tensor zero_mean_67_cast = sub(x = inputs_67_cast, y = channels_mean_67_cast)[name = tensor("zero_mean_67_cast")]; + tensor zero_mean_sq_67_cast = mul(x = zero_mean_67_cast, y = zero_mean_67_cast)[name = tensor("zero_mean_sq_67_cast")]; + tensor var_2534 = const()[name = tensor("op_2534"), val = tensor([1])]; + tensor var_2535_cast = reduce_mean(axes = var_2534, keep_dims = var_1181, x = zero_mean_sq_67_cast)[name = tensor("op_2535_cast")]; + tensor var_2536_to_fp16 = const()[name = tensor("op_2536_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2537_cast = add(x = var_2535_cast, y = var_2536_to_fp16)[name = tensor("op_2537_cast")]; + tensor denom_67_epsilon_0_to_fp16 = const()[name = tensor("denom_67_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_67_cast = rsqrt(epsilon = denom_67_epsilon_0_to_fp16, x = var_2537_cast)[name = tensor("denom_67_cast")]; + tensor out_67_cast = mul(x = zero_mean_67_cast, y = denom_67_cast)[name = tensor("out_67_cast")]; + tensor var_2541_to_fp16 = const()[name = tensor("op_2541_to_fp16"), val = tensor(BLOBFILE(path 
= tensor("@model_path/weights/weight.bin"), offset = tensor(319013696)))]; + tensor var_2542_cast = add(x = out_67_cast, y = var_2541_to_fp16)[name = tensor("op_2542_cast")]; + tensor var_2544_to_fp16 = const()[name = tensor("op_2544_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(319016320)))]; + tensor hidden_states_107_cast = mul(x = var_2542_cast, y = var_2544_to_fp16)[name = tensor("hidden_states_107_cast")]; + tensor var_2551 = const()[name = tensor("op_2551"), val = tensor([1, 1])]; + tensor var_2553 = const()[name = tensor("op_2553"), val = tensor([1, 1])]; + tensor q_45_pad_type_0 = const()[name = tensor("q_45_pad_type_0"), val = tensor("custom")]; + tensor q_45_pad_0 = const()[name = tensor("q_45_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(319018944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(319838208))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_45_cast = conv(dilations = var_2553, groups = var_1186, pad = q_45_pad_0, pad_type = q_45_pad_type_0, strides = var_2551, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_107_cast)[name = tensor("q_45_cast")]; + tensor var_2557 = const()[name = tensor("op_2557"), val = tensor([1, 1])]; + tensor var_2559 = const()[name = tensor("op_2559"), val = tensor([1, 1])]; + tensor k_45_pad_type_0 = const()[name = tensor("k_45_pad_type_0"), val = tensor("custom")]; + tensor k_45_pad_0 = const()[name = tensor("k_45_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(319838336))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(321067200))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_45_cast = conv(dilations = var_2559, groups = var_1186, pad = k_45_pad_0, pad_type = k_45_pad_type_0, strides = var_2557, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_107_cast)[name = tensor("k_45_cast")]; + tensor var_2563 = const()[name = tensor("op_2563"), val = tensor([1, 1])]; + tensor var_2565 = const()[name = tensor("op_2565"), val = tensor([1, 1])]; + tensor v_45_pad_type_0 = const()[name = tensor("v_45_pad_type_0"), val = tensor("custom")]; + tensor v_45_pad_0 = const()[name = tensor("v_45_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(321067392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(322296256))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_45_cast = conv(dilations = var_2565, groups = var_1186, pad = v_45_pad_0, pad_type = 
v_45_pad_type_0, strides = var_2563, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_107_cast)[name = tensor("v_45_cast")]; + tensor var_2569 = const()[name = tensor("op_2569"), val = tensor([2, 20, 64, -1])]; + tensor var_2570_cast = reshape(shape = var_2569, x = q_45_cast)[name = tensor("op_2570_cast")]; + tensor var_2571 = const()[name = tensor("op_2571"), val = tensor([2, 20, 64, -1])]; + tensor var_2572_cast = reshape(shape = var_2571, x = k_45_cast)[name = tensor("op_2572_cast")]; + tensor var_2573 = const()[name = tensor("op_2573"), val = tensor([2, 20, 64, -1])]; + tensor var_2574_cast = reshape(shape = var_2573, x = v_45_cast)[name = tensor("op_2574_cast")]; + tensor attn_weights_89_transpose_x_0 = const()[name = tensor("attn_weights_89_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_89_transpose_y_0 = const()[name = tensor("attn_weights_89_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_89_cast = matmul(transpose_x = attn_weights_89_transpose_x_0, transpose_y = attn_weights_89_transpose_y_0, x = var_2570_cast, y = var_2572_cast)[name = tensor("attn_weights_89_cast")]; + tensor attn_weights_91_cast = mul(x = attn_weights_89_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_91_cast")]; + tensor var_2578_cast = softmax(axis = var_1170, x = attn_weights_91_cast)[name = tensor("op_2578_cast")]; + tensor attn_45_transpose_x_0 = const()[name = tensor("attn_45_transpose_x_0"), val = tensor(false)]; + tensor attn_45_transpose_y_0 = const()[name = tensor("attn_45_transpose_y_0"), val = tensor(true)]; + tensor attn_45_cast = matmul(transpose_x = attn_45_transpose_x_0, transpose_y = attn_45_transpose_y_0, x = var_2574_cast, y = var_2578_cast)[name = tensor("attn_45_cast")]; + tensor var_2582 = const()[name = tensor("op_2582"), val = tensor([2, 1280, 1, -1])]; + tensor input_187_cast = reshape(shape = var_2582, x = attn_45_cast)[name = tensor("input_187_cast")]; + tensor var_2587 = const()[name = tensor("op_2587"), val = tensor([1, 1])]; + tensor var_2589 = const()[name = tensor("op_2589"), val = tensor([1, 1])]; + tensor var_2591_pad_type_0 = const()[name = tensor("op_2591_pad_type_0"), val = tensor("custom")]; + tensor var_2591_pad_0 = const()[name = tensor("op_2591_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(322296448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323934912))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323935488)))]; + tensor var_2591_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_2589, groups = var_1186, pad = var_2591_pad_0, pad_type = var_2591_pad_type_0, strides = var_2587, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_187_cast)[name = tensor("op_2591_cast")]; + tensor inputs_69_cast = add(x = var_2591_cast, y = 
inputs_67_cast)[name = tensor("inputs_69_cast")]; + tensor var_2595 = const()[name = tensor("op_2595"), val = tensor([1])]; + tensor channels_mean_69_cast = reduce_mean(axes = var_2595, keep_dims = var_1181, x = inputs_69_cast)[name = tensor("channels_mean_69_cast")]; + tensor zero_mean_69_cast = sub(x = inputs_69_cast, y = channels_mean_69_cast)[name = tensor("zero_mean_69_cast")]; + tensor zero_mean_sq_69_cast = mul(x = zero_mean_69_cast, y = zero_mean_69_cast)[name = tensor("zero_mean_sq_69_cast")]; + tensor var_2599 = const()[name = tensor("op_2599"), val = tensor([1])]; + tensor var_2600_cast = reduce_mean(axes = var_2599, keep_dims = var_1181, x = zero_mean_sq_69_cast)[name = tensor("op_2600_cast")]; + tensor var_2601_to_fp16 = const()[name = tensor("op_2601_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2602_cast = add(x = var_2600_cast, y = var_2601_to_fp16)[name = tensor("op_2602_cast")]; + tensor denom_69_epsilon_0_to_fp16 = const()[name = tensor("denom_69_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_69_cast = rsqrt(epsilon = denom_69_epsilon_0_to_fp16, x = var_2602_cast)[name = tensor("denom_69_cast")]; + tensor out_69_cast = mul(x = zero_mean_69_cast, y = denom_69_cast)[name = tensor("out_69_cast")]; + tensor var_2606_to_fp16 = const()[name = tensor("op_2606_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323938112)))]; + tensor var_2607_cast = add(x = out_69_cast, y = var_2606_to_fp16)[name = tensor("op_2607_cast")]; + tensor var_2609_to_fp16 = const()[name = tensor("op_2609_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323940736)))]; + tensor hidden_states_109_cast = mul(x = var_2607_cast, y = var_2609_to_fp16)[name = tensor("hidden_states_109_cast")]; + tensor var_2616 = const()[name = tensor("op_2616"), val = tensor([1, 1])]; + tensor var_2618 = const()[name = tensor("op_2618"), val = tensor([1, 1])]; + tensor q_47_pad_type_0 = const()[name = tensor("q_47_pad_type_0"), val = tensor("custom")]; + tensor q_47_pad_0 = const()[name = tensor("q_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(323943360))), lut = tensor([-0x1.61p-7, 0x1.614p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_47_cast = conv(dilations = var_2618, groups = var_1186, pad = q_47_pad_0, pad_type = q_47_pad_type_0, strides = var_2616, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_109_cast)[name = tensor("q_47_cast")]; + tensor var_2622 = const()[name = tensor("op_2622"), val = tensor([1, 1])]; + tensor var_2624 = const()[name = tensor("op_2624"), val = tensor([1, 1])]; + tensor k_47_pad_type_0 = const()[name = tensor("k_47_pad_type_0"), val = tensor("custom")]; + tensor k_47_pad_0 = const()[name = tensor("k_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(324148224))), lut = tensor([-0x1.84cp-7, -0x1.f74p-9, 0x1.fep-9, 0x1.86cp-7]), name = 
tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_47_cast = conv(dilations = var_2624, groups = var_1186, pad = k_47_pad_0, pad_type = k_47_pad_type_0, strides = var_2622, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_47_cast")]; + tensor var_2628 = const()[name = tensor("op_2628"), val = tensor([1, 1])]; + tensor var_2630 = const()[name = tensor("op_2630"), val = tensor([1, 1])]; + tensor v_47_pad_type_0 = const()[name = tensor("v_47_pad_type_0"), val = tensor("custom")]; + tensor v_47_pad_0 = const()[name = tensor("v_47_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(324803648))), lut = tensor([-0x1.e6cp-8, 0x1.e64p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_47_cast = conv(dilations = var_2630, groups = var_1186, pad = v_47_pad_0, pad_type = v_47_pad_type_0, strides = var_2628, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_47_cast")]; + tensor var_2634 = const()[name = tensor("op_2634"), val = tensor([2, 20, 64, -1])]; + tensor var_2635_cast = reshape(shape = var_2634, x = q_47_cast)[name = tensor("op_2635_cast")]; + tensor var_2636 = const()[name = tensor("op_2636"), val = tensor([2, 20, 64, -1])]; + tensor var_2637_cast = reshape(shape = var_2636, x = k_47_cast)[name = tensor("op_2637_cast")]; + tensor var_2638 = const()[name = tensor("op_2638"), val = tensor([2, 20, 64, -1])]; + tensor var_2639_cast = reshape(shape = var_2638, x = v_47_cast)[name = tensor("op_2639_cast")]; + tensor attn_weights_93_transpose_x_0 = const()[name = tensor("attn_weights_93_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_93_transpose_y_0 = const()[name = tensor("attn_weights_93_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_93_cast = matmul(transpose_x = attn_weights_93_transpose_x_0, transpose_y = attn_weights_93_transpose_y_0, x = var_2635_cast, y = var_2637_cast)[name = tensor("attn_weights_93_cast")]; + tensor attn_weights_95_cast = mul(x = attn_weights_93_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_95_cast")]; + tensor var_2643_cast = softmax(axis = var_1170, x = attn_weights_95_cast)[name = tensor("op_2643_cast")]; + tensor attn_47_transpose_x_0 = const()[name = tensor("attn_47_transpose_x_0"), val = tensor(false)]; + tensor attn_47_transpose_y_0 = const()[name = tensor("attn_47_transpose_y_0"), val = tensor(true)]; + tensor attn_47_cast = matmul(transpose_x = attn_47_transpose_x_0, transpose_y = attn_47_transpose_y_0, x = var_2639_cast, y = var_2643_cast)[name = tensor("attn_47_cast")]; + tensor var_2647 = const()[name = tensor("op_2647"), val = tensor([2, 1280, 1, -1])]; + tensor input_189_cast = reshape(shape = var_2647, x = attn_47_cast)[name = tensor("input_189_cast")]; + tensor var_2652 = const()[name = tensor("op_2652"), val = tensor([1, 1])]; + tensor var_2654 = const()[name = tensor("op_2654"), val = tensor([1, 1])]; + tensor var_2656_pad_type_0 = const()[name = tensor("op_2656_pad_type_0"), val = tensor("custom")]; + tensor var_2656_pad_0 = const()[name = 
tensor("op_2656_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(325131392))), lut = tensor([-0x1.214p-8, 0x1.21p-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(325336256)))]; + tensor var_2656_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_2654, groups = var_1186, pad = var_2656_pad_0, pad_type = var_2656_pad_type_0, strides = var_2652, weight = down_blocks_2_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_189_cast)[name = tensor("op_2656_cast")]; + tensor inputs_71_cast = add(x = var_2656_cast, y = inputs_69_cast)[name = tensor("inputs_71_cast")]; + tensor var_2660 = const()[name = tensor("op_2660"), val = tensor([1])]; + tensor channels_mean_71_cast = reduce_mean(axes = var_2660, keep_dims = var_1181, x = inputs_71_cast)[name = tensor("channels_mean_71_cast")]; + tensor zero_mean_71_cast = sub(x = inputs_71_cast, y = channels_mean_71_cast)[name = tensor("zero_mean_71_cast")]; + tensor zero_mean_sq_71_cast = mul(x = zero_mean_71_cast, y = zero_mean_71_cast)[name = tensor("zero_mean_sq_71_cast")]; + tensor var_2664 = const()[name = tensor("op_2664"), val = tensor([1])]; + tensor var_2665_cast = reduce_mean(axes = var_2664, keep_dims = var_1181, x = zero_mean_sq_71_cast)[name = tensor("op_2665_cast")]; + tensor var_2666_to_fp16 = const()[name = tensor("op_2666_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2667_cast = add(x = var_2665_cast, y = var_2666_to_fp16)[name = tensor("op_2667_cast")]; + tensor denom_71_epsilon_0_to_fp16 = const()[name = tensor("denom_71_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_71_cast = rsqrt(epsilon = denom_71_epsilon_0_to_fp16, x = var_2667_cast)[name = tensor("denom_71_cast")]; + tensor out_71_cast = mul(x = zero_mean_71_cast, y = denom_71_cast)[name = tensor("out_71_cast")]; + tensor var_2671_to_fp16 = const()[name = tensor("op_2671_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(325338880)))]; + tensor var_2672_cast = add(x = out_71_cast, y = var_2671_to_fp16)[name = tensor("op_2672_cast")]; + tensor var_2674_to_fp16 = const()[name = tensor("op_2674_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(325341504)))]; + tensor input_191_cast = mul(x = var_2672_cast, y = var_2674_to_fp16)[name = tensor("input_191_cast")]; + tensor var_2682 = const()[name = tensor("op_2682"), val = tensor([1, 1])]; + tensor var_2684 = const()[name = tensor("op_2684"), val = tensor([1, 1])]; + tensor var_2686_pad_type_0 = const()[name = tensor("op_2686_pad_type_0"), val = tensor("custom")]; + tensor var_2686_pad_0 = const()[name = tensor("op_2686_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(325344128))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338451392))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338451968)))]; + tensor var_2686_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_2684, groups = var_1186, pad = var_2686_pad_0, pad_type = var_2686_pad_type_0, strides = var_2682, weight = down_blocks_2_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_191_cast)[name = tensor("op_2686_cast")]; + tensor var_2687_split_sizes_0 = const()[name = tensor("op_2687_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2687_axis_0 = const()[name = tensor("op_2687_axis_0"), val = tensor(1)]; + tensor var_2687_cast_0, tensor var_2687_cast_1 = split(axis = var_2687_axis_0, split_sizes = var_2687_split_sizes_0, x = var_2686_cast)[name = tensor("op_2687_cast")]; + tensor var_2689_mode_0 = const()[name = tensor("op_2689_mode_0"), val = tensor("EXACT")]; + tensor var_2689_cast = gelu(mode = var_2689_mode_0, x = var_2687_cast_1)[name = tensor("op_2689_cast")]; + tensor input_193_cast = mul(x = var_2687_cast_0, y = var_2689_cast)[name = tensor("input_193_cast")]; + tensor var_2693 = const()[name = tensor("op_2693"), val = tensor([1, 1])]; + tensor var_2695 = const()[name = tensor("op_2695"), val = tensor([1, 1])]; + tensor var_2697_pad_type_0 = const()[name = tensor("op_2697_pad_type_0"), val = tensor("custom")]; + tensor var_2697_pad_0 = const()[name = tensor("op_2697_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(338472512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(343387776))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(343387968)))]; + tensor var_2697_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_2695, groups = var_1186, pad = var_2697_pad_0, pad_type = var_2697_pad_type_0, strides = var_2693, weight = down_blocks_2_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_193_cast)[name = tensor("op_2697_cast")]; + tensor inputs_73_cast = add(x = var_2697_cast, y = inputs_71_cast)[name = tensor("inputs_73_cast")]; + tensor var_2707 = const()[name = tensor("op_2707"), val = tensor([1])]; + tensor channels_mean_73_cast = reduce_mean(axes = var_2707, keep_dims = var_1181, x = inputs_73_cast)[name = tensor("channels_mean_73_cast")]; + tensor zero_mean_73_cast = sub(x = inputs_73_cast, y = channels_mean_73_cast)[name = tensor("zero_mean_73_cast")]; + tensor zero_mean_sq_73_cast = mul(x = 
zero_mean_73_cast, y = zero_mean_73_cast)[name = tensor("zero_mean_sq_73_cast")]; + tensor var_2711 = const()[name = tensor("op_2711"), val = tensor([1])]; + tensor var_2712_cast = reduce_mean(axes = var_2711, keep_dims = var_1181, x = zero_mean_sq_73_cast)[name = tensor("op_2712_cast")]; + tensor var_2713_to_fp16 = const()[name = tensor("op_2713_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2714_cast = add(x = var_2712_cast, y = var_2713_to_fp16)[name = tensor("op_2714_cast")]; + tensor denom_73_epsilon_0_to_fp16 = const()[name = tensor("denom_73_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_73_cast = rsqrt(epsilon = denom_73_epsilon_0_to_fp16, x = var_2714_cast)[name = tensor("denom_73_cast")]; + tensor out_73_cast = mul(x = zero_mean_73_cast, y = denom_73_cast)[name = tensor("out_73_cast")]; + tensor var_2718_to_fp16 = const()[name = tensor("op_2718_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(343390592)))]; + tensor var_2719_cast = add(x = out_73_cast, y = var_2718_to_fp16)[name = tensor("op_2719_cast")]; + tensor var_2721_to_fp16 = const()[name = tensor("op_2721_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(343393216)))]; + tensor hidden_states_113_cast = mul(x = var_2719_cast, y = var_2721_to_fp16)[name = tensor("hidden_states_113_cast")]; + tensor var_2728 = const()[name = tensor("op_2728"), val = tensor([1, 1])]; + tensor var_2730 = const()[name = tensor("op_2730"), val = tensor([1, 1])]; + tensor q_49_pad_type_0 = const()[name = tensor("q_49_pad_type_0"), val = tensor("custom")]; + tensor q_49_pad_0 = const()[name = tensor("q_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(343395840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(344215104))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_49_cast = conv(dilations = var_2730, groups = var_1186, pad = q_49_pad_0, pad_type = q_49_pad_type_0, strides = var_2728, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_113_cast)[name = tensor("q_49_cast")]; + tensor var_2734 = const()[name = tensor("op_2734"), val = tensor([1, 1])]; + tensor var_2736 = const()[name = tensor("op_2736"), val = tensor([1, 1])]; + tensor k_49_pad_type_0 = const()[name = tensor("k_49_pad_type_0"), val = tensor("custom")]; + tensor k_49_pad_0 = const()[name = tensor("k_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(344215232))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(345034496))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_49_cast = conv(dilations = var_2736, groups = var_1186, pad = k_49_pad_0, pad_type = k_49_pad_type_0, strides = var_2734, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_113_cast)[name = 
tensor("k_49_cast")]; + tensor var_2740 = const()[name = tensor("op_2740"), val = tensor([1, 1])]; + tensor var_2742 = const()[name = tensor("op_2742"), val = tensor([1, 1])]; + tensor v_49_pad_type_0 = const()[name = tensor("v_49_pad_type_0"), val = tensor("custom")]; + tensor v_49_pad_0 = const()[name = tensor("v_49_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(345034624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(346263488))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_49_cast = conv(dilations = var_2742, groups = var_1186, pad = v_49_pad_0, pad_type = v_49_pad_type_0, strides = var_2740, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_113_cast)[name = tensor("v_49_cast")]; + tensor var_2746 = const()[name = tensor("op_2746"), val = tensor([2, 20, 64, -1])]; + tensor var_2747_cast = reshape(shape = var_2746, x = q_49_cast)[name = tensor("op_2747_cast")]; + tensor var_2748 = const()[name = tensor("op_2748"), val = tensor([2, 20, 64, -1])]; + tensor var_2749_cast = reshape(shape = var_2748, x = k_49_cast)[name = tensor("op_2749_cast")]; + tensor var_2750 = const()[name = tensor("op_2750"), val = tensor([2, 20, 64, -1])]; + tensor var_2751_cast = reshape(shape = var_2750, x = v_49_cast)[name = tensor("op_2751_cast")]; + tensor attn_weights_97_transpose_x_0 = const()[name = tensor("attn_weights_97_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_97_transpose_y_0 = const()[name = tensor("attn_weights_97_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_97_cast = matmul(transpose_x = attn_weights_97_transpose_x_0, transpose_y = attn_weights_97_transpose_y_0, x = var_2747_cast, y = var_2749_cast)[name = tensor("attn_weights_97_cast")]; + tensor attn_weights_99_cast = mul(x = attn_weights_97_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_99_cast")]; + tensor var_2755_cast = softmax(axis = var_1170, x = attn_weights_99_cast)[name = tensor("op_2755_cast")]; + tensor attn_49_transpose_x_0 = const()[name = tensor("attn_49_transpose_x_0"), val = tensor(false)]; + tensor attn_49_transpose_y_0 = const()[name = tensor("attn_49_transpose_y_0"), val = tensor(true)]; + tensor attn_49_cast = matmul(transpose_x = attn_49_transpose_x_0, transpose_y = attn_49_transpose_y_0, x = var_2751_cast, y = var_2755_cast)[name = tensor("attn_49_cast")]; + tensor var_2759 = const()[name = tensor("op_2759"), val = tensor([2, 1280, 1, -1])]; + tensor input_195_cast = reshape(shape = var_2759, x = attn_49_cast)[name = tensor("input_195_cast")]; + tensor var_2764 = const()[name = tensor("op_2764"), val = tensor([1, 1])]; + tensor var_2766 = const()[name = tensor("op_2766"), val = tensor([1, 1])]; + tensor var_2768_pad_type_0 = const()[name = tensor("op_2768_pad_type_0"), val = tensor("custom")]; + tensor var_2768_pad_0 = const()[name = tensor("op_2768_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(346263680))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(347492544))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347492736)))]; + tensor var_2768_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_2766, groups = var_1186, pad = var_2768_pad_0, pad_type = var_2768_pad_type_0, strides = var_2764, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_195_cast)[name = tensor("op_2768_cast")]; + tensor inputs_75_cast = add(x = var_2768_cast, y = inputs_73_cast)[name = tensor("inputs_75_cast")]; + tensor var_2772 = const()[name = tensor("op_2772"), val = tensor([1])]; + tensor channels_mean_75_cast = reduce_mean(axes = var_2772, keep_dims = var_1181, x = inputs_75_cast)[name = tensor("channels_mean_75_cast")]; + tensor zero_mean_75_cast = sub(x = inputs_75_cast, y = channels_mean_75_cast)[name = tensor("zero_mean_75_cast")]; + tensor zero_mean_sq_75_cast = mul(x = zero_mean_75_cast, y = zero_mean_75_cast)[name = tensor("zero_mean_sq_75_cast")]; + tensor var_2776 = const()[name = tensor("op_2776"), val = tensor([1])]; + tensor var_2777_cast = reduce_mean(axes = var_2776, keep_dims = var_1181, x = zero_mean_sq_75_cast)[name = tensor("op_2777_cast")]; + tensor var_2778_to_fp16 = const()[name = tensor("op_2778_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2779_cast = add(x = var_2777_cast, y = var_2778_to_fp16)[name = tensor("op_2779_cast")]; + tensor denom_75_epsilon_0_to_fp16 = const()[name = tensor("denom_75_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_75_cast = rsqrt(epsilon = denom_75_epsilon_0_to_fp16, x = var_2779_cast)[name = tensor("denom_75_cast")]; + tensor out_75_cast = mul(x = zero_mean_75_cast, y = denom_75_cast)[name = tensor("out_75_cast")]; + tensor var_2783_to_fp16 = const()[name = tensor("op_2783_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347495360)))]; + tensor var_2784_cast = add(x = out_75_cast, y = var_2783_to_fp16)[name = tensor("op_2784_cast")]; + tensor var_2786_to_fp16 = const()[name = tensor("op_2786_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347497984)))]; + tensor hidden_states_115_cast = mul(x = var_2784_cast, y = var_2786_to_fp16)[name = tensor("hidden_states_115_cast")]; + tensor var_2793 = const()[name = tensor("op_2793"), val = tensor([1, 1])]; + tensor var_2795 = const()[name = tensor("op_2795"), val = tensor([1, 1])]; + tensor q_51_pad_type_0 = const()[name = tensor("q_51_pad_type_0"), val = tensor("custom")]; + tensor q_51_pad_0 = const()[name = tensor("q_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347500608))), lut = tensor([-0x1.60cp-7, 0x1.608p-7]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_51_cast = conv(dilations = var_2795, groups = 
var_1186, pad = q_51_pad_0, pad_type = q_51_pad_type_0, strides = var_2793, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_115_cast)[name = tensor("q_51_cast")]; + tensor var_2799 = const()[name = tensor("op_2799"), val = tensor([1, 1])]; + tensor var_2801 = const()[name = tensor("op_2801"), val = tensor([1, 1])]; + tensor k_51_pad_type_0 = const()[name = tensor("k_51_pad_type_0"), val = tensor("custom")]; + tensor k_51_pad_0 = const()[name = tensor("k_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(347705472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349016256))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_51_cast = conv(dilations = var_2801, groups = var_1186, pad = k_51_pad_0, pad_type = k_51_pad_type_0, strides = var_2799, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_51_cast")]; + tensor var_2805 = const()[name = tensor("op_2805"), val = tensor([1, 1])]; + tensor var_2807 = const()[name = tensor("op_2807"), val = tensor([1, 1])]; + tensor v_51_pad_type_0 = const()[name = tensor("v_51_pad_type_0"), val = tensor("custom")]; + tensor v_51_pad_0 = const()[name = tensor("v_51_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349016384))), lut = tensor([-0x1.eap-8, 0x1.ecp-8]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_51_cast = conv(dilations = var_2807, groups = var_1186, pad = v_51_pad_0, pad_type = v_51_pad_type_0, strides = var_2805, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_51_cast")]; + tensor var_2811 = const()[name = tensor("op_2811"), val = tensor([2, 20, 64, -1])]; + tensor var_2812_cast = reshape(shape = var_2811, x = q_51_cast)[name = tensor("op_2812_cast")]; + tensor var_2813 = const()[name = tensor("op_2813"), val = tensor([2, 20, 64, -1])]; + tensor var_2814_cast = reshape(shape = var_2813, x = k_51_cast)[name = tensor("op_2814_cast")]; + tensor var_2815 = const()[name = tensor("op_2815"), val = tensor([2, 20, 64, -1])]; + tensor var_2816_cast = reshape(shape = var_2815, x = v_51_cast)[name = tensor("op_2816_cast")]; + tensor attn_weights_101_transpose_x_0 = const()[name = tensor("attn_weights_101_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_101_transpose_y_0 = const()[name = tensor("attn_weights_101_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_101_cast = matmul(transpose_x = attn_weights_101_transpose_x_0, transpose_y = attn_weights_101_transpose_y_0, x = var_2812_cast, y = var_2814_cast)[name = tensor("attn_weights_101_cast")]; + tensor attn_weights_103_cast = mul(x = attn_weights_101_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_103_cast")]; + tensor var_2820_cast = softmax(axis = var_1170, x = attn_weights_103_cast)[name = 
tensor("op_2820_cast")]; + tensor attn_51_transpose_x_0 = const()[name = tensor("attn_51_transpose_x_0"), val = tensor(false)]; + tensor attn_51_transpose_y_0 = const()[name = tensor("attn_51_transpose_y_0"), val = tensor(true)]; + tensor attn_51_cast = matmul(transpose_x = attn_51_transpose_x_0, transpose_y = attn_51_transpose_y_0, x = var_2816_cast, y = var_2820_cast)[name = tensor("attn_51_cast")]; + tensor var_2824 = const()[name = tensor("op_2824"), val = tensor([2, 1280, 1, -1])]; + tensor input_197_cast = reshape(shape = var_2824, x = attn_51_cast)[name = tensor("input_197_cast")]; + tensor var_2829 = const()[name = tensor("op_2829"), val = tensor([1, 1])]; + tensor var_2831 = const()[name = tensor("op_2831"), val = tensor([1, 1])]; + tensor var_2833_pad_type_0 = const()[name = tensor("op_2833_pad_type_0"), val = tensor("custom")]; + tensor var_2833_pad_0 = const()[name = tensor("op_2833_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(349344128))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(350163392))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(350163520)))]; + tensor var_2833_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_2831, groups = var_1186, pad = var_2833_pad_0, pad_type = var_2833_pad_type_0, strides = var_2829, weight = down_blocks_2_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_197_cast)[name = tensor("op_2833_cast")]; + tensor inputs_77_cast = add(x = var_2833_cast, y = inputs_75_cast)[name = tensor("inputs_77_cast")]; + tensor var_2837 = const()[name = tensor("op_2837"), val = tensor([1])]; + tensor channels_mean_77_cast = reduce_mean(axes = var_2837, keep_dims = var_1181, x = inputs_77_cast)[name = tensor("channels_mean_77_cast")]; + tensor zero_mean_77_cast = sub(x = inputs_77_cast, y = channels_mean_77_cast)[name = tensor("zero_mean_77_cast")]; + tensor zero_mean_sq_77_cast = mul(x = zero_mean_77_cast, y = zero_mean_77_cast)[name = tensor("zero_mean_sq_77_cast")]; + tensor var_2841 = const()[name = tensor("op_2841"), val = tensor([1])]; + tensor var_2842_cast = reduce_mean(axes = var_2841, keep_dims = var_1181, x = zero_mean_sq_77_cast)[name = tensor("op_2842_cast")]; + tensor var_2843_to_fp16 = const()[name = tensor("op_2843_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2844_cast = add(x = var_2842_cast, y = var_2843_to_fp16)[name = tensor("op_2844_cast")]; + tensor denom_77_epsilon_0_to_fp16 = const()[name = tensor("denom_77_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_77_cast = rsqrt(epsilon = denom_77_epsilon_0_to_fp16, x = var_2844_cast)[name = tensor("denom_77_cast")]; + tensor out_77_cast = mul(x = zero_mean_77_cast, y = denom_77_cast)[name = tensor("out_77_cast")]; + tensor var_2848_to_fp16 = const()[name = tensor("op_2848_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(350166144)))]; + tensor var_2849_cast = add(x = out_77_cast, y = var_2848_to_fp16)[name = tensor("op_2849_cast")]; + tensor var_2851_to_fp16 = const()[name = tensor("op_2851_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(350168768)))]; + tensor input_199_cast = mul(x = var_2849_cast, y = var_2851_to_fp16)[name = tensor("input_199_cast")]; + tensor var_2859 = const()[name = tensor("op_2859"), val = tensor([1, 1])]; + tensor var_2861 = const()[name = tensor("op_2861"), val = tensor([1, 1])]; + tensor var_2863_pad_type_0 = const()[name = tensor("op_2863_pad_type_0"), val = tensor("custom")]; + tensor var_2863_pad_0 = const()[name = tensor("op_2863_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(350171392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(363278656))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(363279232)))]; + tensor var_2863_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_2861, groups = var_1186, pad = var_2863_pad_0, pad_type = var_2863_pad_type_0, strides = var_2859, weight = down_blocks_2_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_199_cast)[name = tensor("op_2863_cast")]; + tensor var_2864_split_sizes_0 = const()[name = tensor("op_2864_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_2864_axis_0 = const()[name = tensor("op_2864_axis_0"), val = tensor(1)]; + tensor var_2864_cast_0, tensor var_2864_cast_1 = split(axis = var_2864_axis_0, split_sizes = var_2864_split_sizes_0, x = var_2863_cast)[name = tensor("op_2864_cast")]; + tensor var_2866_mode_0 = const()[name = tensor("op_2866_mode_0"), val = tensor("EXACT")]; + tensor var_2866_cast = gelu(mode = var_2866_mode_0, x = var_2864_cast_1)[name = tensor("op_2866_cast")]; + tensor input_201_cast = mul(x = var_2864_cast_0, y = var_2866_cast)[name = tensor("input_201_cast")]; + tensor var_2870 = const()[name = tensor("op_2870"), val = tensor([1, 1])]; + tensor var_2872 = const()[name = tensor("op_2872"), val = tensor([1, 1])]; + tensor var_2874_pad_type_0 = const()[name = tensor("op_2874_pad_type_0"), val = tensor("custom")]; + tensor var_2874_pad_0 = const()[name = tensor("op_2874_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(363299776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(368215040))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16"), val 
= tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(368215232)))]; + tensor var_2874_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_2872, groups = var_1186, pad = var_2874_pad_0, pad_type = var_2874_pad_type_0, strides = var_2870, weight = down_blocks_2_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_201_cast)[name = tensor("op_2874_cast")]; + tensor inputs_79_cast = add(x = var_2874_cast, y = inputs_77_cast)[name = tensor("inputs_79_cast")]; + tensor var_2884 = const()[name = tensor("op_2884"), val = tensor([1])]; + tensor channels_mean_79_cast = reduce_mean(axes = var_2884, keep_dims = var_1181, x = inputs_79_cast)[name = tensor("channels_mean_79_cast")]; + tensor zero_mean_79_cast = sub(x = inputs_79_cast, y = channels_mean_79_cast)[name = tensor("zero_mean_79_cast")]; + tensor zero_mean_sq_79_cast = mul(x = zero_mean_79_cast, y = zero_mean_79_cast)[name = tensor("zero_mean_sq_79_cast")]; + tensor var_2888 = const()[name = tensor("op_2888"), val = tensor([1])]; + tensor var_2889_cast = reduce_mean(axes = var_2888, keep_dims = var_1181, x = zero_mean_sq_79_cast)[name = tensor("op_2889_cast")]; + tensor var_2890_to_fp16 = const()[name = tensor("op_2890_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2891_cast = add(x = var_2889_cast, y = var_2890_to_fp16)[name = tensor("op_2891_cast")]; + tensor denom_79_epsilon_0_to_fp16 = const()[name = tensor("denom_79_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_79_cast = rsqrt(epsilon = denom_79_epsilon_0_to_fp16, x = var_2891_cast)[name = tensor("denom_79_cast")]; + tensor out_79_cast = mul(x = zero_mean_79_cast, y = denom_79_cast)[name = tensor("out_79_cast")]; + tensor var_2895_to_fp16 = const()[name = tensor("op_2895_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(368217856)))]; + tensor var_2896_cast = add(x = out_79_cast, y = var_2895_to_fp16)[name = tensor("op_2896_cast")]; + tensor var_2898_to_fp16 = const()[name = tensor("op_2898_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(368220480)))]; + tensor hidden_states_119_cast = mul(x = var_2896_cast, y = var_2898_to_fp16)[name = tensor("hidden_states_119_cast")]; + tensor var_2905 = const()[name = tensor("op_2905"), val = tensor([1, 1])]; + tensor var_2907 = const()[name = tensor("op_2907"), val = tensor([1, 1])]; + tensor q_53_pad_type_0 = const()[name = tensor("q_53_pad_type_0"), val = tensor("custom")]; + tensor q_53_pad_0 = const()[name = tensor("q_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(368223104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(369042368))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_53_cast = conv(dilations = var_2907, groups = var_1186, pad = q_53_pad_0, pad_type = q_53_pad_type_0, strides = var_2905, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_119_cast)[name = tensor("q_53_cast")]; + tensor var_2911 = const()[name = tensor("op_2911"), val = tensor([1, 1])]; + tensor var_2913 = const()[name = tensor("op_2913"), 
val = tensor([1, 1])]; + tensor k_53_pad_type_0 = const()[name = tensor("k_53_pad_type_0"), val = tensor("custom")]; + tensor k_53_pad_0 = const()[name = tensor("k_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(369042496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(369861760))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_53_cast = conv(dilations = var_2913, groups = var_1186, pad = k_53_pad_0, pad_type = k_53_pad_type_0, strides = var_2911, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_119_cast)[name = tensor("k_53_cast")]; + tensor var_2917 = const()[name = tensor("op_2917"), val = tensor([1, 1])]; + tensor var_2919 = const()[name = tensor("op_2919"), val = tensor([1, 1])]; + tensor v_53_pad_type_0 = const()[name = tensor("v_53_pad_type_0"), val = tensor("custom")]; + tensor v_53_pad_0 = const()[name = tensor("v_53_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(369861888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(371090752))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_53_cast = conv(dilations = var_2919, groups = var_1186, pad = v_53_pad_0, pad_type = v_53_pad_type_0, strides = var_2917, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_119_cast)[name = tensor("v_53_cast")]; + tensor var_2923 = const()[name = tensor("op_2923"), val = tensor([2, 20, 64, -1])]; + tensor var_2924_cast = reshape(shape = var_2923, x = q_53_cast)[name = tensor("op_2924_cast")]; + tensor var_2925 = const()[name = tensor("op_2925"), val = tensor([2, 20, 64, -1])]; + tensor var_2926_cast = reshape(shape = var_2925, x = k_53_cast)[name = tensor("op_2926_cast")]; + tensor var_2927 = const()[name = tensor("op_2927"), val = tensor([2, 20, 64, -1])]; + tensor var_2928_cast = reshape(shape = var_2927, x = v_53_cast)[name = tensor("op_2928_cast")]; + tensor attn_weights_105_transpose_x_0 = const()[name = tensor("attn_weights_105_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_105_transpose_y_0 = const()[name = tensor("attn_weights_105_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_105_cast = matmul(transpose_x = attn_weights_105_transpose_x_0, transpose_y = attn_weights_105_transpose_y_0, x = var_2924_cast, y = var_2926_cast)[name = tensor("attn_weights_105_cast")]; + tensor attn_weights_107_cast = mul(x = attn_weights_105_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_107_cast")]; + tensor var_2932_cast = softmax(axis = var_1170, x = attn_weights_107_cast)[name = tensor("op_2932_cast")]; + tensor attn_53_transpose_x_0 = const()[name = tensor("attn_53_transpose_x_0"), val = tensor(false)]; + tensor attn_53_transpose_y_0 = const()[name = tensor("attn_53_transpose_y_0"), val = tensor(true)]; + tensor attn_53_cast = matmul(transpose_x = 
attn_53_transpose_x_0, transpose_y = attn_53_transpose_y_0, x = var_2928_cast, y = var_2932_cast)[name = tensor("attn_53_cast")]; + tensor var_2936 = const()[name = tensor("op_2936"), val = tensor([2, 1280, 1, -1])]; + tensor input_203_cast = reshape(shape = var_2936, x = attn_53_cast)[name = tensor("input_203_cast")]; + tensor var_2941 = const()[name = tensor("op_2941"), val = tensor([1, 1])]; + tensor var_2943 = const()[name = tensor("op_2943"), val = tensor([1, 1])]; + tensor var_2945_pad_type_0 = const()[name = tensor("op_2945_pad_type_0"), val = tensor("custom")]; + tensor var_2945_pad_0 = const()[name = tensor("op_2945_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(371090944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372319808))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372320000)))]; + tensor var_2945_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_2943, groups = var_1186, pad = var_2945_pad_0, pad_type = var_2945_pad_type_0, strides = var_2941, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_203_cast)[name = tensor("op_2945_cast")]; + tensor inputs_81_cast = add(x = var_2945_cast, y = inputs_79_cast)[name = tensor("inputs_81_cast")]; + tensor var_2949 = const()[name = tensor("op_2949"), val = tensor([1])]; + tensor channels_mean_81_cast = reduce_mean(axes = var_2949, keep_dims = var_1181, x = inputs_81_cast)[name = tensor("channels_mean_81_cast")]; + tensor zero_mean_81_cast = sub(x = inputs_81_cast, y = channels_mean_81_cast)[name = tensor("zero_mean_81_cast")]; + tensor zero_mean_sq_81_cast = mul(x = zero_mean_81_cast, y = zero_mean_81_cast)[name = tensor("zero_mean_sq_81_cast")]; + tensor var_2953 = const()[name = tensor("op_2953"), val = tensor([1])]; + tensor var_2954_cast = reduce_mean(axes = var_2953, keep_dims = var_1181, x = zero_mean_sq_81_cast)[name = tensor("op_2954_cast")]; + tensor var_2955_to_fp16 = const()[name = tensor("op_2955_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_2956_cast = add(x = var_2954_cast, y = var_2955_to_fp16)[name = tensor("op_2956_cast")]; + tensor denom_81_epsilon_0_to_fp16 = const()[name = tensor("denom_81_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_81_cast = rsqrt(epsilon = denom_81_epsilon_0_to_fp16, x = var_2956_cast)[name = tensor("denom_81_cast")]; + tensor out_81_cast = mul(x = zero_mean_81_cast, y = denom_81_cast)[name = tensor("out_81_cast")]; + tensor var_2960_to_fp16 = const()[name = tensor("op_2960_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372322624)))]; + tensor var_2961_cast = add(x = out_81_cast, y = var_2960_to_fp16)[name = tensor("op_2961_cast")]; + tensor var_2963_to_fp16 = const()[name = tensor("op_2963_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(372325248)))]; + tensor hidden_states_121_cast = mul(x = var_2961_cast, y = var_2963_to_fp16)[name = tensor("hidden_states_121_cast")]; + tensor var_2970 = const()[name = tensor("op_2970"), val = tensor([1, 1])]; + tensor var_2972 = const()[name = tensor("op_2972"), val = tensor([1, 1])]; + tensor q_55_pad_type_0 = const()[name = tensor("q_55_pad_type_0"), val = tensor("custom")]; + tensor q_55_pad_0 = const()[name = tensor("q_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372327872))), lut = tensor([-0x1.268p-6, -0x1.834p-8, 0x1.814p-8, 0x1.264p-6]), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_55_cast = conv(dilations = var_2972, groups = var_1186, pad = q_55_pad_0, pad_type = q_55_pad_type_0, strides = var_2970, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_121_cast)[name = tensor("q_55_cast")]; + tensor var_2976 = const()[name = tensor("op_2976"), val = tensor([1, 1])]; + tensor var_2978 = const()[name = tensor("op_2978"), val = tensor([1, 1])]; + tensor k_55_pad_type_0 = const()[name = tensor("k_55_pad_type_0"), val = tensor("custom")]; + tensor k_55_pad_0 = const()[name = tensor("k_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(372737536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(374048320))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_55_cast = conv(dilations = var_2978, groups = var_1186, pad = k_55_pad_0, pad_type = k_55_pad_type_0, strides = var_2976, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_55_cast")]; + tensor var_2982 = const()[name = tensor("op_2982"), val = tensor([1, 1])]; + tensor var_2984 = const()[name = tensor("op_2984"), val = tensor([1, 1])]; + tensor v_55_pad_type_0 = const()[name = tensor("v_55_pad_type_0"), val = tensor("custom")]; + tensor v_55_pad_0 = const()[name = tensor("v_55_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(374048448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(376014592))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_55_cast = conv(dilations = var_2984, groups = var_1186, pad = v_55_pad_0, pad_type = v_55_pad_type_0, strides = var_2982, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_55_cast")]; + tensor var_2988 = const()[name = tensor("op_2988"), val = tensor([2, 20, 64, -1])]; + tensor var_2989_cast = reshape(shape = var_2988, x = q_55_cast)[name = tensor("op_2989_cast")]; 
+ tensor var_2990 = const()[name = tensor("op_2990"), val = tensor([2, 20, 64, -1])]; + tensor var_2991_cast = reshape(shape = var_2990, x = k_55_cast)[name = tensor("op_2991_cast")]; + tensor var_2992 = const()[name = tensor("op_2992"), val = tensor([2, 20, 64, -1])]; + tensor var_2993_cast = reshape(shape = var_2992, x = v_55_cast)[name = tensor("op_2993_cast")]; + tensor attn_weights_109_transpose_x_0 = const()[name = tensor("attn_weights_109_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_109_transpose_y_0 = const()[name = tensor("attn_weights_109_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_109_cast = matmul(transpose_x = attn_weights_109_transpose_x_0, transpose_y = attn_weights_109_transpose_y_0, x = var_2989_cast, y = var_2991_cast)[name = tensor("attn_weights_109_cast")]; + tensor attn_weights_111_cast = mul(x = attn_weights_109_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_111_cast")]; + tensor var_2997_cast = softmax(axis = var_1170, x = attn_weights_111_cast)[name = tensor("op_2997_cast")]; + tensor attn_55_transpose_x_0 = const()[name = tensor("attn_55_transpose_x_0"), val = tensor(false)]; + tensor attn_55_transpose_y_0 = const()[name = tensor("attn_55_transpose_y_0"), val = tensor(true)]; + tensor attn_55_cast = matmul(transpose_x = attn_55_transpose_x_0, transpose_y = attn_55_transpose_y_0, x = var_2993_cast, y = var_2997_cast)[name = tensor("attn_55_cast")]; + tensor var_3001 = const()[name = tensor("op_3001"), val = tensor([2, 1280, 1, -1])]; + tensor input_205_cast = reshape(shape = var_3001, x = attn_55_cast)[name = tensor("input_205_cast")]; + tensor var_3006 = const()[name = tensor("op_3006"), val = tensor([1, 1])]; + tensor var_3008 = const()[name = tensor("op_3008"), val = tensor([1, 1])]; + tensor var_3010_pad_type_0 = const()[name = tensor("op_3010_pad_type_0"), val = tensor("custom")]; + tensor var_3010_pad_0 = const()[name = tensor("op_3010_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(376014784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(377243648))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(377243840)))]; + tensor var_3010_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_3008, groups = var_1186, pad = var_3010_pad_0, pad_type = var_3010_pad_type_0, strides = var_3006, weight = down_blocks_2_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_205_cast)[name = tensor("op_3010_cast")]; + tensor inputs_83_cast = add(x = var_3010_cast, y = inputs_81_cast)[name = tensor("inputs_83_cast")]; + tensor var_3014 = const()[name = tensor("op_3014"), val = tensor([1])]; + tensor channels_mean_83_cast = reduce_mean(axes = var_3014, keep_dims = var_1181, x = inputs_83_cast)[name = tensor("channels_mean_83_cast")]; + tensor zero_mean_83_cast = sub(x = inputs_83_cast, y = channels_mean_83_cast)[name = 
tensor("zero_mean_83_cast")]; + tensor zero_mean_sq_83_cast = mul(x = zero_mean_83_cast, y = zero_mean_83_cast)[name = tensor("zero_mean_sq_83_cast")]; + tensor var_3018 = const()[name = tensor("op_3018"), val = tensor([1])]; + tensor var_3019_cast = reduce_mean(axes = var_3018, keep_dims = var_1181, x = zero_mean_sq_83_cast)[name = tensor("op_3019_cast")]; + tensor var_3020_to_fp16 = const()[name = tensor("op_3020_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3021_cast = add(x = var_3019_cast, y = var_3020_to_fp16)[name = tensor("op_3021_cast")]; + tensor denom_83_epsilon_0_to_fp16 = const()[name = tensor("denom_83_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_83_cast = rsqrt(epsilon = denom_83_epsilon_0_to_fp16, x = var_3021_cast)[name = tensor("denom_83_cast")]; + tensor out_83_cast = mul(x = zero_mean_83_cast, y = denom_83_cast)[name = tensor("out_83_cast")]; + tensor var_3025_to_fp16 = const()[name = tensor("op_3025_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(377246464)))]; + tensor var_3026_cast = add(x = out_83_cast, y = var_3025_to_fp16)[name = tensor("op_3026_cast")]; + tensor var_3028_to_fp16 = const()[name = tensor("op_3028_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(377249088)))]; + tensor input_207_cast = mul(x = var_3026_cast, y = var_3028_to_fp16)[name = tensor("input_207_cast")]; + tensor var_3036 = const()[name = tensor("op_3036"), val = tensor([1, 1])]; + tensor var_3038 = const()[name = tensor("op_3038"), val = tensor([1, 1])]; + tensor var_3040_pad_type_0 = const()[name = tensor("op_3040_pad_type_0"), val = tensor("custom")]; + tensor var_3040_pad_0 = const()[name = tensor("op_3040_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(377251712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(390358976))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(390359552)))]; + tensor var_3040_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_3038, groups = var_1186, pad = var_3040_pad_0, pad_type = var_3040_pad_type_0, strides = var_3036, weight = down_blocks_2_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_207_cast)[name = tensor("op_3040_cast")]; + tensor var_3041_split_sizes_0 = const()[name = tensor("op_3041_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3041_axis_0 = const()[name = tensor("op_3041_axis_0"), val = tensor(1)]; + tensor var_3041_cast_0, tensor var_3041_cast_1 = split(axis = var_3041_axis_0, split_sizes = var_3041_split_sizes_0, x = var_3040_cast)[name = tensor("op_3041_cast")]; + tensor var_3043_mode_0 = const()[name = tensor("op_3043_mode_0"), val = tensor("EXACT")]; + tensor var_3043_cast = gelu(mode = var_3043_mode_0, x = var_3041_cast_1)[name = tensor("op_3043_cast")]; + tensor input_209_cast = mul(x = var_3041_cast_0, y = 
var_3043_cast)[name = tensor("input_209_cast")]; + tensor var_3047 = const()[name = tensor("op_3047"), val = tensor([1, 1])]; + tensor var_3049 = const()[name = tensor("op_3049"), val = tensor([1, 1])]; + tensor var_3051_pad_type_0 = const()[name = tensor("op_3051_pad_type_0"), val = tensor("custom")]; + tensor var_3051_pad_0 = const()[name = tensor("op_3051_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(390380096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(396933760))), name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(396934336)))]; + tensor var_3051_cast = conv(bias = down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_3049, groups = var_1186, pad = var_3051_pad_0, pad_type = var_3051_pad_type_0, strides = var_3047, weight = down_blocks_2_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_209_cast)[name = tensor("op_3051_cast")]; + tensor hidden_states_125_cast = add(x = var_3051_cast, y = inputs_83_cast)[name = tensor("hidden_states_125_cast")]; + tensor var_3053 = const()[name = tensor("op_3053"), val = tensor([2, 1280, 32, 32])]; + tensor input_211_cast = reshape(shape = var_3053, x = hidden_states_125_cast)[name = tensor("input_211_cast")]; + tensor var_3057 = const()[name = tensor("op_3057"), val = tensor([1, 1])]; + tensor var_3059 = const()[name = tensor("op_3059"), val = tensor([1, 1])]; + tensor hidden_states_127_pad_type_0 = const()[name = tensor("hidden_states_127_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_127_pad_0 = const()[name = tensor("hidden_states_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(396936960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(398575424))), name = tensor("down_blocks_2_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(398576000)))]; + tensor hidden_states_127_cast = conv(bias = down_blocks_2_attentions_0_proj_out_bias_to_fp16, dilations = var_3059, groups = var_1186, pad = hidden_states_127_pad_0, pad_type = hidden_states_127_pad_type_0, strides = var_3057, weight = down_blocks_2_attentions_0_proj_out_weight_to_fp16_palettized, x = input_211_cast)[name = tensor("hidden_states_127_cast")]; + tensor input_213_cast = add(x = hidden_states_127_cast, y = hidden_states_61_cast)[name = tensor("input_213_cast")]; + tensor reshape_52_shape_0 = const()[name = tensor("reshape_52_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_52_cast = reshape(shape = reshape_52_shape_0, x = 
input_213_cast)[name = tensor("reshape_52_cast")]; + tensor reduce_mean_39_axes_0 = const()[name = tensor("reduce_mean_39_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_39_keep_dims_0 = const()[name = tensor("reduce_mean_39_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_39_cast = reduce_mean(axes = reduce_mean_39_axes_0, keep_dims = reduce_mean_39_keep_dims_0, x = reshape_52_cast)[name = tensor("reduce_mean_39_cast")]; + tensor sub_26_cast = sub(x = reshape_52_cast, y = reduce_mean_39_cast)[name = tensor("sub_26_cast")]; + tensor square_13_cast = square(x = sub_26_cast)[name = tensor("square_13_cast")]; + tensor reduce_mean_41_axes_0 = const()[name = tensor("reduce_mean_41_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_41_keep_dims_0 = const()[name = tensor("reduce_mean_41_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_41_cast = reduce_mean(axes = reduce_mean_41_axes_0, keep_dims = reduce_mean_41_keep_dims_0, x = square_13_cast)[name = tensor("reduce_mean_41_cast")]; + tensor add_26_y_0_to_fp16 = const()[name = tensor("add_26_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_26_cast = add(x = reduce_mean_41_cast, y = add_26_y_0_to_fp16)[name = tensor("add_26_cast")]; + tensor sqrt_13_cast = sqrt(x = add_26_cast)[name = tensor("sqrt_13_cast")]; + tensor real_div_13_cast = real_div(x = sub_26_cast, y = sqrt_13_cast)[name = tensor("real_div_13_cast")]; + tensor reshape_53_shape_0 = const()[name = tensor("reshape_53_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_53_cast = reshape(shape = reshape_53_shape_0, x = real_div_13_cast)[name = tensor("reshape_53_cast")]; + tensor add_27_gamma_0_to_fp16 = const()[name = tensor("add_27_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(398578624)))]; + tensor add_27_beta_0_to_fp16 = const()[name = tensor("add_27_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(398581248)))]; + tensor add_27_epsilon_0_to_fp16 = const()[name = tensor("add_27_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_27_cast = batch_norm(beta = add_27_beta_0_to_fp16, epsilon = add_27_epsilon_0_to_fp16, gamma = add_27_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_53_cast)[name = tensor("add_27_cast")]; + tensor input_217_cast = silu(x = add_27_cast)[name = tensor("input_217_cast")]; + tensor var_3074 = const()[name = tensor("op_3074"), val = tensor([1, 1])]; + tensor var_3076 = const()[name = tensor("op_3076"), val = tensor([1, 1])]; + tensor hidden_states_129_pad_type_0 = const()[name = tensor("hidden_states_129_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_129_pad_0 = const()[name = tensor("hidden_states_129_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_2_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(398583872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(409643136))), name = tensor("down_blocks_2_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor down_blocks_2_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(409643328)))]; + tensor hidden_states_129_cast = conv(bias = 
down_blocks_2_resnets_1_conv1_bias_to_fp16, dilations = var_3076, groups = var_1186, pad = hidden_states_129_pad_0, pad_type = hidden_states_129_pad_type_0, strides = var_3074, weight = down_blocks_2_resnets_1_conv1_weight_to_fp16_palettized, x = input_217_cast)[name = tensor("hidden_states_129_cast")]; + tensor var_3082 = const()[name = tensor("op_3082"), val = tensor([1, 1])]; + tensor var_3084 = const()[name = tensor("op_3084"), val = tensor([1, 1])]; + tensor temb_11_pad_type_0 = const()[name = tensor("temb_11_pad_type_0"), val = tensor("custom")]; + tensor temb_11_pad_0 = const()[name = tensor("temb_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(409645952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410874816))), name = tensor("down_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410875008)))]; + tensor temb_11_cast = conv(bias = down_blocks_2_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_3084, groups = var_1186, pad = temb_11_pad_0, pad_type = temb_11_pad_type_0, strides = var_3082, weight = down_blocks_2_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_11_cast")]; + tensor input_221_cast = add(x = hidden_states_129_cast, y = temb_11_cast)[name = tensor("input_221_cast")]; + tensor reshape_56_shape_0 = const()[name = tensor("reshape_56_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_56_cast = reshape(shape = reshape_56_shape_0, x = input_221_cast)[name = tensor("reshape_56_cast")]; + tensor reduce_mean_42_axes_0 = const()[name = tensor("reduce_mean_42_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_42_keep_dims_0 = const()[name = tensor("reduce_mean_42_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_42_cast = reduce_mean(axes = reduce_mean_42_axes_0, keep_dims = reduce_mean_42_keep_dims_0, x = reshape_56_cast)[name = tensor("reduce_mean_42_cast")]; + tensor sub_28_cast = sub(x = reshape_56_cast, y = reduce_mean_42_cast)[name = tensor("sub_28_cast")]; + tensor square_14_cast = square(x = sub_28_cast)[name = tensor("square_14_cast")]; + tensor reduce_mean_44_axes_0 = const()[name = tensor("reduce_mean_44_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_44_keep_dims_0 = const()[name = tensor("reduce_mean_44_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_44_cast = reduce_mean(axes = reduce_mean_44_axes_0, keep_dims = reduce_mean_44_keep_dims_0, x = square_14_cast)[name = tensor("reduce_mean_44_cast")]; + tensor add_28_y_0_to_fp16 = const()[name = tensor("add_28_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_28_cast = add(x = reduce_mean_44_cast, y = add_28_y_0_to_fp16)[name = tensor("add_28_cast")]; + tensor sqrt_14_cast = sqrt(x = add_28_cast)[name = tensor("sqrt_14_cast")]; + tensor real_div_14_cast = real_div(x = sub_28_cast, y = sqrt_14_cast)[name = tensor("real_div_14_cast")]; + tensor reshape_57_shape_0 = const()[name = tensor("reshape_57_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_57_cast = reshape(shape = reshape_57_shape_0, x = real_div_14_cast)[name = 
tensor("reshape_57_cast")]; + tensor add_29_gamma_0_to_fp16 = const()[name = tensor("add_29_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410877632)))]; + tensor add_29_beta_0_to_fp16 = const()[name = tensor("add_29_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410880256)))]; + tensor add_29_epsilon_0_to_fp16 = const()[name = tensor("add_29_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_29_cast = batch_norm(beta = add_29_beta_0_to_fp16, epsilon = add_29_epsilon_0_to_fp16, gamma = add_29_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_57_cast)[name = tensor("add_29_cast")]; + tensor input_225_cast = silu(x = add_29_cast)[name = tensor("input_225_cast")]; + tensor var_3094 = const()[name = tensor("op_3094"), val = tensor([1, 1])]; + tensor var_3096 = const()[name = tensor("op_3096"), val = tensor([1, 1])]; + tensor hidden_states_131_pad_type_0 = const()[name = tensor("hidden_states_131_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_131_pad_0 = const()[name = tensor("hidden_states_131_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor down_blocks_2_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(410882880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(421942144))), name = tensor("down_blocks_2_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor down_blocks_2_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("down_blocks_2_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(421942336)))]; + tensor hidden_states_131_cast = conv(bias = down_blocks_2_resnets_1_conv2_bias_to_fp16, dilations = var_3096, groups = var_1186, pad = hidden_states_131_pad_0, pad_type = hidden_states_131_pad_type_0, strides = var_3094, weight = down_blocks_2_resnets_1_conv2_weight_to_fp16_palettized, x = input_225_cast)[name = tensor("hidden_states_131_cast")]; + tensor hidden_states_133_cast = add(x = input_213_cast, y = hidden_states_131_cast)[name = tensor("hidden_states_133_cast")]; + tensor reshape_60_shape_0 = const()[name = tensor("reshape_60_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_60_cast = reshape(shape = reshape_60_shape_0, x = hidden_states_133_cast)[name = tensor("reshape_60_cast")]; + tensor reduce_mean_45_axes_0 = const()[name = tensor("reduce_mean_45_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_45_keep_dims_0 = const()[name = tensor("reduce_mean_45_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_45_cast = reduce_mean(axes = reduce_mean_45_axes_0, keep_dims = reduce_mean_45_keep_dims_0, x = reshape_60_cast)[name = tensor("reduce_mean_45_cast")]; + tensor sub_30_cast = sub(x = reshape_60_cast, y = reduce_mean_45_cast)[name = tensor("sub_30_cast")]; + tensor square_15_cast = square(x = sub_30_cast)[name = tensor("square_15_cast")]; + tensor reduce_mean_47_axes_0 = const()[name = tensor("reduce_mean_47_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_47_keep_dims_0 = const()[name = tensor("reduce_mean_47_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_47_cast = reduce_mean(axes = reduce_mean_47_axes_0, keep_dims = reduce_mean_47_keep_dims_0, x = square_15_cast)[name = tensor("reduce_mean_47_cast")]; + 
tensor add_30_y_0_to_fp16 = const()[name = tensor("add_30_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_30_cast = add(x = reduce_mean_47_cast, y = add_30_y_0_to_fp16)[name = tensor("add_30_cast")]; + tensor sqrt_15_cast = sqrt(x = add_30_cast)[name = tensor("sqrt_15_cast")]; + tensor real_div_15_cast = real_div(x = sub_30_cast, y = sqrt_15_cast)[name = tensor("real_div_15_cast")]; + tensor reshape_61_shape_0 = const()[name = tensor("reshape_61_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_61_cast = reshape(shape = reshape_61_shape_0, x = real_div_15_cast)[name = tensor("reshape_61_cast")]; + tensor add_31_gamma_0_to_fp16 = const()[name = tensor("add_31_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(421944960)))]; + tensor add_31_beta_0_to_fp16 = const()[name = tensor("add_31_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(421947584)))]; + tensor add_31_epsilon_0_to_fp16 = const()[name = tensor("add_31_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_31_cast = batch_norm(beta = add_31_beta_0_to_fp16, epsilon = add_31_epsilon_0_to_fp16, gamma = add_31_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_61_cast)[name = tensor("add_31_cast")]; + tensor var_3134 = const()[name = tensor("op_3134"), val = tensor([1, 1])]; + tensor var_3136 = const()[name = tensor("op_3136"), val = tensor([1, 1])]; + tensor hidden_states_135_pad_type_0 = const()[name = tensor("hidden_states_135_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_135_pad_0 = const()[name = tensor("hidden_states_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(421950208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(423588672))), name = tensor("down_blocks_2_attentions_1_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_proj_in_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(423589248)))]; + tensor hidden_states_135_cast = conv(bias = down_blocks_2_attentions_1_proj_in_bias_to_fp16, dilations = var_3136, groups = var_1186, pad = hidden_states_135_pad_0, pad_type = hidden_states_135_pad_type_0, strides = var_3134, weight = down_blocks_2_attentions_1_proj_in_weight_to_fp16_palettized, x = add_31_cast)[name = tensor("hidden_states_135_cast")]; + tensor var_3141 = const()[name = tensor("op_3141"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_85_cast = reshape(shape = var_3141, x = hidden_states_135_cast)[name = tensor("inputs_85_cast")]; + tensor var_3151 = const()[name = tensor("op_3151"), val = tensor([1])]; + tensor channels_mean_85_cast = reduce_mean(axes = var_3151, keep_dims = var_1181, x = inputs_85_cast)[name = tensor("channels_mean_85_cast")]; + tensor zero_mean_85_cast = sub(x = inputs_85_cast, y = channels_mean_85_cast)[name = tensor("zero_mean_85_cast")]; + tensor zero_mean_sq_85_cast = mul(x = zero_mean_85_cast, y = zero_mean_85_cast)[name = tensor("zero_mean_sq_85_cast")]; + tensor var_3155 = const()[name = tensor("op_3155"), val = tensor([1])]; + tensor var_3156_cast = reduce_mean(axes = var_3155, keep_dims = 
var_1181, x = zero_mean_sq_85_cast)[name = tensor("op_3156_cast")]; + tensor var_3157_to_fp16 = const()[name = tensor("op_3157_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3158_cast = add(x = var_3156_cast, y = var_3157_to_fp16)[name = tensor("op_3158_cast")]; + tensor denom_85_epsilon_0_to_fp16 = const()[name = tensor("denom_85_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_85_cast = rsqrt(epsilon = denom_85_epsilon_0_to_fp16, x = var_3158_cast)[name = tensor("denom_85_cast")]; + tensor out_85_cast = mul(x = zero_mean_85_cast, y = denom_85_cast)[name = tensor("out_85_cast")]; + tensor var_3162_to_fp16 = const()[name = tensor("op_3162_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(423591872)))]; + tensor var_3163_cast = add(x = out_85_cast, y = var_3162_to_fp16)[name = tensor("op_3163_cast")]; + tensor var_3165_to_fp16 = const()[name = tensor("op_3165_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(423594496)))]; + tensor hidden_states_137_cast = mul(x = var_3163_cast, y = var_3165_to_fp16)[name = tensor("hidden_states_137_cast")]; + tensor var_3172 = const()[name = tensor("op_3172"), val = tensor([1, 1])]; + tensor var_3174 = const()[name = tensor("op_3174"), val = tensor([1, 1])]; + tensor q_57_pad_type_0 = const()[name = tensor("q_57_pad_type_0"), val = tensor("custom")]; + tensor q_57_pad_0 = const()[name = tensor("q_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(423597120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(424416384))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_57_cast = conv(dilations = var_3174, groups = var_1186, pad = q_57_pad_0, pad_type = q_57_pad_type_0, strides = var_3172, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_137_cast)[name = tensor("q_57_cast")]; + tensor var_3178 = const()[name = tensor("op_3178"), val = tensor([1, 1])]; + tensor var_3180 = const()[name = tensor("op_3180"), val = tensor([1, 1])]; + tensor k_57_pad_type_0 = const()[name = tensor("k_57_pad_type_0"), val = tensor("custom")]; + tensor k_57_pad_0 = const()[name = tensor("k_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(424416512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(425235776))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_57_cast = conv(dilations = var_3180, groups = var_1186, pad = k_57_pad_0, pad_type = k_57_pad_type_0, strides = var_3178, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_137_cast)[name = tensor("k_57_cast")]; + tensor var_3184 = const()[name = tensor("op_3184"), val = tensor([1, 1])]; + tensor var_3186 = const()[name = tensor("op_3186"), val = tensor([1, 1])]; + tensor v_57_pad_type_0 = const()[name = 
tensor("v_57_pad_type_0"), val = tensor("custom")]; + tensor v_57_pad_0 = const()[name = tensor("v_57_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(425235904))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(426464768))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_57_cast = conv(dilations = var_3186, groups = var_1186, pad = v_57_pad_0, pad_type = v_57_pad_type_0, strides = var_3184, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_137_cast)[name = tensor("v_57_cast")]; + tensor var_3190 = const()[name = tensor("op_3190"), val = tensor([2, 20, 64, -1])]; + tensor var_3191_cast = reshape(shape = var_3190, x = q_57_cast)[name = tensor("op_3191_cast")]; + tensor var_3192 = const()[name = tensor("op_3192"), val = tensor([2, 20, 64, -1])]; + tensor var_3193_cast = reshape(shape = var_3192, x = k_57_cast)[name = tensor("op_3193_cast")]; + tensor var_3194 = const()[name = tensor("op_3194"), val = tensor([2, 20, 64, -1])]; + tensor var_3195_cast = reshape(shape = var_3194, x = v_57_cast)[name = tensor("op_3195_cast")]; + tensor attn_weights_113_transpose_x_0 = const()[name = tensor("attn_weights_113_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_113_transpose_y_0 = const()[name = tensor("attn_weights_113_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_113_cast = matmul(transpose_x = attn_weights_113_transpose_x_0, transpose_y = attn_weights_113_transpose_y_0, x = var_3191_cast, y = var_3193_cast)[name = tensor("attn_weights_113_cast")]; + tensor attn_weights_115_cast = mul(x = attn_weights_113_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_115_cast")]; + tensor var_3199_cast = softmax(axis = var_1170, x = attn_weights_115_cast)[name = tensor("op_3199_cast")]; + tensor attn_57_transpose_x_0 = const()[name = tensor("attn_57_transpose_x_0"), val = tensor(false)]; + tensor attn_57_transpose_y_0 = const()[name = tensor("attn_57_transpose_y_0"), val = tensor(true)]; + tensor attn_57_cast = matmul(transpose_x = attn_57_transpose_x_0, transpose_y = attn_57_transpose_y_0, x = var_3195_cast, y = var_3199_cast)[name = tensor("attn_57_cast")]; + tensor var_3203 = const()[name = tensor("op_3203"), val = tensor([2, 1280, 1, -1])]; + tensor input_229_cast = reshape(shape = var_3203, x = attn_57_cast)[name = tensor("input_229_cast")]; + tensor var_3208 = const()[name = tensor("op_3208"), val = tensor([1, 1])]; + tensor var_3210 = const()[name = tensor("op_3210"), val = tensor([1, 1])]; + tensor var_3212_pad_type_0 = const()[name = tensor("op_3212_pad_type_0"), val = tensor("custom")]; + tensor var_3212_pad_0 = const()[name = tensor("op_3212_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(426464960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(427693824))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor 
down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(427694016)))]; + tensor var_3212_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_3210, groups = var_1186, pad = var_3212_pad_0, pad_type = var_3212_pad_type_0, strides = var_3208, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_229_cast)[name = tensor("op_3212_cast")]; + tensor inputs_87_cast = add(x = var_3212_cast, y = inputs_85_cast)[name = tensor("inputs_87_cast")]; + tensor var_3216 = const()[name = tensor("op_3216"), val = tensor([1])]; + tensor channels_mean_87_cast = reduce_mean(axes = var_3216, keep_dims = var_1181, x = inputs_87_cast)[name = tensor("channels_mean_87_cast")]; + tensor zero_mean_87_cast = sub(x = inputs_87_cast, y = channels_mean_87_cast)[name = tensor("zero_mean_87_cast")]; + tensor zero_mean_sq_87_cast = mul(x = zero_mean_87_cast, y = zero_mean_87_cast)[name = tensor("zero_mean_sq_87_cast")]; + tensor var_3220 = const()[name = tensor("op_3220"), val = tensor([1])]; + tensor var_3221_cast = reduce_mean(axes = var_3220, keep_dims = var_1181, x = zero_mean_sq_87_cast)[name = tensor("op_3221_cast")]; + tensor var_3222_to_fp16 = const()[name = tensor("op_3222_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3223_cast = add(x = var_3221_cast, y = var_3222_to_fp16)[name = tensor("op_3223_cast")]; + tensor denom_87_epsilon_0_to_fp16 = const()[name = tensor("denom_87_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_87_cast = rsqrt(epsilon = denom_87_epsilon_0_to_fp16, x = var_3223_cast)[name = tensor("denom_87_cast")]; + tensor out_87_cast = mul(x = zero_mean_87_cast, y = denom_87_cast)[name = tensor("out_87_cast")]; + tensor var_3227_to_fp16 = const()[name = tensor("op_3227_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(427696640)))]; + tensor var_3228_cast = add(x = out_87_cast, y = var_3227_to_fp16)[name = tensor("op_3228_cast")]; + tensor var_3230_to_fp16 = const()[name = tensor("op_3230_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(427699264)))]; + tensor hidden_states_139_cast = mul(x = var_3228_cast, y = var_3230_to_fp16)[name = tensor("hidden_states_139_cast")]; + tensor var_3237 = const()[name = tensor("op_3237"), val = tensor([1, 1])]; + tensor var_3239 = const()[name = tensor("op_3239"), val = tensor([1, 1])]; + tensor q_59_pad_type_0 = const()[name = tensor("q_59_pad_type_0"), val = tensor("custom")]; + tensor q_59_pad_0 = const()[name = tensor("q_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(427701888))), lut = tensor([-0x1.d3cp-6, -0x1.1ccp-7, 0x1.1a8p-7, 0x1.d28p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_59_cast = conv(dilations = var_3239, groups = var_1186, pad = q_59_pad_0, pad_type = q_59_pad_type_0, strides = var_3237, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = 
hidden_states_139_cast)[name = tensor("q_59_cast")]; + tensor var_3243 = const()[name = tensor("op_3243"), val = tensor([1, 1])]; + tensor var_3245 = const()[name = tensor("op_3245"), val = tensor([1, 1])]; + tensor k_59_pad_type_0 = const()[name = tensor("k_59_pad_type_0"), val = tensor("custom")]; + tensor k_59_pad_0 = const()[name = tensor("k_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(428111552))), lut = tensor([-0x1.dc8p-6, -0x1.1b8p-7, 0x1.198p-7, 0x1.dbp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_59_cast = conv(dilations = var_3245, groups = var_1186, pad = k_59_pad_0, pad_type = k_59_pad_type_0, strides = var_3243, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_59_cast")]; + tensor var_3249 = const()[name = tensor("op_3249"), val = tensor([1, 1])]; + tensor var_3251 = const()[name = tensor("op_3251"), val = tensor([1, 1])]; + tensor v_59_pad_type_0 = const()[name = tensor("v_59_pad_type_0"), val = tensor("custom")]; + tensor v_59_pad_0 = const()[name = tensor("v_59_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(428766976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(430077760))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_59_cast = conv(dilations = var_3251, groups = var_1186, pad = v_59_pad_0, pad_type = v_59_pad_type_0, strides = var_3249, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_59_cast")]; + tensor var_3255 = const()[name = tensor("op_3255"), val = tensor([2, 20, 64, -1])]; + tensor var_3256_cast = reshape(shape = var_3255, x = q_59_cast)[name = tensor("op_3256_cast")]; + tensor var_3257 = const()[name = tensor("op_3257"), val = tensor([2, 20, 64, -1])]; + tensor var_3258_cast = reshape(shape = var_3257, x = k_59_cast)[name = tensor("op_3258_cast")]; + tensor var_3259 = const()[name = tensor("op_3259"), val = tensor([2, 20, 64, -1])]; + tensor var_3260_cast = reshape(shape = var_3259, x = v_59_cast)[name = tensor("op_3260_cast")]; + tensor attn_weights_117_transpose_x_0 = const()[name = tensor("attn_weights_117_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_117_transpose_y_0 = const()[name = tensor("attn_weights_117_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_117_cast = matmul(transpose_x = attn_weights_117_transpose_x_0, transpose_y = attn_weights_117_transpose_y_0, x = var_3256_cast, y = var_3258_cast)[name = tensor("attn_weights_117_cast")]; + tensor attn_weights_119_cast = mul(x = attn_weights_117_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_119_cast")]; + tensor var_3264_cast = softmax(axis = var_1170, x = attn_weights_119_cast)[name = tensor("op_3264_cast")]; + tensor attn_59_transpose_x_0 = const()[name = tensor("attn_59_transpose_x_0"), val = tensor(false)]; + tensor 
attn_59_transpose_y_0 = const()[name = tensor("attn_59_transpose_y_0"), val = tensor(true)]; + tensor attn_59_cast = matmul(transpose_x = attn_59_transpose_x_0, transpose_y = attn_59_transpose_y_0, x = var_3260_cast, y = var_3264_cast)[name = tensor("attn_59_cast")]; + tensor var_3268 = const()[name = tensor("op_3268"), val = tensor([2, 1280, 1, -1])]; + tensor input_231_cast = reshape(shape = var_3268, x = attn_59_cast)[name = tensor("input_231_cast")]; + tensor var_3273 = const()[name = tensor("op_3273"), val = tensor([1, 1])]; + tensor var_3275 = const()[name = tensor("op_3275"), val = tensor([1, 1])]; + tensor var_3277_pad_type_0 = const()[name = tensor("op_3277_pad_type_0"), val = tensor("custom")]; + tensor var_3277_pad_0 = const()[name = tensor("op_3277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(430077888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(430897152))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(430897280)))]; + tensor var_3277_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_3275, groups = var_1186, pad = var_3277_pad_0, pad_type = var_3277_pad_type_0, strides = var_3273, weight = down_blocks_2_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_231_cast)[name = tensor("op_3277_cast")]; + tensor inputs_89_cast = add(x = var_3277_cast, y = inputs_87_cast)[name = tensor("inputs_89_cast")]; + tensor var_3281 = const()[name = tensor("op_3281"), val = tensor([1])]; + tensor channels_mean_89_cast = reduce_mean(axes = var_3281, keep_dims = var_1181, x = inputs_89_cast)[name = tensor("channels_mean_89_cast")]; + tensor zero_mean_89_cast = sub(x = inputs_89_cast, y = channels_mean_89_cast)[name = tensor("zero_mean_89_cast")]; + tensor zero_mean_sq_89_cast = mul(x = zero_mean_89_cast, y = zero_mean_89_cast)[name = tensor("zero_mean_sq_89_cast")]; + tensor var_3285 = const()[name = tensor("op_3285"), val = tensor([1])]; + tensor var_3286_cast = reduce_mean(axes = var_3285, keep_dims = var_1181, x = zero_mean_sq_89_cast)[name = tensor("op_3286_cast")]; + tensor var_3287_to_fp16 = const()[name = tensor("op_3287_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3288_cast = add(x = var_3286_cast, y = var_3287_to_fp16)[name = tensor("op_3288_cast")]; + tensor denom_89_epsilon_0_to_fp16 = const()[name = tensor("denom_89_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_89_cast = rsqrt(epsilon = denom_89_epsilon_0_to_fp16, x = var_3288_cast)[name = tensor("denom_89_cast")]; + tensor out_89_cast = mul(x = zero_mean_89_cast, y = denom_89_cast)[name = tensor("out_89_cast")]; + tensor var_3292_to_fp16 = const()[name = tensor("op_3292_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(430899904)))]; + tensor var_3293_cast = add(x = out_89_cast, y = var_3292_to_fp16)[name = tensor("op_3293_cast")]; + tensor 
var_3295_to_fp16 = const()[name = tensor("op_3295_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(430902528)))]; + tensor input_233_cast = mul(x = var_3293_cast, y = var_3295_to_fp16)[name = tensor("input_233_cast")]; + tensor var_3303 = const()[name = tensor("op_3303"), val = tensor([1, 1])]; + tensor var_3305 = const()[name = tensor("op_3305"), val = tensor([1, 1])]; + tensor var_3307_pad_type_0 = const()[name = tensor("op_3307_pad_type_0"), val = tensor("custom")]; + tensor var_3307_pad_0 = const()[name = tensor("op_3307_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(430905152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(440735616))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(440735808)))]; + tensor var_3307_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_3305, groups = var_1186, pad = var_3307_pad_0, pad_type = var_3307_pad_type_0, strides = var_3303, weight = down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_233_cast)[name = tensor("op_3307_cast")]; + tensor var_3308_split_sizes_0 = const()[name = tensor("op_3308_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3308_axis_0 = const()[name = tensor("op_3308_axis_0"), val = tensor(1)]; + tensor var_3308_cast_0, tensor var_3308_cast_1 = split(axis = var_3308_axis_0, split_sizes = var_3308_split_sizes_0, x = var_3307_cast)[name = tensor("op_3308_cast")]; + tensor var_3310_mode_0 = const()[name = tensor("op_3310_mode_0"), val = tensor("EXACT")]; + tensor var_3310_cast = gelu(mode = var_3310_mode_0, x = var_3308_cast_1)[name = tensor("op_3310_cast")]; + tensor input_235_cast = mul(x = var_3308_cast_0, y = var_3310_cast)[name = tensor("input_235_cast")]; + tensor var_3314 = const()[name = tensor("op_3314"), val = tensor([1, 1])]; + tensor var_3316 = const()[name = tensor("op_3316"), val = tensor([1, 1])]; + tensor var_3318_pad_type_0 = const()[name = tensor("op_3318_pad_type_0"), val = tensor("custom")]; + tensor var_3318_pad_0 = const()[name = tensor("op_3318_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(440756352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(445671616))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(445671808)))]; + tensor var_3318_cast = 
conv(bias = down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_3316, groups = var_1186, pad = var_3318_pad_0, pad_type = var_3318_pad_type_0, strides = var_3314, weight = down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_235_cast)[name = tensor("op_3318_cast")]; + tensor inputs_91_cast = add(x = var_3318_cast, y = inputs_89_cast)[name = tensor("inputs_91_cast")]; + tensor var_3328 = const()[name = tensor("op_3328"), val = tensor([1])]; + tensor channels_mean_91_cast = reduce_mean(axes = var_3328, keep_dims = var_1181, x = inputs_91_cast)[name = tensor("channels_mean_91_cast")]; + tensor zero_mean_91_cast = sub(x = inputs_91_cast, y = channels_mean_91_cast)[name = tensor("zero_mean_91_cast")]; + tensor zero_mean_sq_91_cast = mul(x = zero_mean_91_cast, y = zero_mean_91_cast)[name = tensor("zero_mean_sq_91_cast")]; + tensor var_3332 = const()[name = tensor("op_3332"), val = tensor([1])]; + tensor var_3333_cast = reduce_mean(axes = var_3332, keep_dims = var_1181, x = zero_mean_sq_91_cast)[name = tensor("op_3333_cast")]; + tensor var_3334_to_fp16 = const()[name = tensor("op_3334_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3335_cast = add(x = var_3333_cast, y = var_3334_to_fp16)[name = tensor("op_3335_cast")]; + tensor denom_91_epsilon_0_to_fp16 = const()[name = tensor("denom_91_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_91_cast = rsqrt(epsilon = denom_91_epsilon_0_to_fp16, x = var_3335_cast)[name = tensor("denom_91_cast")]; + tensor out_91_cast = mul(x = zero_mean_91_cast, y = denom_91_cast)[name = tensor("out_91_cast")]; + tensor var_3339_to_fp16 = const()[name = tensor("op_3339_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(445674432)))]; + tensor var_3340_cast = add(x = out_91_cast, y = var_3339_to_fp16)[name = tensor("op_3340_cast")]; + tensor var_3342_to_fp16 = const()[name = tensor("op_3342_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(445677056)))]; + tensor hidden_states_143_cast = mul(x = var_3340_cast, y = var_3342_to_fp16)[name = tensor("hidden_states_143_cast")]; + tensor var_3349 = const()[name = tensor("op_3349"), val = tensor([1, 1])]; + tensor var_3351 = const()[name = tensor("op_3351"), val = tensor([1, 1])]; + tensor q_61_pad_type_0 = const()[name = tensor("q_61_pad_type_0"), val = tensor("custom")]; + tensor q_61_pad_0 = const()[name = tensor("q_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(445679680))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(446498944))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_61_cast = conv(dilations = var_3351, groups = var_1186, pad = q_61_pad_0, pad_type = q_61_pad_type_0, strides = var_3349, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_143_cast)[name = tensor("q_61_cast")]; + tensor var_3355 = const()[name = tensor("op_3355"), val = tensor([1, 1])]; + tensor var_3357 = const()[name = tensor("op_3357"), val = tensor([1, 1])]; + tensor k_61_pad_type_0 = const()[name = tensor("k_61_pad_type_0"), val = tensor("custom")]; + 
tensor k_61_pad_0 = const()[name = tensor("k_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(446499072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(447318336))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_61_cast = conv(dilations = var_3357, groups = var_1186, pad = k_61_pad_0, pad_type = k_61_pad_type_0, strides = var_3355, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_143_cast)[name = tensor("k_61_cast")]; + tensor var_3361 = const()[name = tensor("op_3361"), val = tensor([1, 1])]; + tensor var_3363 = const()[name = tensor("op_3363"), val = tensor([1, 1])]; + tensor v_61_pad_type_0 = const()[name = tensor("v_61_pad_type_0"), val = tensor("custom")]; + tensor v_61_pad_0 = const()[name = tensor("v_61_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(447318464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(448547328))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_61_cast = conv(dilations = var_3363, groups = var_1186, pad = v_61_pad_0, pad_type = v_61_pad_type_0, strides = var_3361, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_143_cast)[name = tensor("v_61_cast")]; + tensor var_3367 = const()[name = tensor("op_3367"), val = tensor([2, 20, 64, -1])]; + tensor var_3368_cast = reshape(shape = var_3367, x = q_61_cast)[name = tensor("op_3368_cast")]; + tensor var_3369 = const()[name = tensor("op_3369"), val = tensor([2, 20, 64, -1])]; + tensor var_3370_cast = reshape(shape = var_3369, x = k_61_cast)[name = tensor("op_3370_cast")]; + tensor var_3371 = const()[name = tensor("op_3371"), val = tensor([2, 20, 64, -1])]; + tensor var_3372_cast = reshape(shape = var_3371, x = v_61_cast)[name = tensor("op_3372_cast")]; + tensor attn_weights_121_transpose_x_0 = const()[name = tensor("attn_weights_121_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_121_transpose_y_0 = const()[name = tensor("attn_weights_121_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_121_cast = matmul(transpose_x = attn_weights_121_transpose_x_0, transpose_y = attn_weights_121_transpose_y_0, x = var_3368_cast, y = var_3370_cast)[name = tensor("attn_weights_121_cast")]; + tensor attn_weights_123_cast = mul(x = attn_weights_121_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_123_cast")]; + tensor var_3376_cast = softmax(axis = var_1170, x = attn_weights_123_cast)[name = tensor("op_3376_cast")]; + tensor attn_61_transpose_x_0 = const()[name = tensor("attn_61_transpose_x_0"), val = tensor(false)]; + tensor attn_61_transpose_y_0 = const()[name = tensor("attn_61_transpose_y_0"), val = tensor(true)]; + tensor attn_61_cast = matmul(transpose_x = attn_61_transpose_x_0, transpose_y = attn_61_transpose_y_0, x = var_3372_cast, y = var_3376_cast)[name = tensor("attn_61_cast")]; + tensor 
var_3380 = const()[name = tensor("op_3380"), val = tensor([2, 1280, 1, -1])]; + tensor input_237_cast = reshape(shape = var_3380, x = attn_61_cast)[name = tensor("input_237_cast")]; + tensor var_3385 = const()[name = tensor("op_3385"), val = tensor([1, 1])]; + tensor var_3387 = const()[name = tensor("op_3387"), val = tensor([1, 1])]; + tensor var_3389_pad_type_0 = const()[name = tensor("op_3389_pad_type_0"), val = tensor("custom")]; + tensor var_3389_pad_0 = const()[name = tensor("op_3389_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(448547520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(449776384))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(449776576)))]; + tensor var_3389_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_3387, groups = var_1186, pad = var_3389_pad_0, pad_type = var_3389_pad_type_0, strides = var_3385, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_237_cast)[name = tensor("op_3389_cast")]; + tensor inputs_93_cast = add(x = var_3389_cast, y = inputs_91_cast)[name = tensor("inputs_93_cast")]; + tensor var_3393 = const()[name = tensor("op_3393"), val = tensor([1])]; + tensor channels_mean_93_cast = reduce_mean(axes = var_3393, keep_dims = var_1181, x = inputs_93_cast)[name = tensor("channels_mean_93_cast")]; + tensor zero_mean_93_cast = sub(x = inputs_93_cast, y = channels_mean_93_cast)[name = tensor("zero_mean_93_cast")]; + tensor zero_mean_sq_93_cast = mul(x = zero_mean_93_cast, y = zero_mean_93_cast)[name = tensor("zero_mean_sq_93_cast")]; + tensor var_3397 = const()[name = tensor("op_3397"), val = tensor([1])]; + tensor var_3398_cast = reduce_mean(axes = var_3397, keep_dims = var_1181, x = zero_mean_sq_93_cast)[name = tensor("op_3398_cast")]; + tensor var_3399_to_fp16 = const()[name = tensor("op_3399_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3400_cast = add(x = var_3398_cast, y = var_3399_to_fp16)[name = tensor("op_3400_cast")]; + tensor denom_93_epsilon_0_to_fp16 = const()[name = tensor("denom_93_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_93_cast = rsqrt(epsilon = denom_93_epsilon_0_to_fp16, x = var_3400_cast)[name = tensor("denom_93_cast")]; + tensor out_93_cast = mul(x = zero_mean_93_cast, y = denom_93_cast)[name = tensor("out_93_cast")]; + tensor var_3404_to_fp16 = const()[name = tensor("op_3404_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(449779200)))]; + tensor var_3405_cast = add(x = out_93_cast, y = var_3404_to_fp16)[name = tensor("op_3405_cast")]; + tensor var_3407_to_fp16 = const()[name = tensor("op_3407_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(449781824)))]; + tensor hidden_states_145_cast = mul(x = var_3405_cast, y = var_3407_to_fp16)[name = tensor("hidden_states_145_cast")]; + tensor 
var_3414 = const()[name = tensor("op_3414"), val = tensor([1, 1])]; + tensor var_3416 = const()[name = tensor("op_3416"), val = tensor([1, 1])]; + tensor q_63_pad_type_0 = const()[name = tensor("q_63_pad_type_0"), val = tensor("custom")]; + tensor q_63_pad_0 = const()[name = tensor("q_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(449784448))), lut = tensor([-0x1.284p-6, 0x1.274p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_63_cast = conv(dilations = var_3416, groups = var_1186, pad = q_63_pad_0, pad_type = q_63_pad_type_0, strides = var_3414, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_145_cast)[name = tensor("q_63_cast")]; + tensor var_3420 = const()[name = tensor("op_3420"), val = tensor([1, 1])]; + tensor var_3422 = const()[name = tensor("op_3422"), val = tensor([1, 1])]; + tensor k_63_pad_type_0 = const()[name = tensor("k_63_pad_type_0"), val = tensor("custom")]; + tensor k_63_pad_0 = const()[name = tensor("k_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(449989312))), lut = tensor([-0x1.f2p-6, -0x1.278p-7, 0x1.258p-7, 0x1.f1p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_63_cast = conv(dilations = var_3422, groups = var_1186, pad = k_63_pad_0, pad_type = k_63_pad_type_0, strides = var_3420, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_63_cast")]; + tensor var_3426 = const()[name = tensor("op_3426"), val = tensor([1, 1])]; + tensor var_3428 = const()[name = tensor("op_3428"), val = tensor([1, 1])]; + tensor v_63_pad_type_0 = const()[name = tensor("v_63_pad_type_0"), val = tensor("custom")]; + tensor v_63_pad_0 = const()[name = tensor("v_63_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(450644736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(451955520))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_63_cast = conv(dilations = var_3428, groups = var_1186, pad = v_63_pad_0, pad_type = v_63_pad_type_0, strides = var_3426, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_63_cast")]; + tensor var_3432 = const()[name = tensor("op_3432"), val = tensor([2, 20, 64, -1])]; + tensor var_3433_cast = reshape(shape = var_3432, x = q_63_cast)[name = tensor("op_3433_cast")]; + tensor var_3434 = const()[name = tensor("op_3434"), val = tensor([2, 20, 64, -1])]; + tensor var_3435_cast = reshape(shape = var_3434, x = k_63_cast)[name = tensor("op_3435_cast")]; + tensor var_3436 = 
const()[name = tensor("op_3436"), val = tensor([2, 20, 64, -1])]; + tensor var_3437_cast = reshape(shape = var_3436, x = v_63_cast)[name = tensor("op_3437_cast")]; + tensor attn_weights_125_transpose_x_0 = const()[name = tensor("attn_weights_125_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_125_transpose_y_0 = const()[name = tensor("attn_weights_125_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_125_cast = matmul(transpose_x = attn_weights_125_transpose_x_0, transpose_y = attn_weights_125_transpose_y_0, x = var_3433_cast, y = var_3435_cast)[name = tensor("attn_weights_125_cast")]; + tensor attn_weights_127_cast = mul(x = attn_weights_125_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_127_cast")]; + tensor var_3441_cast = softmax(axis = var_1170, x = attn_weights_127_cast)[name = tensor("op_3441_cast")]; + tensor attn_63_transpose_x_0 = const()[name = tensor("attn_63_transpose_x_0"), val = tensor(false)]; + tensor attn_63_transpose_y_0 = const()[name = tensor("attn_63_transpose_y_0"), val = tensor(true)]; + tensor attn_63_cast = matmul(transpose_x = attn_63_transpose_x_0, transpose_y = attn_63_transpose_y_0, x = var_3437_cast, y = var_3441_cast)[name = tensor("attn_63_cast")]; + tensor var_3445 = const()[name = tensor("op_3445"), val = tensor([2, 1280, 1, -1])]; + tensor input_239_cast = reshape(shape = var_3445, x = attn_63_cast)[name = tensor("input_239_cast")]; + tensor var_3450 = const()[name = tensor("op_3450"), val = tensor([1, 1])]; + tensor var_3452 = const()[name = tensor("op_3452"), val = tensor([1, 1])]; + tensor var_3454_pad_type_0 = const()[name = tensor("op_3454_pad_type_0"), val = tensor("custom")]; + tensor var_3454_pad_0 = const()[name = tensor("op_3454_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(451955648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(452774912))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(452775040)))]; + tensor var_3454_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_3452, groups = var_1186, pad = var_3454_pad_0, pad_type = var_3454_pad_type_0, strides = var_3450, weight = down_blocks_2_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_239_cast)[name = tensor("op_3454_cast")]; + tensor inputs_95_cast = add(x = var_3454_cast, y = inputs_93_cast)[name = tensor("inputs_95_cast")]; + tensor var_3458 = const()[name = tensor("op_3458"), val = tensor([1])]; + tensor channels_mean_95_cast = reduce_mean(axes = var_3458, keep_dims = var_1181, x = inputs_95_cast)[name = tensor("channels_mean_95_cast")]; + tensor zero_mean_95_cast = sub(x = inputs_95_cast, y = channels_mean_95_cast)[name = tensor("zero_mean_95_cast")]; + tensor zero_mean_sq_95_cast = mul(x = zero_mean_95_cast, y = zero_mean_95_cast)[name = tensor("zero_mean_sq_95_cast")]; + tensor var_3462 = const()[name = tensor("op_3462"), val = tensor([1])]; 
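[Editor's illustration — not part of the generated model text.] Each `constexpr_lut_to_dense` statement in this file rebuilds a dense fp16 weight tensor at load time from palettized storage: a per-weight index stream plus a small lookup table of centroids. The mixed-bit nature of the recipe is visible directly in the literals, e.g. a two-entry (1-bit) table for `transformer_blocks_1_attn2_to_q` versus a four-entry (2-bit) table for `transformer_blocks_0_attn2_to_q`, while larger tables are read from the shared `weight.bin` blob. The snippet below is a minimal numpy sketch of that expansion; `lut_to_dense` is a hypothetical helper name, and it assumes one unpacked index per weight, whereas the real MIL op stores indices bit-packed (e.g. 2 bits/weight for a 4-entry palette) and decodes them inside the Core ML runtime.

```python
import numpy as np

def lut_to_dense(indices: np.ndarray, lut: np.ndarray, shape: tuple) -> np.ndarray:
    """Sketch of constexpr_lut_to_dense: each index selects one palette entry.

    indices: flat palette indices, one per weight (real op: bit-packed).
    lut:     palette of fp16 centroids, len(lut) == 2**nbits.
    shape:   target weight shape, e.g. (1280, 1280, 1, 1) for a to_q proj.
    """
    dense = lut.astype(np.float16)[indices]  # gather: index -> centroid
    return dense.reshape(shape)

# 2-bit palette copied from the transformer_blocks_0_attn2_to_q statement above.
lut = np.array(
    [float.fromhex(h) for h in
     ("-0x1.d3cp-6", "-0x1.1ccp-7", "0x1.1a8p-7", "0x1.d28p-6")],
    dtype=np.float16,
)
idx = np.random.randint(0, 4, size=1280 * 1280)   # hypothetical index stream
w = lut_to_dense(idx, lut, (1280, 1280, 1, 1))
assert w.dtype == np.float16 and w.shape == (1280, 1280, 1, 1)
```

For orientation, the recurring `reduce_mean` / `sub` / `mul` / `rsqrt` chains surrounding this point are an unrolled LayerNorm over the channel axis: the fp16 constant `0x1.5p-17` is approximately 1.0e-5 (the usual LayerNorm epsilon added to the variance) and `0x1p-24` is approximately 6.0e-8 (the epsilon guard passed to `rsqrt`), after which the blob-loaded bias and scale tensors apply the learned shift and gain.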
+ tensor var_3463_cast = reduce_mean(axes = var_3462, keep_dims = var_1181, x = zero_mean_sq_95_cast)[name = tensor("op_3463_cast")]; + tensor var_3464_to_fp16 = const()[name = tensor("op_3464_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3465_cast = add(x = var_3463_cast, y = var_3464_to_fp16)[name = tensor("op_3465_cast")]; + tensor denom_95_epsilon_0_to_fp16 = const()[name = tensor("denom_95_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_95_cast = rsqrt(epsilon = denom_95_epsilon_0_to_fp16, x = var_3465_cast)[name = tensor("denom_95_cast")]; + tensor out_95_cast = mul(x = zero_mean_95_cast, y = denom_95_cast)[name = tensor("out_95_cast")]; + tensor var_3469_to_fp16 = const()[name = tensor("op_3469_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(452777664)))]; + tensor var_3470_cast = add(x = out_95_cast, y = var_3469_to_fp16)[name = tensor("op_3470_cast")]; + tensor var_3472_to_fp16 = const()[name = tensor("op_3472_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(452780288)))]; + tensor input_241_cast = mul(x = var_3470_cast, y = var_3472_to_fp16)[name = tensor("input_241_cast")]; + tensor var_3480 = const()[name = tensor("op_3480"), val = tensor([1, 1])]; + tensor var_3482 = const()[name = tensor("op_3482"), val = tensor([1, 1])]; + tensor var_3484_pad_type_0 = const()[name = tensor("op_3484_pad_type_0"), val = tensor("custom")]; + tensor var_3484_pad_0 = const()[name = tensor("op_3484_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(452782912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(462613376))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(462613568)))]; + tensor var_3484_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_3482, groups = var_1186, pad = var_3484_pad_0, pad_type = var_3484_pad_type_0, strides = var_3480, weight = down_blocks_2_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_241_cast)[name = tensor("op_3484_cast")]; + tensor var_3485_split_sizes_0 = const()[name = tensor("op_3485_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3485_axis_0 = const()[name = tensor("op_3485_axis_0"), val = tensor(1)]; + tensor var_3485_cast_0, tensor var_3485_cast_1 = split(axis = var_3485_axis_0, split_sizes = var_3485_split_sizes_0, x = var_3484_cast)[name = tensor("op_3485_cast")]; + tensor var_3487_mode_0 = const()[name = tensor("op_3487_mode_0"), val = tensor("EXACT")]; + tensor var_3487_cast = gelu(mode = var_3487_mode_0, x = var_3485_cast_1)[name = tensor("op_3487_cast")]; + tensor input_243_cast = mul(x = var_3485_cast_0, y = var_3487_cast)[name = tensor("input_243_cast")]; + tensor var_3491 = const()[name = tensor("op_3491"), val = tensor([1, 1])]; + tensor var_3493 = const()[name = tensor("op_3493"), val = tensor([1, 1])]; + tensor 
var_3495_pad_type_0 = const()[name = tensor("op_3495_pad_type_0"), val = tensor("custom")]; + tensor var_3495_pad_0 = const()[name = tensor("op_3495_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(462634112))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(467549376))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(467549568)))]; + tensor var_3495_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_3493, groups = var_1186, pad = var_3495_pad_0, pad_type = var_3495_pad_type_0, strides = var_3491, weight = down_blocks_2_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_243_cast)[name = tensor("op_3495_cast")]; + tensor inputs_97_cast = add(x = var_3495_cast, y = inputs_95_cast)[name = tensor("inputs_97_cast")]; + tensor var_3505 = const()[name = tensor("op_3505"), val = tensor([1])]; + tensor channels_mean_97_cast = reduce_mean(axes = var_3505, keep_dims = var_1181, x = inputs_97_cast)[name = tensor("channels_mean_97_cast")]; + tensor zero_mean_97_cast = sub(x = inputs_97_cast, y = channels_mean_97_cast)[name = tensor("zero_mean_97_cast")]; + tensor zero_mean_sq_97_cast = mul(x = zero_mean_97_cast, y = zero_mean_97_cast)[name = tensor("zero_mean_sq_97_cast")]; + tensor var_3509 = const()[name = tensor("op_3509"), val = tensor([1])]; + tensor var_3510_cast = reduce_mean(axes = var_3509, keep_dims = var_1181, x = zero_mean_sq_97_cast)[name = tensor("op_3510_cast")]; + tensor var_3511_to_fp16 = const()[name = tensor("op_3511_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3512_cast = add(x = var_3510_cast, y = var_3511_to_fp16)[name = tensor("op_3512_cast")]; + tensor denom_97_epsilon_0_to_fp16 = const()[name = tensor("denom_97_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_97_cast = rsqrt(epsilon = denom_97_epsilon_0_to_fp16, x = var_3512_cast)[name = tensor("denom_97_cast")]; + tensor out_97_cast = mul(x = zero_mean_97_cast, y = denom_97_cast)[name = tensor("out_97_cast")]; + tensor var_3516_to_fp16 = const()[name = tensor("op_3516_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(467552192)))]; + tensor var_3517_cast = add(x = out_97_cast, y = var_3516_to_fp16)[name = tensor("op_3517_cast")]; + tensor var_3519_to_fp16 = const()[name = tensor("op_3519_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(467554816)))]; + tensor hidden_states_149_cast = mul(x = var_3517_cast, y = var_3519_to_fp16)[name = tensor("hidden_states_149_cast")]; + tensor var_3526 = const()[name = tensor("op_3526"), val = tensor([1, 1])]; + tensor var_3528 = const()[name = tensor("op_3528"), val = tensor([1, 1])]; + tensor q_65_pad_type_0 = const()[name = tensor("q_65_pad_type_0"), val = tensor("custom")]; + tensor q_65_pad_0 = const()[name = tensor("q_65_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(467557440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(468376704))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_65_cast = conv(dilations = var_3528, groups = var_1186, pad = q_65_pad_0, pad_type = q_65_pad_type_0, strides = var_3526, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_149_cast)[name = tensor("q_65_cast")]; + tensor var_3532 = const()[name = tensor("op_3532"), val = tensor([1, 1])]; + tensor var_3534 = const()[name = tensor("op_3534"), val = tensor([1, 1])]; + tensor k_65_pad_type_0 = const()[name = tensor("k_65_pad_type_0"), val = tensor("custom")]; + tensor k_65_pad_0 = const()[name = tensor("k_65_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(468376832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(469196096))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_65_cast = conv(dilations = var_3534, groups = var_1186, pad = k_65_pad_0, pad_type = k_65_pad_type_0, strides = var_3532, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_149_cast)[name = tensor("k_65_cast")]; + tensor var_3538 = const()[name = tensor("op_3538"), val = tensor([1, 1])]; + tensor var_3540 = const()[name = tensor("op_3540"), val = tensor([1, 1])]; + tensor v_65_pad_type_0 = const()[name = tensor("v_65_pad_type_0"), val = tensor("custom")]; + tensor v_65_pad_0 = const()[name = tensor("v_65_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(469196224))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(470425088))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_65_cast = conv(dilations = var_3540, groups = var_1186, pad = v_65_pad_0, pad_type = v_65_pad_type_0, strides = var_3538, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_149_cast)[name = tensor("v_65_cast")]; + tensor var_3544 = const()[name = tensor("op_3544"), val = tensor([2, 20, 64, -1])]; + tensor var_3545_cast = reshape(shape = var_3544, x = q_65_cast)[name = tensor("op_3545_cast")]; + tensor var_3546 = const()[name = tensor("op_3546"), val = tensor([2, 20, 64, -1])]; + tensor var_3547_cast = reshape(shape = var_3546, x = k_65_cast)[name = tensor("op_3547_cast")]; + tensor var_3548 = const()[name = tensor("op_3548"), val = tensor([2, 20, 64, -1])]; + tensor var_3549_cast = reshape(shape = var_3548, x = v_65_cast)[name = tensor("op_3549_cast")]; + tensor attn_weights_129_transpose_x_0 = const()[name = 
tensor("attn_weights_129_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_129_transpose_y_0 = const()[name = tensor("attn_weights_129_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_129_cast = matmul(transpose_x = attn_weights_129_transpose_x_0, transpose_y = attn_weights_129_transpose_y_0, x = var_3545_cast, y = var_3547_cast)[name = tensor("attn_weights_129_cast")]; + tensor attn_weights_131_cast = mul(x = attn_weights_129_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_131_cast")]; + tensor var_3553_cast = softmax(axis = var_1170, x = attn_weights_131_cast)[name = tensor("op_3553_cast")]; + tensor attn_65_transpose_x_0 = const()[name = tensor("attn_65_transpose_x_0"), val = tensor(false)]; + tensor attn_65_transpose_y_0 = const()[name = tensor("attn_65_transpose_y_0"), val = tensor(true)]; + tensor attn_65_cast = matmul(transpose_x = attn_65_transpose_x_0, transpose_y = attn_65_transpose_y_0, x = var_3549_cast, y = var_3553_cast)[name = tensor("attn_65_cast")]; + tensor var_3557 = const()[name = tensor("op_3557"), val = tensor([2, 1280, 1, -1])]; + tensor input_245_cast = reshape(shape = var_3557, x = attn_65_cast)[name = tensor("input_245_cast")]; + tensor var_3562 = const()[name = tensor("op_3562"), val = tensor([1, 1])]; + tensor var_3564 = const()[name = tensor("op_3564"), val = tensor([1, 1])]; + tensor var_3566_pad_type_0 = const()[name = tensor("op_3566_pad_type_0"), val = tensor("custom")]; + tensor var_3566_pad_0 = const()[name = tensor("op_3566_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(470425280))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(471244544))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(471244672)))]; + tensor var_3566_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_3564, groups = var_1186, pad = var_3566_pad_0, pad_type = var_3566_pad_type_0, strides = var_3562, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_245_cast)[name = tensor("op_3566_cast")]; + tensor inputs_99_cast = add(x = var_3566_cast, y = inputs_97_cast)[name = tensor("inputs_99_cast")]; + tensor var_3570 = const()[name = tensor("op_3570"), val = tensor([1])]; + tensor channels_mean_99_cast = reduce_mean(axes = var_3570, keep_dims = var_1181, x = inputs_99_cast)[name = tensor("channels_mean_99_cast")]; + tensor zero_mean_99_cast = sub(x = inputs_99_cast, y = channels_mean_99_cast)[name = tensor("zero_mean_99_cast")]; + tensor zero_mean_sq_99_cast = mul(x = zero_mean_99_cast, y = zero_mean_99_cast)[name = tensor("zero_mean_sq_99_cast")]; + tensor var_3574 = const()[name = tensor("op_3574"), val = tensor([1])]; + tensor var_3575_cast = reduce_mean(axes = var_3574, keep_dims = var_1181, x = zero_mean_sq_99_cast)[name = tensor("op_3575_cast")]; + tensor var_3576_to_fp16 = const()[name = tensor("op_3576_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor var_3577_cast = add(x = var_3575_cast, y = var_3576_to_fp16)[name = tensor("op_3577_cast")]; + tensor denom_99_epsilon_0_to_fp16 = const()[name = tensor("denom_99_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_99_cast = rsqrt(epsilon = denom_99_epsilon_0_to_fp16, x = var_3577_cast)[name = tensor("denom_99_cast")]; + tensor out_99_cast = mul(x = zero_mean_99_cast, y = denom_99_cast)[name = tensor("out_99_cast")]; + tensor var_3581_to_fp16 = const()[name = tensor("op_3581_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(471247296)))]; + tensor var_3582_cast = add(x = out_99_cast, y = var_3581_to_fp16)[name = tensor("op_3582_cast")]; + tensor var_3584_to_fp16 = const()[name = tensor("op_3584_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(471249920)))]; + tensor hidden_states_151_cast = mul(x = var_3582_cast, y = var_3584_to_fp16)[name = tensor("hidden_states_151_cast")]; + tensor var_3591 = const()[name = tensor("op_3591"), val = tensor([1, 1])]; + tensor var_3593 = const()[name = tensor("op_3593"), val = tensor([1, 1])]; + tensor q_67_pad_type_0 = const()[name = tensor("q_67_pad_type_0"), val = tensor("custom")]; + tensor q_67_pad_0 = const()[name = tensor("q_67_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(471252544))), lut = tensor([-0x1.354p-6, 0x1.364p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_67_cast = conv(dilations = var_3593, groups = var_1186, pad = q_67_pad_0, pad_type = q_67_pad_type_0, strides = var_3591, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_151_cast)[name = tensor("q_67_cast")]; + tensor var_3597 = const()[name = tensor("op_3597"), val = tensor([1, 1])]; + tensor var_3599 = const()[name = tensor("op_3599"), val = tensor([1, 1])]; + tensor k_67_pad_type_0 = const()[name = tensor("k_67_pad_type_0"), val = tensor("custom")]; + tensor k_67_pad_0 = const()[name = tensor("k_67_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(471457408))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(472768192))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_67_cast = conv(dilations = var_3599, groups = var_1186, pad = k_67_pad_0, pad_type = k_67_pad_type_0, strides = var_3597, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_67_cast")]; + tensor var_3603 = const()[name = tensor("op_3603"), val = tensor([1, 1])]; + tensor var_3605 = const()[name = tensor("op_3605"), val = tensor([1, 1])]; + tensor v_67_pad_type_0 = const()[name = tensor("v_67_pad_type_0"), val = tensor("custom")]; + tensor v_67_pad_0 = const()[name = tensor("v_67_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(472768320))), lut = tensor([-0x1.1bcp-5, -0x1.49cp-7, 0x1.4ccp-7, 0x1.1dp-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_67_cast = conv(dilations = var_3605, groups = var_1186, pad = v_67_pad_0, pad_type = v_67_pad_type_0, strides = var_3603, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_67_cast")]; + tensor var_3609 = const()[name = tensor("op_3609"), val = tensor([2, 20, 64, -1])]; + tensor var_3610_cast = reshape(shape = var_3609, x = q_67_cast)[name = tensor("op_3610_cast")]; + tensor var_3611 = const()[name = tensor("op_3611"), val = tensor([2, 20, 64, -1])]; + tensor var_3612_cast = reshape(shape = var_3611, x = k_67_cast)[name = tensor("op_3612_cast")]; + tensor var_3613 = const()[name = tensor("op_3613"), val = tensor([2, 20, 64, -1])]; + tensor var_3614_cast = reshape(shape = var_3613, x = v_67_cast)[name = tensor("op_3614_cast")]; + tensor attn_weights_133_transpose_x_0 = const()[name = tensor("attn_weights_133_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_133_transpose_y_0 = const()[name = tensor("attn_weights_133_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_133_cast = matmul(transpose_x = attn_weights_133_transpose_x_0, transpose_y = attn_weights_133_transpose_y_0, x = var_3610_cast, y = var_3612_cast)[name = tensor("attn_weights_133_cast")]; + tensor attn_weights_135_cast = mul(x = attn_weights_133_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_135_cast")]; + tensor var_3618_cast = softmax(axis = var_1170, x = attn_weights_135_cast)[name = tensor("op_3618_cast")]; + tensor attn_67_transpose_x_0 = const()[name = tensor("attn_67_transpose_x_0"), val = tensor(false)]; + tensor attn_67_transpose_y_0 = const()[name = tensor("attn_67_transpose_y_0"), val = tensor(true)]; + tensor attn_67_cast = matmul(transpose_x = attn_67_transpose_x_0, transpose_y = attn_67_transpose_y_0, x = var_3614_cast, y = var_3618_cast)[name = tensor("attn_67_cast")]; + tensor var_3622 = const()[name = tensor("op_3622"), val = tensor([2, 1280, 1, -1])]; + tensor input_247_cast = reshape(shape = var_3622, x = attn_67_cast)[name = tensor("input_247_cast")]; + tensor var_3627 = const()[name = tensor("op_3627"), val = tensor([1, 1])]; + tensor var_3629 = const()[name = tensor("op_3629"), val = tensor([1, 1])]; + tensor var_3631_pad_type_0 = const()[name = tensor("op_3631_pad_type_0"), val = tensor("custom")]; + tensor var_3631_pad_0 = const()[name = tensor("op_3631_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(473423744))), lut = tensor([-0x1.598p-6, -0x1.9ccp-8, 0x1.9fcp-8, 0x1.5ap-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(473833408)))]; + tensor var_3631_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_3629, groups = var_1186, pad = var_3631_pad_0, pad_type = var_3631_pad_type_0, strides = var_3627, weight = down_blocks_2_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_247_cast)[name = tensor("op_3631_cast")]; + tensor inputs_101_cast = add(x = var_3631_cast, y = inputs_99_cast)[name = tensor("inputs_101_cast")]; + tensor var_3635 = const()[name = tensor("op_3635"), val = tensor([1])]; + tensor channels_mean_101_cast = reduce_mean(axes = var_3635, keep_dims = var_1181, x = inputs_101_cast)[name = tensor("channels_mean_101_cast")]; + tensor zero_mean_101_cast = sub(x = inputs_101_cast, y = channels_mean_101_cast)[name = tensor("zero_mean_101_cast")]; + tensor zero_mean_sq_101_cast = mul(x = zero_mean_101_cast, y = zero_mean_101_cast)[name = tensor("zero_mean_sq_101_cast")]; + tensor var_3639 = const()[name = tensor("op_3639"), val = tensor([1])]; + tensor var_3640_cast = reduce_mean(axes = var_3639, keep_dims = var_1181, x = zero_mean_sq_101_cast)[name = tensor("op_3640_cast")]; + tensor var_3641_to_fp16 = const()[name = tensor("op_3641_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3642_cast = add(x = var_3640_cast, y = var_3641_to_fp16)[name = tensor("op_3642_cast")]; + tensor denom_101_epsilon_0_to_fp16 = const()[name = tensor("denom_101_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_101_cast = rsqrt(epsilon = denom_101_epsilon_0_to_fp16, x = var_3642_cast)[name = tensor("denom_101_cast")]; + tensor out_101_cast = mul(x = zero_mean_101_cast, y = denom_101_cast)[name = tensor("out_101_cast")]; + tensor var_3646_to_fp16 = const()[name = tensor("op_3646_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(473836032)))]; + tensor var_3647_cast = add(x = out_101_cast, y = var_3646_to_fp16)[name = tensor("op_3647_cast")]; + tensor var_3649_to_fp16 = const()[name = tensor("op_3649_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(473838656)))]; + tensor input_249_cast = mul(x = var_3647_cast, y = var_3649_to_fp16)[name = tensor("input_249_cast")]; + tensor var_3657 = const()[name = tensor("op_3657"), val = tensor([1, 1])]; + tensor var_3659 = const()[name = tensor("op_3659"), val = tensor([1, 1])]; + tensor var_3661_pad_type_0 = const()[name = tensor("op_3661_pad_type_0"), val = tensor("custom")]; + tensor var_3661_pad_0 = const()[name = tensor("op_3661_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(473841280))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(483671744))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(483671936)))]; + tensor var_3661_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, 
dilations = var_3659, groups = var_1186, pad = var_3661_pad_0, pad_type = var_3661_pad_type_0, strides = var_3657, weight = down_blocks_2_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_249_cast)[name = tensor("op_3661_cast")]; + tensor var_3662_split_sizes_0 = const()[name = tensor("op_3662_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3662_axis_0 = const()[name = tensor("op_3662_axis_0"), val = tensor(1)]; + tensor var_3662_cast_0, tensor var_3662_cast_1 = split(axis = var_3662_axis_0, split_sizes = var_3662_split_sizes_0, x = var_3661_cast)[name = tensor("op_3662_cast")]; + tensor var_3664_mode_0 = const()[name = tensor("op_3664_mode_0"), val = tensor("EXACT")]; + tensor var_3664_cast = gelu(mode = var_3664_mode_0, x = var_3662_cast_1)[name = tensor("op_3664_cast")]; + tensor input_251_cast = mul(x = var_3662_cast_0, y = var_3664_cast)[name = tensor("input_251_cast")]; + tensor var_3668 = const()[name = tensor("op_3668"), val = tensor([1, 1])]; + tensor var_3670 = const()[name = tensor("op_3670"), val = tensor([1, 1])]; + tensor var_3672_pad_type_0 = const()[name = tensor("op_3672_pad_type_0"), val = tensor("custom")]; + tensor var_3672_pad_0 = const()[name = tensor("op_3672_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(483692480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(488607744))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(488607936)))]; + tensor var_3672_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_3670, groups = var_1186, pad = var_3672_pad_0, pad_type = var_3672_pad_type_0, strides = var_3668, weight = down_blocks_2_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_251_cast)[name = tensor("op_3672_cast")]; + tensor inputs_103_cast = add(x = var_3672_cast, y = inputs_101_cast)[name = tensor("inputs_103_cast")]; + tensor var_3682 = const()[name = tensor("op_3682"), val = tensor([1])]; + tensor channels_mean_103_cast = reduce_mean(axes = var_3682, keep_dims = var_1181, x = inputs_103_cast)[name = tensor("channels_mean_103_cast")]; + tensor zero_mean_103_cast = sub(x = inputs_103_cast, y = channels_mean_103_cast)[name = tensor("zero_mean_103_cast")]; + tensor zero_mean_sq_103_cast = mul(x = zero_mean_103_cast, y = zero_mean_103_cast)[name = tensor("zero_mean_sq_103_cast")]; + tensor var_3686 = const()[name = tensor("op_3686"), val = tensor([1])]; + tensor var_3687_cast = reduce_mean(axes = var_3686, keep_dims = var_1181, x = zero_mean_sq_103_cast)[name = tensor("op_3687_cast")]; + tensor var_3688_to_fp16 = const()[name = tensor("op_3688_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3689_cast = add(x = var_3687_cast, y = var_3688_to_fp16)[name = tensor("op_3689_cast")]; + tensor denom_103_epsilon_0_to_fp16 = const()[name = tensor("denom_103_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_103_cast = rsqrt(epsilon = 
denom_103_epsilon_0_to_fp16, x = var_3689_cast)[name = tensor("denom_103_cast")]; + tensor out_103_cast = mul(x = zero_mean_103_cast, y = denom_103_cast)[name = tensor("out_103_cast")]; + tensor var_3693_to_fp16 = const()[name = tensor("op_3693_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(488610560)))]; + tensor var_3694_cast = add(x = out_103_cast, y = var_3693_to_fp16)[name = tensor("op_3694_cast")]; + tensor var_3696_to_fp16 = const()[name = tensor("op_3696_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(488613184)))]; + tensor hidden_states_155_cast = mul(x = var_3694_cast, y = var_3696_to_fp16)[name = tensor("hidden_states_155_cast")]; + tensor var_3703 = const()[name = tensor("op_3703"), val = tensor([1, 1])]; + tensor var_3705 = const()[name = tensor("op_3705"), val = tensor([1, 1])]; + tensor q_69_pad_type_0 = const()[name = tensor("q_69_pad_type_0"), val = tensor("custom")]; + tensor q_69_pad_0 = const()[name = tensor("q_69_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(488615808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(489435072))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_69_cast = conv(dilations = var_3705, groups = var_1186, pad = q_69_pad_0, pad_type = q_69_pad_type_0, strides = var_3703, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_155_cast)[name = tensor("q_69_cast")]; + tensor var_3709 = const()[name = tensor("op_3709"), val = tensor([1, 1])]; + tensor var_3711 = const()[name = tensor("op_3711"), val = tensor([1, 1])]; + tensor k_69_pad_type_0 = const()[name = tensor("k_69_pad_type_0"), val = tensor("custom")]; + tensor k_69_pad_0 = const()[name = tensor("k_69_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(489435200))), lut = tensor([-0x1.428p-5, -0x1.82cp-7, 0x1.84cp-7, 0x1.428p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_69_cast = conv(dilations = var_3711, groups = var_1186, pad = k_69_pad_0, pad_type = k_69_pad_type_0, strides = var_3709, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_155_cast)[name = tensor("k_69_cast")]; + tensor var_3715 = const()[name = tensor("op_3715"), val = tensor([1, 1])]; + tensor var_3717 = const()[name = tensor("op_3717"), val = tensor([1, 1])]; + tensor v_69_pad_type_0 = const()[name = tensor("v_69_pad_type_0"), val = tensor("custom")]; + tensor v_69_pad_0 = const()[name = tensor("v_69_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(489844864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(491073728))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_69_cast = conv(dilations = var_3717, groups = var_1186, pad = v_69_pad_0, pad_type = v_69_pad_type_0, strides = var_3715, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_155_cast)[name = tensor("v_69_cast")]; + tensor var_3721 = const()[name = tensor("op_3721"), val = tensor([2, 20, 64, -1])]; + tensor var_3722_cast = reshape(shape = var_3721, x = q_69_cast)[name = tensor("op_3722_cast")]; + tensor var_3723 = const()[name = tensor("op_3723"), val = tensor([2, 20, 64, -1])]; + tensor var_3724_cast = reshape(shape = var_3723, x = k_69_cast)[name = tensor("op_3724_cast")]; + tensor var_3725 = const()[name = tensor("op_3725"), val = tensor([2, 20, 64, -1])]; + tensor var_3726_cast = reshape(shape = var_3725, x = v_69_cast)[name = tensor("op_3726_cast")]; + tensor attn_weights_137_transpose_x_0 = const()[name = tensor("attn_weights_137_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_137_transpose_y_0 = const()[name = tensor("attn_weights_137_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_137_cast = matmul(transpose_x = attn_weights_137_transpose_x_0, transpose_y = attn_weights_137_transpose_y_0, x = var_3722_cast, y = var_3724_cast)[name = tensor("attn_weights_137_cast")]; + tensor attn_weights_139_cast = mul(x = attn_weights_137_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_139_cast")]; + tensor var_3730_cast = softmax(axis = var_1170, x = attn_weights_139_cast)[name = tensor("op_3730_cast")]; + tensor attn_69_transpose_x_0 = const()[name = tensor("attn_69_transpose_x_0"), val = tensor(false)]; + tensor attn_69_transpose_y_0 = const()[name = tensor("attn_69_transpose_y_0"), val = tensor(true)]; + tensor attn_69_cast = matmul(transpose_x = attn_69_transpose_x_0, transpose_y = attn_69_transpose_y_0, x = var_3726_cast, y = var_3730_cast)[name = tensor("attn_69_cast")]; + tensor var_3734 = const()[name = tensor("op_3734"), val = tensor([2, 1280, 1, -1])]; + tensor input_253_cast = reshape(shape = var_3734, x = attn_69_cast)[name = tensor("input_253_cast")]; + tensor var_3739 = const()[name = tensor("op_3739"), val = tensor([1, 1])]; + tensor var_3741 = const()[name = tensor("op_3741"), val = tensor([1, 1])]; + tensor var_3743_pad_type_0 = const()[name = tensor("op_3743_pad_type_0"), val = tensor("custom")]; + tensor var_3743_pad_0 = const()[name = tensor("op_3743_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(491073920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(492302784))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(492302976)))]; + tensor var_3743_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_3741, groups = var_1186, pad = var_3743_pad_0, 
pad_type = var_3743_pad_type_0, strides = var_3739, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_253_cast)[name = tensor("op_3743_cast")]; + tensor inputs_105_cast = add(x = var_3743_cast, y = inputs_103_cast)[name = tensor("inputs_105_cast")]; + tensor var_3747 = const()[name = tensor("op_3747"), val = tensor([1])]; + tensor channels_mean_105_cast = reduce_mean(axes = var_3747, keep_dims = var_1181, x = inputs_105_cast)[name = tensor("channels_mean_105_cast")]; + tensor zero_mean_105_cast = sub(x = inputs_105_cast, y = channels_mean_105_cast)[name = tensor("zero_mean_105_cast")]; + tensor zero_mean_sq_105_cast = mul(x = zero_mean_105_cast, y = zero_mean_105_cast)[name = tensor("zero_mean_sq_105_cast")]; + tensor var_3751 = const()[name = tensor("op_3751"), val = tensor([1])]; + tensor var_3752_cast = reduce_mean(axes = var_3751, keep_dims = var_1181, x = zero_mean_sq_105_cast)[name = tensor("op_3752_cast")]; + tensor var_3753_to_fp16 = const()[name = tensor("op_3753_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3754_cast = add(x = var_3752_cast, y = var_3753_to_fp16)[name = tensor("op_3754_cast")]; + tensor denom_105_epsilon_0_to_fp16 = const()[name = tensor("denom_105_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_105_cast = rsqrt(epsilon = denom_105_epsilon_0_to_fp16, x = var_3754_cast)[name = tensor("denom_105_cast")]; + tensor out_105_cast = mul(x = zero_mean_105_cast, y = denom_105_cast)[name = tensor("out_105_cast")]; + tensor var_3758_to_fp16 = const()[name = tensor("op_3758_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(492305600)))]; + tensor var_3759_cast = add(x = out_105_cast, y = var_3758_to_fp16)[name = tensor("op_3759_cast")]; + tensor var_3761_to_fp16 = const()[name = tensor("op_3761_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(492308224)))]; + tensor hidden_states_157_cast = mul(x = var_3759_cast, y = var_3761_to_fp16)[name = tensor("hidden_states_157_cast")]; + tensor var_3768 = const()[name = tensor("op_3768"), val = tensor([1, 1])]; + tensor var_3770 = const()[name = tensor("op_3770"), val = tensor([1, 1])]; + tensor q_71_pad_type_0 = const()[name = tensor("q_71_pad_type_0"), val = tensor("custom")]; + tensor q_71_pad_0 = const()[name = tensor("q_71_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(492310848))), lut = tensor([-0x1.18p-5, -0x1.514p-7, 0x1.4d8p-7, 0x1.17p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_71_cast = conv(dilations = var_3770, groups = var_1186, pad = q_71_pad_0, pad_type = q_71_pad_type_0, strides = var_3768, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_157_cast)[name = tensor("q_71_cast")]; + tensor var_3774 = const()[name = tensor("op_3774"), val = tensor([1, 1])]; + tensor var_3776 = const()[name = tensor("op_3776"), val = tensor([1, 1])]; + tensor k_71_pad_type_0 = const()[name = tensor("k_71_pad_type_0"), val = tensor("custom")]; + tensor k_71_pad_0 = const()[name = tensor("k_71_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(492720512))), lut = tensor([-0x1.f2cp-7, 0x1.f4cp-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_71_cast = conv(dilations = var_3776, groups = var_1186, pad = k_71_pad_0, pad_type = k_71_pad_type_0, strides = var_3774, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_71_cast")]; + tensor var_3780 = const()[name = tensor("op_3780"), val = tensor([1, 1])]; + tensor var_3782 = const()[name = tensor("op_3782"), val = tensor([1, 1])]; + tensor v_71_pad_type_0 = const()[name = tensor("v_71_pad_type_0"), val = tensor("custom")]; + tensor v_71_pad_0 = const()[name = tensor("v_71_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(493048256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494359040))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_71_cast = conv(dilations = var_3782, groups = var_1186, pad = v_71_pad_0, pad_type = v_71_pad_type_0, strides = var_3780, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_71_cast")]; + tensor var_3786 = const()[name = tensor("op_3786"), val = tensor([2, 20, 64, -1])]; + tensor var_3787_cast = reshape(shape = var_3786, x = q_71_cast)[name = tensor("op_3787_cast")]; + tensor var_3788 = const()[name = tensor("op_3788"), val = tensor([2, 20, 64, -1])]; + tensor var_3789_cast = reshape(shape = var_3788, x = k_71_cast)[name = tensor("op_3789_cast")]; + tensor var_3790 = const()[name = tensor("op_3790"), val = tensor([2, 20, 64, -1])]; + tensor var_3791_cast = reshape(shape = var_3790, x = v_71_cast)[name = tensor("op_3791_cast")]; + tensor attn_weights_141_transpose_x_0 = const()[name = tensor("attn_weights_141_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_141_transpose_y_0 = const()[name = tensor("attn_weights_141_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_141_cast = matmul(transpose_x = attn_weights_141_transpose_x_0, transpose_y = attn_weights_141_transpose_y_0, x = var_3787_cast, y = var_3789_cast)[name = tensor("attn_weights_141_cast")]; + tensor attn_weights_143_cast = mul(x = attn_weights_141_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_143_cast")]; + tensor var_3795_cast = softmax(axis = var_1170, x = attn_weights_143_cast)[name = tensor("op_3795_cast")]; + tensor attn_71_transpose_x_0 = const()[name = tensor("attn_71_transpose_x_0"), val = tensor(false)]; + tensor attn_71_transpose_y_0 = const()[name = tensor("attn_71_transpose_y_0"), val = tensor(true)]; + tensor attn_71_cast = matmul(transpose_x = attn_71_transpose_x_0, transpose_y = attn_71_transpose_y_0, x = var_3791_cast, y = var_3795_cast)[name = tensor("attn_71_cast")]; + tensor var_3799 = const()[name = tensor("op_3799"), val = tensor([2, 1280, 1, -1])]; + tensor input_255_cast = reshape(shape = var_3799, x = attn_71_cast)[name = 
tensor("input_255_cast")]; + tensor var_3804 = const()[name = tensor("op_3804"), val = tensor([1, 1])]; + tensor var_3806 = const()[name = tensor("op_3806"), val = tensor([1, 1])]; + tensor var_3808_pad_type_0 = const()[name = tensor("op_3808_pad_type_0"), val = tensor("custom")]; + tensor var_3808_pad_0 = const()[name = tensor("op_3808_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494359168))), lut = tensor([-0x1.618p-7, 0x1.628p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494564032)))]; + tensor var_3808_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_3806, groups = var_1186, pad = var_3808_pad_0, pad_type = var_3808_pad_type_0, strides = var_3804, weight = down_blocks_2_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_255_cast)[name = tensor("op_3808_cast")]; + tensor inputs_107_cast = add(x = var_3808_cast, y = inputs_105_cast)[name = tensor("inputs_107_cast")]; + tensor var_3812 = const()[name = tensor("op_3812"), val = tensor([1])]; + tensor channels_mean_107_cast = reduce_mean(axes = var_3812, keep_dims = var_1181, x = inputs_107_cast)[name = tensor("channels_mean_107_cast")]; + tensor zero_mean_107_cast = sub(x = inputs_107_cast, y = channels_mean_107_cast)[name = tensor("zero_mean_107_cast")]; + tensor zero_mean_sq_107_cast = mul(x = zero_mean_107_cast, y = zero_mean_107_cast)[name = tensor("zero_mean_sq_107_cast")]; + tensor var_3816 = const()[name = tensor("op_3816"), val = tensor([1])]; + tensor var_3817_cast = reduce_mean(axes = var_3816, keep_dims = var_1181, x = zero_mean_sq_107_cast)[name = tensor("op_3817_cast")]; + tensor var_3818_to_fp16 = const()[name = tensor("op_3818_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3819_cast = add(x = var_3817_cast, y = var_3818_to_fp16)[name = tensor("op_3819_cast")]; + tensor denom_107_epsilon_0_to_fp16 = const()[name = tensor("denom_107_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_107_cast = rsqrt(epsilon = denom_107_epsilon_0_to_fp16, x = var_3819_cast)[name = tensor("denom_107_cast")]; + tensor out_107_cast = mul(x = zero_mean_107_cast, y = denom_107_cast)[name = tensor("out_107_cast")]; + tensor var_3823_to_fp16 = const()[name = tensor("op_3823_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494566656)))]; + tensor var_3824_cast = add(x = out_107_cast, y = var_3823_to_fp16)[name = tensor("op_3824_cast")]; + tensor var_3826_to_fp16 = const()[name = tensor("op_3826_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494569280)))]; + tensor input_257_cast = mul(x = var_3824_cast, y = var_3826_to_fp16)[name = tensor("input_257_cast")]; + tensor var_3834 = const()[name = tensor("op_3834"), val = tensor([1, 1])]; + tensor var_3836 = const()[name = tensor("op_3836"), val = tensor([1, 1])]; + tensor var_3838_pad_type_0 = const()[name = 
tensor("op_3838_pad_type_0"), val = tensor("custom")]; + tensor var_3838_pad_0 = const()[name = tensor("op_3838_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(494571904))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(504402368))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(504402560)))]; + tensor var_3838_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_3836, groups = var_1186, pad = var_3838_pad_0, pad_type = var_3838_pad_type_0, strides = var_3834, weight = down_blocks_2_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_257_cast)[name = tensor("op_3838_cast")]; + tensor var_3839_split_sizes_0 = const()[name = tensor("op_3839_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_3839_axis_0 = const()[name = tensor("op_3839_axis_0"), val = tensor(1)]; + tensor var_3839_cast_0, tensor var_3839_cast_1 = split(axis = var_3839_axis_0, split_sizes = var_3839_split_sizes_0, x = var_3838_cast)[name = tensor("op_3839_cast")]; + tensor var_3841_mode_0 = const()[name = tensor("op_3841_mode_0"), val = tensor("EXACT")]; + tensor var_3841_cast = gelu(mode = var_3841_mode_0, x = var_3839_cast_1)[name = tensor("op_3841_cast")]; + tensor input_259_cast = mul(x = var_3839_cast_0, y = var_3841_cast)[name = tensor("input_259_cast")]; + tensor var_3845 = const()[name = tensor("op_3845"), val = tensor([1, 1])]; + tensor var_3847 = const()[name = tensor("op_3847"), val = tensor([1, 1])]; + tensor var_3849_pad_type_0 = const()[name = tensor("op_3849_pad_type_0"), val = tensor("custom")]; + tensor var_3849_pad_0 = const()[name = tensor("op_3849_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(504423104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(507699968))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(507700096)))]; + tensor var_3849_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_3847, groups = var_1186, pad = var_3849_pad_0, pad_type = var_3849_pad_type_0, strides = var_3845, weight = down_blocks_2_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_259_cast)[name = tensor("op_3849_cast")]; + tensor inputs_109_cast = add(x = var_3849_cast, y = inputs_107_cast)[name = tensor("inputs_109_cast")]; + tensor var_3859 = 
const()[name = tensor("op_3859"), val = tensor([1])]; + tensor channels_mean_109_cast = reduce_mean(axes = var_3859, keep_dims = var_1181, x = inputs_109_cast)[name = tensor("channels_mean_109_cast")]; + tensor zero_mean_109_cast = sub(x = inputs_109_cast, y = channels_mean_109_cast)[name = tensor("zero_mean_109_cast")]; + tensor zero_mean_sq_109_cast = mul(x = zero_mean_109_cast, y = zero_mean_109_cast)[name = tensor("zero_mean_sq_109_cast")]; + tensor var_3863 = const()[name = tensor("op_3863"), val = tensor([1])]; + tensor var_3864_cast = reduce_mean(axes = var_3863, keep_dims = var_1181, x = zero_mean_sq_109_cast)[name = tensor("op_3864_cast")]; + tensor var_3865_to_fp16 = const()[name = tensor("op_3865_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3866_cast = add(x = var_3864_cast, y = var_3865_to_fp16)[name = tensor("op_3866_cast")]; + tensor denom_109_epsilon_0_to_fp16 = const()[name = tensor("denom_109_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_109_cast = rsqrt(epsilon = denom_109_epsilon_0_to_fp16, x = var_3866_cast)[name = tensor("denom_109_cast")]; + tensor out_109_cast = mul(x = zero_mean_109_cast, y = denom_109_cast)[name = tensor("out_109_cast")]; + tensor var_3870_to_fp16 = const()[name = tensor("op_3870_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(507702720)))]; + tensor var_3871_cast = add(x = out_109_cast, y = var_3870_to_fp16)[name = tensor("op_3871_cast")]; + tensor var_3873_to_fp16 = const()[name = tensor("op_3873_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(507705344)))]; + tensor hidden_states_161_cast = mul(x = var_3871_cast, y = var_3873_to_fp16)[name = tensor("hidden_states_161_cast")]; + tensor var_3880 = const()[name = tensor("op_3880"), val = tensor([1, 1])]; + tensor var_3882 = const()[name = tensor("op_3882"), val = tensor([1, 1])]; + tensor q_73_pad_type_0 = const()[name = tensor("q_73_pad_type_0"), val = tensor("custom")]; + tensor q_73_pad_0 = const()[name = tensor("q_73_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(507707968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(509346432))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_73_cast = conv(dilations = var_3882, groups = var_1186, pad = q_73_pad_0, pad_type = q_73_pad_type_0, strides = var_3880, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_161_cast)[name = tensor("q_73_cast")]; + tensor var_3886 = const()[name = tensor("op_3886"), val = tensor([1, 1])]; + tensor var_3888 = const()[name = tensor("op_3888"), val = tensor([1, 1])]; + tensor k_73_pad_type_0 = const()[name = tensor("k_73_pad_type_0"), val = tensor("custom")]; + tensor k_73_pad_0 = const()[name = tensor("k_73_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(509347008))), lut = tensor([-0x1.514p-6, 0x1.52p-6]), name = 
tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_73_cast = conv(dilations = var_3888, groups = var_1186, pad = k_73_pad_0, pad_type = k_73_pad_type_0, strides = var_3886, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_161_cast)[name = tensor("k_73_cast")]; + tensor var_3892 = const()[name = tensor("op_3892"), val = tensor([1, 1])]; + tensor var_3894 = const()[name = tensor("op_3894"), val = tensor([1, 1])]; + tensor v_73_pad_type_0 = const()[name = tensor("v_73_pad_type_0"), val = tensor("custom")]; + tensor v_73_pad_0 = const()[name = tensor("v_73_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(509551872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(510780736))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_73_cast = conv(dilations = var_3894, groups = var_1186, pad = v_73_pad_0, pad_type = v_73_pad_type_0, strides = var_3892, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_161_cast)[name = tensor("v_73_cast")]; + tensor var_3898 = const()[name = tensor("op_3898"), val = tensor([2, 20, 64, -1])]; + tensor var_3899_cast = reshape(shape = var_3898, x = q_73_cast)[name = tensor("op_3899_cast")]; + tensor var_3900 = const()[name = tensor("op_3900"), val = tensor([2, 20, 64, -1])]; + tensor var_3901_cast = reshape(shape = var_3900, x = k_73_cast)[name = tensor("op_3901_cast")]; + tensor var_3902 = const()[name = tensor("op_3902"), val = tensor([2, 20, 64, -1])]; + tensor var_3903_cast = reshape(shape = var_3902, x = v_73_cast)[name = tensor("op_3903_cast")]; + tensor attn_weights_145_transpose_x_0 = const()[name = tensor("attn_weights_145_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_145_transpose_y_0 = const()[name = tensor("attn_weights_145_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_145_cast = matmul(transpose_x = attn_weights_145_transpose_x_0, transpose_y = attn_weights_145_transpose_y_0, x = var_3899_cast, y = var_3901_cast)[name = tensor("attn_weights_145_cast")]; + tensor attn_weights_147_cast = mul(x = attn_weights_145_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_147_cast")]; + tensor var_3907_cast = softmax(axis = var_1170, x = attn_weights_147_cast)[name = tensor("op_3907_cast")]; + tensor attn_73_transpose_x_0 = const()[name = tensor("attn_73_transpose_x_0"), val = tensor(false)]; + tensor attn_73_transpose_y_0 = const()[name = tensor("attn_73_transpose_y_0"), val = tensor(true)]; + tensor attn_73_cast = matmul(transpose_x = attn_73_transpose_x_0, transpose_y = attn_73_transpose_y_0, x = var_3903_cast, y = var_3907_cast)[name = tensor("attn_73_cast")]; + tensor var_3911 = const()[name = tensor("op_3911"), val = tensor([2, 1280, 1, -1])]; + tensor input_261_cast = reshape(shape = var_3911, x = attn_73_cast)[name = tensor("input_261_cast")]; + tensor var_3916 = const()[name = tensor("op_3916"), val = tensor([1, 1])]; + tensor var_3918 = const()[name = tensor("op_3918"), val = tensor([1, 1])]; + tensor var_3920_pad_type_0 = const()[name = tensor("op_3920_pad_type_0"), val = 
tensor("custom")]; + tensor var_3920_pad_0 = const()[name = tensor("op_3920_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(510780928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(511600192))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(511600320)))]; + tensor var_3920_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_3918, groups = var_1186, pad = var_3920_pad_0, pad_type = var_3920_pad_type_0, strides = var_3916, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_261_cast)[name = tensor("op_3920_cast")]; + tensor inputs_111_cast = add(x = var_3920_cast, y = inputs_109_cast)[name = tensor("inputs_111_cast")]; + tensor var_3924 = const()[name = tensor("op_3924"), val = tensor([1])]; + tensor channels_mean_111_cast = reduce_mean(axes = var_3924, keep_dims = var_1181, x = inputs_111_cast)[name = tensor("channels_mean_111_cast")]; + tensor zero_mean_111_cast = sub(x = inputs_111_cast, y = channels_mean_111_cast)[name = tensor("zero_mean_111_cast")]; + tensor zero_mean_sq_111_cast = mul(x = zero_mean_111_cast, y = zero_mean_111_cast)[name = tensor("zero_mean_sq_111_cast")]; + tensor var_3928 = const()[name = tensor("op_3928"), val = tensor([1])]; + tensor var_3929_cast = reduce_mean(axes = var_3928, keep_dims = var_1181, x = zero_mean_sq_111_cast)[name = tensor("op_3929_cast")]; + tensor var_3930_to_fp16 = const()[name = tensor("op_3930_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3931_cast = add(x = var_3929_cast, y = var_3930_to_fp16)[name = tensor("op_3931_cast")]; + tensor denom_111_epsilon_0_to_fp16 = const()[name = tensor("denom_111_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_111_cast = rsqrt(epsilon = denom_111_epsilon_0_to_fp16, x = var_3931_cast)[name = tensor("denom_111_cast")]; + tensor out_111_cast = mul(x = zero_mean_111_cast, y = denom_111_cast)[name = tensor("out_111_cast")]; + tensor var_3935_to_fp16 = const()[name = tensor("op_3935_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(511602944)))]; + tensor var_3936_cast = add(x = out_111_cast, y = var_3935_to_fp16)[name = tensor("op_3936_cast")]; + tensor var_3938_to_fp16 = const()[name = tensor("op_3938_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(511605568)))]; + tensor hidden_states_163_cast = mul(x = var_3936_cast, y = var_3938_to_fp16)[name = tensor("hidden_states_163_cast")]; + tensor var_3945 = const()[name = tensor("op_3945"), val = tensor([1, 1])]; + tensor var_3947 = const()[name = tensor("op_3947"), val = tensor([1, 1])]; + tensor q_75_pad_type_0 = const()[name = tensor("q_75_pad_type_0"), val = tensor("custom")]; + tensor q_75_pad_0 = const()[name = tensor("q_75_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(511608192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(512837056))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_75_cast = conv(dilations = var_3947, groups = var_1186, pad = q_75_pad_0, pad_type = q_75_pad_type_0, strides = var_3945, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_163_cast)[name = tensor("q_75_cast")]; + tensor var_3951 = const()[name = tensor("op_3951"), val = tensor([1, 1])]; + tensor var_3953 = const()[name = tensor("op_3953"), val = tensor([1, 1])]; + tensor k_75_pad_type_0 = const()[name = tensor("k_75_pad_type_0"), val = tensor("custom")]; + tensor k_75_pad_0 = const()[name = tensor("k_75_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(512837248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(514148032))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_75_cast = conv(dilations = var_3953, groups = var_1186, pad = k_75_pad_0, pad_type = k_75_pad_type_0, strides = var_3951, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_75_cast")]; + tensor var_3957 = const()[name = tensor("op_3957"), val = tensor([1, 1])]; + tensor var_3959 = const()[name = tensor("op_3959"), val = tensor([1, 1])]; + tensor v_75_pad_type_0 = const()[name = tensor("v_75_pad_type_0"), val = tensor("custom")]; + tensor v_75_pad_0 = const()[name = tensor("v_75_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(514148160))), lut = tensor([-0x1.17cp-6, 0x1.188p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_75_cast = conv(dilations = var_3959, groups = var_1186, pad = v_75_pad_0, pad_type = v_75_pad_type_0, strides = var_3957, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_75_cast")]; + tensor var_3963 = const()[name = tensor("op_3963"), val = tensor([2, 20, 64, -1])]; + tensor var_3964_cast = reshape(shape = var_3963, x = q_75_cast)[name = tensor("op_3964_cast")]; + tensor var_3965 = const()[name = tensor("op_3965"), val = tensor([2, 20, 64, -1])]; + tensor var_3966_cast = reshape(shape = var_3965, x = k_75_cast)[name = tensor("op_3966_cast")]; + tensor var_3967 = const()[name = tensor("op_3967"), val = tensor([2, 20, 64, -1])]; + tensor var_3968_cast = reshape(shape = var_3967, x = v_75_cast)[name = tensor("op_3968_cast")]; + tensor attn_weights_149_transpose_x_0 = const()[name = tensor("attn_weights_149_transpose_x_0"), val = tensor(true)]; + tensor 
attn_weights_149_transpose_y_0 = const()[name = tensor("attn_weights_149_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_149_cast = matmul(transpose_x = attn_weights_149_transpose_x_0, transpose_y = attn_weights_149_transpose_y_0, x = var_3964_cast, y = var_3966_cast)[name = tensor("attn_weights_149_cast")]; + tensor attn_weights_151_cast = mul(x = attn_weights_149_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_151_cast")]; + tensor var_3972_cast = softmax(axis = var_1170, x = attn_weights_151_cast)[name = tensor("op_3972_cast")]; + tensor attn_75_transpose_x_0 = const()[name = tensor("attn_75_transpose_x_0"), val = tensor(false)]; + tensor attn_75_transpose_y_0 = const()[name = tensor("attn_75_transpose_y_0"), val = tensor(true)]; + tensor attn_75_cast = matmul(transpose_x = attn_75_transpose_x_0, transpose_y = attn_75_transpose_y_0, x = var_3968_cast, y = var_3972_cast)[name = tensor("attn_75_cast")]; + tensor var_3976 = const()[name = tensor("op_3976"), val = tensor([2, 1280, 1, -1])]; + tensor input_263_cast = reshape(shape = var_3976, x = attn_75_cast)[name = tensor("input_263_cast")]; + tensor var_3981 = const()[name = tensor("op_3981"), val = tensor([1, 1])]; + tensor var_3983 = const()[name = tensor("op_3983"), val = tensor([1, 1])]; + tensor var_3985_pad_type_0 = const()[name = tensor("op_3985_pad_type_0"), val = tensor("custom")]; + tensor var_3985_pad_0 = const()[name = tensor("op_3985_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(514475904))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(515295168))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(515295296)))]; + tensor var_3985_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_3983, groups = var_1186, pad = var_3985_pad_0, pad_type = var_3985_pad_type_0, strides = var_3981, weight = down_blocks_2_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_263_cast)[name = tensor("op_3985_cast")]; + tensor inputs_113_cast = add(x = var_3985_cast, y = inputs_111_cast)[name = tensor("inputs_113_cast")]; + tensor var_3989 = const()[name = tensor("op_3989"), val = tensor([1])]; + tensor channels_mean_113_cast = reduce_mean(axes = var_3989, keep_dims = var_1181, x = inputs_113_cast)[name = tensor("channels_mean_113_cast")]; + tensor zero_mean_113_cast = sub(x = inputs_113_cast, y = channels_mean_113_cast)[name = tensor("zero_mean_113_cast")]; + tensor zero_mean_sq_113_cast = mul(x = zero_mean_113_cast, y = zero_mean_113_cast)[name = tensor("zero_mean_sq_113_cast")]; + tensor var_3993 = const()[name = tensor("op_3993"), val = tensor([1])]; + tensor var_3994_cast = reduce_mean(axes = var_3993, keep_dims = var_1181, x = zero_mean_sq_113_cast)[name = tensor("op_3994_cast")]; + tensor var_3995_to_fp16 = const()[name = tensor("op_3995_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_3996_cast = add(x = var_3994_cast, 
y = var_3995_to_fp16)[name = tensor("op_3996_cast")]; + tensor denom_113_epsilon_0_to_fp16 = const()[name = tensor("denom_113_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_113_cast = rsqrt(epsilon = denom_113_epsilon_0_to_fp16, x = var_3996_cast)[name = tensor("denom_113_cast")]; + tensor out_113_cast = mul(x = zero_mean_113_cast, y = denom_113_cast)[name = tensor("out_113_cast")]; + tensor var_4000_to_fp16 = const()[name = tensor("op_4000_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(515297920)))]; + tensor var_4001_cast = add(x = out_113_cast, y = var_4000_to_fp16)[name = tensor("op_4001_cast")]; + tensor var_4003_to_fp16 = const()[name = tensor("op_4003_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(515300544)))]; + tensor input_265_cast = mul(x = var_4001_cast, y = var_4003_to_fp16)[name = tensor("input_265_cast")]; + tensor var_4011 = const()[name = tensor("op_4011"), val = tensor([1, 1])]; + tensor var_4013 = const()[name = tensor("op_4013"), val = tensor([1, 1])]; + tensor var_4015_pad_type_0 = const()[name = tensor("op_4015_pad_type_0"), val = tensor("custom")]; + tensor var_4015_pad_0 = const()[name = tensor("op_4015_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(515303168))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(525133632))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(525133824)))]; + tensor var_4015_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_4013, groups = var_1186, pad = var_4015_pad_0, pad_type = var_4015_pad_type_0, strides = var_4011, weight = down_blocks_2_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_265_cast)[name = tensor("op_4015_cast")]; + tensor var_4016_split_sizes_0 = const()[name = tensor("op_4016_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4016_axis_0 = const()[name = tensor("op_4016_axis_0"), val = tensor(1)]; + tensor var_4016_cast_0, tensor var_4016_cast_1 = split(axis = var_4016_axis_0, split_sizes = var_4016_split_sizes_0, x = var_4015_cast)[name = tensor("op_4016_cast")]; + tensor var_4018_mode_0 = const()[name = tensor("op_4018_mode_0"), val = tensor("EXACT")]; + tensor var_4018_cast = gelu(mode = var_4018_mode_0, x = var_4016_cast_1)[name = tensor("op_4018_cast")]; + tensor input_267_cast = mul(x = var_4016_cast_0, y = var_4018_cast)[name = tensor("input_267_cast")]; + tensor var_4022 = const()[name = tensor("op_4022"), val = tensor([1, 1])]; + tensor var_4024 = const()[name = tensor("op_4024"), val = tensor([1, 1])]; + tensor var_4026_pad_type_0 = const()[name = tensor("op_4026_pad_type_0"), val = tensor("custom")]; + tensor var_4026_pad_0 = const()[name = tensor("op_4026_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(525154368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(530069632))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(530069824)))]; + tensor var_4026_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_4024, groups = var_1186, pad = var_4026_pad_0, pad_type = var_4026_pad_type_0, strides = var_4022, weight = down_blocks_2_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_267_cast)[name = tensor("op_4026_cast")]; + tensor inputs_115_cast = add(x = var_4026_cast, y = inputs_113_cast)[name = tensor("inputs_115_cast")]; + tensor var_4036 = const()[name = tensor("op_4036"), val = tensor([1])]; + tensor channels_mean_115_cast = reduce_mean(axes = var_4036, keep_dims = var_1181, x = inputs_115_cast)[name = tensor("channels_mean_115_cast")]; + tensor zero_mean_115_cast = sub(x = inputs_115_cast, y = channels_mean_115_cast)[name = tensor("zero_mean_115_cast")]; + tensor zero_mean_sq_115_cast = mul(x = zero_mean_115_cast, y = zero_mean_115_cast)[name = tensor("zero_mean_sq_115_cast")]; + tensor var_4040 = const()[name = tensor("op_4040"), val = tensor([1])]; + tensor var_4041_cast = reduce_mean(axes = var_4040, keep_dims = var_1181, x = zero_mean_sq_115_cast)[name = tensor("op_4041_cast")]; + tensor var_4042_to_fp16 = const()[name = tensor("op_4042_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4043_cast = add(x = var_4041_cast, y = var_4042_to_fp16)[name = tensor("op_4043_cast")]; + tensor denom_115_epsilon_0_to_fp16 = const()[name = tensor("denom_115_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_115_cast = rsqrt(epsilon = denom_115_epsilon_0_to_fp16, x = var_4043_cast)[name = tensor("denom_115_cast")]; + tensor out_115_cast = mul(x = zero_mean_115_cast, y = denom_115_cast)[name = tensor("out_115_cast")]; + tensor var_4047_to_fp16 = const()[name = tensor("op_4047_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(530072448)))]; + tensor var_4048_cast = add(x = out_115_cast, y = var_4047_to_fp16)[name = tensor("op_4048_cast")]; + tensor var_4050_to_fp16 = const()[name = tensor("op_4050_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(530075072)))]; + tensor hidden_states_167_cast = mul(x = var_4048_cast, y = var_4050_to_fp16)[name = tensor("hidden_states_167_cast")]; + tensor var_4057 = const()[name = tensor("op_4057"), val = tensor([1, 1])]; + tensor var_4059 = const()[name = tensor("op_4059"), val = tensor([1, 1])]; + tensor q_77_pad_type_0 = const()[name = tensor("q_77_pad_type_0"), val = tensor("custom")]; + tensor q_77_pad_0 = const()[name = tensor("q_77_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(530077696))), lut = tensor([-0x1.3f8p-5, -0x1.80cp-7, 0x1.7fcp-7, 0x1.3f4p-5]), name = 
tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_77_cast = conv(dilations = var_4059, groups = var_1186, pad = q_77_pad_0, pad_type = q_77_pad_type_0, strides = var_4057, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_167_cast)[name = tensor("q_77_cast")]; + tensor var_4063 = const()[name = tensor("op_4063"), val = tensor([1, 1])]; + tensor var_4065 = const()[name = tensor("op_4065"), val = tensor([1, 1])]; + tensor k_77_pad_type_0 = const()[name = tensor("k_77_pad_type_0"), val = tensor("custom")]; + tensor k_77_pad_0 = const()[name = tensor("k_77_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(530487360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(531306624))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_77_cast = conv(dilations = var_4065, groups = var_1186, pad = k_77_pad_0, pad_type = k_77_pad_type_0, strides = var_4063, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_167_cast)[name = tensor("k_77_cast")]; + tensor var_4069 = const()[name = tensor("op_4069"), val = tensor([1, 1])]; + tensor var_4071 = const()[name = tensor("op_4071"), val = tensor([1, 1])]; + tensor v_77_pad_type_0 = const()[name = tensor("v_77_pad_type_0"), val = tensor("custom")]; + tensor v_77_pad_0 = const()[name = tensor("v_77_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(531306752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(532126016))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_77_cast = conv(dilations = var_4071, groups = var_1186, pad = v_77_pad_0, pad_type = v_77_pad_type_0, strides = var_4069, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_167_cast)[name = tensor("v_77_cast")]; + tensor var_4075 = const()[name = tensor("op_4075"), val = tensor([2, 20, 64, -1])]; + tensor var_4076_cast = reshape(shape = var_4075, x = q_77_cast)[name = tensor("op_4076_cast")]; + tensor var_4077 = const()[name = tensor("op_4077"), val = tensor([2, 20, 64, -1])]; + tensor var_4078_cast = reshape(shape = var_4077, x = k_77_cast)[name = tensor("op_4078_cast")]; + tensor var_4079 = const()[name = tensor("op_4079"), val = tensor([2, 20, 64, -1])]; + tensor var_4080_cast = reshape(shape = var_4079, x = v_77_cast)[name = tensor("op_4080_cast")]; + tensor attn_weights_153_transpose_x_0 = const()[name = tensor("attn_weights_153_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_153_transpose_y_0 = const()[name = tensor("attn_weights_153_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_153_cast = matmul(transpose_x = attn_weights_153_transpose_x_0, transpose_y = attn_weights_153_transpose_y_0, x = var_4076_cast, y = 
var_4078_cast)[name = tensor("attn_weights_153_cast")]; + tensor attn_weights_155_cast = mul(x = attn_weights_153_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_155_cast")]; + tensor var_4084_cast = softmax(axis = var_1170, x = attn_weights_155_cast)[name = tensor("op_4084_cast")]; + tensor attn_77_transpose_x_0 = const()[name = tensor("attn_77_transpose_x_0"), val = tensor(false)]; + tensor attn_77_transpose_y_0 = const()[name = tensor("attn_77_transpose_y_0"), val = tensor(true)]; + tensor attn_77_cast = matmul(transpose_x = attn_77_transpose_x_0, transpose_y = attn_77_transpose_y_0, x = var_4080_cast, y = var_4084_cast)[name = tensor("attn_77_cast")]; + tensor var_4088 = const()[name = tensor("op_4088"), val = tensor([2, 1280, 1, -1])]; + tensor input_269_cast = reshape(shape = var_4088, x = attn_77_cast)[name = tensor("input_269_cast")]; + tensor var_4093 = const()[name = tensor("op_4093"), val = tensor([1, 1])]; + tensor var_4095 = const()[name = tensor("op_4095"), val = tensor([1, 1])]; + tensor var_4097_pad_type_0 = const()[name = tensor("op_4097_pad_type_0"), val = tensor("custom")]; + tensor var_4097_pad_0 = const()[name = tensor("op_4097_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(532126144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(532945408))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(532945536)))]; + tensor var_4097_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_4095, groups = var_1186, pad = var_4097_pad_0, pad_type = var_4097_pad_type_0, strides = var_4093, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_269_cast)[name = tensor("op_4097_cast")]; + tensor inputs_117_cast = add(x = var_4097_cast, y = inputs_115_cast)[name = tensor("inputs_117_cast")]; + tensor var_4101 = const()[name = tensor("op_4101"), val = tensor([1])]; + tensor channels_mean_117_cast = reduce_mean(axes = var_4101, keep_dims = var_1181, x = inputs_117_cast)[name = tensor("channels_mean_117_cast")]; + tensor zero_mean_117_cast = sub(x = inputs_117_cast, y = channels_mean_117_cast)[name = tensor("zero_mean_117_cast")]; + tensor zero_mean_sq_117_cast = mul(x = zero_mean_117_cast, y = zero_mean_117_cast)[name = tensor("zero_mean_sq_117_cast")]; + tensor var_4105 = const()[name = tensor("op_4105"), val = tensor([1])]; + tensor var_4106_cast = reduce_mean(axes = var_4105, keep_dims = var_1181, x = zero_mean_sq_117_cast)[name = tensor("op_4106_cast")]; + tensor var_4107_to_fp16 = const()[name = tensor("op_4107_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4108_cast = add(x = var_4106_cast, y = var_4107_to_fp16)[name = tensor("op_4108_cast")]; + tensor denom_117_epsilon_0_to_fp16 = const()[name = tensor("denom_117_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_117_cast = rsqrt(epsilon = denom_117_epsilon_0_to_fp16, x = var_4108_cast)[name = 
tensor("denom_117_cast")]; + tensor out_117_cast = mul(x = zero_mean_117_cast, y = denom_117_cast)[name = tensor("out_117_cast")]; + tensor var_4112_to_fp16 = const()[name = tensor("op_4112_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(532948160)))]; + tensor var_4113_cast = add(x = out_117_cast, y = var_4112_to_fp16)[name = tensor("op_4113_cast")]; + tensor var_4115_to_fp16 = const()[name = tensor("op_4115_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(532950784)))]; + tensor hidden_states_169_cast = mul(x = var_4113_cast, y = var_4115_to_fp16)[name = tensor("hidden_states_169_cast")]; + tensor var_4122 = const()[name = tensor("op_4122"), val = tensor([1, 1])]; + tensor var_4124 = const()[name = tensor("op_4124"), val = tensor([1, 1])]; + tensor q_79_pad_type_0 = const()[name = tensor("q_79_pad_type_0"), val = tensor("custom")]; + tensor q_79_pad_0 = const()[name = tensor("q_79_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(532953408))), lut = tensor([-0x1.fe4p-6, -0x1.35p-7, 0x1.33p-7, 0x1.fccp-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_79_cast = conv(dilations = var_4124, groups = var_1186, pad = q_79_pad_0, pad_type = q_79_pad_type_0, strides = var_4122, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_169_cast)[name = tensor("q_79_cast")]; + tensor var_4128 = const()[name = tensor("op_4128"), val = tensor([1, 1])]; + tensor var_4130 = const()[name = tensor("op_4130"), val = tensor([1, 1])]; + tensor k_79_pad_type_0 = const()[name = tensor("k_79_pad_type_0"), val = tensor("custom")]; + tensor k_79_pad_0 = const()[name = tensor("k_79_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(533363072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(534673856))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_79_cast = conv(dilations = var_4130, groups = var_1186, pad = k_79_pad_0, pad_type = k_79_pad_type_0, strides = var_4128, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_79_cast")]; + tensor var_4134 = const()[name = tensor("op_4134"), val = tensor([1, 1])]; + tensor var_4136 = const()[name = tensor("op_4136"), val = tensor([1, 1])]; + tensor v_79_pad_type_0 = const()[name = tensor("v_79_pad_type_0"), val = tensor("custom")]; + tensor v_79_pad_0 = const()[name = tensor("v_79_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(534673984))), lut = tensor([-0x1.12cp-6, 0x1.12p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = 
tensor([1280, 2048, 1, 1])]; + tensor v_79_cast = conv(dilations = var_4136, groups = var_1186, pad = v_79_pad_0, pad_type = v_79_pad_type_0, strides = var_4134, weight = down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_79_cast")]; + tensor var_4140 = const()[name = tensor("op_4140"), val = tensor([2, 20, 64, -1])]; + tensor var_4141_cast = reshape(shape = var_4140, x = q_79_cast)[name = tensor("op_4141_cast")]; + tensor var_4142 = const()[name = tensor("op_4142"), val = tensor([2, 20, 64, -1])]; + tensor var_4143_cast = reshape(shape = var_4142, x = k_79_cast)[name = tensor("op_4143_cast")]; + tensor var_4144 = const()[name = tensor("op_4144"), val = tensor([2, 20, 64, -1])]; + tensor var_4145_cast = reshape(shape = var_4144, x = v_79_cast)[name = tensor("op_4145_cast")]; + tensor attn_weights_157_transpose_x_0 = const()[name = tensor("attn_weights_157_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_157_transpose_y_0 = const()[name = tensor("attn_weights_157_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_157_cast = matmul(transpose_x = attn_weights_157_transpose_x_0, transpose_y = attn_weights_157_transpose_y_0, x = var_4141_cast, y = var_4143_cast)[name = tensor("attn_weights_157_cast")]; + tensor attn_weights_159_cast = mul(x = attn_weights_157_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_159_cast")]; + tensor var_4149_cast = softmax(axis = var_1170, x = attn_weights_159_cast)[name = tensor("op_4149_cast")]; + tensor attn_79_transpose_x_0 = const()[name = tensor("attn_79_transpose_x_0"), val = tensor(false)]; + tensor attn_79_transpose_y_0 = const()[name = tensor("attn_79_transpose_y_0"), val = tensor(true)]; + tensor attn_79_cast = matmul(transpose_x = attn_79_transpose_x_0, transpose_y = attn_79_transpose_y_0, x = var_4145_cast, y = var_4149_cast)[name = tensor("attn_79_cast")]; + tensor var_4153 = const()[name = tensor("op_4153"), val = tensor([2, 1280, 1, -1])]; + tensor input_271_cast = reshape(shape = var_4153, x = attn_79_cast)[name = tensor("input_271_cast")]; + tensor var_4158 = const()[name = tensor("op_4158"), val = tensor([1, 1])]; + tensor var_4160 = const()[name = tensor("op_4160"), val = tensor([1, 1])]; + tensor var_4162_pad_type_0 = const()[name = tensor("op_4162_pad_type_0"), val = tensor("custom")]; + tensor var_4162_pad_0 = const()[name = tensor("op_4162_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(535001728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(535820992))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(535821120)))]; + tensor var_4162_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_4160, groups = var_1186, pad = var_4162_pad_0, pad_type = var_4162_pad_type_0, strides = var_4158, weight = 
down_blocks_2_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_271_cast)[name = tensor("op_4162_cast")]; + tensor inputs_119_cast = add(x = var_4162_cast, y = inputs_117_cast)[name = tensor("inputs_119_cast")]; + tensor var_4166 = const()[name = tensor("op_4166"), val = tensor([1])]; + tensor channels_mean_119_cast = reduce_mean(axes = var_4166, keep_dims = var_1181, x = inputs_119_cast)[name = tensor("channels_mean_119_cast")]; + tensor zero_mean_119_cast = sub(x = inputs_119_cast, y = channels_mean_119_cast)[name = tensor("zero_mean_119_cast")]; + tensor zero_mean_sq_119_cast = mul(x = zero_mean_119_cast, y = zero_mean_119_cast)[name = tensor("zero_mean_sq_119_cast")]; + tensor var_4170 = const()[name = tensor("op_4170"), val = tensor([1])]; + tensor var_4171_cast = reduce_mean(axes = var_4170, keep_dims = var_1181, x = zero_mean_sq_119_cast)[name = tensor("op_4171_cast")]; + tensor var_4172_to_fp16 = const()[name = tensor("op_4172_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4173_cast = add(x = var_4171_cast, y = var_4172_to_fp16)[name = tensor("op_4173_cast")]; + tensor denom_119_epsilon_0_to_fp16 = const()[name = tensor("denom_119_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_119_cast = rsqrt(epsilon = denom_119_epsilon_0_to_fp16, x = var_4173_cast)[name = tensor("denom_119_cast")]; + tensor out_119_cast = mul(x = zero_mean_119_cast, y = denom_119_cast)[name = tensor("out_119_cast")]; + tensor var_4177_to_fp16 = const()[name = tensor("op_4177_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(535823744)))]; + tensor var_4178_cast = add(x = out_119_cast, y = var_4177_to_fp16)[name = tensor("op_4178_cast")]; + tensor var_4180_to_fp16 = const()[name = tensor("op_4180_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(535826368)))]; + tensor input_273_cast = mul(x = var_4178_cast, y = var_4180_to_fp16)[name = tensor("input_273_cast")]; + tensor var_4188 = const()[name = tensor("op_4188"), val = tensor([1, 1])]; + tensor var_4190 = const()[name = tensor("op_4190"), val = tensor([1, 1])]; + tensor var_4192_pad_type_0 = const()[name = tensor("op_4192_pad_type_0"), val = tensor("custom")]; + tensor var_4192_pad_0 = const()[name = tensor("op_4192_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(535828992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(545659456))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(545659648)))]; + tensor var_4192_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_4190, groups = var_1186, pad = var_4192_pad_0, pad_type = var_4192_pad_type_0, strides = var_4188, weight = down_blocks_2_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_273_cast)[name = tensor("op_4192_cast")]; + tensor var_4193_split_sizes_0 = 
const()[name = tensor("op_4193_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4193_axis_0 = const()[name = tensor("op_4193_axis_0"), val = tensor(1)]; + tensor var_4193_cast_0, tensor var_4193_cast_1 = split(axis = var_4193_axis_0, split_sizes = var_4193_split_sizes_0, x = var_4192_cast)[name = tensor("op_4193_cast")]; + tensor var_4195_mode_0 = const()[name = tensor("op_4195_mode_0"), val = tensor("EXACT")]; + tensor var_4195_cast = gelu(mode = var_4195_mode_0, x = var_4193_cast_1)[name = tensor("op_4195_cast")]; + tensor input_275_cast = mul(x = var_4193_cast_0, y = var_4195_cast)[name = tensor("input_275_cast")]; + tensor var_4199 = const()[name = tensor("op_4199"), val = tensor([1, 1])]; + tensor var_4201 = const()[name = tensor("op_4201"), val = tensor([1, 1])]; + tensor var_4203_pad_type_0 = const()[name = tensor("op_4203_pad_type_0"), val = tensor("custom")]; + tensor var_4203_pad_0 = const()[name = tensor("op_4203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(545680192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(550595456))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(550595648)))]; + tensor var_4203_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_4201, groups = var_1186, pad = var_4203_pad_0, pad_type = var_4203_pad_type_0, strides = var_4199, weight = down_blocks_2_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_275_cast)[name = tensor("op_4203_cast")]; + tensor inputs_121_cast = add(x = var_4203_cast, y = inputs_119_cast)[name = tensor("inputs_121_cast")]; + tensor var_4213 = const()[name = tensor("op_4213"), val = tensor([1])]; + tensor channels_mean_121_cast = reduce_mean(axes = var_4213, keep_dims = var_1181, x = inputs_121_cast)[name = tensor("channels_mean_121_cast")]; + tensor zero_mean_121_cast = sub(x = inputs_121_cast, y = channels_mean_121_cast)[name = tensor("zero_mean_121_cast")]; + tensor zero_mean_sq_121_cast = mul(x = zero_mean_121_cast, y = zero_mean_121_cast)[name = tensor("zero_mean_sq_121_cast")]; + tensor var_4217 = const()[name = tensor("op_4217"), val = tensor([1])]; + tensor var_4218_cast = reduce_mean(axes = var_4217, keep_dims = var_1181, x = zero_mean_sq_121_cast)[name = tensor("op_4218_cast")]; + tensor var_4219_to_fp16 = const()[name = tensor("op_4219_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4220_cast = add(x = var_4218_cast, y = var_4219_to_fp16)[name = tensor("op_4220_cast")]; + tensor denom_121_epsilon_0_to_fp16 = const()[name = tensor("denom_121_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_121_cast = rsqrt(epsilon = denom_121_epsilon_0_to_fp16, x = var_4220_cast)[name = tensor("denom_121_cast")]; + tensor out_121_cast = mul(x = zero_mean_121_cast, y = denom_121_cast)[name = tensor("out_121_cast")]; + tensor var_4224_to_fp16 = const()[name = tensor("op_4224_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(550598272)))]; + tensor var_4225_cast = add(x = out_121_cast, y = var_4224_to_fp16)[name = tensor("op_4225_cast")]; + tensor var_4227_to_fp16 = const()[name = tensor("op_4227_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(550600896)))]; + tensor hidden_states_173_cast = mul(x = var_4225_cast, y = var_4227_to_fp16)[name = tensor("hidden_states_173_cast")]; + tensor var_4234 = const()[name = tensor("op_4234"), val = tensor([1, 1])]; + tensor var_4236 = const()[name = tensor("op_4236"), val = tensor([1, 1])]; + tensor q_81_pad_type_0 = const()[name = tensor("q_81_pad_type_0"), val = tensor("custom")]; + tensor q_81_pad_0 = const()[name = tensor("q_81_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(550603520))), lut = tensor([-0x1.50cp-6, 0x1.51p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_81_cast = conv(dilations = var_4236, groups = var_1186, pad = q_81_pad_0, pad_type = q_81_pad_type_0, strides = var_4234, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_173_cast)[name = tensor("q_81_cast")]; + tensor var_4240 = const()[name = tensor("op_4240"), val = tensor([1, 1])]; + tensor var_4242 = const()[name = tensor("op_4242"), val = tensor([1, 1])]; + tensor k_81_pad_type_0 = const()[name = tensor("k_81_pad_type_0"), val = tensor("custom")]; + tensor k_81_pad_0 = const()[name = tensor("k_81_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(550808384))), lut = tensor([-0x1.3a4p-5, -0x1.7acp-7, 0x1.7b4p-7, 0x1.3a4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_81_cast = conv(dilations = var_4242, groups = var_1186, pad = k_81_pad_0, pad_type = k_81_pad_type_0, strides = var_4240, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_173_cast)[name = tensor("k_81_cast")]; + tensor var_4246 = const()[name = tensor("op_4246"), val = tensor([1, 1])]; + tensor var_4248 = const()[name = tensor("op_4248"), val = tensor([1, 1])]; + tensor v_81_pad_type_0 = const()[name = tensor("v_81_pad_type_0"), val = tensor("custom")]; + tensor v_81_pad_0 = const()[name = tensor("v_81_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(551218048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(552037312))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_81_cast = conv(dilations = var_4248, groups = var_1186, pad = v_81_pad_0, pad_type = v_81_pad_type_0, strides = var_4246, weight = 
down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_173_cast)[name = tensor("v_81_cast")]; + tensor var_4252 = const()[name = tensor("op_4252"), val = tensor([2, 20, 64, -1])]; + tensor var_4253_cast = reshape(shape = var_4252, x = q_81_cast)[name = tensor("op_4253_cast")]; + tensor var_4254 = const()[name = tensor("op_4254"), val = tensor([2, 20, 64, -1])]; + tensor var_4255_cast = reshape(shape = var_4254, x = k_81_cast)[name = tensor("op_4255_cast")]; + tensor var_4256 = const()[name = tensor("op_4256"), val = tensor([2, 20, 64, -1])]; + tensor var_4257_cast = reshape(shape = var_4256, x = v_81_cast)[name = tensor("op_4257_cast")]; + tensor attn_weights_161_transpose_x_0 = const()[name = tensor("attn_weights_161_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_161_transpose_y_0 = const()[name = tensor("attn_weights_161_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_161_cast = matmul(transpose_x = attn_weights_161_transpose_x_0, transpose_y = attn_weights_161_transpose_y_0, x = var_4253_cast, y = var_4255_cast)[name = tensor("attn_weights_161_cast")]; + tensor attn_weights_163_cast = mul(x = attn_weights_161_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_163_cast")]; + tensor var_4261_cast = softmax(axis = var_1170, x = attn_weights_163_cast)[name = tensor("op_4261_cast")]; + tensor attn_81_transpose_x_0 = const()[name = tensor("attn_81_transpose_x_0"), val = tensor(false)]; + tensor attn_81_transpose_y_0 = const()[name = tensor("attn_81_transpose_y_0"), val = tensor(true)]; + tensor attn_81_cast = matmul(transpose_x = attn_81_transpose_x_0, transpose_y = attn_81_transpose_y_0, x = var_4257_cast, y = var_4261_cast)[name = tensor("attn_81_cast")]; + tensor var_4265 = const()[name = tensor("op_4265"), val = tensor([2, 1280, 1, -1])]; + tensor input_277_cast = reshape(shape = var_4265, x = attn_81_cast)[name = tensor("input_277_cast")]; + tensor var_4270 = const()[name = tensor("op_4270"), val = tensor([1, 1])]; + tensor var_4272 = const()[name = tensor("op_4272"), val = tensor([1, 1])]; + tensor var_4274_pad_type_0 = const()[name = tensor("op_4274_pad_type_0"), val = tensor("custom")]; + tensor var_4274_pad_0 = const()[name = tensor("op_4274_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(552037440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553266304))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553266496)))]; + tensor var_4274_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_4272, groups = var_1186, pad = var_4274_pad_0, pad_type = var_4274_pad_type_0, strides = var_4270, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_277_cast)[name = tensor("op_4274_cast")]; + tensor inputs_123_cast = add(x = var_4274_cast, y = inputs_121_cast)[name = 
tensor("inputs_123_cast")]; + tensor var_4278 = const()[name = tensor("op_4278"), val = tensor([1])]; + tensor channels_mean_123_cast = reduce_mean(axes = var_4278, keep_dims = var_1181, x = inputs_123_cast)[name = tensor("channels_mean_123_cast")]; + tensor zero_mean_123_cast = sub(x = inputs_123_cast, y = channels_mean_123_cast)[name = tensor("zero_mean_123_cast")]; + tensor zero_mean_sq_123_cast = mul(x = zero_mean_123_cast, y = zero_mean_123_cast)[name = tensor("zero_mean_sq_123_cast")]; + tensor var_4282 = const()[name = tensor("op_4282"), val = tensor([1])]; + tensor var_4283_cast = reduce_mean(axes = var_4282, keep_dims = var_1181, x = zero_mean_sq_123_cast)[name = tensor("op_4283_cast")]; + tensor var_4284_to_fp16 = const()[name = tensor("op_4284_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4285_cast = add(x = var_4283_cast, y = var_4284_to_fp16)[name = tensor("op_4285_cast")]; + tensor denom_123_epsilon_0_to_fp16 = const()[name = tensor("denom_123_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_123_cast = rsqrt(epsilon = denom_123_epsilon_0_to_fp16, x = var_4285_cast)[name = tensor("denom_123_cast")]; + tensor out_123_cast = mul(x = zero_mean_123_cast, y = denom_123_cast)[name = tensor("out_123_cast")]; + tensor var_4289_to_fp16 = const()[name = tensor("op_4289_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553269120)))]; + tensor var_4290_cast = add(x = out_123_cast, y = var_4289_to_fp16)[name = tensor("op_4290_cast")]; + tensor var_4292_to_fp16 = const()[name = tensor("op_4292_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553271744)))]; + tensor hidden_states_175_cast = mul(x = var_4290_cast, y = var_4292_to_fp16)[name = tensor("hidden_states_175_cast")]; + tensor var_4299 = const()[name = tensor("op_4299"), val = tensor([1, 1])]; + tensor var_4301 = const()[name = tensor("op_4301"), val = tensor([1, 1])]; + tensor q_83_pad_type_0 = const()[name = tensor("q_83_pad_type_0"), val = tensor("custom")]; + tensor q_83_pad_0 = const()[name = tensor("q_83_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553274368))), lut = tensor([-0x1.bf4p-6, -0x1.11p-7, 0x1.144p-7, 0x1.c1p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_83_cast = conv(dilations = var_4301, groups = var_1186, pad = q_83_pad_0, pad_type = q_83_pad_type_0, strides = var_4299, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_175_cast)[name = tensor("q_83_cast")]; + tensor var_4305 = const()[name = tensor("op_4305"), val = tensor([1, 1])]; + tensor var_4307 = const()[name = tensor("op_4307"), val = tensor([1, 1])]; + tensor k_83_pad_type_0 = const()[name = tensor("k_83_pad_type_0"), val = tensor("custom")]; + tensor k_83_pad_0 = const()[name = tensor("k_83_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(553684032))), lut = tensor([-0x1.69cp-6, -0x1.b04p-8, 0x1.ac8p-8, 0x1.698p-6]), name = 
tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_83_cast = conv(dilations = var_4307, groups = var_1186, pad = k_83_pad_0, pad_type = k_83_pad_type_0, strides = var_4305, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_83_cast")]; + tensor var_4311 = const()[name = tensor("op_4311"), val = tensor([1, 1])]; + tensor var_4313 = const()[name = tensor("op_4313"), val = tensor([1, 1])]; + tensor v_83_pad_type_0 = const()[name = tensor("v_83_pad_type_0"), val = tensor("custom")]; + tensor v_83_pad_0 = const()[name = tensor("v_83_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554339456))), lut = tensor([-0x1.fb4p-7, 0x1.fap-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_83_cast = conv(dilations = var_4313, groups = var_1186, pad = v_83_pad_0, pad_type = v_83_pad_type_0, strides = var_4311, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_83_cast")]; + tensor var_4317 = const()[name = tensor("op_4317"), val = tensor([2, 20, 64, -1])]; + tensor var_4318_cast = reshape(shape = var_4317, x = q_83_cast)[name = tensor("op_4318_cast")]; + tensor var_4319 = const()[name = tensor("op_4319"), val = tensor([2, 20, 64, -1])]; + tensor var_4320_cast = reshape(shape = var_4319, x = k_83_cast)[name = tensor("op_4320_cast")]; + tensor var_4321 = const()[name = tensor("op_4321"), val = tensor([2, 20, 64, -1])]; + tensor var_4322_cast = reshape(shape = var_4321, x = v_83_cast)[name = tensor("op_4322_cast")]; + tensor attn_weights_165_transpose_x_0 = const()[name = tensor("attn_weights_165_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_165_transpose_y_0 = const()[name = tensor("attn_weights_165_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_165_cast = matmul(transpose_x = attn_weights_165_transpose_x_0, transpose_y = attn_weights_165_transpose_y_0, x = var_4318_cast, y = var_4320_cast)[name = tensor("attn_weights_165_cast")]; + tensor attn_weights_167_cast = mul(x = attn_weights_165_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_167_cast")]; + tensor var_4326_cast = softmax(axis = var_1170, x = attn_weights_167_cast)[name = tensor("op_4326_cast")]; + tensor attn_83_transpose_x_0 = const()[name = tensor("attn_83_transpose_x_0"), val = tensor(false)]; + tensor attn_83_transpose_y_0 = const()[name = tensor("attn_83_transpose_y_0"), val = tensor(true)]; + tensor attn_83_cast = matmul(transpose_x = attn_83_transpose_x_0, transpose_y = attn_83_transpose_y_0, x = var_4322_cast, y = var_4326_cast)[name = tensor("attn_83_cast")]; + tensor var_4330 = const()[name = tensor("op_4330"), val = tensor([2, 1280, 1, -1])]; + tensor input_279_cast = reshape(shape = var_4330, x = attn_83_cast)[name = tensor("input_279_cast")]; + tensor var_4335 = const()[name = tensor("op_4335"), val = tensor([1, 1])]; + tensor var_4337 = const()[name = tensor("op_4337"), val = tensor([1, 1])]; + tensor var_4339_pad_type_0 = const()[name = tensor("op_4339_pad_type_0"), val = tensor("custom")]; + tensor var_4339_pad_0 = const()[name = 
tensor("op_4339_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554667200))), lut = tensor([-0x1.3e8p-7, 0x1.3f4p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554872064)))]; + tensor var_4339_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_4337, groups = var_1186, pad = var_4339_pad_0, pad_type = var_4339_pad_type_0, strides = var_4335, weight = down_blocks_2_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_279_cast)[name = tensor("op_4339_cast")]; + tensor inputs_125_cast = add(x = var_4339_cast, y = inputs_123_cast)[name = tensor("inputs_125_cast")]; + tensor var_4343 = const()[name = tensor("op_4343"), val = tensor([1])]; + tensor channels_mean_125_cast = reduce_mean(axes = var_4343, keep_dims = var_1181, x = inputs_125_cast)[name = tensor("channels_mean_125_cast")]; + tensor zero_mean_125_cast = sub(x = inputs_125_cast, y = channels_mean_125_cast)[name = tensor("zero_mean_125_cast")]; + tensor zero_mean_sq_125_cast = mul(x = zero_mean_125_cast, y = zero_mean_125_cast)[name = tensor("zero_mean_sq_125_cast")]; + tensor var_4347 = const()[name = tensor("op_4347"), val = tensor([1])]; + tensor var_4348_cast = reduce_mean(axes = var_4347, keep_dims = var_1181, x = zero_mean_sq_125_cast)[name = tensor("op_4348_cast")]; + tensor var_4349_to_fp16 = const()[name = tensor("op_4349_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4350_cast = add(x = var_4348_cast, y = var_4349_to_fp16)[name = tensor("op_4350_cast")]; + tensor denom_125_epsilon_0_to_fp16 = const()[name = tensor("denom_125_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_125_cast = rsqrt(epsilon = denom_125_epsilon_0_to_fp16, x = var_4350_cast)[name = tensor("denom_125_cast")]; + tensor out_125_cast = mul(x = zero_mean_125_cast, y = denom_125_cast)[name = tensor("out_125_cast")]; + tensor var_4354_to_fp16 = const()[name = tensor("op_4354_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554874688)))]; + tensor var_4355_cast = add(x = out_125_cast, y = var_4354_to_fp16)[name = tensor("op_4355_cast")]; + tensor var_4357_to_fp16 = const()[name = tensor("op_4357_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(554877312)))]; + tensor input_281_cast = mul(x = var_4355_cast, y = var_4357_to_fp16)[name = tensor("input_281_cast")]; + tensor var_4365 = const()[name = tensor("op_4365"), val = tensor([1, 1])]; + tensor var_4367 = const()[name = tensor("op_4367"), val = tensor([1, 1])]; + tensor var_4369_pad_type_0 = const()[name = tensor("op_4369_pad_type_0"), val = tensor("custom")]; + tensor var_4369_pad_0 = const()[name = tensor("op_4369_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(554879936))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(564710400))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(564710592)))]; + tensor var_4369_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_4367, groups = var_1186, pad = var_4369_pad_0, pad_type = var_4369_pad_type_0, strides = var_4365, weight = down_blocks_2_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_281_cast)[name = tensor("op_4369_cast")]; + tensor var_4370_split_sizes_0 = const()[name = tensor("op_4370_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4370_axis_0 = const()[name = tensor("op_4370_axis_0"), val = tensor(1)]; + tensor var_4370_cast_0, tensor var_4370_cast_1 = split(axis = var_4370_axis_0, split_sizes = var_4370_split_sizes_0, x = var_4369_cast)[name = tensor("op_4370_cast")]; + tensor var_4372_mode_0 = const()[name = tensor("op_4372_mode_0"), val = tensor("EXACT")]; + tensor var_4372_cast = gelu(mode = var_4372_mode_0, x = var_4370_cast_1)[name = tensor("op_4372_cast")]; + tensor input_283_cast = mul(x = var_4370_cast_0, y = var_4372_cast)[name = tensor("input_283_cast")]; + tensor var_4376 = const()[name = tensor("op_4376"), val = tensor([1, 1])]; + tensor var_4378 = const()[name = tensor("op_4378"), val = tensor([1, 1])]; + tensor var_4380_pad_type_0 = const()[name = tensor("op_4380_pad_type_0"), val = tensor("custom")]; + tensor var_4380_pad_0 = const()[name = tensor("op_4380_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(564731136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569646400))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569646592)))]; + tensor var_4380_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_4378, groups = var_1186, pad = var_4380_pad_0, pad_type = var_4380_pad_type_0, strides = var_4376, weight = down_blocks_2_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_283_cast)[name = tensor("op_4380_cast")]; + tensor inputs_127_cast = add(x = var_4380_cast, y = inputs_125_cast)[name = tensor("inputs_127_cast")]; + tensor var_4390 = const()[name = tensor("op_4390"), val = tensor([1])]; + tensor channels_mean_127_cast = reduce_mean(axes = var_4390, keep_dims = var_1181, x = inputs_127_cast)[name = tensor("channels_mean_127_cast")]; + tensor zero_mean_127_cast = sub(x = inputs_127_cast, y = channels_mean_127_cast)[name = 
tensor("zero_mean_127_cast")]; + tensor zero_mean_sq_127_cast = mul(x = zero_mean_127_cast, y = zero_mean_127_cast)[name = tensor("zero_mean_sq_127_cast")]; + tensor var_4394 = const()[name = tensor("op_4394"), val = tensor([1])]; + tensor var_4395_cast = reduce_mean(axes = var_4394, keep_dims = var_1181, x = zero_mean_sq_127_cast)[name = tensor("op_4395_cast")]; + tensor var_4396_to_fp16 = const()[name = tensor("op_4396_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4397_cast = add(x = var_4395_cast, y = var_4396_to_fp16)[name = tensor("op_4397_cast")]; + tensor denom_127_epsilon_0_to_fp16 = const()[name = tensor("denom_127_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_127_cast = rsqrt(epsilon = denom_127_epsilon_0_to_fp16, x = var_4397_cast)[name = tensor("denom_127_cast")]; + tensor out_127_cast = mul(x = zero_mean_127_cast, y = denom_127_cast)[name = tensor("out_127_cast")]; + tensor var_4401_to_fp16 = const()[name = tensor("op_4401_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569649216)))]; + tensor var_4402_cast = add(x = out_127_cast, y = var_4401_to_fp16)[name = tensor("op_4402_cast")]; + tensor var_4404_to_fp16 = const()[name = tensor("op_4404_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569651840)))]; + tensor hidden_states_179_cast = mul(x = var_4402_cast, y = var_4404_to_fp16)[name = tensor("hidden_states_179_cast")]; + tensor var_4411 = const()[name = tensor("op_4411"), val = tensor([1, 1])]; + tensor var_4413 = const()[name = tensor("op_4413"), val = tensor([1, 1])]; + tensor q_85_pad_type_0 = const()[name = tensor("q_85_pad_type_0"), val = tensor("custom")]; + tensor q_85_pad_0 = const()[name = tensor("q_85_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569654464))), lut = tensor([-0x1.50cp-6, 0x1.52p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_85_cast = conv(dilations = var_4413, groups = var_1186, pad = q_85_pad_0, pad_type = q_85_pad_type_0, strides = var_4411, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_179_cast)[name = tensor("q_85_cast")]; + tensor var_4417 = const()[name = tensor("op_4417"), val = tensor([1, 1])]; + tensor var_4419 = const()[name = tensor("op_4419"), val = tensor([1, 1])]; + tensor k_85_pad_type_0 = const()[name = tensor("k_85_pad_type_0"), val = tensor("custom")]; + tensor k_85_pad_0 = const()[name = tensor("k_85_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(569859328))), lut = tensor([-0x1.3acp-5, -0x1.7bcp-7, 0x1.798p-7, 0x1.3ap-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_85_cast = conv(dilations = var_4419, groups = var_1186, pad = k_85_pad_0, pad_type = k_85_pad_type_0, strides = var_4417, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_179_cast)[name = tensor("k_85_cast")]; 
+ tensor var_4423 = const()[name = tensor("op_4423"), val = tensor([1, 1])]; + tensor var_4425 = const()[name = tensor("op_4425"), val = tensor([1, 1])]; + tensor v_85_pad_type_0 = const()[name = tensor("v_85_pad_type_0"), val = tensor("custom")]; + tensor v_85_pad_0 = const()[name = tensor("v_85_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(570268992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(571497856))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_85_cast = conv(dilations = var_4425, groups = var_1186, pad = v_85_pad_0, pad_type = v_85_pad_type_0, strides = var_4423, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_179_cast)[name = tensor("v_85_cast")]; + tensor var_4429 = const()[name = tensor("op_4429"), val = tensor([2, 20, 64, -1])]; + tensor var_4430_cast = reshape(shape = var_4429, x = q_85_cast)[name = tensor("op_4430_cast")]; + tensor var_4431 = const()[name = tensor("op_4431"), val = tensor([2, 20, 64, -1])]; + tensor var_4432_cast = reshape(shape = var_4431, x = k_85_cast)[name = tensor("op_4432_cast")]; + tensor var_4433 = const()[name = tensor("op_4433"), val = tensor([2, 20, 64, -1])]; + tensor var_4434_cast = reshape(shape = var_4433, x = v_85_cast)[name = tensor("op_4434_cast")]; + tensor attn_weights_169_transpose_x_0 = const()[name = tensor("attn_weights_169_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_169_transpose_y_0 = const()[name = tensor("attn_weights_169_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_169_cast = matmul(transpose_x = attn_weights_169_transpose_x_0, transpose_y = attn_weights_169_transpose_y_0, x = var_4430_cast, y = var_4432_cast)[name = tensor("attn_weights_169_cast")]; + tensor attn_weights_171_cast = mul(x = attn_weights_169_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_171_cast")]; + tensor var_4438_cast = softmax(axis = var_1170, x = attn_weights_171_cast)[name = tensor("op_4438_cast")]; + tensor attn_85_transpose_x_0 = const()[name = tensor("attn_85_transpose_x_0"), val = tensor(false)]; + tensor attn_85_transpose_y_0 = const()[name = tensor("attn_85_transpose_y_0"), val = tensor(true)]; + tensor attn_85_cast = matmul(transpose_x = attn_85_transpose_x_0, transpose_y = attn_85_transpose_y_0, x = var_4434_cast, y = var_4438_cast)[name = tensor("attn_85_cast")]; + tensor var_4442 = const()[name = tensor("op_4442"), val = tensor([2, 1280, 1, -1])]; + tensor input_285_cast = reshape(shape = var_4442, x = attn_85_cast)[name = tensor("input_285_cast")]; + tensor var_4447 = const()[name = tensor("op_4447"), val = tensor([1, 1])]; + tensor var_4449 = const()[name = tensor("op_4449"), val = tensor([1, 1])]; + tensor var_4451_pad_type_0 = const()[name = tensor("op_4451_pad_type_0"), val = tensor("custom")]; + tensor var_4451_pad_0 = const()[name = tensor("op_4451_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(571498048))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(572726912))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(572727104)))]; + tensor var_4451_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_4449, groups = var_1186, pad = var_4451_pad_0, pad_type = var_4451_pad_type_0, strides = var_4447, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_285_cast)[name = tensor("op_4451_cast")]; + tensor inputs_129_cast = add(x = var_4451_cast, y = inputs_127_cast)[name = tensor("inputs_129_cast")]; + tensor var_4455 = const()[name = tensor("op_4455"), val = tensor([1])]; + tensor channels_mean_129_cast = reduce_mean(axes = var_4455, keep_dims = var_1181, x = inputs_129_cast)[name = tensor("channels_mean_129_cast")]; + tensor zero_mean_129_cast = sub(x = inputs_129_cast, y = channels_mean_129_cast)[name = tensor("zero_mean_129_cast")]; + tensor zero_mean_sq_129_cast = mul(x = zero_mean_129_cast, y = zero_mean_129_cast)[name = tensor("zero_mean_sq_129_cast")]; + tensor var_4459 = const()[name = tensor("op_4459"), val = tensor([1])]; + tensor var_4460_cast = reduce_mean(axes = var_4459, keep_dims = var_1181, x = zero_mean_sq_129_cast)[name = tensor("op_4460_cast")]; + tensor var_4461_to_fp16 = const()[name = tensor("op_4461_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4462_cast = add(x = var_4460_cast, y = var_4461_to_fp16)[name = tensor("op_4462_cast")]; + tensor denom_129_epsilon_0_to_fp16 = const()[name = tensor("denom_129_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_129_cast = rsqrt(epsilon = denom_129_epsilon_0_to_fp16, x = var_4462_cast)[name = tensor("denom_129_cast")]; + tensor out_129_cast = mul(x = zero_mean_129_cast, y = denom_129_cast)[name = tensor("out_129_cast")]; + tensor var_4466_to_fp16 = const()[name = tensor("op_4466_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(572729728)))]; + tensor var_4467_cast = add(x = out_129_cast, y = var_4466_to_fp16)[name = tensor("op_4467_cast")]; + tensor var_4469_to_fp16 = const()[name = tensor("op_4469_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(572732352)))]; + tensor hidden_states_181_cast = mul(x = var_4467_cast, y = var_4469_to_fp16)[name = tensor("hidden_states_181_cast")]; + tensor var_4476 = const()[name = tensor("op_4476"), val = tensor([1, 1])]; + tensor var_4478 = const()[name = tensor("op_4478"), val = tensor([1, 1])]; + tensor q_87_pad_type_0 = const()[name = tensor("q_87_pad_type_0"), val = tensor("custom")]; + tensor q_87_pad_0 = const()[name = tensor("q_87_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(572734976))), lut = tensor([-0x1.adcp-6, -0x1.09p-7, 0x1.0a4p-7, 0x1.ae8p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_87_cast = conv(dilations = var_4478, 
groups = var_1186, pad = q_87_pad_0, pad_type = q_87_pad_type_0, strides = var_4476, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_181_cast)[name = tensor("q_87_cast")]; + tensor var_4482 = const()[name = tensor("op_4482"), val = tensor([1, 1])]; + tensor var_4484 = const()[name = tensor("op_4484"), val = tensor([1, 1])]; + tensor k_87_pad_type_0 = const()[name = tensor("k_87_pad_type_0"), val = tensor("custom")]; + tensor k_87_pad_0 = const()[name = tensor("k_87_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(573144640))), lut = tensor([-0x1.59p-6, -0x1.9c8p-8, 0x1.964p-8, 0x1.578p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_87_cast = conv(dilations = var_4484, groups = var_1186, pad = k_87_pad_0, pad_type = k_87_pad_type_0, strides = var_4482, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_87_cast")]; + tensor var_4488 = const()[name = tensor("op_4488"), val = tensor([1, 1])]; + tensor var_4490 = const()[name = tensor("op_4490"), val = tensor([1, 1])]; + tensor v_87_pad_type_0 = const()[name = tensor("v_87_pad_type_0"), val = tensor("custom")]; + tensor v_87_pad_0 = const()[name = tensor("v_87_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(573800064))), lut = tensor([-0x1.e88p-7, 0x1.e78p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_87_cast = conv(dilations = var_4490, groups = var_1186, pad = v_87_pad_0, pad_type = v_87_pad_type_0, strides = var_4488, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_87_cast")]; + tensor var_4494 = const()[name = tensor("op_4494"), val = tensor([2, 20, 64, -1])]; + tensor var_4495_cast = reshape(shape = var_4494, x = q_87_cast)[name = tensor("op_4495_cast")]; + tensor var_4496 = const()[name = tensor("op_4496"), val = tensor([2, 20, 64, -1])]; + tensor var_4497_cast = reshape(shape = var_4496, x = k_87_cast)[name = tensor("op_4497_cast")]; + tensor var_4498 = const()[name = tensor("op_4498"), val = tensor([2, 20, 64, -1])]; + tensor var_4499_cast = reshape(shape = var_4498, x = v_87_cast)[name = tensor("op_4499_cast")]; + tensor attn_weights_173_transpose_x_0 = const()[name = tensor("attn_weights_173_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_173_transpose_y_0 = const()[name = tensor("attn_weights_173_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_173_cast = matmul(transpose_x = attn_weights_173_transpose_x_0, transpose_y = attn_weights_173_transpose_y_0, x = var_4495_cast, y = var_4497_cast)[name = tensor("attn_weights_173_cast")]; + tensor attn_weights_175_cast = mul(x = attn_weights_173_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_175_cast")]; + tensor var_4503_cast = softmax(axis = var_1170, x = attn_weights_175_cast)[name = tensor("op_4503_cast")]; 
+ tensor attn_87_transpose_x_0 = const()[name = tensor("attn_87_transpose_x_0"), val = tensor(false)]; + tensor attn_87_transpose_y_0 = const()[name = tensor("attn_87_transpose_y_0"), val = tensor(true)]; + tensor attn_87_cast = matmul(transpose_x = attn_87_transpose_x_0, transpose_y = attn_87_transpose_y_0, x = var_4499_cast, y = var_4503_cast)[name = tensor("attn_87_cast")]; + tensor var_4507 = const()[name = tensor("op_4507"), val = tensor([2, 1280, 1, -1])]; + tensor input_287_cast = reshape(shape = var_4507, x = attn_87_cast)[name = tensor("input_287_cast")]; + tensor var_4512 = const()[name = tensor("op_4512"), val = tensor([1, 1])]; + tensor var_4514 = const()[name = tensor("op_4514"), val = tensor([1, 1])]; + tensor var_4516_pad_type_0 = const()[name = tensor("op_4516_pad_type_0"), val = tensor("custom")]; + tensor var_4516_pad_0 = const()[name = tensor("op_4516_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574127808))), lut = tensor([-0x1.354p-7, 0x1.37p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574332672)))]; + tensor var_4516_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_4514, groups = var_1186, pad = var_4516_pad_0, pad_type = var_4516_pad_type_0, strides = var_4512, weight = down_blocks_2_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_287_cast)[name = tensor("op_4516_cast")]; + tensor inputs_131_cast = add(x = var_4516_cast, y = inputs_129_cast)[name = tensor("inputs_131_cast")]; + tensor var_4520 = const()[name = tensor("op_4520"), val = tensor([1])]; + tensor channels_mean_131_cast = reduce_mean(axes = var_4520, keep_dims = var_1181, x = inputs_131_cast)[name = tensor("channels_mean_131_cast")]; + tensor zero_mean_131_cast = sub(x = inputs_131_cast, y = channels_mean_131_cast)[name = tensor("zero_mean_131_cast")]; + tensor zero_mean_sq_131_cast = mul(x = zero_mean_131_cast, y = zero_mean_131_cast)[name = tensor("zero_mean_sq_131_cast")]; + tensor var_4524 = const()[name = tensor("op_4524"), val = tensor([1])]; + tensor var_4525_cast = reduce_mean(axes = var_4524, keep_dims = var_1181, x = zero_mean_sq_131_cast)[name = tensor("op_4525_cast")]; + tensor var_4526_to_fp16 = const()[name = tensor("op_4526_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4527_cast = add(x = var_4525_cast, y = var_4526_to_fp16)[name = tensor("op_4527_cast")]; + tensor denom_131_epsilon_0_to_fp16 = const()[name = tensor("denom_131_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_131_cast = rsqrt(epsilon = denom_131_epsilon_0_to_fp16, x = var_4527_cast)[name = tensor("denom_131_cast")]; + tensor out_131_cast = mul(x = zero_mean_131_cast, y = denom_131_cast)[name = tensor("out_131_cast")]; + tensor var_4531_to_fp16 = const()[name = tensor("op_4531_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574335296)))]; + tensor var_4532_cast = add(x = out_131_cast, y 
= var_4531_to_fp16)[name = tensor("op_4532_cast")]; + tensor var_4534_to_fp16 = const()[name = tensor("op_4534_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574337920)))]; + tensor input_289_cast = mul(x = var_4532_cast, y = var_4534_to_fp16)[name = tensor("input_289_cast")]; + tensor var_4542 = const()[name = tensor("op_4542"), val = tensor([1, 1])]; + tensor var_4544 = const()[name = tensor("op_4544"), val = tensor([1, 1])]; + tensor var_4546_pad_type_0 = const()[name = tensor("op_4546_pad_type_0"), val = tensor("custom")]; + tensor var_4546_pad_0 = const()[name = tensor("op_4546_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(574340544))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(584171008))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(584171200)))]; + tensor var_4546_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_4544, groups = var_1186, pad = var_4546_pad_0, pad_type = var_4546_pad_type_0, strides = var_4542, weight = down_blocks_2_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_289_cast)[name = tensor("op_4546_cast")]; + tensor var_4547_split_sizes_0 = const()[name = tensor("op_4547_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4547_axis_0 = const()[name = tensor("op_4547_axis_0"), val = tensor(1)]; + tensor var_4547_cast_0, tensor var_4547_cast_1 = split(axis = var_4547_axis_0, split_sizes = var_4547_split_sizes_0, x = var_4546_cast)[name = tensor("op_4547_cast")]; + tensor var_4549_mode_0 = const()[name = tensor("op_4549_mode_0"), val = tensor("EXACT")]; + tensor var_4549_cast = gelu(mode = var_4549_mode_0, x = var_4547_cast_1)[name = tensor("op_4549_cast")]; + tensor input_291_cast = mul(x = var_4547_cast_0, y = var_4549_cast)[name = tensor("input_291_cast")]; + tensor var_4553 = const()[name = tensor("op_4553"), val = tensor([1, 1])]; + tensor var_4555 = const()[name = tensor("op_4555"), val = tensor([1, 1])]; + tensor var_4557_pad_type_0 = const()[name = tensor("op_4557_pad_type_0"), val = tensor("custom")]; + tensor var_4557_pad_0 = const()[name = tensor("op_4557_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(584191744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(587468608))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(587468736)))]; + tensor var_4557_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_4555, groups = var_1186, pad = var_4557_pad_0, pad_type = var_4557_pad_type_0, strides = var_4553, weight = down_blocks_2_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_291_cast)[name = tensor("op_4557_cast")]; + tensor inputs_133_cast = add(x = var_4557_cast, y = inputs_131_cast)[name = tensor("inputs_133_cast")]; + tensor var_4567 = const()[name = tensor("op_4567"), val = tensor([1])]; + tensor channels_mean_133_cast = reduce_mean(axes = var_4567, keep_dims = var_1181, x = inputs_133_cast)[name = tensor("channels_mean_133_cast")]; + tensor zero_mean_133_cast = sub(x = inputs_133_cast, y = channels_mean_133_cast)[name = tensor("zero_mean_133_cast")]; + tensor zero_mean_sq_133_cast = mul(x = zero_mean_133_cast, y = zero_mean_133_cast)[name = tensor("zero_mean_sq_133_cast")]; + tensor var_4571 = const()[name = tensor("op_4571"), val = tensor([1])]; + tensor var_4572_cast = reduce_mean(axes = var_4571, keep_dims = var_1181, x = zero_mean_sq_133_cast)[name = tensor("op_4572_cast")]; + tensor var_4573_to_fp16 = const()[name = tensor("op_4573_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4574_cast = add(x = var_4572_cast, y = var_4573_to_fp16)[name = tensor("op_4574_cast")]; + tensor denom_133_epsilon_0_to_fp16 = const()[name = tensor("denom_133_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_133_cast = rsqrt(epsilon = denom_133_epsilon_0_to_fp16, x = var_4574_cast)[name = tensor("denom_133_cast")]; + tensor out_133_cast = mul(x = zero_mean_133_cast, y = denom_133_cast)[name = tensor("out_133_cast")]; + tensor var_4578_to_fp16 = const()[name = tensor("op_4578_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(587471360)))]; + tensor var_4579_cast = add(x = out_133_cast, y = var_4578_to_fp16)[name = tensor("op_4579_cast")]; + tensor var_4581_to_fp16 = const()[name = tensor("op_4581_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(587473984)))]; + tensor hidden_states_185_cast = mul(x = var_4579_cast, y = var_4581_to_fp16)[name = tensor("hidden_states_185_cast")]; + tensor var_4588 = const()[name = tensor("op_4588"), val = tensor([1, 1])]; + tensor var_4590 = const()[name = tensor("op_4590"), val = tensor([1, 1])]; + tensor q_89_pad_type_0 = const()[name = tensor("q_89_pad_type_0"), val = tensor("custom")]; + tensor q_89_pad_0 = const()[name = tensor("q_89_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(587476608))), lut = tensor([-0x1.4p-5, -0x1.808p-7, 0x1.804p-7, 0x1.3f4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_89_cast = conv(dilations = var_4590, groups = var_1186, pad = q_89_pad_0, pad_type = q_89_pad_type_0, strides = var_4588, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_185_cast)[name = tensor("q_89_cast")]; + tensor var_4594 = const()[name = tensor("op_4594"), val = tensor([1, 1])]; + tensor var_4596 = const()[name = tensor("op_4596"), val = tensor([1, 1])]; + tensor k_89_pad_type_0 = const()[name = 
tensor("k_89_pad_type_0"), val = tensor("custom")]; + tensor k_89_pad_0 = const()[name = tensor("k_89_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(587886272))), lut = tensor([-0x1.3d4p-5, -0x1.7d8p-7, 0x1.7ecp-7, 0x1.3d4p-5]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_89_cast = conv(dilations = var_4596, groups = var_1186, pad = k_89_pad_0, pad_type = k_89_pad_type_0, strides = var_4594, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_185_cast)[name = tensor("k_89_cast")]; + tensor var_4600 = const()[name = tensor("op_4600"), val = tensor([1, 1])]; + tensor var_4602 = const()[name = tensor("op_4602"), val = tensor([1, 1])]; + tensor v_89_pad_type_0 = const()[name = tensor("v_89_pad_type_0"), val = tensor("custom")]; + tensor v_89_pad_0 = const()[name = tensor("v_89_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(588295936))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(589934400))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_89_cast = conv(dilations = var_4602, groups = var_1186, pad = v_89_pad_0, pad_type = v_89_pad_type_0, strides = var_4600, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_185_cast)[name = tensor("v_89_cast")]; + tensor var_4606 = const()[name = tensor("op_4606"), val = tensor([2, 20, 64, -1])]; + tensor var_4607_cast = reshape(shape = var_4606, x = q_89_cast)[name = tensor("op_4607_cast")]; + tensor var_4608 = const()[name = tensor("op_4608"), val = tensor([2, 20, 64, -1])]; + tensor var_4609_cast = reshape(shape = var_4608, x = k_89_cast)[name = tensor("op_4609_cast")]; + tensor var_4610 = const()[name = tensor("op_4610"), val = tensor([2, 20, 64, -1])]; + tensor var_4611_cast = reshape(shape = var_4610, x = v_89_cast)[name = tensor("op_4611_cast")]; + tensor attn_weights_177_transpose_x_0 = const()[name = tensor("attn_weights_177_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_177_transpose_y_0 = const()[name = tensor("attn_weights_177_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_177_cast = matmul(transpose_x = attn_weights_177_transpose_x_0, transpose_y = attn_weights_177_transpose_y_0, x = var_4607_cast, y = var_4609_cast)[name = tensor("attn_weights_177_cast")]; + tensor attn_weights_179_cast = mul(x = attn_weights_177_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_179_cast")]; + tensor var_4615_cast = softmax(axis = var_1170, x = attn_weights_179_cast)[name = tensor("op_4615_cast")]; + tensor attn_89_transpose_x_0 = const()[name = tensor("attn_89_transpose_x_0"), val = tensor(false)]; + tensor attn_89_transpose_y_0 = const()[name = tensor("attn_89_transpose_y_0"), val = tensor(true)]; + tensor attn_89_cast = matmul(transpose_x = attn_89_transpose_x_0, transpose_y = attn_89_transpose_y_0, x = var_4611_cast, y = var_4615_cast)[name = 
tensor("attn_89_cast")]; + tensor var_4619 = const()[name = tensor("op_4619"), val = tensor([2, 1280, 1, -1])]; + tensor input_293_cast = reshape(shape = var_4619, x = attn_89_cast)[name = tensor("input_293_cast")]; + tensor var_4624 = const()[name = tensor("op_4624"), val = tensor([1, 1])]; + tensor var_4626 = const()[name = tensor("op_4626"), val = tensor([1, 1])]; + tensor var_4628_pad_type_0 = const()[name = tensor("op_4628_pad_type_0"), val = tensor("custom")]; + tensor var_4628_pad_0 = const()[name = tensor("op_4628_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(589934976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(590754240))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(590754368)))]; + tensor var_4628_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_4626, groups = var_1186, pad = var_4628_pad_0, pad_type = var_4628_pad_type_0, strides = var_4624, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_293_cast)[name = tensor("op_4628_cast")]; + tensor inputs_135_cast = add(x = var_4628_cast, y = inputs_133_cast)[name = tensor("inputs_135_cast")]; + tensor var_4632 = const()[name = tensor("op_4632"), val = tensor([1])]; + tensor channels_mean_135_cast = reduce_mean(axes = var_4632, keep_dims = var_1181, x = inputs_135_cast)[name = tensor("channels_mean_135_cast")]; + tensor zero_mean_135_cast = sub(x = inputs_135_cast, y = channels_mean_135_cast)[name = tensor("zero_mean_135_cast")]; + tensor zero_mean_sq_135_cast = mul(x = zero_mean_135_cast, y = zero_mean_135_cast)[name = tensor("zero_mean_sq_135_cast")]; + tensor var_4636 = const()[name = tensor("op_4636"), val = tensor([1])]; + tensor var_4637_cast = reduce_mean(axes = var_4636, keep_dims = var_1181, x = zero_mean_sq_135_cast)[name = tensor("op_4637_cast")]; + tensor var_4638_to_fp16 = const()[name = tensor("op_4638_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4639_cast = add(x = var_4637_cast, y = var_4638_to_fp16)[name = tensor("op_4639_cast")]; + tensor denom_135_epsilon_0_to_fp16 = const()[name = tensor("denom_135_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_135_cast = rsqrt(epsilon = denom_135_epsilon_0_to_fp16, x = var_4639_cast)[name = tensor("denom_135_cast")]; + tensor out_135_cast = mul(x = zero_mean_135_cast, y = denom_135_cast)[name = tensor("out_135_cast")]; + tensor var_4643_to_fp16 = const()[name = tensor("op_4643_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(590756992)))]; + tensor var_4644_cast = add(x = out_135_cast, y = var_4643_to_fp16)[name = tensor("op_4644_cast")]; + tensor var_4646_to_fp16 = const()[name = tensor("op_4646_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(590759616)))]; + tensor hidden_states_187_cast = mul(x = var_4644_cast, y = 
var_4646_to_fp16)[name = tensor("hidden_states_187_cast")]; + tensor var_4653 = const()[name = tensor("op_4653"), val = tensor([1, 1])]; + tensor var_4655 = const()[name = tensor("op_4655"), val = tensor([1, 1])]; + tensor q_91_pad_type_0 = const()[name = tensor("q_91_pad_type_0"), val = tensor("custom")]; + tensor q_91_pad_0 = const()[name = tensor("q_91_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(590762240))), lut = tensor([-0x1.cd4p-7, 0x1.cf4p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_91_cast = conv(dilations = var_4655, groups = var_1186, pad = q_91_pad_0, pad_type = q_91_pad_type_0, strides = var_4653, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_187_cast)[name = tensor("q_91_cast")]; + tensor var_4659 = const()[name = tensor("op_4659"), val = tensor([1, 1])]; + tensor var_4661 = const()[name = tensor("op_4661"), val = tensor([1, 1])]; + tensor k_91_pad_type_0 = const()[name = tensor("k_91_pad_type_0"), val = tensor("custom")]; + tensor k_91_pad_0 = const()[name = tensor("k_91_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(590967104))), lut = tensor([-0x1.4f8p-6, -0x1.904p-8, 0x1.8b4p-8, 0x1.4d8p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_91_cast = conv(dilations = var_4661, groups = var_1186, pad = k_91_pad_0, pad_type = k_91_pad_type_0, strides = var_4659, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_91_cast")]; + tensor var_4665 = const()[name = tensor("op_4665"), val = tensor([1, 1])]; + tensor var_4667 = const()[name = tensor("op_4667"), val = tensor([1, 1])]; + tensor v_91_pad_type_0 = const()[name = tensor("v_91_pad_type_0"), val = tensor("custom")]; + tensor v_91_pad_0 = const()[name = tensor("v_91_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591622528))), lut = tensor([-0x1.ce8p-7, 0x1.cd8p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_91_cast = conv(dilations = var_4667, groups = var_1186, pad = v_91_pad_0, pad_type = v_91_pad_type_0, strides = var_4665, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_91_cast")]; + tensor var_4671 = const()[name = tensor("op_4671"), val = tensor([2, 20, 64, -1])]; + tensor var_4672_cast = reshape(shape = var_4671, x = q_91_cast)[name = tensor("op_4672_cast")]; + tensor var_4673 = const()[name = tensor("op_4673"), val = tensor([2, 20, 64, -1])]; + tensor var_4674_cast = reshape(shape = var_4673, x = k_91_cast)[name = tensor("op_4674_cast")]; + tensor var_4675 = 
const()[name = tensor("op_4675"), val = tensor([2, 20, 64, -1])]; + tensor var_4676_cast = reshape(shape = var_4675, x = v_91_cast)[name = tensor("op_4676_cast")]; + tensor attn_weights_181_transpose_x_0 = const()[name = tensor("attn_weights_181_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_181_transpose_y_0 = const()[name = tensor("attn_weights_181_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_181_cast = matmul(transpose_x = attn_weights_181_transpose_x_0, transpose_y = attn_weights_181_transpose_y_0, x = var_4672_cast, y = var_4674_cast)[name = tensor("attn_weights_181_cast")]; + tensor attn_weights_183_cast = mul(x = attn_weights_181_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_183_cast")]; + tensor var_4680_cast = softmax(axis = var_1170, x = attn_weights_183_cast)[name = tensor("op_4680_cast")]; + tensor attn_91_transpose_x_0 = const()[name = tensor("attn_91_transpose_x_0"), val = tensor(false)]; + tensor attn_91_transpose_y_0 = const()[name = tensor("attn_91_transpose_y_0"), val = tensor(true)]; + tensor attn_91_cast = matmul(transpose_x = attn_91_transpose_x_0, transpose_y = attn_91_transpose_y_0, x = var_4676_cast, y = var_4680_cast)[name = tensor("attn_91_cast")]; + tensor var_4684 = const()[name = tensor("op_4684"), val = tensor([2, 1280, 1, -1])]; + tensor input_295_cast = reshape(shape = var_4684, x = attn_91_cast)[name = tensor("input_295_cast")]; + tensor var_4689 = const()[name = tensor("op_4689"), val = tensor([1, 1])]; + tensor var_4691 = const()[name = tensor("op_4691"), val = tensor([1, 1])]; + tensor var_4693_pad_type_0 = const()[name = tensor("op_4693_pad_type_0"), val = tensor("custom")]; + tensor var_4693_pad_0 = const()[name = tensor("op_4693_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(591950272))), lut = tensor([-0x1.23p-7, 0x1.25p-7]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(592155136)))]; + tensor var_4693_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_4691, groups = var_1186, pad = var_4693_pad_0, pad_type = var_4693_pad_type_0, strides = var_4689, weight = down_blocks_2_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_295_cast)[name = tensor("op_4693_cast")]; + tensor inputs_137_cast = add(x = var_4693_cast, y = inputs_135_cast)[name = tensor("inputs_137_cast")]; + tensor var_4697 = const()[name = tensor("op_4697"), val = tensor([1])]; + tensor channels_mean_137_cast = reduce_mean(axes = var_4697, keep_dims = var_1181, x = inputs_137_cast)[name = tensor("channels_mean_137_cast")]; + tensor zero_mean_137_cast = sub(x = inputs_137_cast, y = channels_mean_137_cast)[name = tensor("zero_mean_137_cast")]; + tensor zero_mean_sq_137_cast = mul(x = zero_mean_137_cast, y = zero_mean_137_cast)[name = tensor("zero_mean_sq_137_cast")]; + tensor var_4701 = const()[name = tensor("op_4701"), val = tensor([1])]; + tensor var_4702_cast = reduce_mean(axes = 
var_4701, keep_dims = var_1181, x = zero_mean_sq_137_cast)[name = tensor("op_4702_cast")]; + tensor var_4703_to_fp16 = const()[name = tensor("op_4703_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4704_cast = add(x = var_4702_cast, y = var_4703_to_fp16)[name = tensor("op_4704_cast")]; + tensor denom_137_epsilon_0_to_fp16 = const()[name = tensor("denom_137_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_137_cast = rsqrt(epsilon = denom_137_epsilon_0_to_fp16, x = var_4704_cast)[name = tensor("denom_137_cast")]; + tensor out_137_cast = mul(x = zero_mean_137_cast, y = denom_137_cast)[name = tensor("out_137_cast")]; + tensor var_4708_to_fp16 = const()[name = tensor("op_4708_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(592157760)))]; + tensor var_4709_cast = add(x = out_137_cast, y = var_4708_to_fp16)[name = tensor("op_4709_cast")]; + tensor var_4711_to_fp16 = const()[name = tensor("op_4711_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(592160384)))]; + tensor input_297_cast = mul(x = var_4709_cast, y = var_4711_to_fp16)[name = tensor("input_297_cast")]; + tensor var_4719 = const()[name = tensor("op_4719"), val = tensor([1, 1])]; + tensor var_4721 = const()[name = tensor("op_4721"), val = tensor([1, 1])]; + tensor var_4723_pad_type_0 = const()[name = tensor("op_4723_pad_type_0"), val = tensor("custom")]; + tensor var_4723_pad_0 = const()[name = tensor("op_4723_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(592163008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(601993472))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(601993664)))]; + tensor var_4723_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_4721, groups = var_1186, pad = var_4723_pad_0, pad_type = var_4723_pad_type_0, strides = var_4719, weight = down_blocks_2_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_297_cast)[name = tensor("op_4723_cast")]; + tensor var_4724_split_sizes_0 = const()[name = tensor("op_4724_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4724_axis_0 = const()[name = tensor("op_4724_axis_0"), val = tensor(1)]; + tensor var_4724_cast_0, tensor var_4724_cast_1 = split(axis = var_4724_axis_0, split_sizes = var_4724_split_sizes_0, x = var_4723_cast)[name = tensor("op_4724_cast")]; + tensor var_4726_mode_0 = const()[name = tensor("op_4726_mode_0"), val = tensor("EXACT")]; + tensor var_4726_cast = gelu(mode = var_4726_mode_0, x = var_4724_cast_1)[name = tensor("op_4726_cast")]; + tensor input_299_cast = mul(x = var_4724_cast_0, y = var_4726_cast)[name = tensor("input_299_cast")]; + tensor var_4730 = const()[name = tensor("op_4730"), val = tensor([1, 1])]; + tensor var_4732 = const()[name = tensor("op_4732"), val = tensor([1, 1])]; + tensor var_4734_pad_type_0 = const()[name = 
tensor("op_4734_pad_type_0"), val = tensor("custom")]; + tensor var_4734_pad_0 = const()[name = tensor("op_4734_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(602014208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(606929472))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(606929664)))]; + tensor var_4734_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_4732, groups = var_1186, pad = var_4734_pad_0, pad_type = var_4734_pad_type_0, strides = var_4730, weight = down_blocks_2_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_299_cast)[name = tensor("op_4734_cast")]; + tensor inputs_139_cast = add(x = var_4734_cast, y = inputs_137_cast)[name = tensor("inputs_139_cast")]; + tensor var_4744 = const()[name = tensor("op_4744"), val = tensor([1])]; + tensor channels_mean_139_cast = reduce_mean(axes = var_4744, keep_dims = var_1181, x = inputs_139_cast)[name = tensor("channels_mean_139_cast")]; + tensor zero_mean_139_cast = sub(x = inputs_139_cast, y = channels_mean_139_cast)[name = tensor("zero_mean_139_cast")]; + tensor zero_mean_sq_139_cast = mul(x = zero_mean_139_cast, y = zero_mean_139_cast)[name = tensor("zero_mean_sq_139_cast")]; + tensor var_4748 = const()[name = tensor("op_4748"), val = tensor([1])]; + tensor var_4749_cast = reduce_mean(axes = var_4748, keep_dims = var_1181, x = zero_mean_sq_139_cast)[name = tensor("op_4749_cast")]; + tensor var_4750_to_fp16 = const()[name = tensor("op_4750_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4751_cast = add(x = var_4749_cast, y = var_4750_to_fp16)[name = tensor("op_4751_cast")]; + tensor denom_139_epsilon_0_to_fp16 = const()[name = tensor("denom_139_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_139_cast = rsqrt(epsilon = denom_139_epsilon_0_to_fp16, x = var_4751_cast)[name = tensor("denom_139_cast")]; + tensor out_139_cast = mul(x = zero_mean_139_cast, y = denom_139_cast)[name = tensor("out_139_cast")]; + tensor var_4755_to_fp16 = const()[name = tensor("op_4755_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(606932288)))]; + tensor var_4756_cast = add(x = out_139_cast, y = var_4755_to_fp16)[name = tensor("op_4756_cast")]; + tensor var_4758_to_fp16 = const()[name = tensor("op_4758_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(606934912)))]; + tensor hidden_states_191_cast = mul(x = var_4756_cast, y = var_4758_to_fp16)[name = tensor("hidden_states_191_cast")]; + tensor var_4765 = const()[name = tensor("op_4765"), val = tensor([1, 1])]; + tensor var_4767 = const()[name = tensor("op_4767"), val = tensor([1, 1])]; + tensor q_93_pad_type_0 = const()[name = tensor("q_93_pad_type_0"), val = tensor("custom")]; + tensor q_93_pad_0 = const()[name = tensor("q_93_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(606937536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(607756800))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_93_cast = conv(dilations = var_4767, groups = var_1186, pad = q_93_pad_0, pad_type = q_93_pad_type_0, strides = var_4765, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_191_cast)[name = tensor("q_93_cast")]; + tensor var_4771 = const()[name = tensor("op_4771"), val = tensor([1, 1])]; + tensor var_4773 = const()[name = tensor("op_4773"), val = tensor([1, 1])]; + tensor k_93_pad_type_0 = const()[name = tensor("k_93_pad_type_0"), val = tensor("custom")]; + tensor k_93_pad_0 = const()[name = tensor("k_93_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(607756928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(608576192))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_93_cast = conv(dilations = var_4773, groups = var_1186, pad = k_93_pad_0, pad_type = k_93_pad_type_0, strides = var_4771, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_191_cast)[name = tensor("k_93_cast")]; + tensor var_4777 = const()[name = tensor("op_4777"), val = tensor([1, 1])]; + tensor var_4779 = const()[name = tensor("op_4779"), val = tensor([1, 1])]; + tensor v_93_pad_type_0 = const()[name = tensor("v_93_pad_type_0"), val = tensor("custom")]; + tensor v_93_pad_0 = const()[name = tensor("v_93_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(608576320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(609395584))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_93_cast = conv(dilations = var_4779, groups = var_1186, pad = v_93_pad_0, pad_type = v_93_pad_type_0, strides = var_4777, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_191_cast)[name = tensor("v_93_cast")]; + tensor var_4783 = const()[name = tensor("op_4783"), val = tensor([2, 20, 64, -1])]; + tensor var_4784_cast = reshape(shape = var_4783, x = q_93_cast)[name = tensor("op_4784_cast")]; + tensor var_4785 = const()[name = tensor("op_4785"), val = tensor([2, 20, 64, -1])]; + tensor var_4786_cast = reshape(shape = var_4785, x = k_93_cast)[name = tensor("op_4786_cast")]; + tensor var_4787 = const()[name = tensor("op_4787"), val = tensor([2, 20, 64, -1])]; + tensor var_4788_cast = reshape(shape = var_4787, x = v_93_cast)[name = tensor("op_4788_cast")]; + tensor attn_weights_185_transpose_x_0 = const()[name = 
tensor("attn_weights_185_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_185_transpose_y_0 = const()[name = tensor("attn_weights_185_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_185_cast = matmul(transpose_x = attn_weights_185_transpose_x_0, transpose_y = attn_weights_185_transpose_y_0, x = var_4784_cast, y = var_4786_cast)[name = tensor("attn_weights_185_cast")]; + tensor attn_weights_187_cast = mul(x = attn_weights_185_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_187_cast")]; + tensor var_4792_cast = softmax(axis = var_1170, x = attn_weights_187_cast)[name = tensor("op_4792_cast")]; + tensor attn_93_transpose_x_0 = const()[name = tensor("attn_93_transpose_x_0"), val = tensor(false)]; + tensor attn_93_transpose_y_0 = const()[name = tensor("attn_93_transpose_y_0"), val = tensor(true)]; + tensor attn_93_cast = matmul(transpose_x = attn_93_transpose_x_0, transpose_y = attn_93_transpose_y_0, x = var_4788_cast, y = var_4792_cast)[name = tensor("attn_93_cast")]; + tensor var_4796 = const()[name = tensor("op_4796"), val = tensor([2, 1280, 1, -1])]; + tensor input_301_cast = reshape(shape = var_4796, x = attn_93_cast)[name = tensor("input_301_cast")]; + tensor var_4801 = const()[name = tensor("op_4801"), val = tensor([1, 1])]; + tensor var_4803 = const()[name = tensor("op_4803"), val = tensor([1, 1])]; + tensor var_4805_pad_type_0 = const()[name = tensor("op_4805_pad_type_0"), val = tensor("custom")]; + tensor var_4805_pad_0 = const()[name = tensor("op_4805_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(609395712))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(610214976))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(610215104)))]; + tensor var_4805_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_4803, groups = var_1186, pad = var_4805_pad_0, pad_type = var_4805_pad_type_0, strides = var_4801, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_301_cast)[name = tensor("op_4805_cast")]; + tensor inputs_141_cast = add(x = var_4805_cast, y = inputs_139_cast)[name = tensor("inputs_141_cast")]; + tensor var_4809 = const()[name = tensor("op_4809"), val = tensor([1])]; + tensor channels_mean_141_cast = reduce_mean(axes = var_4809, keep_dims = var_1181, x = inputs_141_cast)[name = tensor("channels_mean_141_cast")]; + tensor zero_mean_141_cast = sub(x = inputs_141_cast, y = channels_mean_141_cast)[name = tensor("zero_mean_141_cast")]; + tensor zero_mean_sq_141_cast = mul(x = zero_mean_141_cast, y = zero_mean_141_cast)[name = tensor("zero_mean_sq_141_cast")]; + tensor var_4813 = const()[name = tensor("op_4813"), val = tensor([1])]; + tensor var_4814_cast = reduce_mean(axes = var_4813, keep_dims = var_1181, x = zero_mean_sq_141_cast)[name = tensor("op_4814_cast")]; + tensor var_4815_to_fp16 = const()[name = tensor("op_4815_to_fp16"), 
val = tensor(0x1.5p-17)]; + tensor var_4816_cast = add(x = var_4814_cast, y = var_4815_to_fp16)[name = tensor("op_4816_cast")]; + tensor denom_141_epsilon_0_to_fp16 = const()[name = tensor("denom_141_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_141_cast = rsqrt(epsilon = denom_141_epsilon_0_to_fp16, x = var_4816_cast)[name = tensor("denom_141_cast")]; + tensor out_141_cast = mul(x = zero_mean_141_cast, y = denom_141_cast)[name = tensor("out_141_cast")]; + tensor var_4820_to_fp16 = const()[name = tensor("op_4820_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(610217728)))]; + tensor var_4821_cast = add(x = out_141_cast, y = var_4820_to_fp16)[name = tensor("op_4821_cast")]; + tensor var_4823_to_fp16 = const()[name = tensor("op_4823_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(610220352)))]; + tensor hidden_states_193_cast = mul(x = var_4821_cast, y = var_4823_to_fp16)[name = tensor("hidden_states_193_cast")]; + tensor var_4830 = const()[name = tensor("op_4830"), val = tensor([1, 1])]; + tensor var_4832 = const()[name = tensor("op_4832"), val = tensor([1, 1])]; + tensor q_95_pad_type_0 = const()[name = tensor("q_95_pad_type_0"), val = tensor("custom")]; + tensor q_95_pad_0 = const()[name = tensor("q_95_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(610222976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(611042240))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_95_cast = conv(dilations = var_4832, groups = var_1186, pad = q_95_pad_0, pad_type = q_95_pad_type_0, strides = var_4830, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_193_cast)[name = tensor("q_95_cast")]; + tensor var_4836 = const()[name = tensor("op_4836"), val = tensor([1, 1])]; + tensor var_4838 = const()[name = tensor("op_4838"), val = tensor([1, 1])]; + tensor k_95_pad_type_0 = const()[name = tensor("k_95_pad_type_0"), val = tensor("custom")]; + tensor k_95_pad_0 = const()[name = tensor("k_95_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(611042368))), lut = tensor([-0x1.1a8p-6, -0x1.56cp-8, 0x1.57cp-8, 0x1.1b4p-6]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_95_cast = conv(dilations = var_4838, groups = var_1186, pad = k_95_pad_0, pad_type = k_95_pad_type_0, strides = var_4836, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_95_cast")]; + tensor var_4842 = const()[name = tensor("op_4842"), val = tensor([1, 1])]; + tensor var_4844 = const()[name = tensor("op_4844"), val = tensor([1, 1])]; + tensor v_95_pad_type_0 = const()[name = tensor("v_95_pad_type_0"), val = tensor("custom")]; + tensor v_95_pad_0 = const()[name = tensor("v_95_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(611697792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(613008576))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_95_cast = conv(dilations = var_4844, groups = var_1186, pad = v_95_pad_0, pad_type = v_95_pad_type_0, strides = var_4842, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_95_cast")]; + tensor var_4848 = const()[name = tensor("op_4848"), val = tensor([2, 20, 64, -1])]; + tensor var_4849_cast = reshape(shape = var_4848, x = q_95_cast)[name = tensor("op_4849_cast")]; + tensor var_4850 = const()[name = tensor("op_4850"), val = tensor([2, 20, 64, -1])]; + tensor var_4851_cast = reshape(shape = var_4850, x = k_95_cast)[name = tensor("op_4851_cast")]; + tensor var_4852 = const()[name = tensor("op_4852"), val = tensor([2, 20, 64, -1])]; + tensor var_4853_cast = reshape(shape = var_4852, x = v_95_cast)[name = tensor("op_4853_cast")]; + tensor attn_weights_189_transpose_x_0 = const()[name = tensor("attn_weights_189_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_189_transpose_y_0 = const()[name = tensor("attn_weights_189_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_189_cast = matmul(transpose_x = attn_weights_189_transpose_x_0, transpose_y = attn_weights_189_transpose_y_0, x = var_4849_cast, y = var_4851_cast)[name = tensor("attn_weights_189_cast")]; + tensor attn_weights_191_cast = mul(x = attn_weights_189_cast, y = var_1177_to_fp16)[name = tensor("attn_weights_191_cast")]; + tensor var_4857_cast = softmax(axis = var_1170, x = attn_weights_191_cast)[name = tensor("op_4857_cast")]; + tensor attn_95_transpose_x_0 = const()[name = tensor("attn_95_transpose_x_0"), val = tensor(false)]; + tensor attn_95_transpose_y_0 = const()[name = tensor("attn_95_transpose_y_0"), val = tensor(true)]; + tensor attn_95_cast = matmul(transpose_x = attn_95_transpose_x_0, transpose_y = attn_95_transpose_y_0, x = var_4853_cast, y = var_4857_cast)[name = tensor("attn_95_cast")]; + tensor var_4861 = const()[name = tensor("op_4861"), val = tensor([2, 1280, 1, -1])]; + tensor input_303_cast = reshape(shape = var_4861, x = attn_95_cast)[name = tensor("input_303_cast")]; + tensor var_4866 = const()[name = tensor("op_4866"), val = tensor([1, 1])]; + tensor var_4868 = const()[name = tensor("op_4868"), val = tensor([1, 1])]; + tensor var_4870_pad_type_0 = const()[name = tensor("op_4870_pad_type_0"), val = tensor("custom")]; + tensor var_4870_pad_0 = const()[name = tensor("op_4870_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(613008704))), lut = tensor([-0x1.e74p-8, 0x1.e64p-8]), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(613213568)))]; + tensor var_4870_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_4868, groups = var_1186, pad = var_4870_pad_0, pad_type = var_4870_pad_type_0, strides = var_4866, weight = down_blocks_2_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_303_cast)[name = tensor("op_4870_cast")]; + tensor inputs_143_cast = add(x = var_4870_cast, y = inputs_141_cast)[name = tensor("inputs_143_cast")]; + tensor var_4874 = const()[name = tensor("op_4874"), val = tensor([1])]; + tensor channels_mean_143_cast = reduce_mean(axes = var_4874, keep_dims = var_1181, x = inputs_143_cast)[name = tensor("channels_mean_143_cast")]; + tensor zero_mean_143_cast = sub(x = inputs_143_cast, y = channels_mean_143_cast)[name = tensor("zero_mean_143_cast")]; + tensor zero_mean_sq_143_cast = mul(x = zero_mean_143_cast, y = zero_mean_143_cast)[name = tensor("zero_mean_sq_143_cast")]; + tensor var_4878 = const()[name = tensor("op_4878"), val = tensor([1])]; + tensor var_4879_cast = reduce_mean(axes = var_4878, keep_dims = var_1181, x = zero_mean_sq_143_cast)[name = tensor("op_4879_cast")]; + tensor var_4880_to_fp16 = const()[name = tensor("op_4880_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_4881_cast = add(x = var_4879_cast, y = var_4880_to_fp16)[name = tensor("op_4881_cast")]; + tensor denom_143_epsilon_0_to_fp16 = const()[name = tensor("denom_143_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_143_cast = rsqrt(epsilon = denom_143_epsilon_0_to_fp16, x = var_4881_cast)[name = tensor("denom_143_cast")]; + tensor out_143_cast = mul(x = zero_mean_143_cast, y = denom_143_cast)[name = tensor("out_143_cast")]; + tensor var_4885_to_fp16 = const()[name = tensor("op_4885_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(613216192)))]; + tensor var_4886_cast = add(x = out_143_cast, y = var_4885_to_fp16)[name = tensor("op_4886_cast")]; + tensor var_4888_to_fp16 = const()[name = tensor("op_4888_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(613218816)))]; + tensor input_305_cast = mul(x = var_4886_cast, y = var_4888_to_fp16)[name = tensor("input_305_cast")]; + tensor var_4896 = const()[name = tensor("op_4896"), val = tensor([1, 1])]; + tensor var_4898 = const()[name = tensor("op_4898"), val = tensor([1, 1])]; + tensor var_4900_pad_type_0 = const()[name = tensor("op_4900_pad_type_0"), val = tensor("custom")]; + tensor var_4900_pad_0 = const()[name = tensor("op_4900_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(613221440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(623051904))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(623052096)))]; + tensor var_4900_cast = conv(bias = 
down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_4898, groups = var_1186, pad = var_4900_pad_0, pad_type = var_4900_pad_type_0, strides = var_4896, weight = down_blocks_2_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_305_cast)[name = tensor("op_4900_cast")]; + tensor var_4901_split_sizes_0 = const()[name = tensor("op_4901_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_4901_axis_0 = const()[name = tensor("op_4901_axis_0"), val = tensor(1)]; + tensor var_4901_cast_0, tensor var_4901_cast_1 = split(axis = var_4901_axis_0, split_sizes = var_4901_split_sizes_0, x = var_4900_cast)[name = tensor("op_4901_cast")]; + tensor var_4903_mode_0 = const()[name = tensor("op_4903_mode_0"), val = tensor("EXACT")]; + tensor var_4903_cast = gelu(mode = var_4903_mode_0, x = var_4901_cast_1)[name = tensor("op_4903_cast")]; + tensor input_307_cast = mul(x = var_4901_cast_0, y = var_4903_cast)[name = tensor("input_307_cast")]; + tensor var_4907 = const()[name = tensor("op_4907"), val = tensor([1, 1])]; + tensor var_4909 = const()[name = tensor("op_4909"), val = tensor([1, 1])]; + tensor var_4911_pad_type_0 = const()[name = tensor("op_4911_pad_type_0"), val = tensor("custom")]; + tensor var_4911_pad_0 = const()[name = tensor("op_4911_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(623072640))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(626349504))), name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(626349632)))]; + tensor var_4911_cast = conv(bias = down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_4909, groups = var_1186, pad = var_4911_pad_0, pad_type = var_4911_pad_type_0, strides = var_4907, weight = down_blocks_2_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_307_cast)[name = tensor("op_4911_cast")]; + tensor hidden_states_197_cast = add(x = var_4911_cast, y = inputs_143_cast)[name = tensor("hidden_states_197_cast")]; + tensor var_4913 = const()[name = tensor("op_4913"), val = tensor([2, 1280, 32, 32])]; + tensor input_309_cast = reshape(shape = var_4913, x = hidden_states_197_cast)[name = tensor("input_309_cast")]; + tensor var_4917 = const()[name = tensor("op_4917"), val = tensor([1, 1])]; + tensor var_4919 = const()[name = tensor("op_4919"), val = tensor([1, 1])]; + tensor hidden_states_199_pad_type_0 = const()[name = tensor("hidden_states_199_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_199_pad_0 = const()[name = tensor("hidden_states_199_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor down_blocks_2_attentions_1_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(626352256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(627990720))), name = 
tensor("down_blocks_2_attentions_1_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor down_blocks_2_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("down_blocks_2_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(627991296)))]; + tensor hidden_states_199_cast = conv(bias = down_blocks_2_attentions_1_proj_out_bias_to_fp16, dilations = var_4919, groups = var_1186, pad = hidden_states_199_pad_0, pad_type = hidden_states_199_pad_type_0, strides = var_4917, weight = down_blocks_2_attentions_1_proj_out_weight_to_fp16_palettized, x = input_309_cast)[name = tensor("hidden_states_199_cast")]; + tensor input_311_cast = add(x = hidden_states_199_cast, y = hidden_states_133_cast)[name = tensor("input_311_cast")]; + tensor var_4927 = const()[name = tensor("op_4927"), val = tensor(3)]; + tensor var_4938 = const()[name = tensor("op_4938"), val = tensor(true)]; + tensor var_4943 = const()[name = tensor("op_4943"), val = tensor(1)]; + tensor reshape_64_shape_0 = const()[name = tensor("reshape_64_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_64_cast = reshape(shape = reshape_64_shape_0, x = input_311_cast)[name = tensor("reshape_64_cast")]; + tensor reduce_mean_48_axes_0 = const()[name = tensor("reduce_mean_48_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_48_keep_dims_0 = const()[name = tensor("reduce_mean_48_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_48_cast = reduce_mean(axes = reduce_mean_48_axes_0, keep_dims = reduce_mean_48_keep_dims_0, x = reshape_64_cast)[name = tensor("reduce_mean_48_cast")]; + tensor sub_32_cast = sub(x = reshape_64_cast, y = reduce_mean_48_cast)[name = tensor("sub_32_cast")]; + tensor square_16_cast = square(x = sub_32_cast)[name = tensor("square_16_cast")]; + tensor reduce_mean_50_axes_0 = const()[name = tensor("reduce_mean_50_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_50_keep_dims_0 = const()[name = tensor("reduce_mean_50_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_50_cast = reduce_mean(axes = reduce_mean_50_axes_0, keep_dims = reduce_mean_50_keep_dims_0, x = square_16_cast)[name = tensor("reduce_mean_50_cast")]; + tensor add_32_y_0_to_fp16 = const()[name = tensor("add_32_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_32_cast = add(x = reduce_mean_50_cast, y = add_32_y_0_to_fp16)[name = tensor("add_32_cast")]; + tensor sqrt_16_cast = sqrt(x = add_32_cast)[name = tensor("sqrt_16_cast")]; + tensor real_div_16_cast = real_div(x = sub_32_cast, y = sqrt_16_cast)[name = tensor("real_div_16_cast")]; + tensor reshape_65_shape_0 = const()[name = tensor("reshape_65_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_65_cast = reshape(shape = reshape_65_shape_0, x = real_div_16_cast)[name = tensor("reshape_65_cast")]; + tensor add_33_gamma_0_to_fp16 = const()[name = tensor("add_33_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(627993920)))]; + tensor add_33_beta_0_to_fp16 = const()[name = tensor("add_33_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(627996544)))]; + tensor add_33_epsilon_0_to_fp16 = const()[name = tensor("add_33_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_33_cast = batch_norm(beta = add_33_beta_0_to_fp16, epsilon = add_33_epsilon_0_to_fp16, gamma = add_33_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x 
= reshape_65_cast)[name = tensor("add_33_cast")]; + tensor input_315_cast = silu(x = add_33_cast)[name = tensor("input_315_cast")]; + tensor var_4961 = const()[name = tensor("op_4961"), val = tensor([1, 1])]; + tensor var_4963 = const()[name = tensor("op_4963"), val = tensor([1, 1])]; + tensor hidden_states_201_pad_type_0 = const()[name = tensor("hidden_states_201_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_201_pad_0 = const()[name = tensor("hidden_states_201_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor mid_block_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(627999168))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(639058432))), name = tensor("mid_block_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor mid_block_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("mid_block_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(639058624)))]; + tensor hidden_states_201_cast = conv(bias = mid_block_resnets_0_conv1_bias_to_fp16, dilations = var_4963, groups = var_4943, pad = hidden_states_201_pad_0, pad_type = hidden_states_201_pad_type_0, strides = var_4961, weight = mid_block_resnets_0_conv1_weight_to_fp16_palettized, x = input_315_cast)[name = tensor("hidden_states_201_cast")]; + tensor var_4969 = const()[name = tensor("op_4969"), val = tensor([1, 1])]; + tensor var_4971 = const()[name = tensor("op_4971"), val = tensor([1, 1])]; + tensor temb_13_pad_type_0 = const()[name = tensor("temb_13_pad_type_0"), val = tensor("custom")]; + tensor temb_13_pad_0 = const()[name = tensor("temb_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(639061248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(640290112))), name = tensor("mid_block_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("mid_block_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(640290304)))]; + tensor temb_13_cast = conv(bias = mid_block_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_4971, groups = var_4943, pad = temb_13_pad_0, pad_type = temb_13_pad_type_0, strides = var_4969, weight = mid_block_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_13_cast")]; + tensor input_319_cast = add(x = hidden_states_201_cast, y = temb_13_cast)[name = tensor("input_319_cast")]; + tensor reshape_68_shape_0 = const()[name = tensor("reshape_68_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_68_cast = reshape(shape = reshape_68_shape_0, x = input_319_cast)[name = tensor("reshape_68_cast")]; + tensor reduce_mean_51_axes_0 = const()[name = tensor("reduce_mean_51_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_51_keep_dims_0 = const()[name = tensor("reduce_mean_51_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_51_cast = reduce_mean(axes = reduce_mean_51_axes_0, keep_dims = reduce_mean_51_keep_dims_0, x = reshape_68_cast)[name = tensor("reduce_mean_51_cast")]; + tensor sub_34_cast = 
sub(x = reshape_68_cast, y = reduce_mean_51_cast)[name = tensor("sub_34_cast")]; + tensor square_17_cast = square(x = sub_34_cast)[name = tensor("square_17_cast")]; + tensor reduce_mean_53_axes_0 = const()[name = tensor("reduce_mean_53_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_53_keep_dims_0 = const()[name = tensor("reduce_mean_53_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_53_cast = reduce_mean(axes = reduce_mean_53_axes_0, keep_dims = reduce_mean_53_keep_dims_0, x = square_17_cast)[name = tensor("reduce_mean_53_cast")]; + tensor add_34_y_0_to_fp16 = const()[name = tensor("add_34_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_34_cast = add(x = reduce_mean_53_cast, y = add_34_y_0_to_fp16)[name = tensor("add_34_cast")]; + tensor sqrt_17_cast = sqrt(x = add_34_cast)[name = tensor("sqrt_17_cast")]; + tensor real_div_17_cast = real_div(x = sub_34_cast, y = sqrt_17_cast)[name = tensor("real_div_17_cast")]; + tensor reshape_69_shape_0 = const()[name = tensor("reshape_69_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_69_cast = reshape(shape = reshape_69_shape_0, x = real_div_17_cast)[name = tensor("reshape_69_cast")]; + tensor add_35_gamma_0_to_fp16 = const()[name = tensor("add_35_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(640292928)))]; + tensor add_35_beta_0_to_fp16 = const()[name = tensor("add_35_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(640295552)))]; + tensor add_35_epsilon_0_to_fp16 = const()[name = tensor("add_35_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_35_cast = batch_norm(beta = add_35_beta_0_to_fp16, epsilon = add_35_epsilon_0_to_fp16, gamma = add_35_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_69_cast)[name = tensor("add_35_cast")]; + tensor input_323_cast = silu(x = add_35_cast)[name = tensor("input_323_cast")]; + tensor var_4981 = const()[name = tensor("op_4981"), val = tensor([1, 1])]; + tensor var_4983 = const()[name = tensor("op_4983"), val = tensor([1, 1])]; + tensor hidden_states_203_pad_type_0 = const()[name = tensor("hidden_states_203_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_203_pad_0 = const()[name = tensor("hidden_states_203_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor mid_block_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(640298176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651357440))), name = tensor("mid_block_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor mid_block_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("mid_block_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651357632)))]; + tensor hidden_states_203_cast = conv(bias = mid_block_resnets_0_conv2_bias_to_fp16, dilations = var_4983, groups = var_4943, pad = hidden_states_203_pad_0, pad_type = hidden_states_203_pad_type_0, strides = var_4981, weight = mid_block_resnets_0_conv2_weight_to_fp16_palettized, x = input_323_cast)[name = tensor("hidden_states_203_cast")]; + tensor hidden_states_205_cast = add(x = input_311_cast, y = hidden_states_203_cast)[name = tensor("hidden_states_205_cast")]; + tensor reshape_72_shape_0 = const()[name = tensor("reshape_72_shape_0"), val = 
tensor([2, 32, 40, 32, 32])]; + tensor reshape_72_cast = reshape(shape = reshape_72_shape_0, x = hidden_states_205_cast)[name = tensor("reshape_72_cast")]; + tensor reduce_mean_54_axes_0 = const()[name = tensor("reduce_mean_54_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_54_keep_dims_0 = const()[name = tensor("reduce_mean_54_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_54_cast = reduce_mean(axes = reduce_mean_54_axes_0, keep_dims = reduce_mean_54_keep_dims_0, x = reshape_72_cast)[name = tensor("reduce_mean_54_cast")]; + tensor sub_36_cast = sub(x = reshape_72_cast, y = reduce_mean_54_cast)[name = tensor("sub_36_cast")]; + tensor square_18_cast = square(x = sub_36_cast)[name = tensor("square_18_cast")]; + tensor reduce_mean_56_axes_0 = const()[name = tensor("reduce_mean_56_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_56_keep_dims_0 = const()[name = tensor("reduce_mean_56_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_56_cast = reduce_mean(axes = reduce_mean_56_axes_0, keep_dims = reduce_mean_56_keep_dims_0, x = square_18_cast)[name = tensor("reduce_mean_56_cast")]; + tensor add_36_y_0_to_fp16 = const()[name = tensor("add_36_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_36_cast = add(x = reduce_mean_56_cast, y = add_36_y_0_to_fp16)[name = tensor("add_36_cast")]; + tensor sqrt_18_cast = sqrt(x = add_36_cast)[name = tensor("sqrt_18_cast")]; + tensor real_div_18_cast = real_div(x = sub_36_cast, y = sqrt_18_cast)[name = tensor("real_div_18_cast")]; + tensor reshape_73_shape_0 = const()[name = tensor("reshape_73_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_73_cast = reshape(shape = reshape_73_shape_0, x = real_div_18_cast)[name = tensor("reshape_73_cast")]; + tensor add_37_gamma_0_to_fp16 = const()[name = tensor("add_37_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651360256)))]; + tensor add_37_beta_0_to_fp16 = const()[name = tensor("add_37_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651362880)))]; + tensor add_37_epsilon_0_to_fp16 = const()[name = tensor("add_37_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_37_cast = batch_norm(beta = add_37_beta_0_to_fp16, epsilon = add_37_epsilon_0_to_fp16, gamma = add_37_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_73_cast)[name = tensor("add_37_cast")]; + tensor var_5021 = const()[name = tensor("op_5021"), val = tensor([1, 1])]; + tensor var_5023 = const()[name = tensor("op_5023"), val = tensor([1, 1])]; + tensor hidden_states_207_pad_type_0 = const()[name = tensor("hidden_states_207_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_207_pad_0 = const()[name = tensor("hidden_states_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(651365504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(653003968))), name = tensor("mid_block_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(653004544)))]; + tensor hidden_states_207_cast = conv(bias = 
mid_block_attentions_0_proj_in_bias_to_fp16, dilations = var_5023, groups = var_4943, pad = hidden_states_207_pad_0, pad_type = hidden_states_207_pad_type_0, strides = var_5021, weight = mid_block_attentions_0_proj_in_weight_to_fp16_palettized, x = add_37_cast)[name = tensor("hidden_states_207_cast")]; + tensor var_5028 = const()[name = tensor("op_5028"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_145_cast = reshape(shape = var_5028, x = hidden_states_207_cast)[name = tensor("inputs_145_cast")]; + tensor var_5038 = const()[name = tensor("op_5038"), val = tensor([1])]; + tensor channels_mean_145_cast = reduce_mean(axes = var_5038, keep_dims = var_4938, x = inputs_145_cast)[name = tensor("channels_mean_145_cast")]; + tensor zero_mean_145_cast = sub(x = inputs_145_cast, y = channels_mean_145_cast)[name = tensor("zero_mean_145_cast")]; + tensor zero_mean_sq_145_cast = mul(x = zero_mean_145_cast, y = zero_mean_145_cast)[name = tensor("zero_mean_sq_145_cast")]; + tensor var_5042 = const()[name = tensor("op_5042"), val = tensor([1])]; + tensor var_5043_cast = reduce_mean(axes = var_5042, keep_dims = var_4938, x = zero_mean_sq_145_cast)[name = tensor("op_5043_cast")]; + tensor var_5044_to_fp16 = const()[name = tensor("op_5044_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5045_cast = add(x = var_5043_cast, y = var_5044_to_fp16)[name = tensor("op_5045_cast")]; + tensor denom_145_epsilon_0_to_fp16 = const()[name = tensor("denom_145_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_145_cast = rsqrt(epsilon = denom_145_epsilon_0_to_fp16, x = var_5045_cast)[name = tensor("denom_145_cast")]; + tensor out_145_cast = mul(x = zero_mean_145_cast, y = denom_145_cast)[name = tensor("out_145_cast")]; + tensor var_5049_to_fp16 = const()[name = tensor("op_5049_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(653007168)))]; + tensor var_5050_cast = add(x = out_145_cast, y = var_5049_to_fp16)[name = tensor("op_5050_cast")]; + tensor var_5052_to_fp16 = const()[name = tensor("op_5052_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(653009792)))]; + tensor hidden_states_209_cast = mul(x = var_5050_cast, y = var_5052_to_fp16)[name = tensor("hidden_states_209_cast")]; + tensor var_5059 = const()[name = tensor("op_5059"), val = tensor([1, 1])]; + tensor var_5061 = const()[name = tensor("op_5061"), val = tensor([1, 1])]; + tensor q_97_pad_type_0 = const()[name = tensor("q_97_pad_type_0"), val = tensor("custom")]; + tensor q_97_pad_0 = const()[name = tensor("q_97_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(653012416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(654241280))), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_97_cast = conv(dilations = var_5061, groups = var_4943, pad = q_97_pad_0, pad_type = q_97_pad_type_0, strides = var_5059, weight = mid_block_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_209_cast)[name = tensor("q_97_cast")]; + tensor var_5065 = const()[name = tensor("op_5065"), val = tensor([1, 1])]; + tensor var_5067 = const()[name = tensor("op_5067"), val = tensor([1, 1])]; + tensor k_97_pad_type_0 = 
const()[name = tensor("k_97_pad_type_0"), val = tensor("custom")]; + tensor k_97_pad_0 = const()[name = tensor("k_97_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(654241472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(655470336))), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_97_cast = conv(dilations = var_5067, groups = var_4943, pad = k_97_pad_0, pad_type = k_97_pad_type_0, strides = var_5065, weight = mid_block_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_209_cast)[name = tensor("k_97_cast")]; + tensor var_5071 = const()[name = tensor("op_5071"), val = tensor([1, 1])]; + tensor var_5073 = const()[name = tensor("op_5073"), val = tensor([1, 1])]; + tensor v_97_pad_type_0 = const()[name = tensor("v_97_pad_type_0"), val = tensor("custom")]; + tensor v_97_pad_0 = const()[name = tensor("v_97_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(655470528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(656699392))), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_97_cast = conv(dilations = var_5073, groups = var_4943, pad = v_97_pad_0, pad_type = v_97_pad_type_0, strides = var_5071, weight = mid_block_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_209_cast)[name = tensor("v_97_cast")]; + tensor var_5077 = const()[name = tensor("op_5077"), val = tensor([2, 20, 64, -1])]; + tensor var_5078_cast = reshape(shape = var_5077, x = q_97_cast)[name = tensor("op_5078_cast")]; + tensor var_5079 = const()[name = tensor("op_5079"), val = tensor([2, 20, 64, -1])]; + tensor var_5080_cast = reshape(shape = var_5079, x = k_97_cast)[name = tensor("op_5080_cast")]; + tensor var_5081 = const()[name = tensor("op_5081"), val = tensor([2, 20, 64, -1])]; + tensor var_5082_cast = reshape(shape = var_5081, x = v_97_cast)[name = tensor("op_5082_cast")]; + tensor attn_weights_193_transpose_x_0 = const()[name = tensor("attn_weights_193_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_193_transpose_y_0 = const()[name = tensor("attn_weights_193_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_193_cast = matmul(transpose_x = attn_weights_193_transpose_x_0, transpose_y = attn_weights_193_transpose_y_0, x = var_5078_cast, y = var_5080_cast)[name = tensor("attn_weights_193_cast")]; + tensor var_4934_to_fp16 = const()[name = tensor("op_4934_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_195_cast = mul(x = attn_weights_193_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_195_cast")]; + tensor var_5086_cast = softmax(axis = var_4927, x = attn_weights_195_cast)[name = tensor("op_5086_cast")]; + tensor attn_97_transpose_x_0 = const()[name = tensor("attn_97_transpose_x_0"), val = tensor(false)]; + tensor attn_97_transpose_y_0 = const()[name = tensor("attn_97_transpose_y_0"), val = tensor(true)]; + tensor attn_97_cast = matmul(transpose_x = 
attn_97_transpose_x_0, transpose_y = attn_97_transpose_y_0, x = var_5082_cast, y = var_5086_cast)[name = tensor("attn_97_cast")]; + tensor var_5090 = const()[name = tensor("op_5090"), val = tensor([2, 1280, 1, -1])]; + tensor input_327_cast = reshape(shape = var_5090, x = attn_97_cast)[name = tensor("input_327_cast")]; + tensor var_5095 = const()[name = tensor("op_5095"), val = tensor([1, 1])]; + tensor var_5097 = const()[name = tensor("op_5097"), val = tensor([1, 1])]; + tensor var_5099_pad_type_0 = const()[name = tensor("op_5099_pad_type_0"), val = tensor("custom")]; + tensor var_5099_pad_0 = const()[name = tensor("op_5099_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(656699584))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657928448))), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657928640)))]; + tensor var_5099_cast = conv(bias = mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_5097, groups = var_4943, pad = var_5099_pad_0, pad_type = var_5099_pad_type_0, strides = var_5095, weight = mid_block_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_327_cast)[name = tensor("op_5099_cast")]; + tensor inputs_147_cast = add(x = var_5099_cast, y = inputs_145_cast)[name = tensor("inputs_147_cast")]; + tensor var_5103 = const()[name = tensor("op_5103"), val = tensor([1])]; + tensor channels_mean_147_cast = reduce_mean(axes = var_5103, keep_dims = var_4938, x = inputs_147_cast)[name = tensor("channels_mean_147_cast")]; + tensor zero_mean_147_cast = sub(x = inputs_147_cast, y = channels_mean_147_cast)[name = tensor("zero_mean_147_cast")]; + tensor zero_mean_sq_147_cast = mul(x = zero_mean_147_cast, y = zero_mean_147_cast)[name = tensor("zero_mean_sq_147_cast")]; + tensor var_5107 = const()[name = tensor("op_5107"), val = tensor([1])]; + tensor var_5108_cast = reduce_mean(axes = var_5107, keep_dims = var_4938, x = zero_mean_sq_147_cast)[name = tensor("op_5108_cast")]; + tensor var_5109_to_fp16 = const()[name = tensor("op_5109_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5110_cast = add(x = var_5108_cast, y = var_5109_to_fp16)[name = tensor("op_5110_cast")]; + tensor denom_147_epsilon_0_to_fp16 = const()[name = tensor("denom_147_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_147_cast = rsqrt(epsilon = denom_147_epsilon_0_to_fp16, x = var_5110_cast)[name = tensor("denom_147_cast")]; + tensor out_147_cast = mul(x = zero_mean_147_cast, y = denom_147_cast)[name = tensor("out_147_cast")]; + tensor var_5114_to_fp16 = const()[name = tensor("op_5114_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657931264)))]; + tensor var_5115_cast = add(x = out_147_cast, y = var_5114_to_fp16)[name = tensor("op_5115_cast")]; + tensor var_5117_to_fp16 = const()[name = tensor("op_5117_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(657933888)))]; + tensor hidden_states_211_cast = mul(x = var_5115_cast, y = var_5117_to_fp16)[name = tensor("hidden_states_211_cast")]; + tensor var_5124 = const()[name = tensor("op_5124"), val = tensor([1, 1])]; + tensor var_5126 = const()[name = tensor("op_5126"), val = tensor([1, 1])]; + tensor q_99_pad_type_0 = const()[name = tensor("q_99_pad_type_0"), val = tensor("custom")]; + tensor q_99_pad_0 = const()[name = tensor("q_99_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(657936512))), lut = tensor([-0x1.67cp-6, -0x1.c38p-8, 0x1.c3cp-8, 0x1.68p-6]), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_99_cast = conv(dilations = var_5126, groups = var_4943, pad = q_99_pad_0, pad_type = q_99_pad_type_0, strides = var_5124, weight = mid_block_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_211_cast)[name = tensor("q_99_cast")]; + tensor var_5130 = const()[name = tensor("op_5130"), val = tensor([1, 1])]; + tensor var_5132 = const()[name = tensor("op_5132"), val = tensor([1, 1])]; + tensor k_99_pad_type_0 = const()[name = tensor("k_99_pad_type_0"), val = tensor("custom")]; + tensor k_99_pad_0 = const()[name = tensor("k_99_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(658346176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(659656960))), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_99_cast = conv(dilations = var_5132, groups = var_4943, pad = k_99_pad_0, pad_type = k_99_pad_type_0, strides = var_5130, weight = mid_block_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_99_cast")]; + tensor var_5136 = const()[name = tensor("op_5136"), val = tensor([1, 1])]; + tensor var_5138 = const()[name = tensor("op_5138"), val = tensor([1, 1])]; + tensor v_99_pad_type_0 = const()[name = tensor("v_99_pad_type_0"), val = tensor("custom")]; + tensor v_99_pad_0 = const()[name = tensor("v_99_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(659657088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(660967872))), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_99_cast = conv(dilations = var_5138, groups = var_4943, pad = v_99_pad_0, pad_type = v_99_pad_type_0, strides = var_5136, weight = mid_block_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_99_cast")]; + tensor var_5142 = const()[name = tensor("op_5142"), val = tensor([2, 20, 64, -1])]; + tensor var_5143_cast = reshape(shape = var_5142, x = q_99_cast)[name = tensor("op_5143_cast")]; + tensor var_5144 = const()[name = 
tensor("op_5144"), val = tensor([2, 20, 64, -1])]; + tensor var_5145_cast = reshape(shape = var_5144, x = k_99_cast)[name = tensor("op_5145_cast")]; + tensor var_5146 = const()[name = tensor("op_5146"), val = tensor([2, 20, 64, -1])]; + tensor var_5147_cast = reshape(shape = var_5146, x = v_99_cast)[name = tensor("op_5147_cast")]; + tensor attn_weights_197_transpose_x_0 = const()[name = tensor("attn_weights_197_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_197_transpose_y_0 = const()[name = tensor("attn_weights_197_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_197_cast = matmul(transpose_x = attn_weights_197_transpose_x_0, transpose_y = attn_weights_197_transpose_y_0, x = var_5143_cast, y = var_5145_cast)[name = tensor("attn_weights_197_cast")]; + tensor attn_weights_199_cast = mul(x = attn_weights_197_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_199_cast")]; + tensor var_5151_cast = softmax(axis = var_4927, x = attn_weights_199_cast)[name = tensor("op_5151_cast")]; + tensor attn_99_transpose_x_0 = const()[name = tensor("attn_99_transpose_x_0"), val = tensor(false)]; + tensor attn_99_transpose_y_0 = const()[name = tensor("attn_99_transpose_y_0"), val = tensor(true)]; + tensor attn_99_cast = matmul(transpose_x = attn_99_transpose_x_0, transpose_y = attn_99_transpose_y_0, x = var_5147_cast, y = var_5151_cast)[name = tensor("attn_99_cast")]; + tensor var_5155 = const()[name = tensor("op_5155"), val = tensor([2, 1280, 1, -1])]; + tensor input_329_cast = reshape(shape = var_5155, x = attn_99_cast)[name = tensor("input_329_cast")]; + tensor var_5160 = const()[name = tensor("op_5160"), val = tensor([1, 1])]; + tensor var_5162 = const()[name = tensor("op_5162"), val = tensor([1, 1])]; + tensor var_5164_pad_type_0 = const()[name = tensor("op_5164_pad_type_0"), val = tensor("custom")]; + tensor var_5164_pad_0 = const()[name = tensor("op_5164_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(660968000))), lut = tensor([-0x1.69p-7, -0x1.aep-9, 0x1.aap-9, 0x1.67cp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(661377664)))]; + tensor var_5164_cast = conv(bias = mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_5162, groups = var_4943, pad = var_5164_pad_0, pad_type = var_5164_pad_type_0, strides = var_5160, weight = mid_block_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_329_cast)[name = tensor("op_5164_cast")]; + tensor inputs_149_cast = add(x = var_5164_cast, y = inputs_147_cast)[name = tensor("inputs_149_cast")]; + tensor var_5168 = const()[name = tensor("op_5168"), val = tensor([1])]; + tensor channels_mean_149_cast = reduce_mean(axes = var_5168, keep_dims = var_4938, x = inputs_149_cast)[name = tensor("channels_mean_149_cast")]; + tensor zero_mean_149_cast = sub(x = inputs_149_cast, y = channels_mean_149_cast)[name = tensor("zero_mean_149_cast")]; + tensor zero_mean_sq_149_cast = mul(x = zero_mean_149_cast, y = 
zero_mean_149_cast)[name = tensor("zero_mean_sq_149_cast")]; + tensor var_5172 = const()[name = tensor("op_5172"), val = tensor([1])]; + tensor var_5173_cast = reduce_mean(axes = var_5172, keep_dims = var_4938, x = zero_mean_sq_149_cast)[name = tensor("op_5173_cast")]; + tensor var_5174_to_fp16 = const()[name = tensor("op_5174_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5175_cast = add(x = var_5173_cast, y = var_5174_to_fp16)[name = tensor("op_5175_cast")]; + tensor denom_149_epsilon_0_to_fp16 = const()[name = tensor("denom_149_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_149_cast = rsqrt(epsilon = denom_149_epsilon_0_to_fp16, x = var_5175_cast)[name = tensor("denom_149_cast")]; + tensor out_149_cast = mul(x = zero_mean_149_cast, y = denom_149_cast)[name = tensor("out_149_cast")]; + tensor var_5179_to_fp16 = const()[name = tensor("op_5179_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(661380288)))]; + tensor var_5180_cast = add(x = out_149_cast, y = var_5179_to_fp16)[name = tensor("op_5180_cast")]; + tensor var_5182_to_fp16 = const()[name = tensor("op_5182_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(661382912)))]; + tensor input_331_cast = mul(x = var_5180_cast, y = var_5182_to_fp16)[name = tensor("input_331_cast")]; + tensor var_5190 = const()[name = tensor("op_5190"), val = tensor([1, 1])]; + tensor var_5192 = const()[name = tensor("op_5192"), val = tensor([1, 1])]; + tensor var_5194_pad_type_0 = const()[name = tensor("op_5194_pad_type_0"), val = tensor("custom")]; + tensor var_5194_pad_0 = const()[name = tensor("op_5194_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(661385536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(671216000))), name = tensor("mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(671216192)))]; + tensor var_5194_cast = conv(bias = mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_5192, groups = var_4943, pad = var_5194_pad_0, pad_type = var_5194_pad_type_0, strides = var_5190, weight = mid_block_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_331_cast)[name = tensor("op_5194_cast")]; + tensor var_5195_split_sizes_0 = const()[name = tensor("op_5195_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5195_axis_0 = const()[name = tensor("op_5195_axis_0"), val = tensor(1)]; + tensor var_5195_cast_0, tensor var_5195_cast_1 = split(axis = var_5195_axis_0, split_sizes = var_5195_split_sizes_0, x = var_5194_cast)[name = tensor("op_5195_cast")]; + tensor var_5197_mode_0 = const()[name = tensor("op_5197_mode_0"), val = tensor("EXACT")]; + tensor var_5197_cast = gelu(mode = var_5197_mode_0, x = var_5195_cast_1)[name = tensor("op_5197_cast")]; + tensor input_333_cast = mul(x = var_5195_cast_0, y = var_5197_cast)[name = tensor("input_333_cast")]; + tensor var_5201 = const()[name = tensor("op_5201"), 
val = tensor([1, 1])]; + tensor var_5203 = const()[name = tensor("op_5203"), val = tensor([1, 1])]; + tensor var_5205_pad_type_0 = const()[name = tensor("op_5205_pad_type_0"), val = tensor("custom")]; + tensor var_5205_pad_0 = const()[name = tensor("op_5205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(671236736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676152000))), name = tensor("mid_block_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676152192)))]; + tensor var_5205_cast = conv(bias = mid_block_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_5203, groups = var_4943, pad = var_5205_pad_0, pad_type = var_5205_pad_type_0, strides = var_5201, weight = mid_block_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_333_cast)[name = tensor("op_5205_cast")]; + tensor inputs_151_cast = add(x = var_5205_cast, y = inputs_149_cast)[name = tensor("inputs_151_cast")]; + tensor var_5215 = const()[name = tensor("op_5215"), val = tensor([1])]; + tensor channels_mean_151_cast = reduce_mean(axes = var_5215, keep_dims = var_4938, x = inputs_151_cast)[name = tensor("channels_mean_151_cast")]; + tensor zero_mean_151_cast = sub(x = inputs_151_cast, y = channels_mean_151_cast)[name = tensor("zero_mean_151_cast")]; + tensor zero_mean_sq_151_cast = mul(x = zero_mean_151_cast, y = zero_mean_151_cast)[name = tensor("zero_mean_sq_151_cast")]; + tensor var_5219 = const()[name = tensor("op_5219"), val = tensor([1])]; + tensor var_5220_cast = reduce_mean(axes = var_5219, keep_dims = var_4938, x = zero_mean_sq_151_cast)[name = tensor("op_5220_cast")]; + tensor var_5221_to_fp16 = const()[name = tensor("op_5221_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5222_cast = add(x = var_5220_cast, y = var_5221_to_fp16)[name = tensor("op_5222_cast")]; + tensor denom_151_epsilon_0_to_fp16 = const()[name = tensor("denom_151_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_151_cast = rsqrt(epsilon = denom_151_epsilon_0_to_fp16, x = var_5222_cast)[name = tensor("denom_151_cast")]; + tensor out_151_cast = mul(x = zero_mean_151_cast, y = denom_151_cast)[name = tensor("out_151_cast")]; + tensor var_5226_to_fp16 = const()[name = tensor("op_5226_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676154816)))]; + tensor var_5227_cast = add(x = out_151_cast, y = var_5226_to_fp16)[name = tensor("op_5227_cast")]; + tensor var_5229_to_fp16 = const()[name = tensor("op_5229_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676157440)))]; + tensor hidden_states_215_cast = mul(x = var_5227_cast, y = var_5229_to_fp16)[name = tensor("hidden_states_215_cast")]; + tensor var_5236 = const()[name = tensor("op_5236"), val = tensor([1, 1])]; + tensor var_5238 = const()[name = tensor("op_5238"), val = tensor([1, 1])]; + tensor q_101_pad_type_0 = const()[name = tensor("q_101_pad_type_0"), val = tensor("custom")]; + tensor q_101_pad_0 = 
const()[name = tensor("q_101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(676160064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(677388928))), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_101_cast = conv(dilations = var_5238, groups = var_4943, pad = q_101_pad_0, pad_type = q_101_pad_type_0, strides = var_5236, weight = mid_block_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_215_cast)[name = tensor("q_101_cast")]; + tensor var_5242 = const()[name = tensor("op_5242"), val = tensor([1, 1])]; + tensor var_5244 = const()[name = tensor("op_5244"), val = tensor([1, 1])]; + tensor k_101_pad_type_0 = const()[name = tensor("k_101_pad_type_0"), val = tensor("custom")]; + tensor k_101_pad_0 = const()[name = tensor("k_101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(677389120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(678617984))), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_101_cast = conv(dilations = var_5244, groups = var_4943, pad = k_101_pad_0, pad_type = k_101_pad_type_0, strides = var_5242, weight = mid_block_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_215_cast)[name = tensor("k_101_cast")]; + tensor var_5248 = const()[name = tensor("op_5248"), val = tensor([1, 1])]; + tensor var_5250 = const()[name = tensor("op_5250"), val = tensor([1, 1])]; + tensor v_101_pad_type_0 = const()[name = tensor("v_101_pad_type_0"), val = tensor("custom")]; + tensor v_101_pad_0 = const()[name = tensor("v_101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(678618176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(680256640))), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_101_cast = conv(dilations = var_5250, groups = var_4943, pad = v_101_pad_0, pad_type = v_101_pad_type_0, strides = var_5248, weight = mid_block_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_215_cast)[name = tensor("v_101_cast")]; + tensor var_5254 = const()[name = tensor("op_5254"), val = tensor([2, 20, 64, -1])]; + tensor var_5255_cast = reshape(shape = var_5254, x = q_101_cast)[name = tensor("op_5255_cast")]; + tensor var_5256 = const()[name = tensor("op_5256"), val = tensor([2, 20, 64, -1])]; + tensor var_5257_cast = reshape(shape = var_5256, x = k_101_cast)[name = tensor("op_5257_cast")]; + tensor var_5258 = const()[name = tensor("op_5258"), val = tensor([2, 20, 64, -1])]; + tensor var_5259_cast = reshape(shape = var_5258, x = v_101_cast)[name = tensor("op_5259_cast")]; + tensor 
attn_weights_201_transpose_x_0 = const()[name = tensor("attn_weights_201_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_201_transpose_y_0 = const()[name = tensor("attn_weights_201_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_201_cast = matmul(transpose_x = attn_weights_201_transpose_x_0, transpose_y = attn_weights_201_transpose_y_0, x = var_5255_cast, y = var_5257_cast)[name = tensor("attn_weights_201_cast")]; + tensor attn_weights_203_cast = mul(x = attn_weights_201_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_203_cast")]; + tensor var_5263_cast = softmax(axis = var_4927, x = attn_weights_203_cast)[name = tensor("op_5263_cast")]; + tensor attn_101_transpose_x_0 = const()[name = tensor("attn_101_transpose_x_0"), val = tensor(false)]; + tensor attn_101_transpose_y_0 = const()[name = tensor("attn_101_transpose_y_0"), val = tensor(true)]; + tensor attn_101_cast = matmul(transpose_x = attn_101_transpose_x_0, transpose_y = attn_101_transpose_y_0, x = var_5259_cast, y = var_5263_cast)[name = tensor("attn_101_cast")]; + tensor var_5267 = const()[name = tensor("op_5267"), val = tensor([2, 1280, 1, -1])]; + tensor input_335_cast = reshape(shape = var_5267, x = attn_101_cast)[name = tensor("input_335_cast")]; + tensor var_5272 = const()[name = tensor("op_5272"), val = tensor([1, 1])]; + tensor var_5274 = const()[name = tensor("op_5274"), val = tensor([1, 1])]; + tensor var_5276_pad_type_0 = const()[name = tensor("op_5276_pad_type_0"), val = tensor("custom")]; + tensor var_5276_pad_0 = const()[name = tensor("op_5276_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(680257216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(681486080))), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(681486272)))]; + tensor var_5276_cast = conv(bias = mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_5274, groups = var_4943, pad = var_5276_pad_0, pad_type = var_5276_pad_type_0, strides = var_5272, weight = mid_block_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_335_cast)[name = tensor("op_5276_cast")]; + tensor inputs_153_cast = add(x = var_5276_cast, y = inputs_151_cast)[name = tensor("inputs_153_cast")]; + tensor var_5280 = const()[name = tensor("op_5280"), val = tensor([1])]; + tensor channels_mean_153_cast = reduce_mean(axes = var_5280, keep_dims = var_4938, x = inputs_153_cast)[name = tensor("channels_mean_153_cast")]; + tensor zero_mean_153_cast = sub(x = inputs_153_cast, y = channels_mean_153_cast)[name = tensor("zero_mean_153_cast")]; + tensor zero_mean_sq_153_cast = mul(x = zero_mean_153_cast, y = zero_mean_153_cast)[name = tensor("zero_mean_sq_153_cast")]; + tensor var_5284 = const()[name = tensor("op_5284"), val = tensor([1])]; + tensor var_5285_cast = reduce_mean(axes = var_5284, keep_dims = var_4938, x = zero_mean_sq_153_cast)[name = tensor("op_5285_cast")]; + tensor var_5286_to_fp16 = 
const()[name = tensor("op_5286_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5287_cast = add(x = var_5285_cast, y = var_5286_to_fp16)[name = tensor("op_5287_cast")]; + tensor denom_153_epsilon_0_to_fp16 = const()[name = tensor("denom_153_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_153_cast = rsqrt(epsilon = denom_153_epsilon_0_to_fp16, x = var_5287_cast)[name = tensor("denom_153_cast")]; + tensor out_153_cast = mul(x = zero_mean_153_cast, y = denom_153_cast)[name = tensor("out_153_cast")]; + tensor var_5291_to_fp16 = const()[name = tensor("op_5291_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(681488896)))]; + tensor var_5292_cast = add(x = out_153_cast, y = var_5291_to_fp16)[name = tensor("op_5292_cast")]; + tensor var_5294_to_fp16 = const()[name = tensor("op_5294_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(681491520)))]; + tensor hidden_states_217_cast = mul(x = var_5292_cast, y = var_5294_to_fp16)[name = tensor("hidden_states_217_cast")]; + tensor var_5301 = const()[name = tensor("op_5301"), val = tensor([1, 1])]; + tensor var_5303 = const()[name = tensor("op_5303"), val = tensor([1, 1])]; + tensor q_103_pad_type_0 = const()[name = tensor("q_103_pad_type_0"), val = tensor("custom")]; + tensor q_103_pad_0 = const()[name = tensor("q_103_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(681494144))), lut = tensor([-0x1.acp-7, 0x1.ab4p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_103_cast = conv(dilations = var_5303, groups = var_4943, pad = q_103_pad_0, pad_type = q_103_pad_type_0, strides = var_5301, weight = mid_block_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_217_cast)[name = tensor("q_103_cast")]; + tensor var_5307 = const()[name = tensor("op_5307"), val = tensor([1, 1])]; + tensor var_5309 = const()[name = tensor("op_5309"), val = tensor([1, 1])]; + tensor k_103_pad_type_0 = const()[name = tensor("k_103_pad_type_0"), val = tensor("custom")]; + tensor k_103_pad_0 = const()[name = tensor("k_103_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(681699008))), lut = tensor([-0x1.434p-6, -0x1.81cp-8, 0x1.828p-8, 0x1.44p-6]), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_103_cast = conv(dilations = var_5309, groups = var_4943, pad = k_103_pad_0, pad_type = k_103_pad_type_0, strides = var_5307, weight = mid_block_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_103_cast")]; + tensor var_5313 = const()[name = tensor("op_5313"), val = tensor([1, 1])]; + tensor var_5315 = const()[name = tensor("op_5315"), val = tensor([1, 1])]; + tensor v_103_pad_type_0 = const()[name = tensor("v_103_pad_type_0"), val = tensor("custom")]; + tensor v_103_pad_0 = const()[name = tensor("v_103_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
mid_block_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(682354432))), lut = tensor([-0x1.8f4p-6, -0x1.ca8p-8, 0x1.cccp-8, 0x1.9p-6]), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_103_cast = conv(dilations = var_5315, groups = var_4943, pad = v_103_pad_0, pad_type = v_103_pad_type_0, strides = var_5313, weight = mid_block_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_103_cast")]; + tensor var_5319 = const()[name = tensor("op_5319"), val = tensor([2, 20, 64, -1])]; + tensor var_5320_cast = reshape(shape = var_5319, x = q_103_cast)[name = tensor("op_5320_cast")]; + tensor var_5321 = const()[name = tensor("op_5321"), val = tensor([2, 20, 64, -1])]; + tensor var_5322_cast = reshape(shape = var_5321, x = k_103_cast)[name = tensor("op_5322_cast")]; + tensor var_5323 = const()[name = tensor("op_5323"), val = tensor([2, 20, 64, -1])]; + tensor var_5324_cast = reshape(shape = var_5323, x = v_103_cast)[name = tensor("op_5324_cast")]; + tensor attn_weights_205_transpose_x_0 = const()[name = tensor("attn_weights_205_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_205_transpose_y_0 = const()[name = tensor("attn_weights_205_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_205_cast = matmul(transpose_x = attn_weights_205_transpose_x_0, transpose_y = attn_weights_205_transpose_y_0, x = var_5320_cast, y = var_5322_cast)[name = tensor("attn_weights_205_cast")]; + tensor attn_weights_207_cast = mul(x = attn_weights_205_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_207_cast")]; + tensor var_5328_cast = softmax(axis = var_4927, x = attn_weights_207_cast)[name = tensor("op_5328_cast")]; + tensor attn_103_transpose_x_0 = const()[name = tensor("attn_103_transpose_x_0"), val = tensor(false)]; + tensor attn_103_transpose_y_0 = const()[name = tensor("attn_103_transpose_y_0"), val = tensor(true)]; + tensor attn_103_cast = matmul(transpose_x = attn_103_transpose_x_0, transpose_y = attn_103_transpose_y_0, x = var_5324_cast, y = var_5328_cast)[name = tensor("attn_103_cast")]; + tensor var_5332 = const()[name = tensor("op_5332"), val = tensor([2, 1280, 1, -1])]; + tensor input_337_cast = reshape(shape = var_5332, x = attn_103_cast)[name = tensor("input_337_cast")]; + tensor var_5337 = const()[name = tensor("op_5337"), val = tensor([1, 1])]; + tensor var_5339 = const()[name = tensor("op_5339"), val = tensor([1, 1])]; + tensor var_5341_pad_type_0 = const()[name = tensor("op_5341_pad_type_0"), val = tensor("custom")]; + tensor var_5341_pad_0 = const()[name = tensor("op_5341_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(683009856))), lut = tensor([-0x1.96cp-7, -0x1.e24p-9, 0x1.e68p-9, 0x1.974p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(683419520)))]; + tensor var_5341_cast = conv(bias = mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_5339, groups = var_4943, pad = var_5341_pad_0, pad_type = var_5341_pad_type_0, strides = var_5337, weight = mid_block_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_337_cast)[name = tensor("op_5341_cast")]; + tensor inputs_155_cast = add(x = var_5341_cast, y = inputs_153_cast)[name = tensor("inputs_155_cast")]; + tensor var_5345 = const()[name = tensor("op_5345"), val = tensor([1])]; + tensor channels_mean_155_cast = reduce_mean(axes = var_5345, keep_dims = var_4938, x = inputs_155_cast)[name = tensor("channels_mean_155_cast")]; + tensor zero_mean_155_cast = sub(x = inputs_155_cast, y = channels_mean_155_cast)[name = tensor("zero_mean_155_cast")]; + tensor zero_mean_sq_155_cast = mul(x = zero_mean_155_cast, y = zero_mean_155_cast)[name = tensor("zero_mean_sq_155_cast")]; + tensor var_5349 = const()[name = tensor("op_5349"), val = tensor([1])]; + tensor var_5350_cast = reduce_mean(axes = var_5349, keep_dims = var_4938, x = zero_mean_sq_155_cast)[name = tensor("op_5350_cast")]; + tensor var_5351_to_fp16 = const()[name = tensor("op_5351_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5352_cast = add(x = var_5350_cast, y = var_5351_to_fp16)[name = tensor("op_5352_cast")]; + tensor denom_155_epsilon_0_to_fp16 = const()[name = tensor("denom_155_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_155_cast = rsqrt(epsilon = denom_155_epsilon_0_to_fp16, x = var_5352_cast)[name = tensor("denom_155_cast")]; + tensor out_155_cast = mul(x = zero_mean_155_cast, y = denom_155_cast)[name = tensor("out_155_cast")]; + tensor var_5356_to_fp16 = const()[name = tensor("op_5356_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(683422144)))]; + tensor var_5357_cast = add(x = out_155_cast, y = var_5356_to_fp16)[name = tensor("op_5357_cast")]; + tensor var_5359_to_fp16 = const()[name = tensor("op_5359_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(683424768)))]; + tensor input_339_cast = mul(x = var_5357_cast, y = var_5359_to_fp16)[name = tensor("input_339_cast")]; + tensor var_5367 = const()[name = tensor("op_5367"), val = tensor([1, 1])]; + tensor var_5369 = const()[name = tensor("op_5369"), val = tensor([1, 1])]; + tensor var_5371_pad_type_0 = const()[name = tensor("op_5371_pad_type_0"), val = tensor("custom")]; + tensor var_5371_pad_0 = const()[name = tensor("op_5371_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(683427392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(693257856))), name = tensor("mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(693258048)))]; + tensor var_5371_cast = conv(bias = mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_5369, groups = var_4943, pad = var_5371_pad_0, 
pad_type = var_5371_pad_type_0, strides = var_5367, weight = mid_block_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_339_cast)[name = tensor("op_5371_cast")]; + tensor var_5372_split_sizes_0 = const()[name = tensor("op_5372_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5372_axis_0 = const()[name = tensor("op_5372_axis_0"), val = tensor(1)]; + tensor var_5372_cast_0, tensor var_5372_cast_1 = split(axis = var_5372_axis_0, split_sizes = var_5372_split_sizes_0, x = var_5371_cast)[name = tensor("op_5372_cast")]; + tensor var_5374_mode_0 = const()[name = tensor("op_5374_mode_0"), val = tensor("EXACT")]; + tensor var_5374_cast = gelu(mode = var_5374_mode_0, x = var_5372_cast_1)[name = tensor("op_5374_cast")]; + tensor input_341_cast = mul(x = var_5372_cast_0, y = var_5374_cast)[name = tensor("input_341_cast")]; + tensor var_5378 = const()[name = tensor("op_5378"), val = tensor([1, 1])]; + tensor var_5380 = const()[name = tensor("op_5380"), val = tensor([1, 1])]; + tensor var_5382_pad_type_0 = const()[name = tensor("op_5382_pad_type_0"), val = tensor("custom")]; + tensor var_5382_pad_0 = const()[name = tensor("op_5382_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(693278592))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(698193856))), name = tensor("mid_block_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(698194048)))]; + tensor var_5382_cast = conv(bias = mid_block_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_5380, groups = var_4943, pad = var_5382_pad_0, pad_type = var_5382_pad_type_0, strides = var_5378, weight = mid_block_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_341_cast)[name = tensor("op_5382_cast")]; + tensor inputs_157_cast = add(x = var_5382_cast, y = inputs_155_cast)[name = tensor("inputs_157_cast")]; + tensor var_5392 = const()[name = tensor("op_5392"), val = tensor([1])]; + tensor channels_mean_157_cast = reduce_mean(axes = var_5392, keep_dims = var_4938, x = inputs_157_cast)[name = tensor("channels_mean_157_cast")]; + tensor zero_mean_157_cast = sub(x = inputs_157_cast, y = channels_mean_157_cast)[name = tensor("zero_mean_157_cast")]; + tensor zero_mean_sq_157_cast = mul(x = zero_mean_157_cast, y = zero_mean_157_cast)[name = tensor("zero_mean_sq_157_cast")]; + tensor var_5396 = const()[name = tensor("op_5396"), val = tensor([1])]; + tensor var_5397_cast = reduce_mean(axes = var_5396, keep_dims = var_4938, x = zero_mean_sq_157_cast)[name = tensor("op_5397_cast")]; + tensor var_5398_to_fp16 = const()[name = tensor("op_5398_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5399_cast = add(x = var_5397_cast, y = var_5398_to_fp16)[name = tensor("op_5399_cast")]; + tensor denom_157_epsilon_0_to_fp16 = const()[name = tensor("denom_157_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_157_cast = rsqrt(epsilon = denom_157_epsilon_0_to_fp16, x = var_5399_cast)[name = tensor("denom_157_cast")]; + tensor out_157_cast = 
mul(x = zero_mean_157_cast, y = denom_157_cast)[name = tensor("out_157_cast")]; + tensor var_5403_to_fp16 = const()[name = tensor("op_5403_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(698196672)))]; + tensor var_5404_cast = add(x = out_157_cast, y = var_5403_to_fp16)[name = tensor("op_5404_cast")]; + tensor var_5406_to_fp16 = const()[name = tensor("op_5406_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(698199296)))]; + tensor hidden_states_221_cast = mul(x = var_5404_cast, y = var_5406_to_fp16)[name = tensor("hidden_states_221_cast")]; + tensor var_5413 = const()[name = tensor("op_5413"), val = tensor([1, 1])]; + tensor var_5415 = const()[name = tensor("op_5415"), val = tensor([1, 1])]; + tensor q_105_pad_type_0 = const()[name = tensor("q_105_pad_type_0"), val = tensor("custom")]; + tensor q_105_pad_0 = const()[name = tensor("q_105_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(698201920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(699430784))), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_105_cast = conv(dilations = var_5415, groups = var_4943, pad = q_105_pad_0, pad_type = q_105_pad_type_0, strides = var_5413, weight = mid_block_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_221_cast)[name = tensor("q_105_cast")]; + tensor var_5419 = const()[name = tensor("op_5419"), val = tensor([1, 1])]; + tensor var_5421 = const()[name = tensor("op_5421"), val = tensor([1, 1])]; + tensor k_105_pad_type_0 = const()[name = tensor("k_105_pad_type_0"), val = tensor("custom")]; + tensor k_105_pad_0 = const()[name = tensor("k_105_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(699430976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(700659840))), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_105_cast = conv(dilations = var_5421, groups = var_4943, pad = k_105_pad_0, pad_type = k_105_pad_type_0, strides = var_5419, weight = mid_block_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_221_cast)[name = tensor("k_105_cast")]; + tensor var_5425 = const()[name = tensor("op_5425"), val = tensor([1, 1])]; + tensor var_5427 = const()[name = tensor("op_5427"), val = tensor([1, 1])]; + tensor v_105_pad_type_0 = const()[name = tensor("v_105_pad_type_0"), val = tensor("custom")]; + tensor v_105_pad_0 = const()[name = tensor("v_105_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(700660032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(702298496))), name = 
tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_105_cast = conv(dilations = var_5427, groups = var_4943, pad = v_105_pad_0, pad_type = v_105_pad_type_0, strides = var_5425, weight = mid_block_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_221_cast)[name = tensor("v_105_cast")]; + tensor var_5431 = const()[name = tensor("op_5431"), val = tensor([2, 20, 64, -1])]; + tensor var_5432_cast = reshape(shape = var_5431, x = q_105_cast)[name = tensor("op_5432_cast")]; + tensor var_5433 = const()[name = tensor("op_5433"), val = tensor([2, 20, 64, -1])]; + tensor var_5434_cast = reshape(shape = var_5433, x = k_105_cast)[name = tensor("op_5434_cast")]; + tensor var_5435 = const()[name = tensor("op_5435"), val = tensor([2, 20, 64, -1])]; + tensor var_5436_cast = reshape(shape = var_5435, x = v_105_cast)[name = tensor("op_5436_cast")]; + tensor attn_weights_209_transpose_x_0 = const()[name = tensor("attn_weights_209_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_209_transpose_y_0 = const()[name = tensor("attn_weights_209_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_209_cast = matmul(transpose_x = attn_weights_209_transpose_x_0, transpose_y = attn_weights_209_transpose_y_0, x = var_5432_cast, y = var_5434_cast)[name = tensor("attn_weights_209_cast")]; + tensor attn_weights_211_cast = mul(x = attn_weights_209_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_211_cast")]; + tensor var_5440_cast = softmax(axis = var_4927, x = attn_weights_211_cast)[name = tensor("op_5440_cast")]; + tensor attn_105_transpose_x_0 = const()[name = tensor("attn_105_transpose_x_0"), val = tensor(false)]; + tensor attn_105_transpose_y_0 = const()[name = tensor("attn_105_transpose_y_0"), val = tensor(true)]; + tensor attn_105_cast = matmul(transpose_x = attn_105_transpose_x_0, transpose_y = attn_105_transpose_y_0, x = var_5436_cast, y = var_5440_cast)[name = tensor("attn_105_cast")]; + tensor var_5444 = const()[name = tensor("op_5444"), val = tensor([2, 1280, 1, -1])]; + tensor input_343_cast = reshape(shape = var_5444, x = attn_105_cast)[name = tensor("input_343_cast")]; + tensor var_5449 = const()[name = tensor("op_5449"), val = tensor([1, 1])]; + tensor var_5451 = const()[name = tensor("op_5451"), val = tensor([1, 1])]; + tensor var_5453_pad_type_0 = const()[name = tensor("op_5453_pad_type_0"), val = tensor("custom")]; + tensor var_5453_pad_0 = const()[name = tensor("op_5453_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(702299072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703527936))), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703528128)))]; + tensor var_5453_cast = conv(bias = mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_5451, groups = var_4943, pad = var_5453_pad_0, pad_type = var_5453_pad_type_0, strides = 
var_5449, weight = mid_block_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_343_cast)[name = tensor("op_5453_cast")]; + tensor inputs_159_cast = add(x = var_5453_cast, y = inputs_157_cast)[name = tensor("inputs_159_cast")]; + tensor var_5457 = const()[name = tensor("op_5457"), val = tensor([1])]; + tensor channels_mean_159_cast = reduce_mean(axes = var_5457, keep_dims = var_4938, x = inputs_159_cast)[name = tensor("channels_mean_159_cast")]; + tensor zero_mean_159_cast = sub(x = inputs_159_cast, y = channels_mean_159_cast)[name = tensor("zero_mean_159_cast")]; + tensor zero_mean_sq_159_cast = mul(x = zero_mean_159_cast, y = zero_mean_159_cast)[name = tensor("zero_mean_sq_159_cast")]; + tensor var_5461 = const()[name = tensor("op_5461"), val = tensor([1])]; + tensor var_5462_cast = reduce_mean(axes = var_5461, keep_dims = var_4938, x = zero_mean_sq_159_cast)[name = tensor("op_5462_cast")]; + tensor var_5463_to_fp16 = const()[name = tensor("op_5463_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5464_cast = add(x = var_5462_cast, y = var_5463_to_fp16)[name = tensor("op_5464_cast")]; + tensor denom_159_epsilon_0_to_fp16 = const()[name = tensor("denom_159_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_159_cast = rsqrt(epsilon = denom_159_epsilon_0_to_fp16, x = var_5464_cast)[name = tensor("denom_159_cast")]; + tensor out_159_cast = mul(x = zero_mean_159_cast, y = denom_159_cast)[name = tensor("out_159_cast")]; + tensor var_5468_to_fp16 = const()[name = tensor("op_5468_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703530752)))]; + tensor var_5469_cast = add(x = out_159_cast, y = var_5468_to_fp16)[name = tensor("op_5469_cast")]; + tensor var_5471_to_fp16 = const()[name = tensor("op_5471_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703533376)))]; + tensor hidden_states_223_cast = mul(x = var_5469_cast, y = var_5471_to_fp16)[name = tensor("hidden_states_223_cast")]; + tensor var_5478 = const()[name = tensor("op_5478"), val = tensor([1, 1])]; + tensor var_5480 = const()[name = tensor("op_5480"), val = tensor([1, 1])]; + tensor q_107_pad_type_0 = const()[name = tensor("q_107_pad_type_0"), val = tensor("custom")]; + tensor q_107_pad_0 = const()[name = tensor("q_107_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703536000))), lut = tensor([-0x1.604p-6, -0x1.bc4p-8, 0x1.c14p-8, 0x1.61p-6]), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_107_cast = conv(dilations = var_5480, groups = var_4943, pad = q_107_pad_0, pad_type = q_107_pad_type_0, strides = var_5478, weight = mid_block_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_223_cast)[name = tensor("q_107_cast")]; + tensor var_5484 = const()[name = tensor("op_5484"), val = tensor([1, 1])]; + tensor var_5486 = const()[name = tensor("op_5486"), val = tensor([1, 1])]; + tensor k_107_pad_type_0 = const()[name = tensor("k_107_pad_type_0"), val = tensor("custom")]; + tensor k_107_pad_0 = const()[name = tensor("k_107_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(703945664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(705256448))), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_107_cast = conv(dilations = var_5486, groups = var_4943, pad = k_107_pad_0, pad_type = k_107_pad_type_0, strides = var_5484, weight = mid_block_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_107_cast")]; + tensor var_5490 = const()[name = tensor("op_5490"), val = tensor([1, 1])]; + tensor var_5492 = const()[name = tensor("op_5492"), val = tensor([1, 1])]; + tensor v_107_pad_type_0 = const()[name = tensor("v_107_pad_type_0"), val = tensor("custom")]; + tensor v_107_pad_0 = const()[name = tensor("v_107_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(705256576))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(706567360))), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_107_cast = conv(dilations = var_5492, groups = var_4943, pad = v_107_pad_0, pad_type = v_107_pad_type_0, strides = var_5490, weight = mid_block_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_107_cast")]; + tensor var_5496 = const()[name = tensor("op_5496"), val = tensor([2, 20, 64, -1])]; + tensor var_5497_cast = reshape(shape = var_5496, x = q_107_cast)[name = tensor("op_5497_cast")]; + tensor var_5498 = const()[name = tensor("op_5498"), val = tensor([2, 20, 64, -1])]; + tensor var_5499_cast = reshape(shape = var_5498, x = k_107_cast)[name = tensor("op_5499_cast")]; + tensor var_5500 = const()[name = tensor("op_5500"), val = tensor([2, 20, 64, -1])]; + tensor var_5501_cast = reshape(shape = var_5500, x = v_107_cast)[name = tensor("op_5501_cast")]; + tensor attn_weights_213_transpose_x_0 = const()[name = tensor("attn_weights_213_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_213_transpose_y_0 = const()[name = tensor("attn_weights_213_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_213_cast = matmul(transpose_x = attn_weights_213_transpose_x_0, transpose_y = attn_weights_213_transpose_y_0, x = var_5497_cast, y = var_5499_cast)[name = tensor("attn_weights_213_cast")]; + tensor attn_weights_215_cast = mul(x = attn_weights_213_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_215_cast")]; + tensor var_5505_cast = softmax(axis = var_4927, x = attn_weights_215_cast)[name = tensor("op_5505_cast")]; + tensor attn_107_transpose_x_0 = const()[name = tensor("attn_107_transpose_x_0"), val = tensor(false)]; + tensor attn_107_transpose_y_0 = const()[name = tensor("attn_107_transpose_y_0"), val = tensor(true)]; + tensor attn_107_cast = matmul(transpose_x = attn_107_transpose_x_0, transpose_y = attn_107_transpose_y_0, x = var_5501_cast, y = var_5505_cast)[name = tensor("attn_107_cast")]; + tensor var_5509 = const()[name = tensor("op_5509"), val = tensor([2, 1280, 1, -1])]; + tensor input_345_cast = reshape(shape = var_5509, x = attn_107_cast)[name = 
tensor("input_345_cast")]; + tensor var_5514 = const()[name = tensor("op_5514"), val = tensor([1, 1])]; + tensor var_5516 = const()[name = tensor("op_5516"), val = tensor([1, 1])]; + tensor var_5518_pad_type_0 = const()[name = tensor("op_5518_pad_type_0"), val = tensor("custom")]; + tensor var_5518_pad_0 = const()[name = tensor("op_5518_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(706567488))), lut = tensor([-0x1.878p-8, 0x1.86cp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(706772352)))]; + tensor var_5518_cast = conv(bias = mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_5516, groups = var_4943, pad = var_5518_pad_0, pad_type = var_5518_pad_type_0, strides = var_5514, weight = mid_block_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_345_cast)[name = tensor("op_5518_cast")]; + tensor inputs_161_cast = add(x = var_5518_cast, y = inputs_159_cast)[name = tensor("inputs_161_cast")]; + tensor var_5522 = const()[name = tensor("op_5522"), val = tensor([1])]; + tensor channels_mean_161_cast = reduce_mean(axes = var_5522, keep_dims = var_4938, x = inputs_161_cast)[name = tensor("channels_mean_161_cast")]; + tensor zero_mean_161_cast = sub(x = inputs_161_cast, y = channels_mean_161_cast)[name = tensor("zero_mean_161_cast")]; + tensor zero_mean_sq_161_cast = mul(x = zero_mean_161_cast, y = zero_mean_161_cast)[name = tensor("zero_mean_sq_161_cast")]; + tensor var_5526 = const()[name = tensor("op_5526"), val = tensor([1])]; + tensor var_5527_cast = reduce_mean(axes = var_5526, keep_dims = var_4938, x = zero_mean_sq_161_cast)[name = tensor("op_5527_cast")]; + tensor var_5528_to_fp16 = const()[name = tensor("op_5528_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5529_cast = add(x = var_5527_cast, y = var_5528_to_fp16)[name = tensor("op_5529_cast")]; + tensor denom_161_epsilon_0_to_fp16 = const()[name = tensor("denom_161_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_161_cast = rsqrt(epsilon = denom_161_epsilon_0_to_fp16, x = var_5529_cast)[name = tensor("denom_161_cast")]; + tensor out_161_cast = mul(x = zero_mean_161_cast, y = denom_161_cast)[name = tensor("out_161_cast")]; + tensor var_5533_to_fp16 = const()[name = tensor("op_5533_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(706774976)))]; + tensor var_5534_cast = add(x = out_161_cast, y = var_5533_to_fp16)[name = tensor("op_5534_cast")]; + tensor var_5536_to_fp16 = const()[name = tensor("op_5536_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(706777600)))]; + tensor input_347_cast = mul(x = var_5534_cast, y = var_5536_to_fp16)[name = tensor("input_347_cast")]; + tensor var_5544 = const()[name = tensor("op_5544"), val = tensor([1, 1])]; + tensor var_5546 = const()[name = tensor("op_5546"), val = tensor([1, 1])]; + tensor var_5548_pad_type_0 = const()[name = tensor("op_5548_pad_type_0"), val = 
tensor("custom")]; + tensor var_5548_pad_0 = const()[name = tensor("op_5548_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(706780224))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(716610688))), name = tensor("mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(716610880)))]; + tensor var_5548_cast = conv(bias = mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_5546, groups = var_4943, pad = var_5548_pad_0, pad_type = var_5548_pad_type_0, strides = var_5544, weight = mid_block_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_347_cast)[name = tensor("op_5548_cast")]; + tensor var_5549_split_sizes_0 = const()[name = tensor("op_5549_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5549_axis_0 = const()[name = tensor("op_5549_axis_0"), val = tensor(1)]; + tensor var_5549_cast_0, tensor var_5549_cast_1 = split(axis = var_5549_axis_0, split_sizes = var_5549_split_sizes_0, x = var_5548_cast)[name = tensor("op_5549_cast")]; + tensor var_5551_mode_0 = const()[name = tensor("op_5551_mode_0"), val = tensor("EXACT")]; + tensor var_5551_cast = gelu(mode = var_5551_mode_0, x = var_5549_cast_1)[name = tensor("op_5551_cast")]; + tensor input_349_cast = mul(x = var_5549_cast_0, y = var_5551_cast)[name = tensor("input_349_cast")]; + tensor var_5555 = const()[name = tensor("op_5555"), val = tensor([1, 1])]; + tensor var_5557 = const()[name = tensor("op_5557"), val = tensor([1, 1])]; + tensor var_5559_pad_type_0 = const()[name = tensor("op_5559_pad_type_0"), val = tensor("custom")]; + tensor var_5559_pad_0 = const()[name = tensor("op_5559_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(716631424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721546688))), name = tensor("mid_block_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721546880)))]; + tensor var_5559_cast = conv(bias = mid_block_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_5557, groups = var_4943, pad = var_5559_pad_0, pad_type = var_5559_pad_type_0, strides = var_5555, weight = mid_block_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_349_cast)[name = tensor("op_5559_cast")]; + tensor inputs_163_cast = add(x = var_5559_cast, y = inputs_161_cast)[name = tensor("inputs_163_cast")]; + tensor var_5569 = const()[name = tensor("op_5569"), val = tensor([1])]; + tensor channels_mean_163_cast = 
reduce_mean(axes = var_5569, keep_dims = var_4938, x = inputs_163_cast)[name = tensor("channels_mean_163_cast")]; + tensor zero_mean_163_cast = sub(x = inputs_163_cast, y = channels_mean_163_cast)[name = tensor("zero_mean_163_cast")]; + tensor zero_mean_sq_163_cast = mul(x = zero_mean_163_cast, y = zero_mean_163_cast)[name = tensor("zero_mean_sq_163_cast")]; + tensor var_5573 = const()[name = tensor("op_5573"), val = tensor([1])]; + tensor var_5574_cast = reduce_mean(axes = var_5573, keep_dims = var_4938, x = zero_mean_sq_163_cast)[name = tensor("op_5574_cast")]; + tensor var_5575_to_fp16 = const()[name = tensor("op_5575_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5576_cast = add(x = var_5574_cast, y = var_5575_to_fp16)[name = tensor("op_5576_cast")]; + tensor denom_163_epsilon_0_to_fp16 = const()[name = tensor("denom_163_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_163_cast = rsqrt(epsilon = denom_163_epsilon_0_to_fp16, x = var_5576_cast)[name = tensor("denom_163_cast")]; + tensor out_163_cast = mul(x = zero_mean_163_cast, y = denom_163_cast)[name = tensor("out_163_cast")]; + tensor var_5580_to_fp16 = const()[name = tensor("op_5580_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721549504)))]; + tensor var_5581_cast = add(x = out_163_cast, y = var_5580_to_fp16)[name = tensor("op_5581_cast")]; + tensor var_5583_to_fp16 = const()[name = tensor("op_5583_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721552128)))]; + tensor hidden_states_227_cast = mul(x = var_5581_cast, y = var_5583_to_fp16)[name = tensor("hidden_states_227_cast")]; + tensor var_5590 = const()[name = tensor("op_5590"), val = tensor([1, 1])]; + tensor var_5592 = const()[name = tensor("op_5592"), val = tensor([1, 1])]; + tensor q_109_pad_type_0 = const()[name = tensor("q_109_pad_type_0"), val = tensor("custom")]; + tensor q_109_pad_0 = const()[name = tensor("q_109_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(721554752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(722783616))), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_109_cast = conv(dilations = var_5592, groups = var_4943, pad = q_109_pad_0, pad_type = q_109_pad_type_0, strides = var_5590, weight = mid_block_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_227_cast)[name = tensor("q_109_cast")]; + tensor var_5596 = const()[name = tensor("op_5596"), val = tensor([1, 1])]; + tensor var_5598 = const()[name = tensor("op_5598"), val = tensor([1, 1])]; + tensor k_109_pad_type_0 = const()[name = tensor("k_109_pad_type_0"), val = tensor("custom")]; + tensor k_109_pad_0 = const()[name = tensor("k_109_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(722783808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(724012672))), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 
1, 1])]; + tensor k_109_cast = conv(dilations = var_5598, groups = var_4943, pad = k_109_pad_0, pad_type = k_109_pad_type_0, strides = var_5596, weight = mid_block_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_227_cast)[name = tensor("k_109_cast")]; + tensor var_5602 = const()[name = tensor("op_5602"), val = tensor([1, 1])]; + tensor var_5604 = const()[name = tensor("op_5604"), val = tensor([1, 1])]; + tensor v_109_pad_type_0 = const()[name = tensor("v_109_pad_type_0"), val = tensor("custom")]; + tensor v_109_pad_0 = const()[name = tensor("v_109_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(724012864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(725241728))), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_109_cast = conv(dilations = var_5604, groups = var_4943, pad = v_109_pad_0, pad_type = v_109_pad_type_0, strides = var_5602, weight = mid_block_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_227_cast)[name = tensor("v_109_cast")]; + tensor var_5608 = const()[name = tensor("op_5608"), val = tensor([2, 20, 64, -1])]; + tensor var_5609_cast = reshape(shape = var_5608, x = q_109_cast)[name = tensor("op_5609_cast")]; + tensor var_5610 = const()[name = tensor("op_5610"), val = tensor([2, 20, 64, -1])]; + tensor var_5611_cast = reshape(shape = var_5610, x = k_109_cast)[name = tensor("op_5611_cast")]; + tensor var_5612 = const()[name = tensor("op_5612"), val = tensor([2, 20, 64, -1])]; + tensor var_5613_cast = reshape(shape = var_5612, x = v_109_cast)[name = tensor("op_5613_cast")]; + tensor attn_weights_217_transpose_x_0 = const()[name = tensor("attn_weights_217_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_217_transpose_y_0 = const()[name = tensor("attn_weights_217_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_217_cast = matmul(transpose_x = attn_weights_217_transpose_x_0, transpose_y = attn_weights_217_transpose_y_0, x = var_5609_cast, y = var_5611_cast)[name = tensor("attn_weights_217_cast")]; + tensor attn_weights_219_cast = mul(x = attn_weights_217_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_219_cast")]; + tensor var_5617_cast = softmax(axis = var_4927, x = attn_weights_219_cast)[name = tensor("op_5617_cast")]; + tensor attn_109_transpose_x_0 = const()[name = tensor("attn_109_transpose_x_0"), val = tensor(false)]; + tensor attn_109_transpose_y_0 = const()[name = tensor("attn_109_transpose_y_0"), val = tensor(true)]; + tensor attn_109_cast = matmul(transpose_x = attn_109_transpose_x_0, transpose_y = attn_109_transpose_y_0, x = var_5613_cast, y = var_5617_cast)[name = tensor("attn_109_cast")]; + tensor var_5621 = const()[name = tensor("op_5621"), val = tensor([2, 1280, 1, -1])]; + tensor input_351_cast = reshape(shape = var_5621, x = attn_109_cast)[name = tensor("input_351_cast")]; + tensor var_5626 = const()[name = tensor("op_5626"), val = tensor([1, 1])]; + tensor var_5628 = const()[name = tensor("op_5628"), val = tensor([1, 1])]; + tensor var_5630_pad_type_0 = const()[name = tensor("op_5630_pad_type_0"), val = tensor("custom")]; + tensor var_5630_pad_0 = const()[name = tensor("op_5630_pad_0"), val = tensor([0, 0, 0, 0])]; + 
tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(725241920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(726470784))), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(726470976)))]; + tensor var_5630_cast = conv(bias = mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_5628, groups = var_4943, pad = var_5630_pad_0, pad_type = var_5630_pad_type_0, strides = var_5626, weight = mid_block_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_351_cast)[name = tensor("op_5630_cast")]; + tensor inputs_165_cast = add(x = var_5630_cast, y = inputs_163_cast)[name = tensor("inputs_165_cast")]; + tensor var_5634 = const()[name = tensor("op_5634"), val = tensor([1])]; + tensor channels_mean_165_cast = reduce_mean(axes = var_5634, keep_dims = var_4938, x = inputs_165_cast)[name = tensor("channels_mean_165_cast")]; + tensor zero_mean_165_cast = sub(x = inputs_165_cast, y = channels_mean_165_cast)[name = tensor("zero_mean_165_cast")]; + tensor zero_mean_sq_165_cast = mul(x = zero_mean_165_cast, y = zero_mean_165_cast)[name = tensor("zero_mean_sq_165_cast")]; + tensor var_5638 = const()[name = tensor("op_5638"), val = tensor([1])]; + tensor var_5639_cast = reduce_mean(axes = var_5638, keep_dims = var_4938, x = zero_mean_sq_165_cast)[name = tensor("op_5639_cast")]; + tensor var_5640_to_fp16 = const()[name = tensor("op_5640_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5641_cast = add(x = var_5639_cast, y = var_5640_to_fp16)[name = tensor("op_5641_cast")]; + tensor denom_165_epsilon_0_to_fp16 = const()[name = tensor("denom_165_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_165_cast = rsqrt(epsilon = denom_165_epsilon_0_to_fp16, x = var_5641_cast)[name = tensor("denom_165_cast")]; + tensor out_165_cast = mul(x = zero_mean_165_cast, y = denom_165_cast)[name = tensor("out_165_cast")]; + tensor var_5645_to_fp16 = const()[name = tensor("op_5645_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(726473600)))]; + tensor var_5646_cast = add(x = out_165_cast, y = var_5645_to_fp16)[name = tensor("op_5646_cast")]; + tensor var_5648_to_fp16 = const()[name = tensor("op_5648_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(726476224)))]; + tensor hidden_states_229_cast = mul(x = var_5646_cast, y = var_5648_to_fp16)[name = tensor("hidden_states_229_cast")]; + tensor var_5655 = const()[name = tensor("op_5655"), val = tensor([1, 1])]; + tensor var_5657 = const()[name = tensor("op_5657"), val = tensor([1, 1])]; + tensor q_111_pad_type_0 = const()[name = tensor("q_111_pad_type_0"), val = tensor("custom")]; + tensor q_111_pad_0 = const()[name = tensor("q_111_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(726478848))), lut = tensor([-0x1.3fp-6, -0x1.9c4p-8, 0x1.9c8p-8, 0x1.3f4p-6]), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_111_cast = conv(dilations = var_5657, groups = var_4943, pad = q_111_pad_0, pad_type = q_111_pad_type_0, strides = var_5655, weight = mid_block_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_229_cast)[name = tensor("q_111_cast")]; + tensor var_5661 = const()[name = tensor("op_5661"), val = tensor([1, 1])]; + tensor var_5663 = const()[name = tensor("op_5663"), val = tensor([1, 1])]; + tensor k_111_pad_type_0 = const()[name = tensor("k_111_pad_type_0"), val = tensor("custom")]; + tensor k_111_pad_0 = const()[name = tensor("k_111_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(726888512))), lut = tensor([-0x1.038p-7, 0x1.044p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_111_cast = conv(dilations = var_5663, groups = var_4943, pad = k_111_pad_0, pad_type = k_111_pad_type_0, strides = var_5661, weight = mid_block_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_111_cast")]; + tensor var_5667 = const()[name = tensor("op_5667"), val = tensor([1, 1])]; + tensor var_5669 = const()[name = tensor("op_5669"), val = tensor([1, 1])]; + tensor v_111_pad_type_0 = const()[name = tensor("v_111_pad_type_0"), val = tensor("custom")]; + tensor v_111_pad_0 = const()[name = tensor("v_111_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(727216256))), lut = tensor([-0x1.354p-7, 0x1.35cp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_111_cast = conv(dilations = var_5669, groups = var_4943, pad = v_111_pad_0, pad_type = v_111_pad_type_0, strides = var_5667, weight = mid_block_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_111_cast")]; + tensor var_5673 = const()[name = tensor("op_5673"), val = tensor([2, 20, 64, -1])]; + tensor var_5674_cast = reshape(shape = var_5673, x = q_111_cast)[name = tensor("op_5674_cast")]; + tensor var_5675 = const()[name = tensor("op_5675"), val = tensor([2, 20, 64, -1])]; + tensor var_5676_cast = reshape(shape = var_5675, x = k_111_cast)[name = tensor("op_5676_cast")]; + tensor var_5677 = const()[name = tensor("op_5677"), val = tensor([2, 20, 64, -1])]; + tensor var_5678_cast = reshape(shape = var_5677, x = v_111_cast)[name = tensor("op_5678_cast")]; + tensor attn_weights_221_transpose_x_0 = const()[name = tensor("attn_weights_221_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_221_transpose_y_0 = const()[name = tensor("attn_weights_221_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_221_cast = matmul(transpose_x = attn_weights_221_transpose_x_0, transpose_y = attn_weights_221_transpose_y_0, x = var_5674_cast, y = var_5676_cast)[name = 
tensor("attn_weights_221_cast")]; + tensor attn_weights_223_cast = mul(x = attn_weights_221_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_223_cast")]; + tensor var_5682_cast = softmax(axis = var_4927, x = attn_weights_223_cast)[name = tensor("op_5682_cast")]; + tensor attn_111_transpose_x_0 = const()[name = tensor("attn_111_transpose_x_0"), val = tensor(false)]; + tensor attn_111_transpose_y_0 = const()[name = tensor("attn_111_transpose_y_0"), val = tensor(true)]; + tensor attn_111_cast = matmul(transpose_x = attn_111_transpose_x_0, transpose_y = attn_111_transpose_y_0, x = var_5678_cast, y = var_5682_cast)[name = tensor("attn_111_cast")]; + tensor var_5686 = const()[name = tensor("op_5686"), val = tensor([2, 1280, 1, -1])]; + tensor input_353_cast = reshape(shape = var_5686, x = attn_111_cast)[name = tensor("input_353_cast")]; + tensor var_5691 = const()[name = tensor("op_5691"), val = tensor([1, 1])]; + tensor var_5693 = const()[name = tensor("op_5693"), val = tensor([1, 1])]; + tensor var_5695_pad_type_0 = const()[name = tensor("op_5695_pad_type_0"), val = tensor("custom")]; + tensor var_5695_pad_0 = const()[name = tensor("op_5695_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(727544000))), lut = tensor([-0x1.488p-7, -0x1.884p-9, 0x1.89p-9, 0x1.488p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(727953664)))]; + tensor var_5695_cast = conv(bias = mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_5693, groups = var_4943, pad = var_5695_pad_0, pad_type = var_5695_pad_type_0, strides = var_5691, weight = mid_block_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_353_cast)[name = tensor("op_5695_cast")]; + tensor inputs_167_cast = add(x = var_5695_cast, y = inputs_165_cast)[name = tensor("inputs_167_cast")]; + tensor var_5699 = const()[name = tensor("op_5699"), val = tensor([1])]; + tensor channels_mean_167_cast = reduce_mean(axes = var_5699, keep_dims = var_4938, x = inputs_167_cast)[name = tensor("channels_mean_167_cast")]; + tensor zero_mean_167_cast = sub(x = inputs_167_cast, y = channels_mean_167_cast)[name = tensor("zero_mean_167_cast")]; + tensor zero_mean_sq_167_cast = mul(x = zero_mean_167_cast, y = zero_mean_167_cast)[name = tensor("zero_mean_sq_167_cast")]; + tensor var_5703 = const()[name = tensor("op_5703"), val = tensor([1])]; + tensor var_5704_cast = reduce_mean(axes = var_5703, keep_dims = var_4938, x = zero_mean_sq_167_cast)[name = tensor("op_5704_cast")]; + tensor var_5705_to_fp16 = const()[name = tensor("op_5705_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5706_cast = add(x = var_5704_cast, y = var_5705_to_fp16)[name = tensor("op_5706_cast")]; + tensor denom_167_epsilon_0_to_fp16 = const()[name = tensor("denom_167_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_167_cast = rsqrt(epsilon = denom_167_epsilon_0_to_fp16, x = var_5706_cast)[name = tensor("denom_167_cast")]; + tensor out_167_cast = mul(x = 
zero_mean_167_cast, y = denom_167_cast)[name = tensor("out_167_cast")]; + tensor var_5710_to_fp16 = const()[name = tensor("op_5710_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(727956288)))]; + tensor var_5711_cast = add(x = out_167_cast, y = var_5710_to_fp16)[name = tensor("op_5711_cast")]; + tensor var_5713_to_fp16 = const()[name = tensor("op_5713_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(727958912)))]; + tensor input_355_cast = mul(x = var_5711_cast, y = var_5713_to_fp16)[name = tensor("input_355_cast")]; + tensor var_5721 = const()[name = tensor("op_5721"), val = tensor([1, 1])]; + tensor var_5723 = const()[name = tensor("op_5723"), val = tensor([1, 1])]; + tensor var_5725_pad_type_0 = const()[name = tensor("op_5725_pad_type_0"), val = tensor("custom")]; + tensor var_5725_pad_0 = const()[name = tensor("op_5725_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(727961536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(737792000))), name = tensor("mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(737792192)))]; + tensor var_5725_cast = conv(bias = mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_5723, groups = var_4943, pad = var_5725_pad_0, pad_type = var_5725_pad_type_0, strides = var_5721, weight = mid_block_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_355_cast)[name = tensor("op_5725_cast")]; + tensor var_5726_split_sizes_0 = const()[name = tensor("op_5726_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5726_axis_0 = const()[name = tensor("op_5726_axis_0"), val = tensor(1)]; + tensor var_5726_cast_0, tensor var_5726_cast_1 = split(axis = var_5726_axis_0, split_sizes = var_5726_split_sizes_0, x = var_5725_cast)[name = tensor("op_5726_cast")]; + tensor var_5728_mode_0 = const()[name = tensor("op_5728_mode_0"), val = tensor("EXACT")]; + tensor var_5728_cast = gelu(mode = var_5728_mode_0, x = var_5726_cast_1)[name = tensor("op_5728_cast")]; + tensor input_357_cast = mul(x = var_5726_cast_0, y = var_5728_cast)[name = tensor("input_357_cast")]; + tensor var_5732 = const()[name = tensor("op_5732"), val = tensor([1, 1])]; + tensor var_5734 = const()[name = tensor("op_5734"), val = tensor([1, 1])]; + tensor var_5736_pad_type_0 = const()[name = tensor("op_5736_pad_type_0"), val = tensor("custom")]; + tensor var_5736_pad_0 = const()[name = tensor("op_5736_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(737812736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(742728000))), name = tensor("mid_block_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 
1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(742728192)))]; + tensor var_5736_cast = conv(bias = mid_block_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_5734, groups = var_4943, pad = var_5736_pad_0, pad_type = var_5736_pad_type_0, strides = var_5732, weight = mid_block_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_357_cast)[name = tensor("op_5736_cast")]; + tensor inputs_169_cast = add(x = var_5736_cast, y = inputs_167_cast)[name = tensor("inputs_169_cast")]; + tensor var_5746 = const()[name = tensor("op_5746"), val = tensor([1])]; + tensor channels_mean_169_cast = reduce_mean(axes = var_5746, keep_dims = var_4938, x = inputs_169_cast)[name = tensor("channels_mean_169_cast")]; + tensor zero_mean_169_cast = sub(x = inputs_169_cast, y = channels_mean_169_cast)[name = tensor("zero_mean_169_cast")]; + tensor zero_mean_sq_169_cast = mul(x = zero_mean_169_cast, y = zero_mean_169_cast)[name = tensor("zero_mean_sq_169_cast")]; + tensor var_5750 = const()[name = tensor("op_5750"), val = tensor([1])]; + tensor var_5751_cast = reduce_mean(axes = var_5750, keep_dims = var_4938, x = zero_mean_sq_169_cast)[name = tensor("op_5751_cast")]; + tensor var_5752_to_fp16 = const()[name = tensor("op_5752_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5753_cast = add(x = var_5751_cast, y = var_5752_to_fp16)[name = tensor("op_5753_cast")]; + tensor denom_169_epsilon_0_to_fp16 = const()[name = tensor("denom_169_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_169_cast = rsqrt(epsilon = denom_169_epsilon_0_to_fp16, x = var_5753_cast)[name = tensor("denom_169_cast")]; + tensor out_169_cast = mul(x = zero_mean_169_cast, y = denom_169_cast)[name = tensor("out_169_cast")]; + tensor var_5757_to_fp16 = const()[name = tensor("op_5757_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(742730816)))]; + tensor var_5758_cast = add(x = out_169_cast, y = var_5757_to_fp16)[name = tensor("op_5758_cast")]; + tensor var_5760_to_fp16 = const()[name = tensor("op_5760_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(742733440)))]; + tensor hidden_states_233_cast = mul(x = var_5758_cast, y = var_5760_to_fp16)[name = tensor("hidden_states_233_cast")]; + tensor var_5767 = const()[name = tensor("op_5767"), val = tensor([1, 1])]; + tensor var_5769 = const()[name = tensor("op_5769"), val = tensor([1, 1])]; + tensor q_113_pad_type_0 = const()[name = tensor("q_113_pad_type_0"), val = tensor("custom")]; + tensor q_113_pad_0 = const()[name = tensor("q_113_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(742736064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(743964928))), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_113_cast = conv(dilations = var_5769, groups = var_4943, pad = q_113_pad_0, pad_type = q_113_pad_type_0, strides = var_5767, weight = 
mid_block_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_233_cast)[name = tensor("q_113_cast")]; + tensor var_5773 = const()[name = tensor("op_5773"), val = tensor([1, 1])]; + tensor var_5775 = const()[name = tensor("op_5775"), val = tensor([1, 1])]; + tensor k_113_pad_type_0 = const()[name = tensor("k_113_pad_type_0"), val = tensor("custom")]; + tensor k_113_pad_0 = const()[name = tensor("k_113_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(743965120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(745193984))), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_113_cast = conv(dilations = var_5775, groups = var_4943, pad = k_113_pad_0, pad_type = k_113_pad_type_0, strides = var_5773, weight = mid_block_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_233_cast)[name = tensor("k_113_cast")]; + tensor var_5779 = const()[name = tensor("op_5779"), val = tensor([1, 1])]; + tensor var_5781 = const()[name = tensor("op_5781"), val = tensor([1, 1])]; + tensor v_113_pad_type_0 = const()[name = tensor("v_113_pad_type_0"), val = tensor("custom")]; + tensor v_113_pad_0 = const()[name = tensor("v_113_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(745194176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(746832640))), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_113_cast = conv(dilations = var_5781, groups = var_4943, pad = v_113_pad_0, pad_type = v_113_pad_type_0, strides = var_5779, weight = mid_block_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_233_cast)[name = tensor("v_113_cast")]; + tensor var_5785 = const()[name = tensor("op_5785"), val = tensor([2, 20, 64, -1])]; + tensor var_5786_cast = reshape(shape = var_5785, x = q_113_cast)[name = tensor("op_5786_cast")]; + tensor var_5787 = const()[name = tensor("op_5787"), val = tensor([2, 20, 64, -1])]; + tensor var_5788_cast = reshape(shape = var_5787, x = k_113_cast)[name = tensor("op_5788_cast")]; + tensor var_5789 = const()[name = tensor("op_5789"), val = tensor([2, 20, 64, -1])]; + tensor var_5790_cast = reshape(shape = var_5789, x = v_113_cast)[name = tensor("op_5790_cast")]; + tensor attn_weights_225_transpose_x_0 = const()[name = tensor("attn_weights_225_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_225_transpose_y_0 = const()[name = tensor("attn_weights_225_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_225_cast = matmul(transpose_x = attn_weights_225_transpose_x_0, transpose_y = attn_weights_225_transpose_y_0, x = var_5786_cast, y = var_5788_cast)[name = tensor("attn_weights_225_cast")]; + tensor attn_weights_227_cast = mul(x = attn_weights_225_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_227_cast")]; + tensor var_5794_cast = softmax(axis = var_4927, x = attn_weights_227_cast)[name = tensor("op_5794_cast")]; + 
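
For readers tracing these ops: each attention block in this graph is the standard scaled-dot-product pattern, expressed as 1x1 convs for the Q/K/V projections followed by reshape, matmul, scale, and softmax. Below is a minimal NumPy sketch of the math implied by the `[2, 20, 64, -1]` reshapes and the matmul transpose flags above. Names and the toy sequence length are illustrative, and the `1/sqrt(64)` scale is an assumption based on the 64-wide head dimension (`var_4934_to_fp16` is defined elsewhere in the file).

```python
import numpy as np

def mil_style_attention(q, k, v, scale=1.0 / np.sqrt(64.0)):
    """q, k, v: [batch, heads, head_dim, seq], as after the reshape ops."""
    # matmul(transpose_x=True): [B,H,D,Sq]^T @ [B,H,D,Sk] -> [B,H,Sq,Sk]
    w = np.einsum("bhdq,bhdk->bhqk", q, k) * scale
    w = np.exp(w - w.max(axis=-1, keepdims=True))
    w /= w.sum(axis=-1, keepdims=True)                 # softmax over keys
    # matmul(transpose_y=True): [B,H,D,Sk] @ [B,H,Sq,Sk]^T -> [B,H,D,Sq]
    return np.einsum("bhdk,bhqk->bhdq", v, w)

q = np.random.randn(2, 20, 64, 64).astype(np.float32)  # 20 heads of width 64
k = np.random.randn(2, 20, 64, 64).astype(np.float32)  # toy sequence length
v = np.random.randn(2, 20, 64, 64).astype(np.float32)
out = mil_style_attention(q, k, v)
print(out.reshape(2, 1280, 1, -1).shape)               # (2, 1280, 1, 64)
```
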
tensor attn_113_transpose_x_0 = const()[name = tensor("attn_113_transpose_x_0"), val = tensor(false)]; + tensor attn_113_transpose_y_0 = const()[name = tensor("attn_113_transpose_y_0"), val = tensor(true)]; + tensor attn_113_cast = matmul(transpose_x = attn_113_transpose_x_0, transpose_y = attn_113_transpose_y_0, x = var_5790_cast, y = var_5794_cast)[name = tensor("attn_113_cast")]; + tensor var_5798 = const()[name = tensor("op_5798"), val = tensor([2, 1280, 1, -1])]; + tensor input_359_cast = reshape(shape = var_5798, x = attn_113_cast)[name = tensor("input_359_cast")]; + tensor var_5803 = const()[name = tensor("op_5803"), val = tensor([1, 1])]; + tensor var_5805 = const()[name = tensor("op_5805"), val = tensor([1, 1])]; + tensor var_5807_pad_type_0 = const()[name = tensor("op_5807_pad_type_0"), val = tensor("custom")]; + tensor var_5807_pad_0 = const()[name = tensor("op_5807_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(746833216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748062080))), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748062272)))]; + tensor var_5807_cast = conv(bias = mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_5805, groups = var_4943, pad = var_5807_pad_0, pad_type = var_5807_pad_type_0, strides = var_5803, weight = mid_block_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_359_cast)[name = tensor("op_5807_cast")]; + tensor inputs_171_cast = add(x = var_5807_cast, y = inputs_169_cast)[name = tensor("inputs_171_cast")]; + tensor var_5811 = const()[name = tensor("op_5811"), val = tensor([1])]; + tensor channels_mean_171_cast = reduce_mean(axes = var_5811, keep_dims = var_4938, x = inputs_171_cast)[name = tensor("channels_mean_171_cast")]; + tensor zero_mean_171_cast = sub(x = inputs_171_cast, y = channels_mean_171_cast)[name = tensor("zero_mean_171_cast")]; + tensor zero_mean_sq_171_cast = mul(x = zero_mean_171_cast, y = zero_mean_171_cast)[name = tensor("zero_mean_sq_171_cast")]; + tensor var_5815 = const()[name = tensor("op_5815"), val = tensor([1])]; + tensor var_5816_cast = reduce_mean(axes = var_5815, keep_dims = var_4938, x = zero_mean_sq_171_cast)[name = tensor("op_5816_cast")]; + tensor var_5817_to_fp16 = const()[name = tensor("op_5817_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5818_cast = add(x = var_5816_cast, y = var_5817_to_fp16)[name = tensor("op_5818_cast")]; + tensor denom_171_epsilon_0_to_fp16 = const()[name = tensor("denom_171_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_171_cast = rsqrt(epsilon = denom_171_epsilon_0_to_fp16, x = var_5818_cast)[name = tensor("denom_171_cast")]; + tensor out_171_cast = mul(x = zero_mean_171_cast, y = denom_171_cast)[name = tensor("out_171_cast")]; + tensor var_5822_to_fp16 = const()[name = tensor("op_5822_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748064896)))]; + 
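
The `reduce_mean` / `sub` / `mul` / `rsqrt` runs that recur between attention ops are a LayerNorm spelled out in primitives: `0x1.5p-17` is approximately 1.0014e-5, the usual 1e-5 variance epsilon rounded to fp16, and `0x1p-24` (about 5.96e-8) is the extra epsilon passed to `rsqrt`. Note the affine step is applied as `(normalized + bias) * weight` rather than `weight * normalized + bias`, so the stored bias is presumably the original beta divided by gamma. A sketch under those readings, with illustrative names:

```python
import numpy as np

def mil_style_layernorm(x, weight, bias, axis=1):
    eps_var = float.fromhex("0x1.5p-17")     # ~1.0014e-5, added to the variance
    eps_rsqrt = float.fromhex("0x1p-24")     # ~5.96e-8, rsqrt's own epsilon
    mean = x.mean(axis=axis, keepdims=True)
    zero_mean = x - mean
    var = (zero_mean * zero_mean).mean(axis=axis, keepdims=True)
    denom = 1.0 / np.sqrt(var + eps_var + eps_rsqrt)
    return (zero_mean * denom + bias) * weight           # add-then-mul, as above

x = np.random.randn(2, 1280, 1, 64).astype(np.float32)
w = np.ones((1, 1280, 1, 1), dtype=np.float32)
b = np.zeros((1, 1280, 1, 1), dtype=np.float32)
print(mil_style_layernorm(x, w, b).shape)                # (2, 1280, 1, 64)
```
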
tensor var_5823_cast = add(x = out_171_cast, y = var_5822_to_fp16)[name = tensor("op_5823_cast")]; + tensor var_5825_to_fp16 = const()[name = tensor("op_5825_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748067520)))]; + tensor hidden_states_235_cast = mul(x = var_5823_cast, y = var_5825_to_fp16)[name = tensor("hidden_states_235_cast")]; + tensor var_5832 = const()[name = tensor("op_5832"), val = tensor([1, 1])]; + tensor var_5834 = const()[name = tensor("op_5834"), val = tensor([1, 1])]; + tensor q_115_pad_type_0 = const()[name = tensor("q_115_pad_type_0"), val = tensor("custom")]; + tensor q_115_pad_0 = const()[name = tensor("q_115_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748070144))), lut = tensor([-0x1.2fp-6, -0x1.8c4p-8, 0x1.8cp-8, 0x1.2fp-6]), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_115_cast = conv(dilations = var_5834, groups = var_4943, pad = q_115_pad_0, pad_type = q_115_pad_type_0, strides = var_5832, weight = mid_block_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_235_cast)[name = tensor("q_115_cast")]; + tensor var_5838 = const()[name = tensor("op_5838"), val = tensor([1, 1])]; + tensor var_5840 = const()[name = tensor("op_5840"), val = tensor([1, 1])]; + tensor k_115_pad_type_0 = const()[name = tensor("k_115_pad_type_0"), val = tensor("custom")]; + tensor k_115_pad_0 = const()[name = tensor("k_115_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(748479808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(749790592))), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_115_cast = conv(dilations = var_5840, groups = var_4943, pad = k_115_pad_0, pad_type = k_115_pad_type_0, strides = var_5838, weight = mid_block_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_115_cast")]; + tensor var_5844 = const()[name = tensor("op_5844"), val = tensor([1, 1])]; + tensor var_5846 = const()[name = tensor("op_5846"), val = tensor([1, 1])]; + tensor v_115_pad_type_0 = const()[name = tensor("v_115_pad_type_0"), val = tensor("custom")]; + tensor v_115_pad_0 = const()[name = tensor("v_115_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(749790720))), lut = tensor([-0x1.1b4p-7, 0x1.1bp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_115_cast = conv(dilations = var_5846, groups = var_4943, pad = v_115_pad_0, pad_type = v_115_pad_type_0, strides = var_5844, weight = mid_block_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_115_cast")]; + tensor 
var_5850 = const()[name = tensor("op_5850"), val = tensor([2, 20, 64, -1])]; + tensor var_5851_cast = reshape(shape = var_5850, x = q_115_cast)[name = tensor("op_5851_cast")]; + tensor var_5852 = const()[name = tensor("op_5852"), val = tensor([2, 20, 64, -1])]; + tensor var_5853_cast = reshape(shape = var_5852, x = k_115_cast)[name = tensor("op_5853_cast")]; + tensor var_5854 = const()[name = tensor("op_5854"), val = tensor([2, 20, 64, -1])]; + tensor var_5855_cast = reshape(shape = var_5854, x = v_115_cast)[name = tensor("op_5855_cast")]; + tensor attn_weights_229_transpose_x_0 = const()[name = tensor("attn_weights_229_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_229_transpose_y_0 = const()[name = tensor("attn_weights_229_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_229_cast = matmul(transpose_x = attn_weights_229_transpose_x_0, transpose_y = attn_weights_229_transpose_y_0, x = var_5851_cast, y = var_5853_cast)[name = tensor("attn_weights_229_cast")]; + tensor attn_weights_231_cast = mul(x = attn_weights_229_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_231_cast")]; + tensor var_5859_cast = softmax(axis = var_4927, x = attn_weights_231_cast)[name = tensor("op_5859_cast")]; + tensor attn_115_transpose_x_0 = const()[name = tensor("attn_115_transpose_x_0"), val = tensor(false)]; + tensor attn_115_transpose_y_0 = const()[name = tensor("attn_115_transpose_y_0"), val = tensor(true)]; + tensor attn_115_cast = matmul(transpose_x = attn_115_transpose_x_0, transpose_y = attn_115_transpose_y_0, x = var_5855_cast, y = var_5859_cast)[name = tensor("attn_115_cast")]; + tensor var_5863 = const()[name = tensor("op_5863"), val = tensor([2, 1280, 1, -1])]; + tensor input_361_cast = reshape(shape = var_5863, x = attn_115_cast)[name = tensor("input_361_cast")]; + tensor var_5868 = const()[name = tensor("op_5868"), val = tensor([1, 1])]; + tensor var_5870 = const()[name = tensor("op_5870"), val = tensor([1, 1])]; + tensor var_5872_pad_type_0 = const()[name = tensor("op_5872_pad_type_0"), val = tensor("custom")]; + tensor var_5872_pad_0 = const()[name = tensor("op_5872_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(750118464))), lut = tensor([-0x1.478p-8, 0x1.47p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(750323328)))]; + tensor var_5872_cast = conv(bias = mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_5870, groups = var_4943, pad = var_5872_pad_0, pad_type = var_5872_pad_type_0, strides = var_5868, weight = mid_block_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_361_cast)[name = tensor("op_5872_cast")]; + tensor inputs_173_cast = add(x = var_5872_cast, y = inputs_171_cast)[name = tensor("inputs_173_cast")]; + tensor var_5876 = const()[name = tensor("op_5876"), val = tensor([1])]; + tensor channels_mean_173_cast = reduce_mean(axes = var_5876, keep_dims = var_4938, x = inputs_173_cast)[name = tensor("channels_mean_173_cast")]; + 
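
The `attn2_to_out` weight just above is a clear instance of the palettization this repository is about: its LUT has only two fp16 entries, so each of the 1280x1280 weights is stored as a single index bit. Other layers in this graph carry four-entry (2-bit) inline LUTs, or larger tables referenced from `weight.bin`, which is what makes the overall recipe "mixed-bit". A toy sketch of what `constexpr_lut_to_dense` computes; the bit-packing order here is an assumption for illustration, since the real blob layout is defined by Core ML:

```python
import numpy as np

def lut_to_dense(packed, lut, shape):
    """Expand packed n-bit palette indices through a look-up table."""
    bits = int(np.log2(len(lut)))                 # 2 entries -> 1 bit, 4 -> 2 bits
    stream = np.unpackbits(packed)                # MSB-first bit stream (assumed)
    n = int(np.prod(shape))
    groups = stream[: n * bits].reshape(n, bits)
    idx = groups.dot(1 << np.arange(bits)[::-1])  # n-bit groups -> integer indices
    return np.asarray(lut, dtype=np.float16)[idx].reshape(shape)

lut = [float.fromhex("-0x1.478p-8"), float.fromhex("0x1.47p-8")]  # 1-bit layer
packed = np.random.randint(0, 256, size=(1280 * 1280) // 8, dtype=np.uint8)
dense = lut_to_dense(packed, lut, (1280, 1280, 1, 1))
print(dense.dtype, dense.shape)                   # float16 (1280, 1280, 1, 1)
```
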
tensor zero_mean_173_cast = sub(x = inputs_173_cast, y = channels_mean_173_cast)[name = tensor("zero_mean_173_cast")]; + tensor zero_mean_sq_173_cast = mul(x = zero_mean_173_cast, y = zero_mean_173_cast)[name = tensor("zero_mean_sq_173_cast")]; + tensor var_5880 = const()[name = tensor("op_5880"), val = tensor([1])]; + tensor var_5881_cast = reduce_mean(axes = var_5880, keep_dims = var_4938, x = zero_mean_sq_173_cast)[name = tensor("op_5881_cast")]; + tensor var_5882_to_fp16 = const()[name = tensor("op_5882_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5883_cast = add(x = var_5881_cast, y = var_5882_to_fp16)[name = tensor("op_5883_cast")]; + tensor denom_173_epsilon_0_to_fp16 = const()[name = tensor("denom_173_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_173_cast = rsqrt(epsilon = denom_173_epsilon_0_to_fp16, x = var_5883_cast)[name = tensor("denom_173_cast")]; + tensor out_173_cast = mul(x = zero_mean_173_cast, y = denom_173_cast)[name = tensor("out_173_cast")]; + tensor var_5887_to_fp16 = const()[name = tensor("op_5887_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(750325952)))]; + tensor var_5888_cast = add(x = out_173_cast, y = var_5887_to_fp16)[name = tensor("op_5888_cast")]; + tensor var_5890_to_fp16 = const()[name = tensor("op_5890_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(750328576)))]; + tensor input_363_cast = mul(x = var_5888_cast, y = var_5890_to_fp16)[name = tensor("input_363_cast")]; + tensor var_5898 = const()[name = tensor("op_5898"), val = tensor([1, 1])]; + tensor var_5900 = const()[name = tensor("op_5900"), val = tensor([1, 1])]; + tensor var_5902_pad_type_0 = const()[name = tensor("op_5902_pad_type_0"), val = tensor("custom")]; + tensor var_5902_pad_0 = const()[name = tensor("op_5902_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(750331200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(760161664))), name = tensor("mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(760161856)))]; + tensor var_5902_cast = conv(bias = mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_5900, groups = var_4943, pad = var_5902_pad_0, pad_type = var_5902_pad_type_0, strides = var_5898, weight = mid_block_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_363_cast)[name = tensor("op_5902_cast")]; + tensor var_5903_split_sizes_0 = const()[name = tensor("op_5903_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_5903_axis_0 = const()[name = tensor("op_5903_axis_0"), val = tensor(1)]; + tensor var_5903_cast_0, tensor var_5903_cast_1 = split(axis = var_5903_axis_0, split_sizes = var_5903_split_sizes_0, x = var_5902_cast)[name = tensor("op_5903_cast")]; + tensor var_5905_mode_0 = const()[name = tensor("op_5905_mode_0"), val = tensor("EXACT")]; + tensor var_5905_cast = gelu(mode = var_5905_mode_0, x = var_5903_cast_1)[name = 
tensor("op_5905_cast")]; + tensor input_365_cast = mul(x = var_5903_cast_0, y = var_5905_cast)[name = tensor("input_365_cast")]; + tensor var_5909 = const()[name = tensor("op_5909"), val = tensor([1, 1])]; + tensor var_5911 = const()[name = tensor("op_5911"), val = tensor([1, 1])]; + tensor var_5913_pad_type_0 = const()[name = tensor("op_5913_pad_type_0"), val = tensor("custom")]; + tensor var_5913_pad_0 = const()[name = tensor("op_5913_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(760182400))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(765097664))), name = tensor("mid_block_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(765097856)))]; + tensor var_5913_cast = conv(bias = mid_block_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_5911, groups = var_4943, pad = var_5913_pad_0, pad_type = var_5913_pad_type_0, strides = var_5909, weight = mid_block_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_365_cast)[name = tensor("op_5913_cast")]; + tensor inputs_175_cast = add(x = var_5913_cast, y = inputs_173_cast)[name = tensor("inputs_175_cast")]; + tensor var_5923 = const()[name = tensor("op_5923"), val = tensor([1])]; + tensor channels_mean_175_cast = reduce_mean(axes = var_5923, keep_dims = var_4938, x = inputs_175_cast)[name = tensor("channels_mean_175_cast")]; + tensor zero_mean_175_cast = sub(x = inputs_175_cast, y = channels_mean_175_cast)[name = tensor("zero_mean_175_cast")]; + tensor zero_mean_sq_175_cast = mul(x = zero_mean_175_cast, y = zero_mean_175_cast)[name = tensor("zero_mean_sq_175_cast")]; + tensor var_5927 = const()[name = tensor("op_5927"), val = tensor([1])]; + tensor var_5928_cast = reduce_mean(axes = var_5927, keep_dims = var_4938, x = zero_mean_sq_175_cast)[name = tensor("op_5928_cast")]; + tensor var_5929_to_fp16 = const()[name = tensor("op_5929_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5930_cast = add(x = var_5928_cast, y = var_5929_to_fp16)[name = tensor("op_5930_cast")]; + tensor denom_175_epsilon_0_to_fp16 = const()[name = tensor("denom_175_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_175_cast = rsqrt(epsilon = denom_175_epsilon_0_to_fp16, x = var_5930_cast)[name = tensor("denom_175_cast")]; + tensor out_175_cast = mul(x = zero_mean_175_cast, y = denom_175_cast)[name = tensor("out_175_cast")]; + tensor var_5934_to_fp16 = const()[name = tensor("op_5934_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(765100480)))]; + tensor var_5935_cast = add(x = out_175_cast, y = var_5934_to_fp16)[name = tensor("op_5935_cast")]; + tensor var_5937_to_fp16 = const()[name = tensor("op_5937_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(765103104)))]; + tensor hidden_states_239_cast = mul(x = var_5935_cast, y = var_5937_to_fp16)[name = tensor("hidden_states_239_cast")]; + tensor var_5944 = const()[name = tensor("op_5944"), val = tensor([1, 1])]; + tensor var_5946 = 
const()[name = tensor("op_5946"), val = tensor([1, 1])]; + tensor q_117_pad_type_0 = const()[name = tensor("q_117_pad_type_0"), val = tensor("custom")]; + tensor q_117_pad_0 = const()[name = tensor("q_117_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(765105728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(765924992))), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_117_cast = conv(dilations = var_5946, groups = var_4943, pad = q_117_pad_0, pad_type = q_117_pad_type_0, strides = var_5944, weight = mid_block_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_239_cast)[name = tensor("q_117_cast")]; + tensor var_5950 = const()[name = tensor("op_5950"), val = tensor([1, 1])]; + tensor var_5952 = const()[name = tensor("op_5952"), val = tensor([1, 1])]; + tensor k_117_pad_type_0 = const()[name = tensor("k_117_pad_type_0"), val = tensor("custom")]; + tensor k_117_pad_0 = const()[name = tensor("k_117_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(765925120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(767153984))), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_117_cast = conv(dilations = var_5952, groups = var_4943, pad = k_117_pad_0, pad_type = k_117_pad_type_0, strides = var_5950, weight = mid_block_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_239_cast)[name = tensor("k_117_cast")]; + tensor var_5956 = const()[name = tensor("op_5956"), val = tensor([1, 1])]; + tensor var_5958 = const()[name = tensor("op_5958"), val = tensor([1, 1])]; + tensor v_117_pad_type_0 = const()[name = tensor("v_117_pad_type_0"), val = tensor("custom")]; + tensor v_117_pad_0 = const()[name = tensor("v_117_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(767154176))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(768383040))), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_117_cast = conv(dilations = var_5958, groups = var_4943, pad = v_117_pad_0, pad_type = v_117_pad_type_0, strides = var_5956, weight = mid_block_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_239_cast)[name = tensor("v_117_cast")]; + tensor var_5962 = const()[name = tensor("op_5962"), val = tensor([2, 20, 64, -1])]; + tensor var_5963_cast = reshape(shape = var_5962, x = q_117_cast)[name = tensor("op_5963_cast")]; + tensor var_5964 = const()[name = tensor("op_5964"), val = tensor([2, 20, 64, -1])]; + tensor var_5965_cast = reshape(shape = var_5964, x = k_117_cast)[name = tensor("op_5965_cast")]; + tensor var_5966 = 
const()[name = tensor("op_5966"), val = tensor([2, 20, 64, -1])]; + tensor var_5967_cast = reshape(shape = var_5966, x = v_117_cast)[name = tensor("op_5967_cast")]; + tensor attn_weights_233_transpose_x_0 = const()[name = tensor("attn_weights_233_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_233_transpose_y_0 = const()[name = tensor("attn_weights_233_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_233_cast = matmul(transpose_x = attn_weights_233_transpose_x_0, transpose_y = attn_weights_233_transpose_y_0, x = var_5963_cast, y = var_5965_cast)[name = tensor("attn_weights_233_cast")]; + tensor attn_weights_235_cast = mul(x = attn_weights_233_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_235_cast")]; + tensor var_5971_cast = softmax(axis = var_4927, x = attn_weights_235_cast)[name = tensor("op_5971_cast")]; + tensor attn_117_transpose_x_0 = const()[name = tensor("attn_117_transpose_x_0"), val = tensor(false)]; + tensor attn_117_transpose_y_0 = const()[name = tensor("attn_117_transpose_y_0"), val = tensor(true)]; + tensor attn_117_cast = matmul(transpose_x = attn_117_transpose_x_0, transpose_y = attn_117_transpose_y_0, x = var_5967_cast, y = var_5971_cast)[name = tensor("attn_117_cast")]; + tensor var_5975 = const()[name = tensor("op_5975"), val = tensor([2, 1280, 1, -1])]; + tensor input_367_cast = reshape(shape = var_5975, x = attn_117_cast)[name = tensor("input_367_cast")]; + tensor var_5980 = const()[name = tensor("op_5980"), val = tensor([1, 1])]; + tensor var_5982 = const()[name = tensor("op_5982"), val = tensor([1, 1])]; + tensor var_5984_pad_type_0 = const()[name = tensor("op_5984_pad_type_0"), val = tensor("custom")]; + tensor var_5984_pad_0 = const()[name = tensor("op_5984_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(768383232))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769612096))), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769612288)))]; + tensor var_5984_cast = conv(bias = mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_5982, groups = var_4943, pad = var_5984_pad_0, pad_type = var_5984_pad_type_0, strides = var_5980, weight = mid_block_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_367_cast)[name = tensor("op_5984_cast")]; + tensor inputs_177_cast = add(x = var_5984_cast, y = inputs_175_cast)[name = tensor("inputs_177_cast")]; + tensor var_5988 = const()[name = tensor("op_5988"), val = tensor([1])]; + tensor channels_mean_177_cast = reduce_mean(axes = var_5988, keep_dims = var_4938, x = inputs_177_cast)[name = tensor("channels_mean_177_cast")]; + tensor zero_mean_177_cast = sub(x = inputs_177_cast, y = channels_mean_177_cast)[name = tensor("zero_mean_177_cast")]; + tensor zero_mean_sq_177_cast = mul(x = zero_mean_177_cast, y = zero_mean_177_cast)[name = tensor("zero_mean_sq_177_cast")]; + tensor var_5992 = const()[name = tensor("op_5992"), val = tensor([1])]; 
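
Each transformer block ends with the feed-forward seen a little above (e.g. `op_5903` through `op_5905`): a 1x1 conv projects 1280 to 10240 channels, the result is split into two 5120-channel halves, exact GELU is applied to the second half, and it gates the first by elementwise multiplication, i.e. the GEGLU activation. A small NumPy sketch of the gating step, with the convs on either side omitted and a toy sequence length:

```python
import math
import numpy as np

_erf = np.vectorize(math.erf)

def gelu_exact(x):
    # MIL gelu with mode "EXACT": 0.5 * x * (1 + erf(x / sqrt(2)))
    return 0.5 * x * (1.0 + _erf(x / math.sqrt(2.0)))

def geglu(projected):
    a, b = np.split(projected, 2, axis=1)   # split_sizes = [5120, 5120], axis 1
    return a * gelu_exact(b)                # second half gates the first

x = np.random.randn(2, 10240, 1, 16).astype(np.float32)
print(geglu(x).shape)                       # (2, 5120, 1, 16)
```
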
+ tensor var_5993_cast = reduce_mean(axes = var_5992, keep_dims = var_4938, x = zero_mean_sq_177_cast)[name = tensor("op_5993_cast")]; + tensor var_5994_to_fp16 = const()[name = tensor("op_5994_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_5995_cast = add(x = var_5993_cast, y = var_5994_to_fp16)[name = tensor("op_5995_cast")]; + tensor denom_177_epsilon_0_to_fp16 = const()[name = tensor("denom_177_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_177_cast = rsqrt(epsilon = denom_177_epsilon_0_to_fp16, x = var_5995_cast)[name = tensor("denom_177_cast")]; + tensor out_177_cast = mul(x = zero_mean_177_cast, y = denom_177_cast)[name = tensor("out_177_cast")]; + tensor var_5999_to_fp16 = const()[name = tensor("op_5999_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769614912)))]; + tensor var_6000_cast = add(x = out_177_cast, y = var_5999_to_fp16)[name = tensor("op_6000_cast")]; + tensor var_6002_to_fp16 = const()[name = tensor("op_6002_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769617536)))]; + tensor hidden_states_241_cast = mul(x = var_6000_cast, y = var_6002_to_fp16)[name = tensor("hidden_states_241_cast")]; + tensor var_6009 = const()[name = tensor("op_6009"), val = tensor([1, 1])]; + tensor var_6011 = const()[name = tensor("op_6011"), val = tensor([1, 1])]; + tensor q_119_pad_type_0 = const()[name = tensor("q_119_pad_type_0"), val = tensor("custom")]; + tensor q_119_pad_0 = const()[name = tensor("q_119_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769620160))), lut = tensor([-0x1.6a4p-7, 0x1.6ap-7]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_119_cast = conv(dilations = var_6011, groups = var_4943, pad = q_119_pad_0, pad_type = q_119_pad_type_0, strides = var_6009, weight = mid_block_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_241_cast)[name = tensor("q_119_cast")]; + tensor var_6015 = const()[name = tensor("op_6015"), val = tensor([1, 1])]; + tensor var_6017 = const()[name = tensor("op_6017"), val = tensor([1, 1])]; + tensor k_119_pad_type_0 = const()[name = tensor("k_119_pad_type_0"), val = tensor("custom")]; + tensor k_119_pad_0 = const()[name = tensor("k_119_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(769825024))), lut = tensor([-0x1.d9cp-8, 0x1.d8p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_119_cast = conv(dilations = var_6017, groups = var_4943, pad = k_119_pad_0, pad_type = k_119_pad_type_0, strides = var_6015, weight = mid_block_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_119_cast")]; + tensor var_6021 = const()[name = tensor("op_6021"), val = tensor([1, 1])]; + tensor var_6023 = const()[name = tensor("op_6023"), val = tensor([1, 1])]; + tensor v_119_pad_type_0 = const()[name = tensor("v_119_pad_type_0"), val = tensor("custom")]; + tensor 
v_119_pad_0 = const()[name = tensor("v_119_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(770152768))), lut = tensor([-0x1.128p-7, 0x1.13p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_119_cast = conv(dilations = var_6023, groups = var_4943, pad = v_119_pad_0, pad_type = v_119_pad_type_0, strides = var_6021, weight = mid_block_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_119_cast")]; + tensor var_6027 = const()[name = tensor("op_6027"), val = tensor([2, 20, 64, -1])]; + tensor var_6028_cast = reshape(shape = var_6027, x = q_119_cast)[name = tensor("op_6028_cast")]; + tensor var_6029 = const()[name = tensor("op_6029"), val = tensor([2, 20, 64, -1])]; + tensor var_6030_cast = reshape(shape = var_6029, x = k_119_cast)[name = tensor("op_6030_cast")]; + tensor var_6031 = const()[name = tensor("op_6031"), val = tensor([2, 20, 64, -1])]; + tensor var_6032_cast = reshape(shape = var_6031, x = v_119_cast)[name = tensor("op_6032_cast")]; + tensor attn_weights_237_transpose_x_0 = const()[name = tensor("attn_weights_237_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_237_transpose_y_0 = const()[name = tensor("attn_weights_237_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_237_cast = matmul(transpose_x = attn_weights_237_transpose_x_0, transpose_y = attn_weights_237_transpose_y_0, x = var_6028_cast, y = var_6030_cast)[name = tensor("attn_weights_237_cast")]; + tensor attn_weights_239_cast = mul(x = attn_weights_237_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_239_cast")]; + tensor var_6036_cast = softmax(axis = var_4927, x = attn_weights_239_cast)[name = tensor("op_6036_cast")]; + tensor attn_119_transpose_x_0 = const()[name = tensor("attn_119_transpose_x_0"), val = tensor(false)]; + tensor attn_119_transpose_y_0 = const()[name = tensor("attn_119_transpose_y_0"), val = tensor(true)]; + tensor attn_119_cast = matmul(transpose_x = attn_119_transpose_x_0, transpose_y = attn_119_transpose_y_0, x = var_6032_cast, y = var_6036_cast)[name = tensor("attn_119_cast")]; + tensor var_6040 = const()[name = tensor("op_6040"), val = tensor([2, 1280, 1, -1])]; + tensor input_369_cast = reshape(shape = var_6040, x = attn_119_cast)[name = tensor("input_369_cast")]; + tensor var_6045 = const()[name = tensor("op_6045"), val = tensor([1, 1])]; + tensor var_6047 = const()[name = tensor("op_6047"), val = tensor([1, 1])]; + tensor var_6049_pad_type_0 = const()[name = tensor("op_6049_pad_type_0"), val = tensor("custom")]; + tensor var_6049_pad_0 = const()[name = tensor("op_6049_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(770480512))), lut = tensor([-0x1.45p-8, 0x1.46p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(770685376)))]; + tensor var_6049_cast = conv(bias = mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_6047, groups = var_4943, pad = var_6049_pad_0, pad_type = var_6049_pad_type_0, strides = var_6045, weight = mid_block_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_369_cast)[name = tensor("op_6049_cast")]; + tensor inputs_179_cast = add(x = var_6049_cast, y = inputs_177_cast)[name = tensor("inputs_179_cast")]; + tensor var_6053 = const()[name = tensor("op_6053"), val = tensor([1])]; + tensor channels_mean_179_cast = reduce_mean(axes = var_6053, keep_dims = var_4938, x = inputs_179_cast)[name = tensor("channels_mean_179_cast")]; + tensor zero_mean_179_cast = sub(x = inputs_179_cast, y = channels_mean_179_cast)[name = tensor("zero_mean_179_cast")]; + tensor zero_mean_sq_179_cast = mul(x = zero_mean_179_cast, y = zero_mean_179_cast)[name = tensor("zero_mean_sq_179_cast")]; + tensor var_6057 = const()[name = tensor("op_6057"), val = tensor([1])]; + tensor var_6058_cast = reduce_mean(axes = var_6057, keep_dims = var_4938, x = zero_mean_sq_179_cast)[name = tensor("op_6058_cast")]; + tensor var_6059_to_fp16 = const()[name = tensor("op_6059_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6060_cast = add(x = var_6058_cast, y = var_6059_to_fp16)[name = tensor("op_6060_cast")]; + tensor denom_179_epsilon_0_to_fp16 = const()[name = tensor("denom_179_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_179_cast = rsqrt(epsilon = denom_179_epsilon_0_to_fp16, x = var_6060_cast)[name = tensor("denom_179_cast")]; + tensor out_179_cast = mul(x = zero_mean_179_cast, y = denom_179_cast)[name = tensor("out_179_cast")]; + tensor var_6064_to_fp16 = const()[name = tensor("op_6064_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(770688000)))]; + tensor var_6065_cast = add(x = out_179_cast, y = var_6064_to_fp16)[name = tensor("op_6065_cast")]; + tensor var_6067_to_fp16 = const()[name = tensor("op_6067_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(770690624)))]; + tensor input_371_cast = mul(x = var_6065_cast, y = var_6067_to_fp16)[name = tensor("input_371_cast")]; + tensor var_6075 = const()[name = tensor("op_6075"), val = tensor([1, 1])]; + tensor var_6077 = const()[name = tensor("op_6077"), val = tensor([1, 1])]; + tensor var_6079_pad_type_0 = const()[name = tensor("op_6079_pad_type_0"), val = tensor("custom")]; + tensor var_6079_pad_0 = const()[name = tensor("op_6079_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(770693248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(780523712))), name = tensor("mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(780523904)))]; + tensor var_6079_cast = conv(bias = mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_6077, 
groups = var_4943, pad = var_6079_pad_0, pad_type = var_6079_pad_type_0, strides = var_6075, weight = mid_block_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_371_cast)[name = tensor("op_6079_cast")]; + tensor var_6080_split_sizes_0 = const()[name = tensor("op_6080_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6080_axis_0 = const()[name = tensor("op_6080_axis_0"), val = tensor(1)]; + tensor var_6080_cast_0, tensor var_6080_cast_1 = split(axis = var_6080_axis_0, split_sizes = var_6080_split_sizes_0, x = var_6079_cast)[name = tensor("op_6080_cast")]; + tensor var_6082_mode_0 = const()[name = tensor("op_6082_mode_0"), val = tensor("EXACT")]; + tensor var_6082_cast = gelu(mode = var_6082_mode_0, x = var_6080_cast_1)[name = tensor("op_6082_cast")]; + tensor input_373_cast = mul(x = var_6080_cast_0, y = var_6082_cast)[name = tensor("input_373_cast")]; + tensor var_6086 = const()[name = tensor("op_6086"), val = tensor([1, 1])]; + tensor var_6088 = const()[name = tensor("op_6088"), val = tensor([1, 1])]; + tensor var_6090_pad_type_0 = const()[name = tensor("op_6090_pad_type_0"), val = tensor("custom")]; + tensor var_6090_pad_0 = const()[name = tensor("op_6090_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(780544448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(787098112))), name = tensor("mid_block_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(787098688)))]; + tensor var_6090_cast = conv(bias = mid_block_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_6088, groups = var_4943, pad = var_6090_pad_0, pad_type = var_6090_pad_type_0, strides = var_6086, weight = mid_block_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_373_cast)[name = tensor("op_6090_cast")]; + tensor inputs_181_cast = add(x = var_6090_cast, y = inputs_179_cast)[name = tensor("inputs_181_cast")]; + tensor var_6100 = const()[name = tensor("op_6100"), val = tensor([1])]; + tensor channels_mean_181_cast = reduce_mean(axes = var_6100, keep_dims = var_4938, x = inputs_181_cast)[name = tensor("channels_mean_181_cast")]; + tensor zero_mean_181_cast = sub(x = inputs_181_cast, y = channels_mean_181_cast)[name = tensor("zero_mean_181_cast")]; + tensor zero_mean_sq_181_cast = mul(x = zero_mean_181_cast, y = zero_mean_181_cast)[name = tensor("zero_mean_sq_181_cast")]; + tensor var_6104 = const()[name = tensor("op_6104"), val = tensor([1])]; + tensor var_6105_cast = reduce_mean(axes = var_6104, keep_dims = var_4938, x = zero_mean_sq_181_cast)[name = tensor("op_6105_cast")]; + tensor var_6106_to_fp16 = const()[name = tensor("op_6106_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6107_cast = add(x = var_6105_cast, y = var_6106_to_fp16)[name = tensor("op_6107_cast")]; + tensor denom_181_epsilon_0_to_fp16 = const()[name = tensor("denom_181_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_181_cast = rsqrt(epsilon = denom_181_epsilon_0_to_fp16, x = var_6107_cast)[name = 
tensor("denom_181_cast")]; + tensor out_181_cast = mul(x = zero_mean_181_cast, y = denom_181_cast)[name = tensor("out_181_cast")]; + tensor var_6111_to_fp16 = const()[name = tensor("op_6111_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(787101312)))]; + tensor var_6112_cast = add(x = out_181_cast, y = var_6111_to_fp16)[name = tensor("op_6112_cast")]; + tensor var_6114_to_fp16 = const()[name = tensor("op_6114_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(787103936)))]; + tensor hidden_states_245_cast = mul(x = var_6112_cast, y = var_6114_to_fp16)[name = tensor("hidden_states_245_cast")]; + tensor var_6121 = const()[name = tensor("op_6121"), val = tensor([1, 1])]; + tensor var_6123 = const()[name = tensor("op_6123"), val = tensor([1, 1])]; + tensor q_121_pad_type_0 = const()[name = tensor("q_121_pad_type_0"), val = tensor("custom")]; + tensor q_121_pad_0 = const()[name = tensor("q_121_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(787106560))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(787925824))), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_121_cast = conv(dilations = var_6123, groups = var_4943, pad = q_121_pad_0, pad_type = q_121_pad_type_0, strides = var_6121, weight = mid_block_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_245_cast)[name = tensor("q_121_cast")]; + tensor var_6127 = const()[name = tensor("op_6127"), val = tensor([1, 1])]; + tensor var_6129 = const()[name = tensor("op_6129"), val = tensor([1, 1])]; + tensor k_121_pad_type_0 = const()[name = tensor("k_121_pad_type_0"), val = tensor("custom")]; + tensor k_121_pad_0 = const()[name = tensor("k_121_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(787925952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788745216))), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_121_cast = conv(dilations = var_6129, groups = var_4943, pad = k_121_pad_0, pad_type = k_121_pad_type_0, strides = var_6127, weight = mid_block_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_245_cast)[name = tensor("k_121_cast")]; + tensor var_6133 = const()[name = tensor("op_6133"), val = tensor([1, 1])]; + tensor var_6135 = const()[name = tensor("op_6135"), val = tensor([1, 1])]; + tensor v_121_pad_type_0 = const()[name = tensor("v_121_pad_type_0"), val = tensor("custom")]; + tensor v_121_pad_0 = const()[name = tensor("v_121_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(788745344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(789974208))), name = 
tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_121_cast = conv(dilations = var_6135, groups = var_4943, pad = v_121_pad_0, pad_type = v_121_pad_type_0, strides = var_6133, weight = mid_block_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_245_cast)[name = tensor("v_121_cast")]; + tensor var_6139 = const()[name = tensor("op_6139"), val = tensor([2, 20, 64, -1])]; + tensor var_6140_cast = reshape(shape = var_6139, x = q_121_cast)[name = tensor("op_6140_cast")]; + tensor var_6141 = const()[name = tensor("op_6141"), val = tensor([2, 20, 64, -1])]; + tensor var_6142_cast = reshape(shape = var_6141, x = k_121_cast)[name = tensor("op_6142_cast")]; + tensor var_6143 = const()[name = tensor("op_6143"), val = tensor([2, 20, 64, -1])]; + tensor var_6144_cast = reshape(shape = var_6143, x = v_121_cast)[name = tensor("op_6144_cast")]; + tensor attn_weights_241_transpose_x_0 = const()[name = tensor("attn_weights_241_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_241_transpose_y_0 = const()[name = tensor("attn_weights_241_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_241_cast = matmul(transpose_x = attn_weights_241_transpose_x_0, transpose_y = attn_weights_241_transpose_y_0, x = var_6140_cast, y = var_6142_cast)[name = tensor("attn_weights_241_cast")]; + tensor attn_weights_243_cast = mul(x = attn_weights_241_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_243_cast")]; + tensor var_6148_cast = softmax(axis = var_4927, x = attn_weights_243_cast)[name = tensor("op_6148_cast")]; + tensor attn_121_transpose_x_0 = const()[name = tensor("attn_121_transpose_x_0"), val = tensor(false)]; + tensor attn_121_transpose_y_0 = const()[name = tensor("attn_121_transpose_y_0"), val = tensor(true)]; + tensor attn_121_cast = matmul(transpose_x = attn_121_transpose_x_0, transpose_y = attn_121_transpose_y_0, x = var_6144_cast, y = var_6148_cast)[name = tensor("attn_121_cast")]; + tensor var_6152 = const()[name = tensor("op_6152"), val = tensor([2, 1280, 1, -1])]; + tensor input_375_cast = reshape(shape = var_6152, x = attn_121_cast)[name = tensor("input_375_cast")]; + tensor var_6157 = const()[name = tensor("op_6157"), val = tensor([1, 1])]; + tensor var_6159 = const()[name = tensor("op_6159"), val = tensor([1, 1])]; + tensor var_6161_pad_type_0 = const()[name = tensor("op_6161_pad_type_0"), val = tensor("custom")]; + tensor var_6161_pad_0 = const()[name = tensor("op_6161_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(789974400))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(791203264))), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(791203456)))]; + tensor var_6161_cast = conv(bias = mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_6159, groups = var_4943, pad = var_6161_pad_0, pad_type = var_6161_pad_type_0, strides = 
var_6157, weight = mid_block_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_375_cast)[name = tensor("op_6161_cast")]; + tensor inputs_183_cast = add(x = var_6161_cast, y = inputs_181_cast)[name = tensor("inputs_183_cast")]; + tensor var_6165 = const()[name = tensor("op_6165"), val = tensor([1])]; + tensor channels_mean_183_cast = reduce_mean(axes = var_6165, keep_dims = var_4938, x = inputs_183_cast)[name = tensor("channels_mean_183_cast")]; + tensor zero_mean_183_cast = sub(x = inputs_183_cast, y = channels_mean_183_cast)[name = tensor("zero_mean_183_cast")]; + tensor zero_mean_sq_183_cast = mul(x = zero_mean_183_cast, y = zero_mean_183_cast)[name = tensor("zero_mean_sq_183_cast")]; + tensor var_6169 = const()[name = tensor("op_6169"), val = tensor([1])]; + tensor var_6170_cast = reduce_mean(axes = var_6169, keep_dims = var_4938, x = zero_mean_sq_183_cast)[name = tensor("op_6170_cast")]; + tensor var_6171_to_fp16 = const()[name = tensor("op_6171_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6172_cast = add(x = var_6170_cast, y = var_6171_to_fp16)[name = tensor("op_6172_cast")]; + tensor denom_183_epsilon_0_to_fp16 = const()[name = tensor("denom_183_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_183_cast = rsqrt(epsilon = denom_183_epsilon_0_to_fp16, x = var_6172_cast)[name = tensor("denom_183_cast")]; + tensor out_183_cast = mul(x = zero_mean_183_cast, y = denom_183_cast)[name = tensor("out_183_cast")]; + tensor var_6176_to_fp16 = const()[name = tensor("op_6176_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(791206080)))]; + tensor var_6177_cast = add(x = out_183_cast, y = var_6176_to_fp16)[name = tensor("op_6177_cast")]; + tensor var_6179_to_fp16 = const()[name = tensor("op_6179_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(791208704)))]; + tensor hidden_states_247_cast = mul(x = var_6177_cast, y = var_6179_to_fp16)[name = tensor("hidden_states_247_cast")]; + tensor var_6186 = const()[name = tensor("op_6186"), val = tensor([1, 1])]; + tensor var_6188 = const()[name = tensor("op_6188"), val = tensor([1, 1])]; + tensor q_123_pad_type_0 = const()[name = tensor("q_123_pad_type_0"), val = tensor("custom")]; + tensor q_123_pad_0 = const()[name = tensor("q_123_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(791211328))), lut = tensor([-0x1.664p-7, 0x1.668p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_123_cast = conv(dilations = var_6188, groups = var_4943, pad = q_123_pad_0, pad_type = q_123_pad_type_0, strides = var_6186, weight = mid_block_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_247_cast)[name = tensor("q_123_cast")]; + tensor var_6192 = const()[name = tensor("op_6192"), val = tensor([1, 1])]; + tensor var_6194 = const()[name = tensor("op_6194"), val = tensor([1, 1])]; + tensor k_123_pad_type_0 = const()[name = tensor("k_123_pad_type_0"), val = tensor("custom")]; + tensor k_123_pad_0 = const()[name = tensor("k_123_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(791416192))), lut = tensor([-0x1.784p-7, -0x1.ec4p-9, 0x1.eap-9, 0x1.78p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_123_cast = conv(dilations = var_6194, groups = var_4943, pad = k_123_pad_0, pad_type = k_123_pad_type_0, strides = var_6192, weight = mid_block_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_123_cast")]; + tensor var_6198 = const()[name = tensor("op_6198"), val = tensor([1, 1])]; + tensor var_6200 = const()[name = tensor("op_6200"), val = tensor([1, 1])]; + tensor v_123_pad_type_0 = const()[name = tensor("v_123_pad_type_0"), val = tensor("custom")]; + tensor v_123_pad_0 = const()[name = tensor("v_123_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(792071616))), lut = tensor([-0x1.f8cp-8, 0x1.f88p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_123_cast = conv(dilations = var_6200, groups = var_4943, pad = v_123_pad_0, pad_type = v_123_pad_type_0, strides = var_6198, weight = mid_block_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_123_cast")]; + tensor var_6204 = const()[name = tensor("op_6204"), val = tensor([2, 20, 64, -1])]; + tensor var_6205_cast = reshape(shape = var_6204, x = q_123_cast)[name = tensor("op_6205_cast")]; + tensor var_6206 = const()[name = tensor("op_6206"), val = tensor([2, 20, 64, -1])]; + tensor var_6207_cast = reshape(shape = var_6206, x = k_123_cast)[name = tensor("op_6207_cast")]; + tensor var_6208 = const()[name = tensor("op_6208"), val = tensor([2, 20, 64, -1])]; + tensor var_6209_cast = reshape(shape = var_6208, x = v_123_cast)[name = tensor("op_6209_cast")]; + tensor attn_weights_245_transpose_x_0 = const()[name = tensor("attn_weights_245_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_245_transpose_y_0 = const()[name = tensor("attn_weights_245_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_245_cast = matmul(transpose_x = attn_weights_245_transpose_x_0, transpose_y = attn_weights_245_transpose_y_0, x = var_6205_cast, y = var_6207_cast)[name = tensor("attn_weights_245_cast")]; + tensor attn_weights_247_cast = mul(x = attn_weights_245_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_247_cast")]; + tensor var_6213_cast = softmax(axis = var_4927, x = attn_weights_247_cast)[name = tensor("op_6213_cast")]; + tensor attn_123_transpose_x_0 = const()[name = tensor("attn_123_transpose_x_0"), val = tensor(false)]; + tensor attn_123_transpose_y_0 = const()[name = tensor("attn_123_transpose_y_0"), val = tensor(true)]; + tensor attn_123_cast = matmul(transpose_x = attn_123_transpose_x_0, transpose_y = attn_123_transpose_y_0, x = var_6209_cast, y = var_6213_cast)[name = tensor("attn_123_cast")]; + tensor var_6217 = const()[name = tensor("op_6217"), val = tensor([2, 1280, 1, -1])]; + tensor input_377_cast = reshape(shape = var_6217, x = attn_123_cast)[name = tensor("input_377_cast")]; + tensor var_6222 = const()[name = tensor("op_6222"), val = tensor([1, 1])]; + tensor var_6224 = const()[name = 
tensor("op_6224"), val = tensor([1, 1])]; + tensor var_6226_pad_type_0 = const()[name = tensor("op_6226_pad_type_0"), val = tensor("custom")]; + tensor var_6226_pad_0 = const()[name = tensor("op_6226_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(792399360))), lut = tensor([-0x1.35p-8, 0x1.37p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(792604224)))]; + tensor var_6226_cast = conv(bias = mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_6224, groups = var_4943, pad = var_6226_pad_0, pad_type = var_6226_pad_type_0, strides = var_6222, weight = mid_block_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_377_cast)[name = tensor("op_6226_cast")]; + tensor inputs_185_cast = add(x = var_6226_cast, y = inputs_183_cast)[name = tensor("inputs_185_cast")]; + tensor var_6230 = const()[name = tensor("op_6230"), val = tensor([1])]; + tensor channels_mean_185_cast = reduce_mean(axes = var_6230, keep_dims = var_4938, x = inputs_185_cast)[name = tensor("channels_mean_185_cast")]; + tensor zero_mean_185_cast = sub(x = inputs_185_cast, y = channels_mean_185_cast)[name = tensor("zero_mean_185_cast")]; + tensor zero_mean_sq_185_cast = mul(x = zero_mean_185_cast, y = zero_mean_185_cast)[name = tensor("zero_mean_sq_185_cast")]; + tensor var_6234 = const()[name = tensor("op_6234"), val = tensor([1])]; + tensor var_6235_cast = reduce_mean(axes = var_6234, keep_dims = var_4938, x = zero_mean_sq_185_cast)[name = tensor("op_6235_cast")]; + tensor var_6236_to_fp16 = const()[name = tensor("op_6236_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6237_cast = add(x = var_6235_cast, y = var_6236_to_fp16)[name = tensor("op_6237_cast")]; + tensor denom_185_epsilon_0_to_fp16 = const()[name = tensor("denom_185_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_185_cast = rsqrt(epsilon = denom_185_epsilon_0_to_fp16, x = var_6237_cast)[name = tensor("denom_185_cast")]; + tensor out_185_cast = mul(x = zero_mean_185_cast, y = denom_185_cast)[name = tensor("out_185_cast")]; + tensor var_6241_to_fp16 = const()[name = tensor("op_6241_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(792606848)))]; + tensor var_6242_cast = add(x = out_185_cast, y = var_6241_to_fp16)[name = tensor("op_6242_cast")]; + tensor var_6244_to_fp16 = const()[name = tensor("op_6244_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(792609472)))]; + tensor input_379_cast = mul(x = var_6242_cast, y = var_6244_to_fp16)[name = tensor("input_379_cast")]; + tensor var_6252 = const()[name = tensor("op_6252"), val = tensor([1, 1])]; + tensor var_6254 = const()[name = tensor("op_6254"), val = tensor([1, 1])]; + tensor var_6256_pad_type_0 = const()[name = tensor("op_6256_pad_type_0"), val = tensor("custom")]; + tensor var_6256_pad_0 = const()[name = tensor("op_6256_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(792612096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(802442560))), name = tensor("mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(802442752)))]; + tensor var_6256_cast = conv(bias = mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_6254, groups = var_4943, pad = var_6256_pad_0, pad_type = var_6256_pad_type_0, strides = var_6252, weight = mid_block_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_379_cast)[name = tensor("op_6256_cast")]; + tensor var_6257_split_sizes_0 = const()[name = tensor("op_6257_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6257_axis_0 = const()[name = tensor("op_6257_axis_0"), val = tensor(1)]; + tensor var_6257_cast_0, tensor var_6257_cast_1 = split(axis = var_6257_axis_0, split_sizes = var_6257_split_sizes_0, x = var_6256_cast)[name = tensor("op_6257_cast")]; + tensor var_6259_mode_0 = const()[name = tensor("op_6259_mode_0"), val = tensor("EXACT")]; + tensor var_6259_cast = gelu(mode = var_6259_mode_0, x = var_6257_cast_1)[name = tensor("op_6259_cast")]; + tensor input_381_cast = mul(x = var_6257_cast_0, y = var_6259_cast)[name = tensor("input_381_cast")]; + tensor var_6263 = const()[name = tensor("op_6263"), val = tensor([1, 1])]; + tensor var_6265 = const()[name = tensor("op_6265"), val = tensor([1, 1])]; + tensor var_6267_pad_type_0 = const()[name = tensor("op_6267_pad_type_0"), val = tensor("custom")]; + tensor var_6267_pad_0 = const()[name = tensor("op_6267_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(802463296))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(805740160))), name = tensor("mid_block_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(805740288)))]; + tensor var_6267_cast = conv(bias = mid_block_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_6265, groups = var_4943, pad = var_6267_pad_0, pad_type = var_6267_pad_type_0, strides = var_6263, weight = mid_block_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_381_cast)[name = tensor("op_6267_cast")]; + tensor inputs_187_cast = add(x = var_6267_cast, y = inputs_185_cast)[name = tensor("inputs_187_cast")]; + tensor var_6277 = const()[name = tensor("op_6277"), val = tensor([1])]; + tensor channels_mean_187_cast = reduce_mean(axes = var_6277, keep_dims = var_4938, x = inputs_187_cast)[name = tensor("channels_mean_187_cast")]; + tensor 
zero_mean_187_cast = sub(x = inputs_187_cast, y = channels_mean_187_cast)[name = tensor("zero_mean_187_cast")]; + tensor zero_mean_sq_187_cast = mul(x = zero_mean_187_cast, y = zero_mean_187_cast)[name = tensor("zero_mean_sq_187_cast")]; + tensor var_6281 = const()[name = tensor("op_6281"), val = tensor([1])]; + tensor var_6282_cast = reduce_mean(axes = var_6281, keep_dims = var_4938, x = zero_mean_sq_187_cast)[name = tensor("op_6282_cast")]; + tensor var_6283_to_fp16 = const()[name = tensor("op_6283_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6284_cast = add(x = var_6282_cast, y = var_6283_to_fp16)[name = tensor("op_6284_cast")]; + tensor denom_187_epsilon_0_to_fp16 = const()[name = tensor("denom_187_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_187_cast = rsqrt(epsilon = denom_187_epsilon_0_to_fp16, x = var_6284_cast)[name = tensor("denom_187_cast")]; + tensor out_187_cast = mul(x = zero_mean_187_cast, y = denom_187_cast)[name = tensor("out_187_cast")]; + tensor var_6288_to_fp16 = const()[name = tensor("op_6288_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(805742912)))]; + tensor var_6289_cast = add(x = out_187_cast, y = var_6288_to_fp16)[name = tensor("op_6289_cast")]; + tensor var_6291_to_fp16 = const()[name = tensor("op_6291_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(805745536)))]; + tensor hidden_states_251_cast = mul(x = var_6289_cast, y = var_6291_to_fp16)[name = tensor("hidden_states_251_cast")]; + tensor var_6298 = const()[name = tensor("op_6298"), val = tensor([1, 1])]; + tensor var_6300 = const()[name = tensor("op_6300"), val = tensor([1, 1])]; + tensor q_125_pad_type_0 = const()[name = tensor("q_125_pad_type_0"), val = tensor("custom")]; + tensor q_125_pad_0 = const()[name = tensor("q_125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(805748160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(806977024))), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_125_cast = conv(dilations = var_6300, groups = var_4943, pad = q_125_pad_0, pad_type = q_125_pad_type_0, strides = var_6298, weight = mid_block_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_251_cast)[name = tensor("q_125_cast")]; + tensor var_6304 = const()[name = tensor("op_6304"), val = tensor([1, 1])]; + tensor var_6306 = const()[name = tensor("op_6306"), val = tensor([1, 1])]; + tensor k_125_pad_type_0 = const()[name = tensor("k_125_pad_type_0"), val = tensor("custom")]; + tensor k_125_pad_0 = const()[name = tensor("k_125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(806977216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(807796480))), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_125_cast = conv(dilations = var_6306, groups = var_4943, pad = k_125_pad_0, pad_type = k_125_pad_type_0, 
strides = var_6304, weight = mid_block_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_251_cast)[name = tensor("k_125_cast")]; + tensor var_6310 = const()[name = tensor("op_6310"), val = tensor([1, 1])]; + tensor var_6312 = const()[name = tensor("op_6312"), val = tensor([1, 1])]; + tensor v_125_pad_type_0 = const()[name = tensor("v_125_pad_type_0"), val = tensor("custom")]; + tensor v_125_pad_0 = const()[name = tensor("v_125_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(807796608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(809025472))), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_125_cast = conv(dilations = var_6312, groups = var_4943, pad = v_125_pad_0, pad_type = v_125_pad_type_0, strides = var_6310, weight = mid_block_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_251_cast)[name = tensor("v_125_cast")]; + tensor var_6316 = const()[name = tensor("op_6316"), val = tensor([2, 20, 64, -1])]; + tensor var_6317_cast = reshape(shape = var_6316, x = q_125_cast)[name = tensor("op_6317_cast")]; + tensor var_6318 = const()[name = tensor("op_6318"), val = tensor([2, 20, 64, -1])]; + tensor var_6319_cast = reshape(shape = var_6318, x = k_125_cast)[name = tensor("op_6319_cast")]; + tensor var_6320 = const()[name = tensor("op_6320"), val = tensor([2, 20, 64, -1])]; + tensor var_6321_cast = reshape(shape = var_6320, x = v_125_cast)[name = tensor("op_6321_cast")]; + tensor attn_weights_249_transpose_x_0 = const()[name = tensor("attn_weights_249_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_249_transpose_y_0 = const()[name = tensor("attn_weights_249_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_249_cast = matmul(transpose_x = attn_weights_249_transpose_x_0, transpose_y = attn_weights_249_transpose_y_0, x = var_6317_cast, y = var_6319_cast)[name = tensor("attn_weights_249_cast")]; + tensor attn_weights_251_cast = mul(x = attn_weights_249_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_251_cast")]; + tensor var_6325_cast = softmax(axis = var_4927, x = attn_weights_251_cast)[name = tensor("op_6325_cast")]; + tensor attn_125_transpose_x_0 = const()[name = tensor("attn_125_transpose_x_0"), val = tensor(false)]; + tensor attn_125_transpose_y_0 = const()[name = tensor("attn_125_transpose_y_0"), val = tensor(true)]; + tensor attn_125_cast = matmul(transpose_x = attn_125_transpose_x_0, transpose_y = attn_125_transpose_y_0, x = var_6321_cast, y = var_6325_cast)[name = tensor("attn_125_cast")]; + tensor var_6329 = const()[name = tensor("op_6329"), val = tensor([2, 1280, 1, -1])]; + tensor input_383_cast = reshape(shape = var_6329, x = attn_125_cast)[name = tensor("input_383_cast")]; + tensor var_6334 = const()[name = tensor("op_6334"), val = tensor([1, 1])]; + tensor var_6336 = const()[name = tensor("op_6336"), val = tensor([1, 1])]; + tensor var_6338_pad_type_0 = const()[name = tensor("op_6338_pad_type_0"), val = tensor("custom")]; + tensor var_6338_pad_0 = const()[name = tensor("op_6338_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices 
= tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(809025664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(810254528))), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(810254720)))]; + tensor var_6338_cast = conv(bias = mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_6336, groups = var_4943, pad = var_6338_pad_0, pad_type = var_6338_pad_type_0, strides = var_6334, weight = mid_block_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_383_cast)[name = tensor("op_6338_cast")]; + tensor inputs_189_cast = add(x = var_6338_cast, y = inputs_187_cast)[name = tensor("inputs_189_cast")]; + tensor var_6342 = const()[name = tensor("op_6342"), val = tensor([1])]; + tensor channels_mean_189_cast = reduce_mean(axes = var_6342, keep_dims = var_4938, x = inputs_189_cast)[name = tensor("channels_mean_189_cast")]; + tensor zero_mean_189_cast = sub(x = inputs_189_cast, y = channels_mean_189_cast)[name = tensor("zero_mean_189_cast")]; + tensor zero_mean_sq_189_cast = mul(x = zero_mean_189_cast, y = zero_mean_189_cast)[name = tensor("zero_mean_sq_189_cast")]; + tensor var_6346 = const()[name = tensor("op_6346"), val = tensor([1])]; + tensor var_6347_cast = reduce_mean(axes = var_6346, keep_dims = var_4938, x = zero_mean_sq_189_cast)[name = tensor("op_6347_cast")]; + tensor var_6348_to_fp16 = const()[name = tensor("op_6348_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6349_cast = add(x = var_6347_cast, y = var_6348_to_fp16)[name = tensor("op_6349_cast")]; + tensor denom_189_epsilon_0_to_fp16 = const()[name = tensor("denom_189_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_189_cast = rsqrt(epsilon = denom_189_epsilon_0_to_fp16, x = var_6349_cast)[name = tensor("denom_189_cast")]; + tensor out_189_cast = mul(x = zero_mean_189_cast, y = denom_189_cast)[name = tensor("out_189_cast")]; + tensor var_6353_to_fp16 = const()[name = tensor("op_6353_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(810257344)))]; + tensor var_6354_cast = add(x = out_189_cast, y = var_6353_to_fp16)[name = tensor("op_6354_cast")]; + tensor var_6356_to_fp16 = const()[name = tensor("op_6356_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(810259968)))]; + tensor hidden_states_253_cast = mul(x = var_6354_cast, y = var_6356_to_fp16)[name = tensor("hidden_states_253_cast")]; + tensor var_6363 = const()[name = tensor("op_6363"), val = tensor([1, 1])]; + tensor var_6365 = const()[name = tensor("op_6365"), val = tensor([1, 1])]; + tensor q_127_pad_type_0 = const()[name = tensor("q_127_pad_type_0"), val = tensor("custom")]; + tensor q_127_pad_0 = const()[name = tensor("q_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(810262592))), lut = tensor([-0x1.61p-7, 0x1.61p-7]), name = 
tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_127_cast = conv(dilations = var_6365, groups = var_4943, pad = q_127_pad_0, pad_type = q_127_pad_type_0, strides = var_6363, weight = mid_block_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_253_cast)[name = tensor("q_127_cast")]; + tensor var_6369 = const()[name = tensor("op_6369"), val = tensor([1, 1])]; + tensor var_6371 = const()[name = tensor("op_6371"), val = tensor([1, 1])]; + tensor k_127_pad_type_0 = const()[name = tensor("k_127_pad_type_0"), val = tensor("custom")]; + tensor k_127_pad_0 = const()[name = tensor("k_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(810467456))), lut = tensor([-0x1.b7p-8, 0x1.b6cp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_127_cast = conv(dilations = var_6371, groups = var_4943, pad = k_127_pad_0, pad_type = k_127_pad_type_0, strides = var_6369, weight = mid_block_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_127_cast")]; + tensor var_6375 = const()[name = tensor("op_6375"), val = tensor([1, 1])]; + tensor var_6377 = const()[name = tensor("op_6377"), val = tensor([1, 1])]; + tensor v_127_pad_type_0 = const()[name = tensor("v_127_pad_type_0"), val = tensor("custom")]; + tensor v_127_pad_0 = const()[name = tensor("v_127_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(810795200))), lut = tensor([-0x1.e78p-8, 0x1.e74p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_127_cast = conv(dilations = var_6377, groups = var_4943, pad = v_127_pad_0, pad_type = v_127_pad_type_0, strides = var_6375, weight = mid_block_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_127_cast")]; + tensor var_6381 = const()[name = tensor("op_6381"), val = tensor([2, 20, 64, -1])]; + tensor var_6382_cast = reshape(shape = var_6381, x = q_127_cast)[name = tensor("op_6382_cast")]; + tensor var_6383 = const()[name = tensor("op_6383"), val = tensor([2, 20, 64, -1])]; + tensor var_6384_cast = reshape(shape = var_6383, x = k_127_cast)[name = tensor("op_6384_cast")]; + tensor var_6385 = const()[name = tensor("op_6385"), val = tensor([2, 20, 64, -1])]; + tensor var_6386_cast = reshape(shape = var_6385, x = v_127_cast)[name = tensor("op_6386_cast")]; + tensor attn_weights_253_transpose_x_0 = const()[name = tensor("attn_weights_253_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_253_transpose_y_0 = const()[name = tensor("attn_weights_253_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_253_cast = matmul(transpose_x = attn_weights_253_transpose_x_0, transpose_y = attn_weights_253_transpose_y_0, x = var_6382_cast, y = var_6384_cast)[name = tensor("attn_weights_253_cast")]; + tensor attn_weights_255_cast = mul(x = attn_weights_253_cast, y = 
var_4934_to_fp16)[name = tensor("attn_weights_255_cast")]; + tensor var_6390_cast = softmax(axis = var_4927, x = attn_weights_255_cast)[name = tensor("op_6390_cast")]; + tensor attn_127_transpose_x_0 = const()[name = tensor("attn_127_transpose_x_0"), val = tensor(false)]; + tensor attn_127_transpose_y_0 = const()[name = tensor("attn_127_transpose_y_0"), val = tensor(true)]; + tensor attn_127_cast = matmul(transpose_x = attn_127_transpose_x_0, transpose_y = attn_127_transpose_y_0, x = var_6386_cast, y = var_6390_cast)[name = tensor("attn_127_cast")]; + tensor var_6394 = const()[name = tensor("op_6394"), val = tensor([2, 1280, 1, -1])]; + tensor input_385_cast = reshape(shape = var_6394, x = attn_127_cast)[name = tensor("input_385_cast")]; + tensor var_6399 = const()[name = tensor("op_6399"), val = tensor([1, 1])]; + tensor var_6401 = const()[name = tensor("op_6401"), val = tensor([1, 1])]; + tensor var_6403_pad_type_0 = const()[name = tensor("op_6403_pad_type_0"), val = tensor("custom")]; + tensor var_6403_pad_0 = const()[name = tensor("op_6403_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(811122944))), lut = tensor([-0x1.37cp-8, 0x1.364p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(811327808)))]; + tensor var_6403_cast = conv(bias = mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_6401, groups = var_4943, pad = var_6403_pad_0, pad_type = var_6403_pad_type_0, strides = var_6399, weight = mid_block_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_385_cast)[name = tensor("op_6403_cast")]; + tensor inputs_191_cast = add(x = var_6403_cast, y = inputs_189_cast)[name = tensor("inputs_191_cast")]; + tensor var_6407 = const()[name = tensor("op_6407"), val = tensor([1])]; + tensor channels_mean_191_cast = reduce_mean(axes = var_6407, keep_dims = var_4938, x = inputs_191_cast)[name = tensor("channels_mean_191_cast")]; + tensor zero_mean_191_cast = sub(x = inputs_191_cast, y = channels_mean_191_cast)[name = tensor("zero_mean_191_cast")]; + tensor zero_mean_sq_191_cast = mul(x = zero_mean_191_cast, y = zero_mean_191_cast)[name = tensor("zero_mean_sq_191_cast")]; + tensor var_6411 = const()[name = tensor("op_6411"), val = tensor([1])]; + tensor var_6412_cast = reduce_mean(axes = var_6411, keep_dims = var_4938, x = zero_mean_sq_191_cast)[name = tensor("op_6412_cast")]; + tensor var_6413_to_fp16 = const()[name = tensor("op_6413_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6414_cast = add(x = var_6412_cast, y = var_6413_to_fp16)[name = tensor("op_6414_cast")]; + tensor denom_191_epsilon_0_to_fp16 = const()[name = tensor("denom_191_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_191_cast = rsqrt(epsilon = denom_191_epsilon_0_to_fp16, x = var_6414_cast)[name = tensor("denom_191_cast")]; + tensor out_191_cast = mul(x = zero_mean_191_cast, y = denom_191_cast)[name = tensor("out_191_cast")]; + tensor var_6418_to_fp16 = const()[name = 
tensor("op_6418_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(811330432)))]; + tensor var_6419_cast = add(x = out_191_cast, y = var_6418_to_fp16)[name = tensor("op_6419_cast")]; + tensor var_6421_to_fp16 = const()[name = tensor("op_6421_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(811333056)))]; + tensor input_387_cast = mul(x = var_6419_cast, y = var_6421_to_fp16)[name = tensor("input_387_cast")]; + tensor var_6429 = const()[name = tensor("op_6429"), val = tensor([1, 1])]; + tensor var_6431 = const()[name = tensor("op_6431"), val = tensor([1, 1])]; + tensor var_6433_pad_type_0 = const()[name = tensor("op_6433_pad_type_0"), val = tensor("custom")]; + tensor var_6433_pad_0 = const()[name = tensor("op_6433_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(811335680))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(821166144))), name = tensor("mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(821166336)))]; + tensor var_6433_cast = conv(bias = mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_6431, groups = var_4943, pad = var_6433_pad_0, pad_type = var_6433_pad_type_0, strides = var_6429, weight = mid_block_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_387_cast)[name = tensor("op_6433_cast")]; + tensor var_6434_split_sizes_0 = const()[name = tensor("op_6434_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6434_axis_0 = const()[name = tensor("op_6434_axis_0"), val = tensor(1)]; + tensor var_6434_cast_0, tensor var_6434_cast_1 = split(axis = var_6434_axis_0, split_sizes = var_6434_split_sizes_0, x = var_6433_cast)[name = tensor("op_6434_cast")]; + tensor var_6436_mode_0 = const()[name = tensor("op_6436_mode_0"), val = tensor("EXACT")]; + tensor var_6436_cast = gelu(mode = var_6436_mode_0, x = var_6434_cast_1)[name = tensor("op_6436_cast")]; + tensor input_389_cast = mul(x = var_6434_cast_0, y = var_6436_cast)[name = tensor("input_389_cast")]; + tensor var_6440 = const()[name = tensor("op_6440"), val = tensor([1, 1])]; + tensor var_6442 = const()[name = tensor("op_6442"), val = tensor([1, 1])]; + tensor var_6444_pad_type_0 = const()[name = tensor("op_6444_pad_type_0"), val = tensor("custom")]; + tensor var_6444_pad_0 = const()[name = tensor("op_6444_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(821186880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(824463744))), name = tensor("mid_block_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = 
tensor("mid_block_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(824463872)))]; + tensor var_6444_cast = conv(bias = mid_block_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_6442, groups = var_4943, pad = var_6444_pad_0, pad_type = var_6444_pad_type_0, strides = var_6440, weight = mid_block_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_389_cast)[name = tensor("op_6444_cast")]; + tensor inputs_193_cast = add(x = var_6444_cast, y = inputs_191_cast)[name = tensor("inputs_193_cast")]; + tensor var_6454 = const()[name = tensor("op_6454"), val = tensor([1])]; + tensor channels_mean_193_cast = reduce_mean(axes = var_6454, keep_dims = var_4938, x = inputs_193_cast)[name = tensor("channels_mean_193_cast")]; + tensor zero_mean_193_cast = sub(x = inputs_193_cast, y = channels_mean_193_cast)[name = tensor("zero_mean_193_cast")]; + tensor zero_mean_sq_193_cast = mul(x = zero_mean_193_cast, y = zero_mean_193_cast)[name = tensor("zero_mean_sq_193_cast")]; + tensor var_6458 = const()[name = tensor("op_6458"), val = tensor([1])]; + tensor var_6459_cast = reduce_mean(axes = var_6458, keep_dims = var_4938, x = zero_mean_sq_193_cast)[name = tensor("op_6459_cast")]; + tensor var_6460_to_fp16 = const()[name = tensor("op_6460_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6461_cast = add(x = var_6459_cast, y = var_6460_to_fp16)[name = tensor("op_6461_cast")]; + tensor denom_193_epsilon_0_to_fp16 = const()[name = tensor("denom_193_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_193_cast = rsqrt(epsilon = denom_193_epsilon_0_to_fp16, x = var_6461_cast)[name = tensor("denom_193_cast")]; + tensor out_193_cast = mul(x = zero_mean_193_cast, y = denom_193_cast)[name = tensor("out_193_cast")]; + tensor var_6465_to_fp16 = const()[name = tensor("op_6465_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(824466496)))]; + tensor var_6466_cast = add(x = out_193_cast, y = var_6465_to_fp16)[name = tensor("op_6466_cast")]; + tensor var_6468_to_fp16 = const()[name = tensor("op_6468_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(824469120)))]; + tensor hidden_states_257_cast = mul(x = var_6466_cast, y = var_6468_to_fp16)[name = tensor("hidden_states_257_cast")]; + tensor var_6475 = const()[name = tensor("op_6475"), val = tensor([1, 1])]; + tensor var_6477 = const()[name = tensor("op_6477"), val = tensor([1, 1])]; + tensor q_129_pad_type_0 = const()[name = tensor("q_129_pad_type_0"), val = tensor("custom")]; + tensor q_129_pad_0 = const()[name = tensor("q_129_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(824471744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(825291008))), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_129_cast = conv(dilations = var_6477, groups = var_4943, pad = q_129_pad_0, pad_type = q_129_pad_type_0, strides = var_6475, weight = mid_block_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_257_cast)[name = tensor("q_129_cast")]; + tensor var_6481 = const()[name = 
tensor("op_6481"), val = tensor([1, 1])]; + tensor var_6483 = const()[name = tensor("op_6483"), val = tensor([1, 1])]; + tensor k_129_pad_type_0 = const()[name = tensor("k_129_pad_type_0"), val = tensor("custom")]; + tensor k_129_pad_0 = const()[name = tensor("k_129_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(825291136))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(826110400))), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_129_cast = conv(dilations = var_6483, groups = var_4943, pad = k_129_pad_0, pad_type = k_129_pad_type_0, strides = var_6481, weight = mid_block_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_257_cast)[name = tensor("k_129_cast")]; + tensor var_6487 = const()[name = tensor("op_6487"), val = tensor([1, 1])]; + tensor var_6489 = const()[name = tensor("op_6489"), val = tensor([1, 1])]; + tensor v_129_pad_type_0 = const()[name = tensor("v_129_pad_type_0"), val = tensor("custom")]; + tensor v_129_pad_0 = const()[name = tensor("v_129_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(826110528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(826929792))), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_129_cast = conv(dilations = var_6489, groups = var_4943, pad = v_129_pad_0, pad_type = v_129_pad_type_0, strides = var_6487, weight = mid_block_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_257_cast)[name = tensor("v_129_cast")]; + tensor var_6493 = const()[name = tensor("op_6493"), val = tensor([2, 20, 64, -1])]; + tensor var_6494_cast = reshape(shape = var_6493, x = q_129_cast)[name = tensor("op_6494_cast")]; + tensor var_6495 = const()[name = tensor("op_6495"), val = tensor([2, 20, 64, -1])]; + tensor var_6496_cast = reshape(shape = var_6495, x = k_129_cast)[name = tensor("op_6496_cast")]; + tensor var_6497 = const()[name = tensor("op_6497"), val = tensor([2, 20, 64, -1])]; + tensor var_6498_cast = reshape(shape = var_6497, x = v_129_cast)[name = tensor("op_6498_cast")]; + tensor attn_weights_257_transpose_x_0 = const()[name = tensor("attn_weights_257_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_257_transpose_y_0 = const()[name = tensor("attn_weights_257_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_257_cast = matmul(transpose_x = attn_weights_257_transpose_x_0, transpose_y = attn_weights_257_transpose_y_0, x = var_6494_cast, y = var_6496_cast)[name = tensor("attn_weights_257_cast")]; + tensor attn_weights_259_cast = mul(x = attn_weights_257_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_259_cast")]; + tensor var_6502_cast = softmax(axis = var_4927, x = attn_weights_259_cast)[name = tensor("op_6502_cast")]; + tensor attn_129_transpose_x_0 = const()[name = tensor("attn_129_transpose_x_0"), val = tensor(false)]; + tensor attn_129_transpose_y_0 = const()[name = 
tensor("attn_129_transpose_y_0"), val = tensor(true)]; + tensor attn_129_cast = matmul(transpose_x = attn_129_transpose_x_0, transpose_y = attn_129_transpose_y_0, x = var_6498_cast, y = var_6502_cast)[name = tensor("attn_129_cast")]; + tensor var_6506 = const()[name = tensor("op_6506"), val = tensor([2, 1280, 1, -1])]; + tensor input_391_cast = reshape(shape = var_6506, x = attn_129_cast)[name = tensor("input_391_cast")]; + tensor var_6511 = const()[name = tensor("op_6511"), val = tensor([1, 1])]; + tensor var_6513 = const()[name = tensor("op_6513"), val = tensor([1, 1])]; + tensor var_6515_pad_type_0 = const()[name = tensor("op_6515_pad_type_0"), val = tensor("custom")]; + tensor var_6515_pad_0 = const()[name = tensor("op_6515_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(826929920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(828158784))), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(828158976)))]; + tensor var_6515_cast = conv(bias = mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_6513, groups = var_4943, pad = var_6515_pad_0, pad_type = var_6515_pad_type_0, strides = var_6511, weight = mid_block_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_391_cast)[name = tensor("op_6515_cast")]; + tensor inputs_195_cast = add(x = var_6515_cast, y = inputs_193_cast)[name = tensor("inputs_195_cast")]; + tensor var_6519 = const()[name = tensor("op_6519"), val = tensor([1])]; + tensor channels_mean_195_cast = reduce_mean(axes = var_6519, keep_dims = var_4938, x = inputs_195_cast)[name = tensor("channels_mean_195_cast")]; + tensor zero_mean_195_cast = sub(x = inputs_195_cast, y = channels_mean_195_cast)[name = tensor("zero_mean_195_cast")]; + tensor zero_mean_sq_195_cast = mul(x = zero_mean_195_cast, y = zero_mean_195_cast)[name = tensor("zero_mean_sq_195_cast")]; + tensor var_6523 = const()[name = tensor("op_6523"), val = tensor([1])]; + tensor var_6524_cast = reduce_mean(axes = var_6523, keep_dims = var_4938, x = zero_mean_sq_195_cast)[name = tensor("op_6524_cast")]; + tensor var_6525_to_fp16 = const()[name = tensor("op_6525_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6526_cast = add(x = var_6524_cast, y = var_6525_to_fp16)[name = tensor("op_6526_cast")]; + tensor denom_195_epsilon_0_to_fp16 = const()[name = tensor("denom_195_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_195_cast = rsqrt(epsilon = denom_195_epsilon_0_to_fp16, x = var_6526_cast)[name = tensor("denom_195_cast")]; + tensor out_195_cast = mul(x = zero_mean_195_cast, y = denom_195_cast)[name = tensor("out_195_cast")]; + tensor var_6530_to_fp16 = const()[name = tensor("op_6530_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(828161600)))]; + tensor var_6531_cast = add(x = out_195_cast, y = var_6530_to_fp16)[name = tensor("op_6531_cast")]; + tensor var_6533_to_fp16 = const()[name = 
tensor("op_6533_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(828164224)))]; + tensor hidden_states_259_cast = mul(x = var_6531_cast, y = var_6533_to_fp16)[name = tensor("hidden_states_259_cast")]; + tensor var_6540 = const()[name = tensor("op_6540"), val = tensor([1, 1])]; + tensor var_6542 = const()[name = tensor("op_6542"), val = tensor([1, 1])]; + tensor q_131_pad_type_0 = const()[name = tensor("q_131_pad_type_0"), val = tensor("custom")]; + tensor q_131_pad_0 = const()[name = tensor("q_131_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(828166848))), lut = tensor([-0x1.60cp-7, 0x1.604p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_131_cast = conv(dilations = var_6542, groups = var_4943, pad = q_131_pad_0, pad_type = q_131_pad_type_0, strides = var_6540, weight = mid_block_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_259_cast)[name = tensor("q_131_cast")]; + tensor var_6546 = const()[name = tensor("op_6546"), val = tensor([1, 1])]; + tensor var_6548 = const()[name = tensor("op_6548"), val = tensor([1, 1])]; + tensor k_131_pad_type_0 = const()[name = tensor("k_131_pad_type_0"), val = tensor("custom")]; + tensor k_131_pad_0 = const()[name = tensor("k_131_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(828371712))), lut = tensor([-0x1.63cp-7, -0x1.d48p-9, 0x1.d34p-9, 0x1.638p-7]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_131_cast = conv(dilations = var_6548, groups = var_4943, pad = k_131_pad_0, pad_type = k_131_pad_type_0, strides = var_6546, weight = mid_block_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_131_cast")]; + tensor var_6552 = const()[name = tensor("op_6552"), val = tensor([1, 1])]; + tensor var_6554 = const()[name = tensor("op_6554"), val = tensor([1, 1])]; + tensor v_131_pad_type_0 = const()[name = tensor("v_131_pad_type_0"), val = tensor("custom")]; + tensor v_131_pad_0 = const()[name = tensor("v_131_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(829027136))), lut = tensor([-0x1.e54p-8, 0x1.e38p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_131_cast = conv(dilations = var_6554, groups = var_4943, pad = v_131_pad_0, pad_type = v_131_pad_type_0, strides = var_6552, weight = mid_block_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_131_cast")]; + tensor var_6558 = const()[name = tensor("op_6558"), val = tensor([2, 20, 64, -1])]; + tensor var_6559_cast = reshape(shape = var_6558, x = q_131_cast)[name = tensor("op_6559_cast")]; + tensor var_6560 = 
const()[name = tensor("op_6560"), val = tensor([2, 20, 64, -1])]; + tensor var_6561_cast = reshape(shape = var_6560, x = k_131_cast)[name = tensor("op_6561_cast")]; + tensor var_6562 = const()[name = tensor("op_6562"), val = tensor([2, 20, 64, -1])]; + tensor var_6563_cast = reshape(shape = var_6562, x = v_131_cast)[name = tensor("op_6563_cast")]; + tensor attn_weights_261_transpose_x_0 = const()[name = tensor("attn_weights_261_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_261_transpose_y_0 = const()[name = tensor("attn_weights_261_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_261_cast = matmul(transpose_x = attn_weights_261_transpose_x_0, transpose_y = attn_weights_261_transpose_y_0, x = var_6559_cast, y = var_6561_cast)[name = tensor("attn_weights_261_cast")]; + tensor attn_weights_263_cast = mul(x = attn_weights_261_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_263_cast")]; + tensor var_6567_cast = softmax(axis = var_4927, x = attn_weights_263_cast)[name = tensor("op_6567_cast")]; + tensor attn_131_transpose_x_0 = const()[name = tensor("attn_131_transpose_x_0"), val = tensor(false)]; + tensor attn_131_transpose_y_0 = const()[name = tensor("attn_131_transpose_y_0"), val = tensor(true)]; + tensor attn_131_cast = matmul(transpose_x = attn_131_transpose_x_0, transpose_y = attn_131_transpose_y_0, x = var_6563_cast, y = var_6567_cast)[name = tensor("attn_131_cast")]; + tensor var_6571 = const()[name = tensor("op_6571"), val = tensor([2, 1280, 1, -1])]; + tensor input_393_cast = reshape(shape = var_6571, x = attn_131_cast)[name = tensor("input_393_cast")]; + tensor var_6576 = const()[name = tensor("op_6576"), val = tensor([1, 1])]; + tensor var_6578 = const()[name = tensor("op_6578"), val = tensor([1, 1])]; + tensor var_6580_pad_type_0 = const()[name = tensor("op_6580_pad_type_0"), val = tensor("custom")]; + tensor var_6580_pad_0 = const()[name = tensor("op_6580_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(829354880))), lut = tensor([-0x1.42p-8, 0x1.41cp-8]), name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(829559744)))]; + tensor var_6580_cast = conv(bias = mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_6578, groups = var_4943, pad = var_6580_pad_0, pad_type = var_6580_pad_type_0, strides = var_6576, weight = mid_block_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_393_cast)[name = tensor("op_6580_cast")]; + tensor inputs_197_cast = add(x = var_6580_cast, y = inputs_195_cast)[name = tensor("inputs_197_cast")]; + tensor var_6584 = const()[name = tensor("op_6584"), val = tensor([1])]; + tensor channels_mean_197_cast = reduce_mean(axes = var_6584, keep_dims = var_4938, x = inputs_197_cast)[name = tensor("channels_mean_197_cast")]; + tensor zero_mean_197_cast = sub(x = inputs_197_cast, y = channels_mean_197_cast)[name = tensor("zero_mean_197_cast")]; + tensor zero_mean_sq_197_cast = mul(x = zero_mean_197_cast, y = 
zero_mean_197_cast)[name = tensor("zero_mean_sq_197_cast")]; + tensor var_6588 = const()[name = tensor("op_6588"), val = tensor([1])]; + tensor var_6589_cast = reduce_mean(axes = var_6588, keep_dims = var_4938, x = zero_mean_sq_197_cast)[name = tensor("op_6589_cast")]; + tensor var_6590_to_fp16 = const()[name = tensor("op_6590_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6591_cast = add(x = var_6589_cast, y = var_6590_to_fp16)[name = tensor("op_6591_cast")]; + tensor denom_197_epsilon_0_to_fp16 = const()[name = tensor("denom_197_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_197_cast = rsqrt(epsilon = denom_197_epsilon_0_to_fp16, x = var_6591_cast)[name = tensor("denom_197_cast")]; + tensor out_197_cast = mul(x = zero_mean_197_cast, y = denom_197_cast)[name = tensor("out_197_cast")]; + tensor var_6595_to_fp16 = const()[name = tensor("op_6595_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(829562368)))]; + tensor var_6596_cast = add(x = out_197_cast, y = var_6595_to_fp16)[name = tensor("op_6596_cast")]; + tensor var_6598_to_fp16 = const()[name = tensor("op_6598_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(829564992)))]; + tensor input_395_cast = mul(x = var_6596_cast, y = var_6598_to_fp16)[name = tensor("input_395_cast")]; + tensor var_6606 = const()[name = tensor("op_6606"), val = tensor([1, 1])]; + tensor var_6608 = const()[name = tensor("op_6608"), val = tensor([1, 1])]; + tensor var_6610_pad_type_0 = const()[name = tensor("op_6610_pad_type_0"), val = tensor("custom")]; + tensor var_6610_pad_0 = const()[name = tensor("op_6610_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(829567616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(839398080))), name = tensor("mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(839398272)))]; + tensor var_6610_cast = conv(bias = mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_6608, groups = var_4943, pad = var_6610_pad_0, pad_type = var_6610_pad_type_0, strides = var_6606, weight = mid_block_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_395_cast)[name = tensor("op_6610_cast")]; + tensor var_6611_split_sizes_0 = const()[name = tensor("op_6611_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6611_axis_0 = const()[name = tensor("op_6611_axis_0"), val = tensor(1)]; + tensor var_6611_cast_0, tensor var_6611_cast_1 = split(axis = var_6611_axis_0, split_sizes = var_6611_split_sizes_0, x = var_6610_cast)[name = tensor("op_6611_cast")]; + tensor var_6613_mode_0 = const()[name = tensor("op_6613_mode_0"), val = tensor("EXACT")]; + tensor var_6613_cast = gelu(mode = var_6613_mode_0, x = var_6611_cast_1)[name = tensor("op_6613_cast")]; + tensor input_397_cast = mul(x = var_6611_cast_0, y = var_6613_cast)[name = tensor("input_397_cast")]; + tensor var_6617 = const()[name = tensor("op_6617"), 
val = tensor([1, 1])]; + tensor var_6619 = const()[name = tensor("op_6619"), val = tensor([1, 1])]; + tensor var_6621_pad_type_0 = const()[name = tensor("op_6621_pad_type_0"), val = tensor("custom")]; + tensor var_6621_pad_0 = const()[name = tensor("op_6621_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(839418816))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844334080))), name = tensor("mid_block_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844334272)))]; + tensor var_6621_cast = conv(bias = mid_block_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_6619, groups = var_4943, pad = var_6621_pad_0, pad_type = var_6621_pad_type_0, strides = var_6617, weight = mid_block_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_397_cast)[name = tensor("op_6621_cast")]; + tensor inputs_199_cast = add(x = var_6621_cast, y = inputs_197_cast)[name = tensor("inputs_199_cast")]; + tensor var_6631 = const()[name = tensor("op_6631"), val = tensor([1])]; + tensor channels_mean_199_cast = reduce_mean(axes = var_6631, keep_dims = var_4938, x = inputs_199_cast)[name = tensor("channels_mean_199_cast")]; + tensor zero_mean_199_cast = sub(x = inputs_199_cast, y = channels_mean_199_cast)[name = tensor("zero_mean_199_cast")]; + tensor zero_mean_sq_199_cast = mul(x = zero_mean_199_cast, y = zero_mean_199_cast)[name = tensor("zero_mean_sq_199_cast")]; + tensor var_6635 = const()[name = tensor("op_6635"), val = tensor([1])]; + tensor var_6636_cast = reduce_mean(axes = var_6635, keep_dims = var_4938, x = zero_mean_sq_199_cast)[name = tensor("op_6636_cast")]; + tensor var_6637_to_fp16 = const()[name = tensor("op_6637_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6638_cast = add(x = var_6636_cast, y = var_6637_to_fp16)[name = tensor("op_6638_cast")]; + tensor denom_199_epsilon_0_to_fp16 = const()[name = tensor("denom_199_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_199_cast = rsqrt(epsilon = denom_199_epsilon_0_to_fp16, x = var_6638_cast)[name = tensor("denom_199_cast")]; + tensor out_199_cast = mul(x = zero_mean_199_cast, y = denom_199_cast)[name = tensor("out_199_cast")]; + tensor var_6642_to_fp16 = const()[name = tensor("op_6642_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844336896)))]; + tensor var_6643_cast = add(x = out_199_cast, y = var_6642_to_fp16)[name = tensor("op_6643_cast")]; + tensor var_6645_to_fp16 = const()[name = tensor("op_6645_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844339520)))]; + tensor hidden_states_263_cast = mul(x = var_6643_cast, y = var_6645_to_fp16)[name = tensor("hidden_states_263_cast")]; + tensor var_6652 = const()[name = tensor("op_6652"), val = tensor([1, 1])]; + tensor var_6654 = const()[name = tensor("op_6654"), val = tensor([1, 1])]; + tensor q_133_pad_type_0 = const()[name = tensor("q_133_pad_type_0"), val = tensor("custom")]; + tensor q_133_pad_0 = 
const()[name = tensor("q_133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(844342144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(845571008))), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_133_cast = conv(dilations = var_6654, groups = var_4943, pad = q_133_pad_0, pad_type = q_133_pad_type_0, strides = var_6652, weight = mid_block_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_263_cast)[name = tensor("q_133_cast")]; + tensor var_6658 = const()[name = tensor("op_6658"), val = tensor([1, 1])]; + tensor var_6660 = const()[name = tensor("op_6660"), val = tensor([1, 1])]; + tensor k_133_pad_type_0 = const()[name = tensor("k_133_pad_type_0"), val = tensor("custom")]; + tensor k_133_pad_0 = const()[name = tensor("k_133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(845571200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(846390464))), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_133_cast = conv(dilations = var_6660, groups = var_4943, pad = k_133_pad_0, pad_type = k_133_pad_type_0, strides = var_6658, weight = mid_block_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_263_cast)[name = tensor("k_133_cast")]; + tensor var_6664 = const()[name = tensor("op_6664"), val = tensor([1, 1])]; + tensor var_6666 = const()[name = tensor("op_6666"), val = tensor([1, 1])]; + tensor v_133_pad_type_0 = const()[name = tensor("v_133_pad_type_0"), val = tensor("custom")]; + tensor v_133_pad_0 = const()[name = tensor("v_133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(846390592))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847619456))), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_133_cast = conv(dilations = var_6666, groups = var_4943, pad = v_133_pad_0, pad_type = v_133_pad_type_0, strides = var_6664, weight = mid_block_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_263_cast)[name = tensor("v_133_cast")]; + tensor var_6670 = const()[name = tensor("op_6670"), val = tensor([2, 20, 64, -1])]; + tensor var_6671_cast = reshape(shape = var_6670, x = q_133_cast)[name = tensor("op_6671_cast")]; + tensor var_6672 = const()[name = tensor("op_6672"), val = tensor([2, 20, 64, -1])]; + tensor var_6673_cast = reshape(shape = var_6672, x = k_133_cast)[name = tensor("op_6673_cast")]; + tensor var_6674 = const()[name = tensor("op_6674"), val = tensor([2, 20, 64, -1])]; + tensor var_6675_cast = reshape(shape = var_6674, x = v_133_cast)[name = tensor("op_6675_cast")]; + tensor 
attn_weights_265_transpose_x_0 = const()[name = tensor("attn_weights_265_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_265_transpose_y_0 = const()[name = tensor("attn_weights_265_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_265_cast = matmul(transpose_x = attn_weights_265_transpose_x_0, transpose_y = attn_weights_265_transpose_y_0, x = var_6671_cast, y = var_6673_cast)[name = tensor("attn_weights_265_cast")]; + tensor attn_weights_267_cast = mul(x = attn_weights_265_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_267_cast")]; + tensor var_6679_cast = softmax(axis = var_4927, x = attn_weights_267_cast)[name = tensor("op_6679_cast")]; + tensor attn_133_transpose_x_0 = const()[name = tensor("attn_133_transpose_x_0"), val = tensor(false)]; + tensor attn_133_transpose_y_0 = const()[name = tensor("attn_133_transpose_y_0"), val = tensor(true)]; + tensor attn_133_cast = matmul(transpose_x = attn_133_transpose_x_0, transpose_y = attn_133_transpose_y_0, x = var_6675_cast, y = var_6679_cast)[name = tensor("attn_133_cast")]; + tensor var_6683 = const()[name = tensor("op_6683"), val = tensor([2, 1280, 1, -1])]; + tensor input_399_cast = reshape(shape = var_6683, x = attn_133_cast)[name = tensor("input_399_cast")]; + tensor var_6688 = const()[name = tensor("op_6688"), val = tensor([1, 1])]; + tensor var_6690 = const()[name = tensor("op_6690"), val = tensor([1, 1])]; + tensor var_6692_pad_type_0 = const()[name = tensor("op_6692_pad_type_0"), val = tensor("custom")]; + tensor var_6692_pad_0 = const()[name = tensor("op_6692_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(847619648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(848848512))), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(848848704)))]; + tensor var_6692_cast = conv(bias = mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_6690, groups = var_4943, pad = var_6692_pad_0, pad_type = var_6692_pad_type_0, strides = var_6688, weight = mid_block_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_399_cast)[name = tensor("op_6692_cast")]; + tensor inputs_201_cast = add(x = var_6692_cast, y = inputs_199_cast)[name = tensor("inputs_201_cast")]; + tensor var_6696 = const()[name = tensor("op_6696"), val = tensor([1])]; + tensor channels_mean_201_cast = reduce_mean(axes = var_6696, keep_dims = var_4938, x = inputs_201_cast)[name = tensor("channels_mean_201_cast")]; + tensor zero_mean_201_cast = sub(x = inputs_201_cast, y = channels_mean_201_cast)[name = tensor("zero_mean_201_cast")]; + tensor zero_mean_sq_201_cast = mul(x = zero_mean_201_cast, y = zero_mean_201_cast)[name = tensor("zero_mean_sq_201_cast")]; + tensor var_6700 = const()[name = tensor("op_6700"), val = tensor([1])]; + tensor var_6701_cast = reduce_mean(axes = var_6700, keep_dims = var_4938, x = zero_mean_sq_201_cast)[name = tensor("op_6701_cast")]; + tensor var_6702_to_fp16 = 
const()[name = tensor("op_6702_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6703_cast = add(x = var_6701_cast, y = var_6702_to_fp16)[name = tensor("op_6703_cast")]; + tensor denom_201_epsilon_0_to_fp16 = const()[name = tensor("denom_201_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_201_cast = rsqrt(epsilon = denom_201_epsilon_0_to_fp16, x = var_6703_cast)[name = tensor("denom_201_cast")]; + tensor out_201_cast = mul(x = zero_mean_201_cast, y = denom_201_cast)[name = tensor("out_201_cast")]; + tensor var_6707_to_fp16 = const()[name = tensor("op_6707_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(848851328)))]; + tensor var_6708_cast = add(x = out_201_cast, y = var_6707_to_fp16)[name = tensor("op_6708_cast")]; + tensor var_6710_to_fp16 = const()[name = tensor("op_6710_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(848853952)))]; + tensor hidden_states_265_cast = mul(x = var_6708_cast, y = var_6710_to_fp16)[name = tensor("hidden_states_265_cast")]; + tensor var_6717 = const()[name = tensor("op_6717"), val = tensor([1, 1])]; + tensor var_6719 = const()[name = tensor("op_6719"), val = tensor([1, 1])]; + tensor q_135_pad_type_0 = const()[name = tensor("q_135_pad_type_0"), val = tensor("custom")]; + tensor q_135_pad_0 = const()[name = tensor("q_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(848856576))), lut = tensor([-0x1.5ccp-7, 0x1.5ccp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_135_cast = conv(dilations = var_6719, groups = var_4943, pad = q_135_pad_0, pad_type = q_135_pad_type_0, strides = var_6717, weight = mid_block_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_265_cast)[name = tensor("q_135_cast")]; + tensor var_6723 = const()[name = tensor("op_6723"), val = tensor([1, 1])]; + tensor var_6725 = const()[name = tensor("op_6725"), val = tensor([1, 1])]; + tensor k_135_pad_type_0 = const()[name = tensor("k_135_pad_type_0"), val = tensor("custom")]; + tensor k_135_pad_0 = const()[name = tensor("k_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(849061440))), lut = tensor([-0x1.564p-7, -0x1.c58p-9, 0x1.c24p-9, 0x1.55cp-7]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_135_cast = conv(dilations = var_6725, groups = var_4943, pad = k_135_pad_0, pad_type = k_135_pad_type_0, strides = var_6723, weight = mid_block_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_135_cast")]; + tensor var_6729 = const()[name = tensor("op_6729"), val = tensor([1, 1])]; + tensor var_6731 = const()[name = tensor("op_6731"), val = tensor([1, 1])]; + tensor v_135_pad_type_0 = const()[name = tensor("v_135_pad_type_0"), val = tensor("custom")]; + tensor v_135_pad_0 = const()[name = tensor("v_135_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
mid_block_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(849716864))), lut = tensor([-0x1.cdcp-8, 0x1.cd4p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_135_cast = conv(dilations = var_6731, groups = var_4943, pad = v_135_pad_0, pad_type = v_135_pad_type_0, strides = var_6729, weight = mid_block_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_135_cast")]; + tensor var_6735 = const()[name = tensor("op_6735"), val = tensor([2, 20, 64, -1])]; + tensor var_6736_cast = reshape(shape = var_6735, x = q_135_cast)[name = tensor("op_6736_cast")]; + tensor var_6737 = const()[name = tensor("op_6737"), val = tensor([2, 20, 64, -1])]; + tensor var_6738_cast = reshape(shape = var_6737, x = k_135_cast)[name = tensor("op_6738_cast")]; + tensor var_6739 = const()[name = tensor("op_6739"), val = tensor([2, 20, 64, -1])]; + tensor var_6740_cast = reshape(shape = var_6739, x = v_135_cast)[name = tensor("op_6740_cast")]; + tensor attn_weights_269_transpose_x_0 = const()[name = tensor("attn_weights_269_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_269_transpose_y_0 = const()[name = tensor("attn_weights_269_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_269_cast = matmul(transpose_x = attn_weights_269_transpose_x_0, transpose_y = attn_weights_269_transpose_y_0, x = var_6736_cast, y = var_6738_cast)[name = tensor("attn_weights_269_cast")]; + tensor attn_weights_271_cast = mul(x = attn_weights_269_cast, y = var_4934_to_fp16)[name = tensor("attn_weights_271_cast")]; + tensor var_6744_cast = softmax(axis = var_4927, x = attn_weights_271_cast)[name = tensor("op_6744_cast")]; + tensor attn_135_transpose_x_0 = const()[name = tensor("attn_135_transpose_x_0"), val = tensor(false)]; + tensor attn_135_transpose_y_0 = const()[name = tensor("attn_135_transpose_y_0"), val = tensor(true)]; + tensor attn_135_cast = matmul(transpose_x = attn_135_transpose_x_0, transpose_y = attn_135_transpose_y_0, x = var_6740_cast, y = var_6744_cast)[name = tensor("attn_135_cast")]; + tensor var_6748 = const()[name = tensor("op_6748"), val = tensor([2, 1280, 1, -1])]; + tensor input_401_cast = reshape(shape = var_6748, x = attn_135_cast)[name = tensor("input_401_cast")]; + tensor var_6753 = const()[name = tensor("op_6753"), val = tensor([1, 1])]; + tensor var_6755 = const()[name = tensor("op_6755"), val = tensor([1, 1])]; + tensor var_6757_pad_type_0 = const()[name = tensor("op_6757_pad_type_0"), val = tensor("custom")]; + tensor var_6757_pad_0 = const()[name = tensor("op_6757_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(850044608))), lut = tensor([-0x1.48cp-8, 0x1.49p-8]), name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(850249472)))]; + tensor 
var_6757_cast = conv(bias = mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_6755, groups = var_4943, pad = var_6757_pad_0, pad_type = var_6757_pad_type_0, strides = var_6753, weight = mid_block_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_401_cast)[name = tensor("op_6757_cast")]; + tensor inputs_203_cast = add(x = var_6757_cast, y = inputs_201_cast)[name = tensor("inputs_203_cast")]; + tensor var_6761 = const()[name = tensor("op_6761"), val = tensor([1])]; + tensor channels_mean_203_cast = reduce_mean(axes = var_6761, keep_dims = var_4938, x = inputs_203_cast)[name = tensor("channels_mean_203_cast")]; + tensor zero_mean_203_cast = sub(x = inputs_203_cast, y = channels_mean_203_cast)[name = tensor("zero_mean_203_cast")]; + tensor zero_mean_sq_203_cast = mul(x = zero_mean_203_cast, y = zero_mean_203_cast)[name = tensor("zero_mean_sq_203_cast")]; + tensor var_6765 = const()[name = tensor("op_6765"), val = tensor([1])]; + tensor var_6766_cast = reduce_mean(axes = var_6765, keep_dims = var_4938, x = zero_mean_sq_203_cast)[name = tensor("op_6766_cast")]; + tensor var_6767_to_fp16 = const()[name = tensor("op_6767_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6768_cast = add(x = var_6766_cast, y = var_6767_to_fp16)[name = tensor("op_6768_cast")]; + tensor denom_203_epsilon_0_to_fp16 = const()[name = tensor("denom_203_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_203_cast = rsqrt(epsilon = denom_203_epsilon_0_to_fp16, x = var_6768_cast)[name = tensor("denom_203_cast")]; + tensor out_203_cast = mul(x = zero_mean_203_cast, y = denom_203_cast)[name = tensor("out_203_cast")]; + tensor var_6772_to_fp16 = const()[name = tensor("op_6772_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(850252096)))]; + tensor var_6773_cast = add(x = out_203_cast, y = var_6772_to_fp16)[name = tensor("op_6773_cast")]; + tensor var_6775_to_fp16 = const()[name = tensor("op_6775_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(850254720)))]; + tensor input_403_cast = mul(x = var_6773_cast, y = var_6775_to_fp16)[name = tensor("input_403_cast")]; + tensor var_6783 = const()[name = tensor("op_6783"), val = tensor([1, 1])]; + tensor var_6785 = const()[name = tensor("op_6785"), val = tensor([1, 1])]; + tensor var_6787_pad_type_0 = const()[name = tensor("op_6787_pad_type_0"), val = tensor("custom")]; + tensor var_6787_pad_0 = const()[name = tensor("op_6787_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(850257344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(860087808))), name = tensor("mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(860088000)))]; + tensor var_6787_cast = conv(bias = mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_6785, groups = var_4943, pad = var_6787_pad_0, pad_type = var_6787_pad_type_0, strides = 
var_6783, weight = mid_block_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_403_cast)[name = tensor("op_6787_cast")]; + tensor var_6788_split_sizes_0 = const()[name = tensor("op_6788_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_6788_axis_0 = const()[name = tensor("op_6788_axis_0"), val = tensor(1)]; + tensor var_6788_cast_0, tensor var_6788_cast_1 = split(axis = var_6788_axis_0, split_sizes = var_6788_split_sizes_0, x = var_6787_cast)[name = tensor("op_6788_cast")]; + tensor var_6790_mode_0 = const()[name = tensor("op_6790_mode_0"), val = tensor("EXACT")]; + tensor var_6790_cast = gelu(mode = var_6790_mode_0, x = var_6788_cast_1)[name = tensor("op_6790_cast")]; + tensor input_405_cast = mul(x = var_6788_cast_0, y = var_6790_cast)[name = tensor("input_405_cast")]; + tensor var_6794 = const()[name = tensor("op_6794"), val = tensor([1, 1])]; + tensor var_6796 = const()[name = tensor("op_6796"), val = tensor([1, 1])]; + tensor var_6798_pad_type_0 = const()[name = tensor("op_6798_pad_type_0"), val = tensor("custom")]; + tensor var_6798_pad_0 = const()[name = tensor("op_6798_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(860108544))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(865023808))), name = tensor("mid_block_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor mid_block_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(865024000)))]; + tensor var_6798_cast = conv(bias = mid_block_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_6796, groups = var_4943, pad = var_6798_pad_0, pad_type = var_6798_pad_type_0, strides = var_6794, weight = mid_block_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_405_cast)[name = tensor("op_6798_cast")]; + tensor hidden_states_269_cast = add(x = var_6798_cast, y = inputs_203_cast)[name = tensor("hidden_states_269_cast")]; + tensor var_6800 = const()[name = tensor("op_6800"), val = tensor([2, 1280, 32, 32])]; + tensor input_407_cast = reshape(shape = var_6800, x = hidden_states_269_cast)[name = tensor("input_407_cast")]; + tensor var_6804 = const()[name = tensor("op_6804"), val = tensor([1, 1])]; + tensor var_6806 = const()[name = tensor("op_6806"), val = tensor([1, 1])]; + tensor hidden_states_271_pad_type_0 = const()[name = tensor("hidden_states_271_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_271_pad_0 = const()[name = tensor("hidden_states_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(865026624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(866255488))), name = tensor("mid_block_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("mid_block_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(866255680)))]; + tensor hidden_states_271_cast = conv(bias = mid_block_attentions_0_proj_out_bias_to_fp16, dilations = var_6806, groups = var_4943, pad = hidden_states_271_pad_0, pad_type = hidden_states_271_pad_type_0, strides = var_6804, weight = mid_block_attentions_0_proj_out_weight_to_fp16_palettized, x = input_407_cast)[name = tensor("hidden_states_271_cast")]; + tensor input_409_cast = add(x = hidden_states_271_cast, y = hidden_states_205_cast)[name = tensor("input_409_cast")]; + tensor reshape_76_shape_0 = const()[name = tensor("reshape_76_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_76_cast = reshape(shape = reshape_76_shape_0, x = input_409_cast)[name = tensor("reshape_76_cast")]; + tensor reduce_mean_57_axes_0 = const()[name = tensor("reduce_mean_57_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_57_keep_dims_0 = const()[name = tensor("reduce_mean_57_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_57_cast = reduce_mean(axes = reduce_mean_57_axes_0, keep_dims = reduce_mean_57_keep_dims_0, x = reshape_76_cast)[name = tensor("reduce_mean_57_cast")]; + tensor sub_38_cast = sub(x = reshape_76_cast, y = reduce_mean_57_cast)[name = tensor("sub_38_cast")]; + tensor square_19_cast = square(x = sub_38_cast)[name = tensor("square_19_cast")]; + tensor reduce_mean_59_axes_0 = const()[name = tensor("reduce_mean_59_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_59_keep_dims_0 = const()[name = tensor("reduce_mean_59_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_59_cast = reduce_mean(axes = reduce_mean_59_axes_0, keep_dims = reduce_mean_59_keep_dims_0, x = square_19_cast)[name = tensor("reduce_mean_59_cast")]; + tensor add_38_y_0_to_fp16 = const()[name = tensor("add_38_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_38_cast = add(x = reduce_mean_59_cast, y = add_38_y_0_to_fp16)[name = tensor("add_38_cast")]; + tensor sqrt_19_cast = sqrt(x = add_38_cast)[name = tensor("sqrt_19_cast")]; + tensor real_div_19_cast = real_div(x = sub_38_cast, y = sqrt_19_cast)[name = tensor("real_div_19_cast")]; + tensor reshape_77_shape_0 = const()[name = tensor("reshape_77_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_77_cast = reshape(shape = reshape_77_shape_0, x = real_div_19_cast)[name = tensor("reshape_77_cast")]; + tensor add_39_gamma_0_to_fp16 = const()[name = tensor("add_39_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(866258304)))]; + tensor add_39_beta_0_to_fp16 = const()[name = tensor("add_39_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(866260928)))]; + tensor add_39_epsilon_0_to_fp16 = const()[name = tensor("add_39_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_39_cast = batch_norm(beta = add_39_beta_0_to_fp16, epsilon = add_39_epsilon_0_to_fp16, gamma = add_39_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_77_cast)[name = tensor("add_39_cast")]; + tensor input_413_cast = silu(x = add_39_cast)[name = tensor("input_413_cast")]; + tensor var_6821 = const()[name = tensor("op_6821"), val = tensor([1, 1])]; + tensor var_6823 = const()[name = tensor("op_6823"), val = tensor([1, 1])]; + tensor hidden_states_273_pad_type_0 = const()[name = tensor("hidden_states_273_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_273_pad_0 = const()[name = tensor("hidden_states_273_pad_0"), val = 
tensor([1, 1, 1, 1])]; + tensor mid_block_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(866263552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(881009216))), name = tensor("mid_block_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor mid_block_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("mid_block_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(881009792)))]; + tensor hidden_states_273_cast = conv(bias = mid_block_resnets_1_conv1_bias_to_fp16, dilations = var_6823, groups = var_4943, pad = hidden_states_273_pad_0, pad_type = hidden_states_273_pad_type_0, strides = var_6821, weight = mid_block_resnets_1_conv1_weight_to_fp16_palettized, x = input_413_cast)[name = tensor("hidden_states_273_cast")]; + tensor var_6829 = const()[name = tensor("op_6829"), val = tensor([1, 1])]; + tensor var_6831 = const()[name = tensor("op_6831"), val = tensor([1, 1])]; + tensor temb_15_pad_type_0 = const()[name = tensor("temb_15_pad_type_0"), val = tensor("custom")]; + tensor temb_15_pad_0 = const()[name = tensor("temb_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor mid_block_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(881012416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(882241280))), name = tensor("mid_block_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor mid_block_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("mid_block_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(882241472)))]; + tensor temb_15_cast = conv(bias = mid_block_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_6831, groups = var_4943, pad = temb_15_pad_0, pad_type = temb_15_pad_type_0, strides = var_6829, weight = mid_block_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_15_cast")]; + tensor input_417_cast = add(x = hidden_states_273_cast, y = temb_15_cast)[name = tensor("input_417_cast")]; + tensor reshape_80_shape_0 = const()[name = tensor("reshape_80_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_80_cast = reshape(shape = reshape_80_shape_0, x = input_417_cast)[name = tensor("reshape_80_cast")]; + tensor reduce_mean_60_axes_0 = const()[name = tensor("reduce_mean_60_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_60_keep_dims_0 = const()[name = tensor("reduce_mean_60_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_60_cast = reduce_mean(axes = reduce_mean_60_axes_0, keep_dims = reduce_mean_60_keep_dims_0, x = reshape_80_cast)[name = tensor("reduce_mean_60_cast")]; + tensor sub_40_cast = sub(x = reshape_80_cast, y = reduce_mean_60_cast)[name = tensor("sub_40_cast")]; + tensor square_20_cast = square(x = sub_40_cast)[name = tensor("square_20_cast")]; + tensor reduce_mean_62_axes_0 = const()[name = tensor("reduce_mean_62_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_62_keep_dims_0 = const()[name = tensor("reduce_mean_62_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_62_cast = reduce_mean(axes = reduce_mean_62_axes_0, keep_dims = reduce_mean_62_keep_dims_0, x = 
square_20_cast)[name = tensor("reduce_mean_62_cast")]; + tensor add_40_y_0_to_fp16 = const()[name = tensor("add_40_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_40_cast = add(x = reduce_mean_62_cast, y = add_40_y_0_to_fp16)[name = tensor("add_40_cast")]; + tensor sqrt_20_cast = sqrt(x = add_40_cast)[name = tensor("sqrt_20_cast")]; + tensor real_div_20_cast = real_div(x = sub_40_cast, y = sqrt_20_cast)[name = tensor("real_div_20_cast")]; + tensor reshape_81_shape_0 = const()[name = tensor("reshape_81_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_81_cast = reshape(shape = reshape_81_shape_0, x = real_div_20_cast)[name = tensor("reshape_81_cast")]; + tensor add_41_gamma_0_to_fp16 = const()[name = tensor("add_41_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(882244096)))]; + tensor add_41_beta_0_to_fp16 = const()[name = tensor("add_41_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(882246720)))]; + tensor add_41_epsilon_0_to_fp16 = const()[name = tensor("add_41_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_41_cast = batch_norm(beta = add_41_beta_0_to_fp16, epsilon = add_41_epsilon_0_to_fp16, gamma = add_41_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_81_cast)[name = tensor("add_41_cast")]; + tensor input_421_cast = silu(x = add_41_cast)[name = tensor("input_421_cast")]; + tensor var_6841 = const()[name = tensor("op_6841"), val = tensor([1, 1])]; + tensor var_6843 = const()[name = tensor("op_6843"), val = tensor([1, 1])]; + tensor hidden_states_275_pad_type_0 = const()[name = tensor("hidden_states_275_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_275_pad_0 = const()[name = tensor("hidden_states_275_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor mid_block_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(882249344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893308608))), name = tensor("mid_block_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor mid_block_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("mid_block_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893308800)))]; + tensor hidden_states_275_cast = conv(bias = mid_block_resnets_1_conv2_bias_to_fp16, dilations = var_6843, groups = var_4943, pad = hidden_states_275_pad_0, pad_type = hidden_states_275_pad_type_0, strides = var_6841, weight = mid_block_resnets_1_conv2_weight_to_fp16_palettized, x = input_421_cast)[name = tensor("hidden_states_275_cast")]; + tensor hidden_states_277_cast = add(x = input_409_cast, y = hidden_states_275_cast)[name = tensor("hidden_states_277_cast")]; + tensor var_6849 = const()[name = tensor("op_6849"), val = tensor(3)]; + tensor var_6860 = const()[name = tensor("op_6860"), val = tensor(true)]; + tensor var_6865 = const()[name = tensor("op_6865"), val = tensor(1)]; + tensor input_423_interleave_0 = const()[name = tensor("input_423_interleave_0"), val = tensor(false)]; + tensor input_423_cast = concat(axis = var_6865, interleave = input_423_interleave_0, values = (hidden_states_277_cast, input_311_cast))[name = tensor("input_423_cast")]; + tensor reshape_84_shape_0 = const()[name = tensor("reshape_84_shape_0"), val = 
tensor([2, 32, 80, 32, 32])]; + tensor reshape_84_cast = reshape(shape = reshape_84_shape_0, x = input_423_cast)[name = tensor("reshape_84_cast")]; + tensor reduce_mean_63_axes_0 = const()[name = tensor("reduce_mean_63_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_63_keep_dims_0 = const()[name = tensor("reduce_mean_63_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_63_cast = reduce_mean(axes = reduce_mean_63_axes_0, keep_dims = reduce_mean_63_keep_dims_0, x = reshape_84_cast)[name = tensor("reduce_mean_63_cast")]; + tensor sub_42_cast = sub(x = reshape_84_cast, y = reduce_mean_63_cast)[name = tensor("sub_42_cast")]; + tensor square_21_cast = square(x = sub_42_cast)[name = tensor("square_21_cast")]; + tensor reduce_mean_65_axes_0 = const()[name = tensor("reduce_mean_65_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_65_keep_dims_0 = const()[name = tensor("reduce_mean_65_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_65_cast = reduce_mean(axes = reduce_mean_65_axes_0, keep_dims = reduce_mean_65_keep_dims_0, x = square_21_cast)[name = tensor("reduce_mean_65_cast")]; + tensor add_42_y_0_to_fp16 = const()[name = tensor("add_42_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_42_cast = add(x = reduce_mean_65_cast, y = add_42_y_0_to_fp16)[name = tensor("add_42_cast")]; + tensor sqrt_21_cast = sqrt(x = add_42_cast)[name = tensor("sqrt_21_cast")]; + tensor real_div_21_cast = real_div(x = sub_42_cast, y = sqrt_21_cast)[name = tensor("real_div_21_cast")]; + tensor reshape_85_shape_0 = const()[name = tensor("reshape_85_shape_0"), val = tensor([2, 2560, 32, 32])]; + tensor reshape_85_cast = reshape(shape = reshape_85_shape_0, x = real_div_21_cast)[name = tensor("reshape_85_cast")]; + tensor add_43_mean_0_to_fp16 = const()[name = tensor("add_43_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893311424)))]; + tensor add_43_variance_0_to_fp16 = const()[name = tensor("add_43_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893316608)))]; + tensor add_43_gamma_0_to_fp16 = const()[name = tensor("add_43_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893321792)))]; + tensor add_43_beta_0_to_fp16 = const()[name = tensor("add_43_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893326976)))]; + tensor add_43_epsilon_0_to_fp16 = const()[name = tensor("add_43_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_43_cast = batch_norm(beta = add_43_beta_0_to_fp16, epsilon = add_43_epsilon_0_to_fp16, gamma = add_43_gamma_0_to_fp16, mean = add_43_mean_0_to_fp16, variance = add_43_variance_0_to_fp16, x = reshape_85_cast)[name = tensor("add_43_cast")]; + tensor input_427_cast = silu(x = add_43_cast)[name = tensor("input_427_cast")]; + tensor var_6894 = const()[name = tensor("op_6894"), val = tensor([1, 1])]; + tensor var_6896 = const()[name = tensor("op_6896"), val = tensor([1, 1])]; + tensor hidden_states_279_pad_type_0 = const()[name = tensor("hidden_states_279_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_279_pad_0 = const()[name = tensor("hidden_states_279_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(893332160))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(922823424))), name = tensor("up_blocks_0_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 3, 3])]; + tensor up_blocks_0_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(922824000)))]; + tensor hidden_states_279_cast = conv(bias = up_blocks_0_resnets_0_conv1_bias_to_fp16, dilations = var_6896, groups = var_6865, pad = hidden_states_279_pad_0, pad_type = hidden_states_279_pad_type_0, strides = var_6894, weight = up_blocks_0_resnets_0_conv1_weight_to_fp16_palettized, x = input_427_cast)[name = tensor("hidden_states_279_cast")]; + tensor var_6902 = const()[name = tensor("op_6902"), val = tensor([1, 1])]; + tensor var_6904 = const()[name = tensor("op_6904"), val = tensor([1, 1])]; + tensor temb_17_pad_type_0 = const()[name = tensor("temb_17_pad_type_0"), val = tensor("custom")]; + tensor temb_17_pad_0 = const()[name = tensor("temb_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(922826624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(924055488))), name = tensor("up_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(924055680)))]; + tensor temb_17_cast = conv(bias = up_blocks_0_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_6904, groups = var_6865, pad = temb_17_pad_0, pad_type = temb_17_pad_type_0, strides = var_6902, weight = up_blocks_0_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_17_cast")]; + tensor input_431_cast = add(x = hidden_states_279_cast, y = temb_17_cast)[name = tensor("input_431_cast")]; + tensor reshape_88_shape_0 = const()[name = tensor("reshape_88_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_88_cast = reshape(shape = reshape_88_shape_0, x = input_431_cast)[name = tensor("reshape_88_cast")]; + tensor reduce_mean_66_axes_0 = const()[name = tensor("reduce_mean_66_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_66_keep_dims_0 = const()[name = tensor("reduce_mean_66_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_66_cast = reduce_mean(axes = reduce_mean_66_axes_0, keep_dims = reduce_mean_66_keep_dims_0, x = reshape_88_cast)[name = tensor("reduce_mean_66_cast")]; + tensor sub_44_cast = sub(x = reshape_88_cast, y = reduce_mean_66_cast)[name = tensor("sub_44_cast")]; + tensor square_22_cast = square(x = sub_44_cast)[name = tensor("square_22_cast")]; + tensor reduce_mean_68_axes_0 = const()[name = tensor("reduce_mean_68_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_68_keep_dims_0 = const()[name = tensor("reduce_mean_68_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_68_cast = reduce_mean(axes = reduce_mean_68_axes_0, keep_dims = reduce_mean_68_keep_dims_0, x = square_22_cast)[name = tensor("reduce_mean_68_cast")]; + tensor add_44_y_0_to_fp16 = const()[name = tensor("add_44_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_44_cast = add(x = reduce_mean_68_cast, y = 
add_44_y_0_to_fp16)[name = tensor("add_44_cast")]; + tensor sqrt_22_cast = sqrt(x = add_44_cast)[name = tensor("sqrt_22_cast")]; + tensor real_div_22_cast = real_div(x = sub_44_cast, y = sqrt_22_cast)[name = tensor("real_div_22_cast")]; + tensor reshape_89_shape_0 = const()[name = tensor("reshape_89_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_89_cast = reshape(shape = reshape_89_shape_0, x = real_div_22_cast)[name = tensor("reshape_89_cast")]; + tensor add_45_gamma_0_to_fp16 = const()[name = tensor("add_45_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(924058304)))]; + tensor add_45_beta_0_to_fp16 = const()[name = tensor("add_45_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(924060928)))]; + tensor add_45_epsilon_0_to_fp16 = const()[name = tensor("add_45_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_45_cast = batch_norm(beta = add_45_beta_0_to_fp16, epsilon = add_45_epsilon_0_to_fp16, gamma = add_45_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_89_cast)[name = tensor("add_45_cast")]; + tensor input_435_cast = silu(x = add_45_cast)[name = tensor("input_435_cast")]; + tensor var_6914 = const()[name = tensor("op_6914"), val = tensor([1, 1])]; + tensor var_6916 = const()[name = tensor("op_6916"), val = tensor([1, 1])]; + tensor hidden_states_281_pad_type_0 = const()[name = tensor("hidden_states_281_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_281_pad_0 = const()[name = tensor("hidden_states_281_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_0_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(924063552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(938809216))), name = tensor("up_blocks_0_resnets_0_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor up_blocks_0_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(938809792)))]; + tensor hidden_states_281_cast = conv(bias = up_blocks_0_resnets_0_conv2_bias_to_fp16, dilations = var_6916, groups = var_6865, pad = hidden_states_281_pad_0, pad_type = hidden_states_281_pad_type_0, strides = var_6914, weight = up_blocks_0_resnets_0_conv2_weight_to_fp16_palettized, x = input_435_cast)[name = tensor("hidden_states_281_cast")]; + tensor var_6921 = const()[name = tensor("op_6921"), val = tensor([1, 1])]; + tensor var_6923 = const()[name = tensor("op_6923"), val = tensor([1, 1])]; + tensor x_5_pad_type_0 = const()[name = tensor("x_5_pad_type_0"), val = tensor("custom")]; + tensor x_5_pad_0 = const()[name = tensor("x_5_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_0_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(938812416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(942089280))), name = tensor("up_blocks_0_resnets_0_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 1, 1])]; + tensor up_blocks_0_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_0_conv_shortcut_bias_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(942089856)))]; + tensor x_5_cast = conv(bias = up_blocks_0_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_6923, groups = var_6865, pad = x_5_pad_0, pad_type = x_5_pad_type_0, strides = var_6921, weight = up_blocks_0_resnets_0_conv_shortcut_weight_to_fp16_palettized, x = input_423_cast)[name = tensor("x_5_cast")]; + tensor hidden_states_283_cast = add(x = x_5_cast, y = hidden_states_281_cast)[name = tensor("hidden_states_283_cast")]; + tensor reshape_92_shape_0 = const()[name = tensor("reshape_92_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_92_cast = reshape(shape = reshape_92_shape_0, x = hidden_states_283_cast)[name = tensor("reshape_92_cast")]; + tensor reduce_mean_69_axes_0 = const()[name = tensor("reduce_mean_69_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_69_keep_dims_0 = const()[name = tensor("reduce_mean_69_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_69_cast = reduce_mean(axes = reduce_mean_69_axes_0, keep_dims = reduce_mean_69_keep_dims_0, x = reshape_92_cast)[name = tensor("reduce_mean_69_cast")]; + tensor sub_46_cast = sub(x = reshape_92_cast, y = reduce_mean_69_cast)[name = tensor("sub_46_cast")]; + tensor square_23_cast = square(x = sub_46_cast)[name = tensor("square_23_cast")]; + tensor reduce_mean_71_axes_0 = const()[name = tensor("reduce_mean_71_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_71_keep_dims_0 = const()[name = tensor("reduce_mean_71_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_71_cast = reduce_mean(axes = reduce_mean_71_axes_0, keep_dims = reduce_mean_71_keep_dims_0, x = square_23_cast)[name = tensor("reduce_mean_71_cast")]; + tensor add_46_y_0_to_fp16 = const()[name = tensor("add_46_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_46_cast = add(x = reduce_mean_71_cast, y = add_46_y_0_to_fp16)[name = tensor("add_46_cast")]; + tensor sqrt_23_cast = sqrt(x = add_46_cast)[name = tensor("sqrt_23_cast")]; + tensor real_div_23_cast = real_div(x = sub_46_cast, y = sqrt_23_cast)[name = tensor("real_div_23_cast")]; + tensor reshape_93_shape_0 = const()[name = tensor("reshape_93_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_93_cast = reshape(shape = reshape_93_shape_0, x = real_div_23_cast)[name = tensor("reshape_93_cast")]; + tensor add_47_gamma_0_to_fp16 = const()[name = tensor("add_47_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(942092480)))]; + tensor add_47_beta_0_to_fp16 = const()[name = tensor("add_47_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(942095104)))]; + tensor add_47_epsilon_0_to_fp16 = const()[name = tensor("add_47_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_47_cast = batch_norm(beta = add_47_beta_0_to_fp16, epsilon = add_47_epsilon_0_to_fp16, gamma = add_47_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_93_cast)[name = tensor("add_47_cast")]; + tensor var_6961 = const()[name = tensor("op_6961"), val = tensor([1, 1])]; + tensor var_6963 = const()[name = tensor("op_6963"), val = tensor([1, 1])]; + tensor hidden_states_285_pad_type_0 = const()[name = tensor("hidden_states_285_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_285_pad_0 = const()[name = tensor("hidden_states_285_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_proj_in_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(942097728))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(943736192))), name = tensor("up_blocks_0_attentions_0_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(943736768)))]; + tensor hidden_states_285_cast = conv(bias = up_blocks_0_attentions_0_proj_in_bias_to_fp16, dilations = var_6963, groups = var_6865, pad = hidden_states_285_pad_0, pad_type = hidden_states_285_pad_type_0, strides = var_6961, weight = up_blocks_0_attentions_0_proj_in_weight_to_fp16_palettized, x = add_47_cast)[name = tensor("hidden_states_285_cast")]; + tensor var_6968 = const()[name = tensor("op_6968"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_205_cast = reshape(shape = var_6968, x = hidden_states_285_cast)[name = tensor("inputs_205_cast")]; + tensor var_6978 = const()[name = tensor("op_6978"), val = tensor([1])]; + tensor channels_mean_205_cast = reduce_mean(axes = var_6978, keep_dims = var_6860, x = inputs_205_cast)[name = tensor("channels_mean_205_cast")]; + tensor zero_mean_205_cast = sub(x = inputs_205_cast, y = channels_mean_205_cast)[name = tensor("zero_mean_205_cast")]; + tensor zero_mean_sq_205_cast = mul(x = zero_mean_205_cast, y = zero_mean_205_cast)[name = tensor("zero_mean_sq_205_cast")]; + tensor var_6982 = const()[name = tensor("op_6982"), val = tensor([1])]; + tensor var_6983_cast = reduce_mean(axes = var_6982, keep_dims = var_6860, x = zero_mean_sq_205_cast)[name = tensor("op_6983_cast")]; + tensor var_6984_to_fp16 = const()[name = tensor("op_6984_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_6985_cast = add(x = var_6983_cast, y = var_6984_to_fp16)[name = tensor("op_6985_cast")]; + tensor denom_205_epsilon_0_to_fp16 = const()[name = tensor("denom_205_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_205_cast = rsqrt(epsilon = denom_205_epsilon_0_to_fp16, x = var_6985_cast)[name = tensor("denom_205_cast")]; + tensor out_205_cast = mul(x = zero_mean_205_cast, y = denom_205_cast)[name = tensor("out_205_cast")]; + tensor var_6989_to_fp16 = const()[name = tensor("op_6989_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(943739392)))]; + tensor var_6990_cast = add(x = out_205_cast, y = var_6989_to_fp16)[name = tensor("op_6990_cast")]; + tensor var_6992_to_fp16 = const()[name = tensor("op_6992_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(943742016)))]; + tensor hidden_states_287_cast = mul(x = var_6990_cast, y = var_6992_to_fp16)[name = tensor("hidden_states_287_cast")]; + tensor var_6999 = const()[name = tensor("op_6999"), val = tensor([1, 1])]; + tensor var_7001 = const()[name = tensor("op_7001"), val = tensor([1, 1])]; + tensor q_137_pad_type_0 = const()[name = tensor("q_137_pad_type_0"), val = tensor("custom")]; + tensor q_137_pad_0 = const()[name = tensor("q_137_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(943744640))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(944973504))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_137_cast = conv(dilations = var_7001, groups = var_6865, pad = q_137_pad_0, pad_type = q_137_pad_type_0, strides = var_6999, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_287_cast)[name = tensor("q_137_cast")]; + tensor var_7005 = const()[name = tensor("op_7005"), val = tensor([1, 1])]; + tensor var_7007 = const()[name = tensor("op_7007"), val = tensor([1, 1])]; + tensor k_137_pad_type_0 = const()[name = tensor("k_137_pad_type_0"), val = tensor("custom")]; + tensor k_137_pad_0 = const()[name = tensor("k_137_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(944973696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(946202560))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_137_cast = conv(dilations = var_7007, groups = var_6865, pad = k_137_pad_0, pad_type = k_137_pad_type_0, strides = var_7005, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_287_cast)[name = tensor("k_137_cast")]; + tensor var_7011 = const()[name = tensor("op_7011"), val = tensor([1, 1])]; + tensor var_7013 = const()[name = tensor("op_7013"), val = tensor([1, 1])]; + tensor v_137_pad_type_0 = const()[name = tensor("v_137_pad_type_0"), val = tensor("custom")]; + tensor v_137_pad_0 = const()[name = tensor("v_137_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(946202752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(947431616))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_137_cast = conv(dilations = var_7013, groups = var_6865, pad = v_137_pad_0, pad_type = v_137_pad_type_0, strides = var_7011, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_287_cast)[name = tensor("v_137_cast")]; + tensor var_7017 = const()[name = tensor("op_7017"), val = tensor([2, 20, 64, -1])]; + tensor var_7018_cast = reshape(shape = var_7017, x = q_137_cast)[name = tensor("op_7018_cast")]; + tensor var_7019 = const()[name = tensor("op_7019"), val = tensor([2, 20, 64, -1])]; + tensor var_7020_cast = reshape(shape = var_7019, x = k_137_cast)[name = tensor("op_7020_cast")]; + tensor var_7021 = const()[name = tensor("op_7021"), val = tensor([2, 20, 64, -1])]; + tensor var_7022_cast = reshape(shape = var_7021, x = v_137_cast)[name = tensor("op_7022_cast")]; + tensor attn_weights_273_transpose_x_0 = const()[name = tensor("attn_weights_273_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_273_transpose_y_0 = const()[name = tensor("attn_weights_273_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_273_cast = matmul(transpose_x = 
attn_weights_273_transpose_x_0, transpose_y = attn_weights_273_transpose_y_0, x = var_7018_cast, y = var_7020_cast)[name = tensor("attn_weights_273_cast")]; + tensor var_6856_to_fp16 = const()[name = tensor("op_6856_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_275_cast = mul(x = attn_weights_273_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_275_cast")]; + tensor var_7026_cast = softmax(axis = var_6849, x = attn_weights_275_cast)[name = tensor("op_7026_cast")]; + tensor attn_137_transpose_x_0 = const()[name = tensor("attn_137_transpose_x_0"), val = tensor(false)]; + tensor attn_137_transpose_y_0 = const()[name = tensor("attn_137_transpose_y_0"), val = tensor(true)]; + tensor attn_137_cast = matmul(transpose_x = attn_137_transpose_x_0, transpose_y = attn_137_transpose_y_0, x = var_7022_cast, y = var_7026_cast)[name = tensor("attn_137_cast")]; + tensor var_7030 = const()[name = tensor("op_7030"), val = tensor([2, 1280, 1, -1])]; + tensor input_439_cast = reshape(shape = var_7030, x = attn_137_cast)[name = tensor("input_439_cast")]; + tensor var_7035 = const()[name = tensor("op_7035"), val = tensor([1, 1])]; + tensor var_7037 = const()[name = tensor("op_7037"), val = tensor([1, 1])]; + tensor var_7039_pad_type_0 = const()[name = tensor("op_7039_pad_type_0"), val = tensor("custom")]; + tensor var_7039_pad_0 = const()[name = tensor("op_7039_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(947431808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(948660672))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(948660864)))]; + tensor var_7039_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_7037, groups = var_6865, pad = var_7039_pad_0, pad_type = var_7039_pad_type_0, strides = var_7035, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_439_cast)[name = tensor("op_7039_cast")]; + tensor inputs_207_cast = add(x = var_7039_cast, y = inputs_205_cast)[name = tensor("inputs_207_cast")]; + tensor var_7043 = const()[name = tensor("op_7043"), val = tensor([1])]; + tensor channels_mean_207_cast = reduce_mean(axes = var_7043, keep_dims = var_6860, x = inputs_207_cast)[name = tensor("channels_mean_207_cast")]; + tensor zero_mean_207_cast = sub(x = inputs_207_cast, y = channels_mean_207_cast)[name = tensor("zero_mean_207_cast")]; + tensor zero_mean_sq_207_cast = mul(x = zero_mean_207_cast, y = zero_mean_207_cast)[name = tensor("zero_mean_sq_207_cast")]; + tensor var_7047 = const()[name = tensor("op_7047"), val = tensor([1])]; + tensor var_7048_cast = reduce_mean(axes = var_7047, keep_dims = var_6860, x = zero_mean_sq_207_cast)[name = tensor("op_7048_cast")]; + tensor var_7049_to_fp16 = const()[name = tensor("op_7049_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7050_cast = add(x = var_7048_cast, y = var_7049_to_fp16)[name = tensor("op_7050_cast")]; + tensor 
denom_207_epsilon_0_to_fp16 = const()[name = tensor("denom_207_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_207_cast = rsqrt(epsilon = denom_207_epsilon_0_to_fp16, x = var_7050_cast)[name = tensor("denom_207_cast")]; + tensor out_207_cast = mul(x = zero_mean_207_cast, y = denom_207_cast)[name = tensor("out_207_cast")]; + tensor var_7054_to_fp16 = const()[name = tensor("op_7054_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(948663488)))]; + tensor var_7055_cast = add(x = out_207_cast, y = var_7054_to_fp16)[name = tensor("op_7055_cast")]; + tensor var_7057_to_fp16 = const()[name = tensor("op_7057_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(948666112)))]; + tensor hidden_states_289_cast = mul(x = var_7055_cast, y = var_7057_to_fp16)[name = tensor("hidden_states_289_cast")]; + tensor var_7064 = const()[name = tensor("op_7064"), val = tensor([1, 1])]; + tensor var_7066 = const()[name = tensor("op_7066"), val = tensor([1, 1])]; + tensor q_139_pad_type_0 = const()[name = tensor("q_139_pad_type_0"), val = tensor("custom")]; + tensor q_139_pad_0 = const()[name = tensor("q_139_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(948668736))), lut = tensor([-0x1.d5p-6, -0x1.1d8p-7, 0x1.1d8p-7, 0x1.d58p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_139_cast = conv(dilations = var_7066, groups = var_6865, pad = q_139_pad_0, pad_type = q_139_pad_type_0, strides = var_7064, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_289_cast)[name = tensor("q_139_cast")]; + tensor var_7070 = const()[name = tensor("op_7070"), val = tensor([1, 1])]; + tensor var_7072 = const()[name = tensor("op_7072"), val = tensor([1, 1])]; + tensor k_139_pad_type_0 = const()[name = tensor("k_139_pad_type_0"), val = tensor("custom")]; + tensor k_139_pad_0 = const()[name = tensor("k_139_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(949078400))), lut = tensor([-0x1.c24p-6, -0x1.0bcp-7, 0x1.0d8p-7, 0x1.c28p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_139_cast = conv(dilations = var_7072, groups = var_6865, pad = k_139_pad_0, pad_type = k_139_pad_type_0, strides = var_7070, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_139_cast")]; + tensor var_7076 = const()[name = tensor("op_7076"), val = tensor([1, 1])]; + tensor var_7078 = const()[name = tensor("op_7078"), val = tensor([1, 1])]; + tensor v_139_pad_type_0 = const()[name = tensor("v_139_pad_type_0"), val = tensor("custom")]; + tensor v_139_pad_0 = const()[name = tensor("v_139_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(949733824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(951044608))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_139_cast = conv(dilations = var_7078, groups = var_6865, pad = v_139_pad_0, pad_type = v_139_pad_type_0, strides = var_7076, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_139_cast")]; + tensor var_7082 = const()[name = tensor("op_7082"), val = tensor([2, 20, 64, -1])]; + tensor var_7083_cast = reshape(shape = var_7082, x = q_139_cast)[name = tensor("op_7083_cast")]; + tensor var_7084 = const()[name = tensor("op_7084"), val = tensor([2, 20, 64, -1])]; + tensor var_7085_cast = reshape(shape = var_7084, x = k_139_cast)[name = tensor("op_7085_cast")]; + tensor var_7086 = const()[name = tensor("op_7086"), val = tensor([2, 20, 64, -1])]; + tensor var_7087_cast = reshape(shape = var_7086, x = v_139_cast)[name = tensor("op_7087_cast")]; + tensor attn_weights_277_transpose_x_0 = const()[name = tensor("attn_weights_277_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_277_transpose_y_0 = const()[name = tensor("attn_weights_277_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_277_cast = matmul(transpose_x = attn_weights_277_transpose_x_0, transpose_y = attn_weights_277_transpose_y_0, x = var_7083_cast, y = var_7085_cast)[name = tensor("attn_weights_277_cast")]; + tensor attn_weights_279_cast = mul(x = attn_weights_277_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_279_cast")]; + tensor var_7091_cast = softmax(axis = var_6849, x = attn_weights_279_cast)[name = tensor("op_7091_cast")]; + tensor attn_139_transpose_x_0 = const()[name = tensor("attn_139_transpose_x_0"), val = tensor(false)]; + tensor attn_139_transpose_y_0 = const()[name = tensor("attn_139_transpose_y_0"), val = tensor(true)]; + tensor attn_139_cast = matmul(transpose_x = attn_139_transpose_x_0, transpose_y = attn_139_transpose_y_0, x = var_7087_cast, y = var_7091_cast)[name = tensor("attn_139_cast")]; + tensor var_7095 = const()[name = tensor("op_7095"), val = tensor([2, 1280, 1, -1])]; + tensor input_441_cast = reshape(shape = var_7095, x = attn_139_cast)[name = tensor("input_441_cast")]; + tensor var_7100 = const()[name = tensor("op_7100"), val = tensor([1, 1])]; + tensor var_7102 = const()[name = tensor("op_7102"), val = tensor([1, 1])]; + tensor var_7104_pad_type_0 = const()[name = tensor("op_7104_pad_type_0"), val = tensor("custom")]; + tensor var_7104_pad_0 = const()[name = tensor("op_7104_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(951044736))), lut = tensor([-0x1.dbcp-7, -0x1.1dcp-8, 0x1.1b4p-8, 0x1.dacp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(951454400)))]; + tensor var_7104_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, 
dilations = var_7102, groups = var_6865, pad = var_7104_pad_0, pad_type = var_7104_pad_type_0, strides = var_7100, weight = up_blocks_0_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_441_cast)[name = tensor("op_7104_cast")]; + tensor inputs_209_cast = add(x = var_7104_cast, y = inputs_207_cast)[name = tensor("inputs_209_cast")]; + tensor var_7108 = const()[name = tensor("op_7108"), val = tensor([1])]; + tensor channels_mean_209_cast = reduce_mean(axes = var_7108, keep_dims = var_6860, x = inputs_209_cast)[name = tensor("channels_mean_209_cast")]; + tensor zero_mean_209_cast = sub(x = inputs_209_cast, y = channels_mean_209_cast)[name = tensor("zero_mean_209_cast")]; + tensor zero_mean_sq_209_cast = mul(x = zero_mean_209_cast, y = zero_mean_209_cast)[name = tensor("zero_mean_sq_209_cast")]; + tensor var_7112 = const()[name = tensor("op_7112"), val = tensor([1])]; + tensor var_7113_cast = reduce_mean(axes = var_7112, keep_dims = var_6860, x = zero_mean_sq_209_cast)[name = tensor("op_7113_cast")]; + tensor var_7114_to_fp16 = const()[name = tensor("op_7114_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7115_cast = add(x = var_7113_cast, y = var_7114_to_fp16)[name = tensor("op_7115_cast")]; + tensor denom_209_epsilon_0_to_fp16 = const()[name = tensor("denom_209_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_209_cast = rsqrt(epsilon = denom_209_epsilon_0_to_fp16, x = var_7115_cast)[name = tensor("denom_209_cast")]; + tensor out_209_cast = mul(x = zero_mean_209_cast, y = denom_209_cast)[name = tensor("out_209_cast")]; + tensor var_7119_to_fp16 = const()[name = tensor("op_7119_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(951457024)))]; + tensor var_7120_cast = add(x = out_209_cast, y = var_7119_to_fp16)[name = tensor("op_7120_cast")]; + tensor var_7122_to_fp16 = const()[name = tensor("op_7122_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(951459648)))]; + tensor input_443_cast = mul(x = var_7120_cast, y = var_7122_to_fp16)[name = tensor("input_443_cast")]; + tensor var_7130 = const()[name = tensor("op_7130"), val = tensor([1, 1])]; + tensor var_7132 = const()[name = tensor("op_7132"), val = tensor([1, 1])]; + tensor var_7134_pad_type_0 = const()[name = tensor("op_7134_pad_type_0"), val = tensor("custom")]; + tensor var_7134_pad_0 = const()[name = tensor("op_7134_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(951462272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(961292736))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(961292928)))]; + tensor var_7134_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_7132, groups = var_6865, pad = var_7134_pad_0, pad_type = var_7134_pad_type_0, strides = var_7130, weight = 
up_blocks_0_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_443_cast)[name = tensor("op_7134_cast")]; + tensor var_7135_split_sizes_0 = const()[name = tensor("op_7135_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7135_axis_0 = const()[name = tensor("op_7135_axis_0"), val = tensor(1)]; + tensor var_7135_cast_0, tensor var_7135_cast_1 = split(axis = var_7135_axis_0, split_sizes = var_7135_split_sizes_0, x = var_7134_cast)[name = tensor("op_7135_cast")]; + tensor var_7137_mode_0 = const()[name = tensor("op_7137_mode_0"), val = tensor("EXACT")]; + tensor var_7137_cast = gelu(mode = var_7137_mode_0, x = var_7135_cast_1)[name = tensor("op_7137_cast")]; + tensor input_445_cast = mul(x = var_7135_cast_0, y = var_7137_cast)[name = tensor("input_445_cast")]; + tensor var_7141 = const()[name = tensor("op_7141"), val = tensor([1, 1])]; + tensor var_7143 = const()[name = tensor("op_7143"), val = tensor([1, 1])]; + tensor var_7145_pad_type_0 = const()[name = tensor("op_7145_pad_type_0"), val = tensor("custom")]; + tensor var_7145_pad_0 = const()[name = tensor("op_7145_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(961313472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(967867136))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(967867712)))]; + tensor var_7145_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_7143, groups = var_6865, pad = var_7145_pad_0, pad_type = var_7145_pad_type_0, strides = var_7141, weight = up_blocks_0_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_445_cast)[name = tensor("op_7145_cast")]; + tensor inputs_211_cast = add(x = var_7145_cast, y = inputs_209_cast)[name = tensor("inputs_211_cast")]; + tensor var_7155 = const()[name = tensor("op_7155"), val = tensor([1])]; + tensor channels_mean_211_cast = reduce_mean(axes = var_7155, keep_dims = var_6860, x = inputs_211_cast)[name = tensor("channels_mean_211_cast")]; + tensor zero_mean_211_cast = sub(x = inputs_211_cast, y = channels_mean_211_cast)[name = tensor("zero_mean_211_cast")]; + tensor zero_mean_sq_211_cast = mul(x = zero_mean_211_cast, y = zero_mean_211_cast)[name = tensor("zero_mean_sq_211_cast")]; + tensor var_7159 = const()[name = tensor("op_7159"), val = tensor([1])]; + tensor var_7160_cast = reduce_mean(axes = var_7159, keep_dims = var_6860, x = zero_mean_sq_211_cast)[name = tensor("op_7160_cast")]; + tensor var_7161_to_fp16 = const()[name = tensor("op_7161_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7162_cast = add(x = var_7160_cast, y = var_7161_to_fp16)[name = tensor("op_7162_cast")]; + tensor denom_211_epsilon_0_to_fp16 = const()[name = tensor("denom_211_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_211_cast = rsqrt(epsilon = denom_211_epsilon_0_to_fp16, x = var_7162_cast)[name = tensor("denom_211_cast")]; + tensor out_211_cast = mul(x = zero_mean_211_cast, y = 
denom_211_cast)[name = tensor("out_211_cast")]; + tensor var_7166_to_fp16 = const()[name = tensor("op_7166_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(967870336)))]; + tensor var_7167_cast = add(x = out_211_cast, y = var_7166_to_fp16)[name = tensor("op_7167_cast")]; + tensor var_7169_to_fp16 = const()[name = tensor("op_7169_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(967872960)))]; + tensor hidden_states_293_cast = mul(x = var_7167_cast, y = var_7169_to_fp16)[name = tensor("hidden_states_293_cast")]; + tensor var_7176 = const()[name = tensor("op_7176"), val = tensor([1, 1])]; + tensor var_7178 = const()[name = tensor("op_7178"), val = tensor([1, 1])]; + tensor q_141_pad_type_0 = const()[name = tensor("q_141_pad_type_0"), val = tensor("custom")]; + tensor q_141_pad_0 = const()[name = tensor("q_141_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(967875584))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(968694848))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_141_cast = conv(dilations = var_7178, groups = var_6865, pad = q_141_pad_0, pad_type = q_141_pad_type_0, strides = var_7176, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_293_cast)[name = tensor("q_141_cast")]; + tensor var_7182 = const()[name = tensor("op_7182"), val = tensor([1, 1])]; + tensor var_7184 = const()[name = tensor("op_7184"), val = tensor([1, 1])]; + tensor k_141_pad_type_0 = const()[name = tensor("k_141_pad_type_0"), val = tensor("custom")]; + tensor k_141_pad_0 = const()[name = tensor("k_141_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(968694976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(969514240))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_141_cast = conv(dilations = var_7184, groups = var_6865, pad = k_141_pad_0, pad_type = k_141_pad_type_0, strides = var_7182, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_293_cast)[name = tensor("k_141_cast")]; + tensor var_7188 = const()[name = tensor("op_7188"), val = tensor([1, 1])]; + tensor var_7190 = const()[name = tensor("op_7190"), val = tensor([1, 1])]; + tensor v_141_pad_type_0 = const()[name = tensor("v_141_pad_type_0"), val = tensor("custom")]; + tensor v_141_pad_0 = const()[name = tensor("v_141_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(969514368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(970743232))), name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_141_cast = conv(dilations = var_7190, groups = var_6865, pad = v_141_pad_0, pad_type = v_141_pad_type_0, strides = var_7188, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_293_cast)[name = tensor("v_141_cast")]; + tensor var_7194 = const()[name = tensor("op_7194"), val = tensor([2, 20, 64, -1])]; + tensor var_7195_cast = reshape(shape = var_7194, x = q_141_cast)[name = tensor("op_7195_cast")]; + tensor var_7196 = const()[name = tensor("op_7196"), val = tensor([2, 20, 64, -1])]; + tensor var_7197_cast = reshape(shape = var_7196, x = k_141_cast)[name = tensor("op_7197_cast")]; + tensor var_7198 = const()[name = tensor("op_7198"), val = tensor([2, 20, 64, -1])]; + tensor var_7199_cast = reshape(shape = var_7198, x = v_141_cast)[name = tensor("op_7199_cast")]; + tensor attn_weights_281_transpose_x_0 = const()[name = tensor("attn_weights_281_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_281_transpose_y_0 = const()[name = tensor("attn_weights_281_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_281_cast = matmul(transpose_x = attn_weights_281_transpose_x_0, transpose_y = attn_weights_281_transpose_y_0, x = var_7195_cast, y = var_7197_cast)[name = tensor("attn_weights_281_cast")]; + tensor attn_weights_283_cast = mul(x = attn_weights_281_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_283_cast")]; + tensor var_7203_cast = softmax(axis = var_6849, x = attn_weights_283_cast)[name = tensor("op_7203_cast")]; + tensor attn_141_transpose_x_0 = const()[name = tensor("attn_141_transpose_x_0"), val = tensor(false)]; + tensor attn_141_transpose_y_0 = const()[name = tensor("attn_141_transpose_y_0"), val = tensor(true)]; + tensor attn_141_cast = matmul(transpose_x = attn_141_transpose_x_0, transpose_y = attn_141_transpose_y_0, x = var_7199_cast, y = var_7203_cast)[name = tensor("attn_141_cast")]; + tensor var_7207 = const()[name = tensor("op_7207"), val = tensor([2, 1280, 1, -1])]; + tensor input_447_cast = reshape(shape = var_7207, x = attn_141_cast)[name = tensor("input_447_cast")]; + tensor var_7212 = const()[name = tensor("op_7212"), val = tensor([1, 1])]; + tensor var_7214 = const()[name = tensor("op_7214"), val = tensor([1, 1])]; + tensor var_7216_pad_type_0 = const()[name = tensor("op_7216_pad_type_0"), val = tensor("custom")]; + tensor var_7216_pad_0 = const()[name = tensor("op_7216_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(970743424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(971972288))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(971972480)))]; + tensor var_7216_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_7214, groups = var_6865, pad = var_7216_pad_0, pad_type = 
var_7216_pad_type_0, strides = var_7212, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_447_cast)[name = tensor("op_7216_cast")]; + tensor inputs_213_cast = add(x = var_7216_cast, y = inputs_211_cast)[name = tensor("inputs_213_cast")]; + tensor var_7220 = const()[name = tensor("op_7220"), val = tensor([1])]; + tensor channels_mean_213_cast = reduce_mean(axes = var_7220, keep_dims = var_6860, x = inputs_213_cast)[name = tensor("channels_mean_213_cast")]; + tensor zero_mean_213_cast = sub(x = inputs_213_cast, y = channels_mean_213_cast)[name = tensor("zero_mean_213_cast")]; + tensor zero_mean_sq_213_cast = mul(x = zero_mean_213_cast, y = zero_mean_213_cast)[name = tensor("zero_mean_sq_213_cast")]; + tensor var_7224 = const()[name = tensor("op_7224"), val = tensor([1])]; + tensor var_7225_cast = reduce_mean(axes = var_7224, keep_dims = var_6860, x = zero_mean_sq_213_cast)[name = tensor("op_7225_cast")]; + tensor var_7226_to_fp16 = const()[name = tensor("op_7226_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7227_cast = add(x = var_7225_cast, y = var_7226_to_fp16)[name = tensor("op_7227_cast")]; + tensor denom_213_epsilon_0_to_fp16 = const()[name = tensor("denom_213_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_213_cast = rsqrt(epsilon = denom_213_epsilon_0_to_fp16, x = var_7227_cast)[name = tensor("denom_213_cast")]; + tensor out_213_cast = mul(x = zero_mean_213_cast, y = denom_213_cast)[name = tensor("out_213_cast")]; + tensor var_7231_to_fp16 = const()[name = tensor("op_7231_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(971975104)))]; + tensor var_7232_cast = add(x = out_213_cast, y = var_7231_to_fp16)[name = tensor("op_7232_cast")]; + tensor var_7234_to_fp16 = const()[name = tensor("op_7234_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(971977728)))]; + tensor hidden_states_295_cast = mul(x = var_7232_cast, y = var_7234_to_fp16)[name = tensor("hidden_states_295_cast")]; + tensor var_7241 = const()[name = tensor("op_7241"), val = tensor([1, 1])]; + tensor var_7243 = const()[name = tensor("op_7243"), val = tensor([1, 1])]; + tensor q_143_pad_type_0 = const()[name = tensor("q_143_pad_type_0"), val = tensor("custom")]; + tensor q_143_pad_0 = const()[name = tensor("q_143_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(971980352))), lut = tensor([-0x1.19p-5, -0x1.524p-7, 0x1.50cp-7, 0x1.188p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_143_cast = conv(dilations = var_7243, groups = var_6865, pad = q_143_pad_0, pad_type = q_143_pad_type_0, strides = var_7241, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_295_cast)[name = tensor("q_143_cast")]; + tensor var_7247 = const()[name = tensor("op_7247"), val = tensor([1, 1])]; + tensor var_7249 = const()[name = tensor("op_7249"), val = tensor([1, 1])]; + tensor k_143_pad_type_0 = const()[name = tensor("k_143_pad_type_0"), val = tensor("custom")]; + tensor k_143_pad_0 = const()[name = tensor("k_143_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(972390016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(973700800))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_143_cast = conv(dilations = var_7249, groups = var_6865, pad = k_143_pad_0, pad_type = k_143_pad_type_0, strides = var_7247, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_143_cast")]; + tensor var_7253 = const()[name = tensor("op_7253"), val = tensor([1, 1])]; + tensor var_7255 = const()[name = tensor("op_7255"), val = tensor([1, 1])]; + tensor v_143_pad_type_0 = const()[name = tensor("v_143_pad_type_0"), val = tensor("custom")]; + tensor v_143_pad_0 = const()[name = tensor("v_143_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(973700928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(975011712))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_143_cast = conv(dilations = var_7255, groups = var_6865, pad = v_143_pad_0, pad_type = v_143_pad_type_0, strides = var_7253, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_143_cast")]; + tensor var_7259 = const()[name = tensor("op_7259"), val = tensor([2, 20, 64, -1])]; + tensor var_7260_cast = reshape(shape = var_7259, x = q_143_cast)[name = tensor("op_7260_cast")]; + tensor var_7261 = const()[name = tensor("op_7261"), val = tensor([2, 20, 64, -1])]; + tensor var_7262_cast = reshape(shape = var_7261, x = k_143_cast)[name = tensor("op_7262_cast")]; + tensor var_7263 = const()[name = tensor("op_7263"), val = tensor([2, 20, 64, -1])]; + tensor var_7264_cast = reshape(shape = var_7263, x = v_143_cast)[name = tensor("op_7264_cast")]; + tensor attn_weights_285_transpose_x_0 = const()[name = tensor("attn_weights_285_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_285_transpose_y_0 = const()[name = tensor("attn_weights_285_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_285_cast = matmul(transpose_x = attn_weights_285_transpose_x_0, transpose_y = attn_weights_285_transpose_y_0, x = var_7260_cast, y = var_7262_cast)[name = tensor("attn_weights_285_cast")]; + tensor attn_weights_287_cast = mul(x = attn_weights_285_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_287_cast")]; + tensor var_7268_cast = softmax(axis = var_6849, x = attn_weights_287_cast)[name = tensor("op_7268_cast")]; + tensor attn_143_transpose_x_0 = const()[name = tensor("attn_143_transpose_x_0"), val = tensor(false)]; + tensor attn_143_transpose_y_0 = const()[name = tensor("attn_143_transpose_y_0"), val = tensor(true)]; + tensor attn_143_cast = matmul(transpose_x = attn_143_transpose_x_0, transpose_y = attn_143_transpose_y_0, x = var_7264_cast, y = var_7268_cast)[name = tensor("attn_143_cast")]; + tensor var_7272 = const()[name = tensor("op_7272"), val = tensor([2, 1280, 1, -1])]; + 
tensor input_449_cast = reshape(shape = var_7272, x = attn_143_cast)[name = tensor("input_449_cast")]; + tensor var_7277 = const()[name = tensor("op_7277"), val = tensor([1, 1])]; + tensor var_7279 = const()[name = tensor("op_7279"), val = tensor([1, 1])]; + tensor var_7281_pad_type_0 = const()[name = tensor("op_7281_pad_type_0"), val = tensor("custom")]; + tensor var_7281_pad_0 = const()[name = tensor("op_7281_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(975011840))), lut = tensor([-0x1.2fp-7, 0x1.2f4p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(975216704)))]; + tensor var_7281_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_7279, groups = var_6865, pad = var_7281_pad_0, pad_type = var_7281_pad_type_0, strides = var_7277, weight = up_blocks_0_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_449_cast)[name = tensor("op_7281_cast")]; + tensor inputs_215_cast = add(x = var_7281_cast, y = inputs_213_cast)[name = tensor("inputs_215_cast")]; + tensor var_7285 = const()[name = tensor("op_7285"), val = tensor([1])]; + tensor channels_mean_215_cast = reduce_mean(axes = var_7285, keep_dims = var_6860, x = inputs_215_cast)[name = tensor("channels_mean_215_cast")]; + tensor zero_mean_215_cast = sub(x = inputs_215_cast, y = channels_mean_215_cast)[name = tensor("zero_mean_215_cast")]; + tensor zero_mean_sq_215_cast = mul(x = zero_mean_215_cast, y = zero_mean_215_cast)[name = tensor("zero_mean_sq_215_cast")]; + tensor var_7289 = const()[name = tensor("op_7289"), val = tensor([1])]; + tensor var_7290_cast = reduce_mean(axes = var_7289, keep_dims = var_6860, x = zero_mean_sq_215_cast)[name = tensor("op_7290_cast")]; + tensor var_7291_to_fp16 = const()[name = tensor("op_7291_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7292_cast = add(x = var_7290_cast, y = var_7291_to_fp16)[name = tensor("op_7292_cast")]; + tensor denom_215_epsilon_0_to_fp16 = const()[name = tensor("denom_215_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_215_cast = rsqrt(epsilon = denom_215_epsilon_0_to_fp16, x = var_7292_cast)[name = tensor("denom_215_cast")]; + tensor out_215_cast = mul(x = zero_mean_215_cast, y = denom_215_cast)[name = tensor("out_215_cast")]; + tensor var_7296_to_fp16 = const()[name = tensor("op_7296_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(975219328)))]; + tensor var_7297_cast = add(x = out_215_cast, y = var_7296_to_fp16)[name = tensor("op_7297_cast")]; + tensor var_7299_to_fp16 = const()[name = tensor("op_7299_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(975221952)))]; + tensor input_451_cast = mul(x = var_7297_cast, y = var_7299_to_fp16)[name = tensor("input_451_cast")]; + tensor var_7307 = const()[name = tensor("op_7307"), val = tensor([1, 1])]; + tensor var_7309 = const()[name = tensor("op_7309"), val = tensor([1, 
1])]; + tensor var_7311_pad_type_0 = const()[name = tensor("op_7311_pad_type_0"), val = tensor("custom")]; + tensor var_7311_pad_0 = const()[name = tensor("op_7311_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(975224576))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(985055040))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(985055232)))]; + tensor var_7311_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_7309, groups = var_6865, pad = var_7311_pad_0, pad_type = var_7311_pad_type_0, strides = var_7307, weight = up_blocks_0_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_451_cast)[name = tensor("op_7311_cast")]; + tensor var_7312_split_sizes_0 = const()[name = tensor("op_7312_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7312_axis_0 = const()[name = tensor("op_7312_axis_0"), val = tensor(1)]; + tensor var_7312_cast_0, tensor var_7312_cast_1 = split(axis = var_7312_axis_0, split_sizes = var_7312_split_sizes_0, x = var_7311_cast)[name = tensor("op_7312_cast")]; + tensor var_7314_mode_0 = const()[name = tensor("op_7314_mode_0"), val = tensor("EXACT")]; + tensor var_7314_cast = gelu(mode = var_7314_mode_0, x = var_7312_cast_1)[name = tensor("op_7314_cast")]; + tensor input_453_cast = mul(x = var_7312_cast_0, y = var_7314_cast)[name = tensor("input_453_cast")]; + tensor var_7318 = const()[name = tensor("op_7318"), val = tensor([1, 1])]; + tensor var_7320 = const()[name = tensor("op_7320"), val = tensor([1, 1])]; + tensor var_7322_pad_type_0 = const()[name = tensor("op_7322_pad_type_0"), val = tensor("custom")]; + tensor var_7322_pad_0 = const()[name = tensor("op_7322_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(985075776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(989991040))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(989991232)))]; + tensor var_7322_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_7320, groups = var_6865, pad = var_7322_pad_0, pad_type = var_7322_pad_type_0, strides = var_7318, weight = up_blocks_0_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_453_cast)[name = tensor("op_7322_cast")]; + tensor inputs_217_cast = add(x = var_7322_cast, y = inputs_215_cast)[name = tensor("inputs_217_cast")]; 
+ tensor var_7332 = const()[name = tensor("op_7332"), val = tensor([1])]; + tensor channels_mean_217_cast = reduce_mean(axes = var_7332, keep_dims = var_6860, x = inputs_217_cast)[name = tensor("channels_mean_217_cast")]; + tensor zero_mean_217_cast = sub(x = inputs_217_cast, y = channels_mean_217_cast)[name = tensor("zero_mean_217_cast")]; + tensor zero_mean_sq_217_cast = mul(x = zero_mean_217_cast, y = zero_mean_217_cast)[name = tensor("zero_mean_sq_217_cast")]; + tensor var_7336 = const()[name = tensor("op_7336"), val = tensor([1])]; + tensor var_7337_cast = reduce_mean(axes = var_7336, keep_dims = var_6860, x = zero_mean_sq_217_cast)[name = tensor("op_7337_cast")]; + tensor var_7338_to_fp16 = const()[name = tensor("op_7338_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7339_cast = add(x = var_7337_cast, y = var_7338_to_fp16)[name = tensor("op_7339_cast")]; + tensor denom_217_epsilon_0_to_fp16 = const()[name = tensor("denom_217_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_217_cast = rsqrt(epsilon = denom_217_epsilon_0_to_fp16, x = var_7339_cast)[name = tensor("denom_217_cast")]; + tensor out_217_cast = mul(x = zero_mean_217_cast, y = denom_217_cast)[name = tensor("out_217_cast")]; + tensor var_7343_to_fp16 = const()[name = tensor("op_7343_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(989993856)))]; + tensor var_7344_cast = add(x = out_217_cast, y = var_7343_to_fp16)[name = tensor("op_7344_cast")]; + tensor var_7346_to_fp16 = const()[name = tensor("op_7346_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(989996480)))]; + tensor hidden_states_299_cast = mul(x = var_7344_cast, y = var_7346_to_fp16)[name = tensor("hidden_states_299_cast")]; + tensor var_7353 = const()[name = tensor("op_7353"), val = tensor([1, 1])]; + tensor var_7355 = const()[name = tensor("op_7355"), val = tensor([1, 1])]; + tensor q_145_pad_type_0 = const()[name = tensor("q_145_pad_type_0"), val = tensor("custom")]; + tensor q_145_pad_0 = const()[name = tensor("q_145_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(989999104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(990818368))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_145_cast = conv(dilations = var_7355, groups = var_6865, pad = q_145_pad_0, pad_type = q_145_pad_type_0, strides = var_7353, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_299_cast)[name = tensor("q_145_cast")]; + tensor var_7359 = const()[name = tensor("op_7359"), val = tensor([1, 1])]; + tensor var_7361 = const()[name = tensor("op_7361"), val = tensor([1, 1])]; + tensor k_145_pad_type_0 = const()[name = tensor("k_145_pad_type_0"), val = tensor("custom")]; + tensor k_145_pad_0 = const()[name = tensor("k_145_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(990818496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(991637760))), name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_145_cast = conv(dilations = var_7361, groups = var_6865, pad = k_145_pad_0, pad_type = k_145_pad_type_0, strides = var_7359, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_299_cast)[name = tensor("k_145_cast")]; + tensor var_7365 = const()[name = tensor("op_7365"), val = tensor([1, 1])]; + tensor var_7367 = const()[name = tensor("op_7367"), val = tensor([1, 1])]; + tensor v_145_pad_type_0 = const()[name = tensor("v_145_pad_type_0"), val = tensor("custom")]; + tensor v_145_pad_0 = const()[name = tensor("v_145_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(991637888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(992866752))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_145_cast = conv(dilations = var_7367, groups = var_6865, pad = v_145_pad_0, pad_type = v_145_pad_type_0, strides = var_7365, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_299_cast)[name = tensor("v_145_cast")]; + tensor var_7371 = const()[name = tensor("op_7371"), val = tensor([2, 20, 64, -1])]; + tensor var_7372_cast = reshape(shape = var_7371, x = q_145_cast)[name = tensor("op_7372_cast")]; + tensor var_7373 = const()[name = tensor("op_7373"), val = tensor([2, 20, 64, -1])]; + tensor var_7374_cast = reshape(shape = var_7373, x = k_145_cast)[name = tensor("op_7374_cast")]; + tensor var_7375 = const()[name = tensor("op_7375"), val = tensor([2, 20, 64, -1])]; + tensor var_7376_cast = reshape(shape = var_7375, x = v_145_cast)[name = tensor("op_7376_cast")]; + tensor attn_weights_289_transpose_x_0 = const()[name = tensor("attn_weights_289_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_289_transpose_y_0 = const()[name = tensor("attn_weights_289_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_289_cast = matmul(transpose_x = attn_weights_289_transpose_x_0, transpose_y = attn_weights_289_transpose_y_0, x = var_7372_cast, y = var_7374_cast)[name = tensor("attn_weights_289_cast")]; + tensor attn_weights_291_cast = mul(x = attn_weights_289_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_291_cast")]; + tensor var_7380_cast = softmax(axis = var_6849, x = attn_weights_291_cast)[name = tensor("op_7380_cast")]; + tensor attn_145_transpose_x_0 = const()[name = tensor("attn_145_transpose_x_0"), val = tensor(false)]; + tensor attn_145_transpose_y_0 = const()[name = tensor("attn_145_transpose_y_0"), val = tensor(true)]; + tensor attn_145_cast = matmul(transpose_x = attn_145_transpose_x_0, transpose_y = attn_145_transpose_y_0, x = var_7376_cast, y = var_7380_cast)[name = tensor("attn_145_cast")]; + tensor var_7384 = const()[name = tensor("op_7384"), val = tensor([2, 1280, 1, -1])]; + tensor input_455_cast = reshape(shape = var_7384, x = attn_145_cast)[name = tensor("input_455_cast")]; + tensor var_7389 = const()[name = tensor("op_7389"), val = tensor([1, 1])]; + tensor var_7391 = const()[name = tensor("op_7391"), val = tensor([1, 1])]; + tensor var_7393_pad_type_0 = const()[name = 
tensor("op_7393_pad_type_0"), val = tensor("custom")]; + tensor var_7393_pad_0 = const()[name = tensor("op_7393_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(992866944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(994095808))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(994096000)))]; + tensor var_7393_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_7391, groups = var_6865, pad = var_7393_pad_0, pad_type = var_7393_pad_type_0, strides = var_7389, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_455_cast)[name = tensor("op_7393_cast")]; + tensor inputs_219_cast = add(x = var_7393_cast, y = inputs_217_cast)[name = tensor("inputs_219_cast")]; + tensor var_7397 = const()[name = tensor("op_7397"), val = tensor([1])]; + tensor channels_mean_219_cast = reduce_mean(axes = var_7397, keep_dims = var_6860, x = inputs_219_cast)[name = tensor("channels_mean_219_cast")]; + tensor zero_mean_219_cast = sub(x = inputs_219_cast, y = channels_mean_219_cast)[name = tensor("zero_mean_219_cast")]; + tensor zero_mean_sq_219_cast = mul(x = zero_mean_219_cast, y = zero_mean_219_cast)[name = tensor("zero_mean_sq_219_cast")]; + tensor var_7401 = const()[name = tensor("op_7401"), val = tensor([1])]; + tensor var_7402_cast = reduce_mean(axes = var_7401, keep_dims = var_6860, x = zero_mean_sq_219_cast)[name = tensor("op_7402_cast")]; + tensor var_7403_to_fp16 = const()[name = tensor("op_7403_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7404_cast = add(x = var_7402_cast, y = var_7403_to_fp16)[name = tensor("op_7404_cast")]; + tensor denom_219_epsilon_0_to_fp16 = const()[name = tensor("denom_219_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_219_cast = rsqrt(epsilon = denom_219_epsilon_0_to_fp16, x = var_7404_cast)[name = tensor("denom_219_cast")]; + tensor out_219_cast = mul(x = zero_mean_219_cast, y = denom_219_cast)[name = tensor("out_219_cast")]; + tensor var_7408_to_fp16 = const()[name = tensor("op_7408_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(994098624)))]; + tensor var_7409_cast = add(x = out_219_cast, y = var_7408_to_fp16)[name = tensor("op_7409_cast")]; + tensor var_7411_to_fp16 = const()[name = tensor("op_7411_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(994101248)))]; + tensor hidden_states_301_cast = mul(x = var_7409_cast, y = var_7411_to_fp16)[name = tensor("hidden_states_301_cast")]; + tensor var_7418 = const()[name = tensor("op_7418"), val = tensor([1, 1])]; + tensor var_7420 = const()[name = tensor("op_7420"), val = tensor([1, 1])]; + tensor q_147_pad_type_0 = const()[name = tensor("q_147_pad_type_0"), val = tensor("custom")]; + tensor q_147_pad_0 = const()[name = tensor("q_147_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(994103872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(994923136))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_147_cast = conv(dilations = var_7420, groups = var_6865, pad = q_147_pad_0, pad_type = q_147_pad_type_0, strides = var_7418, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_301_cast)[name = tensor("q_147_cast")]; + tensor var_7424 = const()[name = tensor("op_7424"), val = tensor([1, 1])]; + tensor var_7426 = const()[name = tensor("op_7426"), val = tensor([1, 1])]; + tensor k_147_pad_type_0 = const()[name = tensor("k_147_pad_type_0"), val = tensor("custom")]; + tensor k_147_pad_0 = const()[name = tensor("k_147_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(994923264))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(996889408))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_147_cast = conv(dilations = var_7426, groups = var_6865, pad = k_147_pad_0, pad_type = k_147_pad_type_0, strides = var_7424, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_147_cast")]; + tensor var_7430 = const()[name = tensor("op_7430"), val = tensor([1, 1])]; + tensor var_7432 = const()[name = tensor("op_7432"), val = tensor([1, 1])]; + tensor v_147_pad_type_0 = const()[name = tensor("v_147_pad_type_0"), val = tensor("custom")]; + tensor v_147_pad_0 = const()[name = tensor("v_147_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(996889600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(998855744))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_147_cast = conv(dilations = var_7432, groups = var_6865, pad = v_147_pad_0, pad_type = v_147_pad_type_0, strides = var_7430, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_147_cast")]; + tensor var_7436 = const()[name = tensor("op_7436"), val = tensor([2, 20, 64, -1])]; + tensor var_7437_cast = reshape(shape = var_7436, x = q_147_cast)[name = tensor("op_7437_cast")]; + tensor var_7438 = const()[name = tensor("op_7438"), val = tensor([2, 20, 64, -1])]; + tensor var_7439_cast = reshape(shape = var_7438, x = k_147_cast)[name = tensor("op_7439_cast")]; + tensor var_7440 = const()[name = tensor("op_7440"), val = tensor([2, 20, 64, -1])]; + tensor var_7441_cast = reshape(shape = var_7440, x = v_147_cast)[name = tensor("op_7441_cast")]; + tensor attn_weights_293_transpose_x_0 = const()[name = 
tensor("attn_weights_293_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_293_transpose_y_0 = const()[name = tensor("attn_weights_293_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_293_cast = matmul(transpose_x = attn_weights_293_transpose_x_0, transpose_y = attn_weights_293_transpose_y_0, x = var_7437_cast, y = var_7439_cast)[name = tensor("attn_weights_293_cast")]; + tensor attn_weights_295_cast = mul(x = attn_weights_293_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_295_cast")]; + tensor var_7445_cast = softmax(axis = var_6849, x = attn_weights_295_cast)[name = tensor("op_7445_cast")]; + tensor attn_147_transpose_x_0 = const()[name = tensor("attn_147_transpose_x_0"), val = tensor(false)]; + tensor attn_147_transpose_y_0 = const()[name = tensor("attn_147_transpose_y_0"), val = tensor(true)]; + tensor attn_147_cast = matmul(transpose_x = attn_147_transpose_x_0, transpose_y = attn_147_transpose_y_0, x = var_7441_cast, y = var_7445_cast)[name = tensor("attn_147_cast")]; + tensor var_7449 = const()[name = tensor("op_7449"), val = tensor([2, 1280, 1, -1])]; + tensor input_457_cast = reshape(shape = var_7449, x = attn_147_cast)[name = tensor("input_457_cast")]; + tensor var_7454 = const()[name = tensor("op_7454"), val = tensor([1, 1])]; + tensor var_7456 = const()[name = tensor("op_7456"), val = tensor([1, 1])]; + tensor var_7458_pad_type_0 = const()[name = tensor("op_7458_pad_type_0"), val = tensor("custom")]; + tensor var_7458_pad_0 = const()[name = tensor("op_7458_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(998855936))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(999675200))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(999675328)))]; + tensor var_7458_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_7456, groups = var_6865, pad = var_7458_pad_0, pad_type = var_7458_pad_type_0, strides = var_7454, weight = up_blocks_0_attentions_0_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_457_cast)[name = tensor("op_7458_cast")]; + tensor inputs_221_cast = add(x = var_7458_cast, y = inputs_219_cast)[name = tensor("inputs_221_cast")]; + tensor var_7462 = const()[name = tensor("op_7462"), val = tensor([1])]; + tensor channels_mean_221_cast = reduce_mean(axes = var_7462, keep_dims = var_6860, x = inputs_221_cast)[name = tensor("channels_mean_221_cast")]; + tensor zero_mean_221_cast = sub(x = inputs_221_cast, y = channels_mean_221_cast)[name = tensor("zero_mean_221_cast")]; + tensor zero_mean_sq_221_cast = mul(x = zero_mean_221_cast, y = zero_mean_221_cast)[name = tensor("zero_mean_sq_221_cast")]; + tensor var_7466 = const()[name = tensor("op_7466"), val = tensor([1])]; + tensor var_7467_cast = reduce_mean(axes = var_7466, keep_dims = var_6860, x = zero_mean_sq_221_cast)[name = tensor("op_7467_cast")]; + tensor var_7468_to_fp16 = const()[name = tensor("op_7468_to_fp16"), val = 
tensor(0x1.5p-17)]; + tensor var_7469_cast = add(x = var_7467_cast, y = var_7468_to_fp16)[name = tensor("op_7469_cast")]; + tensor denom_221_epsilon_0_to_fp16 = const()[name = tensor("denom_221_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_221_cast = rsqrt(epsilon = denom_221_epsilon_0_to_fp16, x = var_7469_cast)[name = tensor("denom_221_cast")]; + tensor out_221_cast = mul(x = zero_mean_221_cast, y = denom_221_cast)[name = tensor("out_221_cast")]; + tensor var_7473_to_fp16 = const()[name = tensor("op_7473_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(999677952)))]; + tensor var_7474_cast = add(x = out_221_cast, y = var_7473_to_fp16)[name = tensor("op_7474_cast")]; + tensor var_7476_to_fp16 = const()[name = tensor("op_7476_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(999680576)))]; + tensor input_459_cast = mul(x = var_7474_cast, y = var_7476_to_fp16)[name = tensor("input_459_cast")]; + tensor var_7484 = const()[name = tensor("op_7484"), val = tensor([1, 1])]; + tensor var_7486 = const()[name = tensor("op_7486"), val = tensor([1, 1])]; + tensor var_7488_pad_type_0 = const()[name = tensor("op_7488_pad_type_0"), val = tensor("custom")]; + tensor var_7488_pad_0 = const()[name = tensor("op_7488_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(999683200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1009513664))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1009513856)))]; + tensor var_7488_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_7486, groups = var_6865, pad = var_7488_pad_0, pad_type = var_7488_pad_type_0, strides = var_7484, weight = up_blocks_0_attentions_0_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_459_cast)[name = tensor("op_7488_cast")]; + tensor var_7489_split_sizes_0 = const()[name = tensor("op_7489_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7489_axis_0 = const()[name = tensor("op_7489_axis_0"), val = tensor(1)]; + tensor var_7489_cast_0, tensor var_7489_cast_1 = split(axis = var_7489_axis_0, split_sizes = var_7489_split_sizes_0, x = var_7488_cast)[name = tensor("op_7489_cast")]; + tensor var_7491_mode_0 = const()[name = tensor("op_7491_mode_0"), val = tensor("EXACT")]; + tensor var_7491_cast = gelu(mode = var_7491_mode_0, x = var_7489_cast_1)[name = tensor("op_7491_cast")]; + tensor input_461_cast = mul(x = var_7489_cast_0, y = var_7491_cast)[name = tensor("input_461_cast")]; + tensor var_7495 = const()[name = tensor("op_7495"), val = tensor([1, 1])]; + tensor var_7497 = const()[name = tensor("op_7497"), val = tensor([1, 1])]; + tensor var_7499_pad_type_0 = const()[name = tensor("op_7499_pad_type_0"), val = tensor("custom")]; + tensor var_7499_pad_0 = const()[name = tensor("op_7499_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1009534400))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1016088064))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1016088640)))]; + tensor var_7499_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_7497, groups = var_6865, pad = var_7499_pad_0, pad_type = var_7499_pad_type_0, strides = var_7495, weight = up_blocks_0_attentions_0_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_461_cast)[name = tensor("op_7499_cast")]; + tensor inputs_223_cast = add(x = var_7499_cast, y = inputs_221_cast)[name = tensor("inputs_223_cast")]; + tensor var_7509 = const()[name = tensor("op_7509"), val = tensor([1])]; + tensor channels_mean_223_cast = reduce_mean(axes = var_7509, keep_dims = var_6860, x = inputs_223_cast)[name = tensor("channels_mean_223_cast")]; + tensor zero_mean_223_cast = sub(x = inputs_223_cast, y = channels_mean_223_cast)[name = tensor("zero_mean_223_cast")]; + tensor zero_mean_sq_223_cast = mul(x = zero_mean_223_cast, y = zero_mean_223_cast)[name = tensor("zero_mean_sq_223_cast")]; + tensor var_7513 = const()[name = tensor("op_7513"), val = tensor([1])]; + tensor var_7514_cast = reduce_mean(axes = var_7513, keep_dims = var_6860, x = zero_mean_sq_223_cast)[name = tensor("op_7514_cast")]; + tensor var_7515_to_fp16 = const()[name = tensor("op_7515_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7516_cast = add(x = var_7514_cast, y = var_7515_to_fp16)[name = tensor("op_7516_cast")]; + tensor denom_223_epsilon_0_to_fp16 = const()[name = tensor("denom_223_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_223_cast = rsqrt(epsilon = denom_223_epsilon_0_to_fp16, x = var_7516_cast)[name = tensor("denom_223_cast")]; + tensor out_223_cast = mul(x = zero_mean_223_cast, y = denom_223_cast)[name = tensor("out_223_cast")]; + tensor var_7520_to_fp16 = const()[name = tensor("op_7520_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1016091264)))]; + tensor var_7521_cast = add(x = out_223_cast, y = var_7520_to_fp16)[name = tensor("op_7521_cast")]; + tensor var_7523_to_fp16 = const()[name = tensor("op_7523_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1016093888)))]; + tensor hidden_states_305_cast = mul(x = var_7521_cast, y = var_7523_to_fp16)[name = tensor("hidden_states_305_cast")]; + tensor var_7530 = const()[name = tensor("op_7530"), val = tensor([1, 1])]; + tensor var_7532 = const()[name = tensor("op_7532"), val = tensor([1, 1])]; + tensor q_149_pad_type_0 = const()[name = tensor("q_149_pad_type_0"), val = tensor("custom")]; + tensor q_149_pad_0 = const()[name = tensor("q_149_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1016096512))), 
lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1017325376))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_149_cast = conv(dilations = var_7532, groups = var_6865, pad = q_149_pad_0, pad_type = q_149_pad_type_0, strides = var_7530, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_305_cast)[name = tensor("q_149_cast")]; + tensor var_7536 = const()[name = tensor("op_7536"), val = tensor([1, 1])]; + tensor var_7538 = const()[name = tensor("op_7538"), val = tensor([1, 1])]; + tensor k_149_pad_type_0 = const()[name = tensor("k_149_pad_type_0"), val = tensor("custom")]; + tensor k_149_pad_0 = const()[name = tensor("k_149_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1017325568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1018554432))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_149_cast = conv(dilations = var_7538, groups = var_6865, pad = k_149_pad_0, pad_type = k_149_pad_type_0, strides = var_7536, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_305_cast)[name = tensor("k_149_cast")]; + tensor var_7542 = const()[name = tensor("op_7542"), val = tensor([1, 1])]; + tensor var_7544 = const()[name = tensor("op_7544"), val = tensor([1, 1])]; + tensor v_149_pad_type_0 = const()[name = tensor("v_149_pad_type_0"), val = tensor("custom")]; + tensor v_149_pad_0 = const()[name = tensor("v_149_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1018554624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1020193088))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_149_cast = conv(dilations = var_7544, groups = var_6865, pad = v_149_pad_0, pad_type = v_149_pad_type_0, strides = var_7542, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_305_cast)[name = tensor("v_149_cast")]; + tensor var_7548 = const()[name = tensor("op_7548"), val = tensor([2, 20, 64, -1])]; + tensor var_7549_cast = reshape(shape = var_7548, x = q_149_cast)[name = tensor("op_7549_cast")]; + tensor var_7550 = const()[name = tensor("op_7550"), val = tensor([2, 20, 64, -1])]; + tensor var_7551_cast = reshape(shape = var_7550, x = k_149_cast)[name = tensor("op_7551_cast")]; + tensor var_7552 = const()[name = tensor("op_7552"), val = tensor([2, 20, 64, -1])]; + tensor var_7553_cast = reshape(shape = var_7552, x = v_149_cast)[name = tensor("op_7553_cast")]; + tensor attn_weights_297_transpose_x_0 = const()[name = tensor("attn_weights_297_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_297_transpose_y_0 = const()[name = tensor("attn_weights_297_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_297_cast = 
matmul(transpose_x = attn_weights_297_transpose_x_0, transpose_y = attn_weights_297_transpose_y_0, x = var_7549_cast, y = var_7551_cast)[name = tensor("attn_weights_297_cast")]; + tensor attn_weights_299_cast = mul(x = attn_weights_297_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_299_cast")]; + tensor var_7557_cast = softmax(axis = var_6849, x = attn_weights_299_cast)[name = tensor("op_7557_cast")]; + tensor attn_149_transpose_x_0 = const()[name = tensor("attn_149_transpose_x_0"), val = tensor(false)]; + tensor attn_149_transpose_y_0 = const()[name = tensor("attn_149_transpose_y_0"), val = tensor(true)]; + tensor attn_149_cast = matmul(transpose_x = attn_149_transpose_x_0, transpose_y = attn_149_transpose_y_0, x = var_7553_cast, y = var_7557_cast)[name = tensor("attn_149_cast")]; + tensor var_7561 = const()[name = tensor("op_7561"), val = tensor([2, 1280, 1, -1])]; + tensor input_463_cast = reshape(shape = var_7561, x = attn_149_cast)[name = tensor("input_463_cast")]; + tensor var_7566 = const()[name = tensor("op_7566"), val = tensor([1, 1])]; + tensor var_7568 = const()[name = tensor("op_7568"), val = tensor([1, 1])]; + tensor var_7570_pad_type_0 = const()[name = tensor("op_7570_pad_type_0"), val = tensor("custom")]; + tensor var_7570_pad_0 = const()[name = tensor("op_7570_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1020193664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021422528))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021422720)))]; + tensor var_7570_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_7568, groups = var_6865, pad = var_7570_pad_0, pad_type = var_7570_pad_type_0, strides = var_7566, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_463_cast)[name = tensor("op_7570_cast")]; + tensor inputs_225_cast = add(x = var_7570_cast, y = inputs_223_cast)[name = tensor("inputs_225_cast")]; + tensor var_7574 = const()[name = tensor("op_7574"), val = tensor([1])]; + tensor channels_mean_225_cast = reduce_mean(axes = var_7574, keep_dims = var_6860, x = inputs_225_cast)[name = tensor("channels_mean_225_cast")]; + tensor zero_mean_225_cast = sub(x = inputs_225_cast, y = channels_mean_225_cast)[name = tensor("zero_mean_225_cast")]; + tensor zero_mean_sq_225_cast = mul(x = zero_mean_225_cast, y = zero_mean_225_cast)[name = tensor("zero_mean_sq_225_cast")]; + tensor var_7578 = const()[name = tensor("op_7578"), val = tensor([1])]; + tensor var_7579_cast = reduce_mean(axes = var_7578, keep_dims = var_6860, x = zero_mean_sq_225_cast)[name = tensor("op_7579_cast")]; + tensor var_7580_to_fp16 = const()[name = tensor("op_7580_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7581_cast = add(x = var_7579_cast, y = var_7580_to_fp16)[name = tensor("op_7581_cast")]; + tensor denom_225_epsilon_0_to_fp16 = const()[name = 
tensor("denom_225_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_225_cast = rsqrt(epsilon = denom_225_epsilon_0_to_fp16, x = var_7581_cast)[name = tensor("denom_225_cast")]; + tensor out_225_cast = mul(x = zero_mean_225_cast, y = denom_225_cast)[name = tensor("out_225_cast")]; + tensor var_7585_to_fp16 = const()[name = tensor("op_7585_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021425344)))]; + tensor var_7586_cast = add(x = out_225_cast, y = var_7585_to_fp16)[name = tensor("op_7586_cast")]; + tensor var_7588_to_fp16 = const()[name = tensor("op_7588_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021427968)))]; + tensor hidden_states_307_cast = mul(x = var_7586_cast, y = var_7588_to_fp16)[name = tensor("hidden_states_307_cast")]; + tensor var_7595 = const()[name = tensor("op_7595"), val = tensor([1, 1])]; + tensor var_7597 = const()[name = tensor("op_7597"), val = tensor([1, 1])]; + tensor q_151_pad_type_0 = const()[name = tensor("q_151_pad_type_0"), val = tensor("custom")]; + tensor q_151_pad_0 = const()[name = tensor("q_151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021430592))), lut = tensor([-0x1.2d4p-5, -0x1.6a8p-7, 0x1.68cp-7, 0x1.2dp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_151_cast = conv(dilations = var_7597, groups = var_6865, pad = q_151_pad_0, pad_type = q_151_pad_type_0, strides = var_7595, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_307_cast)[name = tensor("q_151_cast")]; + tensor var_7601 = const()[name = tensor("op_7601"), val = tensor([1, 1])]; + tensor var_7603 = const()[name = tensor("op_7603"), val = tensor([1, 1])]; + tensor k_151_pad_type_0 = const()[name = tensor("k_151_pad_type_0"), val = tensor("custom")]; + tensor k_151_pad_0 = const()[name = tensor("k_151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1021840256))), lut = tensor([-0x1.07cp-5, -0x1.39p-7, 0x1.398p-7, 0x1.084p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_151_cast = conv(dilations = var_7603, groups = var_6865, pad = k_151_pad_0, pad_type = k_151_pad_type_0, strides = var_7601, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_151_cast")]; + tensor var_7607 = const()[name = tensor("op_7607"), val = tensor([1, 1])]; + tensor var_7609 = const()[name = tensor("op_7609"), val = tensor([1, 1])]; + tensor v_151_pad_type_0 = const()[name = tensor("v_151_pad_type_0"), val = tensor("custom")]; + tensor v_151_pad_0 = const()[name = tensor("v_151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1022495680))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1024461824))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_151_cast = conv(dilations = var_7609, groups = var_6865, pad = v_151_pad_0, pad_type = v_151_pad_type_0, strides = var_7607, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_151_cast")]; + tensor var_7613 = const()[name = tensor("op_7613"), val = tensor([2, 20, 64, -1])]; + tensor var_7614_cast = reshape(shape = var_7613, x = q_151_cast)[name = tensor("op_7614_cast")]; + tensor var_7615 = const()[name = tensor("op_7615"), val = tensor([2, 20, 64, -1])]; + tensor var_7616_cast = reshape(shape = var_7615, x = k_151_cast)[name = tensor("op_7616_cast")]; + tensor var_7617 = const()[name = tensor("op_7617"), val = tensor([2, 20, 64, -1])]; + tensor var_7618_cast = reshape(shape = var_7617, x = v_151_cast)[name = tensor("op_7618_cast")]; + tensor attn_weights_301_transpose_x_0 = const()[name = tensor("attn_weights_301_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_301_transpose_y_0 = const()[name = tensor("attn_weights_301_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_301_cast = matmul(transpose_x = attn_weights_301_transpose_x_0, transpose_y = attn_weights_301_transpose_y_0, x = var_7614_cast, y = var_7616_cast)[name = tensor("attn_weights_301_cast")]; + tensor attn_weights_303_cast = mul(x = attn_weights_301_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_303_cast")]; + tensor var_7622_cast = softmax(axis = var_6849, x = attn_weights_303_cast)[name = tensor("op_7622_cast")]; + tensor attn_151_transpose_x_0 = const()[name = tensor("attn_151_transpose_x_0"), val = tensor(false)]; + tensor attn_151_transpose_y_0 = const()[name = tensor("attn_151_transpose_y_0"), val = tensor(true)]; + tensor attn_151_cast = matmul(transpose_x = attn_151_transpose_x_0, transpose_y = attn_151_transpose_y_0, x = var_7618_cast, y = var_7622_cast)[name = tensor("attn_151_cast")]; + tensor var_7626 = const()[name = tensor("op_7626"), val = tensor([2, 1280, 1, -1])]; + tensor input_465_cast = reshape(shape = var_7626, x = attn_151_cast)[name = tensor("input_465_cast")]; + tensor var_7631 = const()[name = tensor("op_7631"), val = tensor([1, 1])]; + tensor var_7633 = const()[name = tensor("op_7633"), val = tensor([1, 1])]; + tensor var_7635_pad_type_0 = const()[name = tensor("op_7635_pad_type_0"), val = tensor("custom")]; + tensor var_7635_pad_0 = const()[name = tensor("op_7635_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1024462016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1025281280))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1025281408)))]; + tensor var_7635_cast = conv(bias = 
up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_7633, groups = var_6865, pad = var_7635_pad_0, pad_type = var_7635_pad_type_0, strides = var_7631, weight = up_blocks_0_attentions_0_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_465_cast)[name = tensor("op_7635_cast")]; + tensor inputs_227_cast = add(x = var_7635_cast, y = inputs_225_cast)[name = tensor("inputs_227_cast")]; + tensor var_7639 = const()[name = tensor("op_7639"), val = tensor([1])]; + tensor channels_mean_227_cast = reduce_mean(axes = var_7639, keep_dims = var_6860, x = inputs_227_cast)[name = tensor("channels_mean_227_cast")]; + tensor zero_mean_227_cast = sub(x = inputs_227_cast, y = channels_mean_227_cast)[name = tensor("zero_mean_227_cast")]; + tensor zero_mean_sq_227_cast = mul(x = zero_mean_227_cast, y = zero_mean_227_cast)[name = tensor("zero_mean_sq_227_cast")]; + tensor var_7643 = const()[name = tensor("op_7643"), val = tensor([1])]; + tensor var_7644_cast = reduce_mean(axes = var_7643, keep_dims = var_6860, x = zero_mean_sq_227_cast)[name = tensor("op_7644_cast")]; + tensor var_7645_to_fp16 = const()[name = tensor("op_7645_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7646_cast = add(x = var_7644_cast, y = var_7645_to_fp16)[name = tensor("op_7646_cast")]; + tensor denom_227_epsilon_0_to_fp16 = const()[name = tensor("denom_227_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_227_cast = rsqrt(epsilon = denom_227_epsilon_0_to_fp16, x = var_7646_cast)[name = tensor("denom_227_cast")]; + tensor out_227_cast = mul(x = zero_mean_227_cast, y = denom_227_cast)[name = tensor("out_227_cast")]; + tensor var_7650_to_fp16 = const()[name = tensor("op_7650_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1025284032)))]; + tensor var_7651_cast = add(x = out_227_cast, y = var_7650_to_fp16)[name = tensor("op_7651_cast")]; + tensor var_7653_to_fp16 = const()[name = tensor("op_7653_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1025286656)))]; + tensor input_467_cast = mul(x = var_7651_cast, y = var_7653_to_fp16)[name = tensor("input_467_cast")]; + tensor var_7661 = const()[name = tensor("op_7661"), val = tensor([1, 1])]; + tensor var_7663 = const()[name = tensor("op_7663"), val = tensor([1, 1])]; + tensor var_7665_pad_type_0 = const()[name = tensor("op_7665_pad_type_0"), val = tensor("custom")]; + tensor var_7665_pad_0 = const()[name = tensor("op_7665_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1025289280))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1035119744))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1035119936)))]; + tensor var_7665_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_7663, groups = var_6865, pad = var_7665_pad_0, pad_type = var_7665_pad_type_0, strides = var_7661, 
weight = up_blocks_0_attentions_0_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_467_cast)[name = tensor("op_7665_cast")]; + tensor var_7666_split_sizes_0 = const()[name = tensor("op_7666_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7666_axis_0 = const()[name = tensor("op_7666_axis_0"), val = tensor(1)]; + tensor var_7666_cast_0, tensor var_7666_cast_1 = split(axis = var_7666_axis_0, split_sizes = var_7666_split_sizes_0, x = var_7665_cast)[name = tensor("op_7666_cast")]; + tensor var_7668_mode_0 = const()[name = tensor("op_7668_mode_0"), val = tensor("EXACT")]; + tensor var_7668_cast = gelu(mode = var_7668_mode_0, x = var_7666_cast_1)[name = tensor("op_7668_cast")]; + tensor input_469_cast = mul(x = var_7666_cast_0, y = var_7668_cast)[name = tensor("input_469_cast")]; + tensor var_7672 = const()[name = tensor("op_7672"), val = tensor([1, 1])]; + tensor var_7674 = const()[name = tensor("op_7674"), val = tensor([1, 1])]; + tensor var_7676_pad_type_0 = const()[name = tensor("op_7676_pad_type_0"), val = tensor("custom")]; + tensor var_7676_pad_0 = const()[name = tensor("op_7676_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1035140480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040055744))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040055936)))]; + tensor var_7676_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_7674, groups = var_6865, pad = var_7676_pad_0, pad_type = var_7676_pad_type_0, strides = var_7672, weight = up_blocks_0_attentions_0_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_469_cast)[name = tensor("op_7676_cast")]; + tensor inputs_229_cast = add(x = var_7676_cast, y = inputs_227_cast)[name = tensor("inputs_229_cast")]; + tensor var_7686 = const()[name = tensor("op_7686"), val = tensor([1])]; + tensor channels_mean_229_cast = reduce_mean(axes = var_7686, keep_dims = var_6860, x = inputs_229_cast)[name = tensor("channels_mean_229_cast")]; + tensor zero_mean_229_cast = sub(x = inputs_229_cast, y = channels_mean_229_cast)[name = tensor("zero_mean_229_cast")]; + tensor zero_mean_sq_229_cast = mul(x = zero_mean_229_cast, y = zero_mean_229_cast)[name = tensor("zero_mean_sq_229_cast")]; + tensor var_7690 = const()[name = tensor("op_7690"), val = tensor([1])]; + tensor var_7691_cast = reduce_mean(axes = var_7690, keep_dims = var_6860, x = zero_mean_sq_229_cast)[name = tensor("op_7691_cast")]; + tensor var_7692_to_fp16 = const()[name = tensor("op_7692_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7693_cast = add(x = var_7691_cast, y = var_7692_to_fp16)[name = tensor("op_7693_cast")]; + tensor denom_229_epsilon_0_to_fp16 = const()[name = tensor("denom_229_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_229_cast = rsqrt(epsilon = denom_229_epsilon_0_to_fp16, x = var_7693_cast)[name = tensor("denom_229_cast")]; + tensor out_229_cast = mul(x = zero_mean_229_cast, y = 
denom_229_cast)[name = tensor("out_229_cast")]; + tensor var_7697_to_fp16 = const()[name = tensor("op_7697_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040058560)))]; + tensor var_7698_cast = add(x = out_229_cast, y = var_7697_to_fp16)[name = tensor("op_7698_cast")]; + tensor var_7700_to_fp16 = const()[name = tensor("op_7700_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040061184)))]; + tensor hidden_states_311_cast = mul(x = var_7698_cast, y = var_7700_to_fp16)[name = tensor("hidden_states_311_cast")]; + tensor var_7707 = const()[name = tensor("op_7707"), val = tensor([1, 1])]; + tensor var_7709 = const()[name = tensor("op_7709"), val = tensor([1, 1])]; + tensor q_153_pad_type_0 = const()[name = tensor("q_153_pad_type_0"), val = tensor("custom")]; + tensor q_153_pad_0 = const()[name = tensor("q_153_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040063808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040883072))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_153_cast = conv(dilations = var_7709, groups = var_6865, pad = q_153_pad_0, pad_type = q_153_pad_type_0, strides = var_7707, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_311_cast)[name = tensor("q_153_cast")]; + tensor var_7713 = const()[name = tensor("op_7713"), val = tensor([1, 1])]; + tensor var_7715 = const()[name = tensor("op_7715"), val = tensor([1, 1])]; + tensor k_153_pad_type_0 = const()[name = tensor("k_153_pad_type_0"), val = tensor("custom")]; + tensor k_153_pad_0 = const()[name = tensor("k_153_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1040883200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1041702464))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_153_cast = conv(dilations = var_7715, groups = var_6865, pad = k_153_pad_0, pad_type = k_153_pad_type_0, strides = var_7713, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_311_cast)[name = tensor("k_153_cast")]; + tensor var_7719 = const()[name = tensor("op_7719"), val = tensor([1, 1])]; + tensor var_7721 = const()[name = tensor("op_7721"), val = tensor([1, 1])]; + tensor v_153_pad_type_0 = const()[name = tensor("v_153_pad_type_0"), val = tensor("custom")]; + tensor v_153_pad_0 = const()[name = tensor("v_153_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1041702592))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1042931456))), name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_153_cast = conv(dilations = var_7721, groups = var_6865, pad = v_153_pad_0, pad_type = v_153_pad_type_0, strides = var_7719, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_311_cast)[name = tensor("v_153_cast")]; + tensor var_7725 = const()[name = tensor("op_7725"), val = tensor([2, 20, 64, -1])]; + tensor var_7726_cast = reshape(shape = var_7725, x = q_153_cast)[name = tensor("op_7726_cast")]; + tensor var_7727 = const()[name = tensor("op_7727"), val = tensor([2, 20, 64, -1])]; + tensor var_7728_cast = reshape(shape = var_7727, x = k_153_cast)[name = tensor("op_7728_cast")]; + tensor var_7729 = const()[name = tensor("op_7729"), val = tensor([2, 20, 64, -1])]; + tensor var_7730_cast = reshape(shape = var_7729, x = v_153_cast)[name = tensor("op_7730_cast")]; + tensor attn_weights_305_transpose_x_0 = const()[name = tensor("attn_weights_305_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_305_transpose_y_0 = const()[name = tensor("attn_weights_305_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_305_cast = matmul(transpose_x = attn_weights_305_transpose_x_0, transpose_y = attn_weights_305_transpose_y_0, x = var_7726_cast, y = var_7728_cast)[name = tensor("attn_weights_305_cast")]; + tensor attn_weights_307_cast = mul(x = attn_weights_305_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_307_cast")]; + tensor var_7734_cast = softmax(axis = var_6849, x = attn_weights_307_cast)[name = tensor("op_7734_cast")]; + tensor attn_153_transpose_x_0 = const()[name = tensor("attn_153_transpose_x_0"), val = tensor(false)]; + tensor attn_153_transpose_y_0 = const()[name = tensor("attn_153_transpose_y_0"), val = tensor(true)]; + tensor attn_153_cast = matmul(transpose_x = attn_153_transpose_x_0, transpose_y = attn_153_transpose_y_0, x = var_7730_cast, y = var_7734_cast)[name = tensor("attn_153_cast")]; + tensor var_7738 = const()[name = tensor("op_7738"), val = tensor([2, 1280, 1, -1])]; + tensor input_471_cast = reshape(shape = var_7738, x = attn_153_cast)[name = tensor("input_471_cast")]; + tensor var_7743 = const()[name = tensor("op_7743"), val = tensor([1, 1])]; + tensor var_7745 = const()[name = tensor("op_7745"), val = tensor([1, 1])]; + tensor var_7747_pad_type_0 = const()[name = tensor("op_7747_pad_type_0"), val = tensor("custom")]; + tensor var_7747_pad_0 = const()[name = tensor("op_7747_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1042931648))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1044160512))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1044160704)))]; + tensor var_7747_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_7745, groups = var_6865, pad = var_7747_pad_0, pad_type = 
var_7747_pad_type_0, strides = var_7743, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_471_cast)[name = tensor("op_7747_cast")]; + tensor inputs_231_cast = add(x = var_7747_cast, y = inputs_229_cast)[name = tensor("inputs_231_cast")]; + tensor var_7751 = const()[name = tensor("op_7751"), val = tensor([1])]; + tensor channels_mean_231_cast = reduce_mean(axes = var_7751, keep_dims = var_6860, x = inputs_231_cast)[name = tensor("channels_mean_231_cast")]; + tensor zero_mean_231_cast = sub(x = inputs_231_cast, y = channels_mean_231_cast)[name = tensor("zero_mean_231_cast")]; + tensor zero_mean_sq_231_cast = mul(x = zero_mean_231_cast, y = zero_mean_231_cast)[name = tensor("zero_mean_sq_231_cast")]; + tensor var_7755 = const()[name = tensor("op_7755"), val = tensor([1])]; + tensor var_7756_cast = reduce_mean(axes = var_7755, keep_dims = var_6860, x = zero_mean_sq_231_cast)[name = tensor("op_7756_cast")]; + tensor var_7757_to_fp16 = const()[name = tensor("op_7757_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7758_cast = add(x = var_7756_cast, y = var_7757_to_fp16)[name = tensor("op_7758_cast")]; + tensor denom_231_epsilon_0_to_fp16 = const()[name = tensor("denom_231_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_231_cast = rsqrt(epsilon = denom_231_epsilon_0_to_fp16, x = var_7758_cast)[name = tensor("denom_231_cast")]; + tensor out_231_cast = mul(x = zero_mean_231_cast, y = denom_231_cast)[name = tensor("out_231_cast")]; + tensor var_7762_to_fp16 = const()[name = tensor("op_7762_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1044163328)))]; + tensor var_7763_cast = add(x = out_231_cast, y = var_7762_to_fp16)[name = tensor("op_7763_cast")]; + tensor var_7765_to_fp16 = const()[name = tensor("op_7765_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1044165952)))]; + tensor hidden_states_313_cast = mul(x = var_7763_cast, y = var_7765_to_fp16)[name = tensor("hidden_states_313_cast")]; + tensor var_7772 = const()[name = tensor("op_7772"), val = tensor([1, 1])]; + tensor var_7774 = const()[name = tensor("op_7774"), val = tensor([1, 1])]; + tensor q_155_pad_type_0 = const()[name = tensor("q_155_pad_type_0"), val = tensor("custom")]; + tensor q_155_pad_0 = const()[name = tensor("q_155_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1044168576))), lut = tensor([-0x1.22cp-6, 0x1.234p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_155_cast = conv(dilations = var_7774, groups = var_6865, pad = q_155_pad_0, pad_type = q_155_pad_type_0, strides = var_7772, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_313_cast)[name = tensor("q_155_cast")]; + tensor var_7778 = const()[name = tensor("op_7778"), val = tensor([1, 1])]; + tensor var_7780 = const()[name = tensor("op_7780"), val = tensor([1, 1])]; + tensor k_155_pad_type_0 = const()[name = tensor("k_155_pad_type_0"), val = tensor("custom")]; + tensor k_155_pad_0 = const()[name = tensor("k_155_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1044373440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1045684224))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_155_cast = conv(dilations = var_7780, groups = var_6865, pad = k_155_pad_0, pad_type = k_155_pad_type_0, strides = var_7778, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_155_cast")]; + tensor var_7784 = const()[name = tensor("op_7784"), val = tensor([1, 1])]; + tensor var_7786 = const()[name = tensor("op_7786"), val = tensor([1, 1])]; + tensor v_155_pad_type_0 = const()[name = tensor("v_155_pad_type_0"), val = tensor("custom")]; + tensor v_155_pad_0 = const()[name = tensor("v_155_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1045684352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1046995136))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_155_cast = conv(dilations = var_7786, groups = var_6865, pad = v_155_pad_0, pad_type = v_155_pad_type_0, strides = var_7784, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_155_cast")]; + tensor var_7790 = const()[name = tensor("op_7790"), val = tensor([2, 20, 64, -1])]; + tensor var_7791_cast = reshape(shape = var_7790, x = q_155_cast)[name = tensor("op_7791_cast")]; + tensor var_7792 = const()[name = tensor("op_7792"), val = tensor([2, 20, 64, -1])]; + tensor var_7793_cast = reshape(shape = var_7792, x = k_155_cast)[name = tensor("op_7793_cast")]; + tensor var_7794 = const()[name = tensor("op_7794"), val = tensor([2, 20, 64, -1])]; + tensor var_7795_cast = reshape(shape = var_7794, x = v_155_cast)[name = tensor("op_7795_cast")]; + tensor attn_weights_309_transpose_x_0 = const()[name = tensor("attn_weights_309_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_309_transpose_y_0 = const()[name = tensor("attn_weights_309_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_309_cast = matmul(transpose_x = attn_weights_309_transpose_x_0, transpose_y = attn_weights_309_transpose_y_0, x = var_7791_cast, y = var_7793_cast)[name = tensor("attn_weights_309_cast")]; + tensor attn_weights_311_cast = mul(x = attn_weights_309_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_311_cast")]; + tensor var_7799_cast = softmax(axis = var_6849, x = attn_weights_311_cast)[name = tensor("op_7799_cast")]; + tensor attn_155_transpose_x_0 = const()[name = tensor("attn_155_transpose_x_0"), val = tensor(false)]; + tensor attn_155_transpose_y_0 = const()[name = tensor("attn_155_transpose_y_0"), val = tensor(true)]; + tensor attn_155_cast = matmul(transpose_x = attn_155_transpose_x_0, transpose_y = attn_155_transpose_y_0, x = var_7795_cast, y = var_7799_cast)[name = tensor("attn_155_cast")]; + tensor var_7803 = const()[name = tensor("op_7803"), val = tensor([2, 1280, 1, -1])]; + tensor input_473_cast = reshape(shape = var_7803, x = attn_155_cast)[name = 
tensor("input_473_cast")]; + tensor var_7808 = const()[name = tensor("op_7808"), val = tensor([1, 1])]; + tensor var_7810 = const()[name = tensor("op_7810"), val = tensor([1, 1])]; + tensor var_7812_pad_type_0 = const()[name = tensor("op_7812_pad_type_0"), val = tensor("custom")]; + tensor var_7812_pad_0 = const()[name = tensor("op_7812_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1046995264))), lut = tensor([-0x1.4acp-7, 0x1.494p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1047200128)))]; + tensor var_7812_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_7810, groups = var_6865, pad = var_7812_pad_0, pad_type = var_7812_pad_type_0, strides = var_7808, weight = up_blocks_0_attentions_0_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_473_cast)[name = tensor("op_7812_cast")]; + tensor inputs_233_cast = add(x = var_7812_cast, y = inputs_231_cast)[name = tensor("inputs_233_cast")]; + tensor var_7816 = const()[name = tensor("op_7816"), val = tensor([1])]; + tensor channels_mean_233_cast = reduce_mean(axes = var_7816, keep_dims = var_6860, x = inputs_233_cast)[name = tensor("channels_mean_233_cast")]; + tensor zero_mean_233_cast = sub(x = inputs_233_cast, y = channels_mean_233_cast)[name = tensor("zero_mean_233_cast")]; + tensor zero_mean_sq_233_cast = mul(x = zero_mean_233_cast, y = zero_mean_233_cast)[name = tensor("zero_mean_sq_233_cast")]; + tensor var_7820 = const()[name = tensor("op_7820"), val = tensor([1])]; + tensor var_7821_cast = reduce_mean(axes = var_7820, keep_dims = var_6860, x = zero_mean_sq_233_cast)[name = tensor("op_7821_cast")]; + tensor var_7822_to_fp16 = const()[name = tensor("op_7822_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7823_cast = add(x = var_7821_cast, y = var_7822_to_fp16)[name = tensor("op_7823_cast")]; + tensor denom_233_epsilon_0_to_fp16 = const()[name = tensor("denom_233_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_233_cast = rsqrt(epsilon = denom_233_epsilon_0_to_fp16, x = var_7823_cast)[name = tensor("denom_233_cast")]; + tensor out_233_cast = mul(x = zero_mean_233_cast, y = denom_233_cast)[name = tensor("out_233_cast")]; + tensor var_7827_to_fp16 = const()[name = tensor("op_7827_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1047202752)))]; + tensor var_7828_cast = add(x = out_233_cast, y = var_7827_to_fp16)[name = tensor("op_7828_cast")]; + tensor var_7830_to_fp16 = const()[name = tensor("op_7830_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1047205376)))]; + tensor input_475_cast = mul(x = var_7828_cast, y = var_7830_to_fp16)[name = tensor("input_475_cast")]; + tensor var_7838 = const()[name = tensor("op_7838"), val = tensor([1, 1])]; + tensor var_7840 = const()[name = tensor("op_7840"), val = tensor([1, 1])]; + tensor var_7842_pad_type_0 = const()[name = 
tensor("op_7842_pad_type_0"), val = tensor("custom")]; + tensor var_7842_pad_0 = const()[name = tensor("op_7842_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1047208000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1057038464))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1057038656)))]; + tensor var_7842_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_7840, groups = var_6865, pad = var_7842_pad_0, pad_type = var_7842_pad_type_0, strides = var_7838, weight = up_blocks_0_attentions_0_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_475_cast)[name = tensor("op_7842_cast")]; + tensor var_7843_split_sizes_0 = const()[name = tensor("op_7843_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_7843_axis_0 = const()[name = tensor("op_7843_axis_0"), val = tensor(1)]; + tensor var_7843_cast_0, tensor var_7843_cast_1 = split(axis = var_7843_axis_0, split_sizes = var_7843_split_sizes_0, x = var_7842_cast)[name = tensor("op_7843_cast")]; + tensor var_7845_mode_0 = const()[name = tensor("op_7845_mode_0"), val = tensor("EXACT")]; + tensor var_7845_cast = gelu(mode = var_7845_mode_0, x = var_7843_cast_1)[name = tensor("op_7845_cast")]; + tensor input_477_cast = mul(x = var_7843_cast_0, y = var_7845_cast)[name = tensor("input_477_cast")]; + tensor var_7849 = const()[name = tensor("op_7849"), val = tensor([1, 1])]; + tensor var_7851 = const()[name = tensor("op_7851"), val = tensor([1, 1])]; + tensor var_7853_pad_type_0 = const()[name = tensor("op_7853_pad_type_0"), val = tensor("custom")]; + tensor var_7853_pad_0 = const()[name = tensor("op_7853_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1057059200))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1061974464))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1061974656)))]; + tensor var_7853_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_7851, groups = var_6865, pad = var_7853_pad_0, pad_type = var_7853_pad_type_0, strides = var_7849, weight = up_blocks_0_attentions_0_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_477_cast)[name = tensor("op_7853_cast")]; + tensor inputs_235_cast = add(x = var_7853_cast, y = inputs_233_cast)[name = tensor("inputs_235_cast")]; + tensor var_7863 = const()[name = 
tensor("op_7863"), val = tensor([1])]; + tensor channels_mean_235_cast = reduce_mean(axes = var_7863, keep_dims = var_6860, x = inputs_235_cast)[name = tensor("channels_mean_235_cast")]; + tensor zero_mean_235_cast = sub(x = inputs_235_cast, y = channels_mean_235_cast)[name = tensor("zero_mean_235_cast")]; + tensor zero_mean_sq_235_cast = mul(x = zero_mean_235_cast, y = zero_mean_235_cast)[name = tensor("zero_mean_sq_235_cast")]; + tensor var_7867 = const()[name = tensor("op_7867"), val = tensor([1])]; + tensor var_7868_cast = reduce_mean(axes = var_7867, keep_dims = var_6860, x = zero_mean_sq_235_cast)[name = tensor("op_7868_cast")]; + tensor var_7869_to_fp16 = const()[name = tensor("op_7869_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7870_cast = add(x = var_7868_cast, y = var_7869_to_fp16)[name = tensor("op_7870_cast")]; + tensor denom_235_epsilon_0_to_fp16 = const()[name = tensor("denom_235_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_235_cast = rsqrt(epsilon = denom_235_epsilon_0_to_fp16, x = var_7870_cast)[name = tensor("denom_235_cast")]; + tensor out_235_cast = mul(x = zero_mean_235_cast, y = denom_235_cast)[name = tensor("out_235_cast")]; + tensor var_7874_to_fp16 = const()[name = tensor("op_7874_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1061977280)))]; + tensor var_7875_cast = add(x = out_235_cast, y = var_7874_to_fp16)[name = tensor("op_7875_cast")]; + tensor var_7877_to_fp16 = const()[name = tensor("op_7877_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1061979904)))]; + tensor hidden_states_317_cast = mul(x = var_7875_cast, y = var_7877_to_fp16)[name = tensor("hidden_states_317_cast")]; + tensor var_7884 = const()[name = tensor("op_7884"), val = tensor([1, 1])]; + tensor var_7886 = const()[name = tensor("op_7886"), val = tensor([1, 1])]; + tensor q_157_pad_type_0 = const()[name = tensor("q_157_pad_type_0"), val = tensor("custom")]; + tensor q_157_pad_0 = const()[name = tensor("q_157_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1061982528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062801792))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_157_cast = conv(dilations = var_7886, groups = var_6865, pad = q_157_pad_0, pad_type = q_157_pad_type_0, strides = var_7884, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_317_cast)[name = tensor("q_157_cast")]; + tensor var_7890 = const()[name = tensor("op_7890"), val = tensor([1, 1])]; + tensor var_7892 = const()[name = tensor("op_7892"), val = tensor([1, 1])]; + tensor k_157_pad_type_0 = const()[name = tensor("k_157_pad_type_0"), val = tensor("custom")]; + tensor k_157_pad_0 = const()[name = tensor("k_157_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1062801920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1063621184))), name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_157_cast = conv(dilations = var_7892, groups = var_6865, pad = k_157_pad_0, pad_type = k_157_pad_type_0, strides = var_7890, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_317_cast)[name = tensor("k_157_cast")]; + tensor var_7896 = const()[name = tensor("op_7896"), val = tensor([1, 1])]; + tensor var_7898 = const()[name = tensor("op_7898"), val = tensor([1, 1])]; + tensor v_157_pad_type_0 = const()[name = tensor("v_157_pad_type_0"), val = tensor("custom")]; + tensor v_157_pad_0 = const()[name = tensor("v_157_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1063621312))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1064850176))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_157_cast = conv(dilations = var_7898, groups = var_6865, pad = v_157_pad_0, pad_type = v_157_pad_type_0, strides = var_7896, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_317_cast)[name = tensor("v_157_cast")]; + tensor var_7902 = const()[name = tensor("op_7902"), val = tensor([2, 20, 64, -1])]; + tensor var_7903_cast = reshape(shape = var_7902, x = q_157_cast)[name = tensor("op_7903_cast")]; + tensor var_7904 = const()[name = tensor("op_7904"), val = tensor([2, 20, 64, -1])]; + tensor var_7905_cast = reshape(shape = var_7904, x = k_157_cast)[name = tensor("op_7905_cast")]; + tensor var_7906 = const()[name = tensor("op_7906"), val = tensor([2, 20, 64, -1])]; + tensor var_7907_cast = reshape(shape = var_7906, x = v_157_cast)[name = tensor("op_7907_cast")]; + tensor attn_weights_313_transpose_x_0 = const()[name = tensor("attn_weights_313_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_313_transpose_y_0 = const()[name = tensor("attn_weights_313_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_313_cast = matmul(transpose_x = attn_weights_313_transpose_x_0, transpose_y = attn_weights_313_transpose_y_0, x = var_7903_cast, y = var_7905_cast)[name = tensor("attn_weights_313_cast")]; + tensor attn_weights_315_cast = mul(x = attn_weights_313_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_315_cast")]; + tensor var_7911_cast = softmax(axis = var_6849, x = attn_weights_315_cast)[name = tensor("op_7911_cast")]; + tensor attn_157_transpose_x_0 = const()[name = tensor("attn_157_transpose_x_0"), val = tensor(false)]; + tensor attn_157_transpose_y_0 = const()[name = tensor("attn_157_transpose_y_0"), val = tensor(true)]; + tensor attn_157_cast = matmul(transpose_x = attn_157_transpose_x_0, transpose_y = attn_157_transpose_y_0, x = var_7907_cast, y = var_7911_cast)[name = tensor("attn_157_cast")]; + tensor var_7915 = const()[name = tensor("op_7915"), val = tensor([2, 1280, 1, -1])]; + tensor input_479_cast = reshape(shape = var_7915, x = attn_157_cast)[name = tensor("input_479_cast")]; + tensor var_7920 = const()[name = tensor("op_7920"), val = tensor([1, 1])]; + tensor var_7922 = const()[name = tensor("op_7922"), val = tensor([1, 1])]; + tensor var_7924_pad_type_0 = const()[name = 
tensor("op_7924_pad_type_0"), val = tensor("custom")]; + tensor var_7924_pad_0 = const()[name = tensor("op_7924_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1064850368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1066079232))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1066079424)))]; + tensor var_7924_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_7922, groups = var_6865, pad = var_7924_pad_0, pad_type = var_7924_pad_type_0, strides = var_7920, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_479_cast)[name = tensor("op_7924_cast")]; + tensor inputs_237_cast = add(x = var_7924_cast, y = inputs_235_cast)[name = tensor("inputs_237_cast")]; + tensor var_7928 = const()[name = tensor("op_7928"), val = tensor([1])]; + tensor channels_mean_237_cast = reduce_mean(axes = var_7928, keep_dims = var_6860, x = inputs_237_cast)[name = tensor("channels_mean_237_cast")]; + tensor zero_mean_237_cast = sub(x = inputs_237_cast, y = channels_mean_237_cast)[name = tensor("zero_mean_237_cast")]; + tensor zero_mean_sq_237_cast = mul(x = zero_mean_237_cast, y = zero_mean_237_cast)[name = tensor("zero_mean_sq_237_cast")]; + tensor var_7932 = const()[name = tensor("op_7932"), val = tensor([1])]; + tensor var_7933_cast = reduce_mean(axes = var_7932, keep_dims = var_6860, x = zero_mean_sq_237_cast)[name = tensor("op_7933_cast")]; + tensor var_7934_to_fp16 = const()[name = tensor("op_7934_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_7935_cast = add(x = var_7933_cast, y = var_7934_to_fp16)[name = tensor("op_7935_cast")]; + tensor denom_237_epsilon_0_to_fp16 = const()[name = tensor("denom_237_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_237_cast = rsqrt(epsilon = denom_237_epsilon_0_to_fp16, x = var_7935_cast)[name = tensor("denom_237_cast")]; + tensor out_237_cast = mul(x = zero_mean_237_cast, y = denom_237_cast)[name = tensor("out_237_cast")]; + tensor var_7939_to_fp16 = const()[name = tensor("op_7939_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1066082048)))]; + tensor var_7940_cast = add(x = out_237_cast, y = var_7939_to_fp16)[name = tensor("op_7940_cast")]; + tensor var_7942_to_fp16 = const()[name = tensor("op_7942_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1066084672)))]; + tensor hidden_states_319_cast = mul(x = var_7940_cast, y = var_7942_to_fp16)[name = tensor("hidden_states_319_cast")]; + tensor var_7949 = const()[name = tensor("op_7949"), val = tensor([1, 1])]; + tensor var_7951 = const()[name = tensor("op_7951"), val = tensor([1, 1])]; + tensor q_159_pad_type_0 = const()[name = tensor("q_159_pad_type_0"), val = tensor("custom")]; + tensor q_159_pad_0 = const()[name = tensor("q_159_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1066087296))), lut = tensor([-0x1.018p-5, -0x1.398p-7, 0x1.36cp-7, 0x1.01p-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_159_cast = conv(dilations = var_7951, groups = var_6865, pad = q_159_pad_0, pad_type = q_159_pad_type_0, strides = var_7949, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_319_cast)[name = tensor("q_159_cast")]; + tensor var_7955 = const()[name = tensor("op_7955"), val = tensor([1, 1])]; + tensor var_7957 = const()[name = tensor("op_7957"), val = tensor([1, 1])]; + tensor k_159_pad_type_0 = const()[name = tensor("k_159_pad_type_0"), val = tensor("custom")]; + tensor k_159_pad_0 = const()[name = tensor("k_159_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1066496960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1067807744))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_159_cast = conv(dilations = var_7957, groups = var_6865, pad = k_159_pad_0, pad_type = k_159_pad_type_0, strides = var_7955, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_159_cast")]; + tensor var_7961 = const()[name = tensor("op_7961"), val = tensor([1, 1])]; + tensor var_7963 = const()[name = tensor("op_7963"), val = tensor([1, 1])]; + tensor v_159_pad_type_0 = const()[name = tensor("v_159_pad_type_0"), val = tensor("custom")]; + tensor v_159_pad_0 = const()[name = tensor("v_159_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1067807872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1069118656))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_159_cast = conv(dilations = var_7963, groups = var_6865, pad = v_159_pad_0, pad_type = v_159_pad_type_0, strides = var_7961, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_159_cast")]; + tensor var_7967 = const()[name = tensor("op_7967"), val = tensor([2, 20, 64, -1])]; + tensor var_7968_cast = reshape(shape = var_7967, x = q_159_cast)[name = tensor("op_7968_cast")]; + tensor var_7969 = const()[name = tensor("op_7969"), val = tensor([2, 20, 64, -1])]; + tensor var_7970_cast = reshape(shape = var_7969, x = k_159_cast)[name = tensor("op_7970_cast")]; + tensor var_7971 = const()[name = tensor("op_7971"), val = tensor([2, 20, 64, -1])]; + tensor var_7972_cast = reshape(shape = var_7971, x = v_159_cast)[name = tensor("op_7972_cast")]; + tensor attn_weights_317_transpose_x_0 = const()[name = tensor("attn_weights_317_transpose_x_0"), val = 
tensor(true)]; + tensor attn_weights_317_transpose_y_0 = const()[name = tensor("attn_weights_317_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_317_cast = matmul(transpose_x = attn_weights_317_transpose_x_0, transpose_y = attn_weights_317_transpose_y_0, x = var_7968_cast, y = var_7970_cast)[name = tensor("attn_weights_317_cast")]; + tensor attn_weights_319_cast = mul(x = attn_weights_317_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_319_cast")]; + tensor var_7976_cast = softmax(axis = var_6849, x = attn_weights_319_cast)[name = tensor("op_7976_cast")]; + tensor attn_159_transpose_x_0 = const()[name = tensor("attn_159_transpose_x_0"), val = tensor(false)]; + tensor attn_159_transpose_y_0 = const()[name = tensor("attn_159_transpose_y_0"), val = tensor(true)]; + tensor attn_159_cast = matmul(transpose_x = attn_159_transpose_x_0, transpose_y = attn_159_transpose_y_0, x = var_7972_cast, y = var_7976_cast)[name = tensor("attn_159_cast")]; + tensor var_7980 = const()[name = tensor("op_7980"), val = tensor([2, 1280, 1, -1])]; + tensor input_481_cast = reshape(shape = var_7980, x = attn_159_cast)[name = tensor("input_481_cast")]; + tensor var_7985 = const()[name = tensor("op_7985"), val = tensor([1, 1])]; + tensor var_7987 = const()[name = tensor("op_7987"), val = tensor([1, 1])]; + tensor var_7989_pad_type_0 = const()[name = tensor("op_7989_pad_type_0"), val = tensor("custom")]; + tensor var_7989_pad_0 = const()[name = tensor("op_7989_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1069118784))), lut = tensor([-0x1.46p-7, 0x1.45cp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1069323648)))]; + tensor var_7989_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_7987, groups = var_6865, pad = var_7989_pad_0, pad_type = var_7989_pad_type_0, strides = var_7985, weight = up_blocks_0_attentions_0_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_481_cast)[name = tensor("op_7989_cast")]; + tensor inputs_239_cast = add(x = var_7989_cast, y = inputs_237_cast)[name = tensor("inputs_239_cast")]; + tensor var_7993 = const()[name = tensor("op_7993"), val = tensor([1])]; + tensor channels_mean_239_cast = reduce_mean(axes = var_7993, keep_dims = var_6860, x = inputs_239_cast)[name = tensor("channels_mean_239_cast")]; + tensor zero_mean_239_cast = sub(x = inputs_239_cast, y = channels_mean_239_cast)[name = tensor("zero_mean_239_cast")]; + tensor zero_mean_sq_239_cast = mul(x = zero_mean_239_cast, y = zero_mean_239_cast)[name = tensor("zero_mean_sq_239_cast")]; + tensor var_7997 = const()[name = tensor("op_7997"), val = tensor([1])]; + tensor var_7998_cast = reduce_mean(axes = var_7997, keep_dims = var_6860, x = zero_mean_sq_239_cast)[name = tensor("op_7998_cast")]; + tensor var_7999_to_fp16 = const()[name = tensor("op_7999_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8000_cast = add(x = var_7998_cast, y = var_7999_to_fp16)[name = 
tensor("op_8000_cast")]; + tensor denom_239_epsilon_0_to_fp16 = const()[name = tensor("denom_239_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_239_cast = rsqrt(epsilon = denom_239_epsilon_0_to_fp16, x = var_8000_cast)[name = tensor("denom_239_cast")]; + tensor out_239_cast = mul(x = zero_mean_239_cast, y = denom_239_cast)[name = tensor("out_239_cast")]; + tensor var_8004_to_fp16 = const()[name = tensor("op_8004_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1069326272)))]; + tensor var_8005_cast = add(x = out_239_cast, y = var_8004_to_fp16)[name = tensor("op_8005_cast")]; + tensor var_8007_to_fp16 = const()[name = tensor("op_8007_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1069328896)))]; + tensor input_483_cast = mul(x = var_8005_cast, y = var_8007_to_fp16)[name = tensor("input_483_cast")]; + tensor var_8015 = const()[name = tensor("op_8015"), val = tensor([1, 1])]; + tensor var_8017 = const()[name = tensor("op_8017"), val = tensor([1, 1])]; + tensor var_8019_pad_type_0 = const()[name = tensor("op_8019_pad_type_0"), val = tensor("custom")]; + tensor var_8019_pad_0 = const()[name = tensor("op_8019_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1069331520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1079161984))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1079162176)))]; + tensor var_8019_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_8017, groups = var_6865, pad = var_8019_pad_0, pad_type = var_8019_pad_type_0, strides = var_8015, weight = up_blocks_0_attentions_0_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_483_cast)[name = tensor("op_8019_cast")]; + tensor var_8020_split_sizes_0 = const()[name = tensor("op_8020_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8020_axis_0 = const()[name = tensor("op_8020_axis_0"), val = tensor(1)]; + tensor var_8020_cast_0, tensor var_8020_cast_1 = split(axis = var_8020_axis_0, split_sizes = var_8020_split_sizes_0, x = var_8019_cast)[name = tensor("op_8020_cast")]; + tensor var_8022_mode_0 = const()[name = tensor("op_8022_mode_0"), val = tensor("EXACT")]; + tensor var_8022_cast = gelu(mode = var_8022_mode_0, x = var_8020_cast_1)[name = tensor("op_8022_cast")]; + tensor input_485_cast = mul(x = var_8020_cast_0, y = var_8022_cast)[name = tensor("input_485_cast")]; + tensor var_8026 = const()[name = tensor("op_8026"), val = tensor([1, 1])]; + tensor var_8028 = const()[name = tensor("op_8028"), val = tensor([1, 1])]; + tensor var_8030_pad_type_0 = const()[name = tensor("op_8030_pad_type_0"), val = tensor("custom")]; + tensor var_8030_pad_0 = const()[name = tensor("op_8030_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1079182720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1084097984))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1084098176)))]; + tensor var_8030_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_8028, groups = var_6865, pad = var_8030_pad_0, pad_type = var_8030_pad_type_0, strides = var_8026, weight = up_blocks_0_attentions_0_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_485_cast)[name = tensor("op_8030_cast")]; + tensor inputs_241_cast = add(x = var_8030_cast, y = inputs_239_cast)[name = tensor("inputs_241_cast")]; + tensor var_8040 = const()[name = tensor("op_8040"), val = tensor([1])]; + tensor channels_mean_241_cast = reduce_mean(axes = var_8040, keep_dims = var_6860, x = inputs_241_cast)[name = tensor("channels_mean_241_cast")]; + tensor zero_mean_241_cast = sub(x = inputs_241_cast, y = channels_mean_241_cast)[name = tensor("zero_mean_241_cast")]; + tensor zero_mean_sq_241_cast = mul(x = zero_mean_241_cast, y = zero_mean_241_cast)[name = tensor("zero_mean_sq_241_cast")]; + tensor var_8044 = const()[name = tensor("op_8044"), val = tensor([1])]; + tensor var_8045_cast = reduce_mean(axes = var_8044, keep_dims = var_6860, x = zero_mean_sq_241_cast)[name = tensor("op_8045_cast")]; + tensor var_8046_to_fp16 = const()[name = tensor("op_8046_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8047_cast = add(x = var_8045_cast, y = var_8046_to_fp16)[name = tensor("op_8047_cast")]; + tensor denom_241_epsilon_0_to_fp16 = const()[name = tensor("denom_241_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_241_cast = rsqrt(epsilon = denom_241_epsilon_0_to_fp16, x = var_8047_cast)[name = tensor("denom_241_cast")]; + tensor out_241_cast = mul(x = zero_mean_241_cast, y = denom_241_cast)[name = tensor("out_241_cast")]; + tensor var_8051_to_fp16 = const()[name = tensor("op_8051_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1084100800)))]; + tensor var_8052_cast = add(x = out_241_cast, y = var_8051_to_fp16)[name = tensor("op_8052_cast")]; + tensor var_8054_to_fp16 = const()[name = tensor("op_8054_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1084103424)))]; + tensor hidden_states_323_cast = mul(x = var_8052_cast, y = var_8054_to_fp16)[name = tensor("hidden_states_323_cast")]; + tensor var_8061 = const()[name = tensor("op_8061"), val = tensor([1, 1])]; + tensor var_8063 = const()[name = tensor("op_8063"), val = tensor([1, 1])]; + tensor q_161_pad_type_0 = const()[name = tensor("q_161_pad_type_0"), val = tensor("custom")]; + tensor q_161_pad_0 = const()[name = tensor("q_161_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1084106048))), lut = tensor([-0x1.654p-5, -0x1.ac4p-7, 0x1.ae4p-7, 0x1.658p-5]), name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_161_cast = conv(dilations = var_8063, groups = var_6865, pad = q_161_pad_0, pad_type = q_161_pad_type_0, strides = var_8061, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_323_cast)[name = tensor("q_161_cast")]; + tensor var_8067 = const()[name = tensor("op_8067"), val = tensor([1, 1])]; + tensor var_8069 = const()[name = tensor("op_8069"), val = tensor([1, 1])]; + tensor k_161_pad_type_0 = const()[name = tensor("k_161_pad_type_0"), val = tensor("custom")]; + tensor k_161_pad_0 = const()[name = tensor("k_161_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1084515712))), lut = tensor([-0x1.66p-5, -0x1.ae4p-7, 0x1.ae4p-7, 0x1.65cp-5]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_161_cast = conv(dilations = var_8069, groups = var_6865, pad = k_161_pad_0, pad_type = k_161_pad_type_0, strides = var_8067, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_323_cast)[name = tensor("k_161_cast")]; + tensor var_8073 = const()[name = tensor("op_8073"), val = tensor([1, 1])]; + tensor var_8075 = const()[name = tensor("op_8075"), val = tensor([1, 1])]; + tensor v_161_pad_type_0 = const()[name = tensor("v_161_pad_type_0"), val = tensor("custom")]; + tensor v_161_pad_0 = const()[name = tensor("v_161_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1084925376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1086154240))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_161_cast = conv(dilations = var_8075, groups = var_6865, pad = v_161_pad_0, pad_type = v_161_pad_type_0, strides = var_8073, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_323_cast)[name = tensor("v_161_cast")]; + tensor var_8079 = const()[name = tensor("op_8079"), val = tensor([2, 20, 64, -1])]; + tensor var_8080_cast = reshape(shape = var_8079, x = q_161_cast)[name = tensor("op_8080_cast")]; + tensor var_8081 = const()[name = tensor("op_8081"), val = tensor([2, 20, 64, -1])]; + tensor var_8082_cast = reshape(shape = var_8081, x = k_161_cast)[name = tensor("op_8082_cast")]; + tensor var_8083 = const()[name = tensor("op_8083"), val = tensor([2, 20, 64, -1])]; + tensor var_8084_cast = reshape(shape = var_8083, x = v_161_cast)[name = tensor("op_8084_cast")]; + tensor attn_weights_321_transpose_x_0 = const()[name = tensor("attn_weights_321_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_321_transpose_y_0 = const()[name = tensor("attn_weights_321_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_321_cast = matmul(transpose_x = attn_weights_321_transpose_x_0, transpose_y = attn_weights_321_transpose_y_0, x = var_8080_cast, y = var_8082_cast)[name = 
tensor("attn_weights_321_cast")]; + tensor attn_weights_323_cast = mul(x = attn_weights_321_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_323_cast")]; + tensor var_8088_cast = softmax(axis = var_6849, x = attn_weights_323_cast)[name = tensor("op_8088_cast")]; + tensor attn_161_transpose_x_0 = const()[name = tensor("attn_161_transpose_x_0"), val = tensor(false)]; + tensor attn_161_transpose_y_0 = const()[name = tensor("attn_161_transpose_y_0"), val = tensor(true)]; + tensor attn_161_cast = matmul(transpose_x = attn_161_transpose_x_0, transpose_y = attn_161_transpose_y_0, x = var_8084_cast, y = var_8088_cast)[name = tensor("attn_161_cast")]; + tensor var_8092 = const()[name = tensor("op_8092"), val = tensor([2, 1280, 1, -1])]; + tensor input_487_cast = reshape(shape = var_8092, x = attn_161_cast)[name = tensor("input_487_cast")]; + tensor var_8097 = const()[name = tensor("op_8097"), val = tensor([1, 1])]; + tensor var_8099 = const()[name = tensor("op_8099"), val = tensor([1, 1])]; + tensor var_8101_pad_type_0 = const()[name = tensor("op_8101_pad_type_0"), val = tensor("custom")]; + tensor var_8101_pad_0 = const()[name = tensor("op_8101_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1086154432))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1087383296))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1087383488)))]; + tensor var_8101_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_8099, groups = var_6865, pad = var_8101_pad_0, pad_type = var_8101_pad_type_0, strides = var_8097, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_487_cast)[name = tensor("op_8101_cast")]; + tensor inputs_243_cast = add(x = var_8101_cast, y = inputs_241_cast)[name = tensor("inputs_243_cast")]; + tensor var_8105 = const()[name = tensor("op_8105"), val = tensor([1])]; + tensor channels_mean_243_cast = reduce_mean(axes = var_8105, keep_dims = var_6860, x = inputs_243_cast)[name = tensor("channels_mean_243_cast")]; + tensor zero_mean_243_cast = sub(x = inputs_243_cast, y = channels_mean_243_cast)[name = tensor("zero_mean_243_cast")]; + tensor zero_mean_sq_243_cast = mul(x = zero_mean_243_cast, y = zero_mean_243_cast)[name = tensor("zero_mean_sq_243_cast")]; + tensor var_8109 = const()[name = tensor("op_8109"), val = tensor([1])]; + tensor var_8110_cast = reduce_mean(axes = var_8109, keep_dims = var_6860, x = zero_mean_sq_243_cast)[name = tensor("op_8110_cast")]; + tensor var_8111_to_fp16 = const()[name = tensor("op_8111_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8112_cast = add(x = var_8110_cast, y = var_8111_to_fp16)[name = tensor("op_8112_cast")]; + tensor denom_243_epsilon_0_to_fp16 = const()[name = tensor("denom_243_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_243_cast = rsqrt(epsilon = denom_243_epsilon_0_to_fp16, x = var_8112_cast)[name = 
tensor("denom_243_cast")]; + tensor out_243_cast = mul(x = zero_mean_243_cast, y = denom_243_cast)[name = tensor("out_243_cast")]; + tensor var_8116_to_fp16 = const()[name = tensor("op_8116_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1087386112)))]; + tensor var_8117_cast = add(x = out_243_cast, y = var_8116_to_fp16)[name = tensor("op_8117_cast")]; + tensor var_8119_to_fp16 = const()[name = tensor("op_8119_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1087388736)))]; + tensor hidden_states_325_cast = mul(x = var_8117_cast, y = var_8119_to_fp16)[name = tensor("hidden_states_325_cast")]; + tensor var_8126 = const()[name = tensor("op_8126"), val = tensor([1, 1])]; + tensor var_8128 = const()[name = tensor("op_8128"), val = tensor([1, 1])]; + tensor q_163_pad_type_0 = const()[name = tensor("q_163_pad_type_0"), val = tensor("custom")]; + tensor q_163_pad_0 = const()[name = tensor("q_163_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1087391360))), lut = tensor([-0x1.ce4p-6, -0x1.1cp-7, 0x1.1acp-7, 0x1.cd8p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_163_cast = conv(dilations = var_8128, groups = var_6865, pad = q_163_pad_0, pad_type = q_163_pad_type_0, strides = var_8126, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_325_cast)[name = tensor("q_163_cast")]; + tensor var_8132 = const()[name = tensor("op_8132"), val = tensor([1, 1])]; + tensor var_8134 = const()[name = tensor("op_8134"), val = tensor([1, 1])]; + tensor k_163_pad_type_0 = const()[name = tensor("k_163_pad_type_0"), val = tensor("custom")]; + tensor k_163_pad_0 = const()[name = tensor("k_163_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1087801024))), lut = tensor([-0x1.6ecp-6, -0x1.b44p-8, 0x1.b5cp-8, 0x1.6fp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_163_cast = conv(dilations = var_8134, groups = var_6865, pad = k_163_pad_0, pad_type = k_163_pad_type_0, strides = var_8132, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_163_cast")]; + tensor var_8138 = const()[name = tensor("op_8138"), val = tensor([1, 1])]; + tensor var_8140 = const()[name = tensor("op_8140"), val = tensor([1, 1])]; + tensor v_163_pad_type_0 = const()[name = tensor("v_163_pad_type_0"), val = tensor("custom")]; + tensor v_163_pad_0 = const()[name = tensor("v_163_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1088456448))), lut = tensor([-0x1.eacp-6, -0x1.1ep-7, 0x1.1d8p-7, 0x1.ea8p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = 
tensor([1280, 2048, 1, 1])]; + tensor v_163_cast = conv(dilations = var_8140, groups = var_6865, pad = v_163_pad_0, pad_type = v_163_pad_type_0, strides = var_8138, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_163_cast")]; + tensor var_8144 = const()[name = tensor("op_8144"), val = tensor([2, 20, 64, -1])]; + tensor var_8145_cast = reshape(shape = var_8144, x = q_163_cast)[name = tensor("op_8145_cast")]; + tensor var_8146 = const()[name = tensor("op_8146"), val = tensor([2, 20, 64, -1])]; + tensor var_8147_cast = reshape(shape = var_8146, x = k_163_cast)[name = tensor("op_8147_cast")]; + tensor var_8148 = const()[name = tensor("op_8148"), val = tensor([2, 20, 64, -1])]; + tensor var_8149_cast = reshape(shape = var_8148, x = v_163_cast)[name = tensor("op_8149_cast")]; + tensor attn_weights_325_transpose_x_0 = const()[name = tensor("attn_weights_325_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_325_transpose_y_0 = const()[name = tensor("attn_weights_325_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_325_cast = matmul(transpose_x = attn_weights_325_transpose_x_0, transpose_y = attn_weights_325_transpose_y_0, x = var_8145_cast, y = var_8147_cast)[name = tensor("attn_weights_325_cast")]; + tensor attn_weights_327_cast = mul(x = attn_weights_325_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_327_cast")]; + tensor var_8153_cast = softmax(axis = var_6849, x = attn_weights_327_cast)[name = tensor("op_8153_cast")]; + tensor attn_163_transpose_x_0 = const()[name = tensor("attn_163_transpose_x_0"), val = tensor(false)]; + tensor attn_163_transpose_y_0 = const()[name = tensor("attn_163_transpose_y_0"), val = tensor(true)]; + tensor attn_163_cast = matmul(transpose_x = attn_163_transpose_x_0, transpose_y = attn_163_transpose_y_0, x = var_8149_cast, y = var_8153_cast)[name = tensor("attn_163_cast")]; + tensor var_8157 = const()[name = tensor("op_8157"), val = tensor([2, 1280, 1, -1])]; + tensor input_489_cast = reshape(shape = var_8157, x = attn_163_cast)[name = tensor("input_489_cast")]; + tensor var_8162 = const()[name = tensor("op_8162"), val = tensor([1, 1])]; + tensor var_8164 = const()[name = tensor("op_8164"), val = tensor([1, 1])]; + tensor var_8166_pad_type_0 = const()[name = tensor("op_8166_pad_type_0"), val = tensor("custom")]; + tensor var_8166_pad_0 = const()[name = tensor("op_8166_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1089111872))), lut = tensor([-0x1.1a8p-6, -0x1.54p-8, 0x1.544p-8, 0x1.1bp-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1089521536)))]; + tensor var_8166_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_8164, groups = var_6865, pad = var_8166_pad_0, pad_type = var_8166_pad_type_0, strides = var_8162, weight = up_blocks_0_attentions_0_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = 
input_489_cast)[name = tensor("op_8166_cast")]; + tensor inputs_245_cast = add(x = var_8166_cast, y = inputs_243_cast)[name = tensor("inputs_245_cast")]; + tensor var_8170 = const()[name = tensor("op_8170"), val = tensor([1])]; + tensor channels_mean_245_cast = reduce_mean(axes = var_8170, keep_dims = var_6860, x = inputs_245_cast)[name = tensor("channels_mean_245_cast")]; + tensor zero_mean_245_cast = sub(x = inputs_245_cast, y = channels_mean_245_cast)[name = tensor("zero_mean_245_cast")]; + tensor zero_mean_sq_245_cast = mul(x = zero_mean_245_cast, y = zero_mean_245_cast)[name = tensor("zero_mean_sq_245_cast")]; + tensor var_8174 = const()[name = tensor("op_8174"), val = tensor([1])]; + tensor var_8175_cast = reduce_mean(axes = var_8174, keep_dims = var_6860, x = zero_mean_sq_245_cast)[name = tensor("op_8175_cast")]; + tensor var_8176_to_fp16 = const()[name = tensor("op_8176_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8177_cast = add(x = var_8175_cast, y = var_8176_to_fp16)[name = tensor("op_8177_cast")]; + tensor denom_245_epsilon_0_to_fp16 = const()[name = tensor("denom_245_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_245_cast = rsqrt(epsilon = denom_245_epsilon_0_to_fp16, x = var_8177_cast)[name = tensor("denom_245_cast")]; + tensor out_245_cast = mul(x = zero_mean_245_cast, y = denom_245_cast)[name = tensor("out_245_cast")]; + tensor var_8181_to_fp16 = const()[name = tensor("op_8181_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1089524160)))]; + tensor var_8182_cast = add(x = out_245_cast, y = var_8181_to_fp16)[name = tensor("op_8182_cast")]; + tensor var_8184_to_fp16 = const()[name = tensor("op_8184_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1089526784)))]; + tensor input_491_cast = mul(x = var_8182_cast, y = var_8184_to_fp16)[name = tensor("input_491_cast")]; + tensor var_8192 = const()[name = tensor("op_8192"), val = tensor([1, 1])]; + tensor var_8194 = const()[name = tensor("op_8194"), val = tensor([1, 1])]; + tensor var_8196_pad_type_0 = const()[name = tensor("op_8196_pad_type_0"), val = tensor("custom")]; + tensor var_8196_pad_0 = const()[name = tensor("op_8196_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1089529408))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1099359872))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1099360064)))]; + tensor var_8196_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_8194, groups = var_6865, pad = var_8196_pad_0, pad_type = var_8196_pad_type_0, strides = var_8192, weight = up_blocks_0_attentions_0_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_491_cast)[name = tensor("op_8196_cast")]; + tensor var_8197_split_sizes_0 = const()[name = tensor("op_8197_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8197_axis_0 = 
const()[name = tensor("op_8197_axis_0"), val = tensor(1)]; + tensor var_8197_cast_0, tensor var_8197_cast_1 = split(axis = var_8197_axis_0, split_sizes = var_8197_split_sizes_0, x = var_8196_cast)[name = tensor("op_8197_cast")]; + tensor var_8199_mode_0 = const()[name = tensor("op_8199_mode_0"), val = tensor("EXACT")]; + tensor var_8199_cast = gelu(mode = var_8199_mode_0, x = var_8197_cast_1)[name = tensor("op_8199_cast")]; + tensor input_493_cast = mul(x = var_8197_cast_0, y = var_8199_cast)[name = tensor("input_493_cast")]; + tensor var_8203 = const()[name = tensor("op_8203"), val = tensor([1, 1])]; + tensor var_8205 = const()[name = tensor("op_8205"), val = tensor([1, 1])]; + tensor var_8207_pad_type_0 = const()[name = tensor("op_8207_pad_type_0"), val = tensor("custom")]; + tensor var_8207_pad_0 = const()[name = tensor("op_8207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1099380608))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1104295872))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1104296064)))]; + tensor var_8207_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_8205, groups = var_6865, pad = var_8207_pad_0, pad_type = var_8207_pad_type_0, strides = var_8203, weight = up_blocks_0_attentions_0_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_493_cast)[name = tensor("op_8207_cast")]; + tensor inputs_247_cast = add(x = var_8207_cast, y = inputs_245_cast)[name = tensor("inputs_247_cast")]; + tensor var_8217 = const()[name = tensor("op_8217"), val = tensor([1])]; + tensor channels_mean_247_cast = reduce_mean(axes = var_8217, keep_dims = var_6860, x = inputs_247_cast)[name = tensor("channels_mean_247_cast")]; + tensor zero_mean_247_cast = sub(x = inputs_247_cast, y = channels_mean_247_cast)[name = tensor("zero_mean_247_cast")]; + tensor zero_mean_sq_247_cast = mul(x = zero_mean_247_cast, y = zero_mean_247_cast)[name = tensor("zero_mean_sq_247_cast")]; + tensor var_8221 = const()[name = tensor("op_8221"), val = tensor([1])]; + tensor var_8222_cast = reduce_mean(axes = var_8221, keep_dims = var_6860, x = zero_mean_sq_247_cast)[name = tensor("op_8222_cast")]; + tensor var_8223_to_fp16 = const()[name = tensor("op_8223_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8224_cast = add(x = var_8222_cast, y = var_8223_to_fp16)[name = tensor("op_8224_cast")]; + tensor denom_247_epsilon_0_to_fp16 = const()[name = tensor("denom_247_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_247_cast = rsqrt(epsilon = denom_247_epsilon_0_to_fp16, x = var_8224_cast)[name = tensor("denom_247_cast")]; + tensor out_247_cast = mul(x = zero_mean_247_cast, y = denom_247_cast)[name = tensor("out_247_cast")]; + tensor var_8228_to_fp16 = const()[name = tensor("op_8228_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1104298688)))]; + tensor var_8229_cast = add(x = out_247_cast, y = 
var_8228_to_fp16)[name = tensor("op_8229_cast")]; + tensor var_8231_to_fp16 = const()[name = tensor("op_8231_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1104301312)))]; + tensor hidden_states_329_cast = mul(x = var_8229_cast, y = var_8231_to_fp16)[name = tensor("hidden_states_329_cast")]; + tensor var_8238 = const()[name = tensor("op_8238"), val = tensor([1, 1])]; + tensor var_8240 = const()[name = tensor("op_8240"), val = tensor([1, 1])]; + tensor q_165_pad_type_0 = const()[name = tensor("q_165_pad_type_0"), val = tensor("custom")]; + tensor q_165_pad_0 = const()[name = tensor("q_165_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1104303936))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1105532800))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_165_cast = conv(dilations = var_8240, groups = var_6865, pad = q_165_pad_0, pad_type = q_165_pad_type_0, strides = var_8238, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_329_cast)[name = tensor("q_165_cast")]; + tensor var_8244 = const()[name = tensor("op_8244"), val = tensor([1, 1])]; + tensor var_8246 = const()[name = tensor("op_8246"), val = tensor([1, 1])]; + tensor k_165_pad_type_0 = const()[name = tensor("k_165_pad_type_0"), val = tensor("custom")]; + tensor k_165_pad_0 = const()[name = tensor("k_165_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1105532992))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1106352256))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_165_cast = conv(dilations = var_8246, groups = var_6865, pad = k_165_pad_0, pad_type = k_165_pad_type_0, strides = var_8244, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_329_cast)[name = tensor("k_165_cast")]; + tensor var_8250 = const()[name = tensor("op_8250"), val = tensor([1, 1])]; + tensor var_8252 = const()[name = tensor("op_8252"), val = tensor([1, 1])]; + tensor v_165_pad_type_0 = const()[name = tensor("v_165_pad_type_0"), val = tensor("custom")]; + tensor v_165_pad_0 = const()[name = tensor("v_165_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1106352384))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1107581248))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_165_cast = conv(dilations = var_8252, groups = var_6865, pad = v_165_pad_0, pad_type = v_165_pad_type_0, strides = var_8250, weight = 
up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_329_cast)[name = tensor("v_165_cast")]; + tensor var_8256 = const()[name = tensor("op_8256"), val = tensor([2, 20, 64, -1])]; + tensor var_8257_cast = reshape(shape = var_8256, x = q_165_cast)[name = tensor("op_8257_cast")]; + tensor var_8258 = const()[name = tensor("op_8258"), val = tensor([2, 20, 64, -1])]; + tensor var_8259_cast = reshape(shape = var_8258, x = k_165_cast)[name = tensor("op_8259_cast")]; + tensor var_8260 = const()[name = tensor("op_8260"), val = tensor([2, 20, 64, -1])]; + tensor var_8261_cast = reshape(shape = var_8260, x = v_165_cast)[name = tensor("op_8261_cast")]; + tensor attn_weights_329_transpose_x_0 = const()[name = tensor("attn_weights_329_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_329_transpose_y_0 = const()[name = tensor("attn_weights_329_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_329_cast = matmul(transpose_x = attn_weights_329_transpose_x_0, transpose_y = attn_weights_329_transpose_y_0, x = var_8257_cast, y = var_8259_cast)[name = tensor("attn_weights_329_cast")]; + tensor attn_weights_331_cast = mul(x = attn_weights_329_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_331_cast")]; + tensor var_8265_cast = softmax(axis = var_6849, x = attn_weights_331_cast)[name = tensor("op_8265_cast")]; + tensor attn_165_transpose_x_0 = const()[name = tensor("attn_165_transpose_x_0"), val = tensor(false)]; + tensor attn_165_transpose_y_0 = const()[name = tensor("attn_165_transpose_y_0"), val = tensor(true)]; + tensor attn_165_cast = matmul(transpose_x = attn_165_transpose_x_0, transpose_y = attn_165_transpose_y_0, x = var_8261_cast, y = var_8265_cast)[name = tensor("attn_165_cast")]; + tensor var_8269 = const()[name = tensor("op_8269"), val = tensor([2, 1280, 1, -1])]; + tensor input_495_cast = reshape(shape = var_8269, x = attn_165_cast)[name = tensor("input_495_cast")]; + tensor var_8274 = const()[name = tensor("op_8274"), val = tensor([1, 1])]; + tensor var_8276 = const()[name = tensor("op_8276"), val = tensor([1, 1])]; + tensor var_8278_pad_type_0 = const()[name = tensor("op_8278_pad_type_0"), val = tensor("custom")]; + tensor var_8278_pad_0 = const()[name = tensor("op_8278_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1107581440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1108810304))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1108810496)))]; + tensor var_8278_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_8276, groups = var_6865, pad = var_8278_pad_0, pad_type = var_8278_pad_type_0, strides = var_8274, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_495_cast)[name = tensor("op_8278_cast")]; + tensor inputs_249_cast = add(x = var_8278_cast, y = inputs_247_cast)[name = 
tensor("inputs_249_cast")]; + tensor var_8282 = const()[name = tensor("op_8282"), val = tensor([1])]; + tensor channels_mean_249_cast = reduce_mean(axes = var_8282, keep_dims = var_6860, x = inputs_249_cast)[name = tensor("channels_mean_249_cast")]; + tensor zero_mean_249_cast = sub(x = inputs_249_cast, y = channels_mean_249_cast)[name = tensor("zero_mean_249_cast")]; + tensor zero_mean_sq_249_cast = mul(x = zero_mean_249_cast, y = zero_mean_249_cast)[name = tensor("zero_mean_sq_249_cast")]; + tensor var_8286 = const()[name = tensor("op_8286"), val = tensor([1])]; + tensor var_8287_cast = reduce_mean(axes = var_8286, keep_dims = var_6860, x = zero_mean_sq_249_cast)[name = tensor("op_8287_cast")]; + tensor var_8288_to_fp16 = const()[name = tensor("op_8288_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8289_cast = add(x = var_8287_cast, y = var_8288_to_fp16)[name = tensor("op_8289_cast")]; + tensor denom_249_epsilon_0_to_fp16 = const()[name = tensor("denom_249_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_249_cast = rsqrt(epsilon = denom_249_epsilon_0_to_fp16, x = var_8289_cast)[name = tensor("denom_249_cast")]; + tensor out_249_cast = mul(x = zero_mean_249_cast, y = denom_249_cast)[name = tensor("out_249_cast")]; + tensor var_8293_to_fp16 = const()[name = tensor("op_8293_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1108813120)))]; + tensor var_8294_cast = add(x = out_249_cast, y = var_8293_to_fp16)[name = tensor("op_8294_cast")]; + tensor var_8296_to_fp16 = const()[name = tensor("op_8296_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1108815744)))]; + tensor hidden_states_331_cast = mul(x = var_8294_cast, y = var_8296_to_fp16)[name = tensor("hidden_states_331_cast")]; + tensor var_8303 = const()[name = tensor("op_8303"), val = tensor([1, 1])]; + tensor var_8305 = const()[name = tensor("op_8305"), val = tensor([1, 1])]; + tensor q_167_pad_type_0 = const()[name = tensor("q_167_pad_type_0"), val = tensor("custom")]; + tensor q_167_pad_0 = const()[name = tensor("q_167_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1108818368))), lut = tensor([-0x1.d44p-7, 0x1.d3cp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_167_cast = conv(dilations = var_8305, groups = var_6865, pad = q_167_pad_0, pad_type = q_167_pad_type_0, strides = var_8303, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_331_cast)[name = tensor("q_167_cast")]; + tensor var_8309 = const()[name = tensor("op_8309"), val = tensor([1, 1])]; + tensor var_8311 = const()[name = tensor("op_8311"), val = tensor([1, 1])]; + tensor k_167_pad_type_0 = const()[name = tensor("k_167_pad_type_0"), val = tensor("custom")]; + tensor k_167_pad_0 = const()[name = tensor("k_167_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1109023232))), lut = tensor([-0x1.4bcp-6, -0x1.8dp-8, 0x1.8b8p-8, 0x1.4b4p-6]), name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_167_cast = conv(dilations = var_8311, groups = var_6865, pad = k_167_pad_0, pad_type = k_167_pad_type_0, strides = var_8309, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_167_cast")]; + tensor var_8315 = const()[name = tensor("op_8315"), val = tensor([1, 1])]; + tensor var_8317 = const()[name = tensor("op_8317"), val = tensor([1, 1])]; + tensor v_167_pad_type_0 = const()[name = tensor("v_167_pad_type_0"), val = tensor("custom")]; + tensor v_167_pad_0 = const()[name = tensor("v_167_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1109678656))), lut = tensor([-0x1.c4p-6, -0x1.08cp-7, 0x1.08cp-7, 0x1.c3p-6]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_167_cast = conv(dilations = var_8317, groups = var_6865, pad = v_167_pad_0, pad_type = v_167_pad_type_0, strides = var_8315, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_167_cast")]; + tensor var_8321 = const()[name = tensor("op_8321"), val = tensor([2, 20, 64, -1])]; + tensor var_8322_cast = reshape(shape = var_8321, x = q_167_cast)[name = tensor("op_8322_cast")]; + tensor var_8323 = const()[name = tensor("op_8323"), val = tensor([2, 20, 64, -1])]; + tensor var_8324_cast = reshape(shape = var_8323, x = k_167_cast)[name = tensor("op_8324_cast")]; + tensor var_8325 = const()[name = tensor("op_8325"), val = tensor([2, 20, 64, -1])]; + tensor var_8326_cast = reshape(shape = var_8325, x = v_167_cast)[name = tensor("op_8326_cast")]; + tensor attn_weights_333_transpose_x_0 = const()[name = tensor("attn_weights_333_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_333_transpose_y_0 = const()[name = tensor("attn_weights_333_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_333_cast = matmul(transpose_x = attn_weights_333_transpose_x_0, transpose_y = attn_weights_333_transpose_y_0, x = var_8322_cast, y = var_8324_cast)[name = tensor("attn_weights_333_cast")]; + tensor attn_weights_335_cast = mul(x = attn_weights_333_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_335_cast")]; + tensor var_8330_cast = softmax(axis = var_6849, x = attn_weights_335_cast)[name = tensor("op_8330_cast")]; + tensor attn_167_transpose_x_0 = const()[name = tensor("attn_167_transpose_x_0"), val = tensor(false)]; + tensor attn_167_transpose_y_0 = const()[name = tensor("attn_167_transpose_y_0"), val = tensor(true)]; + tensor attn_167_cast = matmul(transpose_x = attn_167_transpose_x_0, transpose_y = attn_167_transpose_y_0, x = var_8326_cast, y = var_8330_cast)[name = tensor("attn_167_cast")]; + tensor var_8334 = const()[name = tensor("op_8334"), val = tensor([2, 1280, 1, -1])]; + tensor input_497_cast = reshape(shape = var_8334, x = attn_167_cast)[name = tensor("input_497_cast")]; + tensor var_8339 = const()[name = tensor("op_8339"), val = tensor([1, 1])]; + tensor var_8341 = const()[name = tensor("op_8341"), val = tensor([1, 1])]; + tensor var_8343_pad_type_0 = const()[name = tensor("op_8343_pad_type_0"), val = tensor("custom")]; + 
tensor var_8343_pad_0 = const()[name = tensor("op_8343_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1110334080))), lut = tensor([-0x1.1a4p-7, 0x1.194p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1110538944)))]; + tensor var_8343_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_8341, groups = var_6865, pad = var_8343_pad_0, pad_type = var_8343_pad_type_0, strides = var_8339, weight = up_blocks_0_attentions_0_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_497_cast)[name = tensor("op_8343_cast")]; + tensor inputs_251_cast = add(x = var_8343_cast, y = inputs_249_cast)[name = tensor("inputs_251_cast")]; + tensor var_8347 = const()[name = tensor("op_8347"), val = tensor([1])]; + tensor channels_mean_251_cast = reduce_mean(axes = var_8347, keep_dims = var_6860, x = inputs_251_cast)[name = tensor("channels_mean_251_cast")]; + tensor zero_mean_251_cast = sub(x = inputs_251_cast, y = channels_mean_251_cast)[name = tensor("zero_mean_251_cast")]; + tensor zero_mean_sq_251_cast = mul(x = zero_mean_251_cast, y = zero_mean_251_cast)[name = tensor("zero_mean_sq_251_cast")]; + tensor var_8351 = const()[name = tensor("op_8351"), val = tensor([1])]; + tensor var_8352_cast = reduce_mean(axes = var_8351, keep_dims = var_6860, x = zero_mean_sq_251_cast)[name = tensor("op_8352_cast")]; + tensor var_8353_to_fp16 = const()[name = tensor("op_8353_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8354_cast = add(x = var_8352_cast, y = var_8353_to_fp16)[name = tensor("op_8354_cast")]; + tensor denom_251_epsilon_0_to_fp16 = const()[name = tensor("denom_251_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_251_cast = rsqrt(epsilon = denom_251_epsilon_0_to_fp16, x = var_8354_cast)[name = tensor("denom_251_cast")]; + tensor out_251_cast = mul(x = zero_mean_251_cast, y = denom_251_cast)[name = tensor("out_251_cast")]; + tensor var_8358_to_fp16 = const()[name = tensor("op_8358_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1110541568)))]; + tensor var_8359_cast = add(x = out_251_cast, y = var_8358_to_fp16)[name = tensor("op_8359_cast")]; + tensor var_8361_to_fp16 = const()[name = tensor("op_8361_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1110544192)))]; + tensor input_499_cast = mul(x = var_8359_cast, y = var_8361_to_fp16)[name = tensor("input_499_cast")]; + tensor var_8369 = const()[name = tensor("op_8369"), val = tensor([1, 1])]; + tensor var_8371 = const()[name = tensor("op_8371"), val = tensor([1, 1])]; + tensor var_8373_pad_type_0 = const()[name = tensor("op_8373_pad_type_0"), val = tensor("custom")]; + tensor var_8373_pad_0 = const()[name = tensor("op_8373_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1110546816))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1120377280))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1120377472)))]; + tensor var_8373_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_8371, groups = var_6865, pad = var_8373_pad_0, pad_type = var_8373_pad_type_0, strides = var_8369, weight = up_blocks_0_attentions_0_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_499_cast)[name = tensor("op_8373_cast")]; + tensor var_8374_split_sizes_0 = const()[name = tensor("op_8374_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8374_axis_0 = const()[name = tensor("op_8374_axis_0"), val = tensor(1)]; + tensor var_8374_cast_0, tensor var_8374_cast_1 = split(axis = var_8374_axis_0, split_sizes = var_8374_split_sizes_0, x = var_8373_cast)[name = tensor("op_8374_cast")]; + tensor var_8376_mode_0 = const()[name = tensor("op_8376_mode_0"), val = tensor("EXACT")]; + tensor var_8376_cast = gelu(mode = var_8376_mode_0, x = var_8374_cast_1)[name = tensor("op_8376_cast")]; + tensor input_501_cast = mul(x = var_8374_cast_0, y = var_8376_cast)[name = tensor("input_501_cast")]; + tensor var_8380 = const()[name = tensor("op_8380"), val = tensor([1, 1])]; + tensor var_8382 = const()[name = tensor("op_8382"), val = tensor([1, 1])]; + tensor var_8384_pad_type_0 = const()[name = tensor("op_8384_pad_type_0"), val = tensor("custom")]; + tensor var_8384_pad_0 = const()[name = tensor("op_8384_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1120398016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1125313280))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1125313472)))]; + tensor var_8384_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_8382, groups = var_6865, pad = var_8384_pad_0, pad_type = var_8384_pad_type_0, strides = var_8380, weight = up_blocks_0_attentions_0_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_501_cast)[name = tensor("op_8384_cast")]; + tensor inputs_253_cast = add(x = var_8384_cast, y = inputs_251_cast)[name = tensor("inputs_253_cast")]; + tensor var_8394 = const()[name = tensor("op_8394"), val = tensor([1])]; + tensor channels_mean_253_cast = reduce_mean(axes = var_8394, keep_dims = var_6860, x = inputs_253_cast)[name = tensor("channels_mean_253_cast")]; + tensor zero_mean_253_cast = sub(x = inputs_253_cast, y = channels_mean_253_cast)[name = tensor("zero_mean_253_cast")]; + 
tensor zero_mean_sq_253_cast = mul(x = zero_mean_253_cast, y = zero_mean_253_cast)[name = tensor("zero_mean_sq_253_cast")]; + tensor var_8398 = const()[name = tensor("op_8398"), val = tensor([1])]; + tensor var_8399_cast = reduce_mean(axes = var_8398, keep_dims = var_6860, x = zero_mean_sq_253_cast)[name = tensor("op_8399_cast")]; + tensor var_8400_to_fp16 = const()[name = tensor("op_8400_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8401_cast = add(x = var_8399_cast, y = var_8400_to_fp16)[name = tensor("op_8401_cast")]; + tensor denom_253_epsilon_0_to_fp16 = const()[name = tensor("denom_253_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_253_cast = rsqrt(epsilon = denom_253_epsilon_0_to_fp16, x = var_8401_cast)[name = tensor("denom_253_cast")]; + tensor out_253_cast = mul(x = zero_mean_253_cast, y = denom_253_cast)[name = tensor("out_253_cast")]; + tensor var_8405_to_fp16 = const()[name = tensor("op_8405_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1125316096)))]; + tensor var_8406_cast = add(x = out_253_cast, y = var_8405_to_fp16)[name = tensor("op_8406_cast")]; + tensor var_8408_to_fp16 = const()[name = tensor("op_8408_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1125318720)))]; + tensor hidden_states_335_cast = mul(x = var_8406_cast, y = var_8408_to_fp16)[name = tensor("hidden_states_335_cast")]; + tensor var_8415 = const()[name = tensor("op_8415"), val = tensor([1, 1])]; + tensor var_8417 = const()[name = tensor("op_8417"), val = tensor([1, 1])]; + tensor q_169_pad_type_0 = const()[name = tensor("q_169_pad_type_0"), val = tensor("custom")]; + tensor q_169_pad_0 = const()[name = tensor("q_169_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1125321344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1126140608))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_169_cast = conv(dilations = var_8417, groups = var_6865, pad = q_169_pad_0, pad_type = q_169_pad_type_0, strides = var_8415, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_335_cast)[name = tensor("q_169_cast")]; + tensor var_8421 = const()[name = tensor("op_8421"), val = tensor([1, 1])]; + tensor var_8423 = const()[name = tensor("op_8423"), val = tensor([1, 1])]; + tensor k_169_pad_type_0 = const()[name = tensor("k_169_pad_type_0"), val = tensor("custom")]; + tensor k_169_pad_0 = const()[name = tensor("k_169_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1126140736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1126960000))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_169_cast = conv(dilations = var_8423, groups = var_6865, pad = k_169_pad_0, pad_type = k_169_pad_type_0, strides = var_8421, weight = 
up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_335_cast)[name = tensor("k_169_cast")]; + tensor var_8427 = const()[name = tensor("op_8427"), val = tensor([1, 1])]; + tensor var_8429 = const()[name = tensor("op_8429"), val = tensor([1, 1])]; + tensor v_169_pad_type_0 = const()[name = tensor("v_169_pad_type_0"), val = tensor("custom")]; + tensor v_169_pad_0 = const()[name = tensor("v_169_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1126960128))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1128188992))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_169_cast = conv(dilations = var_8429, groups = var_6865, pad = v_169_pad_0, pad_type = v_169_pad_type_0, strides = var_8427, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_335_cast)[name = tensor("v_169_cast")]; + tensor var_8433 = const()[name = tensor("op_8433"), val = tensor([2, 20, 64, -1])]; + tensor var_8434_cast = reshape(shape = var_8433, x = q_169_cast)[name = tensor("op_8434_cast")]; + tensor var_8435 = const()[name = tensor("op_8435"), val = tensor([2, 20, 64, -1])]; + tensor var_8436_cast = reshape(shape = var_8435, x = k_169_cast)[name = tensor("op_8436_cast")]; + tensor var_8437 = const()[name = tensor("op_8437"), val = tensor([2, 20, 64, -1])]; + tensor var_8438_cast = reshape(shape = var_8437, x = v_169_cast)[name = tensor("op_8438_cast")]; + tensor attn_weights_337_transpose_x_0 = const()[name = tensor("attn_weights_337_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_337_transpose_y_0 = const()[name = tensor("attn_weights_337_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_337_cast = matmul(transpose_x = attn_weights_337_transpose_x_0, transpose_y = attn_weights_337_transpose_y_0, x = var_8434_cast, y = var_8436_cast)[name = tensor("attn_weights_337_cast")]; + tensor attn_weights_339_cast = mul(x = attn_weights_337_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_339_cast")]; + tensor var_8442_cast = softmax(axis = var_6849, x = attn_weights_339_cast)[name = tensor("op_8442_cast")]; + tensor attn_169_transpose_x_0 = const()[name = tensor("attn_169_transpose_x_0"), val = tensor(false)]; + tensor attn_169_transpose_y_0 = const()[name = tensor("attn_169_transpose_y_0"), val = tensor(true)]; + tensor attn_169_cast = matmul(transpose_x = attn_169_transpose_x_0, transpose_y = attn_169_transpose_y_0, x = var_8438_cast, y = var_8442_cast)[name = tensor("attn_169_cast")]; + tensor var_8446 = const()[name = tensor("op_8446"), val = tensor([2, 1280, 1, -1])]; + tensor input_503_cast = reshape(shape = var_8446, x = attn_169_cast)[name = tensor("input_503_cast")]; + tensor var_8451 = const()[name = tensor("op_8451"), val = tensor([1, 1])]; + tensor var_8453 = const()[name = tensor("op_8453"), val = tensor([1, 1])]; + tensor var_8455_pad_type_0 = const()[name = tensor("op_8455_pad_type_0"), val = tensor("custom")]; + tensor var_8455_pad_0 = const()[name = tensor("op_8455_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1128189184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1129418048))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1129418240)))]; + tensor var_8455_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_8453, groups = var_6865, pad = var_8455_pad_0, pad_type = var_8455_pad_type_0, strides = var_8451, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_503_cast)[name = tensor("op_8455_cast")]; + tensor inputs_255_cast = add(x = var_8455_cast, y = inputs_253_cast)[name = tensor("inputs_255_cast")]; + tensor var_8459 = const()[name = tensor("op_8459"), val = tensor([1])]; + tensor channels_mean_255_cast = reduce_mean(axes = var_8459, keep_dims = var_6860, x = inputs_255_cast)[name = tensor("channels_mean_255_cast")]; + tensor zero_mean_255_cast = sub(x = inputs_255_cast, y = channels_mean_255_cast)[name = tensor("zero_mean_255_cast")]; + tensor zero_mean_sq_255_cast = mul(x = zero_mean_255_cast, y = zero_mean_255_cast)[name = tensor("zero_mean_sq_255_cast")]; + tensor var_8463 = const()[name = tensor("op_8463"), val = tensor([1])]; + tensor var_8464_cast = reduce_mean(axes = var_8463, keep_dims = var_6860, x = zero_mean_sq_255_cast)[name = tensor("op_8464_cast")]; + tensor var_8465_to_fp16 = const()[name = tensor("op_8465_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8466_cast = add(x = var_8464_cast, y = var_8465_to_fp16)[name = tensor("op_8466_cast")]; + tensor denom_255_epsilon_0_to_fp16 = const()[name = tensor("denom_255_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_255_cast = rsqrt(epsilon = denom_255_epsilon_0_to_fp16, x = var_8466_cast)[name = tensor("denom_255_cast")]; + tensor out_255_cast = mul(x = zero_mean_255_cast, y = denom_255_cast)[name = tensor("out_255_cast")]; + tensor var_8470_to_fp16 = const()[name = tensor("op_8470_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1129420864)))]; + tensor var_8471_cast = add(x = out_255_cast, y = var_8470_to_fp16)[name = tensor("op_8471_cast")]; + tensor var_8473_to_fp16 = const()[name = tensor("op_8473_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1129423488)))]; + tensor hidden_states_337_cast = mul(x = var_8471_cast, y = var_8473_to_fp16)[name = tensor("hidden_states_337_cast")]; + tensor var_8480 = const()[name = tensor("op_8480"), val = tensor([1, 1])]; + tensor var_8482 = const()[name = tensor("op_8482"), val = tensor([1, 1])]; + tensor q_171_pad_type_0 = const()[name = tensor("q_171_pad_type_0"), val = tensor("custom")]; + tensor q_171_pad_0 = const()[name = tensor("q_171_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1129426112))), lut = tensor([-0x1.b48p-7, 0x1.b54p-7]), name = 
tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_171_cast = conv(dilations = var_8482, groups = var_6865, pad = q_171_pad_0, pad_type = q_171_pad_type_0, strides = var_8480, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_337_cast)[name = tensor("q_171_cast")]; + tensor var_8486 = const()[name = tensor("op_8486"), val = tensor([1, 1])]; + tensor var_8488 = const()[name = tensor("op_8488"), val = tensor([1, 1])]; + tensor k_171_pad_type_0 = const()[name = tensor("k_171_pad_type_0"), val = tensor("custom")]; + tensor k_171_pad_0 = const()[name = tensor("k_171_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1129630976))), lut = tensor([-0x1.38p-7, 0x1.37p-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_171_cast = conv(dilations = var_8488, groups = var_6865, pad = k_171_pad_0, pad_type = k_171_pad_type_0, strides = var_8486, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_171_cast")]; + tensor var_8492 = const()[name = tensor("op_8492"), val = tensor([1, 1])]; + tensor var_8494 = const()[name = tensor("op_8494"), val = tensor([1, 1])]; + tensor v_171_pad_type_0 = const()[name = tensor("v_171_pad_type_0"), val = tensor("custom")]; + tensor v_171_pad_0 = const()[name = tensor("v_171_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1129958720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1131269504))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_171_cast = conv(dilations = var_8494, groups = var_6865, pad = v_171_pad_0, pad_type = v_171_pad_type_0, strides = var_8492, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_171_cast")]; + tensor var_8498 = const()[name = tensor("op_8498"), val = tensor([2, 20, 64, -1])]; + tensor var_8499_cast = reshape(shape = var_8498, x = q_171_cast)[name = tensor("op_8499_cast")]; + tensor var_8500 = const()[name = tensor("op_8500"), val = tensor([2, 20, 64, -1])]; + tensor var_8501_cast = reshape(shape = var_8500, x = k_171_cast)[name = tensor("op_8501_cast")]; + tensor var_8502 = const()[name = tensor("op_8502"), val = tensor([2, 20, 64, -1])]; + tensor var_8503_cast = reshape(shape = var_8502, x = v_171_cast)[name = tensor("op_8503_cast")]; + tensor attn_weights_341_transpose_x_0 = const()[name = tensor("attn_weights_341_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_341_transpose_y_0 = const()[name = tensor("attn_weights_341_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_341_cast = matmul(transpose_x = attn_weights_341_transpose_x_0, transpose_y = attn_weights_341_transpose_y_0, x = var_8499_cast, y = var_8501_cast)[name = tensor("attn_weights_341_cast")]; + tensor 
attn_weights_343_cast = mul(x = attn_weights_341_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_343_cast")]; + tensor var_8507_cast = softmax(axis = var_6849, x = attn_weights_343_cast)[name = tensor("op_8507_cast")]; + tensor attn_171_transpose_x_0 = const()[name = tensor("attn_171_transpose_x_0"), val = tensor(false)]; + tensor attn_171_transpose_y_0 = const()[name = tensor("attn_171_transpose_y_0"), val = tensor(true)]; + tensor attn_171_cast = matmul(transpose_x = attn_171_transpose_x_0, transpose_y = attn_171_transpose_y_0, x = var_8503_cast, y = var_8507_cast)[name = tensor("attn_171_cast")]; + tensor var_8511 = const()[name = tensor("op_8511"), val = tensor([2, 1280, 1, -1])]; + tensor input_505_cast = reshape(shape = var_8511, x = attn_171_cast)[name = tensor("input_505_cast")]; + tensor var_8516 = const()[name = tensor("op_8516"), val = tensor([1, 1])]; + tensor var_8518 = const()[name = tensor("op_8518"), val = tensor([1, 1])]; + tensor var_8520_pad_type_0 = const()[name = tensor("op_8520_pad_type_0"), val = tensor("custom")]; + tensor var_8520_pad_0 = const()[name = tensor("op_8520_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1131269632))), lut = tensor([-0x1.048p-7, 0x1.04cp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1131474496)))]; + tensor var_8520_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_8518, groups = var_6865, pad = var_8520_pad_0, pad_type = var_8520_pad_type_0, strides = var_8516, weight = up_blocks_0_attentions_0_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_505_cast)[name = tensor("op_8520_cast")]; + tensor inputs_257_cast = add(x = var_8520_cast, y = inputs_255_cast)[name = tensor("inputs_257_cast")]; + tensor var_8524 = const()[name = tensor("op_8524"), val = tensor([1])]; + tensor channels_mean_257_cast = reduce_mean(axes = var_8524, keep_dims = var_6860, x = inputs_257_cast)[name = tensor("channels_mean_257_cast")]; + tensor zero_mean_257_cast = sub(x = inputs_257_cast, y = channels_mean_257_cast)[name = tensor("zero_mean_257_cast")]; + tensor zero_mean_sq_257_cast = mul(x = zero_mean_257_cast, y = zero_mean_257_cast)[name = tensor("zero_mean_sq_257_cast")]; + tensor var_8528 = const()[name = tensor("op_8528"), val = tensor([1])]; + tensor var_8529_cast = reduce_mean(axes = var_8528, keep_dims = var_6860, x = zero_mean_sq_257_cast)[name = tensor("op_8529_cast")]; + tensor var_8530_to_fp16 = const()[name = tensor("op_8530_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8531_cast = add(x = var_8529_cast, y = var_8530_to_fp16)[name = tensor("op_8531_cast")]; + tensor denom_257_epsilon_0_to_fp16 = const()[name = tensor("denom_257_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_257_cast = rsqrt(epsilon = denom_257_epsilon_0_to_fp16, x = var_8531_cast)[name = tensor("denom_257_cast")]; + tensor out_257_cast = mul(x = zero_mean_257_cast, y = denom_257_cast)[name = 
tensor("out_257_cast")]; + tensor var_8535_to_fp16 = const()[name = tensor("op_8535_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1131477120)))]; + tensor var_8536_cast = add(x = out_257_cast, y = var_8535_to_fp16)[name = tensor("op_8536_cast")]; + tensor var_8538_to_fp16 = const()[name = tensor("op_8538_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1131479744)))]; + tensor input_507_cast = mul(x = var_8536_cast, y = var_8538_to_fp16)[name = tensor("input_507_cast")]; + tensor var_8546 = const()[name = tensor("op_8546"), val = tensor([1, 1])]; + tensor var_8548 = const()[name = tensor("op_8548"), val = tensor([1, 1])]; + tensor var_8550_pad_type_0 = const()[name = tensor("op_8550_pad_type_0"), val = tensor("custom")]; + tensor var_8550_pad_0 = const()[name = tensor("op_8550_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1131482368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1141312832))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1141313024)))]; + tensor var_8550_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_8548, groups = var_6865, pad = var_8550_pad_0, pad_type = var_8550_pad_type_0, strides = var_8546, weight = up_blocks_0_attentions_0_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_507_cast)[name = tensor("op_8550_cast")]; + tensor var_8551_split_sizes_0 = const()[name = tensor("op_8551_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8551_axis_0 = const()[name = tensor("op_8551_axis_0"), val = tensor(1)]; + tensor var_8551_cast_0, tensor var_8551_cast_1 = split(axis = var_8551_axis_0, split_sizes = var_8551_split_sizes_0, x = var_8550_cast)[name = tensor("op_8551_cast")]; + tensor var_8553_mode_0 = const()[name = tensor("op_8553_mode_0"), val = tensor("EXACT")]; + tensor var_8553_cast = gelu(mode = var_8553_mode_0, x = var_8551_cast_1)[name = tensor("op_8553_cast")]; + tensor input_509_cast = mul(x = var_8551_cast_0, y = var_8553_cast)[name = tensor("input_509_cast")]; + tensor var_8557 = const()[name = tensor("op_8557"), val = tensor([1, 1])]; + tensor var_8559 = const()[name = tensor("op_8559"), val = tensor([1, 1])]; + tensor var_8561_pad_type_0 = const()[name = tensor("op_8561_pad_type_0"), val = tensor("custom")]; + tensor var_8561_pad_0 = const()[name = tensor("op_8561_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1141333568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1146248832))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor 
up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1146249024)))]; + tensor var_8561_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_8559, groups = var_6865, pad = var_8561_pad_0, pad_type = var_8561_pad_type_0, strides = var_8557, weight = up_blocks_0_attentions_0_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_509_cast)[name = tensor("op_8561_cast")]; + tensor inputs_259_cast = add(x = var_8561_cast, y = inputs_257_cast)[name = tensor("inputs_259_cast")]; + tensor var_8571 = const()[name = tensor("op_8571"), val = tensor([1])]; + tensor channels_mean_259_cast = reduce_mean(axes = var_8571, keep_dims = var_6860, x = inputs_259_cast)[name = tensor("channels_mean_259_cast")]; + tensor zero_mean_259_cast = sub(x = inputs_259_cast, y = channels_mean_259_cast)[name = tensor("zero_mean_259_cast")]; + tensor zero_mean_sq_259_cast = mul(x = zero_mean_259_cast, y = zero_mean_259_cast)[name = tensor("zero_mean_sq_259_cast")]; + tensor var_8575 = const()[name = tensor("op_8575"), val = tensor([1])]; + tensor var_8576_cast = reduce_mean(axes = var_8575, keep_dims = var_6860, x = zero_mean_sq_259_cast)[name = tensor("op_8576_cast")]; + tensor var_8577_to_fp16 = const()[name = tensor("op_8577_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8578_cast = add(x = var_8576_cast, y = var_8577_to_fp16)[name = tensor("op_8578_cast")]; + tensor denom_259_epsilon_0_to_fp16 = const()[name = tensor("denom_259_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_259_cast = rsqrt(epsilon = denom_259_epsilon_0_to_fp16, x = var_8578_cast)[name = tensor("denom_259_cast")]; + tensor out_259_cast = mul(x = zero_mean_259_cast, y = denom_259_cast)[name = tensor("out_259_cast")]; + tensor var_8582_to_fp16 = const()[name = tensor("op_8582_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1146251648)))]; + tensor var_8583_cast = add(x = out_259_cast, y = var_8582_to_fp16)[name = tensor("op_8583_cast")]; + tensor var_8585_to_fp16 = const()[name = tensor("op_8585_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1146254272)))]; + tensor hidden_states_341_cast = mul(x = var_8583_cast, y = var_8585_to_fp16)[name = tensor("hidden_states_341_cast")]; + tensor var_8592 = const()[name = tensor("op_8592"), val = tensor([1, 1])]; + tensor var_8594 = const()[name = tensor("op_8594"), val = tensor([1, 1])]; + tensor q_173_pad_type_0 = const()[name = tensor("q_173_pad_type_0"), val = tensor("custom")]; + tensor q_173_pad_0 = const()[name = tensor("q_173_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1146256896))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1147076160))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_173_cast = conv(dilations = var_8594, groups = var_6865, pad = q_173_pad_0, pad_type = q_173_pad_type_0, strides = var_8592, weight = 
up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_341_cast)[name = tensor("q_173_cast")]; + tensor var_8598 = const()[name = tensor("op_8598"), val = tensor([1, 1])]; + tensor var_8600 = const()[name = tensor("op_8600"), val = tensor([1, 1])]; + tensor k_173_pad_type_0 = const()[name = tensor("k_173_pad_type_0"), val = tensor("custom")]; + tensor k_173_pad_0 = const()[name = tensor("k_173_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1147076288))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1147895552))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_173_cast = conv(dilations = var_8600, groups = var_6865, pad = k_173_pad_0, pad_type = k_173_pad_type_0, strides = var_8598, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_341_cast)[name = tensor("k_173_cast")]; + tensor var_8604 = const()[name = tensor("op_8604"), val = tensor([1, 1])]; + tensor var_8606 = const()[name = tensor("op_8606"), val = tensor([1, 1])]; + tensor v_173_pad_type_0 = const()[name = tensor("v_173_pad_type_0"), val = tensor("custom")]; + tensor v_173_pad_0 = const()[name = tensor("v_173_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1147895680))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1149124544))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_173_cast = conv(dilations = var_8606, groups = var_6865, pad = v_173_pad_0, pad_type = v_173_pad_type_0, strides = var_8604, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_341_cast)[name = tensor("v_173_cast")]; + tensor var_8610 = const()[name = tensor("op_8610"), val = tensor([2, 20, 64, -1])]; + tensor var_8611_cast = reshape(shape = var_8610, x = q_173_cast)[name = tensor("op_8611_cast")]; + tensor var_8612 = const()[name = tensor("op_8612"), val = tensor([2, 20, 64, -1])]; + tensor var_8613_cast = reshape(shape = var_8612, x = k_173_cast)[name = tensor("op_8613_cast")]; + tensor var_8614 = const()[name = tensor("op_8614"), val = tensor([2, 20, 64, -1])]; + tensor var_8615_cast = reshape(shape = var_8614, x = v_173_cast)[name = tensor("op_8615_cast")]; + tensor attn_weights_345_transpose_x_0 = const()[name = tensor("attn_weights_345_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_345_transpose_y_0 = const()[name = tensor("attn_weights_345_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_345_cast = matmul(transpose_x = attn_weights_345_transpose_x_0, transpose_y = attn_weights_345_transpose_y_0, x = var_8611_cast, y = var_8613_cast)[name = tensor("attn_weights_345_cast")]; + tensor attn_weights_347_cast = mul(x = attn_weights_345_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_347_cast")]; + tensor var_8619_cast = softmax(axis = var_6849, x = attn_weights_347_cast)[name = 
tensor("op_8619_cast")]; + tensor attn_173_transpose_x_0 = const()[name = tensor("attn_173_transpose_x_0"), val = tensor(false)]; + tensor attn_173_transpose_y_0 = const()[name = tensor("attn_173_transpose_y_0"), val = tensor(true)]; + tensor attn_173_cast = matmul(transpose_x = attn_173_transpose_x_0, transpose_y = attn_173_transpose_y_0, x = var_8615_cast, y = var_8619_cast)[name = tensor("attn_173_cast")]; + tensor var_8623 = const()[name = tensor("op_8623"), val = tensor([2, 1280, 1, -1])]; + tensor input_511_cast = reshape(shape = var_8623, x = attn_173_cast)[name = tensor("input_511_cast")]; + tensor var_8628 = const()[name = tensor("op_8628"), val = tensor([1, 1])]; + tensor var_8630 = const()[name = tensor("op_8630"), val = tensor([1, 1])]; + tensor var_8632_pad_type_0 = const()[name = tensor("op_8632_pad_type_0"), val = tensor("custom")]; + tensor var_8632_pad_0 = const()[name = tensor("op_8632_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1149124736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1150353600))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1150353792)))]; + tensor var_8632_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_8630, groups = var_6865, pad = var_8632_pad_0, pad_type = var_8632_pad_type_0, strides = var_8628, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_511_cast)[name = tensor("op_8632_cast")]; + tensor inputs_261_cast = add(x = var_8632_cast, y = inputs_259_cast)[name = tensor("inputs_261_cast")]; + tensor var_8636 = const()[name = tensor("op_8636"), val = tensor([1])]; + tensor channels_mean_261_cast = reduce_mean(axes = var_8636, keep_dims = var_6860, x = inputs_261_cast)[name = tensor("channels_mean_261_cast")]; + tensor zero_mean_261_cast = sub(x = inputs_261_cast, y = channels_mean_261_cast)[name = tensor("zero_mean_261_cast")]; + tensor zero_mean_sq_261_cast = mul(x = zero_mean_261_cast, y = zero_mean_261_cast)[name = tensor("zero_mean_sq_261_cast")]; + tensor var_8640 = const()[name = tensor("op_8640"), val = tensor([1])]; + tensor var_8641_cast = reduce_mean(axes = var_8640, keep_dims = var_6860, x = zero_mean_sq_261_cast)[name = tensor("op_8641_cast")]; + tensor var_8642_to_fp16 = const()[name = tensor("op_8642_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8643_cast = add(x = var_8641_cast, y = var_8642_to_fp16)[name = tensor("op_8643_cast")]; + tensor denom_261_epsilon_0_to_fp16 = const()[name = tensor("denom_261_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_261_cast = rsqrt(epsilon = denom_261_epsilon_0_to_fp16, x = var_8643_cast)[name = tensor("denom_261_cast")]; + tensor out_261_cast = mul(x = zero_mean_261_cast, y = denom_261_cast)[name = tensor("out_261_cast")]; + tensor var_8647_to_fp16 = const()[name = tensor("op_8647_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1150356416)))]; + tensor var_8648_cast = add(x = out_261_cast, y = var_8647_to_fp16)[name = tensor("op_8648_cast")]; + tensor var_8650_to_fp16 = const()[name = tensor("op_8650_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1150359040)))]; + tensor hidden_states_343_cast = mul(x = var_8648_cast, y = var_8650_to_fp16)[name = tensor("hidden_states_343_cast")]; + tensor var_8657 = const()[name = tensor("op_8657"), val = tensor([1, 1])]; + tensor var_8659 = const()[name = tensor("op_8659"), val = tensor([1, 1])]; + tensor q_175_pad_type_0 = const()[name = tensor("q_175_pad_type_0"), val = tensor("custom")]; + tensor q_175_pad_0 = const()[name = tensor("q_175_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1150361664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1151180928))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_175_cast = conv(dilations = var_8659, groups = var_6865, pad = q_175_pad_0, pad_type = q_175_pad_type_0, strides = var_8657, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_343_cast)[name = tensor("q_175_cast")]; + tensor var_8663 = const()[name = tensor("op_8663"), val = tensor([1, 1])]; + tensor var_8665 = const()[name = tensor("op_8665"), val = tensor([1, 1])]; + tensor k_175_pad_type_0 = const()[name = tensor("k_175_pad_type_0"), val = tensor("custom")]; + tensor k_175_pad_0 = const()[name = tensor("k_175_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1151181056))), lut = tensor([-0x1.ffcp-7, -0x1.3cp-8, 0x1.3b8p-8, 0x1.ffp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_175_cast = conv(dilations = var_8665, groups = var_6865, pad = k_175_pad_0, pad_type = k_175_pad_type_0, strides = var_8663, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_175_cast")]; + tensor var_8669 = const()[name = tensor("op_8669"), val = tensor([1, 1])]; + tensor var_8671 = const()[name = tensor("op_8671"), val = tensor([1, 1])]; + tensor v_175_pad_type_0 = const()[name = tensor("v_175_pad_type_0"), val = tensor("custom")]; + tensor v_175_pad_0 = const()[name = tensor("v_175_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1151836480))), lut = tensor([-0x1.57p-7, 0x1.57cp-7]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_175_cast = conv(dilations = var_8671, groups = var_6865, pad = v_175_pad_0, pad_type = v_175_pad_type_0, strides = var_8669, weight = 
up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_175_cast")]; + tensor var_8675 = const()[name = tensor("op_8675"), val = tensor([2, 20, 64, -1])]; + tensor var_8676_cast = reshape(shape = var_8675, x = q_175_cast)[name = tensor("op_8676_cast")]; + tensor var_8677 = const()[name = tensor("op_8677"), val = tensor([2, 20, 64, -1])]; + tensor var_8678_cast = reshape(shape = var_8677, x = k_175_cast)[name = tensor("op_8678_cast")]; + tensor var_8679 = const()[name = tensor("op_8679"), val = tensor([2, 20, 64, -1])]; + tensor var_8680_cast = reshape(shape = var_8679, x = v_175_cast)[name = tensor("op_8680_cast")]; + tensor attn_weights_349_transpose_x_0 = const()[name = tensor("attn_weights_349_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_349_transpose_y_0 = const()[name = tensor("attn_weights_349_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_349_cast = matmul(transpose_x = attn_weights_349_transpose_x_0, transpose_y = attn_weights_349_transpose_y_0, x = var_8676_cast, y = var_8678_cast)[name = tensor("attn_weights_349_cast")]; + tensor attn_weights_351_cast = mul(x = attn_weights_349_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_351_cast")]; + tensor var_8684_cast = softmax(axis = var_6849, x = attn_weights_351_cast)[name = tensor("op_8684_cast")]; + tensor attn_175_transpose_x_0 = const()[name = tensor("attn_175_transpose_x_0"), val = tensor(false)]; + tensor attn_175_transpose_y_0 = const()[name = tensor("attn_175_transpose_y_0"), val = tensor(true)]; + tensor attn_175_cast = matmul(transpose_x = attn_175_transpose_x_0, transpose_y = attn_175_transpose_y_0, x = var_8680_cast, y = var_8684_cast)[name = tensor("attn_175_cast")]; + tensor var_8688 = const()[name = tensor("op_8688"), val = tensor([2, 1280, 1, -1])]; + tensor input_513_cast = reshape(shape = var_8688, x = attn_175_cast)[name = tensor("input_513_cast")]; + tensor var_8693 = const()[name = tensor("op_8693"), val = tensor([1, 1])]; + tensor var_8695 = const()[name = tensor("op_8695"), val = tensor([1, 1])]; + tensor var_8697_pad_type_0 = const()[name = tensor("op_8697_pad_type_0"), val = tensor("custom")]; + tensor var_8697_pad_0 = const()[name = tensor("op_8697_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1152164224))), lut = tensor([-0x1.bep-8, 0x1.bdcp-8]), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1152369088)))]; + tensor var_8697_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_8695, groups = var_6865, pad = var_8697_pad_0, pad_type = var_8697_pad_type_0, strides = var_8693, weight = up_blocks_0_attentions_0_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_513_cast)[name = tensor("op_8697_cast")]; + tensor inputs_263_cast = add(x = var_8697_cast, y = inputs_261_cast)[name = tensor("inputs_263_cast")]; + tensor var_8701 = const()[name = tensor("op_8701"), val = 
tensor([1])]; + tensor channels_mean_263_cast = reduce_mean(axes = var_8701, keep_dims = var_6860, x = inputs_263_cast)[name = tensor("channels_mean_263_cast")]; + tensor zero_mean_263_cast = sub(x = inputs_263_cast, y = channels_mean_263_cast)[name = tensor("zero_mean_263_cast")]; + tensor zero_mean_sq_263_cast = mul(x = zero_mean_263_cast, y = zero_mean_263_cast)[name = tensor("zero_mean_sq_263_cast")]; + tensor var_8705 = const()[name = tensor("op_8705"), val = tensor([1])]; + tensor var_8706_cast = reduce_mean(axes = var_8705, keep_dims = var_6860, x = zero_mean_sq_263_cast)[name = tensor("op_8706_cast")]; + tensor var_8707_to_fp16 = const()[name = tensor("op_8707_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8708_cast = add(x = var_8706_cast, y = var_8707_to_fp16)[name = tensor("op_8708_cast")]; + tensor denom_263_epsilon_0_to_fp16 = const()[name = tensor("denom_263_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_263_cast = rsqrt(epsilon = denom_263_epsilon_0_to_fp16, x = var_8708_cast)[name = tensor("denom_263_cast")]; + tensor out_263_cast = mul(x = zero_mean_263_cast, y = denom_263_cast)[name = tensor("out_263_cast")]; + tensor var_8712_to_fp16 = const()[name = tensor("op_8712_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1152371712)))]; + tensor var_8713_cast = add(x = out_263_cast, y = var_8712_to_fp16)[name = tensor("op_8713_cast")]; + tensor var_8715_to_fp16 = const()[name = tensor("op_8715_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1152374336)))]; + tensor input_515_cast = mul(x = var_8713_cast, y = var_8715_to_fp16)[name = tensor("input_515_cast")]; + tensor var_8723 = const()[name = tensor("op_8723"), val = tensor([1, 1])]; + tensor var_8725 = const()[name = tensor("op_8725"), val = tensor([1, 1])]; + tensor var_8727_pad_type_0 = const()[name = tensor("op_8727_pad_type_0"), val = tensor("custom")]; + tensor var_8727_pad_0 = const()[name = tensor("op_8727_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1152376960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1165484224))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1165484800)))]; + tensor var_8727_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_8725, groups = var_6865, pad = var_8727_pad_0, pad_type = var_8727_pad_type_0, strides = var_8723, weight = up_blocks_0_attentions_0_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_515_cast)[name = tensor("op_8727_cast")]; + tensor var_8728_split_sizes_0 = const()[name = tensor("op_8728_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_8728_axis_0 = const()[name = tensor("op_8728_axis_0"), val = tensor(1)]; + tensor var_8728_cast_0, tensor var_8728_cast_1 = split(axis = var_8728_axis_0, split_sizes = var_8728_split_sizes_0, x = var_8727_cast)[name = 
tensor("op_8728_cast")]; + tensor var_8730_mode_0 = const()[name = tensor("op_8730_mode_0"), val = tensor("EXACT")]; + tensor var_8730_cast = gelu(mode = var_8730_mode_0, x = var_8728_cast_1)[name = tensor("op_8730_cast")]; + tensor input_517_cast = mul(x = var_8728_cast_0, y = var_8730_cast)[name = tensor("input_517_cast")]; + tensor var_8734 = const()[name = tensor("op_8734"), val = tensor([1, 1])]; + tensor var_8736 = const()[name = tensor("op_8736"), val = tensor([1, 1])]; + tensor var_8738_pad_type_0 = const()[name = tensor("op_8738_pad_type_0"), val = tensor("custom")]; + tensor var_8738_pad_0 = const()[name = tensor("op_8738_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1165505344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1172059008))), name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1172059584)))]; + tensor var_8738_cast = conv(bias = up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_8736, groups = var_6865, pad = var_8738_pad_0, pad_type = var_8738_pad_type_0, strides = var_8734, weight = up_blocks_0_attentions_0_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_517_cast)[name = tensor("op_8738_cast")]; + tensor hidden_states_347_cast = add(x = var_8738_cast, y = inputs_263_cast)[name = tensor("hidden_states_347_cast")]; + tensor var_8740 = const()[name = tensor("op_8740"), val = tensor([2, 1280, 32, 32])]; + tensor input_519_cast = reshape(shape = var_8740, x = hidden_states_347_cast)[name = tensor("input_519_cast")]; + tensor var_8744 = const()[name = tensor("op_8744"), val = tensor([1, 1])]; + tensor var_8746 = const()[name = tensor("op_8746"), val = tensor([1, 1])]; + tensor hidden_states_349_pad_type_0 = const()[name = tensor("hidden_states_349_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_349_pad_0 = const()[name = tensor("hidden_states_349_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_0_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1172062208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1173700672))), name = tensor("up_blocks_0_attentions_0_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1173701248)))]; + tensor hidden_states_349_cast = conv(bias = up_blocks_0_attentions_0_proj_out_bias_to_fp16, dilations = var_8746, groups = var_6865, pad = hidden_states_349_pad_0, pad_type = hidden_states_349_pad_type_0, strides = var_8744, weight = up_blocks_0_attentions_0_proj_out_weight_to_fp16_palettized, x = input_519_cast)[name = tensor("hidden_states_349_cast")]; + tensor hidden_states_351_cast = add(x = 
hidden_states_349_cast, y = hidden_states_283_cast)[name = tensor("hidden_states_351_cast")]; + tensor input_521_interleave_0 = const()[name = tensor("input_521_interleave_0"), val = tensor(false)]; + tensor input_521_cast = concat(axis = var_6865, interleave = input_521_interleave_0, values = (hidden_states_351_cast, input_213_cast))[name = tensor("input_521_cast")]; + tensor reshape_96_shape_0 = const()[name = tensor("reshape_96_shape_0"), val = tensor([2, 32, 80, 32, 32])]; + tensor reshape_96_cast = reshape(shape = reshape_96_shape_0, x = input_521_cast)[name = tensor("reshape_96_cast")]; + tensor reduce_mean_72_axes_0 = const()[name = tensor("reduce_mean_72_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_72_keep_dims_0 = const()[name = tensor("reduce_mean_72_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_72_cast = reduce_mean(axes = reduce_mean_72_axes_0, keep_dims = reduce_mean_72_keep_dims_0, x = reshape_96_cast)[name = tensor("reduce_mean_72_cast")]; + tensor sub_48_cast = sub(x = reshape_96_cast, y = reduce_mean_72_cast)[name = tensor("sub_48_cast")]; + tensor square_24_cast = square(x = sub_48_cast)[name = tensor("square_24_cast")]; + tensor reduce_mean_74_axes_0 = const()[name = tensor("reduce_mean_74_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_74_keep_dims_0 = const()[name = tensor("reduce_mean_74_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_74_cast = reduce_mean(axes = reduce_mean_74_axes_0, keep_dims = reduce_mean_74_keep_dims_0, x = square_24_cast)[name = tensor("reduce_mean_74_cast")]; + tensor add_48_y_0_to_fp16 = const()[name = tensor("add_48_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_48_cast = add(x = reduce_mean_74_cast, y = add_48_y_0_to_fp16)[name = tensor("add_48_cast")]; + tensor sqrt_24_cast = sqrt(x = add_48_cast)[name = tensor("sqrt_24_cast")]; + tensor real_div_24_cast = real_div(x = sub_48_cast, y = sqrt_24_cast)[name = tensor("real_div_24_cast")]; + tensor reshape_97_shape_0 = const()[name = tensor("reshape_97_shape_0"), val = tensor([2, 2560, 32, 32])]; + tensor reshape_97_cast = reshape(shape = reshape_97_shape_0, x = real_div_24_cast)[name = tensor("reshape_97_cast")]; + tensor add_49_gamma_0_to_fp16 = const()[name = tensor("add_49_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1173703872)))]; + tensor add_49_beta_0_to_fp16 = const()[name = tensor("add_49_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1173709056)))]; + tensor add_49_epsilon_0_to_fp16 = const()[name = tensor("add_49_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_49_cast = batch_norm(beta = add_49_beta_0_to_fp16, epsilon = add_49_epsilon_0_to_fp16, gamma = add_49_gamma_0_to_fp16, mean = add_43_mean_0_to_fp16, variance = add_43_variance_0_to_fp16, x = reshape_97_cast)[name = tensor("add_49_cast")]; + tensor input_525_cast = silu(x = add_49_cast)[name = tensor("input_525_cast")]; + tensor var_8764 = const()[name = tensor("op_8764"), val = tensor([1, 1])]; + tensor var_8766 = const()[name = tensor("op_8766"), val = tensor([1, 1])]; + tensor hidden_states_353_pad_type_0 = const()[name = tensor("hidden_states_353_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_353_pad_0 = const()[name = tensor("hidden_states_353_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_1_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1173714240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1203205504))), name = tensor("up_blocks_0_resnets_1_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 3, 3])]; + tensor up_blocks_0_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1203206080)))]; + tensor hidden_states_353_cast = conv(bias = up_blocks_0_resnets_1_conv1_bias_to_fp16, dilations = var_8766, groups = var_6865, pad = hidden_states_353_pad_0, pad_type = hidden_states_353_pad_type_0, strides = var_8764, weight = up_blocks_0_resnets_1_conv1_weight_to_fp16_palettized, x = input_525_cast)[name = tensor("hidden_states_353_cast")]; + tensor var_8772 = const()[name = tensor("op_8772"), val = tensor([1, 1])]; + tensor var_8774 = const()[name = tensor("op_8774"), val = tensor([1, 1])]; + tensor temb_19_pad_type_0 = const()[name = tensor("temb_19_pad_type_0"), val = tensor("custom")]; + tensor temb_19_pad_0 = const()[name = tensor("temb_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1203208704))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1204847168))), name = tensor("up_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1204847744)))]; + tensor temb_19_cast = conv(bias = up_blocks_0_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_8774, groups = var_6865, pad = temb_19_pad_0, pad_type = temb_19_pad_type_0, strides = var_8772, weight = up_blocks_0_resnets_1_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_19_cast")]; + tensor input_529_cast = add(x = hidden_states_353_cast, y = temb_19_cast)[name = tensor("input_529_cast")]; + tensor reshape_100_shape_0 = const()[name = tensor("reshape_100_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_100_cast = reshape(shape = reshape_100_shape_0, x = input_529_cast)[name = tensor("reshape_100_cast")]; + tensor reduce_mean_75_axes_0 = const()[name = tensor("reduce_mean_75_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_75_keep_dims_0 = const()[name = tensor("reduce_mean_75_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_75_cast = reduce_mean(axes = reduce_mean_75_axes_0, keep_dims = reduce_mean_75_keep_dims_0, x = reshape_100_cast)[name = tensor("reduce_mean_75_cast")]; + tensor sub_50_cast = sub(x = reshape_100_cast, y = reduce_mean_75_cast)[name = tensor("sub_50_cast")]; + tensor square_25_cast = square(x = sub_50_cast)[name = tensor("square_25_cast")]; + tensor reduce_mean_77_axes_0 = const()[name = tensor("reduce_mean_77_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_77_keep_dims_0 = const()[name = tensor("reduce_mean_77_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_77_cast = reduce_mean(axes = reduce_mean_77_axes_0, keep_dims = reduce_mean_77_keep_dims_0, x = square_25_cast)[name = tensor("reduce_mean_77_cast")]; + tensor add_50_y_0_to_fp16 = const()[name = 
tensor("add_50_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_50_cast = add(x = reduce_mean_77_cast, y = add_50_y_0_to_fp16)[name = tensor("add_50_cast")]; + tensor sqrt_25_cast = sqrt(x = add_50_cast)[name = tensor("sqrt_25_cast")]; + tensor real_div_25_cast = real_div(x = sub_50_cast, y = sqrt_25_cast)[name = tensor("real_div_25_cast")]; + tensor reshape_101_shape_0 = const()[name = tensor("reshape_101_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_101_cast = reshape(shape = reshape_101_shape_0, x = real_div_25_cast)[name = tensor("reshape_101_cast")]; + tensor add_51_gamma_0_to_fp16 = const()[name = tensor("add_51_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1204850368)))]; + tensor add_51_beta_0_to_fp16 = const()[name = tensor("add_51_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1204852992)))]; + tensor add_51_epsilon_0_to_fp16 = const()[name = tensor("add_51_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_51_cast = batch_norm(beta = add_51_beta_0_to_fp16, epsilon = add_51_epsilon_0_to_fp16, gamma = add_51_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_101_cast)[name = tensor("add_51_cast")]; + tensor input_533_cast = silu(x = add_51_cast)[name = tensor("input_533_cast")]; + tensor var_8784 = const()[name = tensor("op_8784"), val = tensor([1, 1])]; + tensor var_8786 = const()[name = tensor("op_8786"), val = tensor([1, 1])]; + tensor hidden_states_355_pad_type_0 = const()[name = tensor("hidden_states_355_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_355_pad_0 = const()[name = tensor("hidden_states_355_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_1_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1204855616))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1219601280))), name = tensor("up_blocks_0_resnets_1_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor up_blocks_0_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1219601856)))]; + tensor hidden_states_355_cast = conv(bias = up_blocks_0_resnets_1_conv2_bias_to_fp16, dilations = var_8786, groups = var_6865, pad = hidden_states_355_pad_0, pad_type = hidden_states_355_pad_type_0, strides = var_8784, weight = up_blocks_0_resnets_1_conv2_weight_to_fp16_palettized, x = input_533_cast)[name = tensor("hidden_states_355_cast")]; + tensor var_8791 = const()[name = tensor("op_8791"), val = tensor([1, 1])]; + tensor var_8793 = const()[name = tensor("op_8793"), val = tensor([1, 1])]; + tensor x_7_pad_type_0 = const()[name = tensor("x_7_pad_type_0"), val = tensor("custom")]; + tensor x_7_pad_0 = const()[name = tensor("x_7_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_1_conv_shortcut_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1219604480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1222881344))), name = tensor("up_blocks_0_resnets_1_conv_shortcut_weight_to_fp16_palettized"), shape = tensor([1280, 2560, 1, 1])]; + tensor 
up_blocks_0_resnets_1_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_1_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1222881920)))]; + tensor x_7_cast = conv(bias = up_blocks_0_resnets_1_conv_shortcut_bias_to_fp16, dilations = var_8793, groups = var_6865, pad = x_7_pad_0, pad_type = x_7_pad_type_0, strides = var_8791, weight = up_blocks_0_resnets_1_conv_shortcut_weight_to_fp16_palettized, x = input_521_cast)[name = tensor("x_7_cast")]; + tensor hidden_states_357_cast = add(x = x_7_cast, y = hidden_states_355_cast)[name = tensor("hidden_states_357_cast")]; + tensor reshape_104_shape_0 = const()[name = tensor("reshape_104_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_104_cast = reshape(shape = reshape_104_shape_0, x = hidden_states_357_cast)[name = tensor("reshape_104_cast")]; + tensor reduce_mean_78_axes_0 = const()[name = tensor("reduce_mean_78_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_78_keep_dims_0 = const()[name = tensor("reduce_mean_78_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_78_cast = reduce_mean(axes = reduce_mean_78_axes_0, keep_dims = reduce_mean_78_keep_dims_0, x = reshape_104_cast)[name = tensor("reduce_mean_78_cast")]; + tensor sub_52_cast = sub(x = reshape_104_cast, y = reduce_mean_78_cast)[name = tensor("sub_52_cast")]; + tensor square_26_cast = square(x = sub_52_cast)[name = tensor("square_26_cast")]; + tensor reduce_mean_80_axes_0 = const()[name = tensor("reduce_mean_80_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_80_keep_dims_0 = const()[name = tensor("reduce_mean_80_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_80_cast = reduce_mean(axes = reduce_mean_80_axes_0, keep_dims = reduce_mean_80_keep_dims_0, x = square_26_cast)[name = tensor("reduce_mean_80_cast")]; + tensor add_52_y_0_to_fp16 = const()[name = tensor("add_52_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_52_cast = add(x = reduce_mean_80_cast, y = add_52_y_0_to_fp16)[name = tensor("add_52_cast")]; + tensor sqrt_26_cast = sqrt(x = add_52_cast)[name = tensor("sqrt_26_cast")]; + tensor real_div_26_cast = real_div(x = sub_52_cast, y = sqrt_26_cast)[name = tensor("real_div_26_cast")]; + tensor reshape_105_shape_0 = const()[name = tensor("reshape_105_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_105_cast = reshape(shape = reshape_105_shape_0, x = real_div_26_cast)[name = tensor("reshape_105_cast")]; + tensor add_53_gamma_0_to_fp16 = const()[name = tensor("add_53_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1222884544)))]; + tensor add_53_beta_0_to_fp16 = const()[name = tensor("add_53_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1222887168)))]; + tensor add_53_epsilon_0_to_fp16 = const()[name = tensor("add_53_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_53_cast = batch_norm(beta = add_53_beta_0_to_fp16, epsilon = add_53_epsilon_0_to_fp16, gamma = add_53_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_105_cast)[name = tensor("add_53_cast")]; + tensor var_8831 = const()[name = tensor("op_8831"), val = tensor([1, 1])]; + tensor var_8833 = const()[name = tensor("op_8833"), val = tensor([1, 1])]; + tensor hidden_states_359_pad_type_0 = const()[name = tensor("hidden_states_359_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_359_pad_0 =
const()[name = tensor("hidden_states_359_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_proj_in_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1222889792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1224528256))), name = tensor("up_blocks_0_attentions_1_proj_in_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1224528832)))]; + tensor hidden_states_359_cast = conv(bias = up_blocks_0_attentions_1_proj_in_bias_to_fp16, dilations = var_8833, groups = var_6865, pad = hidden_states_359_pad_0, pad_type = hidden_states_359_pad_type_0, strides = var_8831, weight = up_blocks_0_attentions_1_proj_in_weight_to_fp16_palettized, x = add_53_cast)[name = tensor("hidden_states_359_cast")]; + tensor var_8838 = const()[name = tensor("op_8838"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_265_cast = reshape(shape = var_8838, x = hidden_states_359_cast)[name = tensor("inputs_265_cast")]; + tensor var_8848 = const()[name = tensor("op_8848"), val = tensor([1])]; + tensor channels_mean_265_cast = reduce_mean(axes = var_8848, keep_dims = var_6860, x = inputs_265_cast)[name = tensor("channels_mean_265_cast")]; + tensor zero_mean_265_cast = sub(x = inputs_265_cast, y = channels_mean_265_cast)[name = tensor("zero_mean_265_cast")]; + tensor zero_mean_sq_265_cast = mul(x = zero_mean_265_cast, y = zero_mean_265_cast)[name = tensor("zero_mean_sq_265_cast")]; + tensor var_8852 = const()[name = tensor("op_8852"), val = tensor([1])]; + tensor var_8853_cast = reduce_mean(axes = var_8852, keep_dims = var_6860, x = zero_mean_sq_265_cast)[name = tensor("op_8853_cast")]; + tensor var_8854_to_fp16 = const()[name = tensor("op_8854_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8855_cast = add(x = var_8853_cast, y = var_8854_to_fp16)[name = tensor("op_8855_cast")]; + tensor denom_265_epsilon_0_to_fp16 = const()[name = tensor("denom_265_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_265_cast = rsqrt(epsilon = denom_265_epsilon_0_to_fp16, x = var_8855_cast)[name = tensor("denom_265_cast")]; + tensor out_265_cast = mul(x = zero_mean_265_cast, y = denom_265_cast)[name = tensor("out_265_cast")]; + tensor var_8859_to_fp16 = const()[name = tensor("op_8859_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1224531456)))]; + tensor var_8860_cast = add(x = out_265_cast, y = var_8859_to_fp16)[name = tensor("op_8860_cast")]; + tensor var_8862_to_fp16 = const()[name = tensor("op_8862_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1224534080)))]; + tensor hidden_states_361_cast = mul(x = var_8860_cast, y = var_8862_to_fp16)[name = tensor("hidden_states_361_cast")]; + tensor var_8869 = const()[name = tensor("op_8869"), val = tensor([1, 1])]; + tensor var_8871 = const()[name = tensor("op_8871"), val = tensor([1, 1])]; + tensor q_177_pad_type_0 = const()[name = tensor("q_177_pad_type_0"), val = tensor("custom")]; + tensor q_177_pad_0 = const()[name = tensor("q_177_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1224536704))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1225765568))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_177_cast = conv(dilations = var_8871, groups = var_6865, pad = q_177_pad_0, pad_type = q_177_pad_type_0, strides = var_8869, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_361_cast)[name = tensor("q_177_cast")]; + tensor var_8875 = const()[name = tensor("op_8875"), val = tensor([1, 1])]; + tensor var_8877 = const()[name = tensor("op_8877"), val = tensor([1, 1])]; + tensor k_177_pad_type_0 = const()[name = tensor("k_177_pad_type_0"), val = tensor("custom")]; + tensor k_177_pad_0 = const()[name = tensor("k_177_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1225765760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1226585024))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_177_cast = conv(dilations = var_8877, groups = var_6865, pad = k_177_pad_0, pad_type = k_177_pad_type_0, strides = var_8875, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_361_cast)[name = tensor("k_177_cast")]; + tensor var_8881 = const()[name = tensor("op_8881"), val = tensor([1, 1])]; + tensor var_8883 = const()[name = tensor("op_8883"), val = tensor([1, 1])]; + tensor v_177_pad_type_0 = const()[name = tensor("v_177_pad_type_0"), val = tensor("custom")]; + tensor v_177_pad_0 = const()[name = tensor("v_177_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1226585152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1228223616))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_177_cast = conv(dilations = var_8883, groups = var_6865, pad = v_177_pad_0, pad_type = v_177_pad_type_0, strides = var_8881, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_361_cast)[name = tensor("v_177_cast")]; + tensor var_8887 = const()[name = tensor("op_8887"), val = tensor([2, 20, 64, -1])]; + tensor var_8888_cast = reshape(shape = var_8887, x = q_177_cast)[name = tensor("op_8888_cast")]; + tensor var_8889 = const()[name = tensor("op_8889"), val = tensor([2, 20, 64, -1])]; + tensor var_8890_cast = reshape(shape = var_8889, x = k_177_cast)[name = tensor("op_8890_cast")]; + tensor var_8891 = const()[name = tensor("op_8891"), val = tensor([2, 20, 64, -1])]; + tensor var_8892_cast = reshape(shape = var_8891, x = v_177_cast)[name = tensor("op_8892_cast")]; + tensor attn_weights_353_transpose_x_0 = const()[name = tensor("attn_weights_353_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_353_transpose_y_0 = const()[name = 
tensor("attn_weights_353_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_353_cast = matmul(transpose_x = attn_weights_353_transpose_x_0, transpose_y = attn_weights_353_transpose_y_0, x = var_8888_cast, y = var_8890_cast)[name = tensor("attn_weights_353_cast")]; + tensor attn_weights_355_cast = mul(x = attn_weights_353_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_355_cast")]; + tensor var_8896_cast = softmax(axis = var_6849, x = attn_weights_355_cast)[name = tensor("op_8896_cast")]; + tensor attn_177_transpose_x_0 = const()[name = tensor("attn_177_transpose_x_0"), val = tensor(false)]; + tensor attn_177_transpose_y_0 = const()[name = tensor("attn_177_transpose_y_0"), val = tensor(true)]; + tensor attn_177_cast = matmul(transpose_x = attn_177_transpose_x_0, transpose_y = attn_177_transpose_y_0, x = var_8892_cast, y = var_8896_cast)[name = tensor("attn_177_cast")]; + tensor var_8900 = const()[name = tensor("op_8900"), val = tensor([2, 1280, 1, -1])]; + tensor input_537_cast = reshape(shape = var_8900, x = attn_177_cast)[name = tensor("input_537_cast")]; + tensor var_8905 = const()[name = tensor("op_8905"), val = tensor([1, 1])]; + tensor var_8907 = const()[name = tensor("op_8907"), val = tensor([1, 1])]; + tensor var_8909_pad_type_0 = const()[name = tensor("op_8909_pad_type_0"), val = tensor("custom")]; + tensor var_8909_pad_0 = const()[name = tensor("op_8909_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1228224192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1229862656))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1229863232)))]; + tensor var_8909_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_8907, groups = var_6865, pad = var_8909_pad_0, pad_type = var_8909_pad_type_0, strides = var_8905, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_537_cast)[name = tensor("op_8909_cast")]; + tensor inputs_267_cast = add(x = var_8909_cast, y = inputs_265_cast)[name = tensor("inputs_267_cast")]; + tensor var_8913 = const()[name = tensor("op_8913"), val = tensor([1])]; + tensor channels_mean_267_cast = reduce_mean(axes = var_8913, keep_dims = var_6860, x = inputs_267_cast)[name = tensor("channels_mean_267_cast")]; + tensor zero_mean_267_cast = sub(x = inputs_267_cast, y = channels_mean_267_cast)[name = tensor("zero_mean_267_cast")]; + tensor zero_mean_sq_267_cast = mul(x = zero_mean_267_cast, y = zero_mean_267_cast)[name = tensor("zero_mean_sq_267_cast")]; + tensor var_8917 = const()[name = tensor("op_8917"), val = tensor([1])]; + tensor var_8918_cast = reduce_mean(axes = var_8917, keep_dims = var_6860, x = zero_mean_sq_267_cast)[name = tensor("op_8918_cast")]; + tensor var_8919_to_fp16 = const()[name = tensor("op_8919_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8920_cast = add(x = var_8918_cast, y = var_8919_to_fp16)[name = 
tensor("op_8920_cast")]; + tensor denom_267_epsilon_0_to_fp16 = const()[name = tensor("denom_267_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_267_cast = rsqrt(epsilon = denom_267_epsilon_0_to_fp16, x = var_8920_cast)[name = tensor("denom_267_cast")]; + tensor out_267_cast = mul(x = zero_mean_267_cast, y = denom_267_cast)[name = tensor("out_267_cast")]; + tensor var_8924_to_fp16 = const()[name = tensor("op_8924_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1229865856)))]; + tensor var_8925_cast = add(x = out_267_cast, y = var_8924_to_fp16)[name = tensor("op_8925_cast")]; + tensor var_8927_to_fp16 = const()[name = tensor("op_8927_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1229868480)))]; + tensor hidden_states_363_cast = mul(x = var_8925_cast, y = var_8927_to_fp16)[name = tensor("hidden_states_363_cast")]; + tensor var_8934 = const()[name = tensor("op_8934"), val = tensor([1, 1])]; + tensor var_8936 = const()[name = tensor("op_8936"), val = tensor([1, 1])]; + tensor q_179_pad_type_0 = const()[name = tensor("q_179_pad_type_0"), val = tensor("custom")]; + tensor q_179_pad_0 = const()[name = tensor("q_179_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1229871104))), lut = tensor([-0x1.d34p-6, -0x1.1dp-7, 0x1.1c8p-7, 0x1.d38p-6]), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_179_cast = conv(dilations = var_8936, groups = var_6865, pad = q_179_pad_0, pad_type = q_179_pad_type_0, strides = var_8934, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_363_cast)[name = tensor("q_179_cast")]; + tensor var_8940 = const()[name = tensor("op_8940"), val = tensor([1, 1])]; + tensor var_8942 = const()[name = tensor("op_8942"), val = tensor([1, 1])]; + tensor k_179_pad_type_0 = const()[name = tensor("k_179_pad_type_0"), val = tensor("custom")]; + tensor k_179_pad_0 = const()[name = tensor("k_179_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1230280768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231591552))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_179_cast = conv(dilations = var_8942, groups = var_6865, pad = k_179_pad_0, pad_type = k_179_pad_type_0, strides = var_8940, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_179_cast")]; + tensor var_8946 = const()[name = tensor("op_8946"), val = tensor([1, 1])]; + tensor var_8948 = const()[name = tensor("op_8948"), val = tensor([1, 1])]; + tensor v_179_pad_type_0 = const()[name = tensor("v_179_pad_type_0"), val = tensor("custom")]; + tensor v_179_pad_0 = const()[name = tensor("v_179_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1231591680))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1233557824))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_179_cast = conv(dilations = var_8948, groups = var_6865, pad = v_179_pad_0, pad_type = v_179_pad_type_0, strides = var_8946, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_179_cast")]; + tensor var_8952 = const()[name = tensor("op_8952"), val = tensor([2, 20, 64, -1])]; + tensor var_8953_cast = reshape(shape = var_8952, x = q_179_cast)[name = tensor("op_8953_cast")]; + tensor var_8954 = const()[name = tensor("op_8954"), val = tensor([2, 20, 64, -1])]; + tensor var_8955_cast = reshape(shape = var_8954, x = k_179_cast)[name = tensor("op_8955_cast")]; + tensor var_8956 = const()[name = tensor("op_8956"), val = tensor([2, 20, 64, -1])]; + tensor var_8957_cast = reshape(shape = var_8956, x = v_179_cast)[name = tensor("op_8957_cast")]; + tensor attn_weights_357_transpose_x_0 = const()[name = tensor("attn_weights_357_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_357_transpose_y_0 = const()[name = tensor("attn_weights_357_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_357_cast = matmul(transpose_x = attn_weights_357_transpose_x_0, transpose_y = attn_weights_357_transpose_y_0, x = var_8953_cast, y = var_8955_cast)[name = tensor("attn_weights_357_cast")]; + tensor attn_weights_359_cast = mul(x = attn_weights_357_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_359_cast")]; + tensor var_8961_cast = softmax(axis = var_6849, x = attn_weights_359_cast)[name = tensor("op_8961_cast")]; + tensor attn_179_transpose_x_0 = const()[name = tensor("attn_179_transpose_x_0"), val = tensor(false)]; + tensor attn_179_transpose_y_0 = const()[name = tensor("attn_179_transpose_y_0"), val = tensor(true)]; + tensor attn_179_cast = matmul(transpose_x = attn_179_transpose_x_0, transpose_y = attn_179_transpose_y_0, x = var_8957_cast, y = var_8961_cast)[name = tensor("attn_179_cast")]; + tensor var_8965 = const()[name = tensor("op_8965"), val = tensor([2, 1280, 1, -1])]; + tensor input_539_cast = reshape(shape = var_8965, x = attn_179_cast)[name = tensor("input_539_cast")]; + tensor var_8970 = const()[name = tensor("op_8970"), val = tensor([1, 1])]; + tensor var_8972 = const()[name = tensor("op_8972"), val = tensor([1, 1])]; + tensor var_8974_pad_type_0 = const()[name = tensor("op_8974_pad_type_0"), val = tensor("custom")]; + tensor var_8974_pad_0 = const()[name = tensor("op_8974_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1233558016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1234786880))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1234787072)))]; + tensor var_8974_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_8972, groups = var_6865, pad = var_8974_pad_0, pad_type = var_8974_pad_type_0, strides = var_8970, weight = up_blocks_0_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_539_cast)[name = tensor("op_8974_cast")]; + tensor inputs_269_cast = add(x = var_8974_cast, y = inputs_267_cast)[name = tensor("inputs_269_cast")]; + tensor var_8978 = const()[name = tensor("op_8978"), val = tensor([1])]; + tensor channels_mean_269_cast = reduce_mean(axes = var_8978, keep_dims = var_6860, x = inputs_269_cast)[name = tensor("channels_mean_269_cast")]; + tensor zero_mean_269_cast = sub(x = inputs_269_cast, y = channels_mean_269_cast)[name = tensor("zero_mean_269_cast")]; + tensor zero_mean_sq_269_cast = mul(x = zero_mean_269_cast, y = zero_mean_269_cast)[name = tensor("zero_mean_sq_269_cast")]; + tensor var_8982 = const()[name = tensor("op_8982"), val = tensor([1])]; + tensor var_8983_cast = reduce_mean(axes = var_8982, keep_dims = var_6860, x = zero_mean_sq_269_cast)[name = tensor("op_8983_cast")]; + tensor var_8984_to_fp16 = const()[name = tensor("op_8984_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_8985_cast = add(x = var_8983_cast, y = var_8984_to_fp16)[name = tensor("op_8985_cast")]; + tensor denom_269_epsilon_0_to_fp16 = const()[name = tensor("denom_269_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_269_cast = rsqrt(epsilon = denom_269_epsilon_0_to_fp16, x = var_8985_cast)[name = tensor("denom_269_cast")]; + tensor out_269_cast = mul(x = zero_mean_269_cast, y = denom_269_cast)[name = tensor("out_269_cast")]; + tensor var_8989_to_fp16 = const()[name = tensor("op_8989_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1234789696)))]; + tensor var_8990_cast = add(x = out_269_cast, y = var_8989_to_fp16)[name = tensor("op_8990_cast")]; + tensor var_8992_to_fp16 = const()[name = tensor("op_8992_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1234792320)))]; + tensor input_541_cast = mul(x = var_8990_cast, y = var_8992_to_fp16)[name = tensor("input_541_cast")]; + tensor var_9000 = const()[name = tensor("op_9000"), val = tensor([1, 1])]; + tensor var_9002 = const()[name = tensor("op_9002"), val = tensor([1, 1])]; + tensor var_9004_pad_type_0 = const()[name = tensor("op_9004_pad_type_0"), val = tensor("custom")]; + tensor var_9004_pad_0 = const()[name = tensor("op_9004_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1234794944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1247902208))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1247902784)))]; + tensor var_9004_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_9002, groups = var_6865, pad = 
var_9004_pad_0, pad_type = var_9004_pad_type_0, strides = var_9000, weight = up_blocks_0_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_541_cast)[name = tensor("op_9004_cast")]; + tensor var_9005_split_sizes_0 = const()[name = tensor("op_9005_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9005_axis_0 = const()[name = tensor("op_9005_axis_0"), val = tensor(1)]; + tensor var_9005_cast_0, tensor var_9005_cast_1 = split(axis = var_9005_axis_0, split_sizes = var_9005_split_sizes_0, x = var_9004_cast)[name = tensor("op_9005_cast")]; + tensor var_9007_mode_0 = const()[name = tensor("op_9007_mode_0"), val = tensor("EXACT")]; + tensor var_9007_cast = gelu(mode = var_9007_mode_0, x = var_9005_cast_1)[name = tensor("op_9007_cast")]; + tensor input_543_cast = mul(x = var_9005_cast_0, y = var_9007_cast)[name = tensor("input_543_cast")]; + tensor var_9011 = const()[name = tensor("op_9011"), val = tensor([1, 1])]; + tensor var_9013 = const()[name = tensor("op_9013"), val = tensor([1, 1])]; + tensor var_9015_pad_type_0 = const()[name = tensor("op_9015_pad_type_0"), val = tensor("custom")]; + tensor var_9015_pad_0 = const()[name = tensor("op_9015_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1247923328))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1254476992))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1254477568)))]; + tensor var_9015_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_9013, groups = var_6865, pad = var_9015_pad_0, pad_type = var_9015_pad_type_0, strides = var_9011, weight = up_blocks_0_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_543_cast)[name = tensor("op_9015_cast")]; + tensor inputs_271_cast = add(x = var_9015_cast, y = inputs_269_cast)[name = tensor("inputs_271_cast")]; + tensor var_9025 = const()[name = tensor("op_9025"), val = tensor([1])]; + tensor channels_mean_271_cast = reduce_mean(axes = var_9025, keep_dims = var_6860, x = inputs_271_cast)[name = tensor("channels_mean_271_cast")]; + tensor zero_mean_271_cast = sub(x = inputs_271_cast, y = channels_mean_271_cast)[name = tensor("zero_mean_271_cast")]; + tensor zero_mean_sq_271_cast = mul(x = zero_mean_271_cast, y = zero_mean_271_cast)[name = tensor("zero_mean_sq_271_cast")]; + tensor var_9029 = const()[name = tensor("op_9029"), val = tensor([1])]; + tensor var_9030_cast = reduce_mean(axes = var_9029, keep_dims = var_6860, x = zero_mean_sq_271_cast)[name = tensor("op_9030_cast")]; + tensor var_9031_to_fp16 = const()[name = tensor("op_9031_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9032_cast = add(x = var_9030_cast, y = var_9031_to_fp16)[name = tensor("op_9032_cast")]; + tensor denom_271_epsilon_0_to_fp16 = const()[name = tensor("denom_271_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_271_cast = rsqrt(epsilon = denom_271_epsilon_0_to_fp16, x = var_9032_cast)[name = 
tensor("denom_271_cast")]; + tensor out_271_cast = mul(x = zero_mean_271_cast, y = denom_271_cast)[name = tensor("out_271_cast")]; + tensor var_9036_to_fp16 = const()[name = tensor("op_9036_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1254480192)))]; + tensor var_9037_cast = add(x = out_271_cast, y = var_9036_to_fp16)[name = tensor("op_9037_cast")]; + tensor var_9039_to_fp16 = const()[name = tensor("op_9039_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1254482816)))]; + tensor hidden_states_367_cast = mul(x = var_9037_cast, y = var_9039_to_fp16)[name = tensor("hidden_states_367_cast")]; + tensor var_9046 = const()[name = tensor("op_9046"), val = tensor([1, 1])]; + tensor var_9048 = const()[name = tensor("op_9048"), val = tensor([1, 1])]; + tensor q_181_pad_type_0 = const()[name = tensor("q_181_pad_type_0"), val = tensor("custom")]; + tensor q_181_pad_0 = const()[name = tensor("q_181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1254485440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1255714304))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_181_cast = conv(dilations = var_9048, groups = var_6865, pad = q_181_pad_0, pad_type = q_181_pad_type_0, strides = var_9046, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_367_cast)[name = tensor("q_181_cast")]; + tensor var_9052 = const()[name = tensor("op_9052"), val = tensor([1, 1])]; + tensor var_9054 = const()[name = tensor("op_9054"), val = tensor([1, 1])]; + tensor k_181_pad_type_0 = const()[name = tensor("k_181_pad_type_0"), val = tensor("custom")]; + tensor k_181_pad_0 = const()[name = tensor("k_181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1255714496))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1256533760))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_181_cast = conv(dilations = var_9054, groups = var_6865, pad = k_181_pad_0, pad_type = k_181_pad_type_0, strides = var_9052, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_367_cast)[name = tensor("k_181_cast")]; + tensor var_9058 = const()[name = tensor("op_9058"), val = tensor([1, 1])]; + tensor var_9060 = const()[name = tensor("op_9060"), val = tensor([1, 1])]; + tensor v_181_pad_type_0 = const()[name = tensor("v_181_pad_type_0"), val = tensor("custom")]; + tensor v_181_pad_0 = const()[name = tensor("v_181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1256533888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1258172352))), name 
= tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_181_cast = conv(dilations = var_9060, groups = var_6865, pad = v_181_pad_0, pad_type = v_181_pad_type_0, strides = var_9058, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_367_cast)[name = tensor("v_181_cast")]; + tensor var_9064 = const()[name = tensor("op_9064"), val = tensor([2, 20, 64, -1])]; + tensor var_9065_cast = reshape(shape = var_9064, x = q_181_cast)[name = tensor("op_9065_cast")]; + tensor var_9066 = const()[name = tensor("op_9066"), val = tensor([2, 20, 64, -1])]; + tensor var_9067_cast = reshape(shape = var_9066, x = k_181_cast)[name = tensor("op_9067_cast")]; + tensor var_9068 = const()[name = tensor("op_9068"), val = tensor([2, 20, 64, -1])]; + tensor var_9069_cast = reshape(shape = var_9068, x = v_181_cast)[name = tensor("op_9069_cast")]; + tensor attn_weights_361_transpose_x_0 = const()[name = tensor("attn_weights_361_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_361_transpose_y_0 = const()[name = tensor("attn_weights_361_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_361_cast = matmul(transpose_x = attn_weights_361_transpose_x_0, transpose_y = attn_weights_361_transpose_y_0, x = var_9065_cast, y = var_9067_cast)[name = tensor("attn_weights_361_cast")]; + tensor attn_weights_363_cast = mul(x = attn_weights_361_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_363_cast")]; + tensor var_9073_cast = softmax(axis = var_6849, x = attn_weights_363_cast)[name = tensor("op_9073_cast")]; + tensor attn_181_transpose_x_0 = const()[name = tensor("attn_181_transpose_x_0"), val = tensor(false)]; + tensor attn_181_transpose_y_0 = const()[name = tensor("attn_181_transpose_y_0"), val = tensor(true)]; + tensor attn_181_cast = matmul(transpose_x = attn_181_transpose_x_0, transpose_y = attn_181_transpose_y_0, x = var_9069_cast, y = var_9073_cast)[name = tensor("attn_181_cast")]; + tensor var_9077 = const()[name = tensor("op_9077"), val = tensor([2, 1280, 1, -1])]; + tensor input_545_cast = reshape(shape = var_9077, x = attn_181_cast)[name = tensor("input_545_cast")]; + tensor var_9082 = const()[name = tensor("op_9082"), val = tensor([1, 1])]; + tensor var_9084 = const()[name = tensor("op_9084"), val = tensor([1, 1])]; + tensor var_9086_pad_type_0 = const()[name = tensor("op_9086_pad_type_0"), val = tensor("custom")]; + tensor var_9086_pad_0 = const()[name = tensor("op_9086_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1258172928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1259811392))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1259811968)))]; + tensor var_9086_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_9084, groups = var_6865, pad = var_9086_pad_0, pad_type = 
var_9086_pad_type_0, strides = var_9082, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_545_cast)[name = tensor("op_9086_cast")]; + tensor inputs_273_cast = add(x = var_9086_cast, y = inputs_271_cast)[name = tensor("inputs_273_cast")]; + tensor var_9090 = const()[name = tensor("op_9090"), val = tensor([1])]; + tensor channels_mean_273_cast = reduce_mean(axes = var_9090, keep_dims = var_6860, x = inputs_273_cast)[name = tensor("channels_mean_273_cast")]; + tensor zero_mean_273_cast = sub(x = inputs_273_cast, y = channels_mean_273_cast)[name = tensor("zero_mean_273_cast")]; + tensor zero_mean_sq_273_cast = mul(x = zero_mean_273_cast, y = zero_mean_273_cast)[name = tensor("zero_mean_sq_273_cast")]; + tensor var_9094 = const()[name = tensor("op_9094"), val = tensor([1])]; + tensor var_9095_cast = reduce_mean(axes = var_9094, keep_dims = var_6860, x = zero_mean_sq_273_cast)[name = tensor("op_9095_cast")]; + tensor var_9096_to_fp16 = const()[name = tensor("op_9096_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9097_cast = add(x = var_9095_cast, y = var_9096_to_fp16)[name = tensor("op_9097_cast")]; + tensor denom_273_epsilon_0_to_fp16 = const()[name = tensor("denom_273_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_273_cast = rsqrt(epsilon = denom_273_epsilon_0_to_fp16, x = var_9097_cast)[name = tensor("denom_273_cast")]; + tensor out_273_cast = mul(x = zero_mean_273_cast, y = denom_273_cast)[name = tensor("out_273_cast")]; + tensor var_9101_to_fp16 = const()[name = tensor("op_9101_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1259814592)))]; + tensor var_9102_cast = add(x = out_273_cast, y = var_9101_to_fp16)[name = tensor("op_9102_cast")]; + tensor var_9104_to_fp16 = const()[name = tensor("op_9104_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1259817216)))]; + tensor hidden_states_369_cast = mul(x = var_9102_cast, y = var_9104_to_fp16)[name = tensor("hidden_states_369_cast")]; + tensor var_9111 = const()[name = tensor("op_9111"), val = tensor([1, 1])]; + tensor var_9113 = const()[name = tensor("op_9113"), val = tensor([1, 1])]; + tensor q_183_pad_type_0 = const()[name = tensor("q_183_pad_type_0"), val = tensor("custom")]; + tensor q_183_pad_0 = const()[name = tensor("q_183_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1259819840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1260639104))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_183_cast = conv(dilations = var_9113, groups = var_6865, pad = q_183_pad_0, pad_type = q_183_pad_type_0, strides = var_9111, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_369_cast)[name = tensor("q_183_cast")]; + tensor var_9117 = const()[name = tensor("op_9117"), val = tensor([1, 1])]; + tensor var_9119 = const()[name = tensor("op_9119"), val = tensor([1, 1])]; + tensor k_183_pad_type_0 = const()[name = tensor("k_183_pad_type_0"), val = tensor("custom")]; + tensor k_183_pad_0 = const()[name = tensor("k_183_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1260639232))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1261950016))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_183_cast = conv(dilations = var_9119, groups = var_6865, pad = k_183_pad_0, pad_type = k_183_pad_type_0, strides = var_9117, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_183_cast")]; + tensor var_9123 = const()[name = tensor("op_9123"), val = tensor([1, 1])]; + tensor var_9125 = const()[name = tensor("op_9125"), val = tensor([1, 1])]; + tensor v_183_pad_type_0 = const()[name = tensor("v_183_pad_type_0"), val = tensor("custom")]; + tensor v_183_pad_0 = const()[name = tensor("v_183_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1261950144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1263916288))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_183_cast = conv(dilations = var_9125, groups = var_6865, pad = v_183_pad_0, pad_type = v_183_pad_type_0, strides = var_9123, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_183_cast")]; + tensor var_9129 = const()[name = tensor("op_9129"), val = tensor([2, 20, 64, -1])]; + tensor var_9130_cast = reshape(shape = var_9129, x = q_183_cast)[name = tensor("op_9130_cast")]; + tensor var_9131 = const()[name = tensor("op_9131"), val = tensor([2, 20, 64, -1])]; + tensor var_9132_cast = reshape(shape = var_9131, x = k_183_cast)[name = tensor("op_9132_cast")]; + tensor var_9133 = const()[name = tensor("op_9133"), val = tensor([2, 20, 64, -1])]; + tensor var_9134_cast = reshape(shape = var_9133, x = v_183_cast)[name = tensor("op_9134_cast")]; + tensor attn_weights_365_transpose_x_0 = const()[name = tensor("attn_weights_365_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_365_transpose_y_0 = const()[name = tensor("attn_weights_365_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_365_cast = matmul(transpose_x = attn_weights_365_transpose_x_0, transpose_y = attn_weights_365_transpose_y_0, x = var_9130_cast, y = var_9132_cast)[name = tensor("attn_weights_365_cast")]; + tensor attn_weights_367_cast = mul(x = attn_weights_365_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_367_cast")]; + tensor var_9138_cast = softmax(axis = var_6849, x = attn_weights_367_cast)[name = tensor("op_9138_cast")]; + tensor attn_183_transpose_x_0 = const()[name = tensor("attn_183_transpose_x_0"), val = tensor(false)]; + tensor attn_183_transpose_y_0 = const()[name = tensor("attn_183_transpose_y_0"), val = tensor(true)]; + tensor attn_183_cast = matmul(transpose_x = attn_183_transpose_x_0, transpose_y = attn_183_transpose_y_0, x = var_9134_cast, y = var_9138_cast)[name = tensor("attn_183_cast")]; + tensor var_9142 = const()[name = tensor("op_9142"), val = tensor([2, 1280, 1, -1])]; + 
tensor input_547_cast = reshape(shape = var_9142, x = attn_183_cast)[name = tensor("input_547_cast")]; + tensor var_9147 = const()[name = tensor("op_9147"), val = tensor([1, 1])]; + tensor var_9149 = const()[name = tensor("op_9149"), val = tensor([1, 1])]; + tensor var_9151_pad_type_0 = const()[name = tensor("op_9151_pad_type_0"), val = tensor("custom")]; + tensor var_9151_pad_0 = const()[name = tensor("op_9151_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1263916480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1265554944))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1265555520)))]; + tensor var_9151_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_9149, groups = var_6865, pad = var_9151_pad_0, pad_type = var_9151_pad_type_0, strides = var_9147, weight = up_blocks_0_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_547_cast)[name = tensor("op_9151_cast")]; + tensor inputs_275_cast = add(x = var_9151_cast, y = inputs_273_cast)[name = tensor("inputs_275_cast")]; + tensor var_9155 = const()[name = tensor("op_9155"), val = tensor([1])]; + tensor channels_mean_275_cast = reduce_mean(axes = var_9155, keep_dims = var_6860, x = inputs_275_cast)[name = tensor("channels_mean_275_cast")]; + tensor zero_mean_275_cast = sub(x = inputs_275_cast, y = channels_mean_275_cast)[name = tensor("zero_mean_275_cast")]; + tensor zero_mean_sq_275_cast = mul(x = zero_mean_275_cast, y = zero_mean_275_cast)[name = tensor("zero_mean_sq_275_cast")]; + tensor var_9159 = const()[name = tensor("op_9159"), val = tensor([1])]; + tensor var_9160_cast = reduce_mean(axes = var_9159, keep_dims = var_6860, x = zero_mean_sq_275_cast)[name = tensor("op_9160_cast")]; + tensor var_9161_to_fp16 = const()[name = tensor("op_9161_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9162_cast = add(x = var_9160_cast, y = var_9161_to_fp16)[name = tensor("op_9162_cast")]; + tensor denom_275_epsilon_0_to_fp16 = const()[name = tensor("denom_275_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_275_cast = rsqrt(epsilon = denom_275_epsilon_0_to_fp16, x = var_9162_cast)[name = tensor("denom_275_cast")]; + tensor out_275_cast = mul(x = zero_mean_275_cast, y = denom_275_cast)[name = tensor("out_275_cast")]; + tensor var_9166_to_fp16 = const()[name = tensor("op_9166_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1265558144)))]; + tensor var_9167_cast = add(x = out_275_cast, y = var_9166_to_fp16)[name = tensor("op_9167_cast")]; + tensor var_9169_to_fp16 = const()[name = tensor("op_9169_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1265560768)))]; + tensor input_549_cast = mul(x = var_9167_cast, y = var_9169_to_fp16)[name = tensor("input_549_cast")]; + tensor var_9177 = const()[name = tensor("op_9177"), val = tensor([1, 1])]; + tensor 
var_9179 = const()[name = tensor("op_9179"), val = tensor([1, 1])]; + tensor var_9181_pad_type_0 = const()[name = tensor("op_9181_pad_type_0"), val = tensor("custom")]; + tensor var_9181_pad_0 = const()[name = tensor("op_9181_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1265563392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1278670656))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1278671232)))]; + tensor var_9181_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_9179, groups = var_6865, pad = var_9181_pad_0, pad_type = var_9181_pad_type_0, strides = var_9177, weight = up_blocks_0_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_549_cast)[name = tensor("op_9181_cast")]; + tensor var_9182_split_sizes_0 = const()[name = tensor("op_9182_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9182_axis_0 = const()[name = tensor("op_9182_axis_0"), val = tensor(1)]; + tensor var_9182_cast_0, tensor var_9182_cast_1 = split(axis = var_9182_axis_0, split_sizes = var_9182_split_sizes_0, x = var_9181_cast)[name = tensor("op_9182_cast")]; + tensor var_9184_mode_0 = const()[name = tensor("op_9184_mode_0"), val = tensor("EXACT")]; + tensor var_9184_cast = gelu(mode = var_9184_mode_0, x = var_9182_cast_1)[name = tensor("op_9184_cast")]; + tensor input_551_cast = mul(x = var_9182_cast_0, y = var_9184_cast)[name = tensor("input_551_cast")]; + tensor var_9188 = const()[name = tensor("op_9188"), val = tensor([1, 1])]; + tensor var_9190 = const()[name = tensor("op_9190"), val = tensor([1, 1])]; + tensor var_9192_pad_type_0 = const()[name = tensor("op_9192_pad_type_0"), val = tensor("custom")]; + tensor var_9192_pad_0 = const()[name = tensor("op_9192_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1278691776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1285245440))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1285246016)))]; + tensor var_9192_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_9190, groups = var_6865, pad = var_9192_pad_0, pad_type = var_9192_pad_type_0, strides = var_9188, weight = up_blocks_0_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_551_cast)[name = tensor("op_9192_cast")]; + tensor inputs_277_cast = add(x = 
var_9192_cast, y = inputs_275_cast)[name = tensor("inputs_277_cast")]; + tensor var_9202 = const()[name = tensor("op_9202"), val = tensor([1])]; + tensor channels_mean_277_cast = reduce_mean(axes = var_9202, keep_dims = var_6860, x = inputs_277_cast)[name = tensor("channels_mean_277_cast")]; + tensor zero_mean_277_cast = sub(x = inputs_277_cast, y = channels_mean_277_cast)[name = tensor("zero_mean_277_cast")]; + tensor zero_mean_sq_277_cast = mul(x = zero_mean_277_cast, y = zero_mean_277_cast)[name = tensor("zero_mean_sq_277_cast")]; + tensor var_9206 = const()[name = tensor("op_9206"), val = tensor([1])]; + tensor var_9207_cast = reduce_mean(axes = var_9206, keep_dims = var_6860, x = zero_mean_sq_277_cast)[name = tensor("op_9207_cast")]; + tensor var_9208_to_fp16 = const()[name = tensor("op_9208_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9209_cast = add(x = var_9207_cast, y = var_9208_to_fp16)[name = tensor("op_9209_cast")]; + tensor denom_277_epsilon_0_to_fp16 = const()[name = tensor("denom_277_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_277_cast = rsqrt(epsilon = denom_277_epsilon_0_to_fp16, x = var_9209_cast)[name = tensor("denom_277_cast")]; + tensor out_277_cast = mul(x = zero_mean_277_cast, y = denom_277_cast)[name = tensor("out_277_cast")]; + tensor var_9213_to_fp16 = const()[name = tensor("op_9213_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1285248640)))]; + tensor var_9214_cast = add(x = out_277_cast, y = var_9213_to_fp16)[name = tensor("op_9214_cast")]; + tensor var_9216_to_fp16 = const()[name = tensor("op_9216_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1285251264)))]; + tensor hidden_states_373_cast = mul(x = var_9214_cast, y = var_9216_to_fp16)[name = tensor("hidden_states_373_cast")]; + tensor var_9223 = const()[name = tensor("op_9223"), val = tensor([1, 1])]; + tensor var_9225 = const()[name = tensor("op_9225"), val = tensor([1, 1])]; + tensor q_185_pad_type_0 = const()[name = tensor("q_185_pad_type_0"), val = tensor("custom")]; + tensor q_185_pad_0 = const()[name = tensor("q_185_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1285253888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1286482752))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_185_cast = conv(dilations = var_9225, groups = var_6865, pad = q_185_pad_0, pad_type = q_185_pad_type_0, strides = var_9223, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_373_cast)[name = tensor("q_185_cast")]; + tensor var_9229 = const()[name = tensor("op_9229"), val = tensor([1, 1])]; + tensor var_9231 = const()[name = tensor("op_9231"), val = tensor([1, 1])]; + tensor k_185_pad_type_0 = const()[name = tensor("k_185_pad_type_0"), val = tensor("custom")]; + tensor k_185_pad_0 = const()[name = tensor("k_185_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1286482944))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1287711808))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_185_cast = conv(dilations = var_9231, groups = var_6865, pad = k_185_pad_0, pad_type = k_185_pad_type_0, strides = var_9229, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_373_cast)[name = tensor("k_185_cast")]; + tensor var_9235 = const()[name = tensor("op_9235"), val = tensor([1, 1])]; + tensor var_9237 = const()[name = tensor("op_9237"), val = tensor([1, 1])]; + tensor v_185_pad_type_0 = const()[name = tensor("v_185_pad_type_0"), val = tensor("custom")]; + tensor v_185_pad_0 = const()[name = tensor("v_185_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1287712000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1289350464))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_185_cast = conv(dilations = var_9237, groups = var_6865, pad = v_185_pad_0, pad_type = v_185_pad_type_0, strides = var_9235, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_373_cast)[name = tensor("v_185_cast")]; + tensor var_9241 = const()[name = tensor("op_9241"), val = tensor([2, 20, 64, -1])]; + tensor var_9242_cast = reshape(shape = var_9241, x = q_185_cast)[name = tensor("op_9242_cast")]; + tensor var_9243 = const()[name = tensor("op_9243"), val = tensor([2, 20, 64, -1])]; + tensor var_9244_cast = reshape(shape = var_9243, x = k_185_cast)[name = tensor("op_9244_cast")]; + tensor var_9245 = const()[name = tensor("op_9245"), val = tensor([2, 20, 64, -1])]; + tensor var_9246_cast = reshape(shape = var_9245, x = v_185_cast)[name = tensor("op_9246_cast")]; + tensor attn_weights_369_transpose_x_0 = const()[name = tensor("attn_weights_369_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_369_transpose_y_0 = const()[name = tensor("attn_weights_369_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_369_cast = matmul(transpose_x = attn_weights_369_transpose_x_0, transpose_y = attn_weights_369_transpose_y_0, x = var_9242_cast, y = var_9244_cast)[name = tensor("attn_weights_369_cast")]; + tensor attn_weights_371_cast = mul(x = attn_weights_369_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_371_cast")]; + tensor var_9250_cast = softmax(axis = var_6849, x = attn_weights_371_cast)[name = tensor("op_9250_cast")]; + tensor attn_185_transpose_x_0 = const()[name = tensor("attn_185_transpose_x_0"), val = tensor(false)]; + tensor attn_185_transpose_y_0 = const()[name = tensor("attn_185_transpose_y_0"), val = tensor(true)]; + tensor attn_185_cast = matmul(transpose_x = attn_185_transpose_x_0, transpose_y = attn_185_transpose_y_0, x = var_9246_cast, y = var_9250_cast)[name = tensor("attn_185_cast")]; + tensor var_9254 = const()[name = tensor("op_9254"), val = tensor([2, 1280, 1, -1])]; + tensor input_553_cast = reshape(shape = var_9254, x = attn_185_cast)[name = tensor("input_553_cast")]; + tensor var_9259 = const()[name = tensor("op_9259"), val = tensor([1, 1])]; + tensor var_9261 = const()[name = tensor("op_9261"), val = 
tensor([1, 1])]; + tensor var_9263_pad_type_0 = const()[name = tensor("op_9263_pad_type_0"), val = tensor("custom")]; + tensor var_9263_pad_0 = const()[name = tensor("op_9263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1289351040))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1290989504))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1290990080)))]; + tensor var_9263_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_9261, groups = var_6865, pad = var_9263_pad_0, pad_type = var_9263_pad_type_0, strides = var_9259, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_553_cast)[name = tensor("op_9263_cast")]; + tensor inputs_279_cast = add(x = var_9263_cast, y = inputs_277_cast)[name = tensor("inputs_279_cast")]; + tensor var_9267 = const()[name = tensor("op_9267"), val = tensor([1])]; + tensor channels_mean_279_cast = reduce_mean(axes = var_9267, keep_dims = var_6860, x = inputs_279_cast)[name = tensor("channels_mean_279_cast")]; + tensor zero_mean_279_cast = sub(x = inputs_279_cast, y = channels_mean_279_cast)[name = tensor("zero_mean_279_cast")]; + tensor zero_mean_sq_279_cast = mul(x = zero_mean_279_cast, y = zero_mean_279_cast)[name = tensor("zero_mean_sq_279_cast")]; + tensor var_9271 = const()[name = tensor("op_9271"), val = tensor([1])]; + tensor var_9272_cast = reduce_mean(axes = var_9271, keep_dims = var_6860, x = zero_mean_sq_279_cast)[name = tensor("op_9272_cast")]; + tensor var_9273_to_fp16 = const()[name = tensor("op_9273_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9274_cast = add(x = var_9272_cast, y = var_9273_to_fp16)[name = tensor("op_9274_cast")]; + tensor denom_279_epsilon_0_to_fp16 = const()[name = tensor("denom_279_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_279_cast = rsqrt(epsilon = denom_279_epsilon_0_to_fp16, x = var_9274_cast)[name = tensor("denom_279_cast")]; + tensor out_279_cast = mul(x = zero_mean_279_cast, y = denom_279_cast)[name = tensor("out_279_cast")]; + tensor var_9278_to_fp16 = const()[name = tensor("op_9278_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1290992704)))]; + tensor var_9279_cast = add(x = out_279_cast, y = var_9278_to_fp16)[name = tensor("op_9279_cast")]; + tensor var_9281_to_fp16 = const()[name = tensor("op_9281_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1290995328)))]; + tensor hidden_states_375_cast = mul(x = var_9279_cast, y = var_9281_to_fp16)[name = tensor("hidden_states_375_cast")]; + tensor var_9288 = const()[name = tensor("op_9288"), val = tensor([1, 1])]; + tensor var_9290 = const()[name = tensor("op_9290"), val = tensor([1, 1])]; + tensor q_187_pad_type_0 = const()[name = tensor("q_187_pad_type_0"), val = tensor("custom")]; + tensor q_187_pad_0 = const()[name = 
tensor("q_187_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1290997952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1291817216))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_187_cast = conv(dilations = var_9290, groups = var_6865, pad = q_187_pad_0, pad_type = q_187_pad_type_0, strides = var_9288, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_375_cast)[name = tensor("q_187_cast")]; + tensor var_9294 = const()[name = tensor("op_9294"), val = tensor([1, 1])]; + tensor var_9296 = const()[name = tensor("op_9296"), val = tensor([1, 1])]; + tensor k_187_pad_type_0 = const()[name = tensor("k_187_pad_type_0"), val = tensor("custom")]; + tensor k_187_pad_0 = const()[name = tensor("k_187_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1291817344))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1293128128))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_187_cast = conv(dilations = var_9296, groups = var_6865, pad = k_187_pad_0, pad_type = k_187_pad_type_0, strides = var_9294, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_187_cast")]; + tensor var_9300 = const()[name = tensor("op_9300"), val = tensor([1, 1])]; + tensor var_9302 = const()[name = tensor("op_9302"), val = tensor([1, 1])]; + tensor v_187_pad_type_0 = const()[name = tensor("v_187_pad_type_0"), val = tensor("custom")]; + tensor v_187_pad_0 = const()[name = tensor("v_187_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1293128256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1295094400))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_187_cast = conv(dilations = var_9302, groups = var_6865, pad = v_187_pad_0, pad_type = v_187_pad_type_0, strides = var_9300, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_187_cast")]; + tensor var_9306 = const()[name = tensor("op_9306"), val = tensor([2, 20, 64, -1])]; + tensor var_9307_cast = reshape(shape = var_9306, x = q_187_cast)[name = tensor("op_9307_cast")]; + tensor var_9308 = const()[name = tensor("op_9308"), val = tensor([2, 20, 64, -1])]; + tensor var_9309_cast = reshape(shape = var_9308, x = k_187_cast)[name = tensor("op_9309_cast")]; + tensor var_9310 = const()[name = tensor("op_9310"), val = tensor([2, 20, 64, -1])]; + tensor var_9311_cast = reshape(shape = var_9310, x = v_187_cast)[name = tensor("op_9311_cast")]; + tensor 
attn_weights_373_transpose_x_0 = const()[name = tensor("attn_weights_373_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_373_transpose_y_0 = const()[name = tensor("attn_weights_373_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_373_cast = matmul(transpose_x = attn_weights_373_transpose_x_0, transpose_y = attn_weights_373_transpose_y_0, x = var_9307_cast, y = var_9309_cast)[name = tensor("attn_weights_373_cast")]; + tensor attn_weights_375_cast = mul(x = attn_weights_373_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_375_cast")]; + tensor var_9315_cast = softmax(axis = var_6849, x = attn_weights_375_cast)[name = tensor("op_9315_cast")]; + tensor attn_187_transpose_x_0 = const()[name = tensor("attn_187_transpose_x_0"), val = tensor(false)]; + tensor attn_187_transpose_y_0 = const()[name = tensor("attn_187_transpose_y_0"), val = tensor(true)]; + tensor attn_187_cast = matmul(transpose_x = attn_187_transpose_x_0, transpose_y = attn_187_transpose_y_0, x = var_9311_cast, y = var_9315_cast)[name = tensor("attn_187_cast")]; + tensor var_9319 = const()[name = tensor("op_9319"), val = tensor([2, 1280, 1, -1])]; + tensor input_555_cast = reshape(shape = var_9319, x = attn_187_cast)[name = tensor("input_555_cast")]; + tensor var_9324 = const()[name = tensor("op_9324"), val = tensor([1, 1])]; + tensor var_9326 = const()[name = tensor("op_9326"), val = tensor([1, 1])]; + tensor var_9328_pad_type_0 = const()[name = tensor("op_9328_pad_type_0"), val = tensor("custom")]; + tensor var_9328_pad_0 = const()[name = tensor("op_9328_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1295094592))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1296323456))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1296323648)))]; + tensor var_9328_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_9326, groups = var_6865, pad = var_9328_pad_0, pad_type = var_9328_pad_type_0, strides = var_9324, weight = up_blocks_0_attentions_1_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_555_cast)[name = tensor("op_9328_cast")]; + tensor inputs_281_cast = add(x = var_9328_cast, y = inputs_279_cast)[name = tensor("inputs_281_cast")]; + tensor var_9332 = const()[name = tensor("op_9332"), val = tensor([1])]; + tensor channels_mean_281_cast = reduce_mean(axes = var_9332, keep_dims = var_6860, x = inputs_281_cast)[name = tensor("channels_mean_281_cast")]; + tensor zero_mean_281_cast = sub(x = inputs_281_cast, y = channels_mean_281_cast)[name = tensor("zero_mean_281_cast")]; + tensor zero_mean_sq_281_cast = mul(x = zero_mean_281_cast, y = zero_mean_281_cast)[name = tensor("zero_mean_sq_281_cast")]; + tensor var_9336 = const()[name = tensor("op_9336"), val = tensor([1])]; + tensor var_9337_cast = reduce_mean(axes = var_9336, keep_dims = var_6860, x = zero_mean_sq_281_cast)[name = tensor("op_9337_cast")]; + tensor 
var_9338_to_fp16 = const()[name = tensor("op_9338_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9339_cast = add(x = var_9337_cast, y = var_9338_to_fp16)[name = tensor("op_9339_cast")]; + tensor denom_281_epsilon_0_to_fp16 = const()[name = tensor("denom_281_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_281_cast = rsqrt(epsilon = denom_281_epsilon_0_to_fp16, x = var_9339_cast)[name = tensor("denom_281_cast")]; + tensor out_281_cast = mul(x = zero_mean_281_cast, y = denom_281_cast)[name = tensor("out_281_cast")]; + tensor var_9343_to_fp16 = const()[name = tensor("op_9343_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1296326272)))]; + tensor var_9344_cast = add(x = out_281_cast, y = var_9343_to_fp16)[name = tensor("op_9344_cast")]; + tensor var_9346_to_fp16 = const()[name = tensor("op_9346_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1296328896)))]; + tensor input_557_cast = mul(x = var_9344_cast, y = var_9346_to_fp16)[name = tensor("input_557_cast")]; + tensor var_9354 = const()[name = tensor("op_9354"), val = tensor([1, 1])]; + tensor var_9356 = const()[name = tensor("op_9356"), val = tensor([1, 1])]; + tensor var_9358_pad_type_0 = const()[name = tensor("op_9358_pad_type_0"), val = tensor("custom")]; + tensor var_9358_pad_0 = const()[name = tensor("op_9358_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1296331520))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1309438784))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1309439360)))]; + tensor var_9358_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_9356, groups = var_6865, pad = var_9358_pad_0, pad_type = var_9358_pad_type_0, strides = var_9354, weight = up_blocks_0_attentions_1_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_557_cast)[name = tensor("op_9358_cast")]; + tensor var_9359_split_sizes_0 = const()[name = tensor("op_9359_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9359_axis_0 = const()[name = tensor("op_9359_axis_0"), val = tensor(1)]; + tensor var_9359_cast_0, tensor var_9359_cast_1 = split(axis = var_9359_axis_0, split_sizes = var_9359_split_sizes_0, x = var_9358_cast)[name = tensor("op_9359_cast")]; + tensor var_9361_mode_0 = const()[name = tensor("op_9361_mode_0"), val = tensor("EXACT")]; + tensor var_9361_cast = gelu(mode = var_9361_mode_0, x = var_9359_cast_1)[name = tensor("op_9361_cast")]; + tensor input_559_cast = mul(x = var_9359_cast_0, y = var_9361_cast)[name = tensor("input_559_cast")]; + tensor var_9365 = const()[name = tensor("op_9365"), val = tensor([1, 1])]; + tensor var_9367 = const()[name = tensor("op_9367"), val = tensor([1, 1])]; + tensor var_9369_pad_type_0 = const()[name = tensor("op_9369_pad_type_0"), val = tensor("custom")]; + tensor var_9369_pad_0 = const()[name = 
tensor("op_9369_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1309459904))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1316013568))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1316014144)))]; + tensor var_9369_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_9367, groups = var_6865, pad = var_9369_pad_0, pad_type = var_9369_pad_type_0, strides = var_9365, weight = up_blocks_0_attentions_1_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_559_cast)[name = tensor("op_9369_cast")]; + tensor inputs_283_cast = add(x = var_9369_cast, y = inputs_281_cast)[name = tensor("inputs_283_cast")]; + tensor var_9379 = const()[name = tensor("op_9379"), val = tensor([1])]; + tensor channels_mean_283_cast = reduce_mean(axes = var_9379, keep_dims = var_6860, x = inputs_283_cast)[name = tensor("channels_mean_283_cast")]; + tensor zero_mean_283_cast = sub(x = inputs_283_cast, y = channels_mean_283_cast)[name = tensor("zero_mean_283_cast")]; + tensor zero_mean_sq_283_cast = mul(x = zero_mean_283_cast, y = zero_mean_283_cast)[name = tensor("zero_mean_sq_283_cast")]; + tensor var_9383 = const()[name = tensor("op_9383"), val = tensor([1])]; + tensor var_9384_cast = reduce_mean(axes = var_9383, keep_dims = var_6860, x = zero_mean_sq_283_cast)[name = tensor("op_9384_cast")]; + tensor var_9385_to_fp16 = const()[name = tensor("op_9385_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9386_cast = add(x = var_9384_cast, y = var_9385_to_fp16)[name = tensor("op_9386_cast")]; + tensor denom_283_epsilon_0_to_fp16 = const()[name = tensor("denom_283_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_283_cast = rsqrt(epsilon = denom_283_epsilon_0_to_fp16, x = var_9386_cast)[name = tensor("denom_283_cast")]; + tensor out_283_cast = mul(x = zero_mean_283_cast, y = denom_283_cast)[name = tensor("out_283_cast")]; + tensor var_9390_to_fp16 = const()[name = tensor("op_9390_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1316016768)))]; + tensor var_9391_cast = add(x = out_283_cast, y = var_9390_to_fp16)[name = tensor("op_9391_cast")]; + tensor var_9393_to_fp16 = const()[name = tensor("op_9393_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1316019392)))]; + tensor hidden_states_379_cast = mul(x = var_9391_cast, y = var_9393_to_fp16)[name = tensor("hidden_states_379_cast")]; + tensor var_9400 = const()[name = tensor("op_9400"), val = tensor([1, 1])]; + tensor var_9402 = const()[name = tensor("op_9402"), val = tensor([1, 1])]; + tensor q_189_pad_type_0 = const()[name = tensor("q_189_pad_type_0"), val = tensor("custom")]; + tensor q_189_pad_0 = const()[name = tensor("q_189_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1316022016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1317250880))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_189_cast = conv(dilations = var_9402, groups = var_6865, pad = q_189_pad_0, pad_type = q_189_pad_type_0, strides = var_9400, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_379_cast)[name = tensor("q_189_cast")]; + tensor var_9406 = const()[name = tensor("op_9406"), val = tensor([1, 1])]; + tensor var_9408 = const()[name = tensor("op_9408"), val = tensor([1, 1])]; + tensor k_189_pad_type_0 = const()[name = tensor("k_189_pad_type_0"), val = tensor("custom")]; + tensor k_189_pad_0 = const()[name = tensor("k_189_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1317251072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1318479936))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_189_cast = conv(dilations = var_9408, groups = var_6865, pad = k_189_pad_0, pad_type = k_189_pad_type_0, strides = var_9406, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_379_cast)[name = tensor("k_189_cast")]; + tensor var_9412 = const()[name = tensor("op_9412"), val = tensor([1, 1])]; + tensor var_9414 = const()[name = tensor("op_9414"), val = tensor([1, 1])]; + tensor v_189_pad_type_0 = const()[name = tensor("v_189_pad_type_0"), val = tensor("custom")]; + tensor v_189_pad_0 = const()[name = tensor("v_189_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1318480128))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1320118592))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_189_cast = conv(dilations = var_9414, groups = var_6865, pad = v_189_pad_0, pad_type = v_189_pad_type_0, strides = var_9412, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_379_cast)[name = tensor("v_189_cast")]; + tensor var_9418 = const()[name = tensor("op_9418"), val = tensor([2, 20, 64, -1])]; + tensor var_9419_cast = reshape(shape = var_9418, x = q_189_cast)[name = tensor("op_9419_cast")]; + tensor var_9420 = const()[name = tensor("op_9420"), val = tensor([2, 20, 64, -1])]; + tensor var_9421_cast = reshape(shape = var_9420, x = k_189_cast)[name = tensor("op_9421_cast")]; + tensor var_9422 = const()[name = tensor("op_9422"), val = tensor([2, 20, 64, -1])]; + tensor var_9423_cast = reshape(shape = var_9422, x = v_189_cast)[name = tensor("op_9423_cast")]; + tensor attn_weights_377_transpose_x_0 = const()[name = tensor("attn_weights_377_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_377_transpose_y_0 = const()[name = 
tensor("attn_weights_377_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_377_cast = matmul(transpose_x = attn_weights_377_transpose_x_0, transpose_y = attn_weights_377_transpose_y_0, x = var_9419_cast, y = var_9421_cast)[name = tensor("attn_weights_377_cast")]; + tensor attn_weights_379_cast = mul(x = attn_weights_377_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_379_cast")]; + tensor var_9427_cast = softmax(axis = var_6849, x = attn_weights_379_cast)[name = tensor("op_9427_cast")]; + tensor attn_189_transpose_x_0 = const()[name = tensor("attn_189_transpose_x_0"), val = tensor(false)]; + tensor attn_189_transpose_y_0 = const()[name = tensor("attn_189_transpose_y_0"), val = tensor(true)]; + tensor attn_189_cast = matmul(transpose_x = attn_189_transpose_x_0, transpose_y = attn_189_transpose_y_0, x = var_9423_cast, y = var_9427_cast)[name = tensor("attn_189_cast")]; + tensor var_9431 = const()[name = tensor("op_9431"), val = tensor([2, 1280, 1, -1])]; + tensor input_561_cast = reshape(shape = var_9431, x = attn_189_cast)[name = tensor("input_561_cast")]; + tensor var_9436 = const()[name = tensor("op_9436"), val = tensor([1, 1])]; + tensor var_9438 = const()[name = tensor("op_9438"), val = tensor([1, 1])]; + tensor var_9440_pad_type_0 = const()[name = tensor("op_9440_pad_type_0"), val = tensor("custom")]; + tensor var_9440_pad_0 = const()[name = tensor("op_9440_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1320119168))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1321757632))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1321758208)))]; + tensor var_9440_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_9438, groups = var_6865, pad = var_9440_pad_0, pad_type = var_9440_pad_type_0, strides = var_9436, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_561_cast)[name = tensor("op_9440_cast")]; + tensor inputs_285_cast = add(x = var_9440_cast, y = inputs_283_cast)[name = tensor("inputs_285_cast")]; + tensor var_9444 = const()[name = tensor("op_9444"), val = tensor([1])]; + tensor channels_mean_285_cast = reduce_mean(axes = var_9444, keep_dims = var_6860, x = inputs_285_cast)[name = tensor("channels_mean_285_cast")]; + tensor zero_mean_285_cast = sub(x = inputs_285_cast, y = channels_mean_285_cast)[name = tensor("zero_mean_285_cast")]; + tensor zero_mean_sq_285_cast = mul(x = zero_mean_285_cast, y = zero_mean_285_cast)[name = tensor("zero_mean_sq_285_cast")]; + tensor var_9448 = const()[name = tensor("op_9448"), val = tensor([1])]; + tensor var_9449_cast = reduce_mean(axes = var_9448, keep_dims = var_6860, x = zero_mean_sq_285_cast)[name = tensor("op_9449_cast")]; + tensor var_9450_to_fp16 = const()[name = tensor("op_9450_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9451_cast = add(x = var_9449_cast, y = var_9450_to_fp16)[name = 
tensor("op_9451_cast")]; + tensor denom_285_epsilon_0_to_fp16 = const()[name = tensor("denom_285_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_285_cast = rsqrt(epsilon = denom_285_epsilon_0_to_fp16, x = var_9451_cast)[name = tensor("denom_285_cast")]; + tensor out_285_cast = mul(x = zero_mean_285_cast, y = denom_285_cast)[name = tensor("out_285_cast")]; + tensor var_9455_to_fp16 = const()[name = tensor("op_9455_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1321760832)))]; + tensor var_9456_cast = add(x = out_285_cast, y = var_9455_to_fp16)[name = tensor("op_9456_cast")]; + tensor var_9458_to_fp16 = const()[name = tensor("op_9458_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1321763456)))]; + tensor hidden_states_381_cast = mul(x = var_9456_cast, y = var_9458_to_fp16)[name = tensor("hidden_states_381_cast")]; + tensor var_9465 = const()[name = tensor("op_9465"), val = tensor([1, 1])]; + tensor var_9467 = const()[name = tensor("op_9467"), val = tensor([1, 1])]; + tensor q_191_pad_type_0 = const()[name = tensor("q_191_pad_type_0"), val = tensor("custom")]; + tensor q_191_pad_0 = const()[name = tensor("q_191_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1321766080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1322585344))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_191_cast = conv(dilations = var_9467, groups = var_6865, pad = q_191_pad_0, pad_type = q_191_pad_type_0, strides = var_9465, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_381_cast)[name = tensor("q_191_cast")]; + tensor var_9471 = const()[name = tensor("op_9471"), val = tensor([1, 1])]; + tensor var_9473 = const()[name = tensor("op_9473"), val = tensor([1, 1])]; + tensor k_191_pad_type_0 = const()[name = tensor("k_191_pad_type_0"), val = tensor("custom")]; + tensor k_191_pad_0 = const()[name = tensor("k_191_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1322585472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1324551616))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_191_cast = conv(dilations = var_9473, groups = var_6865, pad = k_191_pad_0, pad_type = k_191_pad_type_0, strides = var_9471, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_191_cast")]; + tensor var_9477 = const()[name = tensor("op_9477"), val = tensor([1, 1])]; + tensor var_9479 = const()[name = tensor("op_9479"), val = tensor([1, 1])]; + tensor v_191_pad_type_0 = const()[name = tensor("v_191_pad_type_0"), val = tensor("custom")]; + tensor v_191_pad_0 = const()[name = tensor("v_191_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1324551808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1326517952))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_191_cast = conv(dilations = var_9479, groups = var_6865, pad = v_191_pad_0, pad_type = v_191_pad_type_0, strides = var_9477, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_191_cast")]; + tensor var_9483 = const()[name = tensor("op_9483"), val = tensor([2, 20, 64, -1])]; + tensor var_9484_cast = reshape(shape = var_9483, x = q_191_cast)[name = tensor("op_9484_cast")]; + tensor var_9485 = const()[name = tensor("op_9485"), val = tensor([2, 20, 64, -1])]; + tensor var_9486_cast = reshape(shape = var_9485, x = k_191_cast)[name = tensor("op_9486_cast")]; + tensor var_9487 = const()[name = tensor("op_9487"), val = tensor([2, 20, 64, -1])]; + tensor var_9488_cast = reshape(shape = var_9487, x = v_191_cast)[name = tensor("op_9488_cast")]; + tensor attn_weights_381_transpose_x_0 = const()[name = tensor("attn_weights_381_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_381_transpose_y_0 = const()[name = tensor("attn_weights_381_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_381_cast = matmul(transpose_x = attn_weights_381_transpose_x_0, transpose_y = attn_weights_381_transpose_y_0, x = var_9484_cast, y = var_9486_cast)[name = tensor("attn_weights_381_cast")]; + tensor attn_weights_383_cast = mul(x = attn_weights_381_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_383_cast")]; + tensor var_9492_cast = softmax(axis = var_6849, x = attn_weights_383_cast)[name = tensor("op_9492_cast")]; + tensor attn_191_transpose_x_0 = const()[name = tensor("attn_191_transpose_x_0"), val = tensor(false)]; + tensor attn_191_transpose_y_0 = const()[name = tensor("attn_191_transpose_y_0"), val = tensor(true)]; + tensor attn_191_cast = matmul(transpose_x = attn_191_transpose_x_0, transpose_y = attn_191_transpose_y_0, x = var_9488_cast, y = var_9492_cast)[name = tensor("attn_191_cast")]; + tensor var_9496 = const()[name = tensor("op_9496"), val = tensor([2, 1280, 1, -1])]; + tensor input_563_cast = reshape(shape = var_9496, x = attn_191_cast)[name = tensor("input_563_cast")]; + tensor var_9501 = const()[name = tensor("op_9501"), val = tensor([1, 1])]; + tensor var_9503 = const()[name = tensor("op_9503"), val = tensor([1, 1])]; + tensor var_9505_pad_type_0 = const()[name = tensor("op_9505_pad_type_0"), val = tensor("custom")]; + tensor var_9505_pad_0 = const()[name = tensor("op_9505_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1326518144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1327747008))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1327747200)))]; + tensor var_9505_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_9503, groups = var_6865, pad = var_9505_pad_0, pad_type = var_9505_pad_type_0, strides = var_9501, weight = up_blocks_0_attentions_1_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_563_cast)[name = tensor("op_9505_cast")]; + tensor inputs_287_cast = add(x = var_9505_cast, y = inputs_285_cast)[name = tensor("inputs_287_cast")]; + tensor var_9509 = const()[name = tensor("op_9509"), val = tensor([1])]; + tensor channels_mean_287_cast = reduce_mean(axes = var_9509, keep_dims = var_6860, x = inputs_287_cast)[name = tensor("channels_mean_287_cast")]; + tensor zero_mean_287_cast = sub(x = inputs_287_cast, y = channels_mean_287_cast)[name = tensor("zero_mean_287_cast")]; + tensor zero_mean_sq_287_cast = mul(x = zero_mean_287_cast, y = zero_mean_287_cast)[name = tensor("zero_mean_sq_287_cast")]; + tensor var_9513 = const()[name = tensor("op_9513"), val = tensor([1])]; + tensor var_9514_cast = reduce_mean(axes = var_9513, keep_dims = var_6860, x = zero_mean_sq_287_cast)[name = tensor("op_9514_cast")]; + tensor var_9515_to_fp16 = const()[name = tensor("op_9515_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9516_cast = add(x = var_9514_cast, y = var_9515_to_fp16)[name = tensor("op_9516_cast")]; + tensor denom_287_epsilon_0_to_fp16 = const()[name = tensor("denom_287_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_287_cast = rsqrt(epsilon = denom_287_epsilon_0_to_fp16, x = var_9516_cast)[name = tensor("denom_287_cast")]; + tensor out_287_cast = mul(x = zero_mean_287_cast, y = denom_287_cast)[name = tensor("out_287_cast")]; + tensor var_9520_to_fp16 = const()[name = tensor("op_9520_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1327749824)))]; + tensor var_9521_cast = add(x = out_287_cast, y = var_9520_to_fp16)[name = tensor("op_9521_cast")]; + tensor var_9523_to_fp16 = const()[name = tensor("op_9523_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1327752448)))]; + tensor input_565_cast = mul(x = var_9521_cast, y = var_9523_to_fp16)[name = tensor("input_565_cast")]; + tensor var_9531 = const()[name = tensor("op_9531"), val = tensor([1, 1])]; + tensor var_9533 = const()[name = tensor("op_9533"), val = tensor([1, 1])]; + tensor var_9535_pad_type_0 = const()[name = tensor("op_9535_pad_type_0"), val = tensor("custom")]; + tensor var_9535_pad_0 = const()[name = tensor("op_9535_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1327755072))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1340862336))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1340862912)))]; + tensor var_9535_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, 
dilations = var_9533, groups = var_6865, pad = var_9535_pad_0, pad_type = var_9535_pad_type_0, strides = var_9531, weight = up_blocks_0_attentions_1_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_565_cast)[name = tensor("op_9535_cast")]; + tensor var_9536_split_sizes_0 = const()[name = tensor("op_9536_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9536_axis_0 = const()[name = tensor("op_9536_axis_0"), val = tensor(1)]; + tensor var_9536_cast_0, tensor var_9536_cast_1 = split(axis = var_9536_axis_0, split_sizes = var_9536_split_sizes_0, x = var_9535_cast)[name = tensor("op_9536_cast")]; + tensor var_9538_mode_0 = const()[name = tensor("op_9538_mode_0"), val = tensor("EXACT")]; + tensor var_9538_cast = gelu(mode = var_9538_mode_0, x = var_9536_cast_1)[name = tensor("op_9538_cast")]; + tensor input_567_cast = mul(x = var_9536_cast_0, y = var_9538_cast)[name = tensor("input_567_cast")]; + tensor var_9542 = const()[name = tensor("op_9542"), val = tensor([1, 1])]; + tensor var_9544 = const()[name = tensor("op_9544"), val = tensor([1, 1])]; + tensor var_9546_pad_type_0 = const()[name = tensor("op_9546_pad_type_0"), val = tensor("custom")]; + tensor var_9546_pad_0 = const()[name = tensor("op_9546_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1340883456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1347437120))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1347437696)))]; + tensor var_9546_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_9544, groups = var_6865, pad = var_9546_pad_0, pad_type = var_9546_pad_type_0, strides = var_9542, weight = up_blocks_0_attentions_1_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_567_cast)[name = tensor("op_9546_cast")]; + tensor inputs_289_cast = add(x = var_9546_cast, y = inputs_287_cast)[name = tensor("inputs_289_cast")]; + tensor var_9556 = const()[name = tensor("op_9556"), val = tensor([1])]; + tensor channels_mean_289_cast = reduce_mean(axes = var_9556, keep_dims = var_6860, x = inputs_289_cast)[name = tensor("channels_mean_289_cast")]; + tensor zero_mean_289_cast = sub(x = inputs_289_cast, y = channels_mean_289_cast)[name = tensor("zero_mean_289_cast")]; + tensor zero_mean_sq_289_cast = mul(x = zero_mean_289_cast, y = zero_mean_289_cast)[name = tensor("zero_mean_sq_289_cast")]; + tensor var_9560 = const()[name = tensor("op_9560"), val = tensor([1])]; + tensor var_9561_cast = reduce_mean(axes = var_9560, keep_dims = var_6860, x = zero_mean_sq_289_cast)[name = tensor("op_9561_cast")]; + tensor var_9562_to_fp16 = const()[name = tensor("op_9562_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9563_cast = add(x = var_9561_cast, y = var_9562_to_fp16)[name = tensor("op_9563_cast")]; + tensor denom_289_epsilon_0_to_fp16 = const()[name = tensor("denom_289_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_289_cast = rsqrt(epsilon = denom_289_epsilon_0_to_fp16, 
x = var_9563_cast)[name = tensor("denom_289_cast")]; + tensor out_289_cast = mul(x = zero_mean_289_cast, y = denom_289_cast)[name = tensor("out_289_cast")]; + tensor var_9567_to_fp16 = const()[name = tensor("op_9567_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1347440320)))]; + tensor var_9568_cast = add(x = out_289_cast, y = var_9567_to_fp16)[name = tensor("op_9568_cast")]; + tensor var_9570_to_fp16 = const()[name = tensor("op_9570_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1347442944)))]; + tensor hidden_states_385_cast = mul(x = var_9568_cast, y = var_9570_to_fp16)[name = tensor("hidden_states_385_cast")]; + tensor var_9577 = const()[name = tensor("op_9577"), val = tensor([1, 1])]; + tensor var_9579 = const()[name = tensor("op_9579"), val = tensor([1, 1])]; + tensor q_193_pad_type_0 = const()[name = tensor("q_193_pad_type_0"), val = tensor("custom")]; + tensor q_193_pad_0 = const()[name = tensor("q_193_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1347445568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1348674432))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_193_cast = conv(dilations = var_9579, groups = var_6865, pad = q_193_pad_0, pad_type = q_193_pad_type_0, strides = var_9577, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_385_cast)[name = tensor("q_193_cast")]; + tensor var_9583 = const()[name = tensor("op_9583"), val = tensor([1, 1])]; + tensor var_9585 = const()[name = tensor("op_9585"), val = tensor([1, 1])]; + tensor k_193_pad_type_0 = const()[name = tensor("k_193_pad_type_0"), val = tensor("custom")]; + tensor k_193_pad_0 = const()[name = tensor("k_193_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1348674624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1349903488))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_193_cast = conv(dilations = var_9585, groups = var_6865, pad = k_193_pad_0, pad_type = k_193_pad_type_0, strides = var_9583, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_385_cast)[name = tensor("k_193_cast")]; + tensor var_9589 = const()[name = tensor("op_9589"), val = tensor([1, 1])]; + tensor var_9591 = const()[name = tensor("op_9591"), val = tensor([1, 1])]; + tensor v_193_pad_type_0 = const()[name = tensor("v_193_pad_type_0"), val = tensor("custom")]; + tensor v_193_pad_0 = const()[name = tensor("v_193_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1349903680))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1351542144))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_193_cast = conv(dilations = var_9591, groups = var_6865, pad = v_193_pad_0, pad_type = v_193_pad_type_0, strides = var_9589, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_385_cast)[name = tensor("v_193_cast")]; + tensor var_9595 = const()[name = tensor("op_9595"), val = tensor([2, 20, 64, -1])]; + tensor var_9596_cast = reshape(shape = var_9595, x = q_193_cast)[name = tensor("op_9596_cast")]; + tensor var_9597 = const()[name = tensor("op_9597"), val = tensor([2, 20, 64, -1])]; + tensor var_9598_cast = reshape(shape = var_9597, x = k_193_cast)[name = tensor("op_9598_cast")]; + tensor var_9599 = const()[name = tensor("op_9599"), val = tensor([2, 20, 64, -1])]; + tensor var_9600_cast = reshape(shape = var_9599, x = v_193_cast)[name = tensor("op_9600_cast")]; + tensor attn_weights_385_transpose_x_0 = const()[name = tensor("attn_weights_385_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_385_transpose_y_0 = const()[name = tensor("attn_weights_385_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_385_cast = matmul(transpose_x = attn_weights_385_transpose_x_0, transpose_y = attn_weights_385_transpose_y_0, x = var_9596_cast, y = var_9598_cast)[name = tensor("attn_weights_385_cast")]; + tensor attn_weights_387_cast = mul(x = attn_weights_385_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_387_cast")]; + tensor var_9604_cast = softmax(axis = var_6849, x = attn_weights_387_cast)[name = tensor("op_9604_cast")]; + tensor attn_193_transpose_x_0 = const()[name = tensor("attn_193_transpose_x_0"), val = tensor(false)]; + tensor attn_193_transpose_y_0 = const()[name = tensor("attn_193_transpose_y_0"), val = tensor(true)]; + tensor attn_193_cast = matmul(transpose_x = attn_193_transpose_x_0, transpose_y = attn_193_transpose_y_0, x = var_9600_cast, y = var_9604_cast)[name = tensor("attn_193_cast")]; + tensor var_9608 = const()[name = tensor("op_9608"), val = tensor([2, 1280, 1, -1])]; + tensor input_569_cast = reshape(shape = var_9608, x = attn_193_cast)[name = tensor("input_569_cast")]; + tensor var_9613 = const()[name = tensor("op_9613"), val = tensor([1, 1])]; + tensor var_9615 = const()[name = tensor("op_9615"), val = tensor([1, 1])]; + tensor var_9617_pad_type_0 = const()[name = tensor("op_9617_pad_type_0"), val = tensor("custom")]; + tensor var_9617_pad_0 = const()[name = tensor("op_9617_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1351542720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353181184))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353181760)))]; + tensor var_9617_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_9615, groups = var_6865, pad = 
var_9617_pad_0, pad_type = var_9617_pad_type_0, strides = var_9613, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_569_cast)[name = tensor("op_9617_cast")]; + tensor inputs_291_cast = add(x = var_9617_cast, y = inputs_289_cast)[name = tensor("inputs_291_cast")]; + tensor var_9621 = const()[name = tensor("op_9621"), val = tensor([1])]; + tensor channels_mean_291_cast = reduce_mean(axes = var_9621, keep_dims = var_6860, x = inputs_291_cast)[name = tensor("channels_mean_291_cast")]; + tensor zero_mean_291_cast = sub(x = inputs_291_cast, y = channels_mean_291_cast)[name = tensor("zero_mean_291_cast")]; + tensor zero_mean_sq_291_cast = mul(x = zero_mean_291_cast, y = zero_mean_291_cast)[name = tensor("zero_mean_sq_291_cast")]; + tensor var_9625 = const()[name = tensor("op_9625"), val = tensor([1])]; + tensor var_9626_cast = reduce_mean(axes = var_9625, keep_dims = var_6860, x = zero_mean_sq_291_cast)[name = tensor("op_9626_cast")]; + tensor var_9627_to_fp16 = const()[name = tensor("op_9627_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9628_cast = add(x = var_9626_cast, y = var_9627_to_fp16)[name = tensor("op_9628_cast")]; + tensor denom_291_epsilon_0_to_fp16 = const()[name = tensor("denom_291_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_291_cast = rsqrt(epsilon = denom_291_epsilon_0_to_fp16, x = var_9628_cast)[name = tensor("denom_291_cast")]; + tensor out_291_cast = mul(x = zero_mean_291_cast, y = denom_291_cast)[name = tensor("out_291_cast")]; + tensor var_9632_to_fp16 = const()[name = tensor("op_9632_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353184384)))]; + tensor var_9633_cast = add(x = out_291_cast, y = var_9632_to_fp16)[name = tensor("op_9633_cast")]; + tensor var_9635_to_fp16 = const()[name = tensor("op_9635_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353187008)))]; + tensor hidden_states_387_cast = mul(x = var_9633_cast, y = var_9635_to_fp16)[name = tensor("hidden_states_387_cast")]; + tensor var_9642 = const()[name = tensor("op_9642"), val = tensor([1, 1])]; + tensor var_9644 = const()[name = tensor("op_9644"), val = tensor([1, 1])]; + tensor q_195_pad_type_0 = const()[name = tensor("q_195_pad_type_0"), val = tensor("custom")]; + tensor q_195_pad_0 = const()[name = tensor("q_195_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1353189632))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1354418496))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_195_cast = conv(dilations = var_9644, groups = var_6865, pad = q_195_pad_0, pad_type = q_195_pad_type_0, strides = var_9642, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_387_cast)[name = tensor("q_195_cast")]; + tensor var_9648 = const()[name = tensor("op_9648"), val = tensor([1, 1])]; + tensor var_9650 = const()[name = tensor("op_9650"), val = tensor([1, 1])]; + tensor k_195_pad_type_0 = const()[name = tensor("k_195_pad_type_0"), val = tensor("custom")]; + tensor k_195_pad_0 = const()[name = tensor("k_195_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1354418688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1356384832))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_195_cast = conv(dilations = var_9650, groups = var_6865, pad = k_195_pad_0, pad_type = k_195_pad_type_0, strides = var_9648, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_195_cast")]; + tensor var_9654 = const()[name = tensor("op_9654"), val = tensor([1, 1])]; + tensor var_9656 = const()[name = tensor("op_9656"), val = tensor([1, 1])]; + tensor v_195_pad_type_0 = const()[name = tensor("v_195_pad_type_0"), val = tensor("custom")]; + tensor v_195_pad_0 = const()[name = tensor("v_195_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1356385024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1358351168))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_195_cast = conv(dilations = var_9656, groups = var_6865, pad = v_195_pad_0, pad_type = v_195_pad_type_0, strides = var_9654, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_195_cast")]; + tensor var_9660 = const()[name = tensor("op_9660"), val = tensor([2, 20, 64, -1])]; + tensor var_9661_cast = reshape(shape = var_9660, x = q_195_cast)[name = tensor("op_9661_cast")]; + tensor var_9662 = const()[name = tensor("op_9662"), val = tensor([2, 20, 64, -1])]; + tensor var_9663_cast = reshape(shape = var_9662, x = k_195_cast)[name = tensor("op_9663_cast")]; + tensor var_9664 = const()[name = tensor("op_9664"), val = tensor([2, 20, 64, -1])]; + tensor var_9665_cast = reshape(shape = var_9664, x = v_195_cast)[name = tensor("op_9665_cast")]; + tensor attn_weights_389_transpose_x_0 = const()[name = tensor("attn_weights_389_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_389_transpose_y_0 = const()[name = tensor("attn_weights_389_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_389_cast = matmul(transpose_x = attn_weights_389_transpose_x_0, transpose_y = attn_weights_389_transpose_y_0, x = var_9661_cast, y = var_9663_cast)[name = tensor("attn_weights_389_cast")]; + tensor attn_weights_391_cast = mul(x = attn_weights_389_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_391_cast")]; + tensor var_9669_cast = softmax(axis = var_6849, x = attn_weights_391_cast)[name = tensor("op_9669_cast")]; + tensor attn_195_transpose_x_0 = const()[name = tensor("attn_195_transpose_x_0"), val = tensor(false)]; + tensor attn_195_transpose_y_0 = const()[name = tensor("attn_195_transpose_y_0"), val = tensor(true)]; + tensor attn_195_cast = matmul(transpose_x = attn_195_transpose_x_0, transpose_y = attn_195_transpose_y_0, x = var_9665_cast, y = var_9669_cast)[name = tensor("attn_195_cast")]; + tensor var_9673 = const()[name = tensor("op_9673"), val = tensor([2, 1280, 1, -1])]; + 
tensor input_571_cast = reshape(shape = var_9673, x = attn_195_cast)[name = tensor("input_571_cast")]; + tensor var_9678 = const()[name = tensor("op_9678"), val = tensor([1, 1])]; + tensor var_9680 = const()[name = tensor("op_9680"), val = tensor([1, 1])]; + tensor var_9682_pad_type_0 = const()[name = tensor("op_9682_pad_type_0"), val = tensor("custom")]; + tensor var_9682_pad_0 = const()[name = tensor("op_9682_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1358351360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359170624))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359170752)))]; + tensor var_9682_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_9680, groups = var_6865, pad = var_9682_pad_0, pad_type = var_9682_pad_type_0, strides = var_9678, weight = up_blocks_0_attentions_1_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_571_cast)[name = tensor("op_9682_cast")]; + tensor inputs_293_cast = add(x = var_9682_cast, y = inputs_291_cast)[name = tensor("inputs_293_cast")]; + tensor var_9686 = const()[name = tensor("op_9686"), val = tensor([1])]; + tensor channels_mean_293_cast = reduce_mean(axes = var_9686, keep_dims = var_6860, x = inputs_293_cast)[name = tensor("channels_mean_293_cast")]; + tensor zero_mean_293_cast = sub(x = inputs_293_cast, y = channels_mean_293_cast)[name = tensor("zero_mean_293_cast")]; + tensor zero_mean_sq_293_cast = mul(x = zero_mean_293_cast, y = zero_mean_293_cast)[name = tensor("zero_mean_sq_293_cast")]; + tensor var_9690 = const()[name = tensor("op_9690"), val = tensor([1])]; + tensor var_9691_cast = reduce_mean(axes = var_9690, keep_dims = var_6860, x = zero_mean_sq_293_cast)[name = tensor("op_9691_cast")]; + tensor var_9692_to_fp16 = const()[name = tensor("op_9692_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9693_cast = add(x = var_9691_cast, y = var_9692_to_fp16)[name = tensor("op_9693_cast")]; + tensor denom_293_epsilon_0_to_fp16 = const()[name = tensor("denom_293_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_293_cast = rsqrt(epsilon = denom_293_epsilon_0_to_fp16, x = var_9693_cast)[name = tensor("denom_293_cast")]; + tensor out_293_cast = mul(x = zero_mean_293_cast, y = denom_293_cast)[name = tensor("out_293_cast")]; + tensor var_9697_to_fp16 = const()[name = tensor("op_9697_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359173376)))]; + tensor var_9698_cast = add(x = out_293_cast, y = var_9697_to_fp16)[name = tensor("op_9698_cast")]; + tensor var_9700_to_fp16 = const()[name = tensor("op_9700_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359176000)))]; + tensor input_573_cast = mul(x = var_9698_cast, y = var_9700_to_fp16)[name = tensor("input_573_cast")]; + tensor var_9708 = const()[name = tensor("op_9708"), val = tensor([1, 1])]; + tensor 
var_9710 = const()[name = tensor("op_9710"), val = tensor([1, 1])]; + tensor var_9712_pad_type_0 = const()[name = tensor("op_9712_pad_type_0"), val = tensor("custom")]; + tensor var_9712_pad_0 = const()[name = tensor("op_9712_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1359178624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1372285888))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1372286464)))]; + tensor var_9712_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_9710, groups = var_6865, pad = var_9712_pad_0, pad_type = var_9712_pad_type_0, strides = var_9708, weight = up_blocks_0_attentions_1_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_573_cast)[name = tensor("op_9712_cast")]; + tensor var_9713_split_sizes_0 = const()[name = tensor("op_9713_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9713_axis_0 = const()[name = tensor("op_9713_axis_0"), val = tensor(1)]; + tensor var_9713_cast_0, tensor var_9713_cast_1 = split(axis = var_9713_axis_0, split_sizes = var_9713_split_sizes_0, x = var_9712_cast)[name = tensor("op_9713_cast")]; + tensor var_9715_mode_0 = const()[name = tensor("op_9715_mode_0"), val = tensor("EXACT")]; + tensor var_9715_cast = gelu(mode = var_9715_mode_0, x = var_9713_cast_1)[name = tensor("op_9715_cast")]; + tensor input_575_cast = mul(x = var_9713_cast_0, y = var_9715_cast)[name = tensor("input_575_cast")]; + tensor var_9719 = const()[name = tensor("op_9719"), val = tensor([1, 1])]; + tensor var_9721 = const()[name = tensor("op_9721"), val = tensor([1, 1])]; + tensor var_9723_pad_type_0 = const()[name = tensor("op_9723_pad_type_0"), val = tensor("custom")]; + tensor var_9723_pad_0 = const()[name = tensor("op_9723_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1372307008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1378860672))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1378861248)))]; + tensor var_9723_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_9721, groups = var_6865, pad = var_9723_pad_0, pad_type = var_9723_pad_type_0, strides = var_9719, weight = up_blocks_0_attentions_1_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_575_cast)[name = tensor("op_9723_cast")]; + tensor inputs_295_cast = add(x = 
var_9723_cast, y = inputs_293_cast)[name = tensor("inputs_295_cast")]; + tensor var_9733 = const()[name = tensor("op_9733"), val = tensor([1])]; + tensor channels_mean_295_cast = reduce_mean(axes = var_9733, keep_dims = var_6860, x = inputs_295_cast)[name = tensor("channels_mean_295_cast")]; + tensor zero_mean_295_cast = sub(x = inputs_295_cast, y = channels_mean_295_cast)[name = tensor("zero_mean_295_cast")]; + tensor zero_mean_sq_295_cast = mul(x = zero_mean_295_cast, y = zero_mean_295_cast)[name = tensor("zero_mean_sq_295_cast")]; + tensor var_9737 = const()[name = tensor("op_9737"), val = tensor([1])]; + tensor var_9738_cast = reduce_mean(axes = var_9737, keep_dims = var_6860, x = zero_mean_sq_295_cast)[name = tensor("op_9738_cast")]; + tensor var_9739_to_fp16 = const()[name = tensor("op_9739_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9740_cast = add(x = var_9738_cast, y = var_9739_to_fp16)[name = tensor("op_9740_cast")]; + tensor denom_295_epsilon_0_to_fp16 = const()[name = tensor("denom_295_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_295_cast = rsqrt(epsilon = denom_295_epsilon_0_to_fp16, x = var_9740_cast)[name = tensor("denom_295_cast")]; + tensor out_295_cast = mul(x = zero_mean_295_cast, y = denom_295_cast)[name = tensor("out_295_cast")]; + tensor var_9744_to_fp16 = const()[name = tensor("op_9744_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1378863872)))]; + tensor var_9745_cast = add(x = out_295_cast, y = var_9744_to_fp16)[name = tensor("op_9745_cast")]; + tensor var_9747_to_fp16 = const()[name = tensor("op_9747_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1378866496)))]; + tensor hidden_states_391_cast = mul(x = var_9745_cast, y = var_9747_to_fp16)[name = tensor("hidden_states_391_cast")]; + tensor var_9754 = const()[name = tensor("op_9754"), val = tensor([1, 1])]; + tensor var_9756 = const()[name = tensor("op_9756"), val = tensor([1, 1])]; + tensor q_197_pad_type_0 = const()[name = tensor("q_197_pad_type_0"), val = tensor("custom")]; + tensor q_197_pad_0 = const()[name = tensor("q_197_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1378869120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1380097984))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_197_cast = conv(dilations = var_9756, groups = var_6865, pad = q_197_pad_0, pad_type = q_197_pad_type_0, strides = var_9754, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_391_cast)[name = tensor("q_197_cast")]; + tensor var_9760 = const()[name = tensor("op_9760"), val = tensor([1, 1])]; + tensor var_9762 = const()[name = tensor("op_9762"), val = tensor([1, 1])]; + tensor k_197_pad_type_0 = const()[name = tensor("k_197_pad_type_0"), val = tensor("custom")]; + tensor k_197_pad_0 = const()[name = tensor("k_197_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1380098176))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1381327040))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_197_cast = conv(dilations = var_9762, groups = var_6865, pad = k_197_pad_0, pad_type = k_197_pad_type_0, strides = var_9760, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_391_cast)[name = tensor("k_197_cast")]; + tensor var_9766 = const()[name = tensor("op_9766"), val = tensor([1, 1])]; + tensor var_9768 = const()[name = tensor("op_9768"), val = tensor([1, 1])]; + tensor v_197_pad_type_0 = const()[name = tensor("v_197_pad_type_0"), val = tensor("custom")]; + tensor v_197_pad_0 = const()[name = tensor("v_197_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1381327232))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382965696))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_197_cast = conv(dilations = var_9768, groups = var_6865, pad = v_197_pad_0, pad_type = v_197_pad_type_0, strides = var_9766, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_391_cast)[name = tensor("v_197_cast")]; + tensor var_9772 = const()[name = tensor("op_9772"), val = tensor([2, 20, 64, -1])]; + tensor var_9773_cast = reshape(shape = var_9772, x = q_197_cast)[name = tensor("op_9773_cast")]; + tensor var_9774 = const()[name = tensor("op_9774"), val = tensor([2, 20, 64, -1])]; + tensor var_9775_cast = reshape(shape = var_9774, x = k_197_cast)[name = tensor("op_9775_cast")]; + tensor var_9776 = const()[name = tensor("op_9776"), val = tensor([2, 20, 64, -1])]; + tensor var_9777_cast = reshape(shape = var_9776, x = v_197_cast)[name = tensor("op_9777_cast")]; + tensor attn_weights_393_transpose_x_0 = const()[name = tensor("attn_weights_393_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_393_transpose_y_0 = const()[name = tensor("attn_weights_393_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_393_cast = matmul(transpose_x = attn_weights_393_transpose_x_0, transpose_y = attn_weights_393_transpose_y_0, x = var_9773_cast, y = var_9775_cast)[name = tensor("attn_weights_393_cast")]; + tensor attn_weights_395_cast = mul(x = attn_weights_393_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_395_cast")]; + tensor var_9781_cast = softmax(axis = var_6849, x = attn_weights_395_cast)[name = tensor("op_9781_cast")]; + tensor attn_197_transpose_x_0 = const()[name = tensor("attn_197_transpose_x_0"), val = tensor(false)]; + tensor attn_197_transpose_y_0 = const()[name = tensor("attn_197_transpose_y_0"), val = tensor(true)]; + tensor attn_197_cast = matmul(transpose_x = attn_197_transpose_x_0, transpose_y = attn_197_transpose_y_0, x = var_9777_cast, y = var_9781_cast)[name = tensor("attn_197_cast")]; + tensor var_9785 = const()[name = tensor("op_9785"), val = tensor([2, 1280, 1, -1])]; + tensor input_577_cast = reshape(shape = var_9785, x = attn_197_cast)[name = tensor("input_577_cast")]; + tensor var_9790 = const()[name = tensor("op_9790"), val = tensor([1, 1])]; + tensor var_9792 = const()[name = tensor("op_9792"), val = 
tensor([1, 1])]; + tensor var_9794_pad_type_0 = const()[name = tensor("op_9794_pad_type_0"), val = tensor("custom")]; + tensor var_9794_pad_0 = const()[name = tensor("op_9794_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1382966272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1384604736))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1384605312)))]; + tensor var_9794_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_9792, groups = var_6865, pad = var_9794_pad_0, pad_type = var_9794_pad_type_0, strides = var_9790, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_577_cast)[name = tensor("op_9794_cast")]; + tensor inputs_297_cast = add(x = var_9794_cast, y = inputs_295_cast)[name = tensor("inputs_297_cast")]; + tensor var_9798 = const()[name = tensor("op_9798"), val = tensor([1])]; + tensor channels_mean_297_cast = reduce_mean(axes = var_9798, keep_dims = var_6860, x = inputs_297_cast)[name = tensor("channels_mean_297_cast")]; + tensor zero_mean_297_cast = sub(x = inputs_297_cast, y = channels_mean_297_cast)[name = tensor("zero_mean_297_cast")]; + tensor zero_mean_sq_297_cast = mul(x = zero_mean_297_cast, y = zero_mean_297_cast)[name = tensor("zero_mean_sq_297_cast")]; + tensor var_9802 = const()[name = tensor("op_9802"), val = tensor([1])]; + tensor var_9803_cast = reduce_mean(axes = var_9802, keep_dims = var_6860, x = zero_mean_sq_297_cast)[name = tensor("op_9803_cast")]; + tensor var_9804_to_fp16 = const()[name = tensor("op_9804_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9805_cast = add(x = var_9803_cast, y = var_9804_to_fp16)[name = tensor("op_9805_cast")]; + tensor denom_297_epsilon_0_to_fp16 = const()[name = tensor("denom_297_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_297_cast = rsqrt(epsilon = denom_297_epsilon_0_to_fp16, x = var_9805_cast)[name = tensor("denom_297_cast")]; + tensor out_297_cast = mul(x = zero_mean_297_cast, y = denom_297_cast)[name = tensor("out_297_cast")]; + tensor var_9809_to_fp16 = const()[name = tensor("op_9809_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1384607936)))]; + tensor var_9810_cast = add(x = out_297_cast, y = var_9809_to_fp16)[name = tensor("op_9810_cast")]; + tensor var_9812_to_fp16 = const()[name = tensor("op_9812_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1384610560)))]; + tensor hidden_states_393_cast = mul(x = var_9810_cast, y = var_9812_to_fp16)[name = tensor("hidden_states_393_cast")]; + tensor var_9819 = const()[name = tensor("op_9819"), val = tensor([1, 1])]; + tensor var_9821 = const()[name = tensor("op_9821"), val = tensor([1, 1])]; + tensor q_199_pad_type_0 = const()[name = tensor("q_199_pad_type_0"), val = tensor("custom")]; + tensor q_199_pad_0 = const()[name = 
tensor("q_199_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1384613184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1385432448))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_199_cast = conv(dilations = var_9821, groups = var_6865, pad = q_199_pad_0, pad_type = q_199_pad_type_0, strides = var_9819, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_393_cast)[name = tensor("q_199_cast")]; + tensor var_9825 = const()[name = tensor("op_9825"), val = tensor([1, 1])]; + tensor var_9827 = const()[name = tensor("op_9827"), val = tensor([1, 1])]; + tensor k_199_pad_type_0 = const()[name = tensor("k_199_pad_type_0"), val = tensor("custom")]; + tensor k_199_pad_0 = const()[name = tensor("k_199_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1385432576))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1387398720))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_199_cast = conv(dilations = var_9827, groups = var_6865, pad = k_199_pad_0, pad_type = k_199_pad_type_0, strides = var_9825, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_199_cast")]; + tensor var_9831 = const()[name = tensor("op_9831"), val = tensor([1, 1])]; + tensor var_9833 = const()[name = tensor("op_9833"), val = tensor([1, 1])]; + tensor v_199_pad_type_0 = const()[name = tensor("v_199_pad_type_0"), val = tensor("custom")]; + tensor v_199_pad_0 = const()[name = tensor("v_199_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1387398912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1389365056))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_199_cast = conv(dilations = var_9833, groups = var_6865, pad = v_199_pad_0, pad_type = v_199_pad_type_0, strides = var_9831, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_199_cast")]; + tensor var_9837 = const()[name = tensor("op_9837"), val = tensor([2, 20, 64, -1])]; + tensor var_9838_cast = reshape(shape = var_9837, x = q_199_cast)[name = tensor("op_9838_cast")]; + tensor var_9839 = const()[name = tensor("op_9839"), val = tensor([2, 20, 64, -1])]; + tensor var_9840_cast = reshape(shape = var_9839, x = k_199_cast)[name = tensor("op_9840_cast")]; + tensor var_9841 = const()[name = tensor("op_9841"), val = tensor([2, 20, 64, -1])]; + tensor var_9842_cast = reshape(shape = var_9841, x = v_199_cast)[name = tensor("op_9842_cast")]; + tensor 
attn_weights_397_transpose_x_0 = const()[name = tensor("attn_weights_397_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_397_transpose_y_0 = const()[name = tensor("attn_weights_397_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_397_cast = matmul(transpose_x = attn_weights_397_transpose_x_0, transpose_y = attn_weights_397_transpose_y_0, x = var_9838_cast, y = var_9840_cast)[name = tensor("attn_weights_397_cast")]; + tensor attn_weights_399_cast = mul(x = attn_weights_397_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_399_cast")]; + tensor var_9846_cast = softmax(axis = var_6849, x = attn_weights_399_cast)[name = tensor("op_9846_cast")]; + tensor attn_199_transpose_x_0 = const()[name = tensor("attn_199_transpose_x_0"), val = tensor(false)]; + tensor attn_199_transpose_y_0 = const()[name = tensor("attn_199_transpose_y_0"), val = tensor(true)]; + tensor attn_199_cast = matmul(transpose_x = attn_199_transpose_x_0, transpose_y = attn_199_transpose_y_0, x = var_9842_cast, y = var_9846_cast)[name = tensor("attn_199_cast")]; + tensor var_9850 = const()[name = tensor("op_9850"), val = tensor([2, 1280, 1, -1])]; + tensor input_579_cast = reshape(shape = var_9850, x = attn_199_cast)[name = tensor("input_579_cast")]; + tensor var_9855 = const()[name = tensor("op_9855"), val = tensor([1, 1])]; + tensor var_9857 = const()[name = tensor("op_9857"), val = tensor([1, 1])]; + tensor var_9859_pad_type_0 = const()[name = tensor("op_9859_pad_type_0"), val = tensor("custom")]; + tensor var_9859_pad_0 = const()[name = tensor("op_9859_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1389365248))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1390184512))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1390184640)))]; + tensor var_9859_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_9857, groups = var_6865, pad = var_9859_pad_0, pad_type = var_9859_pad_type_0, strides = var_9855, weight = up_blocks_0_attentions_1_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_579_cast)[name = tensor("op_9859_cast")]; + tensor inputs_299_cast = add(x = var_9859_cast, y = inputs_297_cast)[name = tensor("inputs_299_cast")]; + tensor var_9863 = const()[name = tensor("op_9863"), val = tensor([1])]; + tensor channels_mean_299_cast = reduce_mean(axes = var_9863, keep_dims = var_6860, x = inputs_299_cast)[name = tensor("channels_mean_299_cast")]; + tensor zero_mean_299_cast = sub(x = inputs_299_cast, y = channels_mean_299_cast)[name = tensor("zero_mean_299_cast")]; + tensor zero_mean_sq_299_cast = mul(x = zero_mean_299_cast, y = zero_mean_299_cast)[name = tensor("zero_mean_sq_299_cast")]; + tensor var_9867 = const()[name = tensor("op_9867"), val = tensor([1])]; + tensor var_9868_cast = reduce_mean(axes = var_9867, keep_dims = var_6860, x = zero_mean_sq_299_cast)[name = tensor("op_9868_cast")]; + tensor 
var_9869_to_fp16 = const()[name = tensor("op_9869_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9870_cast = add(x = var_9868_cast, y = var_9869_to_fp16)[name = tensor("op_9870_cast")]; + tensor denom_299_epsilon_0_to_fp16 = const()[name = tensor("denom_299_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_299_cast = rsqrt(epsilon = denom_299_epsilon_0_to_fp16, x = var_9870_cast)[name = tensor("denom_299_cast")]; + tensor out_299_cast = mul(x = zero_mean_299_cast, y = denom_299_cast)[name = tensor("out_299_cast")]; + tensor var_9874_to_fp16 = const()[name = tensor("op_9874_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1390187264)))]; + tensor var_9875_cast = add(x = out_299_cast, y = var_9874_to_fp16)[name = tensor("op_9875_cast")]; + tensor var_9877_to_fp16 = const()[name = tensor("op_9877_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1390189888)))]; + tensor input_581_cast = mul(x = var_9875_cast, y = var_9877_to_fp16)[name = tensor("input_581_cast")]; + tensor var_9885 = const()[name = tensor("op_9885"), val = tensor([1, 1])]; + tensor var_9887 = const()[name = tensor("op_9887"), val = tensor([1, 1])]; + tensor var_9889_pad_type_0 = const()[name = tensor("op_9889_pad_type_0"), val = tensor("custom")]; + tensor var_9889_pad_0 = const()[name = tensor("op_9889_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1390192512))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403299776))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403300352)))]; + tensor var_9889_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_9887, groups = var_6865, pad = var_9889_pad_0, pad_type = var_9889_pad_type_0, strides = var_9885, weight = up_blocks_0_attentions_1_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_581_cast)[name = tensor("op_9889_cast")]; + tensor var_9890_split_sizes_0 = const()[name = tensor("op_9890_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_9890_axis_0 = const()[name = tensor("op_9890_axis_0"), val = tensor(1)]; + tensor var_9890_cast_0, tensor var_9890_cast_1 = split(axis = var_9890_axis_0, split_sizes = var_9890_split_sizes_0, x = var_9889_cast)[name = tensor("op_9890_cast")]; + tensor var_9892_mode_0 = const()[name = tensor("op_9892_mode_0"), val = tensor("EXACT")]; + tensor var_9892_cast = gelu(mode = var_9892_mode_0, x = var_9890_cast_1)[name = tensor("op_9892_cast")]; + tensor input_583_cast = mul(x = var_9890_cast_0, y = var_9892_cast)[name = tensor("input_583_cast")]; + tensor var_9896 = const()[name = tensor("op_9896"), val = tensor([1, 1])]; + tensor var_9898 = const()[name = tensor("op_9898"), val = tensor([1, 1])]; + tensor var_9900_pad_type_0 = const()[name = tensor("op_9900_pad_type_0"), val = tensor("custom")]; + tensor var_9900_pad_0 = const()[name = 
tensor("op_9900_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1403320896))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1409874560))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1409875136)))]; + tensor var_9900_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_9898, groups = var_6865, pad = var_9900_pad_0, pad_type = var_9900_pad_type_0, strides = var_9896, weight = up_blocks_0_attentions_1_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_583_cast)[name = tensor("op_9900_cast")]; + tensor inputs_301_cast = add(x = var_9900_cast, y = inputs_299_cast)[name = tensor("inputs_301_cast")]; + tensor var_9910 = const()[name = tensor("op_9910"), val = tensor([1])]; + tensor channels_mean_301_cast = reduce_mean(axes = var_9910, keep_dims = var_6860, x = inputs_301_cast)[name = tensor("channels_mean_301_cast")]; + tensor zero_mean_301_cast = sub(x = inputs_301_cast, y = channels_mean_301_cast)[name = tensor("zero_mean_301_cast")]; + tensor zero_mean_sq_301_cast = mul(x = zero_mean_301_cast, y = zero_mean_301_cast)[name = tensor("zero_mean_sq_301_cast")]; + tensor var_9914 = const()[name = tensor("op_9914"), val = tensor([1])]; + tensor var_9915_cast = reduce_mean(axes = var_9914, keep_dims = var_6860, x = zero_mean_sq_301_cast)[name = tensor("op_9915_cast")]; + tensor var_9916_to_fp16 = const()[name = tensor("op_9916_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9917_cast = add(x = var_9915_cast, y = var_9916_to_fp16)[name = tensor("op_9917_cast")]; + tensor denom_301_epsilon_0_to_fp16 = const()[name = tensor("denom_301_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_301_cast = rsqrt(epsilon = denom_301_epsilon_0_to_fp16, x = var_9917_cast)[name = tensor("denom_301_cast")]; + tensor out_301_cast = mul(x = zero_mean_301_cast, y = denom_301_cast)[name = tensor("out_301_cast")]; + tensor var_9921_to_fp16 = const()[name = tensor("op_9921_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1409877760)))]; + tensor var_9922_cast = add(x = out_301_cast, y = var_9921_to_fp16)[name = tensor("op_9922_cast")]; + tensor var_9924_to_fp16 = const()[name = tensor("op_9924_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1409880384)))]; + tensor hidden_states_397_cast = mul(x = var_9922_cast, y = var_9924_to_fp16)[name = tensor("hidden_states_397_cast")]; + tensor var_9931 = const()[name = tensor("op_9931"), val = tensor([1, 1])]; + tensor var_9933 = const()[name = tensor("op_9933"), val = tensor([1, 1])]; + tensor q_201_pad_type_0 = const()[name = tensor("q_201_pad_type_0"), val = tensor("custom")]; + tensor q_201_pad_0 = const()[name = tensor("q_201_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1409883008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1411111872))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_201_cast = conv(dilations = var_9933, groups = var_6865, pad = q_201_pad_0, pad_type = q_201_pad_type_0, strides = var_9931, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_397_cast)[name = tensor("q_201_cast")]; + tensor var_9937 = const()[name = tensor("op_9937"), val = tensor([1, 1])]; + tensor var_9939 = const()[name = tensor("op_9939"), val = tensor([1, 1])]; + tensor k_201_pad_type_0 = const()[name = tensor("k_201_pad_type_0"), val = tensor("custom")]; + tensor k_201_pad_0 = const()[name = tensor("k_201_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1411112064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412750528))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_201_cast = conv(dilations = var_9939, groups = var_6865, pad = k_201_pad_0, pad_type = k_201_pad_type_0, strides = var_9937, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_397_cast)[name = tensor("k_201_cast")]; + tensor var_9943 = const()[name = tensor("op_9943"), val = tensor([1, 1])]; + tensor var_9945 = const()[name = tensor("op_9945"), val = tensor([1, 1])]; + tensor v_201_pad_type_0 = const()[name = tensor("v_201_pad_type_0"), val = tensor("custom")]; + tensor v_201_pad_0 = const()[name = tensor("v_201_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1412751104))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1414389568))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_201_cast = conv(dilations = var_9945, groups = var_6865, pad = v_201_pad_0, pad_type = v_201_pad_type_0, strides = var_9943, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_397_cast)[name = tensor("v_201_cast")]; + tensor var_9949 = const()[name = tensor("op_9949"), val = tensor([2, 20, 64, -1])]; + tensor var_9950_cast = reshape(shape = var_9949, x = q_201_cast)[name = tensor("op_9950_cast")]; + tensor var_9951 = const()[name = tensor("op_9951"), val = tensor([2, 20, 64, -1])]; + tensor var_9952_cast = reshape(shape = var_9951, x = k_201_cast)[name = tensor("op_9952_cast")]; + tensor var_9953 = const()[name = tensor("op_9953"), val = tensor([2, 20, 64, -1])]; + tensor var_9954_cast = reshape(shape = var_9953, x = v_201_cast)[name = tensor("op_9954_cast")]; + tensor attn_weights_401_transpose_x_0 = const()[name = tensor("attn_weights_401_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_401_transpose_y_0 = const()[name = 
tensor("attn_weights_401_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_401_cast = matmul(transpose_x = attn_weights_401_transpose_x_0, transpose_y = attn_weights_401_transpose_y_0, x = var_9950_cast, y = var_9952_cast)[name = tensor("attn_weights_401_cast")]; + tensor attn_weights_403_cast = mul(x = attn_weights_401_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_403_cast")]; + tensor var_9958_cast = softmax(axis = var_6849, x = attn_weights_403_cast)[name = tensor("op_9958_cast")]; + tensor attn_201_transpose_x_0 = const()[name = tensor("attn_201_transpose_x_0"), val = tensor(false)]; + tensor attn_201_transpose_y_0 = const()[name = tensor("attn_201_transpose_y_0"), val = tensor(true)]; + tensor attn_201_cast = matmul(transpose_x = attn_201_transpose_x_0, transpose_y = attn_201_transpose_y_0, x = var_9954_cast, y = var_9958_cast)[name = tensor("attn_201_cast")]; + tensor var_9962 = const()[name = tensor("op_9962"), val = tensor([2, 1280, 1, -1])]; + tensor input_585_cast = reshape(shape = var_9962, x = attn_201_cast)[name = tensor("input_585_cast")]; + tensor var_9967 = const()[name = tensor("op_9967"), val = tensor([1, 1])]; + tensor var_9969 = const()[name = tensor("op_9969"), val = tensor([1, 1])]; + tensor var_9971_pad_type_0 = const()[name = tensor("op_9971_pad_type_0"), val = tensor("custom")]; + tensor var_9971_pad_0 = const()[name = tensor("op_9971_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1414390144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416028608))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416029184)))]; + tensor var_9971_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_9969, groups = var_6865, pad = var_9971_pad_0, pad_type = var_9971_pad_type_0, strides = var_9967, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_585_cast)[name = tensor("op_9971_cast")]; + tensor inputs_303_cast = add(x = var_9971_cast, y = inputs_301_cast)[name = tensor("inputs_303_cast")]; + tensor var_9975 = const()[name = tensor("op_9975"), val = tensor([1])]; + tensor channels_mean_303_cast = reduce_mean(axes = var_9975, keep_dims = var_6860, x = inputs_303_cast)[name = tensor("channels_mean_303_cast")]; + tensor zero_mean_303_cast = sub(x = inputs_303_cast, y = channels_mean_303_cast)[name = tensor("zero_mean_303_cast")]; + tensor zero_mean_sq_303_cast = mul(x = zero_mean_303_cast, y = zero_mean_303_cast)[name = tensor("zero_mean_sq_303_cast")]; + tensor var_9979 = const()[name = tensor("op_9979"), val = tensor([1])]; + tensor var_9980_cast = reduce_mean(axes = var_9979, keep_dims = var_6860, x = zero_mean_sq_303_cast)[name = tensor("op_9980_cast")]; + tensor var_9981_to_fp16 = const()[name = tensor("op_9981_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_9982_cast = add(x = var_9980_cast, y = var_9981_to_fp16)[name = 
tensor("op_9982_cast")]; + tensor denom_303_epsilon_0_to_fp16 = const()[name = tensor("denom_303_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_303_cast = rsqrt(epsilon = denom_303_epsilon_0_to_fp16, x = var_9982_cast)[name = tensor("denom_303_cast")]; + tensor out_303_cast = mul(x = zero_mean_303_cast, y = denom_303_cast)[name = tensor("out_303_cast")]; + tensor var_9986_to_fp16 = const()[name = tensor("op_9986_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416031808)))]; + tensor var_9987_cast = add(x = out_303_cast, y = var_9986_to_fp16)[name = tensor("op_9987_cast")]; + tensor var_9989_to_fp16 = const()[name = tensor("op_9989_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416034432)))]; + tensor hidden_states_399_cast = mul(x = var_9987_cast, y = var_9989_to_fp16)[name = tensor("hidden_states_399_cast")]; + tensor var_9996 = const()[name = tensor("op_9996"), val = tensor([1, 1])]; + tensor var_9998 = const()[name = tensor("op_9998"), val = tensor([1, 1])]; + tensor q_203_pad_type_0 = const()[name = tensor("q_203_pad_type_0"), val = tensor("custom")]; + tensor q_203_pad_0 = const()[name = tensor("q_203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416037056))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416856320))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_203_cast = conv(dilations = var_9998, groups = var_6865, pad = q_203_pad_0, pad_type = q_203_pad_type_0, strides = var_9996, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_399_cast)[name = tensor("q_203_cast")]; + tensor var_10002 = const()[name = tensor("op_10002"), val = tensor([1, 1])]; + tensor var_10004 = const()[name = tensor("op_10004"), val = tensor([1, 1])]; + tensor k_203_pad_type_0 = const()[name = tensor("k_203_pad_type_0"), val = tensor("custom")]; + tensor k_203_pad_0 = const()[name = tensor("k_203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1416856448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1418167232))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_203_cast = conv(dilations = var_10004, groups = var_6865, pad = k_203_pad_0, pad_type = k_203_pad_type_0, strides = var_10002, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_203_cast")]; + tensor var_10008 = const()[name = tensor("op_10008"), val = tensor([1, 1])]; + tensor var_10010 = const()[name = tensor("op_10010"), val = tensor([1, 1])]; + tensor v_203_pad_type_0 = const()[name = tensor("v_203_pad_type_0"), val = tensor("custom")]; + tensor v_203_pad_0 = const()[name = tensor("v_203_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1418167360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1419478144))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_203_cast = conv(dilations = var_10010, groups = var_6865, pad = v_203_pad_0, pad_type = v_203_pad_type_0, strides = var_10008, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_203_cast")]; + tensor var_10014 = const()[name = tensor("op_10014"), val = tensor([2, 20, 64, -1])]; + tensor var_10015_cast = reshape(shape = var_10014, x = q_203_cast)[name = tensor("op_10015_cast")]; + tensor var_10016 = const()[name = tensor("op_10016"), val = tensor([2, 20, 64, -1])]; + tensor var_10017_cast = reshape(shape = var_10016, x = k_203_cast)[name = tensor("op_10017_cast")]; + tensor var_10018 = const()[name = tensor("op_10018"), val = tensor([2, 20, 64, -1])]; + tensor var_10019_cast = reshape(shape = var_10018, x = v_203_cast)[name = tensor("op_10019_cast")]; + tensor attn_weights_405_transpose_x_0 = const()[name = tensor("attn_weights_405_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_405_transpose_y_0 = const()[name = tensor("attn_weights_405_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_405_cast = matmul(transpose_x = attn_weights_405_transpose_x_0, transpose_y = attn_weights_405_transpose_y_0, x = var_10015_cast, y = var_10017_cast)[name = tensor("attn_weights_405_cast")]; + tensor attn_weights_407_cast = mul(x = attn_weights_405_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_407_cast")]; + tensor var_10023_cast = softmax(axis = var_6849, x = attn_weights_407_cast)[name = tensor("op_10023_cast")]; + tensor attn_203_transpose_x_0 = const()[name = tensor("attn_203_transpose_x_0"), val = tensor(false)]; + tensor attn_203_transpose_y_0 = const()[name = tensor("attn_203_transpose_y_0"), val = tensor(true)]; + tensor attn_203_cast = matmul(transpose_x = attn_203_transpose_x_0, transpose_y = attn_203_transpose_y_0, x = var_10019_cast, y = var_10023_cast)[name = tensor("attn_203_cast")]; + tensor var_10027 = const()[name = tensor("op_10027"), val = tensor([2, 1280, 1, -1])]; + tensor input_587_cast = reshape(shape = var_10027, x = attn_203_cast)[name = tensor("input_587_cast")]; + tensor var_10032 = const()[name = tensor("op_10032"), val = tensor([1, 1])]; + tensor var_10034 = const()[name = tensor("op_10034"), val = tensor([1, 1])]; + tensor var_10036_pad_type_0 = const()[name = tensor("op_10036_pad_type_0"), val = tensor("custom")]; + tensor var_10036_pad_0 = const()[name = tensor("op_10036_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1419478272))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1420297536))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1420297664)))]; + tensor var_10036_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_10034, groups = var_6865, pad = var_10036_pad_0, pad_type = var_10036_pad_type_0, strides = var_10032, weight = up_blocks_0_attentions_1_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_587_cast)[name = tensor("op_10036_cast")]; + tensor inputs_305_cast = add(x = var_10036_cast, y = inputs_303_cast)[name = tensor("inputs_305_cast")]; + tensor var_10040 = const()[name = tensor("op_10040"), val = tensor([1])]; + tensor channels_mean_305_cast = reduce_mean(axes = var_10040, keep_dims = var_6860, x = inputs_305_cast)[name = tensor("channels_mean_305_cast")]; + tensor zero_mean_305_cast = sub(x = inputs_305_cast, y = channels_mean_305_cast)[name = tensor("zero_mean_305_cast")]; + tensor zero_mean_sq_305_cast = mul(x = zero_mean_305_cast, y = zero_mean_305_cast)[name = tensor("zero_mean_sq_305_cast")]; + tensor var_10044 = const()[name = tensor("op_10044"), val = tensor([1])]; + tensor var_10045_cast = reduce_mean(axes = var_10044, keep_dims = var_6860, x = zero_mean_sq_305_cast)[name = tensor("op_10045_cast")]; + tensor var_10046_to_fp16 = const()[name = tensor("op_10046_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10047_cast = add(x = var_10045_cast, y = var_10046_to_fp16)[name = tensor("op_10047_cast")]; + tensor denom_305_epsilon_0_to_fp16 = const()[name = tensor("denom_305_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_305_cast = rsqrt(epsilon = denom_305_epsilon_0_to_fp16, x = var_10047_cast)[name = tensor("denom_305_cast")]; + tensor out_305_cast = mul(x = zero_mean_305_cast, y = denom_305_cast)[name = tensor("out_305_cast")]; + tensor var_10051_to_fp16 = const()[name = tensor("op_10051_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1420300288)))]; + tensor var_10052_cast = add(x = out_305_cast, y = var_10051_to_fp16)[name = tensor("op_10052_cast")]; + tensor var_10054_to_fp16 = const()[name = tensor("op_10054_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1420302912)))]; + tensor input_589_cast = mul(x = var_10052_cast, y = var_10054_to_fp16)[name = tensor("input_589_cast")]; + tensor var_10062 = const()[name = tensor("op_10062"), val = tensor([1, 1])]; + tensor var_10064 = const()[name = tensor("op_10064"), val = tensor([1, 1])]; + tensor var_10066_pad_type_0 = const()[name = tensor("op_10066_pad_type_0"), val = tensor("custom")]; + tensor var_10066_pad_0 = const()[name = tensor("op_10066_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1420305536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1433412800))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1433413376)))]; + tensor var_10066_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_10064, groups = var_6865, pad = var_10066_pad_0, pad_type = var_10066_pad_type_0, strides = var_10062, weight = up_blocks_0_attentions_1_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_589_cast)[name = tensor("op_10066_cast")]; + tensor var_10067_split_sizes_0 = const()[name = tensor("op_10067_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10067_axis_0 = const()[name = tensor("op_10067_axis_0"), val = tensor(1)]; + tensor var_10067_cast_0, tensor var_10067_cast_1 = split(axis = var_10067_axis_0, split_sizes = var_10067_split_sizes_0, x = var_10066_cast)[name = tensor("op_10067_cast")]; + tensor var_10069_mode_0 = const()[name = tensor("op_10069_mode_0"), val = tensor("EXACT")]; + tensor var_10069_cast = gelu(mode = var_10069_mode_0, x = var_10067_cast_1)[name = tensor("op_10069_cast")]; + tensor input_591_cast = mul(x = var_10067_cast_0, y = var_10069_cast)[name = tensor("input_591_cast")]; + tensor var_10073 = const()[name = tensor("op_10073"), val = tensor([1, 1])]; + tensor var_10075 = const()[name = tensor("op_10075"), val = tensor([1, 1])]; + tensor var_10077_pad_type_0 = const()[name = tensor("op_10077_pad_type_0"), val = tensor("custom")]; + tensor var_10077_pad_0 = const()[name = tensor("op_10077_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1433433920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1439987584))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1439988160)))]; + tensor var_10077_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_10075, groups = var_6865, pad = var_10077_pad_0, pad_type = var_10077_pad_type_0, strides = var_10073, weight = up_blocks_0_attentions_1_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_591_cast)[name = tensor("op_10077_cast")]; + tensor inputs_307_cast = add(x = var_10077_cast, y = inputs_305_cast)[name = tensor("inputs_307_cast")]; + tensor var_10087 = const()[name = tensor("op_10087"), val = tensor([1])]; + tensor channels_mean_307_cast = reduce_mean(axes = var_10087, keep_dims = var_6860, x = inputs_307_cast)[name = tensor("channels_mean_307_cast")]; + tensor zero_mean_307_cast = sub(x = inputs_307_cast, y = channels_mean_307_cast)[name = tensor("zero_mean_307_cast")]; + tensor zero_mean_sq_307_cast = mul(x = zero_mean_307_cast, y = zero_mean_307_cast)[name = tensor("zero_mean_sq_307_cast")]; + tensor var_10091 = const()[name = tensor("op_10091"), val = tensor([1])]; + tensor var_10092_cast = reduce_mean(axes = var_10091, keep_dims = var_6860, x = zero_mean_sq_307_cast)[name = tensor("op_10092_cast")]; + tensor var_10093_to_fp16 = const()[name = tensor("op_10093_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10094_cast = add(x = var_10092_cast, y = 
var_10093_to_fp16)[name = tensor("op_10094_cast")]; + tensor denom_307_epsilon_0_to_fp16 = const()[name = tensor("denom_307_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_307_cast = rsqrt(epsilon = denom_307_epsilon_0_to_fp16, x = var_10094_cast)[name = tensor("denom_307_cast")]; + tensor out_307_cast = mul(x = zero_mean_307_cast, y = denom_307_cast)[name = tensor("out_307_cast")]; + tensor var_10098_to_fp16 = const()[name = tensor("op_10098_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1439990784)))]; + tensor var_10099_cast = add(x = out_307_cast, y = var_10098_to_fp16)[name = tensor("op_10099_cast")]; + tensor var_10101_to_fp16 = const()[name = tensor("op_10101_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1439993408)))]; + tensor hidden_states_403_cast = mul(x = var_10099_cast, y = var_10101_to_fp16)[name = tensor("hidden_states_403_cast")]; + tensor var_10108 = const()[name = tensor("op_10108"), val = tensor([1, 1])]; + tensor var_10110 = const()[name = tensor("op_10110"), val = tensor([1, 1])]; + tensor q_205_pad_type_0 = const()[name = tensor("q_205_pad_type_0"), val = tensor("custom")]; + tensor q_205_pad_0 = const()[name = tensor("q_205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1439996032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1441224896))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_205_cast = conv(dilations = var_10110, groups = var_6865, pad = q_205_pad_0, pad_type = q_205_pad_type_0, strides = var_10108, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_403_cast)[name = tensor("q_205_cast")]; + tensor var_10114 = const()[name = tensor("op_10114"), val = tensor([1, 1])]; + tensor var_10116 = const()[name = tensor("op_10116"), val = tensor([1, 1])]; + tensor k_205_pad_type_0 = const()[name = tensor("k_205_pad_type_0"), val = tensor("custom")]; + tensor k_205_pad_0 = const()[name = tensor("k_205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1441225088))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1442453952))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_205_cast = conv(dilations = var_10116, groups = var_6865, pad = k_205_pad_0, pad_type = k_205_pad_type_0, strides = var_10114, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_403_cast)[name = tensor("k_205_cast")]; + tensor var_10120 = const()[name = tensor("op_10120"), val = tensor([1, 1])]; + tensor var_10122 = const()[name = tensor("op_10122"), val = tensor([1, 1])]; + tensor v_205_pad_type_0 = const()[name = tensor("v_205_pad_type_0"), val = tensor("custom")]; + tensor v_205_pad_0 = const()[name = tensor("v_205_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1442454144))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1444092608))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_205_cast = conv(dilations = var_10122, groups = var_6865, pad = v_205_pad_0, pad_type = v_205_pad_type_0, strides = var_10120, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_403_cast)[name = tensor("v_205_cast")]; + tensor var_10126 = const()[name = tensor("op_10126"), val = tensor([2, 20, 64, -1])]; + tensor var_10127_cast = reshape(shape = var_10126, x = q_205_cast)[name = tensor("op_10127_cast")]; + tensor var_10128 = const()[name = tensor("op_10128"), val = tensor([2, 20, 64, -1])]; + tensor var_10129_cast = reshape(shape = var_10128, x = k_205_cast)[name = tensor("op_10129_cast")]; + tensor var_10130 = const()[name = tensor("op_10130"), val = tensor([2, 20, 64, -1])]; + tensor var_10131_cast = reshape(shape = var_10130, x = v_205_cast)[name = tensor("op_10131_cast")]; + tensor attn_weights_409_transpose_x_0 = const()[name = tensor("attn_weights_409_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_409_transpose_y_0 = const()[name = tensor("attn_weights_409_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_409_cast = matmul(transpose_x = attn_weights_409_transpose_x_0, transpose_y = attn_weights_409_transpose_y_0, x = var_10127_cast, y = var_10129_cast)[name = tensor("attn_weights_409_cast")]; + tensor attn_weights_411_cast = mul(x = attn_weights_409_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_411_cast")]; + tensor var_10135_cast = softmax(axis = var_6849, x = attn_weights_411_cast)[name = tensor("op_10135_cast")]; + tensor attn_205_transpose_x_0 = const()[name = tensor("attn_205_transpose_x_0"), val = tensor(false)]; + tensor attn_205_transpose_y_0 = const()[name = tensor("attn_205_transpose_y_0"), val = tensor(true)]; + tensor attn_205_cast = matmul(transpose_x = attn_205_transpose_x_0, transpose_y = attn_205_transpose_y_0, x = var_10131_cast, y = var_10135_cast)[name = tensor("attn_205_cast")]; + tensor var_10139 = const()[name = tensor("op_10139"), val = tensor([2, 1280, 1, -1])]; + tensor input_593_cast = reshape(shape = var_10139, x = attn_205_cast)[name = tensor("input_593_cast")]; + tensor var_10144 = const()[name = tensor("op_10144"), val = tensor([1, 1])]; + tensor var_10146 = const()[name = tensor("op_10146"), val = tensor([1, 1])]; + tensor var_10148_pad_type_0 = const()[name = tensor("op_10148_pad_type_0"), val = tensor("custom")]; + tensor var_10148_pad_0 = const()[name = tensor("op_10148_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1444093184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1445731648))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1445732224)))]; + tensor var_10148_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_10146, groups = var_6865, pad = var_10148_pad_0, pad_type = var_10148_pad_type_0, strides = var_10144, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_593_cast)[name = tensor("op_10148_cast")]; + tensor inputs_309_cast = add(x = var_10148_cast, y = inputs_307_cast)[name = tensor("inputs_309_cast")]; + tensor var_10152 = const()[name = tensor("op_10152"), val = tensor([1])]; + tensor channels_mean_309_cast = reduce_mean(axes = var_10152, keep_dims = var_6860, x = inputs_309_cast)[name = tensor("channels_mean_309_cast")]; + tensor zero_mean_309_cast = sub(x = inputs_309_cast, y = channels_mean_309_cast)[name = tensor("zero_mean_309_cast")]; + tensor zero_mean_sq_309_cast = mul(x = zero_mean_309_cast, y = zero_mean_309_cast)[name = tensor("zero_mean_sq_309_cast")]; + tensor var_10156 = const()[name = tensor("op_10156"), val = tensor([1])]; + tensor var_10157_cast = reduce_mean(axes = var_10156, keep_dims = var_6860, x = zero_mean_sq_309_cast)[name = tensor("op_10157_cast")]; + tensor var_10158_to_fp16 = const()[name = tensor("op_10158_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10159_cast = add(x = var_10157_cast, y = var_10158_to_fp16)[name = tensor("op_10159_cast")]; + tensor denom_309_epsilon_0_to_fp16 = const()[name = tensor("denom_309_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_309_cast = rsqrt(epsilon = denom_309_epsilon_0_to_fp16, x = var_10159_cast)[name = tensor("denom_309_cast")]; + tensor out_309_cast = mul(x = zero_mean_309_cast, y = denom_309_cast)[name = tensor("out_309_cast")]; + tensor var_10163_to_fp16 = const()[name = tensor("op_10163_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1445734848)))]; + tensor var_10164_cast = add(x = out_309_cast, y = var_10163_to_fp16)[name = tensor("op_10164_cast")]; + tensor var_10166_to_fp16 = const()[name = tensor("op_10166_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1445737472)))]; + tensor hidden_states_405_cast = mul(x = var_10164_cast, y = var_10166_to_fp16)[name = tensor("hidden_states_405_cast")]; + tensor var_10173 = const()[name = tensor("op_10173"), val = tensor([1, 1])]; + tensor var_10175 = const()[name = tensor("op_10175"), val = tensor([1, 1])]; + tensor q_207_pad_type_0 = const()[name = tensor("q_207_pad_type_0"), val = tensor("custom")]; + tensor q_207_pad_0 = const()[name = tensor("q_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1445740096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1446968960))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_207_cast = conv(dilations = var_10175, groups = var_6865, pad = q_207_pad_0, pad_type = q_207_pad_type_0, strides = var_10173, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = 
hidden_states_405_cast)[name = tensor("q_207_cast")]; + tensor var_10179 = const()[name = tensor("op_10179"), val = tensor([1, 1])]; + tensor var_10181 = const()[name = tensor("op_10181"), val = tensor([1, 1])]; + tensor k_207_pad_type_0 = const()[name = tensor("k_207_pad_type_0"), val = tensor("custom")]; + tensor k_207_pad_0 = const()[name = tensor("k_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1446969152))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1448279936))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_207_cast = conv(dilations = var_10181, groups = var_6865, pad = k_207_pad_0, pad_type = k_207_pad_type_0, strides = var_10179, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_207_cast")]; + tensor var_10185 = const()[name = tensor("op_10185"), val = tensor([1, 1])]; + tensor var_10187 = const()[name = tensor("op_10187"), val = tensor([1, 1])]; + tensor v_207_pad_type_0 = const()[name = tensor("v_207_pad_type_0"), val = tensor("custom")]; + tensor v_207_pad_0 = const()[name = tensor("v_207_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1448280064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1449590848))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_207_cast = conv(dilations = var_10187, groups = var_6865, pad = v_207_pad_0, pad_type = v_207_pad_type_0, strides = var_10185, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_207_cast")]; + tensor var_10191 = const()[name = tensor("op_10191"), val = tensor([2, 20, 64, -1])]; + tensor var_10192_cast = reshape(shape = var_10191, x = q_207_cast)[name = tensor("op_10192_cast")]; + tensor var_10193 = const()[name = tensor("op_10193"), val = tensor([2, 20, 64, -1])]; + tensor var_10194_cast = reshape(shape = var_10193, x = k_207_cast)[name = tensor("op_10194_cast")]; + tensor var_10195 = const()[name = tensor("op_10195"), val = tensor([2, 20, 64, -1])]; + tensor var_10196_cast = reshape(shape = var_10195, x = v_207_cast)[name = tensor("op_10196_cast")]; + tensor attn_weights_413_transpose_x_0 = const()[name = tensor("attn_weights_413_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_413_transpose_y_0 = const()[name = tensor("attn_weights_413_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_413_cast = matmul(transpose_x = attn_weights_413_transpose_x_0, transpose_y = attn_weights_413_transpose_y_0, x = var_10192_cast, y = var_10194_cast)[name = tensor("attn_weights_413_cast")]; + tensor attn_weights_415_cast = mul(x = attn_weights_413_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_415_cast")]; + tensor var_10200_cast = softmax(axis = var_6849, x = attn_weights_415_cast)[name = tensor("op_10200_cast")]; + tensor attn_207_transpose_x_0 = 
const()[name = tensor("attn_207_transpose_x_0"), val = tensor(false)]; + tensor attn_207_transpose_y_0 = const()[name = tensor("attn_207_transpose_y_0"), val = tensor(true)]; + tensor attn_207_cast = matmul(transpose_x = attn_207_transpose_x_0, transpose_y = attn_207_transpose_y_0, x = var_10196_cast, y = var_10200_cast)[name = tensor("attn_207_cast")]; + tensor var_10204 = const()[name = tensor("op_10204"), val = tensor([2, 1280, 1, -1])]; + tensor input_595_cast = reshape(shape = var_10204, x = attn_207_cast)[name = tensor("input_595_cast")]; + tensor var_10209 = const()[name = tensor("op_10209"), val = tensor([1, 1])]; + tensor var_10211 = const()[name = tensor("op_10211"), val = tensor([1, 1])]; + tensor var_10213_pad_type_0 = const()[name = tensor("op_10213_pad_type_0"), val = tensor("custom")]; + tensor var_10213_pad_0 = const()[name = tensor("op_10213_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1449590976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1450410240))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1450410368)))]; + tensor var_10213_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_10211, groups = var_6865, pad = var_10213_pad_0, pad_type = var_10213_pad_type_0, strides = var_10209, weight = up_blocks_0_attentions_1_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_595_cast)[name = tensor("op_10213_cast")]; + tensor inputs_311_cast = add(x = var_10213_cast, y = inputs_309_cast)[name = tensor("inputs_311_cast")]; + tensor var_10217 = const()[name = tensor("op_10217"), val = tensor([1])]; + tensor channels_mean_311_cast = reduce_mean(axes = var_10217, keep_dims = var_6860, x = inputs_311_cast)[name = tensor("channels_mean_311_cast")]; + tensor zero_mean_311_cast = sub(x = inputs_311_cast, y = channels_mean_311_cast)[name = tensor("zero_mean_311_cast")]; + tensor zero_mean_sq_311_cast = mul(x = zero_mean_311_cast, y = zero_mean_311_cast)[name = tensor("zero_mean_sq_311_cast")]; + tensor var_10221 = const()[name = tensor("op_10221"), val = tensor([1])]; + tensor var_10222_cast = reduce_mean(axes = var_10221, keep_dims = var_6860, x = zero_mean_sq_311_cast)[name = tensor("op_10222_cast")]; + tensor var_10223_to_fp16 = const()[name = tensor("op_10223_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10224_cast = add(x = var_10222_cast, y = var_10223_to_fp16)[name = tensor("op_10224_cast")]; + tensor denom_311_epsilon_0_to_fp16 = const()[name = tensor("denom_311_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_311_cast = rsqrt(epsilon = denom_311_epsilon_0_to_fp16, x = var_10224_cast)[name = tensor("denom_311_cast")]; + tensor out_311_cast = mul(x = zero_mean_311_cast, y = denom_311_cast)[name = tensor("out_311_cast")]; + tensor var_10228_to_fp16 = const()[name = tensor("op_10228_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1450412992)))]; + tensor var_10229_cast = add(x = out_311_cast, y = var_10228_to_fp16)[name = tensor("op_10229_cast")]; + tensor var_10231_to_fp16 = const()[name = tensor("op_10231_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1450415616)))]; + tensor input_597_cast = mul(x = var_10229_cast, y = var_10231_to_fp16)[name = tensor("input_597_cast")]; + tensor var_10239 = const()[name = tensor("op_10239"), val = tensor([1, 1])]; + tensor var_10241 = const()[name = tensor("op_10241"), val = tensor([1, 1])]; + tensor var_10243_pad_type_0 = const()[name = tensor("op_10243_pad_type_0"), val = tensor("custom")]; + tensor var_10243_pad_0 = const()[name = tensor("op_10243_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1450418240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1463525504))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1463526080)))]; + tensor var_10243_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_10241, groups = var_6865, pad = var_10243_pad_0, pad_type = var_10243_pad_type_0, strides = var_10239, weight = up_blocks_0_attentions_1_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_597_cast)[name = tensor("op_10243_cast")]; + tensor var_10244_split_sizes_0 = const()[name = tensor("op_10244_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10244_axis_0 = const()[name = tensor("op_10244_axis_0"), val = tensor(1)]; + tensor var_10244_cast_0, tensor var_10244_cast_1 = split(axis = var_10244_axis_0, split_sizes = var_10244_split_sizes_0, x = var_10243_cast)[name = tensor("op_10244_cast")]; + tensor var_10246_mode_0 = const()[name = tensor("op_10246_mode_0"), val = tensor("EXACT")]; + tensor var_10246_cast = gelu(mode = var_10246_mode_0, x = var_10244_cast_1)[name = tensor("op_10246_cast")]; + tensor input_599_cast = mul(x = var_10244_cast_0, y = var_10246_cast)[name = tensor("input_599_cast")]; + tensor var_10250 = const()[name = tensor("op_10250"), val = tensor([1, 1])]; + tensor var_10252 = const()[name = tensor("op_10252"), val = tensor([1, 1])]; + tensor var_10254_pad_type_0 = const()[name = tensor("op_10254_pad_type_0"), val = tensor("custom")]; + tensor var_10254_pad_0 = const()[name = tensor("op_10254_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1463546624))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1470100288))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = 
tensor("up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1470100864)))]; + tensor var_10254_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_10252, groups = var_6865, pad = var_10254_pad_0, pad_type = var_10254_pad_type_0, strides = var_10250, weight = up_blocks_0_attentions_1_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_599_cast)[name = tensor("op_10254_cast")]; + tensor inputs_313_cast = add(x = var_10254_cast, y = inputs_311_cast)[name = tensor("inputs_313_cast")]; + tensor var_10264 = const()[name = tensor("op_10264"), val = tensor([1])]; + tensor channels_mean_313_cast = reduce_mean(axes = var_10264, keep_dims = var_6860, x = inputs_313_cast)[name = tensor("channels_mean_313_cast")]; + tensor zero_mean_313_cast = sub(x = inputs_313_cast, y = channels_mean_313_cast)[name = tensor("zero_mean_313_cast")]; + tensor zero_mean_sq_313_cast = mul(x = zero_mean_313_cast, y = zero_mean_313_cast)[name = tensor("zero_mean_sq_313_cast")]; + tensor var_10268 = const()[name = tensor("op_10268"), val = tensor([1])]; + tensor var_10269_cast = reduce_mean(axes = var_10268, keep_dims = var_6860, x = zero_mean_sq_313_cast)[name = tensor("op_10269_cast")]; + tensor var_10270_to_fp16 = const()[name = tensor("op_10270_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10271_cast = add(x = var_10269_cast, y = var_10270_to_fp16)[name = tensor("op_10271_cast")]; + tensor denom_313_epsilon_0_to_fp16 = const()[name = tensor("denom_313_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_313_cast = rsqrt(epsilon = denom_313_epsilon_0_to_fp16, x = var_10271_cast)[name = tensor("denom_313_cast")]; + tensor out_313_cast = mul(x = zero_mean_313_cast, y = denom_313_cast)[name = tensor("out_313_cast")]; + tensor var_10275_to_fp16 = const()[name = tensor("op_10275_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1470103488)))]; + tensor var_10276_cast = add(x = out_313_cast, y = var_10275_to_fp16)[name = tensor("op_10276_cast")]; + tensor var_10278_to_fp16 = const()[name = tensor("op_10278_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1470106112)))]; + tensor hidden_states_409_cast = mul(x = var_10276_cast, y = var_10278_to_fp16)[name = tensor("hidden_states_409_cast")]; + tensor var_10285 = const()[name = tensor("op_10285"), val = tensor([1, 1])]; + tensor var_10287 = const()[name = tensor("op_10287"), val = tensor([1, 1])]; + tensor q_209_pad_type_0 = const()[name = tensor("q_209_pad_type_0"), val = tensor("custom")]; + tensor q_209_pad_0 = const()[name = tensor("q_209_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1470108736))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1471337600))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_209_cast = conv(dilations = var_10287, groups = var_6865, pad = q_209_pad_0, pad_type = q_209_pad_type_0, strides = var_10285, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_409_cast)[name = 
tensor("q_209_cast")]; + tensor var_10291 = const()[name = tensor("op_10291"), val = tensor([1, 1])]; + tensor var_10293 = const()[name = tensor("op_10293"), val = tensor([1, 1])]; + tensor k_209_pad_type_0 = const()[name = tensor("k_209_pad_type_0"), val = tensor("custom")]; + tensor k_209_pad_0 = const()[name = tensor("k_209_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1471337792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1472566656))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_209_cast = conv(dilations = var_10293, groups = var_6865, pad = k_209_pad_0, pad_type = k_209_pad_type_0, strides = var_10291, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_409_cast)[name = tensor("k_209_cast")]; + tensor var_10297 = const()[name = tensor("op_10297"), val = tensor([1, 1])]; + tensor var_10299 = const()[name = tensor("op_10299"), val = tensor([1, 1])]; + tensor v_209_pad_type_0 = const()[name = tensor("v_209_pad_type_0"), val = tensor("custom")]; + tensor v_209_pad_0 = const()[name = tensor("v_209_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1472566848))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1474205312))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_209_cast = conv(dilations = var_10299, groups = var_6865, pad = v_209_pad_0, pad_type = v_209_pad_type_0, strides = var_10297, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_409_cast)[name = tensor("v_209_cast")]; + tensor var_10303 = const()[name = tensor("op_10303"), val = tensor([2, 20, 64, -1])]; + tensor var_10304_cast = reshape(shape = var_10303, x = q_209_cast)[name = tensor("op_10304_cast")]; + tensor var_10305 = const()[name = tensor("op_10305"), val = tensor([2, 20, 64, -1])]; + tensor var_10306_cast = reshape(shape = var_10305, x = k_209_cast)[name = tensor("op_10306_cast")]; + tensor var_10307 = const()[name = tensor("op_10307"), val = tensor([2, 20, 64, -1])]; + tensor var_10308_cast = reshape(shape = var_10307, x = v_209_cast)[name = tensor("op_10308_cast")]; + tensor attn_weights_417_transpose_x_0 = const()[name = tensor("attn_weights_417_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_417_transpose_y_0 = const()[name = tensor("attn_weights_417_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_417_cast = matmul(transpose_x = attn_weights_417_transpose_x_0, transpose_y = attn_weights_417_transpose_y_0, x = var_10304_cast, y = var_10306_cast)[name = tensor("attn_weights_417_cast")]; + tensor attn_weights_419_cast = mul(x = attn_weights_417_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_419_cast")]; + tensor var_10312_cast = softmax(axis = var_6849, x = attn_weights_419_cast)[name = tensor("op_10312_cast")]; + tensor attn_209_transpose_x_0 = const()[name = 
tensor("attn_209_transpose_x_0"), val = tensor(false)]; + tensor attn_209_transpose_y_0 = const()[name = tensor("attn_209_transpose_y_0"), val = tensor(true)]; + tensor attn_209_cast = matmul(transpose_x = attn_209_transpose_x_0, transpose_y = attn_209_transpose_y_0, x = var_10308_cast, y = var_10312_cast)[name = tensor("attn_209_cast")]; + tensor var_10316 = const()[name = tensor("op_10316"), val = tensor([2, 1280, 1, -1])]; + tensor input_601_cast = reshape(shape = var_10316, x = attn_209_cast)[name = tensor("input_601_cast")]; + tensor var_10321 = const()[name = tensor("op_10321"), val = tensor([1, 1])]; + tensor var_10323 = const()[name = tensor("op_10323"), val = tensor([1, 1])]; + tensor var_10325_pad_type_0 = const()[name = tensor("op_10325_pad_type_0"), val = tensor("custom")]; + tensor var_10325_pad_0 = const()[name = tensor("op_10325_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1474205888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1475844352))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1475844928)))]; + tensor var_10325_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_10323, groups = var_6865, pad = var_10325_pad_0, pad_type = var_10325_pad_type_0, strides = var_10321, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_601_cast)[name = tensor("op_10325_cast")]; + tensor inputs_315_cast = add(x = var_10325_cast, y = inputs_313_cast)[name = tensor("inputs_315_cast")]; + tensor var_10329 = const()[name = tensor("op_10329"), val = tensor([1])]; + tensor channels_mean_315_cast = reduce_mean(axes = var_10329, keep_dims = var_6860, x = inputs_315_cast)[name = tensor("channels_mean_315_cast")]; + tensor zero_mean_315_cast = sub(x = inputs_315_cast, y = channels_mean_315_cast)[name = tensor("zero_mean_315_cast")]; + tensor zero_mean_sq_315_cast = mul(x = zero_mean_315_cast, y = zero_mean_315_cast)[name = tensor("zero_mean_sq_315_cast")]; + tensor var_10333 = const()[name = tensor("op_10333"), val = tensor([1])]; + tensor var_10334_cast = reduce_mean(axes = var_10333, keep_dims = var_6860, x = zero_mean_sq_315_cast)[name = tensor("op_10334_cast")]; + tensor var_10335_to_fp16 = const()[name = tensor("op_10335_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10336_cast = add(x = var_10334_cast, y = var_10335_to_fp16)[name = tensor("op_10336_cast")]; + tensor denom_315_epsilon_0_to_fp16 = const()[name = tensor("denom_315_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_315_cast = rsqrt(epsilon = denom_315_epsilon_0_to_fp16, x = var_10336_cast)[name = tensor("denom_315_cast")]; + tensor out_315_cast = mul(x = zero_mean_315_cast, y = denom_315_cast)[name = tensor("out_315_cast")]; + tensor var_10340_to_fp16 = const()[name = tensor("op_10340_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1475847552)))]; 
+ tensor var_10341_cast = add(x = out_315_cast, y = var_10340_to_fp16)[name = tensor("op_10341_cast")]; + tensor var_10343_to_fp16 = const()[name = tensor("op_10343_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1475850176)))]; + tensor hidden_states_411_cast = mul(x = var_10341_cast, y = var_10343_to_fp16)[name = tensor("hidden_states_411_cast")]; + tensor var_10350 = const()[name = tensor("op_10350"), val = tensor([1, 1])]; + tensor var_10352 = const()[name = tensor("op_10352"), val = tensor([1, 1])]; + tensor q_211_pad_type_0 = const()[name = tensor("q_211_pad_type_0"), val = tensor("custom")]; + tensor q_211_pad_0 = const()[name = tensor("q_211_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1475852800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1476672064))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_211_cast = conv(dilations = var_10352, groups = var_6865, pad = q_211_pad_0, pad_type = q_211_pad_type_0, strides = var_10350, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_411_cast)[name = tensor("q_211_cast")]; + tensor var_10356 = const()[name = tensor("op_10356"), val = tensor([1, 1])]; + tensor var_10358 = const()[name = tensor("op_10358"), val = tensor([1, 1])]; + tensor k_211_pad_type_0 = const()[name = tensor("k_211_pad_type_0"), val = tensor("custom")]; + tensor k_211_pad_0 = const()[name = tensor("k_211_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1476672192))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1478638336))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_211_cast = conv(dilations = var_10358, groups = var_6865, pad = k_211_pad_0, pad_type = k_211_pad_type_0, strides = var_10356, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_211_cast")]; + tensor var_10362 = const()[name = tensor("op_10362"), val = tensor([1, 1])]; + tensor var_10364 = const()[name = tensor("op_10364"), val = tensor([1, 1])]; + tensor v_211_pad_type_0 = const()[name = tensor("v_211_pad_type_0"), val = tensor("custom")]; + tensor v_211_pad_0 = const()[name = tensor("v_211_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1478638528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1479949312))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_211_cast = conv(dilations = var_10364, groups = var_6865, pad = v_211_pad_0, pad_type = v_211_pad_type_0, strides = var_10362, weight = 
up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_211_cast")]; + tensor var_10368 = const()[name = tensor("op_10368"), val = tensor([2, 20, 64, -1])]; + tensor var_10369_cast = reshape(shape = var_10368, x = q_211_cast)[name = tensor("op_10369_cast")]; + tensor var_10370 = const()[name = tensor("op_10370"), val = tensor([2, 20, 64, -1])]; + tensor var_10371_cast = reshape(shape = var_10370, x = k_211_cast)[name = tensor("op_10371_cast")]; + tensor var_10372 = const()[name = tensor("op_10372"), val = tensor([2, 20, 64, -1])]; + tensor var_10373_cast = reshape(shape = var_10372, x = v_211_cast)[name = tensor("op_10373_cast")]; + tensor attn_weights_421_transpose_x_0 = const()[name = tensor("attn_weights_421_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_421_transpose_y_0 = const()[name = tensor("attn_weights_421_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_421_cast = matmul(transpose_x = attn_weights_421_transpose_x_0, transpose_y = attn_weights_421_transpose_y_0, x = var_10369_cast, y = var_10371_cast)[name = tensor("attn_weights_421_cast")]; + tensor attn_weights_423_cast = mul(x = attn_weights_421_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_423_cast")]; + tensor var_10377_cast = softmax(axis = var_6849, x = attn_weights_423_cast)[name = tensor("op_10377_cast")]; + tensor attn_211_transpose_x_0 = const()[name = tensor("attn_211_transpose_x_0"), val = tensor(false)]; + tensor attn_211_transpose_y_0 = const()[name = tensor("attn_211_transpose_y_0"), val = tensor(true)]; + tensor attn_211_cast = matmul(transpose_x = attn_211_transpose_x_0, transpose_y = attn_211_transpose_y_0, x = var_10373_cast, y = var_10377_cast)[name = tensor("attn_211_cast")]; + tensor var_10381 = const()[name = tensor("op_10381"), val = tensor([2, 1280, 1, -1])]; + tensor input_603_cast = reshape(shape = var_10381, x = attn_211_cast)[name = tensor("input_603_cast")]; + tensor var_10386 = const()[name = tensor("op_10386"), val = tensor([1, 1])]; + tensor var_10388 = const()[name = tensor("op_10388"), val = tensor([1, 1])]; + tensor var_10390_pad_type_0 = const()[name = tensor("op_10390_pad_type_0"), val = tensor("custom")]; + tensor var_10390_pad_0 = const()[name = tensor("op_10390_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1479949440))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1481178304))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1481178496)))]; + tensor var_10390_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_10388, groups = var_6865, pad = var_10390_pad_0, pad_type = var_10390_pad_type_0, strides = var_10386, weight = up_blocks_0_attentions_1_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_603_cast)[name = tensor("op_10390_cast")]; + tensor inputs_317_cast = add(x = var_10390_cast, y = 
inputs_315_cast)[name = tensor("inputs_317_cast")]; + tensor var_10394 = const()[name = tensor("op_10394"), val = tensor([1])]; + tensor channels_mean_317_cast = reduce_mean(axes = var_10394, keep_dims = var_6860, x = inputs_317_cast)[name = tensor("channels_mean_317_cast")]; + tensor zero_mean_317_cast = sub(x = inputs_317_cast, y = channels_mean_317_cast)[name = tensor("zero_mean_317_cast")]; + tensor zero_mean_sq_317_cast = mul(x = zero_mean_317_cast, y = zero_mean_317_cast)[name = tensor("zero_mean_sq_317_cast")]; + tensor var_10398 = const()[name = tensor("op_10398"), val = tensor([1])]; + tensor var_10399_cast = reduce_mean(axes = var_10398, keep_dims = var_6860, x = zero_mean_sq_317_cast)[name = tensor("op_10399_cast")]; + tensor var_10400_to_fp16 = const()[name = tensor("op_10400_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10401_cast = add(x = var_10399_cast, y = var_10400_to_fp16)[name = tensor("op_10401_cast")]; + tensor denom_317_epsilon_0_to_fp16 = const()[name = tensor("denom_317_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_317_cast = rsqrt(epsilon = denom_317_epsilon_0_to_fp16, x = var_10401_cast)[name = tensor("denom_317_cast")]; + tensor out_317_cast = mul(x = zero_mean_317_cast, y = denom_317_cast)[name = tensor("out_317_cast")]; + tensor var_10405_to_fp16 = const()[name = tensor("op_10405_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1481181120)))]; + tensor var_10406_cast = add(x = out_317_cast, y = var_10405_to_fp16)[name = tensor("op_10406_cast")]; + tensor var_10408_to_fp16 = const()[name = tensor("op_10408_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1481183744)))]; + tensor input_605_cast = mul(x = var_10406_cast, y = var_10408_to_fp16)[name = tensor("input_605_cast")]; + tensor var_10416 = const()[name = tensor("op_10416"), val = tensor([1, 1])]; + tensor var_10418 = const()[name = tensor("op_10418"), val = tensor([1, 1])]; + tensor var_10420_pad_type_0 = const()[name = tensor("op_10420_pad_type_0"), val = tensor("custom")]; + tensor var_10420_pad_0 = const()[name = tensor("op_10420_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1481186368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1494293632))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1494294208)))]; + tensor var_10420_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_10418, groups = var_6865, pad = var_10420_pad_0, pad_type = var_10420_pad_type_0, strides = var_10416, weight = up_blocks_0_attentions_1_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_605_cast)[name = tensor("op_10420_cast")]; + tensor var_10421_split_sizes_0 = const()[name = tensor("op_10421_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10421_axis_0 = const()[name = tensor("op_10421_axis_0"), val = tensor(1)]; + tensor 
var_10421_cast_0, tensor var_10421_cast_1 = split(axis = var_10421_axis_0, split_sizes = var_10421_split_sizes_0, x = var_10420_cast)[name = tensor("op_10421_cast")]; + tensor var_10423_mode_0 = const()[name = tensor("op_10423_mode_0"), val = tensor("EXACT")]; + tensor var_10423_cast = gelu(mode = var_10423_mode_0, x = var_10421_cast_1)[name = tensor("op_10423_cast")]; + tensor input_607_cast = mul(x = var_10421_cast_0, y = var_10423_cast)[name = tensor("input_607_cast")]; + tensor var_10427 = const()[name = tensor("op_10427"), val = tensor([1, 1])]; + tensor var_10429 = const()[name = tensor("op_10429"), val = tensor([1, 1])]; + tensor var_10431_pad_type_0 = const()[name = tensor("op_10431_pad_type_0"), val = tensor("custom")]; + tensor var_10431_pad_0 = const()[name = tensor("op_10431_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1494314752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1500868416))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1500868992)))]; + tensor var_10431_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_10429, groups = var_6865, pad = var_10431_pad_0, pad_type = var_10431_pad_type_0, strides = var_10427, weight = up_blocks_0_attentions_1_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_607_cast)[name = tensor("op_10431_cast")]; + tensor inputs_319_cast = add(x = var_10431_cast, y = inputs_317_cast)[name = tensor("inputs_319_cast")]; + tensor var_10441 = const()[name = tensor("op_10441"), val = tensor([1])]; + tensor channels_mean_319_cast = reduce_mean(axes = var_10441, keep_dims = var_6860, x = inputs_319_cast)[name = tensor("channels_mean_319_cast")]; + tensor zero_mean_319_cast = sub(x = inputs_319_cast, y = channels_mean_319_cast)[name = tensor("zero_mean_319_cast")]; + tensor zero_mean_sq_319_cast = mul(x = zero_mean_319_cast, y = zero_mean_319_cast)[name = tensor("zero_mean_sq_319_cast")]; + tensor var_10445 = const()[name = tensor("op_10445"), val = tensor([1])]; + tensor var_10446_cast = reduce_mean(axes = var_10445, keep_dims = var_6860, x = zero_mean_sq_319_cast)[name = tensor("op_10446_cast")]; + tensor var_10447_to_fp16 = const()[name = tensor("op_10447_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10448_cast = add(x = var_10446_cast, y = var_10447_to_fp16)[name = tensor("op_10448_cast")]; + tensor denom_319_epsilon_0_to_fp16 = const()[name = tensor("denom_319_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_319_cast = rsqrt(epsilon = denom_319_epsilon_0_to_fp16, x = var_10448_cast)[name = tensor("denom_319_cast")]; + tensor out_319_cast = mul(x = zero_mean_319_cast, y = denom_319_cast)[name = tensor("out_319_cast")]; + tensor var_10452_to_fp16 = const()[name = tensor("op_10452_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1500871616)))]; + tensor var_10453_cast = add(x = out_319_cast, y = var_10452_to_fp16)[name = 
tensor("op_10453_cast")]; + tensor var_10455_to_fp16 = const()[name = tensor("op_10455_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1500874240)))]; + tensor hidden_states_415_cast = mul(x = var_10453_cast, y = var_10455_to_fp16)[name = tensor("hidden_states_415_cast")]; + tensor var_10462 = const()[name = tensor("op_10462"), val = tensor([1, 1])]; + tensor var_10464 = const()[name = tensor("op_10464"), val = tensor([1, 1])]; + tensor q_213_pad_type_0 = const()[name = tensor("q_213_pad_type_0"), val = tensor("custom")]; + tensor q_213_pad_0 = const()[name = tensor("q_213_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1500876864))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1502105728))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_213_cast = conv(dilations = var_10464, groups = var_6865, pad = q_213_pad_0, pad_type = q_213_pad_type_0, strides = var_10462, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_415_cast)[name = tensor("q_213_cast")]; + tensor var_10468 = const()[name = tensor("op_10468"), val = tensor([1, 1])]; + tensor var_10470 = const()[name = tensor("op_10470"), val = tensor([1, 1])]; + tensor k_213_pad_type_0 = const()[name = tensor("k_213_pad_type_0"), val = tensor("custom")]; + tensor k_213_pad_0 = const()[name = tensor("k_213_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1502105920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1503334784))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_213_cast = conv(dilations = var_10470, groups = var_6865, pad = k_213_pad_0, pad_type = k_213_pad_type_0, strides = var_10468, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_415_cast)[name = tensor("k_213_cast")]; + tensor var_10474 = const()[name = tensor("op_10474"), val = tensor([1, 1])]; + tensor var_10476 = const()[name = tensor("op_10476"), val = tensor([1, 1])]; + tensor v_213_pad_type_0 = const()[name = tensor("v_213_pad_type_0"), val = tensor("custom")]; + tensor v_213_pad_0 = const()[name = tensor("v_213_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1503334976))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1504973440))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_213_cast = conv(dilations = var_10476, groups = var_6865, pad = v_213_pad_0, pad_type = v_213_pad_type_0, strides = var_10474, weight = 
up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_415_cast)[name = tensor("v_213_cast")]; + tensor var_10480 = const()[name = tensor("op_10480"), val = tensor([2, 20, 64, -1])]; + tensor var_10481_cast = reshape(shape = var_10480, x = q_213_cast)[name = tensor("op_10481_cast")]; + tensor var_10482 = const()[name = tensor("op_10482"), val = tensor([2, 20, 64, -1])]; + tensor var_10483_cast = reshape(shape = var_10482, x = k_213_cast)[name = tensor("op_10483_cast")]; + tensor var_10484 = const()[name = tensor("op_10484"), val = tensor([2, 20, 64, -1])]; + tensor var_10485_cast = reshape(shape = var_10484, x = v_213_cast)[name = tensor("op_10485_cast")]; + tensor attn_weights_425_transpose_x_0 = const()[name = tensor("attn_weights_425_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_425_transpose_y_0 = const()[name = tensor("attn_weights_425_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_425_cast = matmul(transpose_x = attn_weights_425_transpose_x_0, transpose_y = attn_weights_425_transpose_y_0, x = var_10481_cast, y = var_10483_cast)[name = tensor("attn_weights_425_cast")]; + tensor attn_weights_427_cast = mul(x = attn_weights_425_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_427_cast")]; + tensor var_10489_cast = softmax(axis = var_6849, x = attn_weights_427_cast)[name = tensor("op_10489_cast")]; + tensor attn_213_transpose_x_0 = const()[name = tensor("attn_213_transpose_x_0"), val = tensor(false)]; + tensor attn_213_transpose_y_0 = const()[name = tensor("attn_213_transpose_y_0"), val = tensor(true)]; + tensor attn_213_cast = matmul(transpose_x = attn_213_transpose_x_0, transpose_y = attn_213_transpose_y_0, x = var_10485_cast, y = var_10489_cast)[name = tensor("attn_213_cast")]; + tensor var_10493 = const()[name = tensor("op_10493"), val = tensor([2, 1280, 1, -1])]; + tensor input_609_cast = reshape(shape = var_10493, x = attn_213_cast)[name = tensor("input_609_cast")]; + tensor var_10498 = const()[name = tensor("op_10498"), val = tensor([1, 1])]; + tensor var_10500 = const()[name = tensor("op_10500"), val = tensor([1, 1])]; + tensor var_10502_pad_type_0 = const()[name = tensor("op_10502_pad_type_0"), val = tensor("custom")]; + tensor var_10502_pad_0 = const()[name = tensor("op_10502_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1504974016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1506612480))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1506613056)))]; + tensor var_10502_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_10500, groups = var_6865, pad = var_10502_pad_0, pad_type = var_10502_pad_type_0, strides = var_10498, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_609_cast)[name = tensor("op_10502_cast")]; + tensor inputs_321_cast = add(x = var_10502_cast, y = 
inputs_319_cast)[name = tensor("inputs_321_cast")]; + tensor var_10506 = const()[name = tensor("op_10506"), val = tensor([1])]; + tensor channels_mean_321_cast = reduce_mean(axes = var_10506, keep_dims = var_6860, x = inputs_321_cast)[name = tensor("channels_mean_321_cast")]; + tensor zero_mean_321_cast = sub(x = inputs_321_cast, y = channels_mean_321_cast)[name = tensor("zero_mean_321_cast")]; + tensor zero_mean_sq_321_cast = mul(x = zero_mean_321_cast, y = zero_mean_321_cast)[name = tensor("zero_mean_sq_321_cast")]; + tensor var_10510 = const()[name = tensor("op_10510"), val = tensor([1])]; + tensor var_10511_cast = reduce_mean(axes = var_10510, keep_dims = var_6860, x = zero_mean_sq_321_cast)[name = tensor("op_10511_cast")]; + tensor var_10512_to_fp16 = const()[name = tensor("op_10512_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10513_cast = add(x = var_10511_cast, y = var_10512_to_fp16)[name = tensor("op_10513_cast")]; + tensor denom_321_epsilon_0_to_fp16 = const()[name = tensor("denom_321_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_321_cast = rsqrt(epsilon = denom_321_epsilon_0_to_fp16, x = var_10513_cast)[name = tensor("denom_321_cast")]; + tensor out_321_cast = mul(x = zero_mean_321_cast, y = denom_321_cast)[name = tensor("out_321_cast")]; + tensor var_10517_to_fp16 = const()[name = tensor("op_10517_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1506615680)))]; + tensor var_10518_cast = add(x = out_321_cast, y = var_10517_to_fp16)[name = tensor("op_10518_cast")]; + tensor var_10520_to_fp16 = const()[name = tensor("op_10520_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1506618304)))]; + tensor hidden_states_417_cast = mul(x = var_10518_cast, y = var_10520_to_fp16)[name = tensor("hidden_states_417_cast")]; + tensor var_10527 = const()[name = tensor("op_10527"), val = tensor([1, 1])]; + tensor var_10529 = const()[name = tensor("op_10529"), val = tensor([1, 1])]; + tensor q_215_pad_type_0 = const()[name = tensor("q_215_pad_type_0"), val = tensor("custom")]; + tensor q_215_pad_0 = const()[name = tensor("q_215_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1506620928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1507440192))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_215_cast = conv(dilations = var_10529, groups = var_6865, pad = q_215_pad_0, pad_type = q_215_pad_type_0, strides = var_10527, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_417_cast)[name = tensor("q_215_cast")]; + tensor var_10533 = const()[name = tensor("op_10533"), val = tensor([1, 1])]; + tensor var_10535 = const()[name = tensor("op_10535"), val = tensor([1, 1])]; + tensor k_215_pad_type_0 = const()[name = tensor("k_215_pad_type_0"), val = tensor("custom")]; + tensor k_215_pad_0 = const()[name = tensor("k_215_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1507440320))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1508751104))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_215_cast = conv(dilations = var_10535, groups = var_6865, pad = k_215_pad_0, pad_type = k_215_pad_type_0, strides = var_10533, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_215_cast")]; + tensor var_10539 = const()[name = tensor("op_10539"), val = tensor([1, 1])]; + tensor var_10541 = const()[name = tensor("op_10541"), val = tensor([1, 1])]; + tensor v_215_pad_type_0 = const()[name = tensor("v_215_pad_type_0"), val = tensor("custom")]; + tensor v_215_pad_0 = const()[name = tensor("v_215_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1508751232))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1510717376))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_215_cast = conv(dilations = var_10541, groups = var_6865, pad = v_215_pad_0, pad_type = v_215_pad_type_0, strides = var_10539, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_215_cast")]; + tensor var_10545 = const()[name = tensor("op_10545"), val = tensor([2, 20, 64, -1])]; + tensor var_10546_cast = reshape(shape = var_10545, x = q_215_cast)[name = tensor("op_10546_cast")]; + tensor var_10547 = const()[name = tensor("op_10547"), val = tensor([2, 20, 64, -1])]; + tensor var_10548_cast = reshape(shape = var_10547, x = k_215_cast)[name = tensor("op_10548_cast")]; + tensor var_10549 = const()[name = tensor("op_10549"), val = tensor([2, 20, 64, -1])]; + tensor var_10550_cast = reshape(shape = var_10549, x = v_215_cast)[name = tensor("op_10550_cast")]; + tensor attn_weights_429_transpose_x_0 = const()[name = tensor("attn_weights_429_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_429_transpose_y_0 = const()[name = tensor("attn_weights_429_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_429_cast = matmul(transpose_x = attn_weights_429_transpose_x_0, transpose_y = attn_weights_429_transpose_y_0, x = var_10546_cast, y = var_10548_cast)[name = tensor("attn_weights_429_cast")]; + tensor attn_weights_431_cast = mul(x = attn_weights_429_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_431_cast")]; + tensor var_10554_cast = softmax(axis = var_6849, x = attn_weights_431_cast)[name = tensor("op_10554_cast")]; + tensor attn_215_transpose_x_0 = const()[name = tensor("attn_215_transpose_x_0"), val = tensor(false)]; + tensor attn_215_transpose_y_0 = const()[name = tensor("attn_215_transpose_y_0"), val = tensor(true)]; + tensor attn_215_cast = matmul(transpose_x = attn_215_transpose_x_0, transpose_y = attn_215_transpose_y_0, x = var_10550_cast, y = var_10554_cast)[name = tensor("attn_215_cast")]; + tensor var_10558 = const()[name = tensor("op_10558"), val = tensor([2, 1280, 1, -1])]; + tensor input_611_cast = reshape(shape = var_10558, x = attn_215_cast)[name = tensor("input_611_cast")]; + tensor var_10563 = const()[name = tensor("op_10563"), val = tensor([1, 1])]; + tensor 
var_10565 = const()[name = tensor("op_10565"), val = tensor([1, 1])]; + tensor var_10567_pad_type_0 = const()[name = tensor("op_10567_pad_type_0"), val = tensor("custom")]; + tensor var_10567_pad_0 = const()[name = tensor("op_10567_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1510717568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1511536832))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1511536960)))]; + tensor var_10567_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_10565, groups = var_6865, pad = var_10567_pad_0, pad_type = var_10567_pad_type_0, strides = var_10563, weight = up_blocks_0_attentions_1_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_611_cast)[name = tensor("op_10567_cast")]; + tensor inputs_323_cast = add(x = var_10567_cast, y = inputs_321_cast)[name = tensor("inputs_323_cast")]; + tensor var_10571 = const()[name = tensor("op_10571"), val = tensor([1])]; + tensor channels_mean_323_cast = reduce_mean(axes = var_10571, keep_dims = var_6860, x = inputs_323_cast)[name = tensor("channels_mean_323_cast")]; + tensor zero_mean_323_cast = sub(x = inputs_323_cast, y = channels_mean_323_cast)[name = tensor("zero_mean_323_cast")]; + tensor zero_mean_sq_323_cast = mul(x = zero_mean_323_cast, y = zero_mean_323_cast)[name = tensor("zero_mean_sq_323_cast")]; + tensor var_10575 = const()[name = tensor("op_10575"), val = tensor([1])]; + tensor var_10576_cast = reduce_mean(axes = var_10575, keep_dims = var_6860, x = zero_mean_sq_323_cast)[name = tensor("op_10576_cast")]; + tensor var_10577_to_fp16 = const()[name = tensor("op_10577_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10578_cast = add(x = var_10576_cast, y = var_10577_to_fp16)[name = tensor("op_10578_cast")]; + tensor denom_323_epsilon_0_to_fp16 = const()[name = tensor("denom_323_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_323_cast = rsqrt(epsilon = denom_323_epsilon_0_to_fp16, x = var_10578_cast)[name = tensor("denom_323_cast")]; + tensor out_323_cast = mul(x = zero_mean_323_cast, y = denom_323_cast)[name = tensor("out_323_cast")]; + tensor var_10582_to_fp16 = const()[name = tensor("op_10582_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1511539584)))]; + tensor var_10583_cast = add(x = out_323_cast, y = var_10582_to_fp16)[name = tensor("op_10583_cast")]; + tensor var_10585_to_fp16 = const()[name = tensor("op_10585_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1511542208)))]; + tensor input_613_cast = mul(x = var_10583_cast, y = var_10585_to_fp16)[name = tensor("input_613_cast")]; + tensor var_10593 = const()[name = tensor("op_10593"), val = tensor([1, 1])]; + tensor var_10595 = const()[name = tensor("op_10595"), val = tensor([1, 1])]; + tensor var_10597_pad_type_0 = const()[name = tensor("op_10597_pad_type_0"), 
val = tensor("custom")]; + tensor var_10597_pad_0 = const()[name = tensor("op_10597_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1511544832))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1524652096))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1524652672)))]; + tensor var_10597_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_10595, groups = var_6865, pad = var_10597_pad_0, pad_type = var_10597_pad_type_0, strides = var_10593, weight = up_blocks_0_attentions_1_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_613_cast)[name = tensor("op_10597_cast")]; + tensor var_10598_split_sizes_0 = const()[name = tensor("op_10598_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10598_axis_0 = const()[name = tensor("op_10598_axis_0"), val = tensor(1)]; + tensor var_10598_cast_0, tensor var_10598_cast_1 = split(axis = var_10598_axis_0, split_sizes = var_10598_split_sizes_0, x = var_10597_cast)[name = tensor("op_10598_cast")]; + tensor var_10600_mode_0 = const()[name = tensor("op_10600_mode_0"), val = tensor("EXACT")]; + tensor var_10600_cast = gelu(mode = var_10600_mode_0, x = var_10598_cast_1)[name = tensor("op_10600_cast")]; + tensor input_615_cast = mul(x = var_10598_cast_0, y = var_10600_cast)[name = tensor("input_615_cast")]; + tensor var_10604 = const()[name = tensor("op_10604"), val = tensor([1, 1])]; + tensor var_10606 = const()[name = tensor("op_10606"), val = tensor([1, 1])]; + tensor var_10608_pad_type_0 = const()[name = tensor("op_10608_pad_type_0"), val = tensor("custom")]; + tensor var_10608_pad_0 = const()[name = tensor("op_10608_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1524673216))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1531226880))), name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1531227456)))]; + tensor var_10608_cast = conv(bias = up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_10606, groups = var_6865, pad = var_10608_pad_0, pad_type = var_10608_pad_type_0, strides = var_10604, weight = up_blocks_0_attentions_1_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_615_cast)[name = tensor("op_10608_cast")]; + tensor hidden_states_421_cast = add(x = var_10608_cast, y = inputs_323_cast)[name = tensor("hidden_states_421_cast")]; + tensor var_10610 = 
const()[name = tensor("op_10610"), val = tensor([2, 1280, 32, 32])]; + tensor input_617_cast = reshape(shape = var_10610, x = hidden_states_421_cast)[name = tensor("input_617_cast")]; + tensor var_10614 = const()[name = tensor("op_10614"), val = tensor([1, 1])]; + tensor var_10616 = const()[name = tensor("op_10616"), val = tensor([1, 1])]; + tensor hidden_states_423_pad_type_0 = const()[name = tensor("hidden_states_423_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_423_pad_0 = const()[name = tensor("hidden_states_423_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_1_proj_out_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1531230080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1532868544))), name = tensor("up_blocks_0_attentions_1_proj_out_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1532869120)))]; + tensor hidden_states_423_cast = conv(bias = up_blocks_0_attentions_1_proj_out_bias_to_fp16, dilations = var_10616, groups = var_6865, pad = hidden_states_423_pad_0, pad_type = hidden_states_423_pad_type_0, strides = var_10614, weight = up_blocks_0_attentions_1_proj_out_weight_to_fp16_palettized, x = input_617_cast)[name = tensor("hidden_states_423_cast")]; + tensor hidden_states_425_cast = add(x = hidden_states_423_cast, y = hidden_states_357_cast)[name = tensor("hidden_states_425_cast")]; + tensor input_619_interleave_0 = const()[name = tensor("input_619_interleave_0"), val = tensor(false)]; + tensor input_619_cast = concat(axis = var_6865, interleave = input_619_interleave_0, values = (hidden_states_425_cast, input_115_cast))[name = tensor("input_619_cast")]; + tensor reshape_108_shape_0 = const()[name = tensor("reshape_108_shape_0"), val = tensor([2, 32, 60, 32, 32])]; + tensor reshape_108_cast = reshape(shape = reshape_108_shape_0, x = input_619_cast)[name = tensor("reshape_108_cast")]; + tensor reduce_mean_81_axes_0 = const()[name = tensor("reduce_mean_81_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_81_keep_dims_0 = const()[name = tensor("reduce_mean_81_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_81_cast = reduce_mean(axes = reduce_mean_81_axes_0, keep_dims = reduce_mean_81_keep_dims_0, x = reshape_108_cast)[name = tensor("reduce_mean_81_cast")]; + tensor sub_54_cast = sub(x = reshape_108_cast, y = reduce_mean_81_cast)[name = tensor("sub_54_cast")]; + tensor square_27_cast = square(x = sub_54_cast)[name = tensor("square_27_cast")]; + tensor reduce_mean_83_axes_0 = const()[name = tensor("reduce_mean_83_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_83_keep_dims_0 = const()[name = tensor("reduce_mean_83_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_83_cast = reduce_mean(axes = reduce_mean_83_axes_0, keep_dims = reduce_mean_83_keep_dims_0, x = square_27_cast)[name = tensor("reduce_mean_83_cast")]; + tensor add_54_y_0_to_fp16 = const()[name = tensor("add_54_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_54_cast = add(x = reduce_mean_83_cast, y = add_54_y_0_to_fp16)[name = tensor("add_54_cast")]; + tensor sqrt_27_cast = sqrt(x = add_54_cast)[name = tensor("sqrt_27_cast")]; + tensor real_div_27_cast = real_div(x = sub_54_cast, y = 
sqrt_27_cast)[name = tensor("real_div_27_cast")]; + tensor reshape_109_shape_0 = const()[name = tensor("reshape_109_shape_0"), val = tensor([2, 1920, 32, 32])]; + tensor reshape_109_cast = reshape(shape = reshape_109_shape_0, x = real_div_27_cast)[name = tensor("reshape_109_cast")]; + tensor add_55_mean_0_to_fp16 = const()[name = tensor("add_55_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1532871744)))]; + tensor add_55_variance_0_to_fp16 = const()[name = tensor("add_55_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1532875648)))]; + tensor add_55_gamma_0_to_fp16 = const()[name = tensor("add_55_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1532879552)))]; + tensor add_55_beta_0_to_fp16 = const()[name = tensor("add_55_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1532883456)))]; + tensor add_55_epsilon_0_to_fp16 = const()[name = tensor("add_55_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_55_cast = batch_norm(beta = add_55_beta_0_to_fp16, epsilon = add_55_epsilon_0_to_fp16, gamma = add_55_gamma_0_to_fp16, mean = add_55_mean_0_to_fp16, variance = add_55_variance_0_to_fp16, x = reshape_109_cast)[name = tensor("add_55_cast")]; + tensor input_623_cast = silu(x = add_55_cast)[name = tensor("input_623_cast")]; + tensor var_10634 = const()[name = tensor("op_10634"), val = tensor([1, 1])]; + tensor var_10636 = const()[name = tensor("op_10636"), val = tensor([1, 1])]; + tensor hidden_states_427_pad_type_0 = const()[name = tensor("hidden_states_427_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_427_pad_0 = const()[name = tensor("hidden_states_427_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_2_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1532887360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1555005824))), name = tensor("up_blocks_0_resnets_2_conv1_weight_to_fp16_palettized"), shape = tensor([1280, 1920, 3, 3])]; + tensor up_blocks_0_resnets_2_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_2_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1555006400)))]; + tensor hidden_states_427_cast = conv(bias = up_blocks_0_resnets_2_conv1_bias_to_fp16, dilations = var_10636, groups = var_6865, pad = hidden_states_427_pad_0, pad_type = hidden_states_427_pad_type_0, strides = var_10634, weight = up_blocks_0_resnets_2_conv1_weight_to_fp16_palettized, x = input_623_cast)[name = tensor("hidden_states_427_cast")]; + tensor var_10642 = const()[name = tensor("op_10642"), val = tensor([1, 1])]; + tensor var_10644 = const()[name = tensor("op_10644"), val = tensor([1, 1])]; + tensor temb_21_pad_type_0 = const()[name = tensor("temb_21_pad_type_0"), val = tensor("custom")]; + tensor temb_21_pad_0 = const()[name = tensor("temb_21_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_2_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1555009024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1556237888))), name = 
tensor("up_blocks_0_resnets_2_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_resnets_2_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_2_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1556238080)))]; + tensor temb_21_cast = conv(bias = up_blocks_0_resnets_2_time_emb_proj_bias_to_fp16, dilations = var_10644, groups = var_6865, pad = temb_21_pad_0, pad_type = temb_21_pad_type_0, strides = var_10642, weight = up_blocks_0_resnets_2_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_21_cast")]; + tensor input_627_cast = add(x = hidden_states_427_cast, y = temb_21_cast)[name = tensor("input_627_cast")]; + tensor reshape_112_shape_0 = const()[name = tensor("reshape_112_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_112_cast = reshape(shape = reshape_112_shape_0, x = input_627_cast)[name = tensor("reshape_112_cast")]; + tensor reduce_mean_84_axes_0 = const()[name = tensor("reduce_mean_84_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_84_keep_dims_0 = const()[name = tensor("reduce_mean_84_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_84_cast = reduce_mean(axes = reduce_mean_84_axes_0, keep_dims = reduce_mean_84_keep_dims_0, x = reshape_112_cast)[name = tensor("reduce_mean_84_cast")]; + tensor sub_56_cast = sub(x = reshape_112_cast, y = reduce_mean_84_cast)[name = tensor("sub_56_cast")]; + tensor square_28_cast = square(x = sub_56_cast)[name = tensor("square_28_cast")]; + tensor reduce_mean_86_axes_0 = const()[name = tensor("reduce_mean_86_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_86_keep_dims_0 = const()[name = tensor("reduce_mean_86_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_86_cast = reduce_mean(axes = reduce_mean_86_axes_0, keep_dims = reduce_mean_86_keep_dims_0, x = square_28_cast)[name = tensor("reduce_mean_86_cast")]; + tensor add_56_y_0_to_fp16 = const()[name = tensor("add_56_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_56_cast = add(x = reduce_mean_86_cast, y = add_56_y_0_to_fp16)[name = tensor("add_56_cast")]; + tensor sqrt_28_cast = sqrt(x = add_56_cast)[name = tensor("sqrt_28_cast")]; + tensor real_div_28_cast = real_div(x = sub_56_cast, y = sqrt_28_cast)[name = tensor("real_div_28_cast")]; + tensor reshape_113_shape_0 = const()[name = tensor("reshape_113_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_113_cast = reshape(shape = reshape_113_shape_0, x = real_div_28_cast)[name = tensor("reshape_113_cast")]; + tensor add_57_gamma_0_to_fp16 = const()[name = tensor("add_57_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1556240704)))]; + tensor add_57_beta_0_to_fp16 = const()[name = tensor("add_57_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1556243328)))]; + tensor add_57_epsilon_0_to_fp16 = const()[name = tensor("add_57_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_57_cast = batch_norm(beta = add_57_beta_0_to_fp16, epsilon = add_57_epsilon_0_to_fp16, gamma = add_57_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_113_cast)[name = tensor("add_57_cast")]; + tensor input_631_cast = silu(x = add_57_cast)[name = tensor("input_631_cast")]; + tensor var_10654 = const()[name = tensor("op_10654"), val = tensor([1, 1])]; + tensor var_10656 = const()[name = 
tensor("op_10656"), val = tensor([1, 1])]; + tensor hidden_states_429_pad_type_0 = const()[name = tensor("hidden_states_429_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_429_pad_0 = const()[name = tensor("hidden_states_429_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_resnets_2_conv2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1556245952))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1570991616))), name = tensor("up_blocks_0_resnets_2_conv2_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 3, 3])]; + tensor up_blocks_0_resnets_2_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_2_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1570992192)))]; + tensor hidden_states_429_cast = conv(bias = up_blocks_0_resnets_2_conv2_bias_to_fp16, dilations = var_10656, groups = var_6865, pad = hidden_states_429_pad_0, pad_type = hidden_states_429_pad_type_0, strides = var_10654, weight = up_blocks_0_resnets_2_conv2_weight_to_fp16_palettized, x = input_631_cast)[name = tensor("hidden_states_429_cast")]; + tensor var_10661 = const()[name = tensor("op_10661"), val = tensor([1, 1])]; + tensor var_10663 = const()[name = tensor("op_10663"), val = tensor([1, 1])]; + tensor x_9_pad_type_0 = const()[name = tensor("x_9_pad_type_0"), val = tensor("custom")]; + tensor x_9_pad_0 = const()[name = tensor("x_9_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_resnets_2_conv_shortcut_weight_to_fp16 = const()[name = tensor("up_blocks_0_resnets_2_conv_shortcut_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1570994816)))]; + tensor up_blocks_0_resnets_2_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_0_resnets_2_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1575910080)))]; + tensor x_9_cast = conv(bias = up_blocks_0_resnets_2_conv_shortcut_bias_to_fp16, dilations = var_10663, groups = var_6865, pad = x_9_pad_0, pad_type = x_9_pad_type_0, strides = var_10661, weight = up_blocks_0_resnets_2_conv_shortcut_weight_to_fp16, x = input_619_cast)[name = tensor("x_9_cast")]; + tensor hidden_states_431_cast = add(x = x_9_cast, y = hidden_states_429_cast)[name = tensor("hidden_states_431_cast")]; + tensor reshape_116_shape_0 = const()[name = tensor("reshape_116_shape_0"), val = tensor([2, 32, 40, 32, 32])]; + tensor reshape_116_cast = reshape(shape = reshape_116_shape_0, x = hidden_states_431_cast)[name = tensor("reshape_116_cast")]; + tensor reduce_mean_87_axes_0 = const()[name = tensor("reduce_mean_87_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_87_keep_dims_0 = const()[name = tensor("reduce_mean_87_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_87_cast = reduce_mean(axes = reduce_mean_87_axes_0, keep_dims = reduce_mean_87_keep_dims_0, x = reshape_116_cast)[name = tensor("reduce_mean_87_cast")]; + tensor sub_58_cast = sub(x = reshape_116_cast, y = reduce_mean_87_cast)[name = tensor("sub_58_cast")]; + tensor square_29_cast = square(x = sub_58_cast)[name = tensor("square_29_cast")]; + tensor reduce_mean_89_axes_0 = const()[name = tensor("reduce_mean_89_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_89_keep_dims_0 = const()[name = tensor("reduce_mean_89_keep_dims_0"), val = tensor(true)]; + tensor 
reduce_mean_89_cast = reduce_mean(axes = reduce_mean_89_axes_0, keep_dims = reduce_mean_89_keep_dims_0, x = square_29_cast)[name = tensor("reduce_mean_89_cast")]; + tensor add_58_y_0_to_fp16 = const()[name = tensor("add_58_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_58_cast = add(x = reduce_mean_89_cast, y = add_58_y_0_to_fp16)[name = tensor("add_58_cast")]; + tensor sqrt_29_cast = sqrt(x = add_58_cast)[name = tensor("sqrt_29_cast")]; + tensor real_div_29_cast = real_div(x = sub_58_cast, y = sqrt_29_cast)[name = tensor("real_div_29_cast")]; + tensor reshape_117_shape_0 = const()[name = tensor("reshape_117_shape_0"), val = tensor([2, 1280, 32, 32])]; + tensor reshape_117_cast = reshape(shape = reshape_117_shape_0, x = real_div_29_cast)[name = tensor("reshape_117_cast")]; + tensor add_59_gamma_0_to_fp16 = const()[name = tensor("add_59_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1575912704)))]; + tensor add_59_beta_0_to_fp16 = const()[name = tensor("add_59_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1575915328)))]; + tensor add_59_epsilon_0_to_fp16 = const()[name = tensor("add_59_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_59_cast = batch_norm(beta = add_59_beta_0_to_fp16, epsilon = add_59_epsilon_0_to_fp16, gamma = add_59_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_117_cast)[name = tensor("add_59_cast")]; + tensor var_10701 = const()[name = tensor("op_10701"), val = tensor([1, 1])]; + tensor var_10703 = const()[name = tensor("op_10703"), val = tensor([1, 1])]; + tensor hidden_states_433_pad_type_0 = const()[name = tensor("hidden_states_433_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_433_pad_0 = const()[name = tensor("hidden_states_433_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_proj_in_weight_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_proj_in_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1575917952)))]; + tensor up_blocks_0_attentions_2_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1579194816)))]; + tensor hidden_states_433_cast = conv(bias = up_blocks_0_attentions_2_proj_in_bias_to_fp16, dilations = var_10703, groups = var_6865, pad = hidden_states_433_pad_0, pad_type = hidden_states_433_pad_type_0, strides = var_10701, weight = up_blocks_0_attentions_2_proj_in_weight_to_fp16, x = add_59_cast)[name = tensor("hidden_states_433_cast")]; + tensor var_10708 = const()[name = tensor("op_10708"), val = tensor([2, 1280, 1, 1024])]; + tensor inputs_325_cast = reshape(shape = var_10708, x = hidden_states_433_cast)[name = tensor("inputs_325_cast")]; + tensor var_10718 = const()[name = tensor("op_10718"), val = tensor([1])]; + tensor channels_mean_325_cast = reduce_mean(axes = var_10718, keep_dims = var_6860, x = inputs_325_cast)[name = tensor("channels_mean_325_cast")]; + tensor zero_mean_325_cast = sub(x = inputs_325_cast, y = channels_mean_325_cast)[name = tensor("zero_mean_325_cast")]; + tensor zero_mean_sq_325_cast = mul(x = zero_mean_325_cast, y = zero_mean_325_cast)[name = tensor("zero_mean_sq_325_cast")]; + tensor var_10722 = const()[name = tensor("op_10722"), val = tensor([1])]; + tensor var_10723_cast = reduce_mean(axes = var_10722, keep_dims
= var_6860, x = zero_mean_sq_325_cast)[name = tensor("op_10723_cast")]; + tensor var_10724_to_fp16 = const()[name = tensor("op_10724_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10725_cast = add(x = var_10723_cast, y = var_10724_to_fp16)[name = tensor("op_10725_cast")]; + tensor denom_325_epsilon_0_to_fp16 = const()[name = tensor("denom_325_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_325_cast = rsqrt(epsilon = denom_325_epsilon_0_to_fp16, x = var_10725_cast)[name = tensor("denom_325_cast")]; + tensor out_325_cast = mul(x = zero_mean_325_cast, y = denom_325_cast)[name = tensor("out_325_cast")]; + tensor var_10729_to_fp16 = const()[name = tensor("op_10729_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1579197440)))]; + tensor var_10730_cast = add(x = out_325_cast, y = var_10729_to_fp16)[name = tensor("op_10730_cast")]; + tensor var_10732_to_fp16 = const()[name = tensor("op_10732_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1579200064)))]; + tensor hidden_states_435_cast = mul(x = var_10730_cast, y = var_10732_to_fp16)[name = tensor("hidden_states_435_cast")]; + tensor var_10739 = const()[name = tensor("op_10739"), val = tensor([1, 1])]; + tensor var_10741 = const()[name = tensor("op_10741"), val = tensor([1, 1])]; + tensor q_217_pad_type_0 = const()[name = tensor("q_217_pad_type_0"), val = tensor("custom")]; + tensor q_217_pad_0 = const()[name = tensor("q_217_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1579202688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1580431552))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_217_cast = conv(dilations = var_10741, groups = var_6865, pad = q_217_pad_0, pad_type = q_217_pad_type_0, strides = var_10739, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_435_cast)[name = tensor("q_217_cast")]; + tensor var_10745 = const()[name = tensor("op_10745"), val = tensor([1, 1])]; + tensor var_10747 = const()[name = tensor("op_10747"), val = tensor([1, 1])]; + tensor k_217_pad_type_0 = const()[name = tensor("k_217_pad_type_0"), val = tensor("custom")]; + tensor k_217_pad_0 = const()[name = tensor("k_217_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1580431744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1581660608))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_217_cast = conv(dilations = var_10747, groups = var_6865, pad = k_217_pad_0, pad_type = k_217_pad_type_0, strides = var_10745, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_435_cast)[name = tensor("k_217_cast")]; + tensor var_10751 = const()[name = tensor("op_10751"), val = tensor([1, 1])]; + tensor var_10753 = const()[name = tensor("op_10753"), val = tensor([1, 1])]; + 
tensor v_217_pad_type_0 = const()[name = tensor("v_217_pad_type_0"), val = tensor("custom")]; + tensor v_217_pad_0 = const()[name = tensor("v_217_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1581660800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1583299264))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_217_cast = conv(dilations = var_10753, groups = var_6865, pad = v_217_pad_0, pad_type = v_217_pad_type_0, strides = var_10751, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_435_cast)[name = tensor("v_217_cast")]; + tensor var_10757 = const()[name = tensor("op_10757"), val = tensor([2, 20, 64, -1])]; + tensor var_10758_cast = reshape(shape = var_10757, x = q_217_cast)[name = tensor("op_10758_cast")]; + tensor var_10759 = const()[name = tensor("op_10759"), val = tensor([2, 20, 64, -1])]; + tensor var_10760_cast = reshape(shape = var_10759, x = k_217_cast)[name = tensor("op_10760_cast")]; + tensor var_10761 = const()[name = tensor("op_10761"), val = tensor([2, 20, 64, -1])]; + tensor var_10762_cast = reshape(shape = var_10761, x = v_217_cast)[name = tensor("op_10762_cast")]; + tensor attn_weights_433_transpose_x_0 = const()[name = tensor("attn_weights_433_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_433_transpose_y_0 = const()[name = tensor("attn_weights_433_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_433_cast = matmul(transpose_x = attn_weights_433_transpose_x_0, transpose_y = attn_weights_433_transpose_y_0, x = var_10758_cast, y = var_10760_cast)[name = tensor("attn_weights_433_cast")]; + tensor attn_weights_435_cast = mul(x = attn_weights_433_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_435_cast")]; + tensor var_10766_cast = softmax(axis = var_6849, x = attn_weights_435_cast)[name = tensor("op_10766_cast")]; + tensor attn_217_transpose_x_0 = const()[name = tensor("attn_217_transpose_x_0"), val = tensor(false)]; + tensor attn_217_transpose_y_0 = const()[name = tensor("attn_217_transpose_y_0"), val = tensor(true)]; + tensor attn_217_cast = matmul(transpose_x = attn_217_transpose_x_0, transpose_y = attn_217_transpose_y_0, x = var_10762_cast, y = var_10766_cast)[name = tensor("attn_217_cast")]; + tensor var_10770 = const()[name = tensor("op_10770"), val = tensor([2, 1280, 1, -1])]; + tensor input_635_cast = reshape(shape = var_10770, x = attn_217_cast)[name = tensor("input_635_cast")]; + tensor var_10775 = const()[name = tensor("op_10775"), val = tensor([1, 1])]; + tensor var_10777 = const()[name = tensor("op_10777"), val = tensor([1, 1])]; + tensor var_10779_pad_type_0 = const()[name = tensor("op_10779_pad_type_0"), val = tensor("custom")]; + tensor var_10779_pad_0 = const()[name = tensor("op_10779_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1583299840))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1584938304))), name = 
tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1584938880)))]; + tensor var_10779_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_10777, groups = var_6865, pad = var_10779_pad_0, pad_type = var_10779_pad_type_0, strides = var_10775, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16_palettized, x = input_635_cast)[name = tensor("op_10779_cast")]; + tensor inputs_327_cast = add(x = var_10779_cast, y = inputs_325_cast)[name = tensor("inputs_327_cast")]; + tensor var_10783 = const()[name = tensor("op_10783"), val = tensor([1])]; + tensor channels_mean_327_cast = reduce_mean(axes = var_10783, keep_dims = var_6860, x = inputs_327_cast)[name = tensor("channels_mean_327_cast")]; + tensor zero_mean_327_cast = sub(x = inputs_327_cast, y = channels_mean_327_cast)[name = tensor("zero_mean_327_cast")]; + tensor zero_mean_sq_327_cast = mul(x = zero_mean_327_cast, y = zero_mean_327_cast)[name = tensor("zero_mean_sq_327_cast")]; + tensor var_10787 = const()[name = tensor("op_10787"), val = tensor([1])]; + tensor var_10788_cast = reduce_mean(axes = var_10787, keep_dims = var_6860, x = zero_mean_sq_327_cast)[name = tensor("op_10788_cast")]; + tensor var_10789_to_fp16 = const()[name = tensor("op_10789_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10790_cast = add(x = var_10788_cast, y = var_10789_to_fp16)[name = tensor("op_10790_cast")]; + tensor denom_327_epsilon_0_to_fp16 = const()[name = tensor("denom_327_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_327_cast = rsqrt(epsilon = denom_327_epsilon_0_to_fp16, x = var_10790_cast)[name = tensor("denom_327_cast")]; + tensor out_327_cast = mul(x = zero_mean_327_cast, y = denom_327_cast)[name = tensor("out_327_cast")]; + tensor var_10794_to_fp16 = const()[name = tensor("op_10794_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1584941504)))]; + tensor var_10795_cast = add(x = out_327_cast, y = var_10794_to_fp16)[name = tensor("op_10795_cast")]; + tensor var_10797_to_fp16 = const()[name = tensor("op_10797_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1584944128)))]; + tensor hidden_states_437_cast = mul(x = var_10795_cast, y = var_10797_to_fp16)[name = tensor("hidden_states_437_cast")]; + tensor var_10804 = const()[name = tensor("op_10804"), val = tensor([1, 1])]; + tensor var_10806 = const()[name = tensor("op_10806"), val = tensor([1, 1])]; + tensor q_219_pad_type_0 = const()[name = tensor("q_219_pad_type_0"), val = tensor("custom")]; + tensor q_219_pad_0 = const()[name = tensor("q_219_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1584946752))), lut = tensor([-0x1.c78p-6, -0x1.154p-7, 0x1.148p-7, 0x1.c7p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_219_cast = conv(dilations = var_10806, groups 
= var_6865, pad = q_219_pad_0, pad_type = q_219_pad_type_0, strides = var_10804, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_437_cast)[name = tensor("q_219_cast")]; + tensor var_10810 = const()[name = tensor("op_10810"), val = tensor([1, 1])]; + tensor var_10812 = const()[name = tensor("op_10812"), val = tensor([1, 1])]; + tensor k_219_pad_type_0 = const()[name = tensor("k_219_pad_type_0"), val = tensor("custom")]; + tensor k_219_pad_0 = const()[name = tensor("k_219_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1585356416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1586667200))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_219_cast = conv(dilations = var_10812, groups = var_6865, pad = k_219_pad_0, pad_type = k_219_pad_type_0, strides = var_10810, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_219_cast")]; + tensor var_10816 = const()[name = tensor("op_10816"), val = tensor([1, 1])]; + tensor var_10818 = const()[name = tensor("op_10818"), val = tensor([1, 1])]; + tensor v_219_pad_type_0 = const()[name = tensor("v_219_pad_type_0"), val = tensor("custom")]; + tensor v_219_pad_0 = const()[name = tensor("v_219_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1586667328))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1588633472))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_219_cast = conv(dilations = var_10818, groups = var_6865, pad = v_219_pad_0, pad_type = v_219_pad_type_0, strides = var_10816, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_219_cast")]; + tensor var_10822 = const()[name = tensor("op_10822"), val = tensor([2, 20, 64, -1])]; + tensor var_10823_cast = reshape(shape = var_10822, x = q_219_cast)[name = tensor("op_10823_cast")]; + tensor var_10824 = const()[name = tensor("op_10824"), val = tensor([2, 20, 64, -1])]; + tensor var_10825_cast = reshape(shape = var_10824, x = k_219_cast)[name = tensor("op_10825_cast")]; + tensor var_10826 = const()[name = tensor("op_10826"), val = tensor([2, 20, 64, -1])]; + tensor var_10827_cast = reshape(shape = var_10826, x = v_219_cast)[name = tensor("op_10827_cast")]; + tensor attn_weights_437_transpose_x_0 = const()[name = tensor("attn_weights_437_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_437_transpose_y_0 = const()[name = tensor("attn_weights_437_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_437_cast = matmul(transpose_x = attn_weights_437_transpose_x_0, transpose_y = attn_weights_437_transpose_y_0, x = var_10823_cast, y = var_10825_cast)[name = tensor("attn_weights_437_cast")]; + tensor attn_weights_439_cast = mul(x = attn_weights_437_cast, y = var_6856_to_fp16)[name = 
tensor("attn_weights_439_cast")]; + tensor var_10831_cast = softmax(axis = var_6849, x = attn_weights_439_cast)[name = tensor("op_10831_cast")]; + tensor attn_219_transpose_x_0 = const()[name = tensor("attn_219_transpose_x_0"), val = tensor(false)]; + tensor attn_219_transpose_y_0 = const()[name = tensor("attn_219_transpose_y_0"), val = tensor(true)]; + tensor attn_219_cast = matmul(transpose_x = attn_219_transpose_x_0, transpose_y = attn_219_transpose_y_0, x = var_10827_cast, y = var_10831_cast)[name = tensor("attn_219_cast")]; + tensor var_10835 = const()[name = tensor("op_10835"), val = tensor([2, 1280, 1, -1])]; + tensor input_637_cast = reshape(shape = var_10835, x = attn_219_cast)[name = tensor("input_637_cast")]; + tensor var_10840 = const()[name = tensor("op_10840"), val = tensor([1, 1])]; + tensor var_10842 = const()[name = tensor("op_10842"), val = tensor([1, 1])]; + tensor var_10844_pad_type_0 = const()[name = tensor("op_10844_pad_type_0"), val = tensor("custom")]; + tensor var_10844_pad_0 = const()[name = tensor("op_10844_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1588633664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1589452928))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1589453056)))]; + tensor var_10844_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_10842, groups = var_6865, pad = var_10844_pad_0, pad_type = var_10844_pad_type_0, strides = var_10840, weight = up_blocks_0_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_637_cast)[name = tensor("op_10844_cast")]; + tensor inputs_329_cast = add(x = var_10844_cast, y = inputs_327_cast)[name = tensor("inputs_329_cast")]; + tensor var_10848 = const()[name = tensor("op_10848"), val = tensor([1])]; + tensor channels_mean_329_cast = reduce_mean(axes = var_10848, keep_dims = var_6860, x = inputs_329_cast)[name = tensor("channels_mean_329_cast")]; + tensor zero_mean_329_cast = sub(x = inputs_329_cast, y = channels_mean_329_cast)[name = tensor("zero_mean_329_cast")]; + tensor zero_mean_sq_329_cast = mul(x = zero_mean_329_cast, y = zero_mean_329_cast)[name = tensor("zero_mean_sq_329_cast")]; + tensor var_10852 = const()[name = tensor("op_10852"), val = tensor([1])]; + tensor var_10853_cast = reduce_mean(axes = var_10852, keep_dims = var_6860, x = zero_mean_sq_329_cast)[name = tensor("op_10853_cast")]; + tensor var_10854_to_fp16 = const()[name = tensor("op_10854_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10855_cast = add(x = var_10853_cast, y = var_10854_to_fp16)[name = tensor("op_10855_cast")]; + tensor denom_329_epsilon_0_to_fp16 = const()[name = tensor("denom_329_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_329_cast = rsqrt(epsilon = denom_329_epsilon_0_to_fp16, x = var_10855_cast)[name = tensor("denom_329_cast")]; + tensor out_329_cast = mul(x = zero_mean_329_cast, y = denom_329_cast)[name = 
tensor("out_329_cast")]; + tensor var_10859_to_fp16 = const()[name = tensor("op_10859_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1589455680)))]; + tensor var_10860_cast = add(x = out_329_cast, y = var_10859_to_fp16)[name = tensor("op_10860_cast")]; + tensor var_10862_to_fp16 = const()[name = tensor("op_10862_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1589458304)))]; + tensor input_639_cast = mul(x = var_10860_cast, y = var_10862_to_fp16)[name = tensor("input_639_cast")]; + tensor var_10870 = const()[name = tensor("op_10870"), val = tensor([1, 1])]; + tensor var_10872 = const()[name = tensor("op_10872"), val = tensor([1, 1])]; + tensor var_10874_pad_type_0 = const()[name = tensor("op_10874_pad_type_0"), val = tensor("custom")]; + tensor var_10874_pad_0 = const()[name = tensor("op_10874_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1589460928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1602568192))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1602568768)))]; + tensor var_10874_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_10872, groups = var_6865, pad = var_10874_pad_0, pad_type = var_10874_pad_type_0, strides = var_10870, weight = up_blocks_0_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16_palettized, x = input_639_cast)[name = tensor("op_10874_cast")]; + tensor var_10875_split_sizes_0 = const()[name = tensor("op_10875_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_10875_axis_0 = const()[name = tensor("op_10875_axis_0"), val = tensor(1)]; + tensor var_10875_cast_0, tensor var_10875_cast_1 = split(axis = var_10875_axis_0, split_sizes = var_10875_split_sizes_0, x = var_10874_cast)[name = tensor("op_10875_cast")]; + tensor var_10877_mode_0 = const()[name = tensor("op_10877_mode_0"), val = tensor("EXACT")]; + tensor var_10877_cast = gelu(mode = var_10877_mode_0, x = var_10875_cast_1)[name = tensor("op_10877_cast")]; + tensor input_641_cast = mul(x = var_10875_cast_0, y = var_10877_cast)[name = tensor("input_641_cast")]; + tensor var_10881 = const()[name = tensor("op_10881"), val = tensor([1, 1])]; + tensor var_10883 = const()[name = tensor("op_10883"), val = tensor([1, 1])]; + tensor var_10885_pad_type_0 = const()[name = tensor("op_10885_pad_type_0"), val = tensor("custom")]; + tensor var_10885_pad_0 = const()[name = tensor("op_10885_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1602589312))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1609142976))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized"), 
shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1609143552)))]; + tensor var_10885_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_10883, groups = var_6865, pad = var_10885_pad_0, pad_type = var_10885_pad_type_0, strides = var_10881, weight = up_blocks_0_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16_palettized, x = input_641_cast)[name = tensor("op_10885_cast")]; + tensor inputs_331_cast = add(x = var_10885_cast, y = inputs_329_cast)[name = tensor("inputs_331_cast")]; + tensor var_10895 = const()[name = tensor("op_10895"), val = tensor([1])]; + tensor channels_mean_331_cast = reduce_mean(axes = var_10895, keep_dims = var_6860, x = inputs_331_cast)[name = tensor("channels_mean_331_cast")]; + tensor zero_mean_331_cast = sub(x = inputs_331_cast, y = channels_mean_331_cast)[name = tensor("zero_mean_331_cast")]; + tensor zero_mean_sq_331_cast = mul(x = zero_mean_331_cast, y = zero_mean_331_cast)[name = tensor("zero_mean_sq_331_cast")]; + tensor var_10899 = const()[name = tensor("op_10899"), val = tensor([1])]; + tensor var_10900_cast = reduce_mean(axes = var_10899, keep_dims = var_6860, x = zero_mean_sq_331_cast)[name = tensor("op_10900_cast")]; + tensor var_10901_to_fp16 = const()[name = tensor("op_10901_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10902_cast = add(x = var_10900_cast, y = var_10901_to_fp16)[name = tensor("op_10902_cast")]; + tensor denom_331_epsilon_0_to_fp16 = const()[name = tensor("denom_331_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_331_cast = rsqrt(epsilon = denom_331_epsilon_0_to_fp16, x = var_10902_cast)[name = tensor("denom_331_cast")]; + tensor out_331_cast = mul(x = zero_mean_331_cast, y = denom_331_cast)[name = tensor("out_331_cast")]; + tensor var_10906_to_fp16 = const()[name = tensor("op_10906_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1609146176)))]; + tensor var_10907_cast = add(x = out_331_cast, y = var_10906_to_fp16)[name = tensor("op_10907_cast")]; + tensor var_10909_to_fp16 = const()[name = tensor("op_10909_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1609148800)))]; + tensor hidden_states_441_cast = mul(x = var_10907_cast, y = var_10909_to_fp16)[name = tensor("hidden_states_441_cast")]; + tensor var_10916 = const()[name = tensor("op_10916"), val = tensor([1, 1])]; + tensor var_10918 = const()[name = tensor("op_10918"), val = tensor([1, 1])]; + tensor q_221_pad_type_0 = const()[name = tensor("q_221_pad_type_0"), val = tensor("custom")]; + tensor q_221_pad_0 = const()[name = tensor("q_221_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1609151424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1610380288))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_221_cast = conv(dilations = var_10918, groups = var_6865, pad = q_221_pad_0, pad_type = q_221_pad_type_0, strides = var_10916, 
weight = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_441_cast)[name = tensor("q_221_cast")]; + tensor var_10922 = const()[name = tensor("op_10922"), val = tensor([1, 1])]; + tensor var_10924 = const()[name = tensor("op_10924"), val = tensor([1, 1])]; + tensor k_221_pad_type_0 = const()[name = tensor("k_221_pad_type_0"), val = tensor("custom")]; + tensor k_221_pad_0 = const()[name = tensor("k_221_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1610380480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1611609344))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_221_cast = conv(dilations = var_10924, groups = var_6865, pad = k_221_pad_0, pad_type = k_221_pad_type_0, strides = var_10922, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_441_cast)[name = tensor("k_221_cast")]; + tensor var_10928 = const()[name = tensor("op_10928"), val = tensor([1, 1])]; + tensor var_10930 = const()[name = tensor("op_10930"), val = tensor([1, 1])]; + tensor v_221_pad_type_0 = const()[name = tensor("v_221_pad_type_0"), val = tensor("custom")]; + tensor v_221_pad_0 = const()[name = tensor("v_221_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1611609536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1613248000))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_221_cast = conv(dilations = var_10930, groups = var_6865, pad = v_221_pad_0, pad_type = v_221_pad_type_0, strides = var_10928, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_441_cast)[name = tensor("v_221_cast")]; + tensor var_10934 = const()[name = tensor("op_10934"), val = tensor([2, 20, 64, -1])]; + tensor var_10935_cast = reshape(shape = var_10934, x = q_221_cast)[name = tensor("op_10935_cast")]; + tensor var_10936 = const()[name = tensor("op_10936"), val = tensor([2, 20, 64, -1])]; + tensor var_10937_cast = reshape(shape = var_10936, x = k_221_cast)[name = tensor("op_10937_cast")]; + tensor var_10938 = const()[name = tensor("op_10938"), val = tensor([2, 20, 64, -1])]; + tensor var_10939_cast = reshape(shape = var_10938, x = v_221_cast)[name = tensor("op_10939_cast")]; + tensor attn_weights_441_transpose_x_0 = const()[name = tensor("attn_weights_441_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_441_transpose_y_0 = const()[name = tensor("attn_weights_441_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_441_cast = matmul(transpose_x = attn_weights_441_transpose_x_0, transpose_y = attn_weights_441_transpose_y_0, x = var_10935_cast, y = var_10937_cast)[name = tensor("attn_weights_441_cast")]; + tensor attn_weights_443_cast = mul(x = attn_weights_441_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_443_cast")]; + tensor var_10943_cast = softmax(axis = var_6849, x = 
attn_weights_443_cast)[name = tensor("op_10943_cast")]; + tensor attn_221_transpose_x_0 = const()[name = tensor("attn_221_transpose_x_0"), val = tensor(false)]; + tensor attn_221_transpose_y_0 = const()[name = tensor("attn_221_transpose_y_0"), val = tensor(true)]; + tensor attn_221_cast = matmul(transpose_x = attn_221_transpose_x_0, transpose_y = attn_221_transpose_y_0, x = var_10939_cast, y = var_10943_cast)[name = tensor("attn_221_cast")]; + tensor var_10947 = const()[name = tensor("op_10947"), val = tensor([2, 1280, 1, -1])]; + tensor input_643_cast = reshape(shape = var_10947, x = attn_221_cast)[name = tensor("input_643_cast")]; + tensor var_10952 = const()[name = tensor("op_10952"), val = tensor([1, 1])]; + tensor var_10954 = const()[name = tensor("op_10954"), val = tensor([1, 1])]; + tensor var_10956_pad_type_0 = const()[name = tensor("op_10956_pad_type_0"), val = tensor("custom")]; + tensor var_10956_pad_0 = const()[name = tensor("op_10956_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1613248576))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1614887040))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1614887616)))]; + tensor var_10956_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_10954, groups = var_6865, pad = var_10956_pad_0, pad_type = var_10956_pad_type_0, strides = var_10952, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16_palettized, x = input_643_cast)[name = tensor("op_10956_cast")]; + tensor inputs_333_cast = add(x = var_10956_cast, y = inputs_331_cast)[name = tensor("inputs_333_cast")]; + tensor var_10960 = const()[name = tensor("op_10960"), val = tensor([1])]; + tensor channels_mean_333_cast = reduce_mean(axes = var_10960, keep_dims = var_6860, x = inputs_333_cast)[name = tensor("channels_mean_333_cast")]; + tensor zero_mean_333_cast = sub(x = inputs_333_cast, y = channels_mean_333_cast)[name = tensor("zero_mean_333_cast")]; + tensor zero_mean_sq_333_cast = mul(x = zero_mean_333_cast, y = zero_mean_333_cast)[name = tensor("zero_mean_sq_333_cast")]; + tensor var_10964 = const()[name = tensor("op_10964"), val = tensor([1])]; + tensor var_10965_cast = reduce_mean(axes = var_10964, keep_dims = var_6860, x = zero_mean_sq_333_cast)[name = tensor("op_10965_cast")]; + tensor var_10966_to_fp16 = const()[name = tensor("op_10966_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_10967_cast = add(x = var_10965_cast, y = var_10966_to_fp16)[name = tensor("op_10967_cast")]; + tensor denom_333_epsilon_0_to_fp16 = const()[name = tensor("denom_333_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_333_cast = rsqrt(epsilon = denom_333_epsilon_0_to_fp16, x = var_10967_cast)[name = tensor("denom_333_cast")]; + tensor out_333_cast = mul(x = zero_mean_333_cast, y = denom_333_cast)[name = tensor("out_333_cast")]; + tensor var_10971_to_fp16 = const()[name = 
tensor("op_10971_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1614890240)))]; + tensor var_10972_cast = add(x = out_333_cast, y = var_10971_to_fp16)[name = tensor("op_10972_cast")]; + tensor var_10974_to_fp16 = const()[name = tensor("op_10974_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1614892864)))]; + tensor hidden_states_443_cast = mul(x = var_10972_cast, y = var_10974_to_fp16)[name = tensor("hidden_states_443_cast")]; + tensor var_10981 = const()[name = tensor("op_10981"), val = tensor([1, 1])]; + tensor var_10983 = const()[name = tensor("op_10983"), val = tensor([1, 1])]; + tensor q_223_pad_type_0 = const()[name = tensor("q_223_pad_type_0"), val = tensor("custom")]; + tensor q_223_pad_0 = const()[name = tensor("q_223_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1614895488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1615714752))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_223_cast = conv(dilations = var_10983, groups = var_6865, pad = q_223_pad_0, pad_type = q_223_pad_type_0, strides = var_10981, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_443_cast)[name = tensor("q_223_cast")]; + tensor var_10987 = const()[name = tensor("op_10987"), val = tensor([1, 1])]; + tensor var_10989 = const()[name = tensor("op_10989"), val = tensor([1, 1])]; + tensor k_223_pad_type_0 = const()[name = tensor("k_223_pad_type_0"), val = tensor("custom")]; + tensor k_223_pad_0 = const()[name = tensor("k_223_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1615714880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1617025664))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_223_cast = conv(dilations = var_10989, groups = var_6865, pad = k_223_pad_0, pad_type = k_223_pad_type_0, strides = var_10987, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_223_cast")]; + tensor var_10993 = const()[name = tensor("op_10993"), val = tensor([1, 1])]; + tensor var_10995 = const()[name = tensor("op_10995"), val = tensor([1, 1])]; + tensor v_223_pad_type_0 = const()[name = tensor("v_223_pad_type_0"), val = tensor("custom")]; + tensor v_223_pad_0 = const()[name = tensor("v_223_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1617025792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1619647296))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor 
v_223_cast = conv(dilations = var_10995, groups = var_6865, pad = v_223_pad_0, pad_type = v_223_pad_type_0, strides = var_10993, weight = up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_223_cast")]; + tensor var_10999 = const()[name = tensor("op_10999"), val = tensor([2, 20, 64, -1])]; + tensor var_11000_cast = reshape(shape = var_10999, x = q_223_cast)[name = tensor("op_11000_cast")]; + tensor var_11001 = const()[name = tensor("op_11001"), val = tensor([2, 20, 64, -1])]; + tensor var_11002_cast = reshape(shape = var_11001, x = k_223_cast)[name = tensor("op_11002_cast")]; + tensor var_11003 = const()[name = tensor("op_11003"), val = tensor([2, 20, 64, -1])]; + tensor var_11004_cast = reshape(shape = var_11003, x = v_223_cast)[name = tensor("op_11004_cast")]; + tensor attn_weights_445_transpose_x_0 = const()[name = tensor("attn_weights_445_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_445_transpose_y_0 = const()[name = tensor("attn_weights_445_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_445_cast = matmul(transpose_x = attn_weights_445_transpose_x_0, transpose_y = attn_weights_445_transpose_y_0, x = var_11000_cast, y = var_11002_cast)[name = tensor("attn_weights_445_cast")]; + tensor attn_weights_447_cast = mul(x = attn_weights_445_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_447_cast")]; + tensor var_11008_cast = softmax(axis = var_6849, x = attn_weights_447_cast)[name = tensor("op_11008_cast")]; + tensor attn_223_transpose_x_0 = const()[name = tensor("attn_223_transpose_x_0"), val = tensor(false)]; + tensor attn_223_transpose_y_0 = const()[name = tensor("attn_223_transpose_y_0"), val = tensor(true)]; + tensor attn_223_cast = matmul(transpose_x = attn_223_transpose_x_0, transpose_y = attn_223_transpose_y_0, x = var_11004_cast, y = var_11008_cast)[name = tensor("attn_223_cast")]; + tensor var_11012 = const()[name = tensor("op_11012"), val = tensor([2, 1280, 1, -1])]; + tensor input_645_cast = reshape(shape = var_11012, x = attn_223_cast)[name = tensor("input_645_cast")]; + tensor var_11017 = const()[name = tensor("op_11017"), val = tensor([1, 1])]; + tensor var_11019 = const()[name = tensor("op_11019"), val = tensor([1, 1])]; + tensor var_11021_pad_type_0 = const()[name = tensor("op_11021_pad_type_0"), val = tensor("custom")]; + tensor var_11021_pad_0 = const()[name = tensor("op_11021_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1619647872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1620876736))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1620876928)))]; + tensor var_11021_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_11019, groups = var_6865, pad = var_11021_pad_0, pad_type = var_11021_pad_type_0, strides = var_11017, weight = 
up_blocks_0_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_645_cast)[name = tensor("op_11021_cast")]; + tensor inputs_335_cast = add(x = var_11021_cast, y = inputs_333_cast)[name = tensor("inputs_335_cast")]; + tensor var_11025 = const()[name = tensor("op_11025"), val = tensor([1])]; + tensor channels_mean_335_cast = reduce_mean(axes = var_11025, keep_dims = var_6860, x = inputs_335_cast)[name = tensor("channels_mean_335_cast")]; + tensor zero_mean_335_cast = sub(x = inputs_335_cast, y = channels_mean_335_cast)[name = tensor("zero_mean_335_cast")]; + tensor zero_mean_sq_335_cast = mul(x = zero_mean_335_cast, y = zero_mean_335_cast)[name = tensor("zero_mean_sq_335_cast")]; + tensor var_11029 = const()[name = tensor("op_11029"), val = tensor([1])]; + tensor var_11030_cast = reduce_mean(axes = var_11029, keep_dims = var_6860, x = zero_mean_sq_335_cast)[name = tensor("op_11030_cast")]; + tensor var_11031_to_fp16 = const()[name = tensor("op_11031_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11032_cast = add(x = var_11030_cast, y = var_11031_to_fp16)[name = tensor("op_11032_cast")]; + tensor denom_335_epsilon_0_to_fp16 = const()[name = tensor("denom_335_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_335_cast = rsqrt(epsilon = denom_335_epsilon_0_to_fp16, x = var_11032_cast)[name = tensor("denom_335_cast")]; + tensor out_335_cast = mul(x = zero_mean_335_cast, y = denom_335_cast)[name = tensor("out_335_cast")]; + tensor var_11036_to_fp16 = const()[name = tensor("op_11036_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1620879552)))]; + tensor var_11037_cast = add(x = out_335_cast, y = var_11036_to_fp16)[name = tensor("op_11037_cast")]; + tensor var_11039_to_fp16 = const()[name = tensor("op_11039_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1620882176)))]; + tensor input_647_cast = mul(x = var_11037_cast, y = var_11039_to_fp16)[name = tensor("input_647_cast")]; + tensor var_11047 = const()[name = tensor("op_11047"), val = tensor([1, 1])]; + tensor var_11049 = const()[name = tensor("op_11049"), val = tensor([1, 1])]; + tensor var_11051_pad_type_0 = const()[name = tensor("op_11051_pad_type_0"), val = tensor("custom")]; + tensor var_11051_pad_0 = const()[name = tensor("op_11051_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1620884800))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1633992064))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1633992640)))]; + tensor var_11051_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_11049, groups = var_6865, pad = var_11051_pad_0, pad_type = var_11051_pad_type_0, strides = var_11047, weight = up_blocks_0_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16_palettized, x = input_647_cast)[name = tensor("op_11051_cast")]; + tensor 
var_11052_split_sizes_0 = const()[name = tensor("op_11052_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11052_axis_0 = const()[name = tensor("op_11052_axis_0"), val = tensor(1)]; + tensor var_11052_cast_0, tensor var_11052_cast_1 = split(axis = var_11052_axis_0, split_sizes = var_11052_split_sizes_0, x = var_11051_cast)[name = tensor("op_11052_cast")]; + tensor var_11054_mode_0 = const()[name = tensor("op_11054_mode_0"), val = tensor("EXACT")]; + tensor var_11054_cast = gelu(mode = var_11054_mode_0, x = var_11052_cast_1)[name = tensor("op_11054_cast")]; + tensor input_649_cast = mul(x = var_11052_cast_0, y = var_11054_cast)[name = tensor("input_649_cast")]; + tensor var_11058 = const()[name = tensor("op_11058"), val = tensor([1, 1])]; + tensor var_11060 = const()[name = tensor("op_11060"), val = tensor([1, 1])]; + tensor var_11062_pad_type_0 = const()[name = tensor("op_11062_pad_type_0"), val = tensor("custom")]; + tensor var_11062_pad_0 = const()[name = tensor("op_11062_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1634013184))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1640566848))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1640567424)))]; + tensor var_11062_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_11060, groups = var_6865, pad = var_11062_pad_0, pad_type = var_11062_pad_type_0, strides = var_11058, weight = up_blocks_0_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16_palettized, x = input_649_cast)[name = tensor("op_11062_cast")]; + tensor inputs_337_cast = add(x = var_11062_cast, y = inputs_335_cast)[name = tensor("inputs_337_cast")]; + tensor var_11072 = const()[name = tensor("op_11072"), val = tensor([1])]; + tensor channels_mean_337_cast = reduce_mean(axes = var_11072, keep_dims = var_6860, x = inputs_337_cast)[name = tensor("channels_mean_337_cast")]; + tensor zero_mean_337_cast = sub(x = inputs_337_cast, y = channels_mean_337_cast)[name = tensor("zero_mean_337_cast")]; + tensor zero_mean_sq_337_cast = mul(x = zero_mean_337_cast, y = zero_mean_337_cast)[name = tensor("zero_mean_sq_337_cast")]; + tensor var_11076 = const()[name = tensor("op_11076"), val = tensor([1])]; + tensor var_11077_cast = reduce_mean(axes = var_11076, keep_dims = var_6860, x = zero_mean_sq_337_cast)[name = tensor("op_11077_cast")]; + tensor var_11078_to_fp16 = const()[name = tensor("op_11078_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11079_cast = add(x = var_11077_cast, y = var_11078_to_fp16)[name = tensor("op_11079_cast")]; + tensor denom_337_epsilon_0_to_fp16 = const()[name = tensor("denom_337_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_337_cast = rsqrt(epsilon = denom_337_epsilon_0_to_fp16, x = var_11079_cast)[name = tensor("denom_337_cast")]; + tensor out_337_cast = mul(x = zero_mean_337_cast, y = denom_337_cast)[name = tensor("out_337_cast")]; + tensor var_11083_to_fp16 = const()[name = 
tensor("op_11083_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1640570048)))]; + tensor var_11084_cast = add(x = out_337_cast, y = var_11083_to_fp16)[name = tensor("op_11084_cast")]; + tensor var_11086_to_fp16 = const()[name = tensor("op_11086_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1640572672)))]; + tensor hidden_states_447_cast = mul(x = var_11084_cast, y = var_11086_to_fp16)[name = tensor("hidden_states_447_cast")]; + tensor var_11093 = const()[name = tensor("op_11093"), val = tensor([1, 1])]; + tensor var_11095 = const()[name = tensor("op_11095"), val = tensor([1, 1])]; + tensor q_225_pad_type_0 = const()[name = tensor("q_225_pad_type_0"), val = tensor("custom")]; + tensor q_225_pad_0 = const()[name = tensor("q_225_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1640575296))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1641394560))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_225_cast = conv(dilations = var_11095, groups = var_6865, pad = q_225_pad_0, pad_type = q_225_pad_type_0, strides = var_11093, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_447_cast)[name = tensor("q_225_cast")]; + tensor var_11099 = const()[name = tensor("op_11099"), val = tensor([1, 1])]; + tensor var_11101 = const()[name = tensor("op_11101"), val = tensor([1, 1])]; + tensor k_225_pad_type_0 = const()[name = tensor("k_225_pad_type_0"), val = tensor("custom")]; + tensor k_225_pad_0 = const()[name = tensor("k_225_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1641394688))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1642623552))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_225_cast = conv(dilations = var_11101, groups = var_6865, pad = k_225_pad_0, pad_type = k_225_pad_type_0, strides = var_11099, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_447_cast)[name = tensor("k_225_cast")]; + tensor var_11105 = const()[name = tensor("op_11105"), val = tensor([1, 1])]; + tensor var_11107 = const()[name = tensor("op_11107"), val = tensor([1, 1])]; + tensor v_225_pad_type_0 = const()[name = tensor("v_225_pad_type_0"), val = tensor("custom")]; + tensor v_225_pad_0 = const()[name = tensor("v_225_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1642623744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1644262208))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor 
v_225_cast = conv(dilations = var_11107, groups = var_6865, pad = v_225_pad_0, pad_type = v_225_pad_type_0, strides = var_11105, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_447_cast)[name = tensor("v_225_cast")]; + tensor var_11111 = const()[name = tensor("op_11111"), val = tensor([2, 20, 64, -1])]; + tensor var_11112_cast = reshape(shape = var_11111, x = q_225_cast)[name = tensor("op_11112_cast")]; + tensor var_11113 = const()[name = tensor("op_11113"), val = tensor([2, 20, 64, -1])]; + tensor var_11114_cast = reshape(shape = var_11113, x = k_225_cast)[name = tensor("op_11114_cast")]; + tensor var_11115 = const()[name = tensor("op_11115"), val = tensor([2, 20, 64, -1])]; + tensor var_11116_cast = reshape(shape = var_11115, x = v_225_cast)[name = tensor("op_11116_cast")]; + tensor attn_weights_449_transpose_x_0 = const()[name = tensor("attn_weights_449_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_449_transpose_y_0 = const()[name = tensor("attn_weights_449_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_449_cast = matmul(transpose_x = attn_weights_449_transpose_x_0, transpose_y = attn_weights_449_transpose_y_0, x = var_11112_cast, y = var_11114_cast)[name = tensor("attn_weights_449_cast")]; + tensor attn_weights_451_cast = mul(x = attn_weights_449_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_451_cast")]; + tensor var_11120_cast = softmax(axis = var_6849, x = attn_weights_451_cast)[name = tensor("op_11120_cast")]; + tensor attn_225_transpose_x_0 = const()[name = tensor("attn_225_transpose_x_0"), val = tensor(false)]; + tensor attn_225_transpose_y_0 = const()[name = tensor("attn_225_transpose_y_0"), val = tensor(true)]; + tensor attn_225_cast = matmul(transpose_x = attn_225_transpose_x_0, transpose_y = attn_225_transpose_y_0, x = var_11116_cast, y = var_11120_cast)[name = tensor("attn_225_cast")]; + tensor var_11124 = const()[name = tensor("op_11124"), val = tensor([2, 1280, 1, -1])]; + tensor input_651_cast = reshape(shape = var_11124, x = attn_225_cast)[name = tensor("input_651_cast")]; + tensor var_11129 = const()[name = tensor("op_11129"), val = tensor([1, 1])]; + tensor var_11131 = const()[name = tensor("op_11131"), val = tensor([1, 1])]; + tensor var_11133_pad_type_0 = const()[name = tensor("op_11133_pad_type_0"), val = tensor("custom")]; + tensor var_11133_pad_0 = const()[name = tensor("op_11133_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1644262784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1645901248))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1645901824)))]; + tensor var_11133_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_bias_to_fp16, dilations = var_11131, groups = var_6865, pad = var_11133_pad_0, pad_type = var_11133_pad_type_0, strides = var_11129, weight = 
up_blocks_0_attentions_2_transformer_blocks_2_attn1_to_out_0_weight_to_fp16_palettized, x = input_651_cast)[name = tensor("op_11133_cast")]; + tensor inputs_339_cast = add(x = var_11133_cast, y = inputs_337_cast)[name = tensor("inputs_339_cast")]; + tensor var_11137 = const()[name = tensor("op_11137"), val = tensor([1])]; + tensor channels_mean_339_cast = reduce_mean(axes = var_11137, keep_dims = var_6860, x = inputs_339_cast)[name = tensor("channels_mean_339_cast")]; + tensor zero_mean_339_cast = sub(x = inputs_339_cast, y = channels_mean_339_cast)[name = tensor("zero_mean_339_cast")]; + tensor zero_mean_sq_339_cast = mul(x = zero_mean_339_cast, y = zero_mean_339_cast)[name = tensor("zero_mean_sq_339_cast")]; + tensor var_11141 = const()[name = tensor("op_11141"), val = tensor([1])]; + tensor var_11142_cast = reduce_mean(axes = var_11141, keep_dims = var_6860, x = zero_mean_sq_339_cast)[name = tensor("op_11142_cast")]; + tensor var_11143_to_fp16 = const()[name = tensor("op_11143_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11144_cast = add(x = var_11142_cast, y = var_11143_to_fp16)[name = tensor("op_11144_cast")]; + tensor denom_339_epsilon_0_to_fp16 = const()[name = tensor("denom_339_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_339_cast = rsqrt(epsilon = denom_339_epsilon_0_to_fp16, x = var_11144_cast)[name = tensor("denom_339_cast")]; + tensor out_339_cast = mul(x = zero_mean_339_cast, y = denom_339_cast)[name = tensor("out_339_cast")]; + tensor var_11148_to_fp16 = const()[name = tensor("op_11148_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1645904448)))]; + tensor var_11149_cast = add(x = out_339_cast, y = var_11148_to_fp16)[name = tensor("op_11149_cast")]; + tensor var_11151_to_fp16 = const()[name = tensor("op_11151_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1645907072)))]; + tensor hidden_states_449_cast = mul(x = var_11149_cast, y = var_11151_to_fp16)[name = tensor("hidden_states_449_cast")]; + tensor var_11158 = const()[name = tensor("op_11158"), val = tensor([1, 1])]; + tensor var_11160 = const()[name = tensor("op_11160"), val = tensor([1, 1])]; + tensor q_227_pad_type_0 = const()[name = tensor("q_227_pad_type_0"), val = tensor("custom")]; + tensor q_227_pad_0 = const()[name = tensor("q_227_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1645909696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1647138560))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_227_cast = conv(dilations = var_11160, groups = var_6865, pad = q_227_pad_0, pad_type = q_227_pad_type_0, strides = var_11158, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_449_cast)[name = tensor("q_227_cast")]; + tensor var_11164 = const()[name = tensor("op_11164"), val = tensor([1, 1])]; + tensor var_11166 = const()[name = tensor("op_11166"), val = tensor([1, 1])]; + tensor k_227_pad_type_0 = const()[name = tensor("k_227_pad_type_0"), val = tensor("custom")]; + tensor k_227_pad_0 = const()[name = tensor("k_227_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1647138752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1648449536))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_227_cast = conv(dilations = var_11166, groups = var_6865, pad = k_227_pad_0, pad_type = k_227_pad_type_0, strides = var_11164, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_227_cast")]; + tensor var_11170 = const()[name = tensor("op_11170"), val = tensor([1, 1])]; + tensor var_11172 = const()[name = tensor("op_11172"), val = tensor([1, 1])]; + tensor v_227_pad_type_0 = const()[name = tensor("v_227_pad_type_0"), val = tensor("custom")]; + tensor v_227_pad_0 = const()[name = tensor("v_227_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1648449664))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1650415808))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_227_cast = conv(dilations = var_11172, groups = var_6865, pad = v_227_pad_0, pad_type = v_227_pad_type_0, strides = var_11170, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_227_cast")]; + tensor var_11176 = const()[name = tensor("op_11176"), val = tensor([2, 20, 64, -1])]; + tensor var_11177_cast = reshape(shape = var_11176, x = q_227_cast)[name = tensor("op_11177_cast")]; + tensor var_11178 = const()[name = tensor("op_11178"), val = tensor([2, 20, 64, -1])]; + tensor var_11179_cast = reshape(shape = var_11178, x = k_227_cast)[name = tensor("op_11179_cast")]; + tensor var_11180 = const()[name = tensor("op_11180"), val = tensor([2, 20, 64, -1])]; + tensor var_11181_cast = reshape(shape = var_11180, x = v_227_cast)[name = tensor("op_11181_cast")]; + tensor attn_weights_453_transpose_x_0 = const()[name = tensor("attn_weights_453_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_453_transpose_y_0 = const()[name = tensor("attn_weights_453_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_453_cast = matmul(transpose_x = attn_weights_453_transpose_x_0, transpose_y = attn_weights_453_transpose_y_0, x = var_11177_cast, y = var_11179_cast)[name = tensor("attn_weights_453_cast")]; + tensor attn_weights_455_cast = mul(x = attn_weights_453_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_455_cast")]; + tensor var_11185_cast = softmax(axis = var_6849, x = attn_weights_455_cast)[name = tensor("op_11185_cast")]; + tensor attn_227_transpose_x_0 = const()[name = tensor("attn_227_transpose_x_0"), val = tensor(false)]; + tensor attn_227_transpose_y_0 = const()[name = tensor("attn_227_transpose_y_0"), val = tensor(true)]; + tensor attn_227_cast = matmul(transpose_x = attn_227_transpose_x_0, transpose_y = attn_227_transpose_y_0, x = var_11181_cast, y = var_11185_cast)[name = tensor("attn_227_cast")]; + tensor var_11189 = const()[name = tensor("op_11189"), val = 
tensor([2, 1280, 1, -1])]; + tensor input_653_cast = reshape(shape = var_11189, x = attn_227_cast)[name = tensor("input_653_cast")]; + tensor var_11194 = const()[name = tensor("op_11194"), val = tensor([1, 1])]; + tensor var_11196 = const()[name = tensor("op_11196"), val = tensor([1, 1])]; + tensor var_11198_pad_type_0 = const()[name = tensor("op_11198_pad_type_0"), val = tensor("custom")]; + tensor var_11198_pad_0 = const()[name = tensor("op_11198_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1650416000))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1651644864))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1651645056)))]; + tensor var_11198_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_bias_to_fp16, dilations = var_11196, groups = var_6865, pad = var_11198_pad_0, pad_type = var_11198_pad_type_0, strides = var_11194, weight = up_blocks_0_attentions_2_transformer_blocks_2_attn2_to_out_0_weight_to_fp16_palettized, x = input_653_cast)[name = tensor("op_11198_cast")]; + tensor inputs_341_cast = add(x = var_11198_cast, y = inputs_339_cast)[name = tensor("inputs_341_cast")]; + tensor var_11202 = const()[name = tensor("op_11202"), val = tensor([1])]; + tensor channels_mean_341_cast = reduce_mean(axes = var_11202, keep_dims = var_6860, x = inputs_341_cast)[name = tensor("channels_mean_341_cast")]; + tensor zero_mean_341_cast = sub(x = inputs_341_cast, y = channels_mean_341_cast)[name = tensor("zero_mean_341_cast")]; + tensor zero_mean_sq_341_cast = mul(x = zero_mean_341_cast, y = zero_mean_341_cast)[name = tensor("zero_mean_sq_341_cast")]; + tensor var_11206 = const()[name = tensor("op_11206"), val = tensor([1])]; + tensor var_11207_cast = reduce_mean(axes = var_11206, keep_dims = var_6860, x = zero_mean_sq_341_cast)[name = tensor("op_11207_cast")]; + tensor var_11208_to_fp16 = const()[name = tensor("op_11208_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11209_cast = add(x = var_11207_cast, y = var_11208_to_fp16)[name = tensor("op_11209_cast")]; + tensor denom_341_epsilon_0_to_fp16 = const()[name = tensor("denom_341_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_341_cast = rsqrt(epsilon = denom_341_epsilon_0_to_fp16, x = var_11209_cast)[name = tensor("denom_341_cast")]; + tensor out_341_cast = mul(x = zero_mean_341_cast, y = denom_341_cast)[name = tensor("out_341_cast")]; + tensor var_11213_to_fp16 = const()[name = tensor("op_11213_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1651647680)))]; + tensor var_11214_cast = add(x = out_341_cast, y = var_11213_to_fp16)[name = tensor("op_11214_cast")]; + tensor var_11216_to_fp16 = const()[name = tensor("op_11216_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1651650304)))]; + tensor input_655_cast = mul(x = var_11214_cast, y = var_11216_to_fp16)[name = tensor("input_655_cast")]; + tensor 
var_11224 = const()[name = tensor("op_11224"), val = tensor([1, 1])]; + tensor var_11226 = const()[name = tensor("op_11226"), val = tensor([1, 1])]; + tensor var_11228_pad_type_0 = const()[name = tensor("op_11228_pad_type_0"), val = tensor("custom")]; + tensor var_11228_pad_0 = const()[name = tensor("op_11228_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1651652928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1664760192))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1664760768)))]; + tensor var_11228_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_bias_to_fp16, dilations = var_11226, groups = var_6865, pad = var_11228_pad_0, pad_type = var_11228_pad_type_0, strides = var_11224, weight = up_blocks_0_attentions_2_transformer_blocks_2_ff_net_0_proj_weight_to_fp16_palettized, x = input_655_cast)[name = tensor("op_11228_cast")]; + tensor var_11229_split_sizes_0 = const()[name = tensor("op_11229_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11229_axis_0 = const()[name = tensor("op_11229_axis_0"), val = tensor(1)]; + tensor var_11229_cast_0, tensor var_11229_cast_1 = split(axis = var_11229_axis_0, split_sizes = var_11229_split_sizes_0, x = var_11228_cast)[name = tensor("op_11229_cast")]; + tensor var_11231_mode_0 = const()[name = tensor("op_11231_mode_0"), val = tensor("EXACT")]; + tensor var_11231_cast = gelu(mode = var_11231_mode_0, x = var_11229_cast_1)[name = tensor("op_11231_cast")]; + tensor input_657_cast = mul(x = var_11229_cast_0, y = var_11231_cast)[name = tensor("input_657_cast")]; + tensor var_11235 = const()[name = tensor("op_11235"), val = tensor([1, 1])]; + tensor var_11237 = const()[name = tensor("op_11237"), val = tensor([1, 1])]; + tensor var_11239_pad_type_0 = const()[name = tensor("op_11239_pad_type_0"), val = tensor("custom")]; + tensor var_11239_pad_0 = const()[name = tensor("op_11239_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1664781312))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1671334976))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1671335552)))]; + tensor var_11239_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_bias_to_fp16, dilations = var_11237, groups = var_6865, pad = var_11239_pad_0, pad_type = var_11239_pad_type_0, strides = var_11235, weight = 
up_blocks_0_attentions_2_transformer_blocks_2_ff_net_2_weight_to_fp16_palettized, x = input_657_cast)[name = tensor("op_11239_cast")]; + tensor inputs_343_cast = add(x = var_11239_cast, y = inputs_341_cast)[name = tensor("inputs_343_cast")]; + tensor var_11249 = const()[name = tensor("op_11249"), val = tensor([1])]; + tensor channels_mean_343_cast = reduce_mean(axes = var_11249, keep_dims = var_6860, x = inputs_343_cast)[name = tensor("channels_mean_343_cast")]; + tensor zero_mean_343_cast = sub(x = inputs_343_cast, y = channels_mean_343_cast)[name = tensor("zero_mean_343_cast")]; + tensor zero_mean_sq_343_cast = mul(x = zero_mean_343_cast, y = zero_mean_343_cast)[name = tensor("zero_mean_sq_343_cast")]; + tensor var_11253 = const()[name = tensor("op_11253"), val = tensor([1])]; + tensor var_11254_cast = reduce_mean(axes = var_11253, keep_dims = var_6860, x = zero_mean_sq_343_cast)[name = tensor("op_11254_cast")]; + tensor var_11255_to_fp16 = const()[name = tensor("op_11255_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11256_cast = add(x = var_11254_cast, y = var_11255_to_fp16)[name = tensor("op_11256_cast")]; + tensor denom_343_epsilon_0_to_fp16 = const()[name = tensor("denom_343_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_343_cast = rsqrt(epsilon = denom_343_epsilon_0_to_fp16, x = var_11256_cast)[name = tensor("denom_343_cast")]; + tensor out_343_cast = mul(x = zero_mean_343_cast, y = denom_343_cast)[name = tensor("out_343_cast")]; + tensor var_11260_to_fp16 = const()[name = tensor("op_11260_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1671338176)))]; + tensor var_11261_cast = add(x = out_343_cast, y = var_11260_to_fp16)[name = tensor("op_11261_cast")]; + tensor var_11263_to_fp16 = const()[name = tensor("op_11263_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1671340800)))]; + tensor hidden_states_453_cast = mul(x = var_11261_cast, y = var_11263_to_fp16)[name = tensor("hidden_states_453_cast")]; + tensor var_11270 = const()[name = tensor("op_11270"), val = tensor([1, 1])]; + tensor var_11272 = const()[name = tensor("op_11272"), val = tensor([1, 1])]; + tensor q_229_pad_type_0 = const()[name = tensor("q_229_pad_type_0"), val = tensor("custom")]; + tensor q_229_pad_0 = const()[name = tensor("q_229_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1671343424))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1672572288))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_229_cast = conv(dilations = var_11272, groups = var_6865, pad = q_229_pad_0, pad_type = q_229_pad_type_0, strides = var_11270, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_453_cast)[name = tensor("q_229_cast")]; + tensor var_11276 = const()[name = tensor("op_11276"), val = tensor([1, 1])]; + tensor var_11278 = const()[name = tensor("op_11278"), val = tensor([1, 1])]; + tensor k_229_pad_type_0 = const()[name = tensor("k_229_pad_type_0"), val = tensor("custom")]; + tensor k_229_pad_0 = const()[name = tensor("k_229_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1672572480))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1673801344))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_229_cast = conv(dilations = var_11278, groups = var_6865, pad = k_229_pad_0, pad_type = k_229_pad_type_0, strides = var_11276, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_453_cast)[name = tensor("k_229_cast")]; + tensor var_11282 = const()[name = tensor("op_11282"), val = tensor([1, 1])]; + tensor var_11284 = const()[name = tensor("op_11284"), val = tensor([1, 1])]; + tensor v_229_pad_type_0 = const()[name = tensor("v_229_pad_type_0"), val = tensor("custom")]; + tensor v_229_pad_0 = const()[name = tensor("v_229_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1673801536))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1675440000))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_229_cast = conv(dilations = var_11284, groups = var_6865, pad = v_229_pad_0, pad_type = v_229_pad_type_0, strides = var_11282, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_453_cast)[name = tensor("v_229_cast")]; + tensor var_11288 = const()[name = tensor("op_11288"), val = tensor([2, 20, 64, -1])]; + tensor var_11289_cast = reshape(shape = var_11288, x = q_229_cast)[name = tensor("op_11289_cast")]; + tensor var_11290 = const()[name = tensor("op_11290"), val = tensor([2, 20, 64, -1])]; + tensor var_11291_cast = reshape(shape = var_11290, x = k_229_cast)[name = tensor("op_11291_cast")]; + tensor var_11292 = const()[name = tensor("op_11292"), val = tensor([2, 20, 64, -1])]; + tensor var_11293_cast = reshape(shape = var_11292, x = v_229_cast)[name = tensor("op_11293_cast")]; + tensor attn_weights_457_transpose_x_0 = const()[name = tensor("attn_weights_457_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_457_transpose_y_0 = const()[name = tensor("attn_weights_457_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_457_cast = matmul(transpose_x = attn_weights_457_transpose_x_0, transpose_y = attn_weights_457_transpose_y_0, x = var_11289_cast, y = var_11291_cast)[name = tensor("attn_weights_457_cast")]; + tensor attn_weights_459_cast = mul(x = attn_weights_457_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_459_cast")]; + tensor var_11297_cast = softmax(axis = var_6849, x = attn_weights_459_cast)[name = tensor("op_11297_cast")]; + tensor attn_229_transpose_x_0 = const()[name = tensor("attn_229_transpose_x_0"), val = tensor(false)]; + tensor attn_229_transpose_y_0 = const()[name = tensor("attn_229_transpose_y_0"), val = tensor(true)]; + tensor attn_229_cast = matmul(transpose_x = attn_229_transpose_x_0, transpose_y = attn_229_transpose_y_0, x = var_11293_cast, y = var_11297_cast)[name = tensor("attn_229_cast")]; + tensor var_11301 = const()[name = tensor("op_11301"), val 
= tensor([2, 1280, 1, -1])]; + tensor input_659_cast = reshape(shape = var_11301, x = attn_229_cast)[name = tensor("input_659_cast")]; + tensor var_11306 = const()[name = tensor("op_11306"), val = tensor([1, 1])]; + tensor var_11308 = const()[name = tensor("op_11308"), val = tensor([1, 1])]; + tensor var_11310_pad_type_0 = const()[name = tensor("op_11310_pad_type_0"), val = tensor("custom")]; + tensor var_11310_pad_0 = const()[name = tensor("op_11310_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1675440576))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1677079040))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1677079616)))]; + tensor var_11310_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_bias_to_fp16, dilations = var_11308, groups = var_6865, pad = var_11310_pad_0, pad_type = var_11310_pad_type_0, strides = var_11306, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn1_to_out_0_weight_to_fp16_palettized, x = input_659_cast)[name = tensor("op_11310_cast")]; + tensor inputs_345_cast = add(x = var_11310_cast, y = inputs_343_cast)[name = tensor("inputs_345_cast")]; + tensor var_11314 = const()[name = tensor("op_11314"), val = tensor([1])]; + tensor channels_mean_345_cast = reduce_mean(axes = var_11314, keep_dims = var_6860, x = inputs_345_cast)[name = tensor("channels_mean_345_cast")]; + tensor zero_mean_345_cast = sub(x = inputs_345_cast, y = channels_mean_345_cast)[name = tensor("zero_mean_345_cast")]; + tensor zero_mean_sq_345_cast = mul(x = zero_mean_345_cast, y = zero_mean_345_cast)[name = tensor("zero_mean_sq_345_cast")]; + tensor var_11318 = const()[name = tensor("op_11318"), val = tensor([1])]; + tensor var_11319_cast = reduce_mean(axes = var_11318, keep_dims = var_6860, x = zero_mean_sq_345_cast)[name = tensor("op_11319_cast")]; + tensor var_11320_to_fp16 = const()[name = tensor("op_11320_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11321_cast = add(x = var_11319_cast, y = var_11320_to_fp16)[name = tensor("op_11321_cast")]; + tensor denom_345_epsilon_0_to_fp16 = const()[name = tensor("denom_345_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_345_cast = rsqrt(epsilon = denom_345_epsilon_0_to_fp16, x = var_11321_cast)[name = tensor("denom_345_cast")]; + tensor out_345_cast = mul(x = zero_mean_345_cast, y = denom_345_cast)[name = tensor("out_345_cast")]; + tensor var_11325_to_fp16 = const()[name = tensor("op_11325_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1677082240)))]; + tensor var_11326_cast = add(x = out_345_cast, y = var_11325_to_fp16)[name = tensor("op_11326_cast")]; + tensor var_11328_to_fp16 = const()[name = tensor("op_11328_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1677084864)))]; + tensor hidden_states_455_cast = mul(x = var_11326_cast, y = var_11328_to_fp16)[name = 
tensor("hidden_states_455_cast")]; + tensor var_11335 = const()[name = tensor("op_11335"), val = tensor([1, 1])]; + tensor var_11337 = const()[name = tensor("op_11337"), val = tensor([1, 1])]; + tensor q_231_pad_type_0 = const()[name = tensor("q_231_pad_type_0"), val = tensor("custom")]; + tensor q_231_pad_0 = const()[name = tensor("q_231_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1677087488))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1677906752))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_231_cast = conv(dilations = var_11337, groups = var_6865, pad = q_231_pad_0, pad_type = q_231_pad_type_0, strides = var_11335, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_455_cast)[name = tensor("q_231_cast")]; + tensor var_11341 = const()[name = tensor("op_11341"), val = tensor([1, 1])]; + tensor var_11343 = const()[name = tensor("op_11343"), val = tensor([1, 1])]; + tensor k_231_pad_type_0 = const()[name = tensor("k_231_pad_type_0"), val = tensor("custom")]; + tensor k_231_pad_0 = const()[name = tensor("k_231_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1677906880))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1679217664))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_231_cast = conv(dilations = var_11343, groups = var_6865, pad = k_231_pad_0, pad_type = k_231_pad_type_0, strides = var_11341, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_231_cast")]; + tensor var_11347 = const()[name = tensor("op_11347"), val = tensor([1, 1])]; + tensor var_11349 = const()[name = tensor("op_11349"), val = tensor([1, 1])]; + tensor v_231_pad_type_0 = const()[name = tensor("v_231_pad_type_0"), val = tensor("custom")]; + tensor v_231_pad_0 = const()[name = tensor("v_231_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1679217792))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1681183936))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_231_cast = conv(dilations = var_11349, groups = var_6865, pad = v_231_pad_0, pad_type = v_231_pad_type_0, strides = var_11347, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_231_cast")]; + tensor var_11353 = const()[name = tensor("op_11353"), val = tensor([2, 20, 64, -1])]; + tensor var_11354_cast = reshape(shape = var_11353, x = q_231_cast)[name = tensor("op_11354_cast")]; + tensor var_11355 = const()[name = 
tensor("op_11355"), val = tensor([2, 20, 64, -1])]; + tensor var_11356_cast = reshape(shape = var_11355, x = k_231_cast)[name = tensor("op_11356_cast")]; + tensor var_11357 = const()[name = tensor("op_11357"), val = tensor([2, 20, 64, -1])]; + tensor var_11358_cast = reshape(shape = var_11357, x = v_231_cast)[name = tensor("op_11358_cast")]; + tensor attn_weights_461_transpose_x_0 = const()[name = tensor("attn_weights_461_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_461_transpose_y_0 = const()[name = tensor("attn_weights_461_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_461_cast = matmul(transpose_x = attn_weights_461_transpose_x_0, transpose_y = attn_weights_461_transpose_y_0, x = var_11354_cast, y = var_11356_cast)[name = tensor("attn_weights_461_cast")]; + tensor attn_weights_463_cast = mul(x = attn_weights_461_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_463_cast")]; + tensor var_11362_cast = softmax(axis = var_6849, x = attn_weights_463_cast)[name = tensor("op_11362_cast")]; + tensor attn_231_transpose_x_0 = const()[name = tensor("attn_231_transpose_x_0"), val = tensor(false)]; + tensor attn_231_transpose_y_0 = const()[name = tensor("attn_231_transpose_y_0"), val = tensor(true)]; + tensor attn_231_cast = matmul(transpose_x = attn_231_transpose_x_0, transpose_y = attn_231_transpose_y_0, x = var_11358_cast, y = var_11362_cast)[name = tensor("attn_231_cast")]; + tensor var_11366 = const()[name = tensor("op_11366"), val = tensor([2, 1280, 1, -1])]; + tensor input_661_cast = reshape(shape = var_11366, x = attn_231_cast)[name = tensor("input_661_cast")]; + tensor var_11371 = const()[name = tensor("op_11371"), val = tensor([1, 1])]; + tensor var_11373 = const()[name = tensor("op_11373"), val = tensor([1, 1])]; + tensor var_11375_pad_type_0 = const()[name = tensor("op_11375_pad_type_0"), val = tensor("custom")]; + tensor var_11375_pad_0 = const()[name = tensor("op_11375_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1681184128))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1682003392))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1682003520)))]; + tensor var_11375_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_bias_to_fp16, dilations = var_11373, groups = var_6865, pad = var_11375_pad_0, pad_type = var_11375_pad_type_0, strides = var_11371, weight = up_blocks_0_attentions_2_transformer_blocks_3_attn2_to_out_0_weight_to_fp16_palettized, x = input_661_cast)[name = tensor("op_11375_cast")]; + tensor inputs_347_cast = add(x = var_11375_cast, y = inputs_345_cast)[name = tensor("inputs_347_cast")]; + tensor var_11379 = const()[name = tensor("op_11379"), val = tensor([1])]; + tensor channels_mean_347_cast = reduce_mean(axes = var_11379, keep_dims = var_6860, x = inputs_347_cast)[name = tensor("channels_mean_347_cast")]; + tensor zero_mean_347_cast = sub(x = inputs_347_cast, y = channels_mean_347_cast)[name = 
tensor("zero_mean_347_cast")]; + tensor zero_mean_sq_347_cast = mul(x = zero_mean_347_cast, y = zero_mean_347_cast)[name = tensor("zero_mean_sq_347_cast")]; + tensor var_11383 = const()[name = tensor("op_11383"), val = tensor([1])]; + tensor var_11384_cast = reduce_mean(axes = var_11383, keep_dims = var_6860, x = zero_mean_sq_347_cast)[name = tensor("op_11384_cast")]; + tensor var_11385_to_fp16 = const()[name = tensor("op_11385_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11386_cast = add(x = var_11384_cast, y = var_11385_to_fp16)[name = tensor("op_11386_cast")]; + tensor denom_347_epsilon_0_to_fp16 = const()[name = tensor("denom_347_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_347_cast = rsqrt(epsilon = denom_347_epsilon_0_to_fp16, x = var_11386_cast)[name = tensor("denom_347_cast")]; + tensor out_347_cast = mul(x = zero_mean_347_cast, y = denom_347_cast)[name = tensor("out_347_cast")]; + tensor var_11390_to_fp16 = const()[name = tensor("op_11390_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1682006144)))]; + tensor var_11391_cast = add(x = out_347_cast, y = var_11390_to_fp16)[name = tensor("op_11391_cast")]; + tensor var_11393_to_fp16 = const()[name = tensor("op_11393_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1682008768)))]; + tensor input_663_cast = mul(x = var_11391_cast, y = var_11393_to_fp16)[name = tensor("input_663_cast")]; + tensor var_11401 = const()[name = tensor("op_11401"), val = tensor([1, 1])]; + tensor var_11403 = const()[name = tensor("op_11403"), val = tensor([1, 1])]; + tensor var_11405_pad_type_0 = const()[name = tensor("op_11405_pad_type_0"), val = tensor("custom")]; + tensor var_11405_pad_0 = const()[name = tensor("op_11405_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1682011392))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1695118656))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1695119232)))]; + tensor var_11405_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_bias_to_fp16, dilations = var_11403, groups = var_6865, pad = var_11405_pad_0, pad_type = var_11405_pad_type_0, strides = var_11401, weight = up_blocks_0_attentions_2_transformer_blocks_3_ff_net_0_proj_weight_to_fp16_palettized, x = input_663_cast)[name = tensor("op_11405_cast")]; + tensor var_11406_split_sizes_0 = const()[name = tensor("op_11406_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11406_axis_0 = const()[name = tensor("op_11406_axis_0"), val = tensor(1)]; + tensor var_11406_cast_0, tensor var_11406_cast_1 = split(axis = var_11406_axis_0, split_sizes = var_11406_split_sizes_0, x = var_11405_cast)[name = tensor("op_11406_cast")]; + tensor var_11408_mode_0 = const()[name = tensor("op_11408_mode_0"), val = tensor("EXACT")]; + tensor var_11408_cast = gelu(mode = var_11408_mode_0, x = var_11406_cast_1)[name = 
tensor("op_11408_cast")]; + tensor input_665_cast = mul(x = var_11406_cast_0, y = var_11408_cast)[name = tensor("input_665_cast")]; + tensor var_11412 = const()[name = tensor("op_11412"), val = tensor([1, 1])]; + tensor var_11414 = const()[name = tensor("op_11414"), val = tensor([1, 1])]; + tensor var_11416_pad_type_0 = const()[name = tensor("op_11416_pad_type_0"), val = tensor("custom")]; + tensor var_11416_pad_0 = const()[name = tensor("op_11416_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1695139776))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1701693440))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1701694016)))]; + tensor var_11416_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_bias_to_fp16, dilations = var_11414, groups = var_6865, pad = var_11416_pad_0, pad_type = var_11416_pad_type_0, strides = var_11412, weight = up_blocks_0_attentions_2_transformer_blocks_3_ff_net_2_weight_to_fp16_palettized, x = input_665_cast)[name = tensor("op_11416_cast")]; + tensor inputs_349_cast = add(x = var_11416_cast, y = inputs_347_cast)[name = tensor("inputs_349_cast")]; + tensor var_11426 = const()[name = tensor("op_11426"), val = tensor([1])]; + tensor channels_mean_349_cast = reduce_mean(axes = var_11426, keep_dims = var_6860, x = inputs_349_cast)[name = tensor("channels_mean_349_cast")]; + tensor zero_mean_349_cast = sub(x = inputs_349_cast, y = channels_mean_349_cast)[name = tensor("zero_mean_349_cast")]; + tensor zero_mean_sq_349_cast = mul(x = zero_mean_349_cast, y = zero_mean_349_cast)[name = tensor("zero_mean_sq_349_cast")]; + tensor var_11430 = const()[name = tensor("op_11430"), val = tensor([1])]; + tensor var_11431_cast = reduce_mean(axes = var_11430, keep_dims = var_6860, x = zero_mean_sq_349_cast)[name = tensor("op_11431_cast")]; + tensor var_11432_to_fp16 = const()[name = tensor("op_11432_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11433_cast = add(x = var_11431_cast, y = var_11432_to_fp16)[name = tensor("op_11433_cast")]; + tensor denom_349_epsilon_0_to_fp16 = const()[name = tensor("denom_349_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_349_cast = rsqrt(epsilon = denom_349_epsilon_0_to_fp16, x = var_11433_cast)[name = tensor("denom_349_cast")]; + tensor out_349_cast = mul(x = zero_mean_349_cast, y = denom_349_cast)[name = tensor("out_349_cast")]; + tensor var_11437_to_fp16 = const()[name = tensor("op_11437_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1701696640)))]; + tensor var_11438_cast = add(x = out_349_cast, y = var_11437_to_fp16)[name = tensor("op_11438_cast")]; + tensor var_11440_to_fp16 = const()[name = tensor("op_11440_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1701699264)))]; + tensor hidden_states_459_cast = mul(x = var_11438_cast, y = var_11440_to_fp16)[name = tensor("hidden_states_459_cast")]; + tensor var_11447 = const()[name = 
tensor("op_11447"), val = tensor([1, 1])]; + tensor var_11449 = const()[name = tensor("op_11449"), val = tensor([1, 1])]; + tensor q_233_pad_type_0 = const()[name = tensor("q_233_pad_type_0"), val = tensor("custom")]; + tensor q_233_pad_0 = const()[name = tensor("q_233_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1701701888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1702930752))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_233_cast = conv(dilations = var_11449, groups = var_6865, pad = q_233_pad_0, pad_type = q_233_pad_type_0, strides = var_11447, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_459_cast)[name = tensor("q_233_cast")]; + tensor var_11453 = const()[name = tensor("op_11453"), val = tensor([1, 1])]; + tensor var_11455 = const()[name = tensor("op_11455"), val = tensor([1, 1])]; + tensor k_233_pad_type_0 = const()[name = tensor("k_233_pad_type_0"), val = tensor("custom")]; + tensor k_233_pad_0 = const()[name = tensor("k_233_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1702930944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1703750208))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_233_cast = conv(dilations = var_11455, groups = var_6865, pad = k_233_pad_0, pad_type = k_233_pad_type_0, strides = var_11453, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_459_cast)[name = tensor("k_233_cast")]; + tensor var_11459 = const()[name = tensor("op_11459"), val = tensor([1, 1])]; + tensor var_11461 = const()[name = tensor("op_11461"), val = tensor([1, 1])]; + tensor v_233_pad_type_0 = const()[name = tensor("v_233_pad_type_0"), val = tensor("custom")]; + tensor v_233_pad_0 = const()[name = tensor("v_233_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1703750336))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1705388800))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_233_cast = conv(dilations = var_11461, groups = var_6865, pad = v_233_pad_0, pad_type = v_233_pad_type_0, strides = var_11459, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_459_cast)[name = tensor("v_233_cast")]; + tensor var_11465 = const()[name = tensor("op_11465"), val = tensor([2, 20, 64, -1])]; + tensor var_11466_cast = reshape(shape = var_11465, x = q_233_cast)[name = tensor("op_11466_cast")]; + tensor var_11467 = const()[name = tensor("op_11467"), val = tensor([2, 20, 64, -1])]; + tensor 
var_11468_cast = reshape(shape = var_11467, x = k_233_cast)[name = tensor("op_11468_cast")]; + tensor var_11469 = const()[name = tensor("op_11469"), val = tensor([2, 20, 64, -1])]; + tensor var_11470_cast = reshape(shape = var_11469, x = v_233_cast)[name = tensor("op_11470_cast")]; + tensor attn_weights_465_transpose_x_0 = const()[name = tensor("attn_weights_465_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_465_transpose_y_0 = const()[name = tensor("attn_weights_465_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_465_cast = matmul(transpose_x = attn_weights_465_transpose_x_0, transpose_y = attn_weights_465_transpose_y_0, x = var_11466_cast, y = var_11468_cast)[name = tensor("attn_weights_465_cast")]; + tensor attn_weights_467_cast = mul(x = attn_weights_465_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_467_cast")]; + tensor var_11474_cast = softmax(axis = var_6849, x = attn_weights_467_cast)[name = tensor("op_11474_cast")]; + tensor attn_233_transpose_x_0 = const()[name = tensor("attn_233_transpose_x_0"), val = tensor(false)]; + tensor attn_233_transpose_y_0 = const()[name = tensor("attn_233_transpose_y_0"), val = tensor(true)]; + tensor attn_233_cast = matmul(transpose_x = attn_233_transpose_x_0, transpose_y = attn_233_transpose_y_0, x = var_11470_cast, y = var_11474_cast)[name = tensor("attn_233_cast")]; + tensor var_11478 = const()[name = tensor("op_11478"), val = tensor([2, 1280, 1, -1])]; + tensor input_667_cast = reshape(shape = var_11478, x = attn_233_cast)[name = tensor("input_667_cast")]; + tensor var_11483 = const()[name = tensor("op_11483"), val = tensor([1, 1])]; + tensor var_11485 = const()[name = tensor("op_11485"), val = tensor([1, 1])]; + tensor var_11487_pad_type_0 = const()[name = tensor("op_11487_pad_type_0"), val = tensor("custom")]; + tensor var_11487_pad_0 = const()[name = tensor("op_11487_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1705389376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1707027840))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1707028416)))]; + tensor var_11487_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_bias_to_fp16, dilations = var_11485, groups = var_6865, pad = var_11487_pad_0, pad_type = var_11487_pad_type_0, strides = var_11483, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn1_to_out_0_weight_to_fp16_palettized, x = input_667_cast)[name = tensor("op_11487_cast")]; + tensor inputs_351_cast = add(x = var_11487_cast, y = inputs_349_cast)[name = tensor("inputs_351_cast")]; + tensor var_11491 = const()[name = tensor("op_11491"), val = tensor([1])]; + tensor channels_mean_351_cast = reduce_mean(axes = var_11491, keep_dims = var_6860, x = inputs_351_cast)[name = tensor("channels_mean_351_cast")]; + tensor zero_mean_351_cast = sub(x = inputs_351_cast, y = channels_mean_351_cast)[name = tensor("zero_mean_351_cast")]; + tensor zero_mean_sq_351_cast = mul(x = 
zero_mean_351_cast, y = zero_mean_351_cast)[name = tensor("zero_mean_sq_351_cast")]; + tensor var_11495 = const()[name = tensor("op_11495"), val = tensor([1])]; + tensor var_11496_cast = reduce_mean(axes = var_11495, keep_dims = var_6860, x = zero_mean_sq_351_cast)[name = tensor("op_11496_cast")]; + tensor var_11497_to_fp16 = const()[name = tensor("op_11497_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11498_cast = add(x = var_11496_cast, y = var_11497_to_fp16)[name = tensor("op_11498_cast")]; + tensor denom_351_epsilon_0_to_fp16 = const()[name = tensor("denom_351_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_351_cast = rsqrt(epsilon = denom_351_epsilon_0_to_fp16, x = var_11498_cast)[name = tensor("denom_351_cast")]; + tensor out_351_cast = mul(x = zero_mean_351_cast, y = denom_351_cast)[name = tensor("out_351_cast")]; + tensor var_11502_to_fp16 = const()[name = tensor("op_11502_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1707031040)))]; + tensor var_11503_cast = add(x = out_351_cast, y = var_11502_to_fp16)[name = tensor("op_11503_cast")]; + tensor var_11505_to_fp16 = const()[name = tensor("op_11505_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1707033664)))]; + tensor hidden_states_461_cast = mul(x = var_11503_cast, y = var_11505_to_fp16)[name = tensor("hidden_states_461_cast")]; + tensor var_11512 = const()[name = tensor("op_11512"), val = tensor([1, 1])]; + tensor var_11514 = const()[name = tensor("op_11514"), val = tensor([1, 1])]; + tensor q_235_pad_type_0 = const()[name = tensor("q_235_pad_type_0"), val = tensor("custom")]; + tensor q_235_pad_0 = const()[name = tensor("q_235_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1707036288))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1707855552))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_235_cast = conv(dilations = var_11514, groups = var_6865, pad = q_235_pad_0, pad_type = q_235_pad_type_0, strides = var_11512, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_461_cast)[name = tensor("q_235_cast")]; + tensor var_11518 = const()[name = tensor("op_11518"), val = tensor([1, 1])]; + tensor var_11520 = const()[name = tensor("op_11520"), val = tensor([1, 1])]; + tensor k_235_pad_type_0 = const()[name = tensor("k_235_pad_type_0"), val = tensor("custom")]; + tensor k_235_pad_0 = const()[name = tensor("k_235_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1707855680))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1709821824))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_235_cast = conv(dilations = var_11520, groups = var_6865, pad = k_235_pad_0, pad_type = k_235_pad_type_0, strides = var_11518, weight = 
up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_235_cast")]; + tensor var_11524 = const()[name = tensor("op_11524"), val = tensor([1, 1])]; + tensor var_11526 = const()[name = tensor("op_11526"), val = tensor([1, 1])]; + tensor v_235_pad_type_0 = const()[name = tensor("v_235_pad_type_0"), val = tensor("custom")]; + tensor v_235_pad_0 = const()[name = tensor("v_235_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1709822016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1711132800))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_235_cast = conv(dilations = var_11526, groups = var_6865, pad = v_235_pad_0, pad_type = v_235_pad_type_0, strides = var_11524, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_235_cast")]; + tensor var_11530 = const()[name = tensor("op_11530"), val = tensor([2, 20, 64, -1])]; + tensor var_11531_cast = reshape(shape = var_11530, x = q_235_cast)[name = tensor("op_11531_cast")]; + tensor var_11532 = const()[name = tensor("op_11532"), val = tensor([2, 20, 64, -1])]; + tensor var_11533_cast = reshape(shape = var_11532, x = k_235_cast)[name = tensor("op_11533_cast")]; + tensor var_11534 = const()[name = tensor("op_11534"), val = tensor([2, 20, 64, -1])]; + tensor var_11535_cast = reshape(shape = var_11534, x = v_235_cast)[name = tensor("op_11535_cast")]; + tensor attn_weights_469_transpose_x_0 = const()[name = tensor("attn_weights_469_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_469_transpose_y_0 = const()[name = tensor("attn_weights_469_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_469_cast = matmul(transpose_x = attn_weights_469_transpose_x_0, transpose_y = attn_weights_469_transpose_y_0, x = var_11531_cast, y = var_11533_cast)[name = tensor("attn_weights_469_cast")]; + tensor attn_weights_471_cast = mul(x = attn_weights_469_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_471_cast")]; + tensor var_11539_cast = softmax(axis = var_6849, x = attn_weights_471_cast)[name = tensor("op_11539_cast")]; + tensor attn_235_transpose_x_0 = const()[name = tensor("attn_235_transpose_x_0"), val = tensor(false)]; + tensor attn_235_transpose_y_0 = const()[name = tensor("attn_235_transpose_y_0"), val = tensor(true)]; + tensor attn_235_cast = matmul(transpose_x = attn_235_transpose_x_0, transpose_y = attn_235_transpose_y_0, x = var_11535_cast, y = var_11539_cast)[name = tensor("attn_235_cast")]; + tensor var_11543 = const()[name = tensor("op_11543"), val = tensor([2, 1280, 1, -1])]; + tensor input_669_cast = reshape(shape = var_11543, x = attn_235_cast)[name = tensor("input_669_cast")]; + tensor var_11548 = const()[name = tensor("op_11548"), val = tensor([1, 1])]; + tensor var_11550 = const()[name = tensor("op_11550"), val = tensor([1, 1])]; + tensor var_11552_pad_type_0 = const()[name = tensor("op_11552_pad_type_0"), val = tensor("custom")]; + tensor var_11552_pad_0 = const()[name = tensor("op_11552_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1711132928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1712361792))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1712361984)))]; + tensor var_11552_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_bias_to_fp16, dilations = var_11550, groups = var_6865, pad = var_11552_pad_0, pad_type = var_11552_pad_type_0, strides = var_11548, weight = up_blocks_0_attentions_2_transformer_blocks_4_attn2_to_out_0_weight_to_fp16_palettized, x = input_669_cast)[name = tensor("op_11552_cast")]; + tensor inputs_353_cast = add(x = var_11552_cast, y = inputs_351_cast)[name = tensor("inputs_353_cast")]; + tensor var_11556 = const()[name = tensor("op_11556"), val = tensor([1])]; + tensor channels_mean_353_cast = reduce_mean(axes = var_11556, keep_dims = var_6860, x = inputs_353_cast)[name = tensor("channels_mean_353_cast")]; + tensor zero_mean_353_cast = sub(x = inputs_353_cast, y = channels_mean_353_cast)[name = tensor("zero_mean_353_cast")]; + tensor zero_mean_sq_353_cast = mul(x = zero_mean_353_cast, y = zero_mean_353_cast)[name = tensor("zero_mean_sq_353_cast")]; + tensor var_11560 = const()[name = tensor("op_11560"), val = tensor([1])]; + tensor var_11561_cast = reduce_mean(axes = var_11560, keep_dims = var_6860, x = zero_mean_sq_353_cast)[name = tensor("op_11561_cast")]; + tensor var_11562_to_fp16 = const()[name = tensor("op_11562_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11563_cast = add(x = var_11561_cast, y = var_11562_to_fp16)[name = tensor("op_11563_cast")]; + tensor denom_353_epsilon_0_to_fp16 = const()[name = tensor("denom_353_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_353_cast = rsqrt(epsilon = denom_353_epsilon_0_to_fp16, x = var_11563_cast)[name = tensor("denom_353_cast")]; + tensor out_353_cast = mul(x = zero_mean_353_cast, y = denom_353_cast)[name = tensor("out_353_cast")]; + tensor var_11567_to_fp16 = const()[name = tensor("op_11567_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1712364608)))]; + tensor var_11568_cast = add(x = out_353_cast, y = var_11567_to_fp16)[name = tensor("op_11568_cast")]; + tensor var_11570_to_fp16 = const()[name = tensor("op_11570_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1712367232)))]; + tensor input_671_cast = mul(x = var_11568_cast, y = var_11570_to_fp16)[name = tensor("input_671_cast")]; + tensor var_11578 = const()[name = tensor("op_11578"), val = tensor([1, 1])]; + tensor var_11580 = const()[name = tensor("op_11580"), val = tensor([1, 1])]; + tensor var_11582_pad_type_0 = const()[name = tensor("op_11582_pad_type_0"), val = tensor("custom")]; + tensor var_11582_pad_0 = const()[name = tensor("op_11582_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1712369856))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1725477120))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1725477696)))]; + tensor var_11582_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_bias_to_fp16, dilations = var_11580, groups = var_6865, pad = var_11582_pad_0, pad_type = var_11582_pad_type_0, strides = var_11578, weight = up_blocks_0_attentions_2_transformer_blocks_4_ff_net_0_proj_weight_to_fp16_palettized, x = input_671_cast)[name = tensor("op_11582_cast")]; + tensor var_11583_split_sizes_0 = const()[name = tensor("op_11583_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11583_axis_0 = const()[name = tensor("op_11583_axis_0"), val = tensor(1)]; + tensor var_11583_cast_0, tensor var_11583_cast_1 = split(axis = var_11583_axis_0, split_sizes = var_11583_split_sizes_0, x = var_11582_cast)[name = tensor("op_11583_cast")]; + tensor var_11585_mode_0 = const()[name = tensor("op_11585_mode_0"), val = tensor("EXACT")]; + tensor var_11585_cast = gelu(mode = var_11585_mode_0, x = var_11583_cast_1)[name = tensor("op_11585_cast")]; + tensor input_673_cast = mul(x = var_11583_cast_0, y = var_11585_cast)[name = tensor("input_673_cast")]; + tensor var_11589 = const()[name = tensor("op_11589"), val = tensor([1, 1])]; + tensor var_11591 = const()[name = tensor("op_11591"), val = tensor([1, 1])]; + tensor var_11593_pad_type_0 = const()[name = tensor("op_11593_pad_type_0"), val = tensor("custom")]; + tensor var_11593_pad_0 = const()[name = tensor("op_11593_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1725498240))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1732051904))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1732052480)))]; + tensor var_11593_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_bias_to_fp16, dilations = var_11591, groups = var_6865, pad = var_11593_pad_0, pad_type = var_11593_pad_type_0, strides = var_11589, weight = up_blocks_0_attentions_2_transformer_blocks_4_ff_net_2_weight_to_fp16_palettized, x = input_673_cast)[name = tensor("op_11593_cast")]; + tensor inputs_355_cast = add(x = var_11593_cast, y = inputs_353_cast)[name = tensor("inputs_355_cast")]; + tensor var_11603 = const()[name = tensor("op_11603"), val = tensor([1])]; + tensor channels_mean_355_cast = reduce_mean(axes = var_11603, keep_dims = var_6860, x = inputs_355_cast)[name = tensor("channels_mean_355_cast")]; + tensor zero_mean_355_cast = sub(x = inputs_355_cast, y = channels_mean_355_cast)[name = tensor("zero_mean_355_cast")]; + tensor zero_mean_sq_355_cast = mul(x = 
zero_mean_355_cast, y = zero_mean_355_cast)[name = tensor("zero_mean_sq_355_cast")]; + tensor var_11607 = const()[name = tensor("op_11607"), val = tensor([1])]; + tensor var_11608_cast = reduce_mean(axes = var_11607, keep_dims = var_6860, x = zero_mean_sq_355_cast)[name = tensor("op_11608_cast")]; + tensor var_11609_to_fp16 = const()[name = tensor("op_11609_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11610_cast = add(x = var_11608_cast, y = var_11609_to_fp16)[name = tensor("op_11610_cast")]; + tensor denom_355_epsilon_0_to_fp16 = const()[name = tensor("denom_355_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_355_cast = rsqrt(epsilon = denom_355_epsilon_0_to_fp16, x = var_11610_cast)[name = tensor("denom_355_cast")]; + tensor out_355_cast = mul(x = zero_mean_355_cast, y = denom_355_cast)[name = tensor("out_355_cast")]; + tensor var_11614_to_fp16 = const()[name = tensor("op_11614_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1732055104)))]; + tensor var_11615_cast = add(x = out_355_cast, y = var_11614_to_fp16)[name = tensor("op_11615_cast")]; + tensor var_11617_to_fp16 = const()[name = tensor("op_11617_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1732057728)))]; + tensor hidden_states_465_cast = mul(x = var_11615_cast, y = var_11617_to_fp16)[name = tensor("hidden_states_465_cast")]; + tensor var_11624 = const()[name = tensor("op_11624"), val = tensor([1, 1])]; + tensor var_11626 = const()[name = tensor("op_11626"), val = tensor([1, 1])]; + tensor q_237_pad_type_0 = const()[name = tensor("q_237_pad_type_0"), val = tensor("custom")]; + tensor q_237_pad_0 = const()[name = tensor("q_237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1732060352))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1733289216))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_237_cast = conv(dilations = var_11626, groups = var_6865, pad = q_237_pad_0, pad_type = q_237_pad_type_0, strides = var_11624, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_465_cast)[name = tensor("q_237_cast")]; + tensor var_11630 = const()[name = tensor("op_11630"), val = tensor([1, 1])]; + tensor var_11632 = const()[name = tensor("op_11632"), val = tensor([1, 1])]; + tensor k_237_pad_type_0 = const()[name = tensor("k_237_pad_type_0"), val = tensor("custom")]; + tensor k_237_pad_0 = const()[name = tensor("k_237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1733289408))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1734518272))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_237_cast = conv(dilations = var_11632, groups = var_6865, pad = k_237_pad_0, pad_type = k_237_pad_type_0, strides = var_11630, weight = 
up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_465_cast)[name = tensor("k_237_cast")]; + tensor var_11636 = const()[name = tensor("op_11636"), val = tensor([1, 1])]; + tensor var_11638 = const()[name = tensor("op_11638"), val = tensor([1, 1])]; + tensor v_237_pad_type_0 = const()[name = tensor("v_237_pad_type_0"), val = tensor("custom")]; + tensor v_237_pad_0 = const()[name = tensor("v_237_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1734518464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1736156928))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_237_cast = conv(dilations = var_11638, groups = var_6865, pad = v_237_pad_0, pad_type = v_237_pad_type_0, strides = var_11636, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_465_cast)[name = tensor("v_237_cast")]; + tensor var_11642 = const()[name = tensor("op_11642"), val = tensor([2, 20, 64, -1])]; + tensor var_11643_cast = reshape(shape = var_11642, x = q_237_cast)[name = tensor("op_11643_cast")]; + tensor var_11644 = const()[name = tensor("op_11644"), val = tensor([2, 20, 64, -1])]; + tensor var_11645_cast = reshape(shape = var_11644, x = k_237_cast)[name = tensor("op_11645_cast")]; + tensor var_11646 = const()[name = tensor("op_11646"), val = tensor([2, 20, 64, -1])]; + tensor var_11647_cast = reshape(shape = var_11646, x = v_237_cast)[name = tensor("op_11647_cast")]; + tensor attn_weights_473_transpose_x_0 = const()[name = tensor("attn_weights_473_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_473_transpose_y_0 = const()[name = tensor("attn_weights_473_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_473_cast = matmul(transpose_x = attn_weights_473_transpose_x_0, transpose_y = attn_weights_473_transpose_y_0, x = var_11643_cast, y = var_11645_cast)[name = tensor("attn_weights_473_cast")]; + tensor attn_weights_475_cast = mul(x = attn_weights_473_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_475_cast")]; + tensor var_11651_cast = softmax(axis = var_6849, x = attn_weights_475_cast)[name = tensor("op_11651_cast")]; + tensor attn_237_transpose_x_0 = const()[name = tensor("attn_237_transpose_x_0"), val = tensor(false)]; + tensor attn_237_transpose_y_0 = const()[name = tensor("attn_237_transpose_y_0"), val = tensor(true)]; + tensor attn_237_cast = matmul(transpose_x = attn_237_transpose_x_0, transpose_y = attn_237_transpose_y_0, x = var_11647_cast, y = var_11651_cast)[name = tensor("attn_237_cast")]; + tensor var_11655 = const()[name = tensor("op_11655"), val = tensor([2, 1280, 1, -1])]; + tensor input_675_cast = reshape(shape = var_11655, x = attn_237_cast)[name = tensor("input_675_cast")]; + tensor var_11660 = const()[name = tensor("op_11660"), val = tensor([1, 1])]; + tensor var_11662 = const()[name = tensor("op_11662"), val = tensor([1, 1])]; + tensor var_11664_pad_type_0 = const()[name = tensor("op_11664_pad_type_0"), val = tensor("custom")]; + tensor var_11664_pad_0 = const()[name = tensor("op_11664_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1736157504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1737795968))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1737796544)))]; + tensor var_11664_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_bias_to_fp16, dilations = var_11662, groups = var_6865, pad = var_11664_pad_0, pad_type = var_11664_pad_type_0, strides = var_11660, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn1_to_out_0_weight_to_fp16_palettized, x = input_675_cast)[name = tensor("op_11664_cast")]; + tensor inputs_357_cast = add(x = var_11664_cast, y = inputs_355_cast)[name = tensor("inputs_357_cast")]; + tensor var_11668 = const()[name = tensor("op_11668"), val = tensor([1])]; + tensor channels_mean_357_cast = reduce_mean(axes = var_11668, keep_dims = var_6860, x = inputs_357_cast)[name = tensor("channels_mean_357_cast")]; + tensor zero_mean_357_cast = sub(x = inputs_357_cast, y = channels_mean_357_cast)[name = tensor("zero_mean_357_cast")]; + tensor zero_mean_sq_357_cast = mul(x = zero_mean_357_cast, y = zero_mean_357_cast)[name = tensor("zero_mean_sq_357_cast")]; + tensor var_11672 = const()[name = tensor("op_11672"), val = tensor([1])]; + tensor var_11673_cast = reduce_mean(axes = var_11672, keep_dims = var_6860, x = zero_mean_sq_357_cast)[name = tensor("op_11673_cast")]; + tensor var_11674_to_fp16 = const()[name = tensor("op_11674_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11675_cast = add(x = var_11673_cast, y = var_11674_to_fp16)[name = tensor("op_11675_cast")]; + tensor denom_357_epsilon_0_to_fp16 = const()[name = tensor("denom_357_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_357_cast = rsqrt(epsilon = denom_357_epsilon_0_to_fp16, x = var_11675_cast)[name = tensor("denom_357_cast")]; + tensor out_357_cast = mul(x = zero_mean_357_cast, y = denom_357_cast)[name = tensor("out_357_cast")]; + tensor var_11679_to_fp16 = const()[name = tensor("op_11679_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1737799168)))]; + tensor var_11680_cast = add(x = out_357_cast, y = var_11679_to_fp16)[name = tensor("op_11680_cast")]; + tensor var_11682_to_fp16 = const()[name = tensor("op_11682_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1737801792)))]; + tensor hidden_states_467_cast = mul(x = var_11680_cast, y = var_11682_to_fp16)[name = tensor("hidden_states_467_cast")]; + tensor var_11689 = const()[name = tensor("op_11689"), val = tensor([1, 1])]; + tensor var_11691 = const()[name = tensor("op_11691"), val = tensor([1, 1])]; + tensor q_239_pad_type_0 = const()[name = tensor("q_239_pad_type_0"), val = tensor("custom")]; + tensor q_239_pad_0 = const()[name = tensor("q_239_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1737804416))), lut = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1738623680))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_239_cast = conv(dilations = var_11691, groups = var_6865, pad = q_239_pad_0, pad_type = q_239_pad_type_0, strides = var_11689, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_467_cast)[name = tensor("q_239_cast")]; + tensor var_11695 = const()[name = tensor("op_11695"), val = tensor([1, 1])]; + tensor var_11697 = const()[name = tensor("op_11697"), val = tensor([1, 1])]; + tensor k_239_pad_type_0 = const()[name = tensor("k_239_pad_type_0"), val = tensor("custom")]; + tensor k_239_pad_0 = const()[name = tensor("k_239_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1738623808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1739934592))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_239_cast = conv(dilations = var_11697, groups = var_6865, pad = k_239_pad_0, pad_type = k_239_pad_type_0, strides = var_11695, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_239_cast")]; + tensor var_11701 = const()[name = tensor("op_11701"), val = tensor([1, 1])]; + tensor var_11703 = const()[name = tensor("op_11703"), val = tensor([1, 1])]; + tensor v_239_pad_type_0 = const()[name = tensor("v_239_pad_type_0"), val = tensor("custom")]; + tensor v_239_pad_0 = const()[name = tensor("v_239_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1739934720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1741245504))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_239_cast = conv(dilations = var_11703, groups = var_6865, pad = v_239_pad_0, pad_type = v_239_pad_type_0, strides = var_11701, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_239_cast")]; + tensor var_11707 = const()[name = tensor("op_11707"), val = tensor([2, 20, 64, -1])]; + tensor var_11708_cast = reshape(shape = var_11707, x = q_239_cast)[name = tensor("op_11708_cast")]; + tensor var_11709 = const()[name = tensor("op_11709"), val = tensor([2, 20, 64, -1])]; + tensor var_11710_cast = reshape(shape = var_11709, x = k_239_cast)[name = tensor("op_11710_cast")]; + tensor var_11711 = const()[name = tensor("op_11711"), val = tensor([2, 20, 64, -1])]; + tensor var_11712_cast = reshape(shape = var_11711, x = v_239_cast)[name = tensor("op_11712_cast")]; + tensor attn_weights_477_transpose_x_0 = const()[name = tensor("attn_weights_477_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_477_transpose_y_0 = const()[name = tensor("attn_weights_477_transpose_y_0"), val = tensor(false)]; + tensor 
attn_weights_477_cast = matmul(transpose_x = attn_weights_477_transpose_x_0, transpose_y = attn_weights_477_transpose_y_0, x = var_11708_cast, y = var_11710_cast)[name = tensor("attn_weights_477_cast")]; + tensor attn_weights_479_cast = mul(x = attn_weights_477_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_479_cast")]; + tensor var_11716_cast = softmax(axis = var_6849, x = attn_weights_479_cast)[name = tensor("op_11716_cast")]; + tensor attn_239_transpose_x_0 = const()[name = tensor("attn_239_transpose_x_0"), val = tensor(false)]; + tensor attn_239_transpose_y_0 = const()[name = tensor("attn_239_transpose_y_0"), val = tensor(true)]; + tensor attn_239_cast = matmul(transpose_x = attn_239_transpose_x_0, transpose_y = attn_239_transpose_y_0, x = var_11712_cast, y = var_11716_cast)[name = tensor("attn_239_cast")]; + tensor var_11720 = const()[name = tensor("op_11720"), val = tensor([2, 1280, 1, -1])]; + tensor input_677_cast = reshape(shape = var_11720, x = attn_239_cast)[name = tensor("input_677_cast")]; + tensor var_11725 = const()[name = tensor("op_11725"), val = tensor([1, 1])]; + tensor var_11727 = const()[name = tensor("op_11727"), val = tensor([1, 1])]; + tensor var_11729_pad_type_0 = const()[name = tensor("op_11729_pad_type_0"), val = tensor("custom")]; + tensor var_11729_pad_0 = const()[name = tensor("op_11729_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1741245632))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1742064896))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1742065024)))]; + tensor var_11729_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_bias_to_fp16, dilations = var_11727, groups = var_6865, pad = var_11729_pad_0, pad_type = var_11729_pad_type_0, strides = var_11725, weight = up_blocks_0_attentions_2_transformer_blocks_5_attn2_to_out_0_weight_to_fp16_palettized, x = input_677_cast)[name = tensor("op_11729_cast")]; + tensor inputs_359_cast = add(x = var_11729_cast, y = inputs_357_cast)[name = tensor("inputs_359_cast")]; + tensor var_11733 = const()[name = tensor("op_11733"), val = tensor([1])]; + tensor channels_mean_359_cast = reduce_mean(axes = var_11733, keep_dims = var_6860, x = inputs_359_cast)[name = tensor("channels_mean_359_cast")]; + tensor zero_mean_359_cast = sub(x = inputs_359_cast, y = channels_mean_359_cast)[name = tensor("zero_mean_359_cast")]; + tensor zero_mean_sq_359_cast = mul(x = zero_mean_359_cast, y = zero_mean_359_cast)[name = tensor("zero_mean_sq_359_cast")]; + tensor var_11737 = const()[name = tensor("op_11737"), val = tensor([1])]; + tensor var_11738_cast = reduce_mean(axes = var_11737, keep_dims = var_6860, x = zero_mean_sq_359_cast)[name = tensor("op_11738_cast")]; + tensor var_11739_to_fp16 = const()[name = tensor("op_11739_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11740_cast = add(x = var_11738_cast, y = var_11739_to_fp16)[name = tensor("op_11740_cast")]; + tensor 
denom_359_epsilon_0_to_fp16 = const()[name = tensor("denom_359_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_359_cast = rsqrt(epsilon = denom_359_epsilon_0_to_fp16, x = var_11740_cast)[name = tensor("denom_359_cast")]; + tensor out_359_cast = mul(x = zero_mean_359_cast, y = denom_359_cast)[name = tensor("out_359_cast")]; + tensor var_11744_to_fp16 = const()[name = tensor("op_11744_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1742067648)))]; + tensor var_11745_cast = add(x = out_359_cast, y = var_11744_to_fp16)[name = tensor("op_11745_cast")]; + tensor var_11747_to_fp16 = const()[name = tensor("op_11747_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1742070272)))]; + tensor input_679_cast = mul(x = var_11745_cast, y = var_11747_to_fp16)[name = tensor("input_679_cast")]; + tensor var_11755 = const()[name = tensor("op_11755"), val = tensor([1, 1])]; + tensor var_11757 = const()[name = tensor("op_11757"), val = tensor([1, 1])]; + tensor var_11759_pad_type_0 = const()[name = tensor("op_11759_pad_type_0"), val = tensor("custom")]; + tensor var_11759_pad_0 = const()[name = tensor("op_11759_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1742072896))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1755180160))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1755180736)))]; + tensor var_11759_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_bias_to_fp16, dilations = var_11757, groups = var_6865, pad = var_11759_pad_0, pad_type = var_11759_pad_type_0, strides = var_11755, weight = up_blocks_0_attentions_2_transformer_blocks_5_ff_net_0_proj_weight_to_fp16_palettized, x = input_679_cast)[name = tensor("op_11759_cast")]; + tensor var_11760_split_sizes_0 = const()[name = tensor("op_11760_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11760_axis_0 = const()[name = tensor("op_11760_axis_0"), val = tensor(1)]; + tensor var_11760_cast_0, tensor var_11760_cast_1 = split(axis = var_11760_axis_0, split_sizes = var_11760_split_sizes_0, x = var_11759_cast)[name = tensor("op_11760_cast")]; + tensor var_11762_mode_0 = const()[name = tensor("op_11762_mode_0"), val = tensor("EXACT")]; + tensor var_11762_cast = gelu(mode = var_11762_mode_0, x = var_11760_cast_1)[name = tensor("op_11762_cast")]; + tensor input_681_cast = mul(x = var_11760_cast_0, y = var_11762_cast)[name = tensor("input_681_cast")]; + tensor var_11766 = const()[name = tensor("op_11766"), val = tensor([1, 1])]; + tensor var_11768 = const()[name = tensor("op_11768"), val = tensor([1, 1])]; + tensor var_11770_pad_type_0 = const()[name = tensor("op_11770_pad_type_0"), val = tensor("custom")]; + tensor var_11770_pad_0 = const()[name = tensor("op_11770_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized = 
constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1755201280))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1761754944))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1761755520)))]; + tensor var_11770_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_bias_to_fp16, dilations = var_11768, groups = var_6865, pad = var_11770_pad_0, pad_type = var_11770_pad_type_0, strides = var_11766, weight = up_blocks_0_attentions_2_transformer_blocks_5_ff_net_2_weight_to_fp16_palettized, x = input_681_cast)[name = tensor("op_11770_cast")]; + tensor inputs_361_cast = add(x = var_11770_cast, y = inputs_359_cast)[name = tensor("inputs_361_cast")]; + tensor var_11780 = const()[name = tensor("op_11780"), val = tensor([1])]; + tensor channels_mean_361_cast = reduce_mean(axes = var_11780, keep_dims = var_6860, x = inputs_361_cast)[name = tensor("channels_mean_361_cast")]; + tensor zero_mean_361_cast = sub(x = inputs_361_cast, y = channels_mean_361_cast)[name = tensor("zero_mean_361_cast")]; + tensor zero_mean_sq_361_cast = mul(x = zero_mean_361_cast, y = zero_mean_361_cast)[name = tensor("zero_mean_sq_361_cast")]; + tensor var_11784 = const()[name = tensor("op_11784"), val = tensor([1])]; + tensor var_11785_cast = reduce_mean(axes = var_11784, keep_dims = var_6860, x = zero_mean_sq_361_cast)[name = tensor("op_11785_cast")]; + tensor var_11786_to_fp16 = const()[name = tensor("op_11786_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11787_cast = add(x = var_11785_cast, y = var_11786_to_fp16)[name = tensor("op_11787_cast")]; + tensor denom_361_epsilon_0_to_fp16 = const()[name = tensor("denom_361_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_361_cast = rsqrt(epsilon = denom_361_epsilon_0_to_fp16, x = var_11787_cast)[name = tensor("denom_361_cast")]; + tensor out_361_cast = mul(x = zero_mean_361_cast, y = denom_361_cast)[name = tensor("out_361_cast")]; + tensor var_11791_to_fp16 = const()[name = tensor("op_11791_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1761758144)))]; + tensor var_11792_cast = add(x = out_361_cast, y = var_11791_to_fp16)[name = tensor("op_11792_cast")]; + tensor var_11794_to_fp16 = const()[name = tensor("op_11794_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1761760768)))]; + tensor hidden_states_471_cast = mul(x = var_11792_cast, y = var_11794_to_fp16)[name = tensor("hidden_states_471_cast")]; + tensor var_11801 = const()[name = tensor("op_11801"), val = tensor([1, 1])]; + tensor var_11803 = const()[name = tensor("op_11803"), val = tensor([1, 1])]; + tensor q_241_pad_type_0 = const()[name = tensor("q_241_pad_type_0"), val = tensor("custom")]; + tensor q_241_pad_0 = const()[name = tensor("q_241_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1761763392))), lut = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1762992256))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_241_cast = conv(dilations = var_11803, groups = var_6865, pad = q_241_pad_0, pad_type = q_241_pad_type_0, strides = var_11801, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_471_cast)[name = tensor("q_241_cast")]; + tensor var_11807 = const()[name = tensor("op_11807"), val = tensor([1, 1])]; + tensor var_11809 = const()[name = tensor("op_11809"), val = tensor([1, 1])]; + tensor k_241_pad_type_0 = const()[name = tensor("k_241_pad_type_0"), val = tensor("custom")]; + tensor k_241_pad_0 = const()[name = tensor("k_241_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1762992448))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1764221312))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_241_cast = conv(dilations = var_11809, groups = var_6865, pad = k_241_pad_0, pad_type = k_241_pad_type_0, strides = var_11807, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_471_cast)[name = tensor("k_241_cast")]; + tensor var_11813 = const()[name = tensor("op_11813"), val = tensor([1, 1])]; + tensor var_11815 = const()[name = tensor("op_11815"), val = tensor([1, 1])]; + tensor v_241_pad_type_0 = const()[name = tensor("v_241_pad_type_0"), val = tensor("custom")]; + tensor v_241_pad_0 = const()[name = tensor("v_241_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1764221504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1765859968))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_241_cast = conv(dilations = var_11815, groups = var_6865, pad = v_241_pad_0, pad_type = v_241_pad_type_0, strides = var_11813, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_471_cast)[name = tensor("v_241_cast")]; + tensor var_11819 = const()[name = tensor("op_11819"), val = tensor([2, 20, 64, -1])]; + tensor var_11820_cast = reshape(shape = var_11819, x = q_241_cast)[name = tensor("op_11820_cast")]; + tensor var_11821 = const()[name = tensor("op_11821"), val = tensor([2, 20, 64, -1])]; + tensor var_11822_cast = reshape(shape = var_11821, x = k_241_cast)[name = tensor("op_11822_cast")]; + tensor var_11823 = const()[name = tensor("op_11823"), val = tensor([2, 20, 64, -1])]; + tensor var_11824_cast = reshape(shape = var_11823, x = v_241_cast)[name = tensor("op_11824_cast")]; + tensor attn_weights_481_transpose_x_0 = const()[name = tensor("attn_weights_481_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_481_transpose_y_0 = const()[name = tensor("attn_weights_481_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_481_cast = 
matmul(transpose_x = attn_weights_481_transpose_x_0, transpose_y = attn_weights_481_transpose_y_0, x = var_11820_cast, y = var_11822_cast)[name = tensor("attn_weights_481_cast")]; + tensor attn_weights_483_cast = mul(x = attn_weights_481_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_483_cast")]; + tensor var_11828_cast = softmax(axis = var_6849, x = attn_weights_483_cast)[name = tensor("op_11828_cast")]; + tensor attn_241_transpose_x_0 = const()[name = tensor("attn_241_transpose_x_0"), val = tensor(false)]; + tensor attn_241_transpose_y_0 = const()[name = tensor("attn_241_transpose_y_0"), val = tensor(true)]; + tensor attn_241_cast = matmul(transpose_x = attn_241_transpose_x_0, transpose_y = attn_241_transpose_y_0, x = var_11824_cast, y = var_11828_cast)[name = tensor("attn_241_cast")]; + tensor var_11832 = const()[name = tensor("op_11832"), val = tensor([2, 1280, 1, -1])]; + tensor input_683_cast = reshape(shape = var_11832, x = attn_241_cast)[name = tensor("input_683_cast")]; + tensor var_11837 = const()[name = tensor("op_11837"), val = tensor([1, 1])]; + tensor var_11839 = const()[name = tensor("op_11839"), val = tensor([1, 1])]; + tensor var_11841_pad_type_0 = const()[name = tensor("op_11841_pad_type_0"), val = tensor("custom")]; + tensor var_11841_pad_0 = const()[name = tensor("op_11841_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1765860544))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1767499008))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1767499584)))]; + tensor var_11841_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_bias_to_fp16, dilations = var_11839, groups = var_6865, pad = var_11841_pad_0, pad_type = var_11841_pad_type_0, strides = var_11837, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn1_to_out_0_weight_to_fp16_palettized, x = input_683_cast)[name = tensor("op_11841_cast")]; + tensor inputs_363_cast = add(x = var_11841_cast, y = inputs_361_cast)[name = tensor("inputs_363_cast")]; + tensor var_11845 = const()[name = tensor("op_11845"), val = tensor([1])]; + tensor channels_mean_363_cast = reduce_mean(axes = var_11845, keep_dims = var_6860, x = inputs_363_cast)[name = tensor("channels_mean_363_cast")]; + tensor zero_mean_363_cast = sub(x = inputs_363_cast, y = channels_mean_363_cast)[name = tensor("zero_mean_363_cast")]; + tensor zero_mean_sq_363_cast = mul(x = zero_mean_363_cast, y = zero_mean_363_cast)[name = tensor("zero_mean_sq_363_cast")]; + tensor var_11849 = const()[name = tensor("op_11849"), val = tensor([1])]; + tensor var_11850_cast = reduce_mean(axes = var_11849, keep_dims = var_6860, x = zero_mean_sq_363_cast)[name = tensor("op_11850_cast")]; + tensor var_11851_to_fp16 = const()[name = tensor("op_11851_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11852_cast = add(x = var_11850_cast, y = var_11851_to_fp16)[name = tensor("op_11852_cast")]; + tensor denom_363_epsilon_0_to_fp16 = const()[name = 
tensor("denom_363_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_363_cast = rsqrt(epsilon = denom_363_epsilon_0_to_fp16, x = var_11852_cast)[name = tensor("denom_363_cast")]; + tensor out_363_cast = mul(x = zero_mean_363_cast, y = denom_363_cast)[name = tensor("out_363_cast")]; + tensor var_11856_to_fp16 = const()[name = tensor("op_11856_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1767502208)))]; + tensor var_11857_cast = add(x = out_363_cast, y = var_11856_to_fp16)[name = tensor("op_11857_cast")]; + tensor var_11859_to_fp16 = const()[name = tensor("op_11859_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1767504832)))]; + tensor hidden_states_473_cast = mul(x = var_11857_cast, y = var_11859_to_fp16)[name = tensor("hidden_states_473_cast")]; + tensor var_11866 = const()[name = tensor("op_11866"), val = tensor([1, 1])]; + tensor var_11868 = const()[name = tensor("op_11868"), val = tensor([1, 1])]; + tensor q_243_pad_type_0 = const()[name = tensor("q_243_pad_type_0"), val = tensor("custom")]; + tensor q_243_pad_0 = const()[name = tensor("q_243_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1767507456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1768326720))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_243_cast = conv(dilations = var_11868, groups = var_6865, pad = q_243_pad_0, pad_type = q_243_pad_type_0, strides = var_11866, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_473_cast)[name = tensor("q_243_cast")]; + tensor var_11872 = const()[name = tensor("op_11872"), val = tensor([1, 1])]; + tensor var_11874 = const()[name = tensor("op_11874"), val = tensor([1, 1])]; + tensor k_243_pad_type_0 = const()[name = tensor("k_243_pad_type_0"), val = tensor("custom")]; + tensor k_243_pad_0 = const()[name = tensor("k_243_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1768326848))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1769637632))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_243_cast = conv(dilations = var_11874, groups = var_6865, pad = k_243_pad_0, pad_type = k_243_pad_type_0, strides = var_11872, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_243_cast")]; + tensor var_11878 = const()[name = tensor("op_11878"), val = tensor([1, 1])]; + tensor var_11880 = const()[name = tensor("op_11880"), val = tensor([1, 1])]; + tensor v_243_pad_type_0 = const()[name = tensor("v_243_pad_type_0"), val = tensor("custom")]; + tensor v_243_pad_0 = const()[name = tensor("v_243_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1769637760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1771603904))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_243_cast = conv(dilations = var_11880, groups = var_6865, pad = v_243_pad_0, pad_type = v_243_pad_type_0, strides = var_11878, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_243_cast")]; + tensor var_11884 = const()[name = tensor("op_11884"), val = tensor([2, 20, 64, -1])]; + tensor var_11885_cast = reshape(shape = var_11884, x = q_243_cast)[name = tensor("op_11885_cast")]; + tensor var_11886 = const()[name = tensor("op_11886"), val = tensor([2, 20, 64, -1])]; + tensor var_11887_cast = reshape(shape = var_11886, x = k_243_cast)[name = tensor("op_11887_cast")]; + tensor var_11888 = const()[name = tensor("op_11888"), val = tensor([2, 20, 64, -1])]; + tensor var_11889_cast = reshape(shape = var_11888, x = v_243_cast)[name = tensor("op_11889_cast")]; + tensor attn_weights_485_transpose_x_0 = const()[name = tensor("attn_weights_485_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_485_transpose_y_0 = const()[name = tensor("attn_weights_485_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_485_cast = matmul(transpose_x = attn_weights_485_transpose_x_0, transpose_y = attn_weights_485_transpose_y_0, x = var_11885_cast, y = var_11887_cast)[name = tensor("attn_weights_485_cast")]; + tensor attn_weights_487_cast = mul(x = attn_weights_485_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_487_cast")]; + tensor var_11893_cast = softmax(axis = var_6849, x = attn_weights_487_cast)[name = tensor("op_11893_cast")]; + tensor attn_243_transpose_x_0 = const()[name = tensor("attn_243_transpose_x_0"), val = tensor(false)]; + tensor attn_243_transpose_y_0 = const()[name = tensor("attn_243_transpose_y_0"), val = tensor(true)]; + tensor attn_243_cast = matmul(transpose_x = attn_243_transpose_x_0, transpose_y = attn_243_transpose_y_0, x = var_11889_cast, y = var_11893_cast)[name = tensor("attn_243_cast")]; + tensor var_11897 = const()[name = tensor("op_11897"), val = tensor([2, 1280, 1, -1])]; + tensor input_685_cast = reshape(shape = var_11897, x = attn_243_cast)[name = tensor("input_685_cast")]; + tensor var_11902 = const()[name = tensor("op_11902"), val = tensor([1, 1])]; + tensor var_11904 = const()[name = tensor("op_11904"), val = tensor([1, 1])]; + tensor var_11906_pad_type_0 = const()[name = tensor("op_11906_pad_type_0"), val = tensor("custom")]; + tensor var_11906_pad_0 = const()[name = tensor("op_11906_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1771604096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1772423360))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1772423488)))]; + tensor var_11906_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_bias_to_fp16, dilations = var_11904, groups = var_6865, pad = var_11906_pad_0, pad_type = var_11906_pad_type_0, strides = var_11902, weight = up_blocks_0_attentions_2_transformer_blocks_6_attn2_to_out_0_weight_to_fp16_palettized, x = input_685_cast)[name = tensor("op_11906_cast")]; + tensor inputs_365_cast = add(x = var_11906_cast, y = inputs_363_cast)[name = tensor("inputs_365_cast")]; + tensor var_11910 = const()[name = tensor("op_11910"), val = tensor([1])]; + tensor channels_mean_365_cast = reduce_mean(axes = var_11910, keep_dims = var_6860, x = inputs_365_cast)[name = tensor("channels_mean_365_cast")]; + tensor zero_mean_365_cast = sub(x = inputs_365_cast, y = channels_mean_365_cast)[name = tensor("zero_mean_365_cast")]; + tensor zero_mean_sq_365_cast = mul(x = zero_mean_365_cast, y = zero_mean_365_cast)[name = tensor("zero_mean_sq_365_cast")]; + tensor var_11914 = const()[name = tensor("op_11914"), val = tensor([1])]; + tensor var_11915_cast = reduce_mean(axes = var_11914, keep_dims = var_6860, x = zero_mean_sq_365_cast)[name = tensor("op_11915_cast")]; + tensor var_11916_to_fp16 = const()[name = tensor("op_11916_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11917_cast = add(x = var_11915_cast, y = var_11916_to_fp16)[name = tensor("op_11917_cast")]; + tensor denom_365_epsilon_0_to_fp16 = const()[name = tensor("denom_365_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_365_cast = rsqrt(epsilon = denom_365_epsilon_0_to_fp16, x = var_11917_cast)[name = tensor("denom_365_cast")]; + tensor out_365_cast = mul(x = zero_mean_365_cast, y = denom_365_cast)[name = tensor("out_365_cast")]; + tensor var_11921_to_fp16 = const()[name = tensor("op_11921_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1772426112)))]; + tensor var_11922_cast = add(x = out_365_cast, y = var_11921_to_fp16)[name = tensor("op_11922_cast")]; + tensor var_11924_to_fp16 = const()[name = tensor("op_11924_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1772428736)))]; + tensor input_687_cast = mul(x = var_11922_cast, y = var_11924_to_fp16)[name = tensor("input_687_cast")]; + tensor var_11932 = const()[name = tensor("op_11932"), val = tensor([1, 1])]; + tensor var_11934 = const()[name = tensor("op_11934"), val = tensor([1, 1])]; + tensor var_11936_pad_type_0 = const()[name = tensor("op_11936_pad_type_0"), val = tensor("custom")]; + tensor var_11936_pad_0 = const()[name = tensor("op_11936_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1772431360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1785538624))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1785539200)))]; + tensor var_11936_cast = conv(bias = 
up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_bias_to_fp16, dilations = var_11934, groups = var_6865, pad = var_11936_pad_0, pad_type = var_11936_pad_type_0, strides = var_11932, weight = up_blocks_0_attentions_2_transformer_blocks_6_ff_net_0_proj_weight_to_fp16_palettized, x = input_687_cast)[name = tensor("op_11936_cast")]; + tensor var_11937_split_sizes_0 = const()[name = tensor("op_11937_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_11937_axis_0 = const()[name = tensor("op_11937_axis_0"), val = tensor(1)]; + tensor var_11937_cast_0, tensor var_11937_cast_1 = split(axis = var_11937_axis_0, split_sizes = var_11937_split_sizes_0, x = var_11936_cast)[name = tensor("op_11937_cast")]; + tensor var_11939_mode_0 = const()[name = tensor("op_11939_mode_0"), val = tensor("EXACT")]; + tensor var_11939_cast = gelu(mode = var_11939_mode_0, x = var_11937_cast_1)[name = tensor("op_11939_cast")]; + tensor input_689_cast = mul(x = var_11937_cast_0, y = var_11939_cast)[name = tensor("input_689_cast")]; + tensor var_11943 = const()[name = tensor("op_11943"), val = tensor([1, 1])]; + tensor var_11945 = const()[name = tensor("op_11945"), val = tensor([1, 1])]; + tensor var_11947_pad_type_0 = const()[name = tensor("op_11947_pad_type_0"), val = tensor("custom")]; + tensor var_11947_pad_0 = const()[name = tensor("op_11947_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1785559744))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1792113408))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1792113984)))]; + tensor var_11947_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_bias_to_fp16, dilations = var_11945, groups = var_6865, pad = var_11947_pad_0, pad_type = var_11947_pad_type_0, strides = var_11943, weight = up_blocks_0_attentions_2_transformer_blocks_6_ff_net_2_weight_to_fp16_palettized, x = input_689_cast)[name = tensor("op_11947_cast")]; + tensor inputs_367_cast = add(x = var_11947_cast, y = inputs_365_cast)[name = tensor("inputs_367_cast")]; + tensor var_11957 = const()[name = tensor("op_11957"), val = tensor([1])]; + tensor channels_mean_367_cast = reduce_mean(axes = var_11957, keep_dims = var_6860, x = inputs_367_cast)[name = tensor("channels_mean_367_cast")]; + tensor zero_mean_367_cast = sub(x = inputs_367_cast, y = channels_mean_367_cast)[name = tensor("zero_mean_367_cast")]; + tensor zero_mean_sq_367_cast = mul(x = zero_mean_367_cast, y = zero_mean_367_cast)[name = tensor("zero_mean_sq_367_cast")]; + tensor var_11961 = const()[name = tensor("op_11961"), val = tensor([1])]; + tensor var_11962_cast = reduce_mean(axes = var_11961, keep_dims = var_6860, x = zero_mean_sq_367_cast)[name = tensor("op_11962_cast")]; + tensor var_11963_to_fp16 = const()[name = tensor("op_11963_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_11964_cast = add(x = var_11962_cast, y = var_11963_to_fp16)[name = tensor("op_11964_cast")]; + tensor denom_367_epsilon_0_to_fp16 = const()[name = 
tensor("denom_367_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_367_cast = rsqrt(epsilon = denom_367_epsilon_0_to_fp16, x = var_11964_cast)[name = tensor("denom_367_cast")]; + tensor out_367_cast = mul(x = zero_mean_367_cast, y = denom_367_cast)[name = tensor("out_367_cast")]; + tensor var_11968_to_fp16 = const()[name = tensor("op_11968_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1792116608)))]; + tensor var_11969_cast = add(x = out_367_cast, y = var_11968_to_fp16)[name = tensor("op_11969_cast")]; + tensor var_11971_to_fp16 = const()[name = tensor("op_11971_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1792119232)))]; + tensor hidden_states_477_cast = mul(x = var_11969_cast, y = var_11971_to_fp16)[name = tensor("hidden_states_477_cast")]; + tensor var_11978 = const()[name = tensor("op_11978"), val = tensor([1, 1])]; + tensor var_11980 = const()[name = tensor("op_11980"), val = tensor([1, 1])]; + tensor q_245_pad_type_0 = const()[name = tensor("q_245_pad_type_0"), val = tensor("custom")]; + tensor q_245_pad_0 = const()[name = tensor("q_245_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1792121856))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1793350720))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_245_cast = conv(dilations = var_11980, groups = var_6865, pad = q_245_pad_0, pad_type = q_245_pad_type_0, strides = var_11978, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_477_cast)[name = tensor("q_245_cast")]; + tensor var_11984 = const()[name = tensor("op_11984"), val = tensor([1, 1])]; + tensor var_11986 = const()[name = tensor("op_11986"), val = tensor([1, 1])]; + tensor k_245_pad_type_0 = const()[name = tensor("k_245_pad_type_0"), val = tensor("custom")]; + tensor k_245_pad_0 = const()[name = tensor("k_245_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1793350912))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1794579776))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_245_cast = conv(dilations = var_11986, groups = var_6865, pad = k_245_pad_0, pad_type = k_245_pad_type_0, strides = var_11984, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_477_cast)[name = tensor("k_245_cast")]; + tensor var_11990 = const()[name = tensor("op_11990"), val = tensor([1, 1])]; + tensor var_11992 = const()[name = tensor("op_11992"), val = tensor([1, 1])]; + tensor v_245_pad_type_0 = const()[name = tensor("v_245_pad_type_0"), val = tensor("custom")]; + tensor v_245_pad_0 = const()[name = tensor("v_245_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1794579968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1796218432))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_245_cast = conv(dilations = var_11992, groups = var_6865, pad = v_245_pad_0, pad_type = v_245_pad_type_0, strides = var_11990, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_477_cast)[name = tensor("v_245_cast")]; + tensor var_11996 = const()[name = tensor("op_11996"), val = tensor([2, 20, 64, -1])]; + tensor var_11997_cast = reshape(shape = var_11996, x = q_245_cast)[name = tensor("op_11997_cast")]; + tensor var_11998 = const()[name = tensor("op_11998"), val = tensor([2, 20, 64, -1])]; + tensor var_11999_cast = reshape(shape = var_11998, x = k_245_cast)[name = tensor("op_11999_cast")]; + tensor var_12000 = const()[name = tensor("op_12000"), val = tensor([2, 20, 64, -1])]; + tensor var_12001_cast = reshape(shape = var_12000, x = v_245_cast)[name = tensor("op_12001_cast")]; + tensor attn_weights_489_transpose_x_0 = const()[name = tensor("attn_weights_489_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_489_transpose_y_0 = const()[name = tensor("attn_weights_489_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_489_cast = matmul(transpose_x = attn_weights_489_transpose_x_0, transpose_y = attn_weights_489_transpose_y_0, x = var_11997_cast, y = var_11999_cast)[name = tensor("attn_weights_489_cast")]; + tensor attn_weights_491_cast = mul(x = attn_weights_489_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_491_cast")]; + tensor var_12005_cast = softmax(axis = var_6849, x = attn_weights_491_cast)[name = tensor("op_12005_cast")]; + tensor attn_245_transpose_x_0 = const()[name = tensor("attn_245_transpose_x_0"), val = tensor(false)]; + tensor attn_245_transpose_y_0 = const()[name = tensor("attn_245_transpose_y_0"), val = tensor(true)]; + tensor attn_245_cast = matmul(transpose_x = attn_245_transpose_x_0, transpose_y = attn_245_transpose_y_0, x = var_12001_cast, y = var_12005_cast)[name = tensor("attn_245_cast")]; + tensor var_12009 = const()[name = tensor("op_12009"), val = tensor([2, 1280, 1, -1])]; + tensor input_691_cast = reshape(shape = var_12009, x = attn_245_cast)[name = tensor("input_691_cast")]; + tensor var_12014 = const()[name = tensor("op_12014"), val = tensor([1, 1])]; + tensor var_12016 = const()[name = tensor("op_12016"), val = tensor([1, 1])]; + tensor var_12018_pad_type_0 = const()[name = tensor("op_12018_pad_type_0"), val = tensor("custom")]; + tensor var_12018_pad_0 = const()[name = tensor("op_12018_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1796219008))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1797857472))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1797858048)))]; + tensor var_12018_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_bias_to_fp16, dilations = var_12016, groups = var_6865, pad = var_12018_pad_0, pad_type = var_12018_pad_type_0, strides = var_12014, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn1_to_out_0_weight_to_fp16_palettized, x = input_691_cast)[name = tensor("op_12018_cast")]; + tensor inputs_369_cast = add(x = var_12018_cast, y = inputs_367_cast)[name = tensor("inputs_369_cast")]; + tensor var_12022 = const()[name = tensor("op_12022"), val = tensor([1])]; + tensor channels_mean_369_cast = reduce_mean(axes = var_12022, keep_dims = var_6860, x = inputs_369_cast)[name = tensor("channels_mean_369_cast")]; + tensor zero_mean_369_cast = sub(x = inputs_369_cast, y = channels_mean_369_cast)[name = tensor("zero_mean_369_cast")]; + tensor zero_mean_sq_369_cast = mul(x = zero_mean_369_cast, y = zero_mean_369_cast)[name = tensor("zero_mean_sq_369_cast")]; + tensor var_12026 = const()[name = tensor("op_12026"), val = tensor([1])]; + tensor var_12027_cast = reduce_mean(axes = var_12026, keep_dims = var_6860, x = zero_mean_sq_369_cast)[name = tensor("op_12027_cast")]; + tensor var_12028_to_fp16 = const()[name = tensor("op_12028_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12029_cast = add(x = var_12027_cast, y = var_12028_to_fp16)[name = tensor("op_12029_cast")]; + tensor denom_369_epsilon_0_to_fp16 = const()[name = tensor("denom_369_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_369_cast = rsqrt(epsilon = denom_369_epsilon_0_to_fp16, x = var_12029_cast)[name = tensor("denom_369_cast")]; + tensor out_369_cast = mul(x = zero_mean_369_cast, y = denom_369_cast)[name = tensor("out_369_cast")]; + tensor var_12033_to_fp16 = const()[name = tensor("op_12033_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1797860672)))]; + tensor var_12034_cast = add(x = out_369_cast, y = var_12033_to_fp16)[name = tensor("op_12034_cast")]; + tensor var_12036_to_fp16 = const()[name = tensor("op_12036_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1797863296)))]; + tensor hidden_states_479_cast = mul(x = var_12034_cast, y = var_12036_to_fp16)[name = tensor("hidden_states_479_cast")]; + tensor var_12043 = const()[name = tensor("op_12043"), val = tensor([1, 1])]; + tensor var_12045 = const()[name = tensor("op_12045"), val = tensor([1, 1])]; + tensor q_247_pad_type_0 = const()[name = tensor("q_247_pad_type_0"), val = tensor("custom")]; + tensor q_247_pad_0 = const()[name = tensor("q_247_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1797865920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1799504384))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_247_cast = conv(dilations = var_12045, groups = var_6865, pad = q_247_pad_0, pad_type = q_247_pad_type_0, strides = var_12043, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_479_cast)[name = tensor("q_247_cast")]; + tensor var_12049 = const()[name = tensor("op_12049"), val = tensor([1, 1])]; + 
tensor var_12051 = const()[name = tensor("op_12051"), val = tensor([1, 1])]; + tensor k_247_pad_type_0 = const()[name = tensor("k_247_pad_type_0"), val = tensor("custom")]; + tensor k_247_pad_0 = const()[name = tensor("k_247_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1799504960))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1801471104))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_247_cast = conv(dilations = var_12051, groups = var_6865, pad = k_247_pad_0, pad_type = k_247_pad_type_0, strides = var_12049, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_247_cast")]; + tensor var_12055 = const()[name = tensor("op_12055"), val = tensor([1, 1])]; + tensor var_12057 = const()[name = tensor("op_12057"), val = tensor([1, 1])]; + tensor v_247_pad_type_0 = const()[name = tensor("v_247_pad_type_0"), val = tensor("custom")]; + tensor v_247_pad_0 = const()[name = tensor("v_247_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1801471296))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1802782080))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_247_cast = conv(dilations = var_12057, groups = var_6865, pad = v_247_pad_0, pad_type = v_247_pad_type_0, strides = var_12055, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_247_cast")]; + tensor var_12061 = const()[name = tensor("op_12061"), val = tensor([2, 20, 64, -1])]; + tensor var_12062_cast = reshape(shape = var_12061, x = q_247_cast)[name = tensor("op_12062_cast")]; + tensor var_12063 = const()[name = tensor("op_12063"), val = tensor([2, 20, 64, -1])]; + tensor var_12064_cast = reshape(shape = var_12063, x = k_247_cast)[name = tensor("op_12064_cast")]; + tensor var_12065 = const()[name = tensor("op_12065"), val = tensor([2, 20, 64, -1])]; + tensor var_12066_cast = reshape(shape = var_12065, x = v_247_cast)[name = tensor("op_12066_cast")]; + tensor attn_weights_493_transpose_x_0 = const()[name = tensor("attn_weights_493_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_493_transpose_y_0 = const()[name = tensor("attn_weights_493_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_493_cast = matmul(transpose_x = attn_weights_493_transpose_x_0, transpose_y = attn_weights_493_transpose_y_0, x = var_12062_cast, y = var_12064_cast)[name = tensor("attn_weights_493_cast")]; + tensor attn_weights_495_cast = mul(x = attn_weights_493_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_495_cast")]; + tensor var_12070_cast = softmax(axis = var_6849, x = attn_weights_495_cast)[name = tensor("op_12070_cast")]; + tensor attn_247_transpose_x_0 = const()[name = tensor("attn_247_transpose_x_0"), val = tensor(false)]; + tensor attn_247_transpose_y_0 = const()[name = 
tensor("attn_247_transpose_y_0"), val = tensor(true)]; + tensor attn_247_cast = matmul(transpose_x = attn_247_transpose_x_0, transpose_y = attn_247_transpose_y_0, x = var_12066_cast, y = var_12070_cast)[name = tensor("attn_247_cast")]; + tensor var_12074 = const()[name = tensor("op_12074"), val = tensor([2, 1280, 1, -1])]; + tensor input_693_cast = reshape(shape = var_12074, x = attn_247_cast)[name = tensor("input_693_cast")]; + tensor var_12079 = const()[name = tensor("op_12079"), val = tensor([1, 1])]; + tensor var_12081 = const()[name = tensor("op_12081"), val = tensor([1, 1])]; + tensor var_12083_pad_type_0 = const()[name = tensor("op_12083_pad_type_0"), val = tensor("custom")]; + tensor var_12083_pad_0 = const()[name = tensor("op_12083_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1802782208))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1803601472))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1803601600)))]; + tensor var_12083_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_bias_to_fp16, dilations = var_12081, groups = var_6865, pad = var_12083_pad_0, pad_type = var_12083_pad_type_0, strides = var_12079, weight = up_blocks_0_attentions_2_transformer_blocks_7_attn2_to_out_0_weight_to_fp16_palettized, x = input_693_cast)[name = tensor("op_12083_cast")]; + tensor inputs_371_cast = add(x = var_12083_cast, y = inputs_369_cast)[name = tensor("inputs_371_cast")]; + tensor var_12087 = const()[name = tensor("op_12087"), val = tensor([1])]; + tensor channels_mean_371_cast = reduce_mean(axes = var_12087, keep_dims = var_6860, x = inputs_371_cast)[name = tensor("channels_mean_371_cast")]; + tensor zero_mean_371_cast = sub(x = inputs_371_cast, y = channels_mean_371_cast)[name = tensor("zero_mean_371_cast")]; + tensor zero_mean_sq_371_cast = mul(x = zero_mean_371_cast, y = zero_mean_371_cast)[name = tensor("zero_mean_sq_371_cast")]; + tensor var_12091 = const()[name = tensor("op_12091"), val = tensor([1])]; + tensor var_12092_cast = reduce_mean(axes = var_12091, keep_dims = var_6860, x = zero_mean_sq_371_cast)[name = tensor("op_12092_cast")]; + tensor var_12093_to_fp16 = const()[name = tensor("op_12093_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12094_cast = add(x = var_12092_cast, y = var_12093_to_fp16)[name = tensor("op_12094_cast")]; + tensor denom_371_epsilon_0_to_fp16 = const()[name = tensor("denom_371_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_371_cast = rsqrt(epsilon = denom_371_epsilon_0_to_fp16, x = var_12094_cast)[name = tensor("denom_371_cast")]; + tensor out_371_cast = mul(x = zero_mean_371_cast, y = denom_371_cast)[name = tensor("out_371_cast")]; + tensor var_12098_to_fp16 = const()[name = tensor("op_12098_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1803604224)))]; + tensor var_12099_cast = add(x = out_371_cast, y = var_12098_to_fp16)[name = tensor("op_12099_cast")]; 
+ tensor var_12101_to_fp16 = const()[name = tensor("op_12101_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1803606848)))]; + tensor input_695_cast = mul(x = var_12099_cast, y = var_12101_to_fp16)[name = tensor("input_695_cast")]; + tensor var_12109 = const()[name = tensor("op_12109"), val = tensor([1, 1])]; + tensor var_12111 = const()[name = tensor("op_12111"), val = tensor([1, 1])]; + tensor var_12113_pad_type_0 = const()[name = tensor("op_12113_pad_type_0"), val = tensor("custom")]; + tensor var_12113_pad_0 = const()[name = tensor("op_12113_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1803609472))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1816716736))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1816717312)))]; + tensor var_12113_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_bias_to_fp16, dilations = var_12111, groups = var_6865, pad = var_12113_pad_0, pad_type = var_12113_pad_type_0, strides = var_12109, weight = up_blocks_0_attentions_2_transformer_blocks_7_ff_net_0_proj_weight_to_fp16_palettized, x = input_695_cast)[name = tensor("op_12113_cast")]; + tensor var_12114_split_sizes_0 = const()[name = tensor("op_12114_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_12114_axis_0 = const()[name = tensor("op_12114_axis_0"), val = tensor(1)]; + tensor var_12114_cast_0, tensor var_12114_cast_1 = split(axis = var_12114_axis_0, split_sizes = var_12114_split_sizes_0, x = var_12113_cast)[name = tensor("op_12114_cast")]; + tensor var_12116_mode_0 = const()[name = tensor("op_12116_mode_0"), val = tensor("EXACT")]; + tensor var_12116_cast = gelu(mode = var_12116_mode_0, x = var_12114_cast_1)[name = tensor("op_12116_cast")]; + tensor input_697_cast = mul(x = var_12114_cast_0, y = var_12116_cast)[name = tensor("input_697_cast")]; + tensor var_12120 = const()[name = tensor("op_12120"), val = tensor([1, 1])]; + tensor var_12122 = const()[name = tensor("op_12122"), val = tensor([1, 1])]; + tensor var_12124_pad_type_0 = const()[name = tensor("op_12124_pad_type_0"), val = tensor("custom")]; + tensor var_12124_pad_0 = const()[name = tensor("op_12124_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1816737856))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1823291520))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = 
tensor(1823292096)))]; + tensor var_12124_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_bias_to_fp16, dilations = var_12122, groups = var_6865, pad = var_12124_pad_0, pad_type = var_12124_pad_type_0, strides = var_12120, weight = up_blocks_0_attentions_2_transformer_blocks_7_ff_net_2_weight_to_fp16_palettized, x = input_697_cast)[name = tensor("op_12124_cast")]; + tensor inputs_373_cast = add(x = var_12124_cast, y = inputs_371_cast)[name = tensor("inputs_373_cast")]; + tensor var_12134 = const()[name = tensor("op_12134"), val = tensor([1])]; + tensor channels_mean_373_cast = reduce_mean(axes = var_12134, keep_dims = var_6860, x = inputs_373_cast)[name = tensor("channels_mean_373_cast")]; + tensor zero_mean_373_cast = sub(x = inputs_373_cast, y = channels_mean_373_cast)[name = tensor("zero_mean_373_cast")]; + tensor zero_mean_sq_373_cast = mul(x = zero_mean_373_cast, y = zero_mean_373_cast)[name = tensor("zero_mean_sq_373_cast")]; + tensor var_12138 = const()[name = tensor("op_12138"), val = tensor([1])]; + tensor var_12139_cast = reduce_mean(axes = var_12138, keep_dims = var_6860, x = zero_mean_sq_373_cast)[name = tensor("op_12139_cast")]; + tensor var_12140_to_fp16 = const()[name = tensor("op_12140_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12141_cast = add(x = var_12139_cast, y = var_12140_to_fp16)[name = tensor("op_12141_cast")]; + tensor denom_373_epsilon_0_to_fp16 = const()[name = tensor("denom_373_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_373_cast = rsqrt(epsilon = denom_373_epsilon_0_to_fp16, x = var_12141_cast)[name = tensor("denom_373_cast")]; + tensor out_373_cast = mul(x = zero_mean_373_cast, y = denom_373_cast)[name = tensor("out_373_cast")]; + tensor var_12145_to_fp16 = const()[name = tensor("op_12145_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1823294720)))]; + tensor var_12146_cast = add(x = out_373_cast, y = var_12145_to_fp16)[name = tensor("op_12146_cast")]; + tensor var_12148_to_fp16 = const()[name = tensor("op_12148_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1823297344)))]; + tensor hidden_states_483_cast = mul(x = var_12146_cast, y = var_12148_to_fp16)[name = tensor("hidden_states_483_cast")]; + tensor var_12155 = const()[name = tensor("op_12155"), val = tensor([1, 1])]; + tensor var_12157 = const()[name = tensor("op_12157"), val = tensor([1, 1])]; + tensor q_249_pad_type_0 = const()[name = tensor("q_249_pad_type_0"), val = tensor("custom")]; + tensor q_249_pad_0 = const()[name = tensor("q_249_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1823299968))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1824528832))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_249_cast = conv(dilations = var_12157, groups = var_6865, pad = q_249_pad_0, pad_type = q_249_pad_type_0, strides = var_12155, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_483_cast)[name = tensor("q_249_cast")]; + tensor var_12161 = const()[name = tensor("op_12161"), val = tensor([1, 1])]; + tensor var_12163 = const()[name = tensor("op_12163"), val = 
tensor([1, 1])]; + tensor k_249_pad_type_0 = const()[name = tensor("k_249_pad_type_0"), val = tensor("custom")]; + tensor k_249_pad_0 = const()[name = tensor("k_249_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1824529024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1825757888))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_249_cast = conv(dilations = var_12163, groups = var_6865, pad = k_249_pad_0, pad_type = k_249_pad_type_0, strides = var_12161, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_483_cast)[name = tensor("k_249_cast")]; + tensor var_12167 = const()[name = tensor("op_12167"), val = tensor([1, 1])]; + tensor var_12169 = const()[name = tensor("op_12169"), val = tensor([1, 1])]; + tensor v_249_pad_type_0 = const()[name = tensor("v_249_pad_type_0"), val = tensor("custom")]; + tensor v_249_pad_0 = const()[name = tensor("v_249_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1825758080))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1827396544))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_249_cast = conv(dilations = var_12169, groups = var_6865, pad = v_249_pad_0, pad_type = v_249_pad_type_0, strides = var_12167, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_483_cast)[name = tensor("v_249_cast")]; + tensor var_12173 = const()[name = tensor("op_12173"), val = tensor([2, 20, 64, -1])]; + tensor var_12174_cast = reshape(shape = var_12173, x = q_249_cast)[name = tensor("op_12174_cast")]; + tensor var_12175 = const()[name = tensor("op_12175"), val = tensor([2, 20, 64, -1])]; + tensor var_12176_cast = reshape(shape = var_12175, x = k_249_cast)[name = tensor("op_12176_cast")]; + tensor var_12177 = const()[name = tensor("op_12177"), val = tensor([2, 20, 64, -1])]; + tensor var_12178_cast = reshape(shape = var_12177, x = v_249_cast)[name = tensor("op_12178_cast")]; + tensor attn_weights_497_transpose_x_0 = const()[name = tensor("attn_weights_497_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_497_transpose_y_0 = const()[name = tensor("attn_weights_497_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_497_cast = matmul(transpose_x = attn_weights_497_transpose_x_0, transpose_y = attn_weights_497_transpose_y_0, x = var_12174_cast, y = var_12176_cast)[name = tensor("attn_weights_497_cast")]; + tensor attn_weights_499_cast = mul(x = attn_weights_497_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_499_cast")]; + tensor var_12182_cast = softmax(axis = var_6849, x = attn_weights_499_cast)[name = tensor("op_12182_cast")]; + tensor attn_249_transpose_x_0 = const()[name = tensor("attn_249_transpose_x_0"), val = tensor(false)]; + tensor attn_249_transpose_y_0 = const()[name = tensor("attn_249_transpose_y_0"), val = tensor(true)]; + tensor attn_249_cast = 
matmul(transpose_x = attn_249_transpose_x_0, transpose_y = attn_249_transpose_y_0, x = var_12178_cast, y = var_12182_cast)[name = tensor("attn_249_cast")]; + tensor var_12186 = const()[name = tensor("op_12186"), val = tensor([2, 1280, 1, -1])]; + tensor input_699_cast = reshape(shape = var_12186, x = attn_249_cast)[name = tensor("input_699_cast")]; + tensor var_12191 = const()[name = tensor("op_12191"), val = tensor([1, 1])]; + tensor var_12193 = const()[name = tensor("op_12193"), val = tensor([1, 1])]; + tensor var_12195_pad_type_0 = const()[name = tensor("op_12195_pad_type_0"), val = tensor("custom")]; + tensor var_12195_pad_0 = const()[name = tensor("op_12195_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1827397120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1829035584))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1829036160)))]; + tensor var_12195_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_bias_to_fp16, dilations = var_12193, groups = var_6865, pad = var_12195_pad_0, pad_type = var_12195_pad_type_0, strides = var_12191, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn1_to_out_0_weight_to_fp16_palettized, x = input_699_cast)[name = tensor("op_12195_cast")]; + tensor inputs_375_cast = add(x = var_12195_cast, y = inputs_373_cast)[name = tensor("inputs_375_cast")]; + tensor var_12199 = const()[name = tensor("op_12199"), val = tensor([1])]; + tensor channels_mean_375_cast = reduce_mean(axes = var_12199, keep_dims = var_6860, x = inputs_375_cast)[name = tensor("channels_mean_375_cast")]; + tensor zero_mean_375_cast = sub(x = inputs_375_cast, y = channels_mean_375_cast)[name = tensor("zero_mean_375_cast")]; + tensor zero_mean_sq_375_cast = mul(x = zero_mean_375_cast, y = zero_mean_375_cast)[name = tensor("zero_mean_sq_375_cast")]; + tensor var_12203 = const()[name = tensor("op_12203"), val = tensor([1])]; + tensor var_12204_cast = reduce_mean(axes = var_12203, keep_dims = var_6860, x = zero_mean_sq_375_cast)[name = tensor("op_12204_cast")]; + tensor var_12205_to_fp16 = const()[name = tensor("op_12205_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12206_cast = add(x = var_12204_cast, y = var_12205_to_fp16)[name = tensor("op_12206_cast")]; + tensor denom_375_epsilon_0_to_fp16 = const()[name = tensor("denom_375_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_375_cast = rsqrt(epsilon = denom_375_epsilon_0_to_fp16, x = var_12206_cast)[name = tensor("denom_375_cast")]; + tensor out_375_cast = mul(x = zero_mean_375_cast, y = denom_375_cast)[name = tensor("out_375_cast")]; + tensor var_12210_to_fp16 = const()[name = tensor("op_12210_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1829038784)))]; + tensor var_12211_cast = add(x = out_375_cast, y = var_12210_to_fp16)[name = tensor("op_12211_cast")]; + tensor var_12213_to_fp16 = const()[name = tensor("op_12213_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1829041408)))]; + tensor hidden_states_485_cast = mul(x = var_12211_cast, y = var_12213_to_fp16)[name = tensor("hidden_states_485_cast")]; + tensor var_12220 = const()[name = tensor("op_12220"), val = tensor([1, 1])]; + tensor var_12222 = const()[name = tensor("op_12222"), val = tensor([1, 1])]; + tensor q_251_pad_type_0 = const()[name = tensor("q_251_pad_type_0"), val = tensor("custom")]; + tensor q_251_pad_0 = const()[name = tensor("q_251_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1829044032))), lut = tensor([-0x1.648p-6, -0x1.c48p-8, 0x1.c1p-8, 0x1.644p-6]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_251_cast = conv(dilations = var_12222, groups = var_6865, pad = q_251_pad_0, pad_type = q_251_pad_type_0, strides = var_12220, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_485_cast)[name = tensor("q_251_cast")]; + tensor var_12226 = const()[name = tensor("op_12226"), val = tensor([1, 1])]; + tensor var_12228 = const()[name = tensor("op_12228"), val = tensor([1, 1])]; + tensor k_251_pad_type_0 = const()[name = tensor("k_251_pad_type_0"), val = tensor("custom")]; + tensor k_251_pad_0 = const()[name = tensor("k_251_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1829453696))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1831419840))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_251_cast = conv(dilations = var_12228, groups = var_6865, pad = k_251_pad_0, pad_type = k_251_pad_type_0, strides = var_12226, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_251_cast")]; + tensor var_12232 = const()[name = tensor("op_12232"), val = tensor([1, 1])]; + tensor var_12234 = const()[name = tensor("op_12234"), val = tensor([1, 1])]; + tensor v_251_pad_type_0 = const()[name = tensor("v_251_pad_type_0"), val = tensor("custom")]; + tensor v_251_pad_0 = const()[name = tensor("v_251_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1831420032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1832730816))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_251_cast = conv(dilations = var_12234, groups = var_6865, pad = v_251_pad_0, pad_type = v_251_pad_type_0, strides = var_12232, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_251_cast")]; + tensor var_12238 = const()[name = tensor("op_12238"), val = tensor([2, 20, 
64, -1])]; + tensor var_12239_cast = reshape(shape = var_12238, x = q_251_cast)[name = tensor("op_12239_cast")]; + tensor var_12240 = const()[name = tensor("op_12240"), val = tensor([2, 20, 64, -1])]; + tensor var_12241_cast = reshape(shape = var_12240, x = k_251_cast)[name = tensor("op_12241_cast")]; + tensor var_12242 = const()[name = tensor("op_12242"), val = tensor([2, 20, 64, -1])]; + tensor var_12243_cast = reshape(shape = var_12242, x = v_251_cast)[name = tensor("op_12243_cast")]; + tensor attn_weights_501_transpose_x_0 = const()[name = tensor("attn_weights_501_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_501_transpose_y_0 = const()[name = tensor("attn_weights_501_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_501_cast = matmul(transpose_x = attn_weights_501_transpose_x_0, transpose_y = attn_weights_501_transpose_y_0, x = var_12239_cast, y = var_12241_cast)[name = tensor("attn_weights_501_cast")]; + tensor attn_weights_503_cast = mul(x = attn_weights_501_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_503_cast")]; + tensor var_12247_cast = softmax(axis = var_6849, x = attn_weights_503_cast)[name = tensor("op_12247_cast")]; + tensor attn_251_transpose_x_0 = const()[name = tensor("attn_251_transpose_x_0"), val = tensor(false)]; + tensor attn_251_transpose_y_0 = const()[name = tensor("attn_251_transpose_y_0"), val = tensor(true)]; + tensor attn_251_cast = matmul(transpose_x = attn_251_transpose_x_0, transpose_y = attn_251_transpose_y_0, x = var_12243_cast, y = var_12247_cast)[name = tensor("attn_251_cast")]; + tensor var_12251 = const()[name = tensor("op_12251"), val = tensor([2, 1280, 1, -1])]; + tensor input_701_cast = reshape(shape = var_12251, x = attn_251_cast)[name = tensor("input_701_cast")]; + tensor var_12256 = const()[name = tensor("op_12256"), val = tensor([1, 1])]; + tensor var_12258 = const()[name = tensor("op_12258"), val = tensor([1, 1])]; + tensor var_12260_pad_type_0 = const()[name = tensor("op_12260_pad_type_0"), val = tensor("custom")]; + tensor var_12260_pad_0 = const()[name = tensor("op_12260_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1832730944))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1833959808))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1833960000)))]; + tensor var_12260_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_bias_to_fp16, dilations = var_12258, groups = var_6865, pad = var_12260_pad_0, pad_type = var_12260_pad_type_0, strides = var_12256, weight = up_blocks_0_attentions_2_transformer_blocks_8_attn2_to_out_0_weight_to_fp16_palettized, x = input_701_cast)[name = tensor("op_12260_cast")]; + tensor inputs_377_cast = add(x = var_12260_cast, y = inputs_375_cast)[name = tensor("inputs_377_cast")]; + tensor var_12264 = const()[name = tensor("op_12264"), val = tensor([1])]; + tensor channels_mean_377_cast = reduce_mean(axes = var_12264, keep_dims = var_6860, x = 
inputs_377_cast)[name = tensor("channels_mean_377_cast")]; + tensor zero_mean_377_cast = sub(x = inputs_377_cast, y = channels_mean_377_cast)[name = tensor("zero_mean_377_cast")]; + tensor zero_mean_sq_377_cast = mul(x = zero_mean_377_cast, y = zero_mean_377_cast)[name = tensor("zero_mean_sq_377_cast")]; + tensor var_12268 = const()[name = tensor("op_12268"), val = tensor([1])]; + tensor var_12269_cast = reduce_mean(axes = var_12268, keep_dims = var_6860, x = zero_mean_sq_377_cast)[name = tensor("op_12269_cast")]; + tensor var_12270_to_fp16 = const()[name = tensor("op_12270_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12271_cast = add(x = var_12269_cast, y = var_12270_to_fp16)[name = tensor("op_12271_cast")]; + tensor denom_377_epsilon_0_to_fp16 = const()[name = tensor("denom_377_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_377_cast = rsqrt(epsilon = denom_377_epsilon_0_to_fp16, x = var_12271_cast)[name = tensor("denom_377_cast")]; + tensor out_377_cast = mul(x = zero_mean_377_cast, y = denom_377_cast)[name = tensor("out_377_cast")]; + tensor var_12275_to_fp16 = const()[name = tensor("op_12275_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1833962624)))]; + tensor var_12276_cast = add(x = out_377_cast, y = var_12275_to_fp16)[name = tensor("op_12276_cast")]; + tensor var_12278_to_fp16 = const()[name = tensor("op_12278_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1833965248)))]; + tensor input_703_cast = mul(x = var_12276_cast, y = var_12278_to_fp16)[name = tensor("input_703_cast")]; + tensor var_12286 = const()[name = tensor("op_12286"), val = tensor([1, 1])]; + tensor var_12288 = const()[name = tensor("op_12288"), val = tensor([1, 1])]; + tensor var_12290_pad_type_0 = const()[name = tensor("op_12290_pad_type_0"), val = tensor("custom")]; + tensor var_12290_pad_0 = const()[name = tensor("op_12290_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1833967872))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1847075136))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1847075712)))]; + tensor var_12290_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_bias_to_fp16, dilations = var_12288, groups = var_6865, pad = var_12290_pad_0, pad_type = var_12290_pad_type_0, strides = var_12286, weight = up_blocks_0_attentions_2_transformer_blocks_8_ff_net_0_proj_weight_to_fp16_palettized, x = input_703_cast)[name = tensor("op_12290_cast")]; + tensor var_12291_split_sizes_0 = const()[name = tensor("op_12291_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_12291_axis_0 = const()[name = tensor("op_12291_axis_0"), val = tensor(1)]; + tensor var_12291_cast_0, tensor var_12291_cast_1 = split(axis = var_12291_axis_0, split_sizes = var_12291_split_sizes_0, x = var_12290_cast)[name = tensor("op_12291_cast")]; + tensor var_12293_mode_0 = const()[name = 
tensor("op_12293_mode_0"), val = tensor("EXACT")]; + tensor var_12293_cast = gelu(mode = var_12293_mode_0, x = var_12291_cast_1)[name = tensor("op_12293_cast")]; + tensor input_705_cast = mul(x = var_12291_cast_0, y = var_12293_cast)[name = tensor("input_705_cast")]; + tensor var_12297 = const()[name = tensor("op_12297"), val = tensor([1, 1])]; + tensor var_12299 = const()[name = tensor("op_12299"), val = tensor([1, 1])]; + tensor var_12301_pad_type_0 = const()[name = tensor("op_12301_pad_type_0"), val = tensor("custom")]; + tensor var_12301_pad_0 = const()[name = tensor("op_12301_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1847096256))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1853649920))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1853650496)))]; + tensor var_12301_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_bias_to_fp16, dilations = var_12299, groups = var_6865, pad = var_12301_pad_0, pad_type = var_12301_pad_type_0, strides = var_12297, weight = up_blocks_0_attentions_2_transformer_blocks_8_ff_net_2_weight_to_fp16_palettized, x = input_705_cast)[name = tensor("op_12301_cast")]; + tensor inputs_379_cast = add(x = var_12301_cast, y = inputs_377_cast)[name = tensor("inputs_379_cast")]; + tensor var_12311 = const()[name = tensor("op_12311"), val = tensor([1])]; + tensor channels_mean_379_cast = reduce_mean(axes = var_12311, keep_dims = var_6860, x = inputs_379_cast)[name = tensor("channels_mean_379_cast")]; + tensor zero_mean_379_cast = sub(x = inputs_379_cast, y = channels_mean_379_cast)[name = tensor("zero_mean_379_cast")]; + tensor zero_mean_sq_379_cast = mul(x = zero_mean_379_cast, y = zero_mean_379_cast)[name = tensor("zero_mean_sq_379_cast")]; + tensor var_12315 = const()[name = tensor("op_12315"), val = tensor([1])]; + tensor var_12316_cast = reduce_mean(axes = var_12315, keep_dims = var_6860, x = zero_mean_sq_379_cast)[name = tensor("op_12316_cast")]; + tensor var_12317_to_fp16 = const()[name = tensor("op_12317_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12318_cast = add(x = var_12316_cast, y = var_12317_to_fp16)[name = tensor("op_12318_cast")]; + tensor denom_379_epsilon_0_to_fp16 = const()[name = tensor("denom_379_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_379_cast = rsqrt(epsilon = denom_379_epsilon_0_to_fp16, x = var_12318_cast)[name = tensor("denom_379_cast")]; + tensor out_379_cast = mul(x = zero_mean_379_cast, y = denom_379_cast)[name = tensor("out_379_cast")]; + tensor var_12322_to_fp16 = const()[name = tensor("op_12322_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1853653120)))]; + tensor var_12323_cast = add(x = out_379_cast, y = var_12322_to_fp16)[name = tensor("op_12323_cast")]; + tensor var_12325_to_fp16 = const()[name = tensor("op_12325_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1853655744)))]; + tensor 
hidden_states_489_cast = mul(x = var_12323_cast, y = var_12325_to_fp16)[name = tensor("hidden_states_489_cast")]; + tensor var_12332 = const()[name = tensor("op_12332"), val = tensor([1, 1])]; + tensor var_12334 = const()[name = tensor("op_12334"), val = tensor([1, 1])]; + tensor q_253_pad_type_0 = const()[name = tensor("q_253_pad_type_0"), val = tensor("custom")]; + tensor q_253_pad_0 = const()[name = tensor("q_253_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1853658368))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1855296832))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_253_cast = conv(dilations = var_12334, groups = var_6865, pad = q_253_pad_0, pad_type = q_253_pad_type_0, strides = var_12332, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_489_cast)[name = tensor("q_253_cast")]; + tensor var_12338 = const()[name = tensor("op_12338"), val = tensor([1, 1])]; + tensor var_12340 = const()[name = tensor("op_12340"), val = tensor([1, 1])]; + tensor k_253_pad_type_0 = const()[name = tensor("k_253_pad_type_0"), val = tensor("custom")]; + tensor k_253_pad_0 = const()[name = tensor("k_253_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1855297408))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1856526272))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor k_253_cast = conv(dilations = var_12340, groups = var_6865, pad = k_253_pad_0, pad_type = k_253_pad_type_0, strides = var_12338, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_489_cast)[name = tensor("k_253_cast")]; + tensor var_12344 = const()[name = tensor("op_12344"), val = tensor([1, 1])]; + tensor var_12346 = const()[name = tensor("op_12346"), val = tensor([1, 1])]; + tensor v_253_pad_type_0 = const()[name = tensor("v_253_pad_type_0"), val = tensor("custom")]; + tensor v_253_pad_0 = const()[name = tensor("v_253_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1856526464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1858164928))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor v_253_cast = conv(dilations = var_12346, groups = var_6865, pad = v_253_pad_0, pad_type = v_253_pad_type_0, strides = var_12344, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_v_weight_to_fp16_palettized, x = hidden_states_489_cast)[name = tensor("v_253_cast")]; + tensor var_12350 = const()[name = tensor("op_12350"), val = tensor([2, 20, 64, -1])]; + tensor var_12351_cast = reshape(shape = var_12350, x = 
q_253_cast)[name = tensor("op_12351_cast")]; + tensor var_12352 = const()[name = tensor("op_12352"), val = tensor([2, 20, 64, -1])]; + tensor var_12353_cast = reshape(shape = var_12352, x = k_253_cast)[name = tensor("op_12353_cast")]; + tensor var_12354 = const()[name = tensor("op_12354"), val = tensor([2, 20, 64, -1])]; + tensor var_12355_cast = reshape(shape = var_12354, x = v_253_cast)[name = tensor("op_12355_cast")]; + tensor attn_weights_505_transpose_x_0 = const()[name = tensor("attn_weights_505_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_505_transpose_y_0 = const()[name = tensor("attn_weights_505_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_505_cast = matmul(transpose_x = attn_weights_505_transpose_x_0, transpose_y = attn_weights_505_transpose_y_0, x = var_12351_cast, y = var_12353_cast)[name = tensor("attn_weights_505_cast")]; + tensor attn_weights_507_cast = mul(x = attn_weights_505_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_507_cast")]; + tensor var_12359_cast = softmax(axis = var_6849, x = attn_weights_507_cast)[name = tensor("op_12359_cast")]; + tensor attn_253_transpose_x_0 = const()[name = tensor("attn_253_transpose_x_0"), val = tensor(false)]; + tensor attn_253_transpose_y_0 = const()[name = tensor("attn_253_transpose_y_0"), val = tensor(true)]; + tensor attn_253_cast = matmul(transpose_x = attn_253_transpose_x_0, transpose_y = attn_253_transpose_y_0, x = var_12355_cast, y = var_12359_cast)[name = tensor("attn_253_cast")]; + tensor var_12363 = const()[name = tensor("op_12363"), val = tensor([2, 1280, 1, -1])]; + tensor input_707_cast = reshape(shape = var_12363, x = attn_253_cast)[name = tensor("input_707_cast")]; + tensor var_12368 = const()[name = tensor("op_12368"), val = tensor([1, 1])]; + tensor var_12370 = const()[name = tensor("op_12370"), val = tensor([1, 1])]; + tensor var_12372_pad_type_0 = const()[name = tensor("op_12372_pad_type_0"), val = tensor("custom")]; + tensor var_12372_pad_0 = const()[name = tensor("op_12372_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1858165504))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1859803968))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1859804544)))]; + tensor var_12372_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_bias_to_fp16, dilations = var_12370, groups = var_6865, pad = var_12372_pad_0, pad_type = var_12372_pad_type_0, strides = var_12368, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn1_to_out_0_weight_to_fp16_palettized, x = input_707_cast)[name = tensor("op_12372_cast")]; + tensor inputs_381_cast = add(x = var_12372_cast, y = inputs_379_cast)[name = tensor("inputs_381_cast")]; + tensor var_12376 = const()[name = tensor("op_12376"), val = tensor([1])]; + tensor channels_mean_381_cast = reduce_mean(axes = var_12376, keep_dims = var_6860, x = inputs_381_cast)[name = tensor("channels_mean_381_cast")]; + tensor 
zero_mean_381_cast = sub(x = inputs_381_cast, y = channels_mean_381_cast)[name = tensor("zero_mean_381_cast")]; + tensor zero_mean_sq_381_cast = mul(x = zero_mean_381_cast, y = zero_mean_381_cast)[name = tensor("zero_mean_sq_381_cast")]; + tensor var_12380 = const()[name = tensor("op_12380"), val = tensor([1])]; + tensor var_12381_cast = reduce_mean(axes = var_12380, keep_dims = var_6860, x = zero_mean_sq_381_cast)[name = tensor("op_12381_cast")]; + tensor var_12382_to_fp16 = const()[name = tensor("op_12382_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12383_cast = add(x = var_12381_cast, y = var_12382_to_fp16)[name = tensor("op_12383_cast")]; + tensor denom_381_epsilon_0_to_fp16 = const()[name = tensor("denom_381_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_381_cast = rsqrt(epsilon = denom_381_epsilon_0_to_fp16, x = var_12383_cast)[name = tensor("denom_381_cast")]; + tensor out_381_cast = mul(x = zero_mean_381_cast, y = denom_381_cast)[name = tensor("out_381_cast")]; + tensor var_12387_to_fp16 = const()[name = tensor("op_12387_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1859807168)))]; + tensor var_12388_cast = add(x = out_381_cast, y = var_12387_to_fp16)[name = tensor("op_12388_cast")]; + tensor var_12390_to_fp16 = const()[name = tensor("op_12390_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1859809792)))]; + tensor hidden_states_491_cast = mul(x = var_12388_cast, y = var_12390_to_fp16)[name = tensor("hidden_states_491_cast")]; + tensor var_12397 = const()[name = tensor("op_12397"), val = tensor([1, 1])]; + tensor var_12399 = const()[name = tensor("op_12399"), val = tensor([1, 1])]; + tensor q_255_pad_type_0 = const()[name = tensor("q_255_pad_type_0"), val = tensor("custom")]; + tensor q_255_pad_0 = const()[name = tensor("q_255_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1859812416))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1860631680))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor q_255_cast = conv(dilations = var_12399, groups = var_6865, pad = q_255_pad_0, pad_type = q_255_pad_type_0, strides = var_12397, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_491_cast)[name = tensor("q_255_cast")]; + tensor var_12403 = const()[name = tensor("op_12403"), val = tensor([1, 1])]; + tensor var_12405 = const()[name = tensor("op_12405"), val = tensor([1, 1])]; + tensor k_255_pad_type_0 = const()[name = tensor("k_255_pad_type_0"), val = tensor("custom")]; + tensor k_255_pad_0 = const()[name = tensor("k_255_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1860631808))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1861942592))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor k_255_cast = conv(dilations = var_12405, groups = var_6865, 
pad = k_255_pad_0, pad_type = k_255_pad_type_0, strides = var_12403, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_255_cast")]; + tensor var_12409 = const()[name = tensor("op_12409"), val = tensor([1, 1])]; + tensor var_12411 = const()[name = tensor("op_12411"), val = tensor([1, 1])]; + tensor v_255_pad_type_0 = const()[name = tensor("v_255_pad_type_0"), val = tensor("custom")]; + tensor v_255_pad_0 = const()[name = tensor("v_255_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1861942720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1863253504))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([1280, 2048, 1, 1])]; + tensor v_255_cast = conv(dilations = var_12411, groups = var_6865, pad = v_255_pad_0, pad_type = v_255_pad_type_0, strides = var_12409, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_255_cast")]; + tensor var_12415 = const()[name = tensor("op_12415"), val = tensor([2, 20, 64, -1])]; + tensor var_12416_cast = reshape(shape = var_12415, x = q_255_cast)[name = tensor("op_12416_cast")]; + tensor var_12417 = const()[name = tensor("op_12417"), val = tensor([2, 20, 64, -1])]; + tensor var_12418_cast = reshape(shape = var_12417, x = k_255_cast)[name = tensor("op_12418_cast")]; + tensor var_12419 = const()[name = tensor("op_12419"), val = tensor([2, 20, 64, -1])]; + tensor var_12420_cast = reshape(shape = var_12419, x = v_255_cast)[name = tensor("op_12420_cast")]; + tensor attn_weights_509_transpose_x_0 = const()[name = tensor("attn_weights_509_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_509_transpose_y_0 = const()[name = tensor("attn_weights_509_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_509_cast = matmul(transpose_x = attn_weights_509_transpose_x_0, transpose_y = attn_weights_509_transpose_y_0, x = var_12416_cast, y = var_12418_cast)[name = tensor("attn_weights_509_cast")]; + tensor attn_weights_511_cast = mul(x = attn_weights_509_cast, y = var_6856_to_fp16)[name = tensor("attn_weights_511_cast")]; + tensor var_12424_cast = softmax(axis = var_6849, x = attn_weights_511_cast)[name = tensor("op_12424_cast")]; + tensor attn_255_transpose_x_0 = const()[name = tensor("attn_255_transpose_x_0"), val = tensor(false)]; + tensor attn_255_transpose_y_0 = const()[name = tensor("attn_255_transpose_y_0"), val = tensor(true)]; + tensor attn_255_cast = matmul(transpose_x = attn_255_transpose_x_0, transpose_y = attn_255_transpose_y_0, x = var_12420_cast, y = var_12424_cast)[name = tensor("attn_255_cast")]; + tensor var_12428 = const()[name = tensor("op_12428"), val = tensor([2, 1280, 1, -1])]; + tensor input_709_cast = reshape(shape = var_12428, x = attn_255_cast)[name = tensor("input_709_cast")]; + tensor var_12433 = const()[name = tensor("op_12433"), val = tensor([1, 1])]; + tensor var_12435 = const()[name = tensor("op_12435"), val = tensor([1, 1])]; + tensor var_12437_pad_type_0 = const()[name = tensor("op_12437_pad_type_0"), val = tensor("custom")]; + tensor var_12437_pad_0 = const()[name = tensor("op_12437_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1863253632))), lut = tensor([-0x1.2dp-7, -0x1.6ap-9, 0x1.6c4p-9, 0x1.2dp-7]), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([1280, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1863663296)))]; + tensor var_12437_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_bias_to_fp16, dilations = var_12435, groups = var_6865, pad = var_12437_pad_0, pad_type = var_12437_pad_type_0, strides = var_12433, weight = up_blocks_0_attentions_2_transformer_blocks_9_attn2_to_out_0_weight_to_fp16_palettized, x = input_709_cast)[name = tensor("op_12437_cast")]; + tensor inputs_383_cast = add(x = var_12437_cast, y = inputs_381_cast)[name = tensor("inputs_383_cast")]; + tensor var_12441 = const()[name = tensor("op_12441"), val = tensor([1])]; + tensor channels_mean_383_cast = reduce_mean(axes = var_12441, keep_dims = var_6860, x = inputs_383_cast)[name = tensor("channels_mean_383_cast")]; + tensor zero_mean_383_cast = sub(x = inputs_383_cast, y = channels_mean_383_cast)[name = tensor("zero_mean_383_cast")]; + tensor zero_mean_sq_383_cast = mul(x = zero_mean_383_cast, y = zero_mean_383_cast)[name = tensor("zero_mean_sq_383_cast")]; + tensor var_12445 = const()[name = tensor("op_12445"), val = tensor([1])]; + tensor var_12446_cast = reduce_mean(axes = var_12445, keep_dims = var_6860, x = zero_mean_sq_383_cast)[name = tensor("op_12446_cast")]; + tensor var_12447_to_fp16 = const()[name = tensor("op_12447_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12448_cast = add(x = var_12446_cast, y = var_12447_to_fp16)[name = tensor("op_12448_cast")]; + tensor denom_383_epsilon_0_to_fp16 = const()[name = tensor("denom_383_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_383_cast = rsqrt(epsilon = denom_383_epsilon_0_to_fp16, x = var_12448_cast)[name = tensor("denom_383_cast")]; + tensor out_383_cast = mul(x = zero_mean_383_cast, y = denom_383_cast)[name = tensor("out_383_cast")]; + tensor var_12452_to_fp16 = const()[name = tensor("op_12452_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1863665920)))]; + tensor var_12453_cast = add(x = out_383_cast, y = var_12452_to_fp16)[name = tensor("op_12453_cast")]; + tensor var_12455_to_fp16 = const()[name = tensor("op_12455_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1863668544)))]; + tensor input_711_cast = mul(x = var_12453_cast, y = var_12455_to_fp16)[name = tensor("input_711_cast")]; + tensor var_12463 = const()[name = tensor("op_12463"), val = tensor([1, 1])]; + tensor var_12465 = const()[name = tensor("op_12465"), val = tensor([1, 1])]; + tensor var_12467_pad_type_0 = const()[name = tensor("op_12467_pad_type_0"), val = tensor("custom")]; + tensor var_12467_pad_0 = const()[name = tensor("op_12467_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), 
offset = tensor(1863671168))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1876778432))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized"), shape = tensor([10240, 1280, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1876779008)))]; + tensor var_12467_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_bias_to_fp16, dilations = var_12465, groups = var_6865, pad = var_12467_pad_0, pad_type = var_12467_pad_type_0, strides = var_12463, weight = up_blocks_0_attentions_2_transformer_blocks_9_ff_net_0_proj_weight_to_fp16_palettized, x = input_711_cast)[name = tensor("op_12467_cast")]; + tensor var_12468_split_sizes_0 = const()[name = tensor("op_12468_split_sizes_0"), val = tensor([5120, 5120])]; + tensor var_12468_axis_0 = const()[name = tensor("op_12468_axis_0"), val = tensor(1)]; + tensor var_12468_cast_0, tensor var_12468_cast_1 = split(axis = var_12468_axis_0, split_sizes = var_12468_split_sizes_0, x = var_12467_cast)[name = tensor("op_12468_cast")]; + tensor var_12470_mode_0 = const()[name = tensor("op_12470_mode_0"), val = tensor("EXACT")]; + tensor var_12470_cast = gelu(mode = var_12470_mode_0, x = var_12468_cast_1)[name = tensor("op_12470_cast")]; + tensor input_713_cast = mul(x = var_12468_cast_0, y = var_12470_cast)[name = tensor("input_713_cast")]; + tensor var_12474 = const()[name = tensor("op_12474"), val = tensor([1, 1])]; + tensor var_12476 = const()[name = tensor("op_12476"), val = tensor([1, 1])]; + tensor var_12478_pad_type_0 = const()[name = tensor("op_12478_pad_type_0"), val = tensor("custom")]; + tensor var_12478_pad_0 = const()[name = tensor("op_12478_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1876799552))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1883353216))), name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized"), shape = tensor([1280, 5120, 1, 1])]; + tensor up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1883353792)))]; + tensor var_12478_cast = conv(bias = up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_bias_to_fp16, dilations = var_12476, groups = var_6865, pad = var_12478_pad_0, pad_type = var_12478_pad_type_0, strides = var_12474, weight = up_blocks_0_attentions_2_transformer_blocks_9_ff_net_2_weight_to_fp16_palettized, x = input_713_cast)[name = tensor("op_12478_cast")]; + tensor hidden_states_495_cast = add(x = var_12478_cast, y = inputs_383_cast)[name = tensor("hidden_states_495_cast")]; + tensor var_12480 = const()[name = tensor("op_12480"), val = tensor([2, 1280, 32, 32])]; + tensor input_715_cast = reshape(shape = var_12480, x = hidden_states_495_cast)[name = tensor("input_715_cast")]; + tensor var_12484 = const()[name = tensor("op_12484"), val = tensor([1, 1])]; + tensor var_12486 = const()[name = tensor("op_12486"), 
val = tensor([1, 1])]; + tensor hidden_states_497_pad_type_0 = const()[name = tensor("hidden_states_497_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_497_pad_0 = const()[name = tensor("hidden_states_497_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_0_attentions_2_proj_out_weight_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_proj_out_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1883356416)))]; + tensor up_blocks_0_attentions_2_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_0_attentions_2_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1886633280)))]; + tensor hidden_states_497_cast = conv(bias = up_blocks_0_attentions_2_proj_out_bias_to_fp16, dilations = var_12486, groups = var_6865, pad = hidden_states_497_pad_0, pad_type = hidden_states_497_pad_type_0, strides = var_12484, weight = up_blocks_0_attentions_2_proj_out_weight_to_fp16, x = input_715_cast)[name = tensor("hidden_states_497_cast")]; + tensor input_717_cast = add(x = hidden_states_497_cast, y = hidden_states_431_cast)[name = tensor("input_717_cast")]; + tensor input_719_scale_factor_height_0 = const()[name = tensor("input_719_scale_factor_height_0"), val = tensor(0x1p+1)]; + tensor input_719_scale_factor_width_0 = const()[name = tensor("input_719_scale_factor_width_0"), val = tensor(0x1p+1)]; + tensor input_719_cast = upsample_nearest_neighbor(scale_factor_height = input_719_scale_factor_height_0, scale_factor_width = input_719_scale_factor_width_0, x = input_717_cast)[name = tensor("input_719_cast")]; + tensor var_12495 = const()[name = tensor("op_12495"), val = tensor([1, 1])]; + tensor var_12497 = const()[name = tensor("op_12497"), val = tensor([1, 1])]; + tensor hidden_states_499_pad_type_0 = const()[name = tensor("hidden_states_499_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_499_pad_0 = const()[name = tensor("hidden_states_499_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_0_upsamplers_0_conv_weight_to_fp16 = const()[name = tensor("up_blocks_0_upsamplers_0_conv_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1886635904)))]; + tensor up_blocks_0_upsamplers_0_conv_bias_to_fp16 = const()[name = tensor("up_blocks_0_upsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1916127168)))]; + tensor hidden_states_499_cast = conv(bias = up_blocks_0_upsamplers_0_conv_bias_to_fp16, dilations = var_12497, groups = var_6865, pad = hidden_states_499_pad_0, pad_type = hidden_states_499_pad_type_0, strides = var_12495, weight = up_blocks_0_upsamplers_0_conv_weight_to_fp16, x = input_719_cast)[name = tensor("hidden_states_499_cast")]; + tensor var_12502 = const()[name = tensor("op_12502"), val = tensor(3)]; + tensor var_12513 = const()[name = tensor("op_12513"), val = tensor(true)]; + tensor var_12518 = const()[name = tensor("op_12518"), val = tensor(1)]; + tensor input_721_interleave_0 = const()[name = tensor("input_721_interleave_0"), val = tensor(false)]; + tensor input_721_cast = concat(axis = var_12518, interleave = input_721_interleave_0, values = (hidden_states_499_cast, input_113_cast))[name = tensor("input_721_cast")]; + tensor reshape_120_shape_0 = const()[name = tensor("reshape_120_shape_0"), val = tensor([2, 32, 60, 64, 64])]; + tensor reshape_120_cast = reshape(shape = reshape_120_shape_0, x = input_721_cast)[name 
= tensor("reshape_120_cast")]; + tensor reduce_mean_90_axes_0 = const()[name = tensor("reduce_mean_90_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_90_keep_dims_0 = const()[name = tensor("reduce_mean_90_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_90_cast = reduce_mean(axes = reduce_mean_90_axes_0, keep_dims = reduce_mean_90_keep_dims_0, x = reshape_120_cast)[name = tensor("reduce_mean_90_cast")]; + tensor sub_60_cast = sub(x = reshape_120_cast, y = reduce_mean_90_cast)[name = tensor("sub_60_cast")]; + tensor square_30_cast = square(x = sub_60_cast)[name = tensor("square_30_cast")]; + tensor reduce_mean_92_axes_0 = const()[name = tensor("reduce_mean_92_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_92_keep_dims_0 = const()[name = tensor("reduce_mean_92_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_92_cast = reduce_mean(axes = reduce_mean_92_axes_0, keep_dims = reduce_mean_92_keep_dims_0, x = square_30_cast)[name = tensor("reduce_mean_92_cast")]; + tensor add_60_y_0_to_fp16 = const()[name = tensor("add_60_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_60_cast = add(x = reduce_mean_92_cast, y = add_60_y_0_to_fp16)[name = tensor("add_60_cast")]; + tensor sqrt_30_cast = sqrt(x = add_60_cast)[name = tensor("sqrt_30_cast")]; + tensor real_div_30_cast = real_div(x = sub_60_cast, y = sqrt_30_cast)[name = tensor("real_div_30_cast")]; + tensor reshape_121_shape_0 = const()[name = tensor("reshape_121_shape_0"), val = tensor([2, 1920, 64, 64])]; + tensor reshape_121_cast = reshape(shape = reshape_121_shape_0, x = real_div_30_cast)[name = tensor("reshape_121_cast")]; + tensor add_61_gamma_0_to_fp16 = const()[name = tensor("add_61_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1916129792)))]; + tensor add_61_beta_0_to_fp16 = const()[name = tensor("add_61_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1916133696)))]; + tensor add_61_epsilon_0_to_fp16 = const()[name = tensor("add_61_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_61_cast = batch_norm(beta = add_61_beta_0_to_fp16, epsilon = add_61_epsilon_0_to_fp16, gamma = add_61_gamma_0_to_fp16, mean = add_55_mean_0_to_fp16, variance = add_55_variance_0_to_fp16, x = reshape_121_cast)[name = tensor("add_61_cast")]; + tensor input_725_cast = silu(x = add_61_cast)[name = tensor("input_725_cast")]; + tensor var_12547 = const()[name = tensor("op_12547"), val = tensor([1, 1])]; + tensor var_12549 = const()[name = tensor("op_12549"), val = tensor([1, 1])]; + tensor hidden_states_501_pad_type_0 = const()[name = tensor("hidden_states_501_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_501_pad_0 = const()[name = tensor("hidden_states_501_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_0_conv1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1916137600))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1927196864))), name = tensor("up_blocks_1_resnets_0_conv1_weight_to_fp16_palettized"), shape = tensor([640, 1920, 3, 3])]; + tensor up_blocks_1_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1927197440)))]; + tensor hidden_states_501_cast = conv(bias = 
up_blocks_1_resnets_0_conv1_bias_to_fp16, dilations = var_12549, groups = var_12518, pad = hidden_states_501_pad_0, pad_type = hidden_states_501_pad_type_0, strides = var_12547, weight = up_blocks_1_resnets_0_conv1_weight_to_fp16_palettized, x = input_725_cast)[name = tensor("hidden_states_501_cast")]; + tensor var_12555 = const()[name = tensor("op_12555"), val = tensor([1, 1])]; + tensor var_12557 = const()[name = tensor("op_12557"), val = tensor([1, 1])]; + tensor temb_23_pad_type_0 = const()[name = tensor("temb_23_pad_type_0"), val = tensor("custom")]; + tensor temb_23_pad_0 = const()[name = tensor("temb_23_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1927198784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1928018048))), name = tensor("up_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized"), shape = tensor([640, 1280, 1, 1])]; + tensor up_blocks_1_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1928018624)))]; + tensor temb_23_cast = conv(bias = up_blocks_1_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_12557, groups = var_12518, pad = temb_23_pad_0, pad_type = temb_23_pad_type_0, strides = var_12555, weight = up_blocks_1_resnets_0_time_emb_proj_weight_to_fp16_palettized, x = input_21_cast)[name = tensor("temb_23_cast")]; + tensor input_729_cast = add(x = hidden_states_501_cast, y = temb_23_cast)[name = tensor("input_729_cast")]; + tensor reshape_124_shape_0 = const()[name = tensor("reshape_124_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_124_cast = reshape(shape = reshape_124_shape_0, x = input_729_cast)[name = tensor("reshape_124_cast")]; + tensor reduce_mean_93_axes_0 = const()[name = tensor("reduce_mean_93_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_93_keep_dims_0 = const()[name = tensor("reduce_mean_93_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_93_cast = reduce_mean(axes = reduce_mean_93_axes_0, keep_dims = reduce_mean_93_keep_dims_0, x = reshape_124_cast)[name = tensor("reduce_mean_93_cast")]; + tensor sub_62_cast = sub(x = reshape_124_cast, y = reduce_mean_93_cast)[name = tensor("sub_62_cast")]; + tensor square_31_cast = square(x = sub_62_cast)[name = tensor("square_31_cast")]; + tensor reduce_mean_95_axes_0 = const()[name = tensor("reduce_mean_95_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_95_keep_dims_0 = const()[name = tensor("reduce_mean_95_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_95_cast = reduce_mean(axes = reduce_mean_95_axes_0, keep_dims = reduce_mean_95_keep_dims_0, x = square_31_cast)[name = tensor("reduce_mean_95_cast")]; + tensor add_62_y_0_to_fp16 = const()[name = tensor("add_62_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_62_cast = add(x = reduce_mean_95_cast, y = add_62_y_0_to_fp16)[name = tensor("add_62_cast")]; + tensor sqrt_31_cast = sqrt(x = add_62_cast)[name = tensor("sqrt_31_cast")]; + tensor real_div_31_cast = real_div(x = sub_62_cast, y = sqrt_31_cast)[name = tensor("real_div_31_cast")]; + tensor reshape_125_shape_0 = const()[name = tensor("reshape_125_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_125_cast = reshape(shape = reshape_125_shape_0, x = real_div_31_cast)[name 
= tensor("reshape_125_cast")]; + tensor add_63_gamma_0_to_fp16 = const()[name = tensor("add_63_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1928019968)))]; + tensor add_63_beta_0_to_fp16 = const()[name = tensor("add_63_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1928021312)))]; + tensor add_63_epsilon_0_to_fp16 = const()[name = tensor("add_63_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_63_cast = batch_norm(beta = add_63_beta_0_to_fp16, epsilon = add_63_epsilon_0_to_fp16, gamma = add_63_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_125_cast)[name = tensor("add_63_cast")]; + tensor input_733_cast = silu(x = add_63_cast)[name = tensor("input_733_cast")]; + tensor var_12567 = const()[name = tensor("op_12567"), val = tensor([1, 1])]; + tensor var_12569 = const()[name = tensor("op_12569"), val = tensor([1, 1])]; + tensor hidden_states_503_pad_type_0 = const()[name = tensor("hidden_states_503_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_503_pad_0 = const()[name = tensor("hidden_states_503_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_0_conv2_weight_to_fp16 = const()[name = tensor("up_blocks_1_resnets_0_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1928022656)))]; + tensor up_blocks_1_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1935395520)))]; + tensor hidden_states_503_cast = conv(bias = up_blocks_1_resnets_0_conv2_bias_to_fp16, dilations = var_12569, groups = var_12518, pad = hidden_states_503_pad_0, pad_type = hidden_states_503_pad_type_0, strides = var_12567, weight = up_blocks_1_resnets_0_conv2_weight_to_fp16, x = input_733_cast)[name = tensor("hidden_states_503_cast")]; + tensor var_12574 = const()[name = tensor("op_12574"), val = tensor([1, 1])]; + tensor var_12576 = const()[name = tensor("op_12576"), val = tensor([1, 1])]; + tensor x_11_pad_type_0 = const()[name = tensor("x_11_pad_type_0"), val = tensor("custom")]; + tensor x_11_pad_0 = const()[name = tensor("x_11_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_0_conv_shortcut_weight_to_fp16 = const()[name = tensor("up_blocks_1_resnets_0_conv_shortcut_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1935396864)))]; + tensor up_blocks_1_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1937854528)))]; + tensor x_11_cast = conv(bias = up_blocks_1_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_12576, groups = var_12518, pad = x_11_pad_0, pad_type = x_11_pad_type_0, strides = var_12574, weight = up_blocks_1_resnets_0_conv_shortcut_weight_to_fp16, x = input_721_cast)[name = tensor("x_11_cast")]; + tensor hidden_states_505_cast = add(x = x_11_cast, y = hidden_states_503_cast)[name = tensor("hidden_states_505_cast")]; + tensor reshape_128_shape_0 = const()[name = tensor("reshape_128_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_128_cast = reshape(shape = reshape_128_shape_0, x = hidden_states_505_cast)[name = tensor("reshape_128_cast")]; + tensor reduce_mean_96_axes_0 = 
const()[name = tensor("reduce_mean_96_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_96_keep_dims_0 = const()[name = tensor("reduce_mean_96_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_96_cast = reduce_mean(axes = reduce_mean_96_axes_0, keep_dims = reduce_mean_96_keep_dims_0, x = reshape_128_cast)[name = tensor("reduce_mean_96_cast")]; + tensor sub_64_cast = sub(x = reshape_128_cast, y = reduce_mean_96_cast)[name = tensor("sub_64_cast")]; + tensor square_32_cast = square(x = sub_64_cast)[name = tensor("square_32_cast")]; + tensor reduce_mean_98_axes_0 = const()[name = tensor("reduce_mean_98_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_98_keep_dims_0 = const()[name = tensor("reduce_mean_98_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_98_cast = reduce_mean(axes = reduce_mean_98_axes_0, keep_dims = reduce_mean_98_keep_dims_0, x = square_32_cast)[name = tensor("reduce_mean_98_cast")]; + tensor add_64_y_0_to_fp16 = const()[name = tensor("add_64_y_0_to_fp16"), val = tensor(0x1.1p-20)]; + tensor add_64_cast = add(x = reduce_mean_98_cast, y = add_64_y_0_to_fp16)[name = tensor("add_64_cast")]; + tensor sqrt_32_cast = sqrt(x = add_64_cast)[name = tensor("sqrt_32_cast")]; + tensor real_div_32_cast = real_div(x = sub_64_cast, y = sqrt_32_cast)[name = tensor("real_div_32_cast")]; + tensor reshape_129_shape_0 = const()[name = tensor("reshape_129_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_129_cast = reshape(shape = reshape_129_shape_0, x = real_div_32_cast)[name = tensor("reshape_129_cast")]; + tensor add_65_gamma_0_to_fp16 = const()[name = tensor("add_65_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1937855872)))]; + tensor add_65_beta_0_to_fp16 = const()[name = tensor("add_65_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1937857216)))]; + tensor add_65_epsilon_0_to_fp16 = const()[name = tensor("add_65_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_65_cast = batch_norm(beta = add_65_beta_0_to_fp16, epsilon = add_65_epsilon_0_to_fp16, gamma = add_65_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_129_cast)[name = tensor("add_65_cast")]; + tensor var_12598 = const()[name = tensor("op_12598"), val = tensor([1, 1])]; + tensor var_12600 = const()[name = tensor("op_12600"), val = tensor([1, 1])]; + tensor hidden_states_507_pad_type_0 = const()[name = tensor("hidden_states_507_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_507_pad_0 = const()[name = tensor("hidden_states_507_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_proj_in_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_proj_in_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1937858560)))]; + tensor up_blocks_1_attentions_0_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1938677824)))]; + tensor hidden_states_507_cast = conv(bias = up_blocks_1_attentions_0_proj_in_bias_to_fp16, dilations = var_12600, groups = var_12518, pad = hidden_states_507_pad_0, pad_type = hidden_states_507_pad_type_0, strides = var_12598, weight = up_blocks_1_attentions_0_proj_in_weight_to_fp16, x = add_65_cast)[name = tensor("hidden_states_507_cast")]; + tensor var_12605 = const()[name = 
tensor("op_12605"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_385_cast = reshape(shape = var_12605, x = hidden_states_507_cast)[name = tensor("inputs_385_cast")]; + tensor var_12615 = const()[name = tensor("op_12615"), val = tensor([1])]; + tensor channels_mean_385_cast = reduce_mean(axes = var_12615, keep_dims = var_12513, x = inputs_385_cast)[name = tensor("channels_mean_385_cast")]; + tensor zero_mean_385_cast = sub(x = inputs_385_cast, y = channels_mean_385_cast)[name = tensor("zero_mean_385_cast")]; + tensor zero_mean_sq_385_cast = mul(x = zero_mean_385_cast, y = zero_mean_385_cast)[name = tensor("zero_mean_sq_385_cast")]; + tensor var_12619 = const()[name = tensor("op_12619"), val = tensor([1])]; + tensor var_12620_cast = reduce_mean(axes = var_12619, keep_dims = var_12513, x = zero_mean_sq_385_cast)[name = tensor("op_12620_cast")]; + tensor var_12621_to_fp16 = const()[name = tensor("op_12621_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12622_cast = add(x = var_12620_cast, y = var_12621_to_fp16)[name = tensor("op_12622_cast")]; + tensor denom_385_epsilon_0_to_fp16 = const()[name = tensor("denom_385_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_385_cast = rsqrt(epsilon = denom_385_epsilon_0_to_fp16, x = var_12622_cast)[name = tensor("denom_385_cast")]; + tensor out_385_cast = mul(x = zero_mean_385_cast, y = denom_385_cast)[name = tensor("out_385_cast")]; + tensor var_12626_to_fp16 = const()[name = tensor("op_12626_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1938679168)))]; + tensor var_12627_cast = add(x = out_385_cast, y = var_12626_to_fp16)[name = tensor("op_12627_cast")]; + tensor var_12629_to_fp16 = const()[name = tensor("op_12629_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1938680512)))]; + tensor hidden_states_509_cast = mul(x = var_12627_cast, y = var_12629_to_fp16)[name = tensor("hidden_states_509_cast")]; + tensor var_12636 = const()[name = tensor("op_12636"), val = tensor([1, 1])]; + tensor var_12638 = const()[name = tensor("op_12638"), val = tensor([1, 1])]; + tensor q_257_pad_type_0 = const()[name = tensor("q_257_pad_type_0"), val = tensor("custom")]; + tensor q_257_pad_0 = const()[name = tensor("q_257_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1938681856))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1939091520))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_257_cast = conv(dilations = var_12638, groups = var_12518, pad = q_257_pad_0, pad_type = q_257_pad_type_0, strides = var_12636, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_509_cast)[name = tensor("q_257_cast")]; + tensor var_12642 = const()[name = tensor("op_12642"), val = tensor([1, 1])]; + tensor var_12644 = const()[name = tensor("op_12644"), val = tensor([1, 1])]; + tensor k_257_pad_type_0 = const()[name = tensor("k_257_pad_type_0"), val = tensor("custom")]; + tensor k_257_pad_0 = const()[name = tensor("k_257_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices 
= tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1939092096))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1939501760))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_257_cast = conv(dilations = var_12644, groups = var_12518, pad = k_257_pad_0, pad_type = k_257_pad_type_0, strides = var_12642, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_509_cast)[name = tensor("k_257_cast")]; + tensor var_12648 = const()[name = tensor("op_12648"), val = tensor([1, 1])]; + tensor var_12650 = const()[name = tensor("op_12650"), val = tensor([1, 1])]; + tensor v_257_pad_type_0 = const()[name = tensor("v_257_pad_type_0"), val = tensor("custom")]; + tensor v_257_pad_0 = const()[name = tensor("v_257_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1939502336)))]; + tensor v_257_cast = conv(dilations = var_12650, groups = var_12518, pad = v_257_pad_0, pad_type = v_257_pad_type_0, strides = var_12648, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_v_weight_to_fp16, x = hidden_states_509_cast)[name = tensor("v_257_cast")]; + tensor var_12654 = const()[name = tensor("op_12654"), val = tensor([2, 10, 64, -1])]; + tensor var_12655_cast = reshape(shape = var_12654, x = q_257_cast)[name = tensor("op_12655_cast")]; + tensor var_12656 = const()[name = tensor("op_12656"), val = tensor([2, 10, 64, -1])]; + tensor var_12657_cast = reshape(shape = var_12656, x = k_257_cast)[name = tensor("op_12657_cast")]; + tensor var_12658 = const()[name = tensor("op_12658"), val = tensor([2, 10, 64, -1])]; + tensor var_12659_cast = reshape(shape = var_12658, x = v_257_cast)[name = tensor("op_12659_cast")]; + tensor attn_weights_513_transpose_x_0 = const()[name = tensor("attn_weights_513_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_513_transpose_y_0 = const()[name = tensor("attn_weights_513_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_513_cast = matmul(transpose_x = attn_weights_513_transpose_x_0, transpose_y = attn_weights_513_transpose_y_0, x = var_12655_cast, y = var_12657_cast)[name = tensor("attn_weights_513_cast")]; + tensor var_12509_to_fp16 = const()[name = tensor("op_12509_to_fp16"), val = tensor(0x1p-3)]; + tensor attn_weights_515_cast = mul(x = attn_weights_513_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_515_cast")]; + tensor var_12663_cast = softmax(axis = var_12502, x = attn_weights_515_cast)[name = tensor("op_12663_cast")]; + tensor attn_257_transpose_x_0 = const()[name = tensor("attn_257_transpose_x_0"), val = tensor(false)]; + tensor attn_257_transpose_y_0 = const()[name = tensor("attn_257_transpose_y_0"), val = tensor(true)]; + tensor attn_257_cast = matmul(transpose_x = attn_257_transpose_x_0, transpose_y = attn_257_transpose_y_0, x = var_12659_cast, y = var_12663_cast)[name = tensor("attn_257_cast")]; + tensor var_12667 = const()[name = tensor("op_12667"), val = tensor([2, 640, 1, -1])]; + tensor input_737_cast = reshape(shape = var_12667, x = attn_257_cast)[name = tensor("input_737_cast")]; + tensor var_12672 = const()[name = tensor("op_12672"), val = tensor([1, 1])]; 
+ tensor var_12674 = const()[name = tensor("op_12674"), val = tensor([1, 1])]; + tensor var_12676_pad_type_0 = const()[name = tensor("op_12676_pad_type_0"), val = tensor("custom")]; + tensor var_12676_pad_0 = const()[name = tensor("op_12676_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1940321600)))]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1941140864)))]; + tensor var_12676_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_12674, groups = var_12518, pad = var_12676_pad_0, pad_type = var_12676_pad_type_0, strides = var_12672, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn1_to_out_0_weight_to_fp16, x = input_737_cast)[name = tensor("op_12676_cast")]; + tensor inputs_387_cast = add(x = var_12676_cast, y = inputs_385_cast)[name = tensor("inputs_387_cast")]; + tensor var_12680 = const()[name = tensor("op_12680"), val = tensor([1])]; + tensor channels_mean_387_cast = reduce_mean(axes = var_12680, keep_dims = var_12513, x = inputs_387_cast)[name = tensor("channels_mean_387_cast")]; + tensor zero_mean_387_cast = sub(x = inputs_387_cast, y = channels_mean_387_cast)[name = tensor("zero_mean_387_cast")]; + tensor zero_mean_sq_387_cast = mul(x = zero_mean_387_cast, y = zero_mean_387_cast)[name = tensor("zero_mean_sq_387_cast")]; + tensor var_12684 = const()[name = tensor("op_12684"), val = tensor([1])]; + tensor var_12685_cast = reduce_mean(axes = var_12684, keep_dims = var_12513, x = zero_mean_sq_387_cast)[name = tensor("op_12685_cast")]; + tensor var_12686_to_fp16 = const()[name = tensor("op_12686_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12687_cast = add(x = var_12685_cast, y = var_12686_to_fp16)[name = tensor("op_12687_cast")]; + tensor denom_387_epsilon_0_to_fp16 = const()[name = tensor("denom_387_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_387_cast = rsqrt(epsilon = denom_387_epsilon_0_to_fp16, x = var_12687_cast)[name = tensor("denom_387_cast")]; + tensor out_387_cast = mul(x = zero_mean_387_cast, y = denom_387_cast)[name = tensor("out_387_cast")]; + tensor var_12691_to_fp16 = const()[name = tensor("op_12691_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1941142208)))]; + tensor var_12692_cast = add(x = out_387_cast, y = var_12691_to_fp16)[name = tensor("op_12692_cast")]; + tensor var_12694_to_fp16 = const()[name = tensor("op_12694_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1941143552)))]; + tensor hidden_states_511_cast = mul(x = var_12692_cast, y = var_12694_to_fp16)[name = tensor("hidden_states_511_cast")]; + tensor var_12701 = const()[name = tensor("op_12701"), val = tensor([1, 1])]; + tensor var_12703 = const()[name = tensor("op_12703"), val = tensor([1, 1])]; + tensor q_259_pad_type_0 = const()[name = tensor("q_259_pad_type_0"), val = tensor("custom")]; + tensor q_259_pad_0 = const()[name = tensor("q_259_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1941144896)))]; + tensor q_259_cast = conv(dilations = var_12703, groups = var_12518, pad = q_259_pad_0, pad_type = q_259_pad_type_0, strides = var_12701, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_q_weight_to_fp16, x = hidden_states_511_cast)[name = tensor("q_259_cast")]; + tensor var_12707 = const()[name = tensor("op_12707"), val = tensor([1, 1])]; + tensor var_12709 = const()[name = tensor("op_12709"), val = tensor([1, 1])]; + tensor k_259_pad_type_0 = const()[name = tensor("k_259_pad_type_0"), val = tensor("custom")]; + tensor k_259_pad_0 = const()[name = tensor("k_259_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1941964160))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1942947264))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_259_cast = conv(dilations = var_12709, groups = var_12518, pad = k_259_pad_0, pad_type = k_259_pad_type_0, strides = var_12707, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_259_cast")]; + tensor var_12713 = const()[name = tensor("op_12713"), val = tensor([1, 1])]; + tensor var_12715 = const()[name = tensor("op_12715"), val = tensor([1, 1])]; + tensor v_259_pad_type_0 = const()[name = tensor("v_259_pad_type_0"), val = tensor("custom")]; + tensor v_259_pad_0 = const()[name = tensor("v_259_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1942947456))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1943930560))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_259_cast = conv(dilations = var_12715, groups = var_12518, pad = v_259_pad_0, pad_type = v_259_pad_type_0, strides = var_12713, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_259_cast")]; + tensor var_12719 = const()[name = tensor("op_12719"), val = tensor([2, 10, 64, -1])]; + tensor var_12720_cast = reshape(shape = var_12719, x = q_259_cast)[name = tensor("op_12720_cast")]; + tensor var_12721 = const()[name = tensor("op_12721"), val = tensor([2, 10, 64, -1])]; + tensor var_12722_cast = reshape(shape = var_12721, x = k_259_cast)[name = tensor("op_12722_cast")]; + tensor var_12723 = const()[name = tensor("op_12723"), val = tensor([2, 10, 64, -1])]; + tensor var_12724_cast = reshape(shape = var_12723, x = v_259_cast)[name = tensor("op_12724_cast")]; + tensor attn_weights_517_transpose_x_0 = const()[name = tensor("attn_weights_517_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_517_transpose_y_0 = const()[name = tensor("attn_weights_517_transpose_y_0"), val = 
tensor(false)]; + tensor attn_weights_517_cast = matmul(transpose_x = attn_weights_517_transpose_x_0, transpose_y = attn_weights_517_transpose_y_0, x = var_12720_cast, y = var_12722_cast)[name = tensor("attn_weights_517_cast")]; + tensor attn_weights_519_cast = mul(x = attn_weights_517_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_519_cast")]; + tensor var_12728_cast = softmax(axis = var_12502, x = attn_weights_519_cast)[name = tensor("op_12728_cast")]; + tensor attn_259_transpose_x_0 = const()[name = tensor("attn_259_transpose_x_0"), val = tensor(false)]; + tensor attn_259_transpose_y_0 = const()[name = tensor("attn_259_transpose_y_0"), val = tensor(true)]; + tensor attn_259_cast = matmul(transpose_x = attn_259_transpose_x_0, transpose_y = attn_259_transpose_y_0, x = var_12724_cast, y = var_12728_cast)[name = tensor("attn_259_cast")]; + tensor var_12732 = const()[name = tensor("op_12732"), val = tensor([2, 640, 1, -1])]; + tensor input_739_cast = reshape(shape = var_12732, x = attn_259_cast)[name = tensor("input_739_cast")]; + tensor var_12737 = const()[name = tensor("op_12737"), val = tensor([1, 1])]; + tensor var_12739 = const()[name = tensor("op_12739"), val = tensor([1, 1])]; + tensor var_12741_pad_type_0 = const()[name = tensor("op_12741_pad_type_0"), val = tensor("custom")]; + tensor var_12741_pad_0 = const()[name = tensor("op_12741_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1943930752))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1944340416))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1944340992)))]; + tensor var_12741_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_12739, groups = var_12518, pad = var_12741_pad_0, pad_type = var_12741_pad_type_0, strides = var_12737, weight = up_blocks_1_attentions_0_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_739_cast)[name = tensor("op_12741_cast")]; + tensor inputs_389_cast = add(x = var_12741_cast, y = inputs_387_cast)[name = tensor("inputs_389_cast")]; + tensor var_12745 = const()[name = tensor("op_12745"), val = tensor([1])]; + tensor channels_mean_389_cast = reduce_mean(axes = var_12745, keep_dims = var_12513, x = inputs_389_cast)[name = tensor("channels_mean_389_cast")]; + tensor zero_mean_389_cast = sub(x = inputs_389_cast, y = channels_mean_389_cast)[name = tensor("zero_mean_389_cast")]; + tensor zero_mean_sq_389_cast = mul(x = zero_mean_389_cast, y = zero_mean_389_cast)[name = tensor("zero_mean_sq_389_cast")]; + tensor var_12749 = const()[name = tensor("op_12749"), val = tensor([1])]; + tensor var_12750_cast = reduce_mean(axes = var_12749, keep_dims = var_12513, x = zero_mean_sq_389_cast)[name = tensor("op_12750_cast")]; + tensor var_12751_to_fp16 = const()[name = tensor("op_12751_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12752_cast = add(x = var_12750_cast, y = var_12751_to_fp16)[name = tensor("op_12752_cast")]; + 
tensor denom_389_epsilon_0_to_fp16 = const()[name = tensor("denom_389_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_389_cast = rsqrt(epsilon = denom_389_epsilon_0_to_fp16, x = var_12752_cast)[name = tensor("denom_389_cast")]; + tensor out_389_cast = mul(x = zero_mean_389_cast, y = denom_389_cast)[name = tensor("out_389_cast")]; + tensor var_12756_to_fp16 = const()[name = tensor("op_12756_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1944342336)))]; + tensor var_12757_cast = add(x = out_389_cast, y = var_12756_to_fp16)[name = tensor("op_12757_cast")]; + tensor var_12759_to_fp16 = const()[name = tensor("op_12759_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1944343680)))]; + tensor input_741_cast = mul(x = var_12757_cast, y = var_12759_to_fp16)[name = tensor("input_741_cast")]; + tensor var_12767 = const()[name = tensor("op_12767"), val = tensor([1, 1])]; + tensor var_12769 = const()[name = tensor("op_12769"), val = tensor([1, 1])]; + tensor var_12771_pad_type_0 = const()[name = tensor("op_12771_pad_type_0"), val = tensor("custom")]; + tensor var_12771_pad_0 = const()[name = tensor("op_12771_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1944345024)))]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1950898688)))]; + tensor var_12771_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_12769, groups = var_12518, pad = var_12771_pad_0, pad_type = var_12771_pad_type_0, strides = var_12767, weight = up_blocks_1_attentions_0_transformer_blocks_0_ff_net_0_proj_weight_to_fp16, x = input_741_cast)[name = tensor("op_12771_cast")]; + tensor var_12772_split_sizes_0 = const()[name = tensor("op_12772_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_12772_axis_0 = const()[name = tensor("op_12772_axis_0"), val = tensor(1)]; + tensor var_12772_cast_0, tensor var_12772_cast_1 = split(axis = var_12772_axis_0, split_sizes = var_12772_split_sizes_0, x = var_12771_cast)[name = tensor("op_12772_cast")]; + tensor var_12774_mode_0 = const()[name = tensor("op_12774_mode_0"), val = tensor("EXACT")]; + tensor var_12774_cast = gelu(mode = var_12774_mode_0, x = var_12772_cast_1)[name = tensor("op_12774_cast")]; + tensor input_743_cast = mul(x = var_12772_cast_0, y = var_12774_cast)[name = tensor("input_743_cast")]; + tensor var_12778 = const()[name = tensor("op_12778"), val = tensor([1, 1])]; + tensor var_12780 = const()[name = tensor("op_12780"), val = tensor([1, 1])]; + tensor var_12782_pad_type_0 = const()[name = tensor("op_12782_pad_type_0"), val = tensor("custom")]; + tensor var_12782_pad_0 = const()[name = tensor("op_12782_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1950908992)))]; + tensor 
up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1954185856)))]; + tensor var_12782_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_12780, groups = var_12518, pad = var_12782_pad_0, pad_type = var_12782_pad_type_0, strides = var_12778, weight = up_blocks_1_attentions_0_transformer_blocks_0_ff_net_2_weight_to_fp16, x = input_743_cast)[name = tensor("op_12782_cast")]; + tensor inputs_391_cast = add(x = var_12782_cast, y = inputs_389_cast)[name = tensor("inputs_391_cast")]; + tensor var_12792 = const()[name = tensor("op_12792"), val = tensor([1])]; + tensor channels_mean_391_cast = reduce_mean(axes = var_12792, keep_dims = var_12513, x = inputs_391_cast)[name = tensor("channels_mean_391_cast")]; + tensor zero_mean_391_cast = sub(x = inputs_391_cast, y = channels_mean_391_cast)[name = tensor("zero_mean_391_cast")]; + tensor zero_mean_sq_391_cast = mul(x = zero_mean_391_cast, y = zero_mean_391_cast)[name = tensor("zero_mean_sq_391_cast")]; + tensor var_12796 = const()[name = tensor("op_12796"), val = tensor([1])]; + tensor var_12797_cast = reduce_mean(axes = var_12796, keep_dims = var_12513, x = zero_mean_sq_391_cast)[name = tensor("op_12797_cast")]; + tensor var_12798_to_fp16 = const()[name = tensor("op_12798_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12799_cast = add(x = var_12797_cast, y = var_12798_to_fp16)[name = tensor("op_12799_cast")]; + tensor denom_391_epsilon_0_to_fp16 = const()[name = tensor("denom_391_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_391_cast = rsqrt(epsilon = denom_391_epsilon_0_to_fp16, x = var_12799_cast)[name = tensor("denom_391_cast")]; + tensor out_391_cast = mul(x = zero_mean_391_cast, y = denom_391_cast)[name = tensor("out_391_cast")]; + tensor var_12803_to_fp16 = const()[name = tensor("op_12803_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1954187200)))]; + tensor var_12804_cast = add(x = out_391_cast, y = var_12803_to_fp16)[name = tensor("op_12804_cast")]; + tensor var_12806_to_fp16 = const()[name = tensor("op_12806_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1954188544)))]; + tensor hidden_states_515_cast = mul(x = var_12804_cast, y = var_12806_to_fp16)[name = tensor("hidden_states_515_cast")]; + tensor var_12813 = const()[name = tensor("op_12813"), val = tensor([1, 1])]; + tensor var_12815 = const()[name = tensor("op_12815"), val = tensor([1, 1])]; + tensor q_261_pad_type_0 = const()[name = tensor("q_261_pad_type_0"), val = tensor("custom")]; + tensor q_261_pad_0 = const()[name = tensor("q_261_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1954189888))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1954599552))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_261_cast = conv(dilations = var_12815, groups = var_12518, pad = q_261_pad_0, pad_type = q_261_pad_type_0, strides = var_12813, weight = 
up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_515_cast)[name = tensor("q_261_cast")]; + tensor var_12819 = const()[name = tensor("op_12819"), val = tensor([1, 1])]; + tensor var_12821 = const()[name = tensor("op_12821"), val = tensor([1, 1])]; + tensor k_261_pad_type_0 = const()[name = tensor("k_261_pad_type_0"), val = tensor("custom")]; + tensor k_261_pad_0 = const()[name = tensor("k_261_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1954600128))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1955009792))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_261_cast = conv(dilations = var_12821, groups = var_12518, pad = k_261_pad_0, pad_type = k_261_pad_type_0, strides = var_12819, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_515_cast)[name = tensor("k_261_cast")]; + tensor var_12825 = const()[name = tensor("op_12825"), val = tensor([1, 1])]; + tensor var_12827 = const()[name = tensor("op_12827"), val = tensor([1, 1])]; + tensor v_261_pad_type_0 = const()[name = tensor("v_261_pad_type_0"), val = tensor("custom")]; + tensor v_261_pad_0 = const()[name = tensor("v_261_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1955010368)))]; + tensor v_261_cast = conv(dilations = var_12827, groups = var_12518, pad = v_261_pad_0, pad_type = v_261_pad_type_0, strides = var_12825, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_v_weight_to_fp16, x = hidden_states_515_cast)[name = tensor("v_261_cast")]; + tensor var_12831 = const()[name = tensor("op_12831"), val = tensor([2, 10, 64, -1])]; + tensor var_12832_cast = reshape(shape = var_12831, x = q_261_cast)[name = tensor("op_12832_cast")]; + tensor var_12833 = const()[name = tensor("op_12833"), val = tensor([2, 10, 64, -1])]; + tensor var_12834_cast = reshape(shape = var_12833, x = k_261_cast)[name = tensor("op_12834_cast")]; + tensor var_12835 = const()[name = tensor("op_12835"), val = tensor([2, 10, 64, -1])]; + tensor var_12836_cast = reshape(shape = var_12835, x = v_261_cast)[name = tensor("op_12836_cast")]; + tensor attn_weights_521_transpose_x_0 = const()[name = tensor("attn_weights_521_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_521_transpose_y_0 = const()[name = tensor("attn_weights_521_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_521_cast = matmul(transpose_x = attn_weights_521_transpose_x_0, transpose_y = attn_weights_521_transpose_y_0, x = var_12832_cast, y = var_12834_cast)[name = tensor("attn_weights_521_cast")]; + tensor attn_weights_523_cast = mul(x = attn_weights_521_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_523_cast")]; + tensor var_12840_cast = softmax(axis = var_12502, x = attn_weights_523_cast)[name = tensor("op_12840_cast")]; + tensor attn_261_transpose_x_0 = const()[name = tensor("attn_261_transpose_x_0"), val = tensor(false)]; + tensor attn_261_transpose_y_0 = 
const()[name = tensor("attn_261_transpose_y_0"), val = tensor(true)]; + tensor attn_261_cast = matmul(transpose_x = attn_261_transpose_x_0, transpose_y = attn_261_transpose_y_0, x = var_12836_cast, y = var_12840_cast)[name = tensor("attn_261_cast")]; + tensor var_12844 = const()[name = tensor("op_12844"), val = tensor([2, 640, 1, -1])]; + tensor input_745_cast = reshape(shape = var_12844, x = attn_261_cast)[name = tensor("input_745_cast")]; + tensor var_12849 = const()[name = tensor("op_12849"), val = tensor([1, 1])]; + tensor var_12851 = const()[name = tensor("op_12851"), val = tensor([1, 1])]; + tensor var_12853_pad_type_0 = const()[name = tensor("op_12853_pad_type_0"), val = tensor("custom")]; + tensor var_12853_pad_0 = const()[name = tensor("op_12853_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1955829632)))]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1956648896)))]; + tensor var_12853_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_12851, groups = var_12518, pad = var_12853_pad_0, pad_type = var_12853_pad_type_0, strides = var_12849, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn1_to_out_0_weight_to_fp16, x = input_745_cast)[name = tensor("op_12853_cast")]; + tensor inputs_393_cast = add(x = var_12853_cast, y = inputs_391_cast)[name = tensor("inputs_393_cast")]; + tensor var_12857 = const()[name = tensor("op_12857"), val = tensor([1])]; + tensor channels_mean_393_cast = reduce_mean(axes = var_12857, keep_dims = var_12513, x = inputs_393_cast)[name = tensor("channels_mean_393_cast")]; + tensor zero_mean_393_cast = sub(x = inputs_393_cast, y = channels_mean_393_cast)[name = tensor("zero_mean_393_cast")]; + tensor zero_mean_sq_393_cast = mul(x = zero_mean_393_cast, y = zero_mean_393_cast)[name = tensor("zero_mean_sq_393_cast")]; + tensor var_12861 = const()[name = tensor("op_12861"), val = tensor([1])]; + tensor var_12862_cast = reduce_mean(axes = var_12861, keep_dims = var_12513, x = zero_mean_sq_393_cast)[name = tensor("op_12862_cast")]; + tensor var_12863_to_fp16 = const()[name = tensor("op_12863_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12864_cast = add(x = var_12862_cast, y = var_12863_to_fp16)[name = tensor("op_12864_cast")]; + tensor denom_393_epsilon_0_to_fp16 = const()[name = tensor("denom_393_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_393_cast = rsqrt(epsilon = denom_393_epsilon_0_to_fp16, x = var_12864_cast)[name = tensor("denom_393_cast")]; + tensor out_393_cast = mul(x = zero_mean_393_cast, y = denom_393_cast)[name = tensor("out_393_cast")]; + tensor var_12868_to_fp16 = const()[name = tensor("op_12868_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1956650240)))]; + tensor var_12869_cast = add(x = out_393_cast, y = var_12868_to_fp16)[name = tensor("op_12869_cast")]; + tensor var_12871_to_fp16 = const()[name = tensor("op_12871_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1956651584)))]; 
+ tensor hidden_states_517_cast = mul(x = var_12869_cast, y = var_12871_to_fp16)[name = tensor("hidden_states_517_cast")]; + tensor var_12878 = const()[name = tensor("op_12878"), val = tensor([1, 1])]; + tensor var_12880 = const()[name = tensor("op_12880"), val = tensor([1, 1])]; + tensor q_263_pad_type_0 = const()[name = tensor("q_263_pad_type_0"), val = tensor("custom")]; + tensor q_263_pad_0 = const()[name = tensor("q_263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1956652928))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1957062592))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_263_cast = conv(dilations = var_12880, groups = var_12518, pad = q_263_pad_0, pad_type = q_263_pad_type_0, strides = var_12878, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_517_cast)[name = tensor("q_263_cast")]; + tensor var_12884 = const()[name = tensor("op_12884"), val = tensor([1, 1])]; + tensor var_12886 = const()[name = tensor("op_12886"), val = tensor([1, 1])]; + tensor k_263_pad_type_0 = const()[name = tensor("k_263_pad_type_0"), val = tensor("custom")]; + tensor k_263_pad_0 = const()[name = tensor("k_263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1957063168))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1958046272))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_263_cast = conv(dilations = var_12886, groups = var_12518, pad = k_263_pad_0, pad_type = k_263_pad_type_0, strides = var_12884, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_263_cast")]; + tensor var_12890 = const()[name = tensor("op_12890"), val = tensor([1, 1])]; + tensor var_12892 = const()[name = tensor("op_12892"), val = tensor([1, 1])]; + tensor v_263_pad_type_0 = const()[name = tensor("v_263_pad_type_0"), val = tensor("custom")]; + tensor v_263_pad_0 = const()[name = tensor("v_263_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1958046464))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1959029568))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_263_cast = conv(dilations = var_12892, groups = var_12518, pad = v_263_pad_0, pad_type = v_263_pad_type_0, strides = var_12890, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_263_cast")]; + tensor var_12896 = const()[name = tensor("op_12896"), val = tensor([2, 10, 64, -1])]; + tensor var_12897_cast = reshape(shape = var_12896, 
x = q_263_cast)[name = tensor("op_12897_cast")]; + tensor var_12898 = const()[name = tensor("op_12898"), val = tensor([2, 10, 64, -1])]; + tensor var_12899_cast = reshape(shape = var_12898, x = k_263_cast)[name = tensor("op_12899_cast")]; + tensor var_12900 = const()[name = tensor("op_12900"), val = tensor([2, 10, 64, -1])]; + tensor var_12901_cast = reshape(shape = var_12900, x = v_263_cast)[name = tensor("op_12901_cast")]; + tensor attn_weights_525_transpose_x_0 = const()[name = tensor("attn_weights_525_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_525_transpose_y_0 = const()[name = tensor("attn_weights_525_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_525_cast = matmul(transpose_x = attn_weights_525_transpose_x_0, transpose_y = attn_weights_525_transpose_y_0, x = var_12897_cast, y = var_12899_cast)[name = tensor("attn_weights_525_cast")]; + tensor attn_weights_527_cast = mul(x = attn_weights_525_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_527_cast")]; + tensor var_12905_cast = softmax(axis = var_12502, x = attn_weights_527_cast)[name = tensor("op_12905_cast")]; + tensor attn_263_transpose_x_0 = const()[name = tensor("attn_263_transpose_x_0"), val = tensor(false)]; + tensor attn_263_transpose_y_0 = const()[name = tensor("attn_263_transpose_y_0"), val = tensor(true)]; + tensor attn_263_cast = matmul(transpose_x = attn_263_transpose_x_0, transpose_y = attn_263_transpose_y_0, x = var_12901_cast, y = var_12905_cast)[name = tensor("attn_263_cast")]; + tensor var_12909 = const()[name = tensor("op_12909"), val = tensor([2, 640, 1, -1])]; + tensor input_747_cast = reshape(shape = var_12909, x = attn_263_cast)[name = tensor("input_747_cast")]; + tensor var_12914 = const()[name = tensor("op_12914"), val = tensor([1, 1])]; + tensor var_12916 = const()[name = tensor("op_12916"), val = tensor([1, 1])]; + tensor var_12918_pad_type_0 = const()[name = tensor("op_12918_pad_type_0"), val = tensor("custom")]; + tensor var_12918_pad_0 = const()[name = tensor("op_12918_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1959029760))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1959337024))), name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1959337216)))]; + tensor var_12918_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_12916, groups = var_12518, pad = var_12918_pad_0, pad_type = var_12918_pad_type_0, strides = var_12914, weight = up_blocks_1_attentions_0_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_747_cast)[name = tensor("op_12918_cast")]; + tensor inputs_395_cast = add(x = var_12918_cast, y = inputs_393_cast)[name = tensor("inputs_395_cast")]; + tensor var_12922 = const()[name = tensor("op_12922"), val = tensor([1])]; + tensor channels_mean_395_cast = reduce_mean(axes = var_12922, keep_dims = var_12513, x = inputs_395_cast)[name = tensor("channels_mean_395_cast")]; + tensor 
zero_mean_395_cast = sub(x = inputs_395_cast, y = channels_mean_395_cast)[name = tensor("zero_mean_395_cast")]; + tensor zero_mean_sq_395_cast = mul(x = zero_mean_395_cast, y = zero_mean_395_cast)[name = tensor("zero_mean_sq_395_cast")]; + tensor var_12926 = const()[name = tensor("op_12926"), val = tensor([1])]; + tensor var_12927_cast = reduce_mean(axes = var_12926, keep_dims = var_12513, x = zero_mean_sq_395_cast)[name = tensor("op_12927_cast")]; + tensor var_12928_to_fp16 = const()[name = tensor("op_12928_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_12929_cast = add(x = var_12927_cast, y = var_12928_to_fp16)[name = tensor("op_12929_cast")]; + tensor denom_395_epsilon_0_to_fp16 = const()[name = tensor("denom_395_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_395_cast = rsqrt(epsilon = denom_395_epsilon_0_to_fp16, x = var_12929_cast)[name = tensor("denom_395_cast")]; + tensor out_395_cast = mul(x = zero_mean_395_cast, y = denom_395_cast)[name = tensor("out_395_cast")]; + tensor var_12933_to_fp16 = const()[name = tensor("op_12933_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1959338560)))]; + tensor var_12934_cast = add(x = out_395_cast, y = var_12933_to_fp16)[name = tensor("op_12934_cast")]; + tensor var_12936_to_fp16 = const()[name = tensor("op_12936_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1959339904)))]; + tensor input_749_cast = mul(x = var_12934_cast, y = var_12936_to_fp16)[name = tensor("input_749_cast")]; + tensor var_12944 = const()[name = tensor("op_12944"), val = tensor([1, 1])]; + tensor var_12946 = const()[name = tensor("op_12946"), val = tensor([1, 1])]; + tensor var_12948_pad_type_0 = const()[name = tensor("op_12948_pad_type_0"), val = tensor("custom")]; + tensor var_12948_pad_0 = const()[name = tensor("op_12948_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1959341248)))]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1965894912)))]; + tensor var_12948_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_12946, groups = var_12518, pad = var_12948_pad_0, pad_type = var_12948_pad_type_0, strides = var_12944, weight = up_blocks_1_attentions_0_transformer_blocks_1_ff_net_0_proj_weight_to_fp16, x = input_749_cast)[name = tensor("op_12948_cast")]; + tensor var_12949_split_sizes_0 = const()[name = tensor("op_12949_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_12949_axis_0 = const()[name = tensor("op_12949_axis_0"), val = tensor(1)]; + tensor var_12949_cast_0, tensor var_12949_cast_1 = split(axis = var_12949_axis_0, split_sizes = var_12949_split_sizes_0, x = var_12948_cast)[name = tensor("op_12949_cast")]; + tensor var_12951_mode_0 = const()[name = tensor("op_12951_mode_0"), val = tensor("EXACT")]; + tensor var_12951_cast = gelu(mode = var_12951_mode_0, x = var_12949_cast_1)[name = tensor("op_12951_cast")]; + tensor input_751_cast = mul(x = var_12949_cast_0, y = var_12951_cast)[name = 
tensor("input_751_cast")]; + tensor var_12955 = const()[name = tensor("op_12955"), val = tensor([1, 1])]; + tensor var_12957 = const()[name = tensor("op_12957"), val = tensor([1, 1])]; + tensor var_12959_pad_type_0 = const()[name = tensor("op_12959_pad_type_0"), val = tensor("custom")]; + tensor var_12959_pad_0 = const()[name = tensor("op_12959_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1965905216)))]; + tensor up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1969182080)))]; + tensor var_12959_cast = conv(bias = up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_12957, groups = var_12518, pad = var_12959_pad_0, pad_type = var_12959_pad_type_0, strides = var_12955, weight = up_blocks_1_attentions_0_transformer_blocks_1_ff_net_2_weight_to_fp16, x = input_751_cast)[name = tensor("op_12959_cast")]; + tensor hidden_states_521_cast = add(x = var_12959_cast, y = inputs_395_cast)[name = tensor("hidden_states_521_cast")]; + tensor var_12961 = const()[name = tensor("op_12961"), val = tensor([2, 640, 64, 64])]; + tensor input_753_cast = reshape(shape = var_12961, x = hidden_states_521_cast)[name = tensor("input_753_cast")]; + tensor var_12965 = const()[name = tensor("op_12965"), val = tensor([1, 1])]; + tensor var_12967 = const()[name = tensor("op_12967"), val = tensor([1, 1])]; + tensor hidden_states_523_pad_type_0 = const()[name = tensor("hidden_states_523_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_523_pad_0 = const()[name = tensor("hidden_states_523_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_0_proj_out_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_proj_out_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1969183424)))]; + tensor up_blocks_1_attentions_0_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_0_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1970002688)))]; + tensor hidden_states_523_cast = conv(bias = up_blocks_1_attentions_0_proj_out_bias_to_fp16, dilations = var_12967, groups = var_12518, pad = hidden_states_523_pad_0, pad_type = hidden_states_523_pad_type_0, strides = var_12965, weight = up_blocks_1_attentions_0_proj_out_weight_to_fp16, x = input_753_cast)[name = tensor("hidden_states_523_cast")]; + tensor hidden_states_525_cast = add(x = hidden_states_523_cast, y = hidden_states_505_cast)[name = tensor("hidden_states_525_cast")]; + tensor input_755_interleave_0 = const()[name = tensor("input_755_interleave_0"), val = tensor(false)]; + tensor input_755_cast = concat(axis = var_12518, interleave = input_755_interleave_0, values = (hidden_states_525_cast, input_79_cast))[name = tensor("input_755_cast")]; + tensor reshape_132_shape_0 = const()[name = tensor("reshape_132_shape_0"), val = tensor([2, 32, 40, 64, 64])]; + tensor reshape_132_cast = reshape(shape = reshape_132_shape_0, x = input_755_cast)[name = tensor("reshape_132_cast")]; + tensor reduce_mean_99_axes_0 = const()[name = 
tensor("reduce_mean_99_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_99_keep_dims_0 = const()[name = tensor("reduce_mean_99_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_99_cast = reduce_mean(axes = reduce_mean_99_axes_0, keep_dims = reduce_mean_99_keep_dims_0, x = reshape_132_cast)[name = tensor("reduce_mean_99_cast")]; + tensor sub_66_cast = sub(x = reshape_132_cast, y = reduce_mean_99_cast)[name = tensor("sub_66_cast")]; + tensor square_33_cast = square(x = sub_66_cast)[name = tensor("square_33_cast")]; + tensor reduce_mean_101_axes_0 = const()[name = tensor("reduce_mean_101_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_101_keep_dims_0 = const()[name = tensor("reduce_mean_101_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_101_cast = reduce_mean(axes = reduce_mean_101_axes_0, keep_dims = reduce_mean_101_keep_dims_0, x = square_33_cast)[name = tensor("reduce_mean_101_cast")]; + tensor add_66_y_0_to_fp16 = const()[name = tensor("add_66_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_66_cast = add(x = reduce_mean_101_cast, y = add_66_y_0_to_fp16)[name = tensor("add_66_cast")]; + tensor sqrt_33_cast = sqrt(x = add_66_cast)[name = tensor("sqrt_33_cast")]; + tensor real_div_33_cast = real_div(x = sub_66_cast, y = sqrt_33_cast)[name = tensor("real_div_33_cast")]; + tensor reshape_133_shape_0 = const()[name = tensor("reshape_133_shape_0"), val = tensor([2, 1280, 64, 64])]; + tensor reshape_133_cast = reshape(shape = reshape_133_shape_0, x = real_div_33_cast)[name = tensor("reshape_133_cast")]; + tensor add_67_gamma_0_to_fp16 = const()[name = tensor("add_67_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1970004032)))]; + tensor add_67_beta_0_to_fp16 = const()[name = tensor("add_67_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1970006656)))]; + tensor add_67_epsilon_0_to_fp16 = const()[name = tensor("add_67_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_67_cast = batch_norm(beta = add_67_beta_0_to_fp16, epsilon = add_67_epsilon_0_to_fp16, gamma = add_67_gamma_0_to_fp16, mean = add_23_mean_0_to_fp16, variance = add_23_variance_0_to_fp16, x = reshape_133_cast)[name = tensor("add_67_cast")]; + tensor input_759_cast = silu(x = add_67_cast)[name = tensor("input_759_cast")]; + tensor var_12985 = const()[name = tensor("op_12985"), val = tensor([1, 1])]; + tensor var_12987 = const()[name = tensor("op_12987"), val = tensor([1, 1])]; + tensor hidden_states_527_pad_type_0 = const()[name = tensor("hidden_states_527_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_527_pad_0 = const()[name = tensor("hidden_states_527_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_1_conv1_weight_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1970009280)))]; + tensor up_blocks_1_resnets_1_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1984754944)))]; + tensor hidden_states_527_cast = conv(bias = up_blocks_1_resnets_1_conv1_bias_to_fp16, dilations = var_12987, groups = var_12518, pad = hidden_states_527_pad_0, pad_type = hidden_states_527_pad_type_0, strides = var_12985, weight = up_blocks_1_resnets_1_conv1_weight_to_fp16, x = input_759_cast)[name = 
tensor("hidden_states_527_cast")]; + tensor var_12993 = const()[name = tensor("op_12993"), val = tensor([1, 1])]; + tensor var_12995 = const()[name = tensor("op_12995"), val = tensor([1, 1])]; + tensor temb_25_pad_type_0 = const()[name = tensor("temb_25_pad_type_0"), val = tensor("custom")]; + tensor temb_25_pad_0 = const()[name = tensor("temb_25_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_1_time_emb_proj_weight_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_time_emb_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1984756288)))]; + tensor up_blocks_1_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1986394752)))]; + tensor temb_25_cast = conv(bias = up_blocks_1_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_12995, groups = var_12518, pad = temb_25_pad_0, pad_type = temb_25_pad_type_0, strides = var_12993, weight = up_blocks_1_resnets_1_time_emb_proj_weight_to_fp16, x = input_21_cast)[name = tensor("temb_25_cast")]; + tensor input_763_cast = add(x = hidden_states_527_cast, y = temb_25_cast)[name = tensor("input_763_cast")]; + tensor reshape_136_shape_0 = const()[name = tensor("reshape_136_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_136_cast = reshape(shape = reshape_136_shape_0, x = input_763_cast)[name = tensor("reshape_136_cast")]; + tensor reduce_mean_102_axes_0 = const()[name = tensor("reduce_mean_102_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_102_keep_dims_0 = const()[name = tensor("reduce_mean_102_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_102_cast = reduce_mean(axes = reduce_mean_102_axes_0, keep_dims = reduce_mean_102_keep_dims_0, x = reshape_136_cast)[name = tensor("reduce_mean_102_cast")]; + tensor sub_68_cast = sub(x = reshape_136_cast, y = reduce_mean_102_cast)[name = tensor("sub_68_cast")]; + tensor square_34_cast = square(x = sub_68_cast)[name = tensor("square_34_cast")]; + tensor reduce_mean_104_axes_0 = const()[name = tensor("reduce_mean_104_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_104_keep_dims_0 = const()[name = tensor("reduce_mean_104_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_104_cast = reduce_mean(axes = reduce_mean_104_axes_0, keep_dims = reduce_mean_104_keep_dims_0, x = square_34_cast)[name = tensor("reduce_mean_104_cast")]; + tensor add_68_y_0_to_fp16 = const()[name = tensor("add_68_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_68_cast = add(x = reduce_mean_104_cast, y = add_68_y_0_to_fp16)[name = tensor("add_68_cast")]; + tensor sqrt_34_cast = sqrt(x = add_68_cast)[name = tensor("sqrt_34_cast")]; + tensor real_div_34_cast = real_div(x = sub_68_cast, y = sqrt_34_cast)[name = tensor("real_div_34_cast")]; + tensor reshape_137_shape_0 = const()[name = tensor("reshape_137_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_137_cast = reshape(shape = reshape_137_shape_0, x = real_div_34_cast)[name = tensor("reshape_137_cast")]; + tensor add_69_gamma_0_to_fp16 = const()[name = tensor("add_69_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1986396096)))]; + tensor add_69_beta_0_to_fp16 = const()[name = tensor("add_69_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1986397440)))]; + tensor add_69_epsilon_0_to_fp16 = 
const()[name = tensor("add_69_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_69_cast = batch_norm(beta = add_69_beta_0_to_fp16, epsilon = add_69_epsilon_0_to_fp16, gamma = add_69_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_137_cast)[name = tensor("add_69_cast")]; + tensor input_767_cast = silu(x = add_69_cast)[name = tensor("input_767_cast")]; + tensor var_13005 = const()[name = tensor("op_13005"), val = tensor([1, 1])]; + tensor var_13007 = const()[name = tensor("op_13007"), val = tensor([1, 1])]; + tensor hidden_states_529_pad_type_0 = const()[name = tensor("hidden_states_529_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_529_pad_0 = const()[name = tensor("hidden_states_529_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_1_conv2_weight_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1986398784)))]; + tensor up_blocks_1_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1993771648)))]; + tensor hidden_states_529_cast = conv(bias = up_blocks_1_resnets_1_conv2_bias_to_fp16, dilations = var_13007, groups = var_12518, pad = hidden_states_529_pad_0, pad_type = hidden_states_529_pad_type_0, strides = var_13005, weight = up_blocks_1_resnets_1_conv2_weight_to_fp16, x = input_767_cast)[name = tensor("hidden_states_529_cast")]; + tensor var_13012 = const()[name = tensor("op_13012"), val = tensor([1, 1])]; + tensor var_13014 = const()[name = tensor("op_13014"), val = tensor([1, 1])]; + tensor x_13_pad_type_0 = const()[name = tensor("x_13_pad_type_0"), val = tensor("custom")]; + tensor x_13_pad_0 = const()[name = tensor("x_13_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_1_conv_shortcut_weight_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_conv_shortcut_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1993772992)))]; + tensor up_blocks_1_resnets_1_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_1_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1995411456)))]; + tensor x_13_cast = conv(bias = up_blocks_1_resnets_1_conv_shortcut_bias_to_fp16, dilations = var_13014, groups = var_12518, pad = x_13_pad_0, pad_type = x_13_pad_type_0, strides = var_13012, weight = up_blocks_1_resnets_1_conv_shortcut_weight_to_fp16, x = input_755_cast)[name = tensor("x_13_cast")]; + tensor hidden_states_531_cast = add(x = x_13_cast, y = hidden_states_529_cast)[name = tensor("hidden_states_531_cast")]; + tensor reshape_140_shape_0 = const()[name = tensor("reshape_140_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_140_cast = reshape(shape = reshape_140_shape_0, x = hidden_states_531_cast)[name = tensor("reshape_140_cast")]; + tensor reduce_mean_105_axes_0 = const()[name = tensor("reduce_mean_105_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_105_keep_dims_0 = const()[name = tensor("reduce_mean_105_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_105_cast = reduce_mean(axes = reduce_mean_105_axes_0, keep_dims = reduce_mean_105_keep_dims_0, x = reshape_140_cast)[name = tensor("reduce_mean_105_cast")]; + tensor sub_70_cast = sub(x = reshape_140_cast, y = 
reduce_mean_105_cast)[name = tensor("sub_70_cast")]; + tensor square_35_cast = square(x = sub_70_cast)[name = tensor("square_35_cast")]; + tensor reduce_mean_107_axes_0 = const()[name = tensor("reduce_mean_107_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_107_keep_dims_0 = const()[name = tensor("reduce_mean_107_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_107_cast = reduce_mean(axes = reduce_mean_107_axes_0, keep_dims = reduce_mean_107_keep_dims_0, x = square_35_cast)[name = tensor("reduce_mean_107_cast")]; + tensor add_70_y_0_to_fp16 = const()[name = tensor("add_70_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_70_cast = add(x = reduce_mean_107_cast, y = add_70_y_0_to_fp16)[name = tensor("add_70_cast")]; + tensor sqrt_35_cast = sqrt(x = add_70_cast)[name = tensor("sqrt_35_cast")]; + tensor real_div_35_cast = real_div(x = sub_70_cast, y = sqrt_35_cast)[name = tensor("real_div_35_cast")]; + tensor reshape_141_shape_0 = const()[name = tensor("reshape_141_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_141_cast = reshape(shape = reshape_141_shape_0, x = real_div_35_cast)[name = tensor("reshape_141_cast")]; + tensor add_71_gamma_0_to_fp16 = const()[name = tensor("add_71_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1995412800)))]; + tensor add_71_beta_0_to_fp16 = const()[name = tensor("add_71_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1995414144)))]; + tensor add_71_epsilon_0_to_fp16 = const()[name = tensor("add_71_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_71_cast = batch_norm(beta = add_71_beta_0_to_fp16, epsilon = add_71_epsilon_0_to_fp16, gamma = add_71_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_141_cast)[name = tensor("add_71_cast")]; + tensor var_13036 = const()[name = tensor("op_13036"), val = tensor([1, 1])]; + tensor var_13038 = const()[name = tensor("op_13038"), val = tensor([1, 1])]; + tensor hidden_states_533_pad_type_0 = const()[name = tensor("hidden_states_533_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_533_pad_0 = const()[name = tensor("hidden_states_533_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_proj_in_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_proj_in_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1995415488)))]; + tensor up_blocks_1_attentions_1_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1996234752)))]; + tensor hidden_states_533_cast = conv(bias = up_blocks_1_attentions_1_proj_in_bias_to_fp16, dilations = var_13038, groups = var_12518, pad = hidden_states_533_pad_0, pad_type = hidden_states_533_pad_type_0, strides = var_13036, weight = up_blocks_1_attentions_1_proj_in_weight_to_fp16, x = add_71_cast)[name = tensor("hidden_states_533_cast")]; + tensor var_13043 = const()[name = tensor("op_13043"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_397_cast = reshape(shape = var_13043, x = hidden_states_533_cast)[name = tensor("inputs_397_cast")]; + tensor var_13053 = const()[name = tensor("op_13053"), val = tensor([1])]; + tensor channels_mean_397_cast = reduce_mean(axes = var_13053, keep_dims = var_12513, x = inputs_397_cast)[name = tensor("channels_mean_397_cast")]; + tensor
zero_mean_397_cast = sub(x = inputs_397_cast, y = channels_mean_397_cast)[name = tensor("zero_mean_397_cast")]; + tensor zero_mean_sq_397_cast = mul(x = zero_mean_397_cast, y = zero_mean_397_cast)[name = tensor("zero_mean_sq_397_cast")]; + tensor var_13057 = const()[name = tensor("op_13057"), val = tensor([1])]; + tensor var_13058_cast = reduce_mean(axes = var_13057, keep_dims = var_12513, x = zero_mean_sq_397_cast)[name = tensor("op_13058_cast")]; + tensor var_13059_to_fp16 = const()[name = tensor("op_13059_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13060_cast = add(x = var_13058_cast, y = var_13059_to_fp16)[name = tensor("op_13060_cast")]; + tensor denom_397_epsilon_0_to_fp16 = const()[name = tensor("denom_397_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_397_cast = rsqrt(epsilon = denom_397_epsilon_0_to_fp16, x = var_13060_cast)[name = tensor("denom_397_cast")]; + tensor out_397_cast = mul(x = zero_mean_397_cast, y = denom_397_cast)[name = tensor("out_397_cast")]; + tensor var_13064_to_fp16 = const()[name = tensor("op_13064_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1996236096)))]; + tensor var_13065_cast = add(x = out_397_cast, y = var_13064_to_fp16)[name = tensor("op_13065_cast")]; + tensor var_13067_to_fp16 = const()[name = tensor("op_13067_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1996237440)))]; + tensor hidden_states_535_cast = mul(x = var_13065_cast, y = var_13067_to_fp16)[name = tensor("hidden_states_535_cast")]; + tensor var_13074 = const()[name = tensor("op_13074"), val = tensor([1, 1])]; + tensor var_13076 = const()[name = tensor("op_13076"), val = tensor([1, 1])]; + tensor q_265_pad_type_0 = const()[name = tensor("q_265_pad_type_0"), val = tensor("custom")]; + tensor q_265_pad_0 = const()[name = tensor("q_265_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1996238784))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1996648448))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_265_cast = conv(dilations = var_13076, groups = var_12518, pad = q_265_pad_0, pad_type = q_265_pad_type_0, strides = var_13074, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_535_cast)[name = tensor("q_265_cast")]; + tensor var_13080 = const()[name = tensor("op_13080"), val = tensor([1, 1])]; + tensor var_13082 = const()[name = tensor("op_13082"), val = tensor([1, 1])]; + tensor k_265_pad_type_0 = const()[name = tensor("k_265_pad_type_0"), val = tensor("custom")]; + tensor k_265_pad_0 = const()[name = tensor("k_265_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1996649024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1997058688))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_265_cast = conv(dilations = var_13082, groups = var_12518, 
pad = k_265_pad_0, pad_type = k_265_pad_type_0, strides = var_13080, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_535_cast)[name = tensor("k_265_cast")]; + tensor var_13086 = const()[name = tensor("op_13086"), val = tensor([1, 1])]; + tensor var_13088 = const()[name = tensor("op_13088"), val = tensor([1, 1])]; + tensor v_265_pad_type_0 = const()[name = tensor("v_265_pad_type_0"), val = tensor("custom")]; + tensor v_265_pad_0 = const()[name = tensor("v_265_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1997059264)))]; + tensor v_265_cast = conv(dilations = var_13088, groups = var_12518, pad = v_265_pad_0, pad_type = v_265_pad_type_0, strides = var_13086, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_v_weight_to_fp16, x = hidden_states_535_cast)[name = tensor("v_265_cast")]; + tensor var_13092 = const()[name = tensor("op_13092"), val = tensor([2, 10, 64, -1])]; + tensor var_13093_cast = reshape(shape = var_13092, x = q_265_cast)[name = tensor("op_13093_cast")]; + tensor var_13094 = const()[name = tensor("op_13094"), val = tensor([2, 10, 64, -1])]; + tensor var_13095_cast = reshape(shape = var_13094, x = k_265_cast)[name = tensor("op_13095_cast")]; + tensor var_13096 = const()[name = tensor("op_13096"), val = tensor([2, 10, 64, -1])]; + tensor var_13097_cast = reshape(shape = var_13096, x = v_265_cast)[name = tensor("op_13097_cast")]; + tensor attn_weights_529_transpose_x_0 = const()[name = tensor("attn_weights_529_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_529_transpose_y_0 = const()[name = tensor("attn_weights_529_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_529_cast = matmul(transpose_x = attn_weights_529_transpose_x_0, transpose_y = attn_weights_529_transpose_y_0, x = var_13093_cast, y = var_13095_cast)[name = tensor("attn_weights_529_cast")]; + tensor attn_weights_531_cast = mul(x = attn_weights_529_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_531_cast")]; + tensor var_13101_cast = softmax(axis = var_12502, x = attn_weights_531_cast)[name = tensor("op_13101_cast")]; + tensor attn_265_transpose_x_0 = const()[name = tensor("attn_265_transpose_x_0"), val = tensor(false)]; + tensor attn_265_transpose_y_0 = const()[name = tensor("attn_265_transpose_y_0"), val = tensor(true)]; + tensor attn_265_cast = matmul(transpose_x = attn_265_transpose_x_0, transpose_y = attn_265_transpose_y_0, x = var_13097_cast, y = var_13101_cast)[name = tensor("attn_265_cast")]; + tensor var_13105 = const()[name = tensor("op_13105"), val = tensor([2, 640, 1, -1])]; + tensor input_771_cast = reshape(shape = var_13105, x = attn_265_cast)[name = tensor("input_771_cast")]; + tensor var_13110 = const()[name = tensor("op_13110"), val = tensor([1, 1])]; + tensor var_13112 = const()[name = tensor("op_13112"), val = tensor([1, 1])]; + tensor var_13114_pad_type_0 = const()[name = tensor("op_13114_pad_type_0"), val = tensor("custom")]; + tensor var_13114_pad_0 = const()[name = tensor("op_13114_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(1997878528)))]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1998697792)))]; + tensor var_13114_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_13112, groups = var_12518, pad = var_13114_pad_0, pad_type = var_13114_pad_type_0, strides = var_13110, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn1_to_out_0_weight_to_fp16, x = input_771_cast)[name = tensor("op_13114_cast")]; + tensor inputs_399_cast = add(x = var_13114_cast, y = inputs_397_cast)[name = tensor("inputs_399_cast")]; + tensor var_13118 = const()[name = tensor("op_13118"), val = tensor([1])]; + tensor channels_mean_399_cast = reduce_mean(axes = var_13118, keep_dims = var_12513, x = inputs_399_cast)[name = tensor("channels_mean_399_cast")]; + tensor zero_mean_399_cast = sub(x = inputs_399_cast, y = channels_mean_399_cast)[name = tensor("zero_mean_399_cast")]; + tensor zero_mean_sq_399_cast = mul(x = zero_mean_399_cast, y = zero_mean_399_cast)[name = tensor("zero_mean_sq_399_cast")]; + tensor var_13122 = const()[name = tensor("op_13122"), val = tensor([1])]; + tensor var_13123_cast = reduce_mean(axes = var_13122, keep_dims = var_12513, x = zero_mean_sq_399_cast)[name = tensor("op_13123_cast")]; + tensor var_13124_to_fp16 = const()[name = tensor("op_13124_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13125_cast = add(x = var_13123_cast, y = var_13124_to_fp16)[name = tensor("op_13125_cast")]; + tensor denom_399_epsilon_0_to_fp16 = const()[name = tensor("denom_399_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_399_cast = rsqrt(epsilon = denom_399_epsilon_0_to_fp16, x = var_13125_cast)[name = tensor("denom_399_cast")]; + tensor out_399_cast = mul(x = zero_mean_399_cast, y = denom_399_cast)[name = tensor("out_399_cast")]; + tensor var_13129_to_fp16 = const()[name = tensor("op_13129_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1998699136)))]; + tensor var_13130_cast = add(x = out_399_cast, y = var_13129_to_fp16)[name = tensor("op_13130_cast")]; + tensor var_13132_to_fp16 = const()[name = tensor("op_13132_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1998700480)))]; + tensor hidden_states_537_cast = mul(x = var_13130_cast, y = var_13132_to_fp16)[name = tensor("hidden_states_537_cast")]; + tensor var_13139 = const()[name = tensor("op_13139"), val = tensor([1, 1])]; + tensor var_13141 = const()[name = tensor("op_13141"), val = tensor([1, 1])]; + tensor q_267_pad_type_0 = const()[name = tensor("q_267_pad_type_0"), val = tensor("custom")]; + tensor q_267_pad_0 = const()[name = tensor("q_267_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1998701824))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1999111488))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_267_cast = conv(dilations = var_13141, groups = var_12518, pad = q_267_pad_0, 
pad_type = q_267_pad_type_0, strides = var_13139, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_537_cast)[name = tensor("q_267_cast")]; + tensor var_13145 = const()[name = tensor("op_13145"), val = tensor([1, 1])]; + tensor var_13147 = const()[name = tensor("op_13147"), val = tensor([1, 1])]; + tensor k_267_pad_type_0 = const()[name = tensor("k_267_pad_type_0"), val = tensor("custom")]; + tensor k_267_pad_0 = const()[name = tensor("k_267_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(1999112064))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2000095168))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_267_cast = conv(dilations = var_13147, groups = var_12518, pad = k_267_pad_0, pad_type = k_267_pad_type_0, strides = var_13145, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_267_cast")]; + tensor var_13151 = const()[name = tensor("op_13151"), val = tensor([1, 1])]; + tensor var_13153 = const()[name = tensor("op_13153"), val = tensor([1, 1])]; + tensor v_267_pad_type_0 = const()[name = tensor("v_267_pad_type_0"), val = tensor("custom")]; + tensor v_267_pad_0 = const()[name = tensor("v_267_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2000095360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2001406144))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_267_cast = conv(dilations = var_13153, groups = var_12518, pad = v_267_pad_0, pad_type = v_267_pad_type_0, strides = var_13151, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_267_cast")]; + tensor var_13157 = const()[name = tensor("op_13157"), val = tensor([2, 10, 64, -1])]; + tensor var_13158_cast = reshape(shape = var_13157, x = q_267_cast)[name = tensor("op_13158_cast")]; + tensor var_13159 = const()[name = tensor("op_13159"), val = tensor([2, 10, 64, -1])]; + tensor var_13160_cast = reshape(shape = var_13159, x = k_267_cast)[name = tensor("op_13160_cast")]; + tensor var_13161 = const()[name = tensor("op_13161"), val = tensor([2, 10, 64, -1])]; + tensor var_13162_cast = reshape(shape = var_13161, x = v_267_cast)[name = tensor("op_13162_cast")]; + tensor attn_weights_533_transpose_x_0 = const()[name = tensor("attn_weights_533_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_533_transpose_y_0 = const()[name = tensor("attn_weights_533_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_533_cast = matmul(transpose_x = attn_weights_533_transpose_x_0, transpose_y = attn_weights_533_transpose_y_0, x = var_13158_cast, y = var_13160_cast)[name = tensor("attn_weights_533_cast")]; + tensor attn_weights_535_cast = mul(x = attn_weights_533_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_535_cast")]; + 
tensor var_13166_cast = softmax(axis = var_12502, x = attn_weights_535_cast)[name = tensor("op_13166_cast")]; + tensor attn_267_transpose_x_0 = const()[name = tensor("attn_267_transpose_x_0"), val = tensor(false)]; + tensor attn_267_transpose_y_0 = const()[name = tensor("attn_267_transpose_y_0"), val = tensor(true)]; + tensor attn_267_cast = matmul(transpose_x = attn_267_transpose_x_0, transpose_y = attn_267_transpose_y_0, x = var_13162_cast, y = var_13166_cast)[name = tensor("attn_267_cast")]; + tensor var_13170 = const()[name = tensor("op_13170"), val = tensor([2, 640, 1, -1])]; + tensor input_773_cast = reshape(shape = var_13170, x = attn_267_cast)[name = tensor("input_773_cast")]; + tensor var_13175 = const()[name = tensor("op_13175"), val = tensor([1, 1])]; + tensor var_13177 = const()[name = tensor("op_13177"), val = tensor([1, 1])]; + tensor var_13179_pad_type_0 = const()[name = tensor("op_13179_pad_type_0"), val = tensor("custom")]; + tensor var_13179_pad_0 = const()[name = tensor("op_13179_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2001406720))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2001816384))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2001816960)))]; + tensor var_13179_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_13177, groups = var_12518, pad = var_13179_pad_0, pad_type = var_13179_pad_type_0, strides = var_13175, weight = up_blocks_1_attentions_1_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_773_cast)[name = tensor("op_13179_cast")]; + tensor inputs_401_cast = add(x = var_13179_cast, y = inputs_399_cast)[name = tensor("inputs_401_cast")]; + tensor var_13183 = const()[name = tensor("op_13183"), val = tensor([1])]; + tensor channels_mean_401_cast = reduce_mean(axes = var_13183, keep_dims = var_12513, x = inputs_401_cast)[name = tensor("channels_mean_401_cast")]; + tensor zero_mean_401_cast = sub(x = inputs_401_cast, y = channels_mean_401_cast)[name = tensor("zero_mean_401_cast")]; + tensor zero_mean_sq_401_cast = mul(x = zero_mean_401_cast, y = zero_mean_401_cast)[name = tensor("zero_mean_sq_401_cast")]; + tensor var_13187 = const()[name = tensor("op_13187"), val = tensor([1])]; + tensor var_13188_cast = reduce_mean(axes = var_13187, keep_dims = var_12513, x = zero_mean_sq_401_cast)[name = tensor("op_13188_cast")]; + tensor var_13189_to_fp16 = const()[name = tensor("op_13189_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13190_cast = add(x = var_13188_cast, y = var_13189_to_fp16)[name = tensor("op_13190_cast")]; + tensor denom_401_epsilon_0_to_fp16 = const()[name = tensor("denom_401_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_401_cast = rsqrt(epsilon = denom_401_epsilon_0_to_fp16, x = var_13190_cast)[name = tensor("denom_401_cast")]; + tensor out_401_cast = mul(x = zero_mean_401_cast, y = denom_401_cast)[name = tensor("out_401_cast")]; + tensor 
var_13194_to_fp16 = const()[name = tensor("op_13194_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2001818304)))]; + tensor var_13195_cast = add(x = out_401_cast, y = var_13194_to_fp16)[name = tensor("op_13195_cast")]; + tensor var_13197_to_fp16 = const()[name = tensor("op_13197_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2001819648)))]; + tensor input_775_cast = mul(x = var_13195_cast, y = var_13197_to_fp16)[name = tensor("input_775_cast")]; + tensor var_13205 = const()[name = tensor("op_13205"), val = tensor([1, 1])]; + tensor var_13207 = const()[name = tensor("op_13207"), val = tensor([1, 1])]; + tensor var_13209_pad_type_0 = const()[name = tensor("op_13209_pad_type_0"), val = tensor("custom")]; + tensor var_13209_pad_0 = const()[name = tensor("op_13209_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2001820992)))]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2008374656)))]; + tensor var_13209_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_13207, groups = var_12518, pad = var_13209_pad_0, pad_type = var_13209_pad_type_0, strides = var_13205, weight = up_blocks_1_attentions_1_transformer_blocks_0_ff_net_0_proj_weight_to_fp16, x = input_775_cast)[name = tensor("op_13209_cast")]; + tensor var_13210_split_sizes_0 = const()[name = tensor("op_13210_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_13210_axis_0 = const()[name = tensor("op_13210_axis_0"), val = tensor(1)]; + tensor var_13210_cast_0, tensor var_13210_cast_1 = split(axis = var_13210_axis_0, split_sizes = var_13210_split_sizes_0, x = var_13209_cast)[name = tensor("op_13210_cast")]; + tensor var_13212_mode_0 = const()[name = tensor("op_13212_mode_0"), val = tensor("EXACT")]; + tensor var_13212_cast = gelu(mode = var_13212_mode_0, x = var_13210_cast_1)[name = tensor("op_13212_cast")]; + tensor input_777_cast = mul(x = var_13210_cast_0, y = var_13212_cast)[name = tensor("input_777_cast")]; + tensor var_13216 = const()[name = tensor("op_13216"), val = tensor([1, 1])]; + tensor var_13218 = const()[name = tensor("op_13218"), val = tensor([1, 1])]; + tensor var_13220_pad_type_0 = const()[name = tensor("op_13220_pad_type_0"), val = tensor("custom")]; + tensor var_13220_pad_0 = const()[name = tensor("op_13220_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2008384960)))]; + tensor up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2011661824)))]; + tensor var_13220_cast = conv(bias = 
up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_13218, groups = var_12518, pad = var_13220_pad_0, pad_type = var_13220_pad_type_0, strides = var_13216, weight = up_blocks_1_attentions_1_transformer_blocks_0_ff_net_2_weight_to_fp16, x = input_777_cast)[name = tensor("op_13220_cast")]; + tensor inputs_403_cast = add(x = var_13220_cast, y = inputs_401_cast)[name = tensor("inputs_403_cast")]; + tensor var_13230 = const()[name = tensor("op_13230"), val = tensor([1])]; + tensor channels_mean_403_cast = reduce_mean(axes = var_13230, keep_dims = var_12513, x = inputs_403_cast)[name = tensor("channels_mean_403_cast")]; + tensor zero_mean_403_cast = sub(x = inputs_403_cast, y = channels_mean_403_cast)[name = tensor("zero_mean_403_cast")]; + tensor zero_mean_sq_403_cast = mul(x = zero_mean_403_cast, y = zero_mean_403_cast)[name = tensor("zero_mean_sq_403_cast")]; + tensor var_13234 = const()[name = tensor("op_13234"), val = tensor([1])]; + tensor var_13235_cast = reduce_mean(axes = var_13234, keep_dims = var_12513, x = zero_mean_sq_403_cast)[name = tensor("op_13235_cast")]; + tensor var_13236_to_fp16 = const()[name = tensor("op_13236_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13237_cast = add(x = var_13235_cast, y = var_13236_to_fp16)[name = tensor("op_13237_cast")]; + tensor denom_403_epsilon_0_to_fp16 = const()[name = tensor("denom_403_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_403_cast = rsqrt(epsilon = denom_403_epsilon_0_to_fp16, x = var_13237_cast)[name = tensor("denom_403_cast")]; + tensor out_403_cast = mul(x = zero_mean_403_cast, y = denom_403_cast)[name = tensor("out_403_cast")]; + tensor var_13241_to_fp16 = const()[name = tensor("op_13241_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2011663168)))]; + tensor var_13242_cast = add(x = out_403_cast, y = var_13241_to_fp16)[name = tensor("op_13242_cast")]; + tensor var_13244_to_fp16 = const()[name = tensor("op_13244_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2011664512)))]; + tensor hidden_states_541_cast = mul(x = var_13242_cast, y = var_13244_to_fp16)[name = tensor("hidden_states_541_cast")]; + tensor var_13251 = const()[name = tensor("op_13251"), val = tensor([1, 1])]; + tensor var_13253 = const()[name = tensor("op_13253"), val = tensor([1, 1])]; + tensor q_269_pad_type_0 = const()[name = tensor("q_269_pad_type_0"), val = tensor("custom")]; + tensor q_269_pad_0 = const()[name = tensor("q_269_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2011665856)))]; + tensor q_269_cast = conv(dilations = var_13253, groups = var_12518, pad = q_269_pad_0, pad_type = q_269_pad_type_0, strides = var_13251, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_q_weight_to_fp16, x = hidden_states_541_cast)[name = tensor("q_269_cast")]; + tensor var_13257 = const()[name = tensor("op_13257"), val = tensor([1, 1])]; + tensor var_13259 = const()[name = tensor("op_13259"), val = tensor([1, 1])]; + tensor k_269_pad_type_0 = const()[name = tensor("k_269_pad_type_0"), val = tensor("custom")]; + tensor k_269_pad_0 = const()[name = tensor("k_269_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2012485120))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2012894784))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_269_cast = conv(dilations = var_13259, groups = var_12518, pad = k_269_pad_0, pad_type = k_269_pad_type_0, strides = var_13257, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_541_cast)[name = tensor("k_269_cast")]; + tensor var_13263 = const()[name = tensor("op_13263"), val = tensor([1, 1])]; + tensor var_13265 = const()[name = tensor("op_13265"), val = tensor([1, 1])]; + tensor v_269_pad_type_0 = const()[name = tensor("v_269_pad_type_0"), val = tensor("custom")]; + tensor v_269_pad_0 = const()[name = tensor("v_269_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2012895360)))]; + tensor v_269_cast = conv(dilations = var_13265, groups = var_12518, pad = v_269_pad_0, pad_type = v_269_pad_type_0, strides = var_13263, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_v_weight_to_fp16, x = hidden_states_541_cast)[name = tensor("v_269_cast")]; + tensor var_13269 = const()[name = tensor("op_13269"), val = tensor([2, 10, 64, -1])]; + tensor var_13270_cast = reshape(shape = var_13269, x = q_269_cast)[name = tensor("op_13270_cast")]; + tensor var_13271 = const()[name = tensor("op_13271"), val = tensor([2, 10, 64, -1])]; + tensor var_13272_cast = reshape(shape = var_13271, x = k_269_cast)[name = tensor("op_13272_cast")]; + tensor var_13273 = const()[name = tensor("op_13273"), val = tensor([2, 10, 64, -1])]; + tensor var_13274_cast = reshape(shape = var_13273, x = v_269_cast)[name = tensor("op_13274_cast")]; + tensor attn_weights_537_transpose_x_0 = const()[name = tensor("attn_weights_537_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_537_transpose_y_0 = const()[name = tensor("attn_weights_537_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_537_cast = matmul(transpose_x = attn_weights_537_transpose_x_0, transpose_y = attn_weights_537_transpose_y_0, x = var_13270_cast, y = var_13272_cast)[name = tensor("attn_weights_537_cast")]; + tensor attn_weights_539_cast = mul(x = attn_weights_537_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_539_cast")]; + tensor var_13278_cast = softmax(axis = var_12502, x = attn_weights_539_cast)[name = tensor("op_13278_cast")]; + tensor attn_269_transpose_x_0 = const()[name = tensor("attn_269_transpose_x_0"), val = tensor(false)]; + tensor attn_269_transpose_y_0 = const()[name = tensor("attn_269_transpose_y_0"), val = tensor(true)]; + tensor attn_269_cast = matmul(transpose_x = attn_269_transpose_x_0, transpose_y = attn_269_transpose_y_0, x = var_13274_cast, y = var_13278_cast)[name = tensor("attn_269_cast")]; + tensor var_13282 = const()[name = tensor("op_13282"), val = tensor([2, 640, 1, -1])]; + tensor input_779_cast = reshape(shape = var_13282, x = attn_269_cast)[name = tensor("input_779_cast")]; + tensor var_13287 = const()[name = tensor("op_13287"), 
val = tensor([1, 1])]; + tensor var_13289 = const()[name = tensor("op_13289"), val = tensor([1, 1])]; + tensor var_13291_pad_type_0 = const()[name = tensor("op_13291_pad_type_0"), val = tensor("custom")]; + tensor var_13291_pad_0 = const()[name = tensor("op_13291_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2013714624)))]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2014533888)))]; + tensor var_13291_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_13289, groups = var_12518, pad = var_13291_pad_0, pad_type = var_13291_pad_type_0, strides = var_13287, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn1_to_out_0_weight_to_fp16, x = input_779_cast)[name = tensor("op_13291_cast")]; + tensor inputs_405_cast = add(x = var_13291_cast, y = inputs_403_cast)[name = tensor("inputs_405_cast")]; + tensor var_13295 = const()[name = tensor("op_13295"), val = tensor([1])]; + tensor channels_mean_405_cast = reduce_mean(axes = var_13295, keep_dims = var_12513, x = inputs_405_cast)[name = tensor("channels_mean_405_cast")]; + tensor zero_mean_405_cast = sub(x = inputs_405_cast, y = channels_mean_405_cast)[name = tensor("zero_mean_405_cast")]; + tensor zero_mean_sq_405_cast = mul(x = zero_mean_405_cast, y = zero_mean_405_cast)[name = tensor("zero_mean_sq_405_cast")]; + tensor var_13299 = const()[name = tensor("op_13299"), val = tensor([1])]; + tensor var_13300_cast = reduce_mean(axes = var_13299, keep_dims = var_12513, x = zero_mean_sq_405_cast)[name = tensor("op_13300_cast")]; + tensor var_13301_to_fp16 = const()[name = tensor("op_13301_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13302_cast = add(x = var_13300_cast, y = var_13301_to_fp16)[name = tensor("op_13302_cast")]; + tensor denom_405_epsilon_0_to_fp16 = const()[name = tensor("denom_405_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_405_cast = rsqrt(epsilon = denom_405_epsilon_0_to_fp16, x = var_13302_cast)[name = tensor("denom_405_cast")]; + tensor out_405_cast = mul(x = zero_mean_405_cast, y = denom_405_cast)[name = tensor("out_405_cast")]; + tensor var_13306_to_fp16 = const()[name = tensor("op_13306_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2014535232)))]; + tensor var_13307_cast = add(x = out_405_cast, y = var_13306_to_fp16)[name = tensor("op_13307_cast")]; + tensor var_13309_to_fp16 = const()[name = tensor("op_13309_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2014536576)))]; + tensor hidden_states_543_cast = mul(x = var_13307_cast, y = var_13309_to_fp16)[name = tensor("hidden_states_543_cast")]; + tensor var_13316 = const()[name = tensor("op_13316"), val = tensor([1, 1])]; + tensor var_13318 = const()[name = tensor("op_13318"), val = tensor([1, 1])]; + tensor q_271_pad_type_0 = const()[name = tensor("q_271_pad_type_0"), val = tensor("custom")]; + tensor q_271_pad_0 = const()[name = tensor("q_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor 
up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2014537920))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2014845184))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_271_cast = conv(dilations = var_13318, groups = var_12518, pad = q_271_pad_0, pad_type = q_271_pad_type_0, strides = var_13316, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_543_cast)[name = tensor("q_271_cast")]; + tensor var_13322 = const()[name = tensor("op_13322"), val = tensor([1, 1])]; + tensor var_13324 = const()[name = tensor("op_13324"), val = tensor([1, 1])]; + tensor k_271_pad_type_0 = const()[name = tensor("k_271_pad_type_0"), val = tensor("custom")]; + tensor k_271_pad_0 = const()[name = tensor("k_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2014845376))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2015828480))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_271_cast = conv(dilations = var_13324, groups = var_12518, pad = k_271_pad_0, pad_type = k_271_pad_type_0, strides = var_13322, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_271_cast")]; + tensor var_13328 = const()[name = tensor("op_13328"), val = tensor([1, 1])]; + tensor var_13330 = const()[name = tensor("op_13330"), val = tensor([1, 1])]; + tensor v_271_pad_type_0 = const()[name = tensor("v_271_pad_type_0"), val = tensor("custom")]; + tensor v_271_pad_0 = const()[name = tensor("v_271_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2015828672))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2017139456))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_271_cast = conv(dilations = var_13330, groups = var_12518, pad = v_271_pad_0, pad_type = v_271_pad_type_0, strides = var_13328, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_271_cast")]; + tensor var_13334 = const()[name = tensor("op_13334"), val = tensor([2, 10, 64, -1])]; + tensor var_13335_cast = reshape(shape = var_13334, x = q_271_cast)[name = tensor("op_13335_cast")]; + tensor var_13336 = const()[name = tensor("op_13336"), val = tensor([2, 10, 64, -1])]; + tensor var_13337_cast = reshape(shape = var_13336, x = k_271_cast)[name = tensor("op_13337_cast")]; + tensor var_13338 = const()[name = tensor("op_13338"), val = tensor([2, 10, 64, -1])]; + tensor var_13339_cast = reshape(shape = var_13338, x = v_271_cast)[name = tensor("op_13339_cast")]; + tensor attn_weights_541_transpose_x_0 = 
const()[name = tensor("attn_weights_541_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_541_transpose_y_0 = const()[name = tensor("attn_weights_541_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_541_cast = matmul(transpose_x = attn_weights_541_transpose_x_0, transpose_y = attn_weights_541_transpose_y_0, x = var_13335_cast, y = var_13337_cast)[name = tensor("attn_weights_541_cast")]; + tensor attn_weights_543_cast = mul(x = attn_weights_541_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_543_cast")]; + tensor var_13343_cast = softmax(axis = var_12502, x = attn_weights_543_cast)[name = tensor("op_13343_cast")]; + tensor attn_271_transpose_x_0 = const()[name = tensor("attn_271_transpose_x_0"), val = tensor(false)]; + tensor attn_271_transpose_y_0 = const()[name = tensor("attn_271_transpose_y_0"), val = tensor(true)]; + tensor attn_271_cast = matmul(transpose_x = attn_271_transpose_x_0, transpose_y = attn_271_transpose_y_0, x = var_13339_cast, y = var_13343_cast)[name = tensor("attn_271_cast")]; + tensor var_13347 = const()[name = tensor("op_13347"), val = tensor([2, 640, 1, -1])]; + tensor input_781_cast = reshape(shape = var_13347, x = attn_271_cast)[name = tensor("input_781_cast")]; + tensor var_13352 = const()[name = tensor("op_13352"), val = tensor([1, 1])]; + tensor var_13354 = const()[name = tensor("op_13354"), val = tensor([1, 1])]; + tensor var_13356_pad_type_0 = const()[name = tensor("op_13356_pad_type_0"), val = tensor("custom")]; + tensor var_13356_pad_0 = const()[name = tensor("op_13356_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2017140032))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2017549696))), name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2017550272)))]; + tensor var_13356_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_13354, groups = var_12518, pad = var_13356_pad_0, pad_type = var_13356_pad_type_0, strides = var_13352, weight = up_blocks_1_attentions_1_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_781_cast)[name = tensor("op_13356_cast")]; + tensor inputs_407_cast = add(x = var_13356_cast, y = inputs_405_cast)[name = tensor("inputs_407_cast")]; + tensor var_13360 = const()[name = tensor("op_13360"), val = tensor([1])]; + tensor channels_mean_407_cast = reduce_mean(axes = var_13360, keep_dims = var_12513, x = inputs_407_cast)[name = tensor("channels_mean_407_cast")]; + tensor zero_mean_407_cast = sub(x = inputs_407_cast, y = channels_mean_407_cast)[name = tensor("zero_mean_407_cast")]; + tensor zero_mean_sq_407_cast = mul(x = zero_mean_407_cast, y = zero_mean_407_cast)[name = tensor("zero_mean_sq_407_cast")]; + tensor var_13364 = const()[name = tensor("op_13364"), val = tensor([1])]; + tensor var_13365_cast = reduce_mean(axes = var_13364, keep_dims = var_12513, x = zero_mean_sq_407_cast)[name = tensor("op_13365_cast")]; + tensor 
var_13366_to_fp16 = const()[name = tensor("op_13366_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13367_cast = add(x = var_13365_cast, y = var_13366_to_fp16)[name = tensor("op_13367_cast")]; + tensor denom_407_epsilon_0_to_fp16 = const()[name = tensor("denom_407_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_407_cast = rsqrt(epsilon = denom_407_epsilon_0_to_fp16, x = var_13367_cast)[name = tensor("denom_407_cast")]; + tensor out_407_cast = mul(x = zero_mean_407_cast, y = denom_407_cast)[name = tensor("out_407_cast")]; + tensor var_13371_to_fp16 = const()[name = tensor("op_13371_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2017551616)))]; + tensor var_13372_cast = add(x = out_407_cast, y = var_13371_to_fp16)[name = tensor("op_13372_cast")]; + tensor var_13374_to_fp16 = const()[name = tensor("op_13374_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2017552960)))]; + tensor input_783_cast = mul(x = var_13372_cast, y = var_13374_to_fp16)[name = tensor("input_783_cast")]; + tensor var_13382 = const()[name = tensor("op_13382"), val = tensor([1, 1])]; + tensor var_13384 = const()[name = tensor("op_13384"), val = tensor([1, 1])]; + tensor var_13386_pad_type_0 = const()[name = tensor("op_13386_pad_type_0"), val = tensor("custom")]; + tensor var_13386_pad_0 = const()[name = tensor("op_13386_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2017554304)))]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2024107968)))]; + tensor var_13386_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_13384, groups = var_12518, pad = var_13386_pad_0, pad_type = var_13386_pad_type_0, strides = var_13382, weight = up_blocks_1_attentions_1_transformer_blocks_1_ff_net_0_proj_weight_to_fp16, x = input_783_cast)[name = tensor("op_13386_cast")]; + tensor var_13387_split_sizes_0 = const()[name = tensor("op_13387_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_13387_axis_0 = const()[name = tensor("op_13387_axis_0"), val = tensor(1)]; + tensor var_13387_cast_0, tensor var_13387_cast_1 = split(axis = var_13387_axis_0, split_sizes = var_13387_split_sizes_0, x = var_13386_cast)[name = tensor("op_13387_cast")]; + tensor var_13389_mode_0 = const()[name = tensor("op_13389_mode_0"), val = tensor("EXACT")]; + tensor var_13389_cast = gelu(mode = var_13389_mode_0, x = var_13387_cast_1)[name = tensor("op_13389_cast")]; + tensor input_785_cast = mul(x = var_13387_cast_0, y = var_13389_cast)[name = tensor("input_785_cast")]; + tensor var_13393 = const()[name = tensor("op_13393"), val = tensor([1, 1])]; + tensor var_13395 = const()[name = tensor("op_13395"), val = tensor([1, 1])]; + tensor var_13397_pad_type_0 = const()[name = tensor("op_13397_pad_type_0"), val = tensor("custom")]; + tensor var_13397_pad_0 = const()[name = tensor("op_13397_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16 = const()[name = 
tensor("up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2024118272)))]; + tensor up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2027395136)))]; + tensor var_13397_cast = conv(bias = up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = var_13395, groups = var_12518, pad = var_13397_pad_0, pad_type = var_13397_pad_type_0, strides = var_13393, weight = up_blocks_1_attentions_1_transformer_blocks_1_ff_net_2_weight_to_fp16, x = input_785_cast)[name = tensor("op_13397_cast")]; + tensor hidden_states_547_cast = add(x = var_13397_cast, y = inputs_407_cast)[name = tensor("hidden_states_547_cast")]; + tensor var_13399 = const()[name = tensor("op_13399"), val = tensor([2, 640, 64, 64])]; + tensor input_787_cast = reshape(shape = var_13399, x = hidden_states_547_cast)[name = tensor("input_787_cast")]; + tensor var_13403 = const()[name = tensor("op_13403"), val = tensor([1, 1])]; + tensor var_13405 = const()[name = tensor("op_13405"), val = tensor([1, 1])]; + tensor hidden_states_549_pad_type_0 = const()[name = tensor("hidden_states_549_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_549_pad_0 = const()[name = tensor("hidden_states_549_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_1_proj_out_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_proj_out_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2027396480)))]; + tensor up_blocks_1_attentions_1_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_1_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2028215744)))]; + tensor hidden_states_549_cast = conv(bias = up_blocks_1_attentions_1_proj_out_bias_to_fp16, dilations = var_13405, groups = var_12518, pad = hidden_states_549_pad_0, pad_type = hidden_states_549_pad_type_0, strides = var_13403, weight = up_blocks_1_attentions_1_proj_out_weight_to_fp16, x = input_787_cast)[name = tensor("hidden_states_549_cast")]; + tensor hidden_states_551_cast = add(x = hidden_states_549_cast, y = hidden_states_531_cast)[name = tensor("hidden_states_551_cast")]; + tensor input_789_interleave_0 = const()[name = tensor("input_789_interleave_0"), val = tensor(false)]; + tensor input_789_cast = concat(axis = var_12518, interleave = input_789_interleave_0, values = (hidden_states_551_cast, input_45_cast))[name = tensor("input_789_cast")]; + tensor reshape_144_shape_0 = const()[name = tensor("reshape_144_shape_0"), val = tensor([2, 32, 30, 64, 64])]; + tensor reshape_144_cast = reshape(shape = reshape_144_shape_0, x = input_789_cast)[name = tensor("reshape_144_cast")]; + tensor reduce_mean_108_axes_0 = const()[name = tensor("reduce_mean_108_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_108_keep_dims_0 = const()[name = tensor("reduce_mean_108_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_108_cast = reduce_mean(axes = reduce_mean_108_axes_0, keep_dims = reduce_mean_108_keep_dims_0, x = reshape_144_cast)[name = tensor("reduce_mean_108_cast")]; + tensor sub_72_cast = sub(x = reshape_144_cast, y = reduce_mean_108_cast)[name = tensor("sub_72_cast")]; + tensor square_36_cast = square(x = 
sub_72_cast)[name = tensor("square_36_cast")]; + tensor reduce_mean_110_axes_0 = const()[name = tensor("reduce_mean_110_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_110_keep_dims_0 = const()[name = tensor("reduce_mean_110_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_110_cast = reduce_mean(axes = reduce_mean_110_axes_0, keep_dims = reduce_mean_110_keep_dims_0, x = square_36_cast)[name = tensor("reduce_mean_110_cast")]; + tensor add_72_y_0_to_fp16 = const()[name = tensor("add_72_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_72_cast = add(x = reduce_mean_110_cast, y = add_72_y_0_to_fp16)[name = tensor("add_72_cast")]; + tensor sqrt_36_cast = sqrt(x = add_72_cast)[name = tensor("sqrt_36_cast")]; + tensor real_div_36_cast = real_div(x = sub_72_cast, y = sqrt_36_cast)[name = tensor("real_div_36_cast")]; + tensor reshape_145_shape_0 = const()[name = tensor("reshape_145_shape_0"), val = tensor([2, 960, 64, 64])]; + tensor reshape_145_cast = reshape(shape = reshape_145_shape_0, x = real_div_36_cast)[name = tensor("reshape_145_cast")]; + tensor add_73_mean_0_to_fp16 = const()[name = tensor("add_73_mean_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2028217088)))]; + tensor add_73_variance_0_to_fp16 = const()[name = tensor("add_73_variance_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2028219072)))]; + tensor add_73_gamma_0_to_fp16 = const()[name = tensor("add_73_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2028221056)))]; + tensor add_73_beta_0_to_fp16 = const()[name = tensor("add_73_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2028223040)))]; + tensor add_73_epsilon_0_to_fp16 = const()[name = tensor("add_73_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_73_cast = batch_norm(beta = add_73_beta_0_to_fp16, epsilon = add_73_epsilon_0_to_fp16, gamma = add_73_gamma_0_to_fp16, mean = add_73_mean_0_to_fp16, variance = add_73_variance_0_to_fp16, x = reshape_145_cast)[name = tensor("add_73_cast")]; + tensor input_793_cast = silu(x = add_73_cast)[name = tensor("input_793_cast")]; + tensor var_13423 = const()[name = tensor("op_13423"), val = tensor([1, 1])]; + tensor var_13425 = const()[name = tensor("op_13425"), val = tensor([1, 1])]; + tensor hidden_states_553_pad_type_0 = const()[name = tensor("hidden_states_553_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_553_pad_0 = const()[name = tensor("hidden_states_553_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_2_conv1_weight_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2028225024)))]; + tensor up_blocks_1_resnets_2_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2039284288)))]; + tensor hidden_states_553_cast = conv(bias = up_blocks_1_resnets_2_conv1_bias_to_fp16, dilations = var_13425, groups = var_12518, pad = hidden_states_553_pad_0, pad_type = hidden_states_553_pad_type_0, strides = var_13423, weight = up_blocks_1_resnets_2_conv1_weight_to_fp16, x = input_793_cast)[name = tensor("hidden_states_553_cast")]; + tensor var_13431 = const()[name = tensor("op_13431"), val = tensor([1, 1])]; + tensor var_13433 = 
const()[name = tensor("op_13433"), val = tensor([1, 1])]; + tensor temb_27_pad_type_0 = const()[name = tensor("temb_27_pad_type_0"), val = tensor("custom")]; + tensor temb_27_pad_0 = const()[name = tensor("temb_27_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_2_time_emb_proj_weight_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_time_emb_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2039285632)))]; + tensor up_blocks_1_resnets_2_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2040924096)))]; + tensor temb_27_cast = conv(bias = up_blocks_1_resnets_2_time_emb_proj_bias_to_fp16, dilations = var_13433, groups = var_12518, pad = temb_27_pad_0, pad_type = temb_27_pad_type_0, strides = var_13431, weight = up_blocks_1_resnets_2_time_emb_proj_weight_to_fp16, x = input_21_cast)[name = tensor("temb_27_cast")]; + tensor input_797_cast = add(x = hidden_states_553_cast, y = temb_27_cast)[name = tensor("input_797_cast")]; + tensor reshape_148_shape_0 = const()[name = tensor("reshape_148_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_148_cast = reshape(shape = reshape_148_shape_0, x = input_797_cast)[name = tensor("reshape_148_cast")]; + tensor reduce_mean_111_axes_0 = const()[name = tensor("reduce_mean_111_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_111_keep_dims_0 = const()[name = tensor("reduce_mean_111_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_111_cast = reduce_mean(axes = reduce_mean_111_axes_0, keep_dims = reduce_mean_111_keep_dims_0, x = reshape_148_cast)[name = tensor("reduce_mean_111_cast")]; + tensor sub_74_cast = sub(x = reshape_148_cast, y = reduce_mean_111_cast)[name = tensor("sub_74_cast")]; + tensor square_37_cast = square(x = sub_74_cast)[name = tensor("square_37_cast")]; + tensor reduce_mean_113_axes_0 = const()[name = tensor("reduce_mean_113_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_113_keep_dims_0 = const()[name = tensor("reduce_mean_113_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_113_cast = reduce_mean(axes = reduce_mean_113_axes_0, keep_dims = reduce_mean_113_keep_dims_0, x = square_37_cast)[name = tensor("reduce_mean_113_cast")]; + tensor add_74_y_0_to_fp16 = const()[name = tensor("add_74_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_74_cast = add(x = reduce_mean_113_cast, y = add_74_y_0_to_fp16)[name = tensor("add_74_cast")]; + tensor sqrt_37_cast = sqrt(x = add_74_cast)[name = tensor("sqrt_37_cast")]; + tensor real_div_37_cast = real_div(x = sub_74_cast, y = sqrt_37_cast)[name = tensor("real_div_37_cast")]; + tensor reshape_149_shape_0 = const()[name = tensor("reshape_149_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_149_cast = reshape(shape = reshape_149_shape_0, x = real_div_37_cast)[name = tensor("reshape_149_cast")]; + tensor add_75_gamma_0_to_fp16 = const()[name = tensor("add_75_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2040925440)))]; + tensor add_75_beta_0_to_fp16 = const()[name = tensor("add_75_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2040926784)))]; + tensor add_75_epsilon_0_to_fp16 = const()[name = tensor("add_75_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_75_cast = batch_norm(beta = add_75_beta_0_to_fp16, 
epsilon = add_75_epsilon_0_to_fp16, gamma = add_75_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_149_cast)[name = tensor("add_75_cast")]; + tensor input_801_cast = silu(x = add_75_cast)[name = tensor("input_801_cast")]; + tensor var_13443 = const()[name = tensor("op_13443"), val = tensor([1, 1])]; + tensor var_13445 = const()[name = tensor("op_13445"), val = tensor([1, 1])]; + tensor hidden_states_555_pad_type_0 = const()[name = tensor("hidden_states_555_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_555_pad_0 = const()[name = tensor("hidden_states_555_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_resnets_2_conv2_weight_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2040928128)))]; + tensor up_blocks_1_resnets_2_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2048300992)))]; + tensor hidden_states_555_cast = conv(bias = up_blocks_1_resnets_2_conv2_bias_to_fp16, dilations = var_13445, groups = var_12518, pad = hidden_states_555_pad_0, pad_type = hidden_states_555_pad_type_0, strides = var_13443, weight = up_blocks_1_resnets_2_conv2_weight_to_fp16, x = input_801_cast)[name = tensor("hidden_states_555_cast")]; + tensor var_13450 = const()[name = tensor("op_13450"), val = tensor([1, 1])]; + tensor var_13452 = const()[name = tensor("op_13452"), val = tensor([1, 1])]; + tensor x_15_pad_type_0 = const()[name = tensor("x_15_pad_type_0"), val = tensor("custom")]; + tensor x_15_pad_0 = const()[name = tensor("x_15_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_resnets_2_conv_shortcut_weight_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_conv_shortcut_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2048302336)))]; + tensor up_blocks_1_resnets_2_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_1_resnets_2_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2049531200)))]; + tensor x_15_cast = conv(bias = up_blocks_1_resnets_2_conv_shortcut_bias_to_fp16, dilations = var_13452, groups = var_12518, pad = x_15_pad_0, pad_type = x_15_pad_type_0, strides = var_13450, weight = up_blocks_1_resnets_2_conv_shortcut_weight_to_fp16, x = input_789_cast)[name = tensor("x_15_cast")]; + tensor hidden_states_557_cast = add(x = x_15_cast, y = hidden_states_555_cast)[name = tensor("hidden_states_557_cast")]; + tensor reshape_152_shape_0 = const()[name = tensor("reshape_152_shape_0"), val = tensor([2, 32, 20, 64, 64])]; + tensor reshape_152_cast = reshape(shape = reshape_152_shape_0, x = hidden_states_557_cast)[name = tensor("reshape_152_cast")]; + tensor reduce_mean_114_axes_0 = const()[name = tensor("reduce_mean_114_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_114_keep_dims_0 = const()[name = tensor("reduce_mean_114_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_114_cast = reduce_mean(axes = reduce_mean_114_axes_0, keep_dims = reduce_mean_114_keep_dims_0, x = reshape_152_cast)[name = tensor("reduce_mean_114_cast")]; + tensor sub_76_cast = sub(x = reshape_152_cast, y = reduce_mean_114_cast)[name = tensor("sub_76_cast")]; + tensor square_38_cast = square(x = sub_76_cast)[name = tensor("square_38_cast")]; + tensor 
reduce_mean_116_axes_0 = const()[name = tensor("reduce_mean_116_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_116_keep_dims_0 = const()[name = tensor("reduce_mean_116_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_116_cast = reduce_mean(axes = reduce_mean_116_axes_0, keep_dims = reduce_mean_116_keep_dims_0, x = square_38_cast)[name = tensor("reduce_mean_116_cast")]; + tensor add_76_y_0_to_fp16 = const()[name = tensor("add_76_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_76_cast = add(x = reduce_mean_116_cast, y = add_76_y_0_to_fp16)[name = tensor("add_76_cast")]; + tensor sqrt_38_cast = sqrt(x = add_76_cast)[name = tensor("sqrt_38_cast")]; + tensor real_div_38_cast = real_div(x = sub_76_cast, y = sqrt_38_cast)[name = tensor("real_div_38_cast")]; + tensor reshape_153_shape_0 = const()[name = tensor("reshape_153_shape_0"), val = tensor([2, 640, 64, 64])]; + tensor reshape_153_cast = reshape(shape = reshape_153_shape_0, x = real_div_38_cast)[name = tensor("reshape_153_cast")]; + tensor add_77_gamma_0_to_fp16 = const()[name = tensor("add_77_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2049532544)))]; + tensor add_77_beta_0_to_fp16 = const()[name = tensor("add_77_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2049533888)))]; + tensor add_77_epsilon_0_to_fp16 = const()[name = tensor("add_77_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_77_cast = batch_norm(beta = add_77_beta_0_to_fp16, epsilon = add_77_epsilon_0_to_fp16, gamma = add_77_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_153_cast)[name = tensor("add_77_cast")]; + tensor var_13474 = const()[name = tensor("op_13474"), val = tensor([1, 1])]; + tensor var_13476 = const()[name = tensor("op_13476"), val = tensor([1, 1])]; + tensor hidden_states_559_pad_type_0 = const()[name = tensor("hidden_states_559_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_559_pad_0 = const()[name = tensor("hidden_states_559_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_proj_in_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_proj_in_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2049535232)))]; + tensor up_blocks_1_attentions_2_proj_in_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_proj_in_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2050354496)))]; + tensor hidden_states_559_cast = conv(bias = up_blocks_1_attentions_2_proj_in_bias_to_fp16, dilations = var_13476, groups = var_12518, pad = hidden_states_559_pad_0, pad_type = hidden_states_559_pad_type_0, strides = var_13474, weight = up_blocks_1_attentions_2_proj_in_weight_to_fp16, x = add_77_cast)[name = tensor("hidden_states_559_cast")]; + tensor var_13481 = const()[name = tensor("op_13481"), val = tensor([2, 640, 1, 4096])]; + tensor inputs_409_cast = reshape(shape = var_13481, x = hidden_states_559_cast)[name = tensor("inputs_409_cast")]; + tensor var_13491 = const()[name = tensor("op_13491"), val = tensor([1])]; + tensor channels_mean_409_cast = reduce_mean(axes = var_13491, keep_dims = var_12513, x = inputs_409_cast)[name = tensor("channels_mean_409_cast")]; + tensor zero_mean_409_cast = sub(x = inputs_409_cast, y = channels_mean_409_cast)[name = tensor("zero_mean_409_cast")]; + tensor zero_mean_sq_409_cast = mul(x = 
zero_mean_409_cast, y = zero_mean_409_cast)[name = tensor("zero_mean_sq_409_cast")]; + tensor var_13495 = const()[name = tensor("op_13495"), val = tensor([1])]; + tensor var_13496_cast = reduce_mean(axes = var_13495, keep_dims = var_12513, x = zero_mean_sq_409_cast)[name = tensor("op_13496_cast")]; + tensor var_13497_to_fp16 = const()[name = tensor("op_13497_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13498_cast = add(x = var_13496_cast, y = var_13497_to_fp16)[name = tensor("op_13498_cast")]; + tensor denom_409_epsilon_0_to_fp16 = const()[name = tensor("denom_409_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_409_cast = rsqrt(epsilon = denom_409_epsilon_0_to_fp16, x = var_13498_cast)[name = tensor("denom_409_cast")]; + tensor out_409_cast = mul(x = zero_mean_409_cast, y = denom_409_cast)[name = tensor("out_409_cast")]; + tensor var_13502_to_fp16 = const()[name = tensor("op_13502_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2050355840)))]; + tensor var_13503_cast = add(x = out_409_cast, y = var_13502_to_fp16)[name = tensor("op_13503_cast")]; + tensor var_13505_to_fp16 = const()[name = tensor("op_13505_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2050357184)))]; + tensor hidden_states_561_cast = mul(x = var_13503_cast, y = var_13505_to_fp16)[name = tensor("hidden_states_561_cast")]; + tensor var_13512 = const()[name = tensor("op_13512"), val = tensor([1, 1])]; + tensor var_13514 = const()[name = tensor("op_13514"), val = tensor([1, 1])]; + tensor q_273_pad_type_0 = const()[name = tensor("q_273_pad_type_0"), val = tensor("custom")]; + tensor q_273_pad_0 = const()[name = tensor("q_273_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2050358528))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2050768192))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_273_cast = conv(dilations = var_13514, groups = var_12518, pad = q_273_pad_0, pad_type = q_273_pad_type_0, strides = var_13512, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_q_weight_to_fp16_palettized, x = hidden_states_561_cast)[name = tensor("q_273_cast")]; + tensor var_13518 = const()[name = tensor("op_13518"), val = tensor([1, 1])]; + tensor var_13520 = const()[name = tensor("op_13520"), val = tensor([1, 1])]; + tensor k_273_pad_type_0 = const()[name = tensor("k_273_pad_type_0"), val = tensor("custom")]; + tensor k_273_pad_0 = const()[name = tensor("k_273_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2050768768))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2051178432))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor k_273_cast = conv(dilations = var_13520, groups = var_12518, pad = k_273_pad_0, pad_type = k_273_pad_type_0, strides = var_13518, weight = 
up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_k_weight_to_fp16_palettized, x = hidden_states_561_cast)[name = tensor("k_273_cast")]; + tensor var_13524 = const()[name = tensor("op_13524"), val = tensor([1, 1])]; + tensor var_13526 = const()[name = tensor("op_13526"), val = tensor([1, 1])]; + tensor v_273_pad_type_0 = const()[name = tensor("v_273_pad_type_0"), val = tensor("custom")]; + tensor v_273_pad_0 = const()[name = tensor("v_273_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2051179008)))]; + tensor v_273_cast = conv(dilations = var_13526, groups = var_12518, pad = v_273_pad_0, pad_type = v_273_pad_type_0, strides = var_13524, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_v_weight_to_fp16, x = hidden_states_561_cast)[name = tensor("v_273_cast")]; + tensor var_13530 = const()[name = tensor("op_13530"), val = tensor([2, 10, 64, -1])]; + tensor var_13531_cast = reshape(shape = var_13530, x = q_273_cast)[name = tensor("op_13531_cast")]; + tensor var_13532 = const()[name = tensor("op_13532"), val = tensor([2, 10, 64, -1])]; + tensor var_13533_cast = reshape(shape = var_13532, x = k_273_cast)[name = tensor("op_13533_cast")]; + tensor var_13534 = const()[name = tensor("op_13534"), val = tensor([2, 10, 64, -1])]; + tensor var_13535_cast = reshape(shape = var_13534, x = v_273_cast)[name = tensor("op_13535_cast")]; + tensor attn_weights_545_transpose_x_0 = const()[name = tensor("attn_weights_545_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_545_transpose_y_0 = const()[name = tensor("attn_weights_545_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_545_cast = matmul(transpose_x = attn_weights_545_transpose_x_0, transpose_y = attn_weights_545_transpose_y_0, x = var_13531_cast, y = var_13533_cast)[name = tensor("attn_weights_545_cast")]; + tensor attn_weights_547_cast = mul(x = attn_weights_545_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_547_cast")]; + tensor var_13539_cast = softmax(axis = var_12502, x = attn_weights_547_cast)[name = tensor("op_13539_cast")]; + tensor attn_273_transpose_x_0 = const()[name = tensor("attn_273_transpose_x_0"), val = tensor(false)]; + tensor attn_273_transpose_y_0 = const()[name = tensor("attn_273_transpose_y_0"), val = tensor(true)]; + tensor attn_273_cast = matmul(transpose_x = attn_273_transpose_x_0, transpose_y = attn_273_transpose_y_0, x = var_13535_cast, y = var_13539_cast)[name = tensor("attn_273_cast")]; + tensor var_13543 = const()[name = tensor("op_13543"), val = tensor([2, 640, 1, -1])]; + tensor input_805_cast = reshape(shape = var_13543, x = attn_273_cast)[name = tensor("input_805_cast")]; + tensor var_13548 = const()[name = tensor("op_13548"), val = tensor([1, 1])]; + tensor var_13550 = const()[name = tensor("op_13550"), val = tensor([1, 1])]; + tensor var_13552_pad_type_0 = const()[name = tensor("op_13552_pad_type_0"), val = tensor("custom")]; + tensor var_13552_pad_0 = const()[name = tensor("op_13552_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2051998272)))]; + 
tensor up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2052817536)))]; + tensor var_13552_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_bias_to_fp16, dilations = var_13550, groups = var_12518, pad = var_13552_pad_0, pad_type = var_13552_pad_type_0, strides = var_13548, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn1_to_out_0_weight_to_fp16, x = input_805_cast)[name = tensor("op_13552_cast")]; + tensor inputs_411_cast = add(x = var_13552_cast, y = inputs_409_cast)[name = tensor("inputs_411_cast")]; + tensor var_13556 = const()[name = tensor("op_13556"), val = tensor([1])]; + tensor channels_mean_411_cast = reduce_mean(axes = var_13556, keep_dims = var_12513, x = inputs_411_cast)[name = tensor("channels_mean_411_cast")]; + tensor zero_mean_411_cast = sub(x = inputs_411_cast, y = channels_mean_411_cast)[name = tensor("zero_mean_411_cast")]; + tensor zero_mean_sq_411_cast = mul(x = zero_mean_411_cast, y = zero_mean_411_cast)[name = tensor("zero_mean_sq_411_cast")]; + tensor var_13560 = const()[name = tensor("op_13560"), val = tensor([1])]; + tensor var_13561_cast = reduce_mean(axes = var_13560, keep_dims = var_12513, x = zero_mean_sq_411_cast)[name = tensor("op_13561_cast")]; + tensor var_13562_to_fp16 = const()[name = tensor("op_13562_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13563_cast = add(x = var_13561_cast, y = var_13562_to_fp16)[name = tensor("op_13563_cast")]; + tensor denom_411_epsilon_0_to_fp16 = const()[name = tensor("denom_411_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_411_cast = rsqrt(epsilon = denom_411_epsilon_0_to_fp16, x = var_13563_cast)[name = tensor("denom_411_cast")]; + tensor out_411_cast = mul(x = zero_mean_411_cast, y = denom_411_cast)[name = tensor("out_411_cast")]; + tensor var_13567_to_fp16 = const()[name = tensor("op_13567_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2052818880)))]; + tensor var_13568_cast = add(x = out_411_cast, y = var_13567_to_fp16)[name = tensor("op_13568_cast")]; + tensor var_13570_to_fp16 = const()[name = tensor("op_13570_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2052820224)))]; + tensor hidden_states_563_cast = mul(x = var_13568_cast, y = var_13570_to_fp16)[name = tensor("hidden_states_563_cast")]; + tensor var_13577 = const()[name = tensor("op_13577"), val = tensor([1, 1])]; + tensor var_13579 = const()[name = tensor("op_13579"), val = tensor([1, 1])]; + tensor q_275_pad_type_0 = const()[name = tensor("q_275_pad_type_0"), val = tensor("custom")]; + tensor q_275_pad_0 = const()[name = tensor("q_275_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2052821568))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2053128832))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_275_cast = conv(dilations = var_13579, groups = var_12518, pad = q_275_pad_0, pad_type = q_275_pad_type_0, strides = var_13577, weight = 
up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_563_cast)[name = tensor("q_275_cast")]; + tensor var_13583 = const()[name = tensor("op_13583"), val = tensor([1, 1])]; + tensor var_13585 = const()[name = tensor("op_13585"), val = tensor([1, 1])]; + tensor k_275_pad_type_0 = const()[name = tensor("k_275_pad_type_0"), val = tensor("custom")]; + tensor k_275_pad_0 = const()[name = tensor("k_275_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2053129024))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2054112128))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_275_cast = conv(dilations = var_13585, groups = var_12518, pad = k_275_pad_0, pad_type = k_275_pad_type_0, strides = var_13583, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_275_cast")]; + tensor var_13589 = const()[name = tensor("op_13589"), val = tensor([1, 1])]; + tensor var_13591 = const()[name = tensor("op_13591"), val = tensor([1, 1])]; + tensor v_275_pad_type_0 = const()[name = tensor("v_275_pad_type_0"), val = tensor("custom")]; + tensor v_275_pad_0 = const()[name = tensor("v_275_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2054112320))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2055423104))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_275_cast = conv(dilations = var_13591, groups = var_12518, pad = v_275_pad_0, pad_type = v_275_pad_type_0, strides = var_13589, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_275_cast")]; + tensor var_13595 = const()[name = tensor("op_13595"), val = tensor([2, 10, 64, -1])]; + tensor var_13596_cast = reshape(shape = var_13595, x = q_275_cast)[name = tensor("op_13596_cast")]; + tensor var_13597 = const()[name = tensor("op_13597"), val = tensor([2, 10, 64, -1])]; + tensor var_13598_cast = reshape(shape = var_13597, x = k_275_cast)[name = tensor("op_13598_cast")]; + tensor var_13599 = const()[name = tensor("op_13599"), val = tensor([2, 10, 64, -1])]; + tensor var_13600_cast = reshape(shape = var_13599, x = v_275_cast)[name = tensor("op_13600_cast")]; + tensor attn_weights_549_transpose_x_0 = const()[name = tensor("attn_weights_549_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_549_transpose_y_0 = const()[name = tensor("attn_weights_549_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_549_cast = matmul(transpose_x = attn_weights_549_transpose_x_0, transpose_y = attn_weights_549_transpose_y_0, x = var_13596_cast, y = var_13598_cast)[name = tensor("attn_weights_549_cast")]; + tensor attn_weights_551_cast = mul(x = attn_weights_549_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_551_cast")]; + tensor var_13604_cast = softmax(axis = var_12502, x = 
attn_weights_551_cast)[name = tensor("op_13604_cast")]; + tensor attn_275_transpose_x_0 = const()[name = tensor("attn_275_transpose_x_0"), val = tensor(false)]; + tensor attn_275_transpose_y_0 = const()[name = tensor("attn_275_transpose_y_0"), val = tensor(true)]; + tensor attn_275_cast = matmul(transpose_x = attn_275_transpose_x_0, transpose_y = attn_275_transpose_y_0, x = var_13600_cast, y = var_13604_cast)[name = tensor("attn_275_cast")]; + tensor var_13608 = const()[name = tensor("op_13608"), val = tensor([2, 640, 1, -1])]; + tensor input_807_cast = reshape(shape = var_13608, x = attn_275_cast)[name = tensor("input_807_cast")]; + tensor var_13613 = const()[name = tensor("op_13613"), val = tensor([1, 1])]; + tensor var_13615 = const()[name = tensor("op_13615"), val = tensor([1, 1])]; + tensor var_13617_pad_type_0 = const()[name = tensor("op_13617_pad_type_0"), val = tensor("custom")]; + tensor var_13617_pad_0 = const()[name = tensor("op_13617_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2055423680))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2055833344))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2055833920)))]; + tensor var_13617_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_bias_to_fp16, dilations = var_13615, groups = var_12518, pad = var_13617_pad_0, pad_type = var_13617_pad_type_0, strides = var_13613, weight = up_blocks_1_attentions_2_transformer_blocks_0_attn2_to_out_0_weight_to_fp16_palettized, x = input_807_cast)[name = tensor("op_13617_cast")]; + tensor inputs_413_cast = add(x = var_13617_cast, y = inputs_411_cast)[name = tensor("inputs_413_cast")]; + tensor var_13621 = const()[name = tensor("op_13621"), val = tensor([1])]; + tensor channels_mean_413_cast = reduce_mean(axes = var_13621, keep_dims = var_12513, x = inputs_413_cast)[name = tensor("channels_mean_413_cast")]; + tensor zero_mean_413_cast = sub(x = inputs_413_cast, y = channels_mean_413_cast)[name = tensor("zero_mean_413_cast")]; + tensor zero_mean_sq_413_cast = mul(x = zero_mean_413_cast, y = zero_mean_413_cast)[name = tensor("zero_mean_sq_413_cast")]; + tensor var_13625 = const()[name = tensor("op_13625"), val = tensor([1])]; + tensor var_13626_cast = reduce_mean(axes = var_13625, keep_dims = var_12513, x = zero_mean_sq_413_cast)[name = tensor("op_13626_cast")]; + tensor var_13627_to_fp16 = const()[name = tensor("op_13627_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13628_cast = add(x = var_13626_cast, y = var_13627_to_fp16)[name = tensor("op_13628_cast")]; + tensor denom_413_epsilon_0_to_fp16 = const()[name = tensor("denom_413_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_413_cast = rsqrt(epsilon = denom_413_epsilon_0_to_fp16, x = var_13628_cast)[name = tensor("denom_413_cast")]; + tensor out_413_cast = mul(x = zero_mean_413_cast, y = denom_413_cast)[name = tensor("out_413_cast")]; + tensor var_13632_to_fp16 = const()[name = 
tensor("op_13632_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2055835264)))]; + tensor var_13633_cast = add(x = out_413_cast, y = var_13632_to_fp16)[name = tensor("op_13633_cast")]; + tensor var_13635_to_fp16 = const()[name = tensor("op_13635_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2055836608)))]; + tensor input_809_cast = mul(x = var_13633_cast, y = var_13635_to_fp16)[name = tensor("input_809_cast")]; + tensor var_13643 = const()[name = tensor("op_13643"), val = tensor([1, 1])]; + tensor var_13645 = const()[name = tensor("op_13645"), val = tensor([1, 1])]; + tensor var_13647_pad_type_0 = const()[name = tensor("op_13647_pad_type_0"), val = tensor("custom")]; + tensor var_13647_pad_0 = const()[name = tensor("op_13647_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2055837952)))]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2062391616)))]; + tensor var_13647_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_bias_to_fp16, dilations = var_13645, groups = var_12518, pad = var_13647_pad_0, pad_type = var_13647_pad_type_0, strides = var_13643, weight = up_blocks_1_attentions_2_transformer_blocks_0_ff_net_0_proj_weight_to_fp16, x = input_809_cast)[name = tensor("op_13647_cast")]; + tensor var_13648_split_sizes_0 = const()[name = tensor("op_13648_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_13648_axis_0 = const()[name = tensor("op_13648_axis_0"), val = tensor(1)]; + tensor var_13648_cast_0, tensor var_13648_cast_1 = split(axis = var_13648_axis_0, split_sizes = var_13648_split_sizes_0, x = var_13647_cast)[name = tensor("op_13648_cast")]; + tensor var_13650_mode_0 = const()[name = tensor("op_13650_mode_0"), val = tensor("EXACT")]; + tensor var_13650_cast = gelu(mode = var_13650_mode_0, x = var_13648_cast_1)[name = tensor("op_13650_cast")]; + tensor input_811_cast = mul(x = var_13648_cast_0, y = var_13650_cast)[name = tensor("input_811_cast")]; + tensor var_13654 = const()[name = tensor("op_13654"), val = tensor([1, 1])]; + tensor var_13656 = const()[name = tensor("op_13656"), val = tensor([1, 1])]; + tensor var_13658_pad_type_0 = const()[name = tensor("op_13658_pad_type_0"), val = tensor("custom")]; + tensor var_13658_pad_0 = const()[name = tensor("op_13658_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2062401920)))]; + tensor up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2065678784)))]; + tensor var_13658_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_bias_to_fp16, dilations = var_13656, 
groups = var_12518, pad = var_13658_pad_0, pad_type = var_13658_pad_type_0, strides = var_13654, weight = up_blocks_1_attentions_2_transformer_blocks_0_ff_net_2_weight_to_fp16, x = input_811_cast)[name = tensor("op_13658_cast")]; + tensor inputs_415_cast = add(x = var_13658_cast, y = inputs_413_cast)[name = tensor("inputs_415_cast")]; + tensor var_13668 = const()[name = tensor("op_13668"), val = tensor([1])]; + tensor channels_mean_415_cast = reduce_mean(axes = var_13668, keep_dims = var_12513, x = inputs_415_cast)[name = tensor("channels_mean_415_cast")]; + tensor zero_mean_415_cast = sub(x = inputs_415_cast, y = channels_mean_415_cast)[name = tensor("zero_mean_415_cast")]; + tensor zero_mean_sq_415_cast = mul(x = zero_mean_415_cast, y = zero_mean_415_cast)[name = tensor("zero_mean_sq_415_cast")]; + tensor var_13672 = const()[name = tensor("op_13672"), val = tensor([1])]; + tensor var_13673_cast = reduce_mean(axes = var_13672, keep_dims = var_12513, x = zero_mean_sq_415_cast)[name = tensor("op_13673_cast")]; + tensor var_13674_to_fp16 = const()[name = tensor("op_13674_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13675_cast = add(x = var_13673_cast, y = var_13674_to_fp16)[name = tensor("op_13675_cast")]; + tensor denom_415_epsilon_0_to_fp16 = const()[name = tensor("denom_415_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_415_cast = rsqrt(epsilon = denom_415_epsilon_0_to_fp16, x = var_13675_cast)[name = tensor("denom_415_cast")]; + tensor out_415_cast = mul(x = zero_mean_415_cast, y = denom_415_cast)[name = tensor("out_415_cast")]; + tensor var_13679_to_fp16 = const()[name = tensor("op_13679_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2065680128)))]; + tensor var_13680_cast = add(x = out_415_cast, y = var_13679_to_fp16)[name = tensor("op_13680_cast")]; + tensor var_13682_to_fp16 = const()[name = tensor("op_13682_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2065681472)))]; + tensor hidden_states_567_cast = mul(x = var_13680_cast, y = var_13682_to_fp16)[name = tensor("hidden_states_567_cast")]; + tensor var_13689 = const()[name = tensor("op_13689"), val = tensor([1, 1])]; + tensor var_13691 = const()[name = tensor("op_13691"), val = tensor([1, 1])]; + tensor q_277_pad_type_0 = const()[name = tensor("q_277_pad_type_0"), val = tensor("custom")]; + tensor q_277_pad_0 = const()[name = tensor("q_277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2065682816)))]; + tensor q_277_cast = conv(dilations = var_13691, groups = var_12518, pad = q_277_pad_0, pad_type = q_277_pad_type_0, strides = var_13689, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_q_weight_to_fp16, x = hidden_states_567_cast)[name = tensor("q_277_cast")]; + tensor var_13695 = const()[name = tensor("op_13695"), val = tensor([1, 1])]; + tensor var_13697 = const()[name = tensor("op_13697"), val = tensor([1, 1])]; + tensor k_277_pad_type_0 = const()[name = tensor("k_277_pad_type_0"), val = tensor("custom")]; + tensor k_277_pad_0 = const()[name = tensor("k_277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16 = const()[name = 
tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2066502080)))]; + tensor k_277_cast = conv(dilations = var_13697, groups = var_12518, pad = k_277_pad_0, pad_type = k_277_pad_type_0, strides = var_13695, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_k_weight_to_fp16, x = hidden_states_567_cast)[name = tensor("k_277_cast")]; + tensor var_13701 = const()[name = tensor("op_13701"), val = tensor([1, 1])]; + tensor var_13703 = const()[name = tensor("op_13703"), val = tensor([1, 1])]; + tensor v_277_pad_type_0 = const()[name = tensor("v_277_pad_type_0"), val = tensor("custom")]; + tensor v_277_pad_0 = const()[name = tensor("v_277_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2067321344)))]; + tensor v_277_cast = conv(dilations = var_13703, groups = var_12518, pad = v_277_pad_0, pad_type = v_277_pad_type_0, strides = var_13701, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_v_weight_to_fp16, x = hidden_states_567_cast)[name = tensor("v_277_cast")]; + tensor var_13707 = const()[name = tensor("op_13707"), val = tensor([2, 10, 64, -1])]; + tensor var_13708_cast = reshape(shape = var_13707, x = q_277_cast)[name = tensor("op_13708_cast")]; + tensor var_13709 = const()[name = tensor("op_13709"), val = tensor([2, 10, 64, -1])]; + tensor var_13710_cast = reshape(shape = var_13709, x = k_277_cast)[name = tensor("op_13710_cast")]; + tensor var_13711 = const()[name = tensor("op_13711"), val = tensor([2, 10, 64, -1])]; + tensor var_13712_cast = reshape(shape = var_13711, x = v_277_cast)[name = tensor("op_13712_cast")]; + tensor attn_weights_553_transpose_x_0 = const()[name = tensor("attn_weights_553_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_553_transpose_y_0 = const()[name = tensor("attn_weights_553_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_553_cast = matmul(transpose_x = attn_weights_553_transpose_x_0, transpose_y = attn_weights_553_transpose_y_0, x = var_13708_cast, y = var_13710_cast)[name = tensor("attn_weights_553_cast")]; + tensor attn_weights_555_cast = mul(x = attn_weights_553_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_555_cast")]; + tensor var_13716_cast = softmax(axis = var_12502, x = attn_weights_555_cast)[name = tensor("op_13716_cast")]; + tensor attn_277_transpose_x_0 = const()[name = tensor("attn_277_transpose_x_0"), val = tensor(false)]; + tensor attn_277_transpose_y_0 = const()[name = tensor("attn_277_transpose_y_0"), val = tensor(true)]; + tensor attn_277_cast = matmul(transpose_x = attn_277_transpose_x_0, transpose_y = attn_277_transpose_y_0, x = var_13712_cast, y = var_13716_cast)[name = tensor("attn_277_cast")]; + tensor var_13720 = const()[name = tensor("op_13720"), val = tensor([2, 640, 1, -1])]; + tensor input_813_cast = reshape(shape = var_13720, x = attn_277_cast)[name = tensor("input_813_cast")]; + tensor var_13725 = const()[name = tensor("op_13725"), val = tensor([1, 1])]; + tensor var_13727 = const()[name = tensor("op_13727"), val = tensor([1, 1])]; + tensor var_13729_pad_type_0 = const()[name = tensor("op_13729_pad_type_0"), val = tensor("custom")]; + tensor var_13729_pad_0 = const()[name = tensor("op_13729_pad_0"), val = 
tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2068140608)))]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2068959872)))]; + tensor var_13729_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_bias_to_fp16, dilations = var_13727, groups = var_12518, pad = var_13729_pad_0, pad_type = var_13729_pad_type_0, strides = var_13725, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn1_to_out_0_weight_to_fp16, x = input_813_cast)[name = tensor("op_13729_cast")]; + tensor inputs_417_cast = add(x = var_13729_cast, y = inputs_415_cast)[name = tensor("inputs_417_cast")]; + tensor var_13733 = const()[name = tensor("op_13733"), val = tensor([1])]; + tensor channels_mean_417_cast = reduce_mean(axes = var_13733, keep_dims = var_12513, x = inputs_417_cast)[name = tensor("channels_mean_417_cast")]; + tensor zero_mean_417_cast = sub(x = inputs_417_cast, y = channels_mean_417_cast)[name = tensor("zero_mean_417_cast")]; + tensor zero_mean_sq_417_cast = mul(x = zero_mean_417_cast, y = zero_mean_417_cast)[name = tensor("zero_mean_sq_417_cast")]; + tensor var_13737 = const()[name = tensor("op_13737"), val = tensor([1])]; + tensor var_13738_cast = reduce_mean(axes = var_13737, keep_dims = var_12513, x = zero_mean_sq_417_cast)[name = tensor("op_13738_cast")]; + tensor var_13739_to_fp16 = const()[name = tensor("op_13739_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13740_cast = add(x = var_13738_cast, y = var_13739_to_fp16)[name = tensor("op_13740_cast")]; + tensor denom_417_epsilon_0_to_fp16 = const()[name = tensor("denom_417_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_417_cast = rsqrt(epsilon = denom_417_epsilon_0_to_fp16, x = var_13740_cast)[name = tensor("denom_417_cast")]; + tensor out_417_cast = mul(x = zero_mean_417_cast, y = denom_417_cast)[name = tensor("out_417_cast")]; + tensor var_13744_to_fp16 = const()[name = tensor("op_13744_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2068961216)))]; + tensor var_13745_cast = add(x = out_417_cast, y = var_13744_to_fp16)[name = tensor("op_13745_cast")]; + tensor var_13747_to_fp16 = const()[name = tensor("op_13747_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2068962560)))]; + tensor hidden_states_569_cast = mul(x = var_13745_cast, y = var_13747_to_fp16)[name = tensor("hidden_states_569_cast")]; + tensor var_13754 = const()[name = tensor("op_13754"), val = tensor([1, 1])]; + tensor var_13756 = const()[name = tensor("op_13756"), val = tensor([1, 1])]; + tensor q_pad_type_0 = const()[name = tensor("q_pad_type_0"), val = tensor("custom")]; + tensor q_pad_0 = const()[name = tensor("q_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2068963904))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2069271168))), 
name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor q_cast = conv(dilations = var_13756, groups = var_12518, pad = q_pad_0, pad_type = q_pad_type_0, strides = var_13754, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_q_weight_to_fp16_palettized, x = hidden_states_569_cast)[name = tensor("q_cast")]; + tensor var_13760 = const()[name = tensor("op_13760"), val = tensor([1, 1])]; + tensor var_13762 = const()[name = tensor("op_13762"), val = tensor([1, 1])]; + tensor k_pad_type_0 = const()[name = tensor("k_pad_type_0"), val = tensor("custom")]; + tensor k_pad_0 = const()[name = tensor("k_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2069271360))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2070254464))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor k_cast = conv(dilations = var_13762, groups = var_12518, pad = k_pad_0, pad_type = k_pad_type_0, strides = var_13760, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_k_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("k_cast")]; + tensor var_13766 = const()[name = tensor("op_13766"), val = tensor([1, 1])]; + tensor var_13768 = const()[name = tensor("op_13768"), val = tensor([1, 1])]; + tensor v_pad_type_0 = const()[name = tensor("v_pad_type_0"), val = tensor("custom")]; + tensor v_pad_0 = const()[name = tensor("v_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2070254656))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2071565440))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized"), shape = tensor([640, 2048, 1, 1])]; + tensor v_cast = conv(dilations = var_13768, groups = var_12518, pad = v_pad_0, pad_type = v_pad_type_0, strides = var_13766, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_v_weight_to_fp16_palettized, x = encoder_hidden_states)[name = tensor("v_cast")]; + tensor var_13772 = const()[name = tensor("op_13772"), val = tensor([2, 10, 64, -1])]; + tensor var_13773_cast = reshape(shape = var_13772, x = q_cast)[name = tensor("op_13773_cast")]; + tensor var_13774 = const()[name = tensor("op_13774"), val = tensor([2, 10, 64, -1])]; + tensor var_13775_cast = reshape(shape = var_13774, x = k_cast)[name = tensor("op_13775_cast")]; + tensor var_13776 = const()[name = tensor("op_13776"), val = tensor([2, 10, 64, -1])]; + tensor var_13777_cast = reshape(shape = var_13776, x = v_cast)[name = tensor("op_13777_cast")]; + tensor attn_weights_557_transpose_x_0 = const()[name = tensor("attn_weights_557_transpose_x_0"), val = tensor(true)]; + tensor attn_weights_557_transpose_y_0 = const()[name = tensor("attn_weights_557_transpose_y_0"), val = tensor(false)]; + tensor attn_weights_557_cast = matmul(transpose_x = attn_weights_557_transpose_x_0, transpose_y = attn_weights_557_transpose_y_0, x = var_13773_cast, y = var_13775_cast)[name = tensor("attn_weights_557_cast")]; + 
tensor attn_weights_cast = mul(x = attn_weights_557_cast, y = var_12509_to_fp16)[name = tensor("attn_weights_cast")]; + tensor var_13781_cast = softmax(axis = var_12502, x = attn_weights_cast)[name = tensor("op_13781_cast")]; + tensor attn_transpose_x_0 = const()[name = tensor("attn_transpose_x_0"), val = tensor(false)]; + tensor attn_transpose_y_0 = const()[name = tensor("attn_transpose_y_0"), val = tensor(true)]; + tensor attn_cast = matmul(transpose_x = attn_transpose_x_0, transpose_y = attn_transpose_y_0, x = var_13777_cast, y = var_13781_cast)[name = tensor("attn_cast")]; + tensor var_13785 = const()[name = tensor("op_13785"), val = tensor([2, 640, 1, -1])]; + tensor input_815_cast = reshape(shape = var_13785, x = attn_cast)[name = tensor("input_815_cast")]; + tensor var_13790 = const()[name = tensor("op_13790"), val = tensor([1, 1])]; + tensor var_13792 = const()[name = tensor("op_13792"), val = tensor([1, 1])]; + tensor var_13794_pad_type_0 = const()[name = tensor("op_13794_pad_type_0"), val = tensor("custom")]; + tensor var_13794_pad_0 = const()[name = tensor("op_13794_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2071566016))), lut = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2071975680))), name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized"), shape = tensor([640, 640, 1, 1])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2071976256)))]; + tensor var_13794_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_bias_to_fp16, dilations = var_13792, groups = var_12518, pad = var_13794_pad_0, pad_type = var_13794_pad_type_0, strides = var_13790, weight = up_blocks_1_attentions_2_transformer_blocks_1_attn2_to_out_0_weight_to_fp16_palettized, x = input_815_cast)[name = tensor("op_13794_cast")]; + tensor inputs_cast = add(x = var_13794_cast, y = inputs_417_cast)[name = tensor("inputs_cast")]; + tensor var_13798 = const()[name = tensor("op_13798"), val = tensor([1])]; + tensor channels_mean_cast = reduce_mean(axes = var_13798, keep_dims = var_12513, x = inputs_cast)[name = tensor("channels_mean_cast")]; + tensor zero_mean_cast = sub(x = inputs_cast, y = channels_mean_cast)[name = tensor("zero_mean_cast")]; + tensor zero_mean_sq_cast = mul(x = zero_mean_cast, y = zero_mean_cast)[name = tensor("zero_mean_sq_cast")]; + tensor var_13802 = const()[name = tensor("op_13802"), val = tensor([1])]; + tensor var_13803_cast = reduce_mean(axes = var_13802, keep_dims = var_12513, x = zero_mean_sq_cast)[name = tensor("op_13803_cast")]; + tensor var_13804_to_fp16 = const()[name = tensor("op_13804_to_fp16"), val = tensor(0x1.5p-17)]; + tensor var_13805_cast = add(x = var_13803_cast, y = var_13804_to_fp16)[name = tensor("op_13805_cast")]; + tensor denom_epsilon_0_to_fp16 = const()[name = tensor("denom_epsilon_0_to_fp16"), val = tensor(0x1p-24)]; + tensor denom_cast = rsqrt(epsilon = denom_epsilon_0_to_fp16, x = var_13805_cast)[name = tensor("denom_cast")]; + tensor out_cast = mul(x = zero_mean_cast, y = denom_cast)[name = tensor("out_cast")]; + tensor var_13809_to_fp16 
= const()[name = tensor("op_13809_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2071977600)))]; + tensor var_13810_cast = add(x = out_cast, y = var_13809_to_fp16)[name = tensor("op_13810_cast")]; + tensor var_13812_to_fp16 = const()[name = tensor("op_13812_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2071978944)))]; + tensor input_817_cast = mul(x = var_13810_cast, y = var_13812_to_fp16)[name = tensor("input_817_cast")]; + tensor var_13820 = const()[name = tensor("op_13820"), val = tensor([1, 1])]; + tensor var_13822 = const()[name = tensor("op_13822"), val = tensor([1, 1])]; + tensor var_13824_pad_type_0 = const()[name = tensor("op_13824_pad_type_0"), val = tensor("custom")]; + tensor var_13824_pad_0 = const()[name = tensor("op_13824_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2071980288)))]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2078533952)))]; + tensor var_13824_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_bias_to_fp16, dilations = var_13822, groups = var_12518, pad = var_13824_pad_0, pad_type = var_13824_pad_type_0, strides = var_13820, weight = up_blocks_1_attentions_2_transformer_blocks_1_ff_net_0_proj_weight_to_fp16, x = input_817_cast)[name = tensor("op_13824_cast")]; + tensor var_13825_split_sizes_0 = const()[name = tensor("op_13825_split_sizes_0"), val = tensor([2560, 2560])]; + tensor var_13825_axis_0 = const()[name = tensor("op_13825_axis_0"), val = tensor(1)]; + tensor var_13825_cast_0, tensor var_13825_cast_1 = split(axis = var_13825_axis_0, split_sizes = var_13825_split_sizes_0, x = var_13824_cast)[name = tensor("op_13825_cast")]; + tensor var_13827_mode_0 = const()[name = tensor("op_13827_mode_0"), val = tensor("EXACT")]; + tensor var_13827_cast = gelu(mode = var_13827_mode_0, x = var_13825_cast_1)[name = tensor("op_13827_cast")]; + tensor input_819_cast = mul(x = var_13825_cast_0, y = var_13827_cast)[name = tensor("input_819_cast")]; + tensor var_13831 = const()[name = tensor("op_13831"), val = tensor([1, 1])]; + tensor var_13833 = const()[name = tensor("op_13833"), val = tensor([1, 1])]; + tensor var_13835_pad_type_0 = const()[name = tensor("op_13835_pad_type_0"), val = tensor("custom")]; + tensor var_13835_pad_0 = const()[name = tensor("op_13835_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2078544256)))]; + tensor up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2081821120)))]; + tensor var_13835_cast = conv(bias = up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_bias_to_fp16, dilations = 
var_13833, groups = var_12518, pad = var_13835_pad_0, pad_type = var_13835_pad_type_0, strides = var_13831, weight = up_blocks_1_attentions_2_transformer_blocks_1_ff_net_2_weight_to_fp16, x = input_819_cast)[name = tensor("op_13835_cast")]; + tensor hidden_states_573_cast = add(x = var_13835_cast, y = inputs_cast)[name = tensor("hidden_states_573_cast")]; + tensor var_13837 = const()[name = tensor("op_13837"), val = tensor([2, 640, 64, 64])]; + tensor input_821_cast = reshape(shape = var_13837, x = hidden_states_573_cast)[name = tensor("input_821_cast")]; + tensor var_13841 = const()[name = tensor("op_13841"), val = tensor([1, 1])]; + tensor var_13843 = const()[name = tensor("op_13843"), val = tensor([1, 1])]; + tensor hidden_states_575_pad_type_0 = const()[name = tensor("hidden_states_575_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_575_pad_0 = const()[name = tensor("hidden_states_575_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_1_attentions_2_proj_out_weight_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_proj_out_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2081822464)))]; + tensor up_blocks_1_attentions_2_proj_out_bias_to_fp16 = const()[name = tensor("up_blocks_1_attentions_2_proj_out_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2082641728)))]; + tensor hidden_states_575_cast = conv(bias = up_blocks_1_attentions_2_proj_out_bias_to_fp16, dilations = var_13843, groups = var_12518, pad = hidden_states_575_pad_0, pad_type = hidden_states_575_pad_type_0, strides = var_13841, weight = up_blocks_1_attentions_2_proj_out_weight_to_fp16, x = input_821_cast)[name = tensor("hidden_states_575_cast")]; + tensor input_823_cast = add(x = hidden_states_575_cast, y = hidden_states_557_cast)[name = tensor("input_823_cast")]; + tensor input_825_scale_factor_height_0 = const()[name = tensor("input_825_scale_factor_height_0"), val = tensor(0x1p+1)]; + tensor input_825_scale_factor_width_0 = const()[name = tensor("input_825_scale_factor_width_0"), val = tensor(0x1p+1)]; + tensor input_825_cast = upsample_nearest_neighbor(scale_factor_height = input_825_scale_factor_height_0, scale_factor_width = input_825_scale_factor_width_0, x = input_823_cast)[name = tensor("input_825_cast")]; + tensor var_13852 = const()[name = tensor("op_13852"), val = tensor([1, 1])]; + tensor var_13854 = const()[name = tensor("op_13854"), val = tensor([1, 1])]; + tensor hidden_states_577_pad_type_0 = const()[name = tensor("hidden_states_577_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_577_pad_0 = const()[name = tensor("hidden_states_577_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_1_upsamplers_0_conv_weight_to_fp16 = const()[name = tensor("up_blocks_1_upsamplers_0_conv_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2082643072)))]; + tensor up_blocks_1_upsamplers_0_conv_bias_to_fp16 = const()[name = tensor("up_blocks_1_upsamplers_0_conv_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2090015936)))]; + tensor hidden_states_577_cast = conv(bias = up_blocks_1_upsamplers_0_conv_bias_to_fp16, dilations = var_13854, groups = var_12518, pad = hidden_states_577_pad_0, pad_type = hidden_states_577_pad_type_0, strides = var_13852, weight = up_blocks_1_upsamplers_0_conv_weight_to_fp16, x = input_825_cast)[name = tensor("hidden_states_577_cast")]; + 
tensor var_13862 = const()[name = tensor("op_13862"), val = tensor(1)]; + tensor input_827_interleave_0 = const()[name = tensor("input_827_interleave_0"), val = tensor(false)]; + tensor input_827_cast = concat(axis = var_13862, interleave = input_827_interleave_0, values = (hidden_states_577_cast, input_43_cast))[name = tensor("input_827_cast")]; + tensor reshape_156_shape_0 = const()[name = tensor("reshape_156_shape_0"), val = tensor([2, 32, 30, 128, 128])]; + tensor reshape_156_cast = reshape(shape = reshape_156_shape_0, x = input_827_cast)[name = tensor("reshape_156_cast")]; + tensor reduce_mean_117_axes_0 = const()[name = tensor("reduce_mean_117_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_117_keep_dims_0 = const()[name = tensor("reduce_mean_117_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_117_cast = reduce_mean(axes = reduce_mean_117_axes_0, keep_dims = reduce_mean_117_keep_dims_0, x = reshape_156_cast)[name = tensor("reduce_mean_117_cast")]; + tensor sub_78_cast = sub(x = reshape_156_cast, y = reduce_mean_117_cast)[name = tensor("sub_78_cast")]; + tensor square_39_cast = square(x = sub_78_cast)[name = tensor("square_39_cast")]; + tensor reduce_mean_119_axes_0 = const()[name = tensor("reduce_mean_119_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_119_keep_dims_0 = const()[name = tensor("reduce_mean_119_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_119_cast = reduce_mean(axes = reduce_mean_119_axes_0, keep_dims = reduce_mean_119_keep_dims_0, x = square_39_cast)[name = tensor("reduce_mean_119_cast")]; + tensor add_78_y_0_to_fp16 = const()[name = tensor("add_78_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_78_cast = add(x = reduce_mean_119_cast, y = add_78_y_0_to_fp16)[name = tensor("add_78_cast")]; + tensor sqrt_39_cast = sqrt(x = add_78_cast)[name = tensor("sqrt_39_cast")]; + tensor real_div_39_cast = real_div(x = sub_78_cast, y = sqrt_39_cast)[name = tensor("real_div_39_cast")]; + tensor reshape_157_shape_0 = const()[name = tensor("reshape_157_shape_0"), val = tensor([2, 960, 128, 128])]; + tensor reshape_157_cast = reshape(shape = reshape_157_shape_0, x = real_div_39_cast)[name = tensor("reshape_157_cast")]; + tensor add_79_gamma_0_to_fp16 = const()[name = tensor("add_79_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2090017280)))]; + tensor add_79_beta_0_to_fp16 = const()[name = tensor("add_79_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2090019264)))]; + tensor add_79_epsilon_0_to_fp16 = const()[name = tensor("add_79_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_79_cast = batch_norm(beta = add_79_beta_0_to_fp16, epsilon = add_79_epsilon_0_to_fp16, gamma = add_79_gamma_0_to_fp16, mean = add_73_mean_0_to_fp16, variance = add_73_variance_0_to_fp16, x = reshape_157_cast)[name = tensor("add_79_cast")]; + tensor input_831_cast = silu(x = add_79_cast)[name = tensor("input_831_cast")]; + tensor var_13883 = const()[name = tensor("op_13883"), val = tensor([1, 1])]; + tensor var_13885 = const()[name = tensor("op_13885"), val = tensor([1, 1])]; + tensor hidden_states_579_pad_type_0 = const()[name = tensor("hidden_states_579_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_579_pad_0 = const()[name = tensor("hidden_states_579_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_0_conv1_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv1_weight_to_fp16"), val = 
tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2090021248)))]; + tensor up_blocks_2_resnets_0_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2095550912)))]; + tensor hidden_states_579_cast = conv(bias = up_blocks_2_resnets_0_conv1_bias_to_fp16, dilations = var_13885, groups = var_13862, pad = hidden_states_579_pad_0, pad_type = hidden_states_579_pad_type_0, strides = var_13883, weight = up_blocks_2_resnets_0_conv1_weight_to_fp16, x = input_831_cast)[name = tensor("hidden_states_579_cast")]; + tensor var_13891 = const()[name = tensor("op_13891"), val = tensor([1, 1])]; + tensor var_13893 = const()[name = tensor("op_13893"), val = tensor([1, 1])]; + tensor temb_29_pad_type_0 = const()[name = tensor("temb_29_pad_type_0"), val = tensor("custom")]; + tensor temb_29_pad_0 = const()[name = tensor("temb_29_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_0_time_emb_proj_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_time_emb_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2095551616)))]; + tensor up_blocks_2_resnets_0_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2096370880)))]; + tensor temb_29_cast = conv(bias = up_blocks_2_resnets_0_time_emb_proj_bias_to_fp16, dilations = var_13893, groups = var_13862, pad = temb_29_pad_0, pad_type = temb_29_pad_type_0, strides = var_13891, weight = up_blocks_2_resnets_0_time_emb_proj_weight_to_fp16, x = input_21_cast)[name = tensor("temb_29_cast")]; + tensor input_835_cast = add(x = hidden_states_579_cast, y = temb_29_cast)[name = tensor("input_835_cast")]; + tensor reshape_160_shape_0 = const()[name = tensor("reshape_160_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_160_cast = reshape(shape = reshape_160_shape_0, x = input_835_cast)[name = tensor("reshape_160_cast")]; + tensor reduce_mean_120_axes_0 = const()[name = tensor("reduce_mean_120_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_120_keep_dims_0 = const()[name = tensor("reduce_mean_120_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_120_cast = reduce_mean(axes = reduce_mean_120_axes_0, keep_dims = reduce_mean_120_keep_dims_0, x = reshape_160_cast)[name = tensor("reduce_mean_120_cast")]; + tensor sub_80_cast = sub(x = reshape_160_cast, y = reduce_mean_120_cast)[name = tensor("sub_80_cast")]; + tensor square_40_cast = square(x = sub_80_cast)[name = tensor("square_40_cast")]; + tensor reduce_mean_122_axes_0 = const()[name = tensor("reduce_mean_122_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_122_keep_dims_0 = const()[name = tensor("reduce_mean_122_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_122_cast = reduce_mean(axes = reduce_mean_122_axes_0, keep_dims = reduce_mean_122_keep_dims_0, x = square_40_cast)[name = tensor("reduce_mean_122_cast")]; + tensor add_80_y_0_to_fp16 = const()[name = tensor("add_80_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_80_cast = add(x = reduce_mean_122_cast, y = add_80_y_0_to_fp16)[name = tensor("add_80_cast")]; + tensor sqrt_40_cast = sqrt(x = add_80_cast)[name = tensor("sqrt_40_cast")]; + tensor real_div_40_cast = real_div(x = sub_80_cast, y = sqrt_40_cast)[name = tensor("real_div_40_cast")]; + tensor 
reshape_161_shape_0 = const()[name = tensor("reshape_161_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_161_cast = reshape(shape = reshape_161_shape_0, x = real_div_40_cast)[name = tensor("reshape_161_cast")]; + tensor add_81_gamma_0_to_fp16 = const()[name = tensor("add_81_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2096371584)))]; + tensor add_81_beta_0_to_fp16 = const()[name = tensor("add_81_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2096372288)))]; + tensor add_81_epsilon_0_to_fp16 = const()[name = tensor("add_81_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_81_cast = batch_norm(beta = add_81_beta_0_to_fp16, epsilon = add_81_epsilon_0_to_fp16, gamma = add_81_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_161_cast)[name = tensor("add_81_cast")]; + tensor input_839_cast = silu(x = add_81_cast)[name = tensor("input_839_cast")]; + tensor var_13903 = const()[name = tensor("op_13903"), val = tensor([1, 1])]; + tensor var_13905 = const()[name = tensor("op_13905"), val = tensor([1, 1])]; + tensor hidden_states_581_pad_type_0 = const()[name = tensor("hidden_states_581_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_581_pad_0 = const()[name = tensor("hidden_states_581_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_0_conv2_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2096372992)))]; + tensor up_blocks_2_resnets_0_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2098216256)))]; + tensor hidden_states_581_cast = conv(bias = up_blocks_2_resnets_0_conv2_bias_to_fp16, dilations = var_13905, groups = var_13862, pad = hidden_states_581_pad_0, pad_type = hidden_states_581_pad_type_0, strides = var_13903, weight = up_blocks_2_resnets_0_conv2_weight_to_fp16, x = input_839_cast)[name = tensor("hidden_states_581_cast")]; + tensor var_13910 = const()[name = tensor("op_13910"), val = tensor([1, 1])]; + tensor var_13912 = const()[name = tensor("op_13912"), val = tensor([1, 1])]; + tensor x_17_pad_type_0 = const()[name = tensor("x_17_pad_type_0"), val = tensor("custom")]; + tensor x_17_pad_0 = const()[name = tensor("x_17_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_0_conv_shortcut_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv_shortcut_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2098216960)))]; + tensor up_blocks_2_resnets_0_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_0_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2098831424)))]; + tensor x_17_cast = conv(bias = up_blocks_2_resnets_0_conv_shortcut_bias_to_fp16, dilations = var_13912, groups = var_13862, pad = x_17_pad_0, pad_type = x_17_pad_type_0, strides = var_13910, weight = up_blocks_2_resnets_0_conv_shortcut_weight_to_fp16, x = input_827_cast)[name = tensor("x_17_cast")]; + tensor hidden_states_583_cast = add(x = x_17_cast, y = hidden_states_581_cast)[name = tensor("hidden_states_583_cast")]; + tensor input_841_interleave_0 = const()[name = tensor("input_841_interleave_0"), val 
= tensor(false)]; + tensor input_841_cast = concat(axis = var_13862, interleave = input_841_interleave_0, values = (hidden_states_583_cast, input_29_cast))[name = tensor("input_841_cast")]; + tensor reshape_164_shape_0 = const()[name = tensor("reshape_164_shape_0"), val = tensor([2, 32, 20, 128, 128])]; + tensor reshape_164_cast = reshape(shape = reshape_164_shape_0, x = input_841_cast)[name = tensor("reshape_164_cast")]; + tensor reduce_mean_123_axes_0 = const()[name = tensor("reduce_mean_123_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_123_keep_dims_0 = const()[name = tensor("reduce_mean_123_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_123_cast = reduce_mean(axes = reduce_mean_123_axes_0, keep_dims = reduce_mean_123_keep_dims_0, x = reshape_164_cast)[name = tensor("reduce_mean_123_cast")]; + tensor sub_82_cast = sub(x = reshape_164_cast, y = reduce_mean_123_cast)[name = tensor("sub_82_cast")]; + tensor square_41_cast = square(x = sub_82_cast)[name = tensor("square_41_cast")]; + tensor reduce_mean_125_axes_0 = const()[name = tensor("reduce_mean_125_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_125_keep_dims_0 = const()[name = tensor("reduce_mean_125_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_125_cast = reduce_mean(axes = reduce_mean_125_axes_0, keep_dims = reduce_mean_125_keep_dims_0, x = square_41_cast)[name = tensor("reduce_mean_125_cast")]; + tensor add_82_y_0_to_fp16 = const()[name = tensor("add_82_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_82_cast = add(x = reduce_mean_125_cast, y = add_82_y_0_to_fp16)[name = tensor("add_82_cast")]; + tensor sqrt_41_cast = sqrt(x = add_82_cast)[name = tensor("sqrt_41_cast")]; + tensor real_div_41_cast = real_div(x = sub_82_cast, y = sqrt_41_cast)[name = tensor("real_div_41_cast")]; + tensor reshape_165_shape_0 = const()[name = tensor("reshape_165_shape_0"), val = tensor([2, 640, 128, 128])]; + tensor reshape_165_cast = reshape(shape = reshape_165_shape_0, x = real_div_41_cast)[name = tensor("reshape_165_cast")]; + tensor add_83_gamma_0_to_fp16 = const()[name = tensor("add_83_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2098832128)))]; + tensor add_83_beta_0_to_fp16 = const()[name = tensor("add_83_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2098833472)))]; + tensor add_83_epsilon_0_to_fp16 = const()[name = tensor("add_83_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_83_cast = batch_norm(beta = add_83_beta_0_to_fp16, epsilon = add_83_epsilon_0_to_fp16, gamma = add_83_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_165_cast)[name = tensor("add_83_cast")]; + tensor input_845_cast = silu(x = add_83_cast)[name = tensor("input_845_cast")]; + tensor var_13930 = const()[name = tensor("op_13930"), val = tensor([1, 1])]; + tensor var_13932 = const()[name = tensor("op_13932"), val = tensor([1, 1])]; + tensor hidden_states_585_pad_type_0 = const()[name = tensor("hidden_states_585_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_585_pad_0 = const()[name = tensor("hidden_states_585_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_1_conv1_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2098834816)))]; + tensor up_blocks_2_resnets_1_conv1_bias_to_fp16 = const()[name = 
tensor("up_blocks_2_resnets_1_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2102521280)))]; + tensor hidden_states_585_cast = conv(bias = up_blocks_2_resnets_1_conv1_bias_to_fp16, dilations = var_13932, groups = var_13862, pad = hidden_states_585_pad_0, pad_type = hidden_states_585_pad_type_0, strides = var_13930, weight = up_blocks_2_resnets_1_conv1_weight_to_fp16, x = input_845_cast)[name = tensor("hidden_states_585_cast")]; + tensor var_13938 = const()[name = tensor("op_13938"), val = tensor([1, 1])]; + tensor var_13940 = const()[name = tensor("op_13940"), val = tensor([1, 1])]; + tensor temb_31_pad_type_0 = const()[name = tensor("temb_31_pad_type_0"), val = tensor("custom")]; + tensor temb_31_pad_0 = const()[name = tensor("temb_31_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_1_time_emb_proj_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_time_emb_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2102521984)))]; + tensor up_blocks_2_resnets_1_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2103341248)))]; + tensor temb_31_cast = conv(bias = up_blocks_2_resnets_1_time_emb_proj_bias_to_fp16, dilations = var_13940, groups = var_13862, pad = temb_31_pad_0, pad_type = temb_31_pad_type_0, strides = var_13938, weight = up_blocks_2_resnets_1_time_emb_proj_weight_to_fp16, x = input_21_cast)[name = tensor("temb_31_cast")]; + tensor input_849_cast = add(x = hidden_states_585_cast, y = temb_31_cast)[name = tensor("input_849_cast")]; + tensor reshape_168_shape_0 = const()[name = tensor("reshape_168_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_168_cast = reshape(shape = reshape_168_shape_0, x = input_849_cast)[name = tensor("reshape_168_cast")]; + tensor reduce_mean_126_axes_0 = const()[name = tensor("reduce_mean_126_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_126_keep_dims_0 = const()[name = tensor("reduce_mean_126_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_126_cast = reduce_mean(axes = reduce_mean_126_axes_0, keep_dims = reduce_mean_126_keep_dims_0, x = reshape_168_cast)[name = tensor("reduce_mean_126_cast")]; + tensor sub_84_cast = sub(x = reshape_168_cast, y = reduce_mean_126_cast)[name = tensor("sub_84_cast")]; + tensor square_42_cast = square(x = sub_84_cast)[name = tensor("square_42_cast")]; + tensor reduce_mean_128_axes_0 = const()[name = tensor("reduce_mean_128_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_128_keep_dims_0 = const()[name = tensor("reduce_mean_128_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_128_cast = reduce_mean(axes = reduce_mean_128_axes_0, keep_dims = reduce_mean_128_keep_dims_0, x = square_42_cast)[name = tensor("reduce_mean_128_cast")]; + tensor add_84_y_0_to_fp16 = const()[name = tensor("add_84_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_84_cast = add(x = reduce_mean_128_cast, y = add_84_y_0_to_fp16)[name = tensor("add_84_cast")]; + tensor sqrt_42_cast = sqrt(x = add_84_cast)[name = tensor("sqrt_42_cast")]; + tensor real_div_42_cast = real_div(x = sub_84_cast, y = sqrt_42_cast)[name = tensor("real_div_42_cast")]; + tensor reshape_169_shape_0 = const()[name = tensor("reshape_169_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_169_cast = reshape(shape = reshape_169_shape_0, x = 
real_div_42_cast)[name = tensor("reshape_169_cast")]; + tensor add_85_gamma_0_to_fp16 = const()[name = tensor("add_85_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2103341952)))]; + tensor add_85_beta_0_to_fp16 = const()[name = tensor("add_85_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2103342656)))]; + tensor add_85_epsilon_0_to_fp16 = const()[name = tensor("add_85_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_85_cast = batch_norm(beta = add_85_beta_0_to_fp16, epsilon = add_85_epsilon_0_to_fp16, gamma = add_85_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_169_cast)[name = tensor("add_85_cast")]; + tensor input_853_cast = silu(x = add_85_cast)[name = tensor("input_853_cast")]; + tensor var_13950 = const()[name = tensor("op_13950"), val = tensor([1, 1])]; + tensor var_13952 = const()[name = tensor("op_13952"), val = tensor([1, 1])]; + tensor hidden_states_587_pad_type_0 = const()[name = tensor("hidden_states_587_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_587_pad_0 = const()[name = tensor("hidden_states_587_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_1_conv2_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2103343360)))]; + tensor up_blocks_2_resnets_1_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2105186624)))]; + tensor hidden_states_587_cast = conv(bias = up_blocks_2_resnets_1_conv2_bias_to_fp16, dilations = var_13952, groups = var_13862, pad = hidden_states_587_pad_0, pad_type = hidden_states_587_pad_type_0, strides = var_13950, weight = up_blocks_2_resnets_1_conv2_weight_to_fp16, x = input_853_cast)[name = tensor("hidden_states_587_cast")]; + tensor var_13957 = const()[name = tensor("op_13957"), val = tensor([1, 1])]; + tensor var_13959 = const()[name = tensor("op_13959"), val = tensor([1, 1])]; + tensor x_19_pad_type_0 = const()[name = tensor("x_19_pad_type_0"), val = tensor("custom")]; + tensor x_19_pad_0 = const()[name = tensor("x_19_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_1_conv_shortcut_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_conv_shortcut_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2105187328)))]; + tensor up_blocks_2_resnets_1_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_1_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2105596992)))]; + tensor x_19_cast = conv(bias = up_blocks_2_resnets_1_conv_shortcut_bias_to_fp16, dilations = var_13959, groups = var_13862, pad = x_19_pad_0, pad_type = x_19_pad_type_0, strides = var_13957, weight = up_blocks_2_resnets_1_conv_shortcut_weight_to_fp16, x = input_841_cast)[name = tensor("x_19_cast")]; + tensor hidden_states_589_cast = add(x = x_19_cast, y = hidden_states_587_cast)[name = tensor("hidden_states_589_cast")]; + tensor input_855_interleave_0 = const()[name = tensor("input_855_interleave_0"), val = tensor(false)]; + tensor input_855_cast = concat(axis = var_13862, interleave = input_855_interleave_0, values = (hidden_states_589_cast, input_13_cast))[name = 
tensor("input_855_cast")]; + tensor reshape_172_shape_0 = const()[name = tensor("reshape_172_shape_0"), val = tensor([2, 32, 20, 128, 128])]; + tensor reshape_172_cast = reshape(shape = reshape_172_shape_0, x = input_855_cast)[name = tensor("reshape_172_cast")]; + tensor reduce_mean_129_axes_0 = const()[name = tensor("reduce_mean_129_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_129_keep_dims_0 = const()[name = tensor("reduce_mean_129_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_129_cast = reduce_mean(axes = reduce_mean_129_axes_0, keep_dims = reduce_mean_129_keep_dims_0, x = reshape_172_cast)[name = tensor("reduce_mean_129_cast")]; + tensor sub_86_cast = sub(x = reshape_172_cast, y = reduce_mean_129_cast)[name = tensor("sub_86_cast")]; + tensor square_43_cast = square(x = sub_86_cast)[name = tensor("square_43_cast")]; + tensor reduce_mean_131_axes_0 = const()[name = tensor("reduce_mean_131_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_131_keep_dims_0 = const()[name = tensor("reduce_mean_131_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_131_cast = reduce_mean(axes = reduce_mean_131_axes_0, keep_dims = reduce_mean_131_keep_dims_0, x = square_43_cast)[name = tensor("reduce_mean_131_cast")]; + tensor add_86_y_0_to_fp16 = const()[name = tensor("add_86_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_86_cast = add(x = reduce_mean_131_cast, y = add_86_y_0_to_fp16)[name = tensor("add_86_cast")]; + tensor sqrt_43_cast = sqrt(x = add_86_cast)[name = tensor("sqrt_43_cast")]; + tensor real_div_43_cast = real_div(x = sub_86_cast, y = sqrt_43_cast)[name = tensor("real_div_43_cast")]; + tensor reshape_173_shape_0 = const()[name = tensor("reshape_173_shape_0"), val = tensor([2, 640, 128, 128])]; + tensor reshape_173_cast = reshape(shape = reshape_173_shape_0, x = real_div_43_cast)[name = tensor("reshape_173_cast")]; + tensor add_87_gamma_0_to_fp16 = const()[name = tensor("add_87_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2105597696)))]; + tensor add_87_beta_0_to_fp16 = const()[name = tensor("add_87_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2105599040)))]; + tensor add_87_epsilon_0_to_fp16 = const()[name = tensor("add_87_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_87_cast = batch_norm(beta = add_87_beta_0_to_fp16, epsilon = add_87_epsilon_0_to_fp16, gamma = add_87_gamma_0_to_fp16, mean = add_11_mean_0_to_fp16, variance = add_11_variance_0_to_fp16, x = reshape_173_cast)[name = tensor("add_87_cast")]; + tensor input_859_cast = silu(x = add_87_cast)[name = tensor("input_859_cast")]; + tensor var_13977 = const()[name = tensor("op_13977"), val = tensor([1, 1])]; + tensor var_13979 = const()[name = tensor("op_13979"), val = tensor([1, 1])]; + tensor hidden_states_591_pad_type_0 = const()[name = tensor("hidden_states_591_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_591_pad_0 = const()[name = tensor("hidden_states_591_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_2_conv1_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv1_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2105600384)))]; + tensor up_blocks_2_resnets_2_conv1_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv1_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2109286848)))]; + tensor 
hidden_states_591_cast = conv(bias = up_blocks_2_resnets_2_conv1_bias_to_fp16, dilations = var_13979, groups = var_13862, pad = hidden_states_591_pad_0, pad_type = hidden_states_591_pad_type_0, strides = var_13977, weight = up_blocks_2_resnets_2_conv1_weight_to_fp16, x = input_859_cast)[name = tensor("hidden_states_591_cast")]; + tensor var_13985 = const()[name = tensor("op_13985"), val = tensor([1, 1])]; + tensor var_13987 = const()[name = tensor("op_13987"), val = tensor([1, 1])]; + tensor temb_pad_type_0 = const()[name = tensor("temb_pad_type_0"), val = tensor("custom")]; + tensor temb_pad_0 = const()[name = tensor("temb_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_2_time_emb_proj_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_time_emb_proj_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2109287552)))]; + tensor up_blocks_2_resnets_2_time_emb_proj_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_time_emb_proj_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2110106816)))]; + tensor temb_cast = conv(bias = up_blocks_2_resnets_2_time_emb_proj_bias_to_fp16, dilations = var_13987, groups = var_13862, pad = temb_pad_0, pad_type = temb_pad_type_0, strides = var_13985, weight = up_blocks_2_resnets_2_time_emb_proj_weight_to_fp16, x = input_21_cast)[name = tensor("temb_cast")]; + tensor input_863_cast = add(x = hidden_states_591_cast, y = temb_cast)[name = tensor("input_863_cast")]; + tensor reshape_176_shape_0 = const()[name = tensor("reshape_176_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_176_cast = reshape(shape = reshape_176_shape_0, x = input_863_cast)[name = tensor("reshape_176_cast")]; + tensor reduce_mean_132_axes_0 = const()[name = tensor("reduce_mean_132_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_132_keep_dims_0 = const()[name = tensor("reduce_mean_132_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_132_cast = reduce_mean(axes = reduce_mean_132_axes_0, keep_dims = reduce_mean_132_keep_dims_0, x = reshape_176_cast)[name = tensor("reduce_mean_132_cast")]; + tensor sub_88_cast = sub(x = reshape_176_cast, y = reduce_mean_132_cast)[name = tensor("sub_88_cast")]; + tensor square_44_cast = square(x = sub_88_cast)[name = tensor("square_44_cast")]; + tensor reduce_mean_134_axes_0 = const()[name = tensor("reduce_mean_134_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_134_keep_dims_0 = const()[name = tensor("reduce_mean_134_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_134_cast = reduce_mean(axes = reduce_mean_134_axes_0, keep_dims = reduce_mean_134_keep_dims_0, x = square_44_cast)[name = tensor("reduce_mean_134_cast")]; + tensor add_88_y_0_to_fp16 = const()[name = tensor("add_88_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_88_cast = add(x = reduce_mean_134_cast, y = add_88_y_0_to_fp16)[name = tensor("add_88_cast")]; + tensor sqrt_44_cast = sqrt(x = add_88_cast)[name = tensor("sqrt_44_cast")]; + tensor real_div_44_cast = real_div(x = sub_88_cast, y = sqrt_44_cast)[name = tensor("real_div_44_cast")]; + tensor reshape_177_shape_0 = const()[name = tensor("reshape_177_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_177_cast = reshape(shape = reshape_177_shape_0, x = real_div_44_cast)[name = tensor("reshape_177_cast")]; + tensor add_89_gamma_0_to_fp16 = const()[name = tensor("add_89_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = 
tensor("@model_path/weights/weight.bin"), offset = tensor(2110107520)))]; + tensor add_89_beta_0_to_fp16 = const()[name = tensor("add_89_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2110108224)))]; + tensor add_89_epsilon_0_to_fp16 = const()[name = tensor("add_89_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_89_cast = batch_norm(beta = add_89_beta_0_to_fp16, epsilon = add_89_epsilon_0_to_fp16, gamma = add_89_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_177_cast)[name = tensor("add_89_cast")]; + tensor input_867_cast = silu(x = add_89_cast)[name = tensor("input_867_cast")]; + tensor var_13997 = const()[name = tensor("op_13997"), val = tensor([1, 1])]; + tensor var_13999 = const()[name = tensor("op_13999"), val = tensor([1, 1])]; + tensor hidden_states_pad_type_0 = const()[name = tensor("hidden_states_pad_type_0"), val = tensor("custom")]; + tensor hidden_states_pad_0 = const()[name = tensor("hidden_states_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor up_blocks_2_resnets_2_conv2_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv2_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2110108928)))]; + tensor up_blocks_2_resnets_2_conv2_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv2_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2111952192)))]; + tensor hidden_states_cast = conv(bias = up_blocks_2_resnets_2_conv2_bias_to_fp16, dilations = var_13999, groups = var_13862, pad = hidden_states_pad_0, pad_type = hidden_states_pad_type_0, strides = var_13997, weight = up_blocks_2_resnets_2_conv2_weight_to_fp16, x = input_867_cast)[name = tensor("hidden_states_cast")]; + tensor var_14004 = const()[name = tensor("op_14004"), val = tensor([1, 1])]; + tensor var_14006 = const()[name = tensor("op_14006"), val = tensor([1, 1])]; + tensor x_pad_type_0 = const()[name = tensor("x_pad_type_0"), val = tensor("custom")]; + tensor x_pad_0 = const()[name = tensor("x_pad_0"), val = tensor([0, 0, 0, 0])]; + tensor up_blocks_2_resnets_2_conv_shortcut_weight_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv_shortcut_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2111952896)))]; + tensor up_blocks_2_resnets_2_conv_shortcut_bias_to_fp16 = const()[name = tensor("up_blocks_2_resnets_2_conv_shortcut_bias_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2112362560)))]; + tensor x_cast = conv(bias = up_blocks_2_resnets_2_conv_shortcut_bias_to_fp16, dilations = var_14006, groups = var_13862, pad = x_pad_0, pad_type = x_pad_type_0, strides = var_14004, weight = up_blocks_2_resnets_2_conv_shortcut_weight_to_fp16, x = input_855_cast)[name = tensor("x_cast")]; + tensor input_869_cast = add(x = x_cast, y = hidden_states_cast)[name = tensor("input_869_cast")]; + tensor reshape_180_shape_0 = const()[name = tensor("reshape_180_shape_0"), val = tensor([2, 32, 10, 128, 128])]; + tensor reshape_180_cast = reshape(shape = reshape_180_shape_0, x = input_869_cast)[name = tensor("reshape_180_cast")]; + tensor reduce_mean_135_axes_0 = const()[name = tensor("reduce_mean_135_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_135_keep_dims_0 = const()[name = tensor("reduce_mean_135_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_135_cast = 
reduce_mean(axes = reduce_mean_135_axes_0, keep_dims = reduce_mean_135_keep_dims_0, x = reshape_180_cast)[name = tensor("reduce_mean_135_cast")]; + tensor sub_90_cast = sub(x = reshape_180_cast, y = reduce_mean_135_cast)[name = tensor("sub_90_cast")]; + tensor square_45_cast = square(x = sub_90_cast)[name = tensor("square_45_cast")]; + tensor reduce_mean_137_axes_0 = const()[name = tensor("reduce_mean_137_axes_0"), val = tensor([2, 3, 4])]; + tensor reduce_mean_137_keep_dims_0 = const()[name = tensor("reduce_mean_137_keep_dims_0"), val = tensor(true)]; + tensor reduce_mean_137_cast = reduce_mean(axes = reduce_mean_137_axes_0, keep_dims = reduce_mean_137_keep_dims_0, x = square_45_cast)[name = tensor("reduce_mean_137_cast")]; + tensor add_90_y_0_to_fp16 = const()[name = tensor("add_90_y_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_90_cast = add(x = reduce_mean_137_cast, y = add_90_y_0_to_fp16)[name = tensor("add_90_cast")]; + tensor sqrt_45_cast = sqrt(x = add_90_cast)[name = tensor("sqrt_45_cast")]; + tensor real_div_45_cast = real_div(x = sub_90_cast, y = sqrt_45_cast)[name = tensor("real_div_45_cast")]; + tensor reshape_181_shape_0 = const()[name = tensor("reshape_181_shape_0"), val = tensor([2, 320, 128, 128])]; + tensor reshape_181_cast = reshape(shape = reshape_181_shape_0, x = real_div_45_cast)[name = tensor("reshape_181_cast")]; + tensor add_91_gamma_0_to_fp16 = const()[name = tensor("add_91_gamma_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2112363264)))]; + tensor add_91_beta_0_to_fp16 = const()[name = tensor("add_91_beta_0_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2112363968)))]; + tensor add_91_epsilon_0_to_fp16 = const()[name = tensor("add_91_epsilon_0_to_fp16"), val = tensor(0x1.5p-17)]; + tensor add_91_cast = batch_norm(beta = add_91_beta_0_to_fp16, epsilon = add_91_epsilon_0_to_fp16, gamma = add_91_gamma_0_to_fp16, mean = add_1_mean_0_to_fp16, variance = add_1_variance_0_to_fp16, x = reshape_181_cast)[name = tensor("add_91_cast")]; + tensor input_cast = silu(x = add_91_cast)[name = tensor("input_cast")]; + tensor var_14020 = const()[name = tensor("op_14020"), val = tensor(1)]; + tensor var_14023 = const()[name = tensor("op_14023"), val = tensor([1, 1])]; + tensor var_14025 = const()[name = tensor("op_14025"), val = tensor([1, 1])]; + tensor var_14027_pad_type_0 = const()[name = tensor("op_14027_pad_type_0"), val = tensor("custom")]; + tensor var_14027_pad_0 = const()[name = tensor("op_14027_pad_0"), val = tensor([1, 1, 1, 1])]; + tensor conv_out_weight_to_fp16 = const()[name = tensor("conv_out_weight_to_fp16"), val = tensor(BLOBFILE(path = tensor("@model_path/weights/weight.bin"), offset = tensor(2112364672)))]; + tensor conv_out_bias_to_fp16 = const()[name = tensor("conv_out_bias_to_fp16"), val = tensor([0x1.664p-9, -0x1.72p-10, 0x1.06p-9, -0x1.9b8p-9])]; + tensor var_14027_cast = conv(bias = conv_out_bias_to_fp16, dilations = var_14025, groups = var_14020, pad = var_14027_pad_0, pad_type = var_14027_pad_type_0, strides = var_14023, weight = conv_out_weight_to_fp16, x = input_cast)[name = tensor("op_14027_cast")]; + tensor var_14027_cast_to_fp32_dtype_0 = const()[name = tensor("op_14027_cast_to_fp32_dtype_0"), val = tensor("fp32")]; + tensor noise_pred = cast(dtype = var_14027_cast_to_fp32_dtype_0, x = var_14027_cast)[name = tensor("cast_0")]; + } -> (noise_pred); +} \ No newline at end of file diff --git 
a/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlmodelc/weights/weight.bin b/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlmodelc/weights/weight.bin new file mode 100644 index 0000000000000000000000000000000000000000..5f20f6c486ab1eeb6fff40fd4bbd75492e5ed624 --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlmodelc/weights/weight.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:719ef0ca040ee293f7725a2c421de8dd031ed8c8026c6e23616ee0fdfa71f90c +size 2112387776 diff --git a/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlpackage/Data/com.apple.CoreML/model.mlmodel b/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlpackage/Data/com.apple.CoreML/model.mlmodel new file mode 100644 index 0000000000000000000000000000000000000000..51a84a2154b3396096140c3cbd75640d56c3b24f --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlpackage/Data/com.apple.CoreML/model.mlmodel @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bedd239d138a42c768ce4336148e887555da6609295a34784758d67499a4f5f6 +size 2128383 diff --git a/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlpackage/Data/com.apple.CoreML/weights/weight.bin b/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlpackage/Data/com.apple.CoreML/weights/weight.bin new file mode 100644 index 0000000000000000000000000000000000000000..5f20f6c486ab1eeb6fff40fd4bbd75492e5ed624 --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlpackage/Data/com.apple.CoreML/weights/weight.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:719ef0ca040ee293f7725a2c421de8dd031ed8c8026c6e23616ee0fdfa71f90c +size 2112387776 diff --git a/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlpackage/Manifest.json b/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlpackage/Manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..5dcae9e0e13c6c2af76f1e73a4f8b422995a4f32 --- /dev/null +++ b/unet-mbp-sdxl-1-base/recipe_6_55_bit_mixedpalette/Unet.mlpackage/Manifest.json @@ -0,0 +1,18 @@ +{ + "fileFormatVersion": "1.0.0", + "itemInfoEntries": { + "e5fd326a-3756-4003-bac7-a84d55dca5ad": { + "author": "com.apple.CoreML", + "description": "CoreML Model Weights", + "name": "weights", + "path": "com.apple.CoreML/weights" + }, + "f6f23951-ce1d-43bc-ac13-37805f79102c": { + "author": "com.apple.CoreML", + "description": "CoreML Model Specification", + "name": "model.mlmodel", + "path": "com.apple.CoreML/model.mlmodel" + } + }, + "rootModelIdentifier": "f6f23951-ce1d-43bc-ac13-37805f79102c" +}